From 4fa26dd2d677e9e8383258f7ac88339f0ef6c781 Mon Sep 17 00:00:00 2001
From: Michael Fabian 'Xaymar' Dirks
Date: Sun, 28 Aug 2022 13:44:00 +0200
Subject: [PATCH] code: Always initialize or cast to correct type

---
 source/encoders/codecs/h264.cpp               |  4 ++--
 source/encoders/encoder-aom-av1.cpp           | 20 +++++++++----------
 source/encoders/encoder-ffmpeg.cpp            |  2 +-
 source/filters/filter-autoframing.cpp         | 10 +++++++---
 source/filters/filter-blur.cpp                |  6 +++---
 source/gfx/blur/gfx-blur-dual-filtering.cpp   | 10 ++++++----
 source/gfx/gfx-debug.cpp                      |  8 ++++----
 source/nvidia/ar/nvidia-ar-feature.hpp        |  2 +-
 .../nvidia/vfx/nvidia-vfx-superresolution.cpp |  6 +++---
 source/obs/gs/gs-mipmapper.cpp                |  6 ++++--
 source/updater.cpp                            |  8 ++++----
 source/util/util-logging.cpp                  |  2 +-
 source/util/util-platform.cpp                 |  8 ++++----
 13 files changed, 50 insertions(+), 42 deletions(-)

diff --git a/source/encoders/codecs/h264.cpp b/source/encoders/codecs/h264.cpp
index 5ca26e2a..c69826ab 100644
--- a/source/encoders/codecs/h264.cpp
+++ b/source/encoders/codecs/h264.cpp
@@ -72,9 +72,9 @@ uint32_t streamfx::encoder::codec::h264::get_packet_reference_count(uint8_t* ptr
 	// Try and figure out the ideal priority.
 	switch (static_cast<nal_unit_type>((*nal_ptr) & 0x5)) {
 	case nal_unit_type::CODED_SLICE_NONIDR:
-		return (*nal_ptr >> 5) & 0x2;
+		return static_cast<uint32_t>((*nal_ptr >> 5) & 0x2);
 	case nal_unit_type::CODED_SLICE_IDR:
-		return (*nal_ptr >> 5) & 0x2;
+		return static_cast<uint32_t>((*nal_ptr >> 5) & 0x2);
 	default:
 		break;
 	}
diff --git a/source/encoders/encoder-aom-av1.cpp b/source/encoders/encoder-aom-av1.cpp
index b834e13f..e66323a4 100644
--- a/source/encoders/encoder-aom-av1.cpp
+++ b/source/encoders/encoder-aom-av1.cpp
@@ -420,7 +420,7 @@ aom_av1_instance::aom_av1_instance(obs_data_t* settings, obs_encoder_t* self, bo
 	if (auto threads = obs_data_get_int(settings, ST_KEY_ADVANCED_THREADS); threads > 0) {
 		_settings.threads = static_cast(threads);
 	} else {
-		_settings.threads = std::thread::hardware_concurrency();
+		_settings.threads = static_cast(std::thread::hardware_concurrency());
 	}

 	_settings.rowmultithreading = static_cast(obs_data_get_int(settings, ST_KEY_ADVANCED_ROWMULTITHREADING));
@@ -664,16 +664,16 @@ bool aom_av1_instance::update(obs_data_t* settings)
 	}

 	{ // Rate Control
-		_settings.rc_bitrate = static_cast(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_BITRATE));
+		_settings.rc_bitrate = static_cast(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_BITRATE));
 		_settings.rc_bitrate_overshoot =
 			static_cast(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_BITRATE_UNDERSHOOT));
 		_settings.rc_bitrate_undershoot =
 			static_cast(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_BITRATE_OVERSHOOT));
-		_settings.rc_quality = static_cast(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_QUALITY));
+		_settings.rc_quality = static_cast(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_QUALITY));
 		_settings.rc_quantizer_min =
-			static_cast(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_QUANTIZER_MINIMUM));
+			static_cast(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_QUANTIZER_MINIMUM));
 		_settings.rc_quantizer_max =
-			static_cast(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_QUANTIZER_MAXIMUM));
+			static_cast(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_QUANTIZER_MAXIMUM));
 		_settings.rc_buffer_ms = static_cast(obs_data_get_int(settings, ST_KEY_RATECONTROL_BUFFER_SIZE));
 		_settings.rc_buffer_initial_ms =
 			static_cast(obs_data_get_int(settings, ST_KEY_RATECONTROL_BUFFER_SIZE_INITIAL));
@@ -687,12 +687,12 @@ bool aom_av1_instance::update(obs_data_t* settings)
 		_settings.kf_mode = AOM_KF_AUTO;
 		if (is_seconds) {
-			_settings.kf_distance_max = static_cast(
+			_settings.kf_distance_max = static_cast(
 				std::lround(obs_data_get_double(settings, ST_KEY_KEYFRAMES_INTERVAL_SECONDS)
 							* static_cast(obsFPSnum) / static_cast(obsFPSden)));
 		} else {
 			_settings.kf_distance_max =
-				static_cast(obs_data_get_int(settings, ST_KEY_KEYFRAMES_INTERVAL_FRAMES));
+				static_cast(obs_data_get_int(settings, ST_KEY_KEYFRAMES_INTERVAL_FRAMES));
 		}
 		_settings.kf_distance_min = _settings.kf_distance_max;
 	}
@@ -719,15 +719,15 @@ bool aom_av1_instance::update(obs_data_t* settings)
 		_cfg.g_h = _settings.height;

 		// Time Base (Rate is inverted Time Base)
-		_cfg.g_timebase.num = _settings.fps.den;
-		_cfg.g_timebase.den = _settings.fps.num;
+		_cfg.g_timebase.num = static_cast<int>(_settings.fps.den);
+		_cfg.g_timebase.den = static_cast<int>(_settings.fps.num);

 		// !INFO: Whenever OBS decides to support anything but 8-bits, let me know.
 		_cfg.g_bit_depth = AOM_BITS_8;
 		_cfg.g_input_bit_depth = AOM_BITS_8;

 		// Monochrome color
-		_cfg.monochrome = _settings.monochrome ? 1 : 0;
+		_cfg.monochrome = _settings.monochrome ? 1u : 0u;
 	}

 	{ // Encoder
diff --git a/source/encoders/encoder-ffmpeg.cpp b/source/encoders/encoder-ffmpeg.cpp
index 78ab4705..18726c84 100644
--- a/source/encoders/encoder-ffmpeg.cpp
+++ b/source/encoders/encoder-ffmpeg.cpp
@@ -671,7 +671,7 @@ int ffmpeg_instance::receive_packet(bool* received_packet, struct encoder_packet
 	// In theory, this is done by OBS, but its not doing a great job.
 	packet->priority = packet->keyframe ? 3 : 2;
 	packet->drop_priority = 3;
-	for (size_t idx = 0, edx = _packet->side_data_elems; idx < edx; idx++) {
+	for (size_t idx = 0, edx = static_cast<size_t>(_packet->side_data_elems); idx < edx; idx++) {
 		auto& side_data = _packet->side_data[idx];
 		if (side_data.type == AV_PKT_DATA_QUALITY_STATS) {
 			// Decisions based on picture type, if present.
diff --git a/source/filters/filter-autoframing.cpp b/source/filters/filter-autoframing.cpp
index 4b7e3b05..dd79ba51 100644
--- a/source/filters/filter-autoframing.cpp
+++ b/source/filters/filter-autoframing.cpp
@@ -212,7 +212,7 @@ autoframing_instance::autoframing_instance(obs_data_t* data, obs_source_t* self)
 		std::make_shared<::streamfx::obs::gs::effect>(::streamfx::data_file_path("effects/standard.effect"));

 	// Create the Vertex Buffer for rendering.
-	_vb = std::make_shared<::streamfx::obs::gs::vertex_buffer>(4u, 1u);
+	_vb = std::make_shared<::streamfx::obs::gs::vertex_buffer>(uint32_t{4}, uint8_t{1});
 	vec3_set(_vb->at(0).position, 0, 0, 0);
 	vec3_set(_vb->at(1).position, 1, 0, 0);
 	vec3_set(_vb->at(2).position, 0, 1, 0);
@@ -432,9 +432,13 @@ void autoframing_instance::video_tick(float_t seconds)
 	_out_size = _size;
 	if (_frame_aspect_ratio > 0.0) {
 		if (width > height) {
-			_out_size.first = std::lroundf(static_cast(_out_size.second) * _frame_aspect_ratio);
+			_out_size.first =
+				static_cast(std::lroundf(static_cast(_out_size.second) * _frame_aspect_ratio), 0,
+							std::numeric_limits::max());
 		} else {
-			_out_size.second = std::lroundf(static_cast(_out_size.first) * _frame_aspect_ratio);
+			_out_size.second =
+				static_cast(std::lroundf(static_cast(_out_size.first) * _frame_aspect_ratio), 0,
+							std::numeric_limits::max());
 		}
 	}
 }
diff --git a/source/filters/filter-blur.cpp b/source/filters/filter-blur.cpp
index 64c8e77c..5156f217 100644
--- a/source/filters/filter-blur.cpp
+++ b/source/filters/filter-blur.cpp
@@ -331,9 +331,9 @@ void blur_instance::update(obs_data_t* settings)
 	}
 	if ((_mask.type == mask_type::Image) || (_mask.type == mask_type::Source)) {
 		uint32_t color = static_cast<uint32_t>(obs_data_get_int(settings, ST_KEY_MASK_COLOR));
-		_mask.color.r = ((color >> 0) & 0xFF) / 255.0f;
-		_mask.color.g = ((color >> 8) & 0xFF) / 255.0f;
-		_mask.color.b = ((color >> 16) & 0xFF) / 255.0f;
+		_mask.color.r = static_cast((color >> 0) & 0xFF) / 255.0f;
+		_mask.color.g = static_cast((color >> 8) & 0xFF) / 255.0f;
+		_mask.color.b = static_cast((color >> 16) & 0xFF) / 255.0f;
 		_mask.color.a = static_cast(obs_data_get_double(settings, ST_KEY_MASK_ALPHA));
 		_mask.multiplier = float_t(obs_data_get_double(settings, ST_KEY_MASK_MULTIPLIER));
 	}
diff --git a/source/gfx/blur/gfx-blur-dual-filtering.cpp b/source/gfx/blur/gfx-blur-dual-filtering.cpp
index 6cddb3ec..f4cc372b 100644
--- a/source/gfx/blur/gfx-blur-dual-filtering.cpp
+++ b/source/gfx/blur/gfx-blur-dual-filtering.cpp
@@ -279,8 +279,9 @@ std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::dual_filterin
 		// Apply
 		effect.get_parameter("pImage").set_texture(tex);
-		effect.get_parameter("pImageSize").set_float2(float_t(owidth), float_t(oheight));
-		effect.get_parameter("pImageTexel").set_float2(0.5f / owidth, 0.5f / oheight);
+		effect.get_parameter("pImageSize").set_float2(static_cast(owidth), static_cast(oheight));
+		effect.get_parameter("pImageTexel")
+			.set_float2(0.5f / static_cast(owidth), 0.5f / static_cast(oheight));

 		{
 			auto op = _rts[n]->render(owidth, oheight);
@@ -308,8 +309,9 @@ std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::dual_filterin
 		// Apply
 		effect.get_parameter("pImage").set_texture(tex);
-		effect.get_parameter("pImageSize").set_float2(float_t(iwidth), float_t(iheight));
-		effect.get_parameter("pImageTexel").set_float2(0.5f / iwidth, 0.5f / iheight);
+		effect.get_parameter("pImageSize").set_float2(static_cast(iwidth), static_cast(iheight));
+		effect.get_parameter("pImageTexel")
+			.set_float2(0.5f / static_cast(iwidth), 0.5f / static_cast(iheight));

 		{
 			auto op = _rts[n - 1]->render(owidth, oheight);
diff --git a/source/gfx/gfx-debug.cpp b/source/gfx/gfx-debug.cpp
index 5ce77d3a..77eb8ce8 100644
--- a/source/gfx/gfx-debug.cpp
+++ b/source/gfx/gfx-debug.cpp
@@ -80,7 +80,7 @@ void streamfx::gfx::debug::draw_point(float x, float y, uint32_t color)
 	obs::gs::context gctx{};

 	if (!_point_vb) {
-		_point_vb = std::make_shared(1u, 1u);
+		_point_vb = std::make_shared(uint32_t{1}, uint8_t{1});
 	}

 	{
@@ -102,7 +102,7 @@ void streamfx::gfx::debug::draw_line(float x, float y, float x2, float y2, uint3
 	obs::gs::context gctx{};

 	if (!_line_vb) {
-		_line_vb = std::make_shared(2u, 1u);
+		_line_vb = std::make_shared(uint32_t{2}, uint8_t{1});
 	}

 	{
@@ -130,7 +130,7 @@ void streamfx::gfx::debug::draw_arrow(float x, float y, float x2, float y2, floa
 	obs::gs::context gctx{};

 	if (!_arrow_vb) {
-		_arrow_vb = std::make_shared(5u, 1u);
+		_arrow_vb = std::make_shared(uint32_t{5}, uint8_t{1});
 	}

 	float dx = x2 - x;
@@ -198,7 +198,7 @@ void streamfx::gfx::debug::draw_rectangle(float x, float y, float w, float h, bo
 	obs::gs::context gctx{};

 	if (!_quad_vb) {
-		_quad_vb = std::make_shared(5u, 1u);
+		_quad_vb = std::make_shared(uint32_t{5}, uint8_t{1});
 	}

 	if (frame) {
diff --git a/source/nvidia/ar/nvidia-ar-feature.hpp b/source/nvidia/ar/nvidia-ar-feature.hpp
index 773591ec..44cda225 100644
--- a/source/nvidia/ar/nvidia-ar-feature.hpp
+++ b/source/nvidia/ar/nvidia-ar-feature.hpp
@@ -104,7 +104,7 @@ namespace streamfx::nvidia::ar {
 			result = _nvar->NvAR_GetF32Array(_fx.get(), param, &data, &size);

-			value.resize(size);
+			value.resize(static_cast<size_t>(size));
 			memcpy(value.data(), data, size * sizeof(float));

 			return result;
diff --git a/source/nvidia/vfx/nvidia-vfx-superresolution.cpp b/source/nvidia/vfx/nvidia-vfx-superresolution.cpp
index 57e89a71..eefc9f30 100644
--- a/source/nvidia/vfx/nvidia-vfx-superresolution.cpp
+++ b/source/nvidia/vfx/nvidia-vfx-superresolution.cpp
@@ -114,7 +114,7 @@ void streamfx::nvidia::vfx::superresolution::set_strength(float strength)
 	_dirty = true;

 	// Update Effect
-	uint32_t value = (_strength >= .5f) ? 1 : 0;
+	uint32_t value = (_strength >= .5f) ? 1u : 0u;
 	auto gctx = ::streamfx::obs::gs::context();
 	auto cctx = ::streamfx::nvidia::cuda::obs::get()->get_context()->enter();
 	if (auto res = set(::streamfx::nvidia::vfx::PARAMETER_STRENGTH, value);
@@ -198,8 +198,8 @@ void streamfx::nvidia::vfx::superresolution::size(std::pair
 	}

 	// Calculate Output Size.
-	output_size.first = static_cast(std::lround(input_size.first * _scale));
-	output_size.second = static_cast(std::lround(input_size.second * _scale));
+	output_size.first = static_cast(std::lround(static_cast(input_size.first) * _scale));
+	output_size.second = static_cast(std::lround(static_cast(input_size.second) * _scale));

 	// Verify that this is a valid scale factor.
 	float width_mul = (static_cast(output_size.first) / static_cast(input_size.first));
diff --git a/source/obs/gs/gs-mipmapper.cpp b/source/obs/gs/gs-mipmapper.cpp
index 8e9d3095..b9eada3a 100644
--- a/source/obs/gs/gs-mipmapper.cpp
+++ b/source/obs/gs/gs-mipmapper.cpp
@@ -168,7 +168,8 @@ void opengl_copy_subregion(opengl_info& info, std::shared_ptr
-	glCopyTexSubImage2D(GL_TEXTURE_2D, mip_level, 0, 0, 0, 0, width, height);
+	glCopyTexSubImage2D(GL_TEXTURE_2D, static_cast<GLint>(mip_level), 0, 0, 0, 0, static_cast<GLsizei>(width),
+						static_cast<GLsizei>(height));
 	D_OPENGL_CHECK_ERROR("glCopyTexSubImage2D(GL_TEXTURE_2D, mip_level, 0, 0, 0, 0, width, height);");

 	// Target -/-> Texture Unit 1
@@ -212,7 +213,8 @@ streamfx::obs::gs::mipmapper::mipmapper()

 uint32_t streamfx::obs::gs::mipmapper::calculate_max_mip_level(uint32_t width, uint32_t height)
 {
-	return static_cast<uint32_t>(1 + std::lroundl(floor(log2(std::max(width, height)))));
+	return static_cast<uint32_t>(
+		1 + std::lroundl(floor(log2(std::max(static_cast(width), static_cast(height))))));
 }

 void streamfx::obs::gs::mipmapper::rebuild(std::shared_ptr source,
diff --git a/source/updater.cpp b/source/updater.cpp
index f4e82bc5..75d14c1b 100644
--- a/source/updater.cpp
+++ b/source/updater.cpp
@@ -215,12 +215,12 @@ streamfx::version_info::operator std::string()
 {
 	std::vector<char> buffer(25, 0);
 	if (stage != version_stage::STABLE) {
-		auto   types = stage_to_string(stage);
-		size_t len   = snprintf(buffer.data(), buffer.size(), "%" PRIu16 ".%" PRIu16 ".%" PRIu16 "%.1s%" PRIu16, major,
-							 minor, patch, types.data(), tweak);
+		auto types = stage_to_string(stage);
+		int  len   = snprintf(buffer.data(), buffer.size(), "%" PRIu16 ".%" PRIu16 ".%" PRIu16 "%.1s%" PRIu16, major,
+						   minor, patch, types.data(), tweak);
 		return std::string(buffer.data(), buffer.data() + len);
 	} else {
-		size_t len = snprintf(buffer.data(), buffer.size(), "%" PRIu16 ".%" PRIu16 ".%" PRIu16, major, minor, patch);
+		int len = snprintf(buffer.data(), buffer.size(), "%" PRIu16 ".%" PRIu16 ".%" PRIu16, major, minor, patch);
 		return std::string(buffer.data(), buffer.data() + len);
 	}
 }
diff --git a/source/util/util-logging.cpp b/source/util/util-logging.cpp
index 8c3cfa70..01649136 100644
--- a/source/util/util-logging.cpp
+++ b/source/util/util-logging.cpp
@@ -37,7 +37,7 @@ void streamfx::util::logging::log(level lvl, const char* format, ...)
 	va_list vargs_copy;
 	va_copy(vargs_copy, vargs);
 	int32_t ret = vsnprintf(buffer.data(), buffer.size(), format, vargs);
-	buffer.resize(ret + 1);
+	buffer.resize(static_cast<size_t>(ret) + 1);
 	ret = vsnprintf(buffer.data(), buffer.size(), format, vargs_copy);
 	va_end(vargs);
diff --git a/source/util/util-platform.cpp b/source/util/util-platform.cpp
index 56e63f50..0fe41e94 100644
--- a/source/util/util-platform.cpp
+++ b/source/util/util-platform.cpp
@@ -45,8 +45,8 @@ std::string streamfx::util::platform::native_to_utf8(std::wstring const& v)
 {
 	std::vector<char> buffer((v.length() + 1) * 4, 0);

-	DWORD res = WideCharToMultiByte(CP_UTF8, 0, v.c_str(), static_cast<int>(v.length()), buffer.data(),
-									static_cast<int>(buffer.size()), nullptr, nullptr);
+	int res = WideCharToMultiByte(CP_UTF8, 0, v.c_str(), static_cast<int>(v.length()), buffer.data(),
+								  static_cast<int>(buffer.size()), nullptr, nullptr);
 	if (res == 0) {
 		D_LOG_WARNING("Failed to convert '%ls' to UTF-8 format.", v.c_str());
 		throw std::runtime_error("Failed to convert Windows-native to UTF-8.");
@@ -66,8 +66,8 @@ std::wstring streamfx::util::platform::utf8_to_native(std::string const& v)
 {
 	std::vector<wchar_t> buffer(v.length() + 1, 0);

-	DWORD res = MultiByteToWideChar(CP_UTF8, 0, v.c_str(), static_cast<int>(v.length()), buffer.data(),
-									static_cast<int>(buffer.size()));
+	int res = MultiByteToWideChar(CP_UTF8, 0, v.c_str(), static_cast<int>(v.length()), buffer.data(),
+								  static_cast<int>(buffer.size()));
 	if (res == 0) {
 		D_LOG_WARNING("Failed to convert '%s' to native format.", v.c_str());
 		throw std::runtime_error("Failed to convert UTF-8 to Windows-native.");
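
Every hunk above applies the same pattern: a value that arrives as a wider or differently-typed result (obs_data_get_int() returns int64_t, obs_data_get_double() returns double, snprintf() returns int, std::thread::hardware_concurrency() returns unsigned int) is converted to the declared type of its destination with an explicit static_cast, and literals are written with the exact type the receiving parameter expects (1u, uint32_t{4}, uint8_t{1}). The sketch below illustrates that pattern in isolation; the settings_t struct and the two read_* helpers are hypothetical stand-ins for illustration only, not types or functions from this repository.

#include <algorithm>
#include <cstdint>

// Hypothetical stand-ins for the kinds of sources the patch deals with:
// obs_data_get_int() returns int64_t, obs_data_get_double() returns double.
int64_t read_int_option()
{
	return 8;
}
double read_double_option()
{
	return 0.25;
}

struct settings_t {
	uint32_t threads   = 0;   // fixed-width member, explicitly initialized
	float    mask_gain = 0.f; // initialized with a float literal, not a double
};

settings_t load_settings()
{
	settings_t s;
	// Narrowing conversions are spelled out with static_cast so the target
	// type is visible at the call site and implicit-conversion warnings
	// (e.g. -Wconversion) stay quiet.
	s.threads   = static_cast<uint32_t>(std::max<int64_t>(read_int_option(), 0));
	s.mask_gain = static_cast<float>(read_double_option());
	return s;
}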