code: Always initialize or cast to correct type

Michael Fabian 'Xaymar' Dirks 2022-08-28 13:44:00 +02:00
parent de703867e6
commit 4fa26dd2d6
13 changed files with 50 additions and 42 deletions


@@ -72,9 +72,9 @@ uint32_t streamfx::encoder::codec::h264::get_packet_reference_count(uint8_t* ptr
 	// Try and figure out the ideal priority.
 	switch (static_cast<nal_unit_type>((*nal_ptr) & 0x5)) {
 	case nal_unit_type::CODED_SLICE_NONIDR:
-		return (*nal_ptr >> 5) & 0x2;
+		return static_cast<uint32_t>((*nal_ptr >> 5) & 0x2);
 	case nal_unit_type::CODED_SLICE_IDR:
-		return (*nal_ptr >> 5) & 0x2;
+		return static_cast<uint32_t>((*nal_ptr >> 5) & 0x2);
 	default:
 		break;
 	}
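
The masks above ((*nal_ptr) & 0x5 and (*nal_ptr >> 5) & 0x2) are narrower than the fields defined by the H.264 NAL unit header, where the low five bits carry nal_unit_type and the two bits above them carry nal_ref_idc. A spec-accurate extraction, as a sketch only and not part of this commit:

#include <cstdint>

// Sketch: H.264 NAL unit header fields (forbidden_zero_bit | nal_ref_idc | nal_unit_type).
// Not code from this commit.
struct nal_header_fields {
	uint8_t ref_idc;   // bits 6..5, 0 means the NAL unit is discardable
	uint8_t unit_type; // bits 4..0, e.g. 1 = non-IDR coded slice, 5 = IDR coded slice
};

inline nal_header_fields parse_nal_header(uint8_t header_byte)
{
	nal_header_fields fields{};
	fields.ref_idc   = static_cast<uint8_t>((header_byte >> 5) & 0x3);
	fields.unit_type = static_cast<uint8_t>(header_byte & 0x1F);
	return fields;
}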


@@ -420,7 +420,7 @@ aom_av1_instance::aom_av1_instance(obs_data_t* settings, obs_encoder_t* self, bo
 	if (auto threads = obs_data_get_int(settings, ST_KEY_ADVANCED_THREADS); threads > 0) {
 		_settings.threads = static_cast<int8_t>(threads);
 	} else {
-		_settings.threads = std::thread::hardware_concurrency();
+		_settings.threads = static_cast<int8_t>(std::thread::hardware_concurrency());
 	}
 	_settings.rowmultithreading =
 		static_cast<int8_t>(obs_data_get_int(settings, ST_KEY_ADVANCED_ROWMULTITHREADING));
@@ -664,16 +664,16 @@ bool aom_av1_instance::update(obs_data_t* settings)
 	}
 	{ // Rate Control
-		_settings.rc_bitrate = static_cast<int32_t>(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_BITRATE));
+		_settings.rc_bitrate = static_cast<int8_t>(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_BITRATE));
 		_settings.rc_bitrate_overshoot =
 			static_cast<int32_t>(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_BITRATE_UNDERSHOOT));
 		_settings.rc_bitrate_undershoot =
 			static_cast<int32_t>(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_BITRATE_OVERSHOOT));
-		_settings.rc_quality = static_cast<int32_t>(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_QUALITY));
+		_settings.rc_quality = static_cast<int8_t>(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_QUALITY));
 		_settings.rc_quantizer_min =
-			static_cast<int32_t>(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_QUANTIZER_MINIMUM));
+			static_cast<int8_t>(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_QUANTIZER_MINIMUM));
 		_settings.rc_quantizer_max =
-			static_cast<int32_t>(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_QUANTIZER_MAXIMUM));
+			static_cast<int8_t>(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_QUANTIZER_MAXIMUM));
 		_settings.rc_buffer_ms = static_cast<int32_t>(obs_data_get_int(settings, ST_KEY_RATECONTROL_BUFFER_SIZE));
 		_settings.rc_buffer_initial_ms =
 			static_cast<int32_t>(obs_data_get_int(settings, ST_KEY_RATECONTROL_BUFFER_SIZE_INITIAL));
@@ -687,12 +687,12 @@ bool aom_av1_instance::update(obs_data_t* settings)
 		_settings.kf_mode = AOM_KF_AUTO;
 		if (is_seconds) {
-			_settings.kf_distance_max = static_cast<unsigned int>(
+			_settings.kf_distance_max = static_cast<int32_t>(
 				std::lround(obs_data_get_double(settings, ST_KEY_KEYFRAMES_INTERVAL_SECONDS)
 							* static_cast<double>(obsFPSnum) / static_cast<double>(obsFPSden)));
 		} else {
 			_settings.kf_distance_max =
-				static_cast<unsigned int>(obs_data_get_int(settings, ST_KEY_KEYFRAMES_INTERVAL_FRAMES));
+				static_cast<int32_t>(obs_data_get_int(settings, ST_KEY_KEYFRAMES_INTERVAL_FRAMES));
 		}
 		_settings.kf_distance_min = _settings.kf_distance_max;
 	}
@@ -719,15 +719,15 @@ bool aom_av1_instance::update(obs_data_t* settings)
 		_cfg.g_h = _settings.height;
 		// Time Base (Rate is inverted Time Base)
-		_cfg.g_timebase.num = _settings.fps.den;
-		_cfg.g_timebase.den = _settings.fps.num;
+		_cfg.g_timebase.num = static_cast<int>(_settings.fps.den);
+		_cfg.g_timebase.den = static_cast<int>(_settings.fps.num);
 		// !INFO: Whenever OBS decides to support anything but 8-bits, let me know.
 		_cfg.g_bit_depth = AOM_BITS_8;
 		_cfg.g_input_bit_depth = AOM_BITS_8;
 		// Monochrome color
-		_cfg.monochrome = _settings.monochrome ? 1 : 0;
+		_cfg.monochrome = _settings.monochrome ? 1u : 0u;
 	}
 	{ // Encoder
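
obs_data_get_int() in libobs returns a long long, so every assignment into the int8_t/int32_t fields of the settings struct narrows; the commit makes each narrowing explicit with static_cast. A range-checked alternative, sketched with a hypothetical get_narrow<> helper that is not part of StreamFX:

#include <algorithm>
#include <cstdint>
#include <limits>

// Sketch: clamp a wide integer into a narrower type before casting,
// instead of a bare static_cast. Hypothetical helper, not committed code.
template<typename Narrow, typename Wide>
Narrow get_narrow(Wide value)
{
	const Wide lo = static_cast<Wide>(std::numeric_limits<Narrow>::lowest());
	const Wide hi = static_cast<Wide>(std::numeric_limits<Narrow>::max());
	return static_cast<Narrow>(std::clamp(value, lo, hi));
}

// Usage with a long long from obs_data_get_int():
// _settings.rc_quantizer_min = get_narrow<int8_t>(obs_data_get_int(settings, ST_KEY_RATECONTROL_LIMITS_QUANTIZER_MINIMUM));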


@@ -671,7 +671,7 @@ int ffmpeg_instance::receive_packet(bool* received_packet, struct encoder_packet
 	// In theory, this is done by OBS, but its not doing a great job.
 	packet->priority = packet->keyframe ? 3 : 2;
 	packet->drop_priority = 3;
-	for (size_t idx = 0, edx = _packet->side_data_elems; idx < edx; idx++) {
+	for (size_t idx = 0, edx = static_cast<size_t>(_packet->side_data_elems); idx < edx; idx++) {
 		auto& side_data = _packet->side_data[idx];
 		if (side_data.type == AV_PKT_DATA_QUALITY_STATS) {
 			// Decisions based on picture type, if present.


@@ -212,7 +212,7 @@ autoframing_instance::autoframing_instance(obs_data_t* data, obs_source_t* self)
 		std::make_shared<::streamfx::obs::gs::effect>(::streamfx::data_file_path("effects/standard.effect"));
 	// Create the Vertex Buffer for rendering.
-	_vb = std::make_shared<::streamfx::obs::gs::vertex_buffer>(4u, 1u);
+	_vb = std::make_shared<::streamfx::obs::gs::vertex_buffer>(uint32_t{4}, uint8_t{1});
 	vec3_set(_vb->at(0).position, 0, 0, 0);
 	vec3_set(_vb->at(1).position, 1, 0, 0);
 	vec3_set(_vb->at(2).position, 0, 1, 0);
@@ -432,9 +432,13 @@ void autoframing_instance::video_tick(float_t seconds)
 	_out_size = _size;
 	if (_frame_aspect_ratio > 0.0) {
 		if (width > height) {
-			_out_size.first = std::lroundf(static_cast<float>(_out_size.second) * _frame_aspect_ratio);
+			_out_size.first =
+				static_cast<uint32_t>(std::lroundf(static_cast<float>(_out_size.second) * _frame_aspect_ratio), 0,
+									  std::numeric_limits<uint32_t>::max());
 		} else {
-			_out_size.second = std::lroundf(static_cast<float>(_out_size.first) * _frame_aspect_ratio);
+			_out_size.second =
+				static_cast<uint32_t>(std::lroundf(static_cast<float>(_out_size.first) * _frame_aspect_ratio), 0,
+									  std::numeric_limits<uint32_t>::max());
 		}
 	}
 }
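
In the three-argument static_cast<uint32_t>(...) added above, the parenthesized list is parsed as a comma expression, so only the last operand (numeric_limits<uint32_t>::max()) actually feeds the cast. If the intent was to round and clamp into uint32_t range, a sketch of that intent (an assumption, not what this commit contains) would be:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

// Sketch: round a float and clamp it into uint32_t range before casting.
// Assumed intent only; not the committed code.
inline uint32_t round_to_u32(float value)
{
	const int64_t rounded = std::llround(value);
	const int64_t clamped = std::clamp<int64_t>(rounded, 0, std::numeric_limits<uint32_t>::max());
	return static_cast<uint32_t>(clamped);
}

// _out_size.first = round_to_u32(static_cast<float>(_out_size.second) * _frame_aspect_ratio);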


@@ -331,9 +331,9 @@ void blur_instance::update(obs_data_t* settings)
 	}
 	if ((_mask.type == mask_type::Image) || (_mask.type == mask_type::Source)) {
 		uint32_t color = static_cast<uint32_t>(obs_data_get_int(settings, ST_KEY_MASK_COLOR));
-		_mask.color.r = ((color >> 0) & 0xFF) / 255.0f;
-		_mask.color.g = ((color >> 8) & 0xFF) / 255.0f;
-		_mask.color.b = ((color >> 16) & 0xFF) / 255.0f;
+		_mask.color.r = static_cast<float>((color >> 0) & 0xFF) / 255.0f;
+		_mask.color.g = static_cast<float>((color >> 8) & 0xFF) / 255.0f;
+		_mask.color.b = static_cast<float>((color >> 16) & 0xFF) / 255.0f;
 		_mask.color.a = static_cast<float_t>(obs_data_get_double(settings, ST_KEY_MASK_ALPHA));
 		_mask.multiplier = float_t(obs_data_get_double(settings, ST_KEY_MASK_MULTIPLIER));
 	}
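
The mask color arrives as a packed 32-bit integer with red in the lowest byte, and each 8-bit channel is now cast to float before normalization to 0..1. A self-contained sketch of the same unpacking, including an alpha byte in bits 24..31 (an assumption, since the commit reads alpha from a separate setting):

#include <cstdint>

struct rgba_f {
	float r, g, b, a;
};

// Sketch: unpack a packed 32-bit color (red in the lowest byte) into normalized floats.
// Not part of the commit.
inline rgba_f unpack_color(uint32_t color)
{
	rgba_f out{};
	out.r = static_cast<float>((color >> 0) & 0xFF) / 255.0f;
	out.g = static_cast<float>((color >> 8) & 0xFF) / 255.0f;
	out.b = static_cast<float>((color >> 16) & 0xFF) / 255.0f;
	out.a = static_cast<float>((color >> 24) & 0xFF) / 255.0f;
	return out;
}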


@@ -279,8 +279,9 @@ std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::dual_filterin
 		// Apply
 		effect.get_parameter("pImage").set_texture(tex);
-		effect.get_parameter("pImageSize").set_float2(float_t(owidth), float_t(oheight));
-		effect.get_parameter("pImageTexel").set_float2(0.5f / owidth, 0.5f / oheight);
+		effect.get_parameter("pImageSize").set_float2(static_cast<float>(owidth), static_cast<float>(oheight));
+		effect.get_parameter("pImageTexel")
+			.set_float2(0.5f / static_cast<float>(owidth), 0.5f / static_cast<float>(oheight));
 		{
 			auto op = _rts[n]->render(owidth, oheight);
@@ -308,8 +309,9 @@ std::shared_ptr<::streamfx::obs::gs::texture> streamfx::gfx::blur::dual_filterin
 		// Apply
 		effect.get_parameter("pImage").set_texture(tex);
-		effect.get_parameter("pImageSize").set_float2(float_t(iwidth), float_t(iheight));
-		effect.get_parameter("pImageTexel").set_float2(0.5f / iwidth, 0.5f / iheight);
+		effect.get_parameter("pImageSize").set_float2(static_cast<float>(iwidth), static_cast<float>(iheight));
+		effect.get_parameter("pImageTexel")
+			.set_float2(0.5f / static_cast<float>(iwidth), 0.5f / static_cast<float>(iheight));
 		{
 			auto op = _rts[n - 1]->render(owidth, oheight);


@@ -80,7 +80,7 @@ void streamfx::gfx::debug::draw_point(float x, float y, uint32_t color)
 	obs::gs::context gctx{};
 	if (!_point_vb) {
-		_point_vb = std::make_shared<obs::gs::vertex_buffer>(1u, 1u);
+		_point_vb = std::make_shared<obs::gs::vertex_buffer>(uint32_t{1}, uint8_t{1});
 	}
 	{
@@ -102,7 +102,7 @@ void streamfx::gfx::debug::draw_line(float x, float y, float x2, float y2, uint3
 	obs::gs::context gctx{};
 	if (!_line_vb) {
-		_line_vb = std::make_shared<obs::gs::vertex_buffer>(2u, 1u);
+		_line_vb = std::make_shared<obs::gs::vertex_buffer>(uint32_t{2}, uint8_t{1});
 	}
 	{
@@ -130,7 +130,7 @@ void streamfx::gfx::debug::draw_arrow(float x, float y, float x2, float y2, floa
 	obs::gs::context gctx{};
 	if (!_arrow_vb) {
-		_arrow_vb = std::make_shared<obs::gs::vertex_buffer>(5u, 1u);
+		_arrow_vb = std::make_shared<obs::gs::vertex_buffer>(uint32_t{5}, uint8_t{1});
 	}
 	float dx = x2 - x;
@@ -198,7 +198,7 @@ void streamfx::gfx::debug::draw_rectangle(float x, float y, float w, float h, bo
 	obs::gs::context gctx{};
 	if (!_quad_vb) {
-		_quad_vb = std::make_shared<obs::gs::vertex_buffer>(5u, 1u);
+		_quad_vb = std::make_shared<obs::gs::vertex_buffer>(uint32_t{5}, uint8_t{1});
 	}
 	if (frame) {


@@ -104,7 +104,7 @@ namespace streamfx::nvidia::ar {
 			result = _nvar->NvAR_GetF32Array(_fx.get(), param, &data, &size);
-			value.resize(size);
+			value.resize(static_cast<size_t>(size));
 			memcpy(value.data(), data, size * sizeof(float));
 			return result;


@@ -114,7 +114,7 @@ void streamfx::nvidia::vfx::superresolution::set_strength(float strength)
 	_dirty = true;
 	// Update Effect
-	uint32_t value = (_strength >= .5f) ? 1 : 0;
+	uint32_t value = (_strength >= .5f) ? 1u : 0u;
 	auto gctx = ::streamfx::obs::gs::context();
 	auto cctx = ::streamfx::nvidia::cuda::obs::get()->get_context()->enter();
 	if (auto res = set(::streamfx::nvidia::vfx::PARAMETER_STRENGTH, value);
@@ -198,8 +198,8 @@ void streamfx::nvidia::vfx::superresolution::size(std::pair<uint32_t, uint32_t>
 	}
 	// Calculate Output Size.
-	output_size.first = static_cast<uint32_t>(std::lround(input_size.first * _scale));
-	output_size.second = static_cast<uint32_t>(std::lround(input_size.second * _scale));
+	output_size.first = static_cast<uint32_t>(std::lround(static_cast<float>(input_size.first) * _scale));
+	output_size.second = static_cast<uint32_t>(std::lround(static_cast<float>(input_size.second) * _scale));
 	// Verify that this is a valid scale factor.
 	float width_mul = (static_cast<float>(output_size.first) / static_cast<float>(input_size.first));


@@ -168,7 +168,8 @@ void opengl_copy_subregion(opengl_info& info, std::shared_ptr<streamfx::obs::gs:
 	D_OPENGL_CHECK_ERROR("glBindTexture(GL_TEXTURE_2D, info.target);");
 	// Copy Data
-	glCopyTexSubImage2D(GL_TEXTURE_2D, mip_level, 0, 0, 0, 0, width, height);
+	glCopyTexSubImage2D(GL_TEXTURE_2D, static_cast<GLint>(mip_level), 0, 0, 0, 0, static_cast<GLsizei>(width),
+						static_cast<GLsizei>(height));
 	D_OPENGL_CHECK_ERROR("glCopyTexSubImage2D(GL_TEXTURE_2D, mip_level, 0, 0, 0, 0, width, height);");
 	// Target -/-> Texture Unit 1
@@ -212,7 +213,8 @@ streamfx::obs::gs::mipmapper::mipmapper()
 uint32_t streamfx::obs::gs::mipmapper::calculate_max_mip_level(uint32_t width, uint32_t height)
 {
-	return static_cast<uint32_t>(1 + std::lroundl(floor(log2(std::max<GLint>(width, height)))));
+	return static_cast<uint32_t>(
+		1 + std::lroundl(floor(log2(std::max<GLint>(static_cast<GLint>(width), static_cast<GLint>(height))))));
 }
 void streamfx::obs::gs::mipmapper::rebuild(std::shared_ptr<streamfx::obs::gs::texture> source,
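
calculate_max_mip_level() returns 1 + floor(log2(max(width, height))), i.e. the number of mip levels needed for a width x height texture. The same count can be computed with integer operations only, shown here as a sketch rather than the committed code:

#include <cstdint>

// Sketch: mip level count for a width x height texture,
// equal to 1 + floor(log2(max(width, height))). Not part of the commit.
inline uint32_t max_mip_level(uint32_t width, uint32_t height)
{
	uint32_t largest = (width > height) ? width : height;
	uint32_t levels = 1;
	while (largest >>= 1) {
		++levels;
	}
	return levels;
}

// max_mip_level(1920, 1080) == 11, matching 1 + floor(log2(1920)).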


@@ -215,12 +215,12 @@ streamfx::version_info::operator std::string()
 {
 	std::vector<char> buffer(25, 0);
 	if (stage != version_stage::STABLE) {
-		auto types = stage_to_string(stage);
-		size_t len = snprintf(buffer.data(), buffer.size(), "%" PRIu16 ".%" PRIu16 ".%" PRIu16 "%.1s%" PRIu16, major,
-							  minor, patch, types.data(), tweak);
+		auto types = stage_to_string(stage);
+		int len = snprintf(buffer.data(), buffer.size(), "%" PRIu16 ".%" PRIu16 ".%" PRIu16 "%.1s%" PRIu16, major,
+						   minor, patch, types.data(), tweak);
 		return std::string(buffer.data(), buffer.data() + len);
 	} else {
-		size_t len = snprintf(buffer.data(), buffer.size(), "%" PRIu16 ".%" PRIu16 ".%" PRIu16, major, minor, patch);
+		int len = snprintf(buffer.data(), buffer.size(), "%" PRIu16 ".%" PRIu16 ".%" PRIu16, major, minor, patch);
 		return std::string(buffer.data(), buffer.data() + len);
 	}
 }


@@ -37,7 +37,7 @@ void streamfx::util::logging::log(level lvl, const char* format, ...)
 	va_list vargs_copy;
 	va_copy(vargs_copy, vargs);
 	int32_t ret = vsnprintf(buffer.data(), buffer.size(), format, vargs);
-	buffer.resize(ret + 1);
+	buffer.resize(static_cast<size_t>(ret) + 1);
 	ret = vsnprintf(buffer.data(), buffer.size(), format, vargs_copy);
 	va_end(vargs);
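
vsnprintf() returns an int with the number of characters the full output needs (excluding the terminator), even when the buffer is too small; the code above uses that value to resize the buffer and then formats a second time from the copied va_list. The same measure-then-format pattern as a standalone sketch, not taken from the commit:

#include <cstdarg>
#include <cstdio>
#include <string>
#include <vector>

// Sketch: two-pass printf-style formatting into a std::string. Not committed code.
inline std::string format_string(const char* format, ...)
{
	va_list args_measure;
	va_start(args_measure, format);
	va_list args_write;
	va_copy(args_write, args_measure);

	int needed = vsnprintf(nullptr, 0, format, args_measure); // first pass: measure
	va_end(args_measure);
	if (needed < 0) {
		va_end(args_write);
		return {};
	}

	std::vector<char> buffer(static_cast<size_t>(needed) + 1, '\0');
	vsnprintf(buffer.data(), buffer.size(), format, args_write); // second pass: format
	va_end(args_write);
	return std::string(buffer.data(), static_cast<size_t>(needed));
}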


@@ -45,8 +45,8 @@ std::string streamfx::util::platform::native_to_utf8(std::wstring const& v)
 {
 	std::vector<char> buffer((v.length() + 1) * 4, 0);
-	DWORD res = WideCharToMultiByte(CP_UTF8, 0, v.c_str(), static_cast<int>(v.length()), buffer.data(),
-									static_cast<int>(buffer.size()), nullptr, nullptr);
+	int res = WideCharToMultiByte(CP_UTF8, 0, v.c_str(), static_cast<int>(v.length()), buffer.data(),
+								  static_cast<int>(buffer.size()), nullptr, nullptr);
 	if (res == 0) {
 		D_LOG_WARNING("Failed to convert '%ls' to UTF-8 format.", v.c_str());
 		throw std::runtime_error("Failed to convert Windows-native to UTF-8.");
@@ -66,8 +66,8 @@ std::wstring streamfx::util::platform::utf8_to_native(std::string const& v)
 {
 	std::vector<wchar_t> buffer(v.length() + 1, 0);
-	DWORD res = MultiByteToWideChar(CP_UTF8, 0, v.c_str(), static_cast<int>(v.length()), buffer.data(),
-									static_cast<int>(buffer.size()));
+	int res = MultiByteToWideChar(CP_UTF8, 0, v.c_str(), static_cast<int>(v.length()), buffer.data(),
+								  static_cast<int>(buffer.size()));
 	if (res == 0) {
 		D_LOG_WARNING("Failed to convert '%s' to native format.", v.c_str());
 		throw std::runtime_error("Failed to convert UTF-8 to Windows-native.");
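
Both WideCharToMultiByte() and MultiByteToWideChar() return the converted length as an int (0 on failure), which is what the result type now reflects. They can also be called with a zero-sized output buffer to query the exact length up front instead of over-allocating; a sketch of that two-pass pattern for the UTF-16 to UTF-8 direction, offered as an alternative and not part of this commit:

#include <stdexcept>
#include <string>
#include <Windows.h>

// Sketch: query-then-convert UTF-16 -> UTF-8, instead of a fixed (length + 1) * 4 buffer.
// Not part of the commit.
inline std::string wide_to_utf8(std::wstring const& in)
{
	if (in.empty())
		return {};
	// First pass: ask for the required byte count (output buffer size of 0).
	int needed = WideCharToMultiByte(CP_UTF8, 0, in.c_str(), static_cast<int>(in.size()), nullptr, 0, nullptr, nullptr);
	if (needed <= 0)
		throw std::runtime_error("Failed to convert Windows-native to UTF-8.");
	std::string out(static_cast<size_t>(needed), '\0');
	// Second pass: convert into the exactly-sized buffer.
	int written = WideCharToMultiByte(CP_UTF8, 0, in.c_str(), static_cast<int>(in.size()), out.data(), needed, nullptr, nullptr);
	if (written != needed)
		throw std::runtime_error("Failed to convert Windows-native to UTF-8.");
	return out;
}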