Bug 1952339 - Vendor libwebrtc from ef95b20fbc

Upstream commit: https://webrtc.googlesource.com/src/+/ef95b20fbc59fbe33c066201e84ec51fda7fc762
    Move safe_conversions.h to webrtc namespace

    Bug: webrtc:42232595
    Change-Id: I4145233e74fc684539d493423cafe142159fe8ff
    Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/378720
    Reviewed-by: Harald Alvestrand <hta@webrtc.org>
    Commit-Queue: Evan Shrubsole <eshr@webrtc.org>
    Cr-Commit-Position: refs/heads/main@{#43973}

Differential Revision: https://phabricator.services.mozilla.com/D243977
This commit is contained in:
Michael Froman
2025-03-07 16:10:52 -06:00
parent eec1ff0969
commit 6b112f8877
133 changed files with 353 additions and 359 deletions

View File

@@ -1,4 +1,4 @@
# ./mach python dom/media/webrtc/third_party_build/vendor-libwebrtc.py --from-local /home/mfroman/mozilla/elm/.moz-fast-forward/moz-libwebrtc --commit mozpatches libwebrtc
libwebrtc updated from /home/mfroman/mozilla/elm/.moz-fast-forward/moz-libwebrtc commit mozpatches on 2025-03-07T22:09:49.131599+00:00.
libwebrtc updated from /home/mfroman/mozilla/elm/.moz-fast-forward/moz-libwebrtc commit mozpatches on 2025-03-07T22:10:42.478763+00:00.
# base of latest vendoring
59410dcd99
ef95b20fbc

View File

@@ -29,7 +29,7 @@ std::optional<AudioDecoderL16::Config> AudioDecoderL16::SdpToConfig(
const SdpAudioFormat& format) {
Config config;
config.sample_rate_hz = format.clockrate_hz;
config.num_channels = rtc::checked_cast<int>(format.num_channels);
config.num_channels = checked_cast<int>(format.num_channels);
if (absl::EqualsIgnoreCase(format.name, "L16") && config.IsOk()) {
return config;
}

View File

@@ -34,13 +34,13 @@ namespace webrtc {
std::optional<AudioEncoderL16::Config> AudioEncoderL16::SdpToConfig(
const SdpAudioFormat& format) {
if (!rtc::IsValueInRangeForNumericType<int>(format.num_channels)) {
if (!IsValueInRangeForNumericType<int>(format.num_channels)) {
RTC_DCHECK_NOTREACHED();
return std::nullopt;
}
Config config;
config.sample_rate_hz = format.clockrate_hz;
config.num_channels = rtc::dchecked_cast<int>(format.num_channels);
config.num_channels = dchecked_cast<int>(format.num_channels);
auto ptime_iter = format.parameters.find("ptime");
if (ptime_iter != format.parameters.end()) {
const auto ptime = StringToNumber<int>(ptime_iter->second);
@@ -62,8 +62,7 @@ void AudioEncoderL16::AppendSupportedEncoders(
AudioCodecInfo AudioEncoderL16::QueryAudioEncoder(
const AudioEncoderL16::Config& config) {
RTC_DCHECK(config.IsOk());
return {config.sample_rate_hz,
rtc::dchecked_cast<size_t>(config.num_channels),
return {config.sample_rate_hz, dchecked_cast<size_t>(config.num_channels),
config.sample_rate_hz * config.num_channels * 16};
}

View File

@@ -34,7 +34,7 @@ std::optional<AudioDecoderG711::Config> AudioDecoderG711::SdpToConfig(
(is_pcmu || is_pcma)) {
Config config;
config.type = is_pcmu ? Config::Type::kPcmU : Config::Type::kPcmA;
config.num_channels = rtc::dchecked_cast<int>(format.num_channels);
config.num_channels = dchecked_cast<int>(format.num_channels);
if (!config.IsOk()) {
RTC_DCHECK_NOTREACHED();
return std::nullopt;

View File

@@ -40,7 +40,7 @@ std::optional<AudioEncoderG711::Config> AudioEncoderG711::SdpToConfig(
(is_pcmu || is_pcma)) {
Config config;
config.type = is_pcmu ? Config::Type::kPcmU : Config::Type::kPcmA;
config.num_channels = rtc::dchecked_cast<int>(format.num_channels);
config.num_channels = dchecked_cast<int>(format.num_channels);
config.frame_size_ms = 20;
auto ptime_iter = format.parameters.find("ptime");
if (ptime_iter != format.parameters.end()) {
@@ -68,7 +68,7 @@ void AudioEncoderG711::AppendSupportedEncoders(
AudioCodecInfo AudioEncoderG711::QueryAudioEncoder(const Config& config) {
RTC_DCHECK(config.IsOk());
return {8000, rtc::dchecked_cast<size_t>(config.num_channels),
return {8000, dchecked_cast<size_t>(config.num_channels),
64000 * config.num_channels};
}

View File

@@ -30,7 +30,7 @@ std::optional<AudioDecoderG722::Config> AudioDecoderG722::SdpToConfig(
if (absl::EqualsIgnoreCase(format.name, "G722") &&
format.clockrate_hz == 8000 &&
(format.num_channels == 1 || format.num_channels == 2)) {
return Config{rtc::dchecked_cast<int>(format.num_channels)};
return Config{dchecked_cast<int>(format.num_channels)};
}
return std::nullopt;
}

View File

@@ -40,7 +40,7 @@ std::optional<AudioEncoderG722Config> AudioEncoderG722::SdpToConfig(
}
AudioEncoderG722Config config;
config.num_channels = rtc::checked_cast<int>(format.num_channels);
config.num_channels = checked_cast<int>(format.num_channels);
auto ptime_iter = format.parameters.find("ptime");
if (ptime_iter != format.parameters.end()) {
auto ptime = StringToNumber<int>(ptime_iter->second);
@@ -66,7 +66,7 @@ void AudioEncoderG722::AppendSupportedEncoders(
AudioCodecInfo AudioEncoderG722::QueryAudioEncoder(
const AudioEncoderG722Config& config) {
RTC_DCHECK(config.IsOk());
return {16000, rtc::dchecked_cast<size_t>(config.num_channels),
return {16000, dchecked_cast<size_t>(config.num_channels),
64000 * config.num_channels};
}

View File

@@ -40,8 +40,7 @@ int I010DataSize(int width,
int stride_v) {
CheckValidDimensions(width, height, stride_y, stride_u, stride_v);
int64_t h = height, y = stride_y, u = stride_u, v = stride_v;
return rtc::checked_cast<int>(kBytesPerPixel *
(y * h + (u + v) * ((h + 1) / 2)));
return checked_cast<int>(kBytesPerPixel * (y * h + (u + v) * ((h + 1) / 2)));
}
} // namespace

View File

@@ -41,7 +41,7 @@ int I210DataSize(int width,
int stride_v) {
CheckValidDimensions(width, height, stride_y, stride_u, stride_v);
int64_t h = height, y = stride_y, u = stride_u, v = stride_v;
return rtc::checked_cast<int>(kBytesPerPixel * (y * h + u * h + v * h));
return checked_cast<int>(kBytesPerPixel * (y * h + u * h + v * h));
}
} // namespace

View File

@@ -43,7 +43,7 @@ int I410DataSize(int width,
int stride_v) {
CheckValidDimensions(width, height, stride_y, stride_u, stride_v);
int64_t h = height, y = stride_y, u = stride_u, v = stride_v;
return rtc::checked_cast<int>(kBytesPerPixel * (y * h + u * h + v * h));
return checked_cast<int>(kBytesPerPixel * (y * h + u * h + v * h));
}
} // namespace

View File

@@ -44,7 +44,7 @@ int I420DataSize(int width,
// Do the size calculation using 64bit integers and use checked_cast to catch
// overflow.
int64_t h = height, y = stride_y, u = stride_u, v = stride_v;
return rtc::checked_cast<int>(y * h + (u + v) * ((h + 1) / 2));
return checked_cast<int>(y * h + (u + v) * ((h + 1) / 2));
}
} // namespace

View File

@@ -43,7 +43,7 @@ int I422DataSize(int width,
int stride_v) {
CheckValidDimensions(width, height, stride_y, stride_u, stride_v);
int64_t h = height, y = stride_y, u = stride_u, v = stride_v;
return rtc::checked_cast<int>(y * h + u * h + v * h);
return checked_cast<int>(y * h + u * h + v * h);
}
} // namespace

View File

@@ -42,7 +42,7 @@ int I444DataSize(int width,
int stride_v) {
CheckValidDimensions(width, height, stride_y, stride_u, stride_v);
int64_t h = height, y = stride_y, u = stride_u, v = stride_v;
return rtc::checked_cast<int>(y * h + u * h + v * h);
return checked_cast<int>(y * h + u * h + v * h);
}
} // namespace

View File

@@ -33,7 +33,7 @@ static const int kBufferAlignment = 64;
int NV12DataSize(int width, int height, int stride_y, int stride_uv) {
CheckValidDimensions(width, height, stride_y, stride_uv, stride_uv);
int64_t h = height, y = stride_y, uv = stride_uv;
return rtc::checked_cast<int>(y * h + uv * ((h + 1) / 2));
return checked_cast<int>(y * h + uv * ((h + 1) / 2));
}
} // namespace

View File

@@ -27,7 +27,7 @@ uint16_t VideoSendTiming::GetDeltaCappedMs(int64_t base_ms, int64_t time_ms) {
RTC_DLOG(LS_ERROR) << "Delta " << (time_ms - base_ms)
<< "ms expected to be positive";
}
return rtc::saturated_cast<uint16_t>(time_ms - base_ms);
return saturated_cast<uint16_t>(time_ms - base_ms);
}
uint16_t VideoSendTiming::GetDeltaCappedMs(TimeDelta delta) {
@@ -35,7 +35,7 @@ uint16_t VideoSendTiming::GetDeltaCappedMs(TimeDelta delta) {
RTC_DLOG(LS_ERROR) << "Delta " << delta.ms()
<< "ms expected to be positive";
}
return rtc::saturated_cast<uint16_t>(delta.ms());
return saturated_cast<uint16_t>(delta.ms());
}
TimingFrameInfo::TimingFrameInfo()

View File

@@ -697,7 +697,7 @@ void ChannelReceive::OnRtpPacket(const RtpPacketReceived& packet) {
AbsoluteCaptureTimeInterpolator::GetSource(header.ssrc,
header.arrOfCSRCs),
header.timestamp,
rtc::saturated_cast<uint32_t>(packet_copy.payload_type_frequency()),
saturated_cast<uint32_t>(packet_copy.payload_type_frequency()),
header.extension.absolute_capture_time);
ReceivePacket(packet_copy.data(), packet_copy.size(), header,

View File

@@ -209,7 +209,7 @@ int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame* frame) {
int16_t* frame_data = frame->mutable_data();
for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_;
i++) {
frame_data[i] = rtc::saturated_cast<int16_t>(scale * frame_data[i]);
frame_data[i] = saturated_cast<int16_t>(scale * frame_data[i]);
}
return 0;
}

View File

@@ -90,7 +90,7 @@ void ChannelMixer::Transform(AudioFrame* frame) {
}
const size_t index = output_channels_ * i + output_ch;
RTC_CHECK_LE(index, audio_vector_size_);
out_audio[index] = rtc::saturated_cast<int16_t>(acc_value);
out_audio[index] = saturated_cast<int16_t>(acc_value);
}
}

View File

@@ -107,7 +107,7 @@ void AudioEgress::SendAudioData(std::unique_ptr<AudioFrame> audio_frame) {
}
encoder_context_.frame_rtp_timestamp_ +=
rtc::dchecked_cast<uint32_t>(audio_frame->samples_per_channel_);
dchecked_cast<uint32_t>(audio_frame->samples_per_channel_);
});
}

View File

@@ -91,9 +91,7 @@ class AudioIngress : public AudioMixer::Source {
AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
int sampling_rate,
AudioFrame* audio_frame) override;
int Ssrc() const override {
return rtc::dchecked_cast<int>(remote_ssrc_.load());
}
int Ssrc() const override { return dchecked_cast<int>(remote_ssrc_.load()); }
int PreferredSampleRate() const override {
std::optional<NetEq::DecoderFormat> decoder =
neteq_->GetCurrentDecoderFormat();

View File

@@ -66,7 +66,7 @@ bool CanDecreaseResolutionTo(int target_pixels,
const VideoStreamInputState& input_state,
const VideoSourceRestrictions& restrictions) {
int max_pixels_per_frame =
rtc::dchecked_cast<int>(restrictions.max_pixels_per_frame().value_or(
dchecked_cast<int>(restrictions.max_pixels_per_frame().value_or(
std::numeric_limits<int>::max()));
return target_pixels < max_pixels_per_frame &&
target_pixels_min >= input_state.min_pixels_per_frame();
@@ -76,7 +76,7 @@ bool CanIncreaseResolutionTo(int target_pixels,
const VideoSourceRestrictions& restrictions) {
int max_pixels_wanted = GetIncreasedMaxPixelsWanted(target_pixels);
int max_pixels_per_frame =
rtc::dchecked_cast<int>(restrictions.max_pixels_per_frame().value_or(
dchecked_cast<int>(restrictions.max_pixels_per_frame().value_or(
std::numeric_limits<int>::max()));
return max_pixels_wanted > max_pixels_per_frame;
}
@@ -84,15 +84,14 @@ bool CanIncreaseResolutionTo(int target_pixels,
bool CanDecreaseFrameRateTo(int max_frame_rate,
const VideoSourceRestrictions& restrictions) {
const int fps_wanted = std::max(kMinFrameRateFps, max_frame_rate);
return fps_wanted <
rtc::dchecked_cast<int>(restrictions.max_frame_rate().value_or(
std::numeric_limits<int>::max()));
return fps_wanted < dchecked_cast<int>(restrictions.max_frame_rate().value_or(
std::numeric_limits<int>::max()));
}
bool CanIncreaseFrameRateTo(int max_frame_rate,
const VideoSourceRestrictions& restrictions) {
return max_frame_rate >
rtc::dchecked_cast<int>(restrictions.max_frame_rate().value_or(
dchecked_cast<int>(restrictions.max_frame_rate().value_or(
std::numeric_limits<int>::max()));
}

View File

@@ -284,8 +284,7 @@ std::map<BitrateAllocatorObserver*, int> NormalRateAllocation(
allocation[observer_config.observer];
if (priority_margin > 0 && bitrate > 0) {
int64_t extra_bitrate = std::min<int64_t>(priority_margin, bitrate);
allocation[observer_config.observer] +=
rtc::dchecked_cast<int>(extra_bitrate);
allocation[observer_config.observer] += dchecked_cast<int>(extra_bitrate);
observers_capacities[observer_config.observer] -= extra_bitrate;
bitrate -= extra_bitrate;
}
@@ -494,7 +493,7 @@ void BitrateAllocator::OnNetworkEstimateChanged(TargetTransferRate msg) {
int loss_ratio_255 = msg.network_estimate.loss_rate_ratio * 255;
last_fraction_loss_ =
rtc::dchecked_cast<uint8_t>(SafeClamp(loss_ratio_255, 0, 255));
dchecked_cast<uint8_t>(SafeClamp(loss_ratio_255, 0, 255));
last_rtt_ = msg.network_estimate.round_trip_time.ms();
last_bwe_period_ms_ = msg.network_estimate.bwe_period.ms();

View File

@@ -310,8 +310,7 @@ TEST_F(RtpDemuxerTest, OnRtpPacketCalledOnCorrectSinkByRsid) {
}
for (size_t i = 0; i < arraysize(rsids); i++) {
auto packet =
CreatePacketWithSsrcRsid(rtc::checked_cast<uint32_t>(i), rsids[i]);
auto packet = CreatePacketWithSsrcRsid(checked_cast<uint32_t>(i), rsids[i]);
EXPECT_CALL(sinks[i], OnRtpPacket(SamePacketAs(*packet))).Times(1);
EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
}
@@ -325,8 +324,7 @@ TEST_F(RtpDemuxerTest, OnRtpPacketCalledOnCorrectSinkByMid) {
}
for (size_t i = 0; i < arraysize(mids); i++) {
auto packet =
CreatePacketWithSsrcMid(rtc::checked_cast<uint32_t>(i), mids[i]);
auto packet = CreatePacketWithSsrcMid(checked_cast<uint32_t>(i), mids[i]);
EXPECT_CALL(sinks[i], OnRtpPacket(SamePacketAs(*packet))).Times(1);
EXPECT_TRUE(demuxer_.OnRtpPacket(*packet));
}
@@ -380,7 +378,7 @@ TEST_F(RtpDemuxerTest, PacketsDeliveredInRightOrder) {
std::unique_ptr<RtpPacketReceived> packets[5];
for (size_t i = 0; i < arraysize(packets); i++) {
packets[i] = CreatePacketWithSsrc(ssrc);
packets[i]->SetSequenceNumber(rtc::checked_cast<uint16_t>(i));
packets[i]->SetSequenceNumber(checked_cast<uint16_t>(i));
}
InSequence sequence;

View File

@@ -856,8 +856,8 @@ void RtpVideoSender::OnBitrateUpdated(BitrateAllocationUpdate update,
CalculateOverheadRate(update.target_bitrate, max_total_packet_size,
packet_overhead, Frequency::Hertz(framerate));
// TODO(srte): We probably should not accept 0 payload bitrate here.
payload_bitrate_bps = rtc::saturated_cast<uint32_t>(payload_bitrate_bps -
overhead_rate.bps());
payload_bitrate_bps =
saturated_cast<uint32_t>(payload_bitrate_bps - overhead_rate.bps());
}
// Get the encoder target rate. It is the estimated network rate -
@@ -865,7 +865,7 @@ void RtpVideoSender::OnBitrateUpdated(BitrateAllocationUpdate update,
// TODO(srte): We should multiply with 255 here.
encoder_target_rate_bps_ = fec_controller_->UpdateFecRates(
payload_bitrate_bps, framerate,
rtc::saturated_cast<uint8_t>(update.packet_loss_ratio * 256),
saturated_cast<uint8_t>(update.packet_loss_ratio * 256),
loss_mask_vector_, update.round_trip_time.ms());
if (!fec_allowed_) {
encoder_target_rate_bps_ = payload_bitrate_bps;

View File

@@ -30,5 +30,5 @@ int32_t WebRtcSpl_DotProductWithScale(const int16_t* vector1,
sum += (vector1[i] * vector2[i]) >> scaling;
}
return rtc::saturated_cast<int32_t>(sum);
return webrtc::saturated_cast<int32_t>(sum);
}

View File

@@ -316,8 +316,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventAudioNetworkAdaptationBitrate) {
for (size_t i = 0; i < event_count_; ++i) {
if (i == 0 || !force_repeated_fields_) {
auto runtime_config = std::make_unique<AudioEncoderRuntimeConfig>();
const int bitrate_bps = rtc::checked_cast<int>(
prng_.Rand(0, std::numeric_limits<int32_t>::max()));
const int bitrate_bps =
checked_cast<int>(prng_.Rand(0, std::numeric_limits<int32_t>::max()));
runtime_config->bitrate_bps = bitrate_bps;
events[i] = std::make_unique<RtcEventAudioNetworkAdaptation>(
std::move(runtime_config));
@@ -417,8 +417,8 @@ TEST_P(RtcEventLogEncoderTest, RtcEventAudioNetworkAdaptationAll) {
for (size_t i = 0; i < event_count_; ++i) {
if (i == 0 || !force_repeated_fields_) {
auto runtime_config = std::make_unique<AudioEncoderRuntimeConfig>();
runtime_config->bitrate_bps = rtc::checked_cast<int>(
prng_.Rand(0, std::numeric_limits<int32_t>::max()));
runtime_config->bitrate_bps =
checked_cast<int>(prng_.Rand(0, std::numeric_limits<int32_t>::max()));
runtime_config->frame_length_ms = prng_.Rand(1, 1000);
runtime_config->uplink_packet_loss_fraction =
std::pow(0.5f, prng_.Rand(1, 8));

View File

@@ -598,32 +598,32 @@ ParsedRtcEventLog::ParseStatus StoreRtpPackets(
RTPHeader header;
RTC_PARSE_CHECK_OR_RETURN(
rtc::IsValueInRangeForNumericType<bool>(*marker_values[i]));
IsValueInRangeForNumericType<bool>(*marker_values[i]));
header.markerBit = static_cast<bool>(*marker_values[i]);
RTC_PARSE_CHECK_OR_RETURN(
rtc::IsValueInRangeForNumericType<uint8_t>(*payload_type_values[i]));
IsValueInRangeForNumericType<uint8_t>(*payload_type_values[i]));
header.payloadType = static_cast<uint8_t>(*payload_type_values[i]);
RTC_PARSE_CHECK_OR_RETURN(rtc::IsValueInRangeForNumericType<uint16_t>(
*sequence_number_values[i]));
RTC_PARSE_CHECK_OR_RETURN(
IsValueInRangeForNumericType<uint16_t>(*sequence_number_values[i]));
header.sequenceNumber = static_cast<uint16_t>(*sequence_number_values[i]);
RTC_PARSE_CHECK_OR_RETURN(
rtc::IsValueInRangeForNumericType<uint32_t>(*rtp_timestamp_values[i]));
IsValueInRangeForNumericType<uint32_t>(*rtp_timestamp_values[i]));
header.timestamp = static_cast<uint32_t>(*rtp_timestamp_values[i]);
RTC_PARSE_CHECK_OR_RETURN(
rtc::IsValueInRangeForNumericType<uint32_t>(*ssrc_values[i]));
IsValueInRangeForNumericType<uint32_t>(*ssrc_values[i]));
header.ssrc = static_cast<uint32_t>(*ssrc_values[i]);
header.numCSRCs = 0; // TODO(terelius): Implement CSRC.
RTC_PARSE_CHECK_OR_RETURN(
rtc::IsValueInRangeForNumericType<size_t>(*padding_size_values[i]));
IsValueInRangeForNumericType<size_t>(*padding_size_values[i]));
header.paddingLength = static_cast<size_t>(*padding_size_values[i]);
RTC_PARSE_CHECK_OR_RETURN(
rtc::IsValueInRangeForNumericType<size_t>(*header_size_values[i]));
IsValueInRangeForNumericType<size_t>(*header_size_values[i]));
header.headerLength = static_cast<size_t>(*header_size_values[i]);
// TODO(terelius): Should we implement payload_type_frequency?
if (transport_sequence_number_values.size() > i &&
transport_sequence_number_values[i].has_value()) {
header.extension.hasTransportSequenceNumber = true;
RTC_PARSE_CHECK_OR_RETURN(rtc::IsValueInRangeForNumericType<uint16_t>(
RTC_PARSE_CHECK_OR_RETURN(IsValueInRangeForNumericType<uint16_t>(
transport_sequence_number_values[i].value()));
header.extension.transportSequenceNumber =
static_cast<uint16_t>(transport_sequence_number_values[i].value());
@@ -640,7 +640,7 @@ ParsedRtcEventLog::ParseStatus StoreRtpPackets(
if (absolute_send_time_values.size() > i &&
absolute_send_time_values[i].has_value()) {
header.extension.hasAbsoluteSendTime = true;
RTC_PARSE_CHECK_OR_RETURN(rtc::IsValueInRangeForNumericType<uint32_t>(
RTC_PARSE_CHECK_OR_RETURN(IsValueInRangeForNumericType<uint32_t>(
absolute_send_time_values[i].value()));
header.extension.absoluteSendTime =
static_cast<uint32_t>(absolute_send_time_values[i].value());
@@ -648,7 +648,7 @@ ParsedRtcEventLog::ParseStatus StoreRtpPackets(
if (video_rotation_values.size() > i &&
video_rotation_values[i].has_value()) {
header.extension.hasVideoRotation = true;
RTC_PARSE_CHECK_OR_RETURN(rtc::IsValueInRangeForNumericType<uint8_t>(
RTC_PARSE_CHECK_OR_RETURN(IsValueInRangeForNumericType<uint8_t>(
video_rotation_values[i].value()));
header.extension.videoRotation = ConvertCVOByteToVideoRotation(
static_cast<uint8_t>(video_rotation_values[i].value()));
@@ -656,11 +656,11 @@ ParsedRtcEventLog::ParseStatus StoreRtpPackets(
if (audio_level_values.size() > i && audio_level_values[i].has_value()) {
RTC_PARSE_CHECK_OR_RETURN(voice_activity_values.size() > i &&
voice_activity_values[i].has_value());
RTC_PARSE_CHECK_OR_RETURN(rtc::IsValueInRangeForNumericType<bool>(
voice_activity_values[i].value()));
RTC_PARSE_CHECK_OR_RETURN(
IsValueInRangeForNumericType<bool>(voice_activity_values[i].value()));
bool voice_activity = static_cast<bool>(voice_activity_values[i].value());
RTC_PARSE_CHECK_OR_RETURN(rtc::IsValueInRangeForNumericType<int>(
audio_level_values[i].value()));
RTC_PARSE_CHECK_OR_RETURN(
IsValueInRangeForNumericType<int>(audio_level_values[i].value()));
int audio_level = static_cast<int>(audio_level_values[i].value());
RTC_PARSE_CHECK_OR_RETURN_LE(audio_level, 0x7F);
header.extension.set_audio_level(AudioLevel(voice_activity, audio_level));
@@ -3566,7 +3566,7 @@ ParsedRtcEventLog::StoreAudioNetworkAdaptationEvent(
}
if (uplink_packet_loss_fraction_values[i].has_value()) {
float uplink_packet_loss_fraction2;
RTC_PARSE_CHECK_OR_RETURN(rtc::IsValueInRangeForNumericType<uint32_t>(
RTC_PARSE_CHECK_OR_RETURN(IsValueInRangeForNumericType<uint32_t>(
uplink_packet_loss_fraction_values[i].value()));
RTC_PARSE_CHECK_OR_RETURN(ParsePacketLossFractionFromProtoFormat(
static_cast<uint32_t>(uplink_packet_loss_fraction_values[i].value()),
@@ -3574,20 +3574,20 @@ ParsedRtcEventLog::StoreAudioNetworkAdaptationEvent(
runtime_config.uplink_packet_loss_fraction = uplink_packet_loss_fraction2;
}
if (enable_fec_values[i].has_value()) {
RTC_PARSE_CHECK_OR_RETURN(rtc::IsValueInRangeForNumericType<bool>(
enable_fec_values[i].value()));
RTC_PARSE_CHECK_OR_RETURN(
IsValueInRangeForNumericType<bool>(enable_fec_values[i].value()));
runtime_config.enable_fec =
static_cast<bool>(enable_fec_values[i].value());
}
if (enable_dtx_values[i].has_value()) {
RTC_PARSE_CHECK_OR_RETURN(rtc::IsValueInRangeForNumericType<bool>(
enable_dtx_values[i].value()));
RTC_PARSE_CHECK_OR_RETURN(
IsValueInRangeForNumericType<bool>(enable_dtx_values[i].value()));
runtime_config.enable_dtx =
static_cast<bool>(enable_dtx_values[i].value());
}
if (num_channels_values[i].has_value()) {
RTC_PARSE_CHECK_OR_RETURN(rtc::IsValueInRangeForNumericType<size_t>(
num_channels_values[i].value()));
RTC_PARSE_CHECK_OR_RETURN(
IsValueInRangeForNumericType<size_t>(num_channels_values[i].value()));
runtime_config.num_channels =
static_cast<size_t>(num_channels_values[i].value());
}

View File

@@ -809,17 +809,19 @@ class WebRtcVoiceEngineTestFake : public ::testing::TestWithParam<bool> {
EXPECT_EQ(info.payload_bytes_received, stats.payload_bytes_received);
EXPECT_EQ(info.header_and_padding_bytes_received,
stats.header_and_padding_bytes_received);
EXPECT_EQ(rtc::checked_cast<unsigned int>(info.packets_received),
EXPECT_EQ(webrtc::checked_cast<unsigned int>(info.packets_received),
stats.packets_received);
EXPECT_EQ(info.packets_lost, stats.packets_lost);
EXPECT_EQ(info.codec_name, stats.codec_name);
EXPECT_EQ(info.codec_payload_type, stats.codec_payload_type);
EXPECT_EQ(rtc::checked_cast<unsigned int>(info.jitter_ms), stats.jitter_ms);
EXPECT_EQ(rtc::checked_cast<unsigned int>(info.jitter_buffer_ms),
EXPECT_EQ(webrtc::checked_cast<unsigned int>(info.jitter_ms),
stats.jitter_ms);
EXPECT_EQ(webrtc::checked_cast<unsigned int>(info.jitter_buffer_ms),
stats.jitter_buffer_ms);
EXPECT_EQ(rtc::checked_cast<unsigned int>(info.jitter_buffer_preferred_ms),
stats.jitter_buffer_preferred_ms);
EXPECT_EQ(rtc::checked_cast<unsigned int>(info.delay_estimate_ms),
EXPECT_EQ(
webrtc::checked_cast<unsigned int>(info.jitter_buffer_preferred_ms),
stats.jitter_buffer_preferred_ms);
EXPECT_EQ(webrtc::checked_cast<unsigned int>(info.delay_estimate_ms),
stats.delay_estimate_ms);
EXPECT_EQ(info.audio_level, stats.audio_level);
EXPECT_EQ(info.total_samples_received, stats.total_samples_received);
@@ -4111,7 +4113,7 @@ TEST(WebRtcVoiceEngineTest, CollectRecvCodecs) {
if (absl::EqualsIgnoreCase(codec.name, format.name) &&
codec.clockrate == format.clockrate_hz &&
codec.channels == format.num_channels) {
return rtc::checked_cast<int>(i);
return webrtc::checked_cast<int>(i);
}
}
return -1;
@@ -4198,7 +4200,7 @@ TEST(WebRtcVoiceEngineTest, CollectRecvCodecsWithLatePtAssignment) {
if (absl::EqualsIgnoreCase(codec.name, format.name) &&
codec.clockrate == format.clockrate_hz &&
codec.channels == format.num_channels) {
return rtc::checked_cast<int>(i);
return webrtc::checked_cast<int>(i);
}
}
return -1;

View File

@@ -23,7 +23,7 @@ void DownMixFrame(const AudioFrame& input, rtc::ArrayView<int16_t> output) {
} else {
const int16_t* const input_data = input.data();
for (size_t n = 0; n < input.samples_per_channel_; ++n) {
output[n] = rtc::dchecked_cast<int16_t>(
output[n] = dchecked_cast<int16_t>(
(int32_t{input_data[2 * n]} + int32_t{input_data[2 * n + 1]}) >> 1);
}
}
@@ -94,7 +94,7 @@ void ReMixFrame(const AudioFrame& input,
// When downmixing is needed, and the input is stereo, average the channels.
if (input.num_channels_ == 2) {
for (size_t n = 0; n < input.samples_per_channel_; ++n) {
(*output)[n] = rtc::dchecked_cast<int16_t>(
(*output)[n] = dchecked_cast<int16_t>(
(int32_t{input_data[2 * n]} + int32_t{input_data[2 * n + 1]}) >> 1);
}
return;

View File

@@ -100,9 +100,8 @@ bool ResamplerHelper::MaybeResample(int desired_sample_rate_hz,
audio_frame->samples_per_channel_ =
static_cast<size_t>(samples_per_channel_int);
audio_frame->sample_rate_hz_ = desired_sample_rate_hz;
RTC_DCHECK_EQ(
audio_frame->sample_rate_hz_,
rtc::dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
RTC_DCHECK_EQ(audio_frame->sample_rate_hz_,
dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
resampled_last_output_frame_ = true;
} else {
resampled_last_output_frame_ = false;

View File

@@ -220,7 +220,7 @@ int32_t AudioCodingModuleImpl::Encode(
first_frame_
? input_data.input_timestamp
: last_rtp_timestamp_ +
rtc::dchecked_cast<uint32_t>(rtc::CheckedDivExact(
dchecked_cast<uint32_t>(rtc::CheckedDivExact(
int64_t{input_data.input_timestamp - last_timestamp_} *
encoder_stack_->RtpTimestampRateHz(),
int64_t{encoder_stack_->SampleRateHz()}));

View File

@@ -131,7 +131,7 @@ class PacketizationCallbackStubOldApi : public AudioPacketizationCallback {
int last_payload_len_bytes() const {
MutexLock lock(&mutex_);
return rtc::checked_cast<int>(last_payload_vec_.size());
return checked_cast<int>(last_payload_vec_.size());
}
AudioFrameType last_frame_type() const {
@@ -677,7 +677,7 @@ class AcmSenderBitExactnessOldApi : public ::testing::Test,
std::unique_ptr<AudioEncoder> external_speech_encoder,
int payload_type) {
payload_type_ = payload_type;
frame_size_rtp_timestamps_ = rtc::checked_cast<uint32_t>(
frame_size_rtp_timestamps_ = checked_cast<uint32_t>(
external_speech_encoder->Num10MsFramesInNextPacket() *
external_speech_encoder->RtpTimestampRateHz() / 100);
send_test_->RegisterExternalCodec(std::move(external_speech_encoder));
@@ -1069,7 +1069,7 @@ class AcmSetBitRateTest : public ::testing::Test {
int nr_bytes = 0;
while (std::unique_ptr<test::Packet> next_packet =
send_test_->NextPacket()) {
nr_bytes += rtc::checked_cast<int>(next_packet->payload_length_bytes());
nr_bytes += checked_cast<int>(next_packet->payload_length_bytes());
}
EXPECT_LE(min_expected_total_bits, nr_bytes * 8);
EXPECT_GE(max_expected_total_bits, nr_bytes * 8);

View File

@@ -38,7 +38,7 @@ void DumpEventToFile(const Event& event, FileWrapper* dump_file) {
RTC_CHECK(dump_file->is_open());
std::string dump_data;
event.SerializeToString(&dump_data);
int32_t size = rtc::checked_cast<int32_t>(event.ByteSizeLong());
int32_t size = checked_cast<int32_t>(event.ByteSizeLong());
dump_file->Write(&size, sizeof(size));
dump_file->Write(dump_data.data(), dump_data.length());
}

View File

@@ -71,8 +71,8 @@ TEST_P(AudioEncoderFactoryTest, CanRunAllSupportedEncoders) {
factory->Create(env, spec.format, {.payload_type = kTestPayloadType});
EXPECT_TRUE(encoder);
encoder->Reset();
const int num_samples = rtc::checked_cast<int>(
encoder->SampleRateHz() * encoder->NumChannels() / 100);
const int num_samples = checked_cast<int>(encoder->SampleRateHz() *
encoder->NumChannels() / 100);
rtc::Buffer out;
rtc::BufferT<int16_t> audio;
audio.SetData(num_samples, [](rtc::ArrayView<int16_t> audio) {

View File

@@ -305,8 +305,8 @@ TEST_F(AudioEncoderCngTest, EncodePassive) {
encoded_info_.encoded_bytes);
EXPECT_EQ(expected_timestamp, encoded_info_.encoded_timestamp);
}
expected_timestamp += rtc::checked_cast<uint32_t>(
kBlocksPerFrame * num_audio_samples_10ms_);
expected_timestamp +=
checked_cast<uint32_t>(kBlocksPerFrame * num_audio_samples_10ms_);
} else {
// Otherwise, expect no output.
EXPECT_EQ(0u, encoded_info_.encoded_bytes);

View File

@@ -137,7 +137,7 @@ bool ComfortNoiseDecoder::Generate(rtc::ArrayView<int16_t> out_data,
}
/* Calculate new scale factor in Q13 */
dec_used_scale_factor_ = rtc::checked_cast<int16_t>(
dec_used_scale_factor_ = checked_cast<int16_t>(
WEBRTC_SPL_MUL_16_16_RSFT(dec_used_scale_factor_, Beta >> 2, 13) +
WEBRTC_SPL_MUL_16_16_RSFT(dec_target_scale_factor_, BetaC >> 2, 13));

View File

@@ -152,7 +152,7 @@ TEST_P(SplitBySamplesTest, PayloadSizes) {
ASSERT_EQ(value, payload[i]);
}
expected_timestamp += rtc::checked_cast<uint32_t>(
expected_timestamp += checked_cast<uint32_t>(
expected_split.frame_sizes[i] * samples_per_ms_);
}
}

View File

@@ -73,13 +73,13 @@ class WhiteNoiseGenerator {
public:
explicit WhiteNoiseGenerator(double amplitude_dbfs)
: amplitude_(
rtc::saturated_cast<int16_t>(std::pow(10, amplitude_dbfs / 20) *
std::numeric_limits<int16_t>::max())),
saturated_cast<int16_t>(std::pow(10, amplitude_dbfs / 20) *
std::numeric_limits<int16_t>::max())),
random_generator_(42) {}
void GenerateNextFrame(rtc::ArrayView<int16_t> frame) {
for (size_t i = 0; i < frame.size(); ++i) {
frame[i] = rtc::saturated_cast<int16_t>(
frame[i] = saturated_cast<int16_t>(
random_generator_.Rand(-amplitude_, amplitude_));
}
}

View File

@@ -88,11 +88,11 @@ int GetFrameSizeMs(const SdpAudioFormat& format) {
int CalculateDefaultBitrate(int max_playback_rate, size_t num_channels) {
const int bitrate = [&] {
if (max_playback_rate <= 8000) {
return kOpusBitrateNbBps * rtc::dchecked_cast<int>(num_channels);
return kOpusBitrateNbBps * dchecked_cast<int>(num_channels);
} else if (max_playback_rate <= 16000) {
return kOpusBitrateWbBps * rtc::dchecked_cast<int>(num_channels);
return kOpusBitrateWbBps * dchecked_cast<int>(num_channels);
} else {
return kOpusBitrateFbBps * rtc::dchecked_cast<int>(num_channels);
return kOpusBitrateFbBps * dchecked_cast<int>(num_channels);
}
}();
RTC_DCHECK_GE(bitrate, AudioEncoderMultiChannelOpusConfig::kMinBitrateBps);
@@ -344,7 +344,7 @@ AudioEncoder::EncodedInfo AudioEncoderMultiChannelOpusImpl::EncodeImpl(
int status = WebRtcOpus_Encode(
inst_, &input_buffer_[0],
rtc::CheckedDivExact(input_buffer_.size(), config_.num_channels),
rtc::saturated_cast<int16_t>(max_encoded_bytes), encoded.data());
saturated_cast<int16_t>(max_encoded_bytes), encoded.data());
RTC_CHECK_GE(status, 0); // Fails only if fed invalid data.

View File

@@ -448,7 +448,7 @@ void OpusTest::TestCbrEffect(bool cbr, int block_length_ms) {
int32_t diff = std::abs((int32_t)encoded_bytes_ - prev_pkt_size);
max_pkt_size_diff = std::max(max_pkt_size_diff, diff);
}
prev_pkt_size = rtc::checked_cast<int32_t>(encoded_bytes_);
prev_pkt_size = checked_cast<int32_t>(encoded_bytes_);
}
if (cbr) {
@@ -919,7 +919,7 @@ TEST_P(OpusTest, OpusDecodeRepacketized) {
rtc::CheckedDivExact(speech_block.size(), channels_),
kMaxBytes, bitstream_);
if (opus_repacketizer_cat(rp, bitstream_,
rtc::checked_cast<opus_int32>(encoded_bytes_)) ==
checked_cast<opus_int32>(encoded_bytes_)) ==
OPUS_OK) {
++num_packets;
if (num_packets == kPackets) {

View File

@@ -66,7 +66,7 @@ class AudioEncoderCopyRedTest : public ::testing::Test {
timestamp_,
rtc::ArrayView<const int16_t>(audio_, num_audio_samples_10ms),
&encoded_);
timestamp_ += rtc::checked_cast<uint32_t>(num_audio_samples_10ms);
timestamp_ += checked_cast<uint32_t>(num_audio_samples_10ms);
}
test::ScopedKeyValueConfig field_trials_;

View File

@@ -47,7 +47,7 @@ class AudioMultiVectorTest : public ::testing::TestWithParam<size_t> {
// And so on.
for (size_t i = 0; i < array_length(); ++i) {
for (size_t j = 1; j <= num_channels_; ++j) {
*ptr = rtc::checked_cast<int16_t>(j * 100 + i);
*ptr = checked_cast<int16_t>(j * 100 + i);
++ptr;
}
}

View File

@@ -24,7 +24,7 @@ class AudioVectorTest : public ::testing::Test {
virtual void SetUp() {
// Populate test array.
for (size_t i = 0; i < array_length(); ++i) {
array_[i] = rtc::checked_cast<int16_t>(i);
array_[i] = checked_cast<int16_t>(i);
}
}
@@ -250,7 +250,7 @@ TEST_F(AudioVectorTest, InsertAtEnd) {
for (int i = 0; i < kNewLength; ++i) {
new_array[i] = 100 + i;
}
int insert_position = rtc::checked_cast<int>(array_length());
int insert_position = checked_cast<int>(array_length());
vec.InsertAt(new_array, kNewLength, insert_position);
// Verify that the vector looks as follows:
// {0, 1, ..., kLength - 1, 100, 101, ..., 100 + kNewLength - 1 }.
@@ -279,8 +279,7 @@ TEST_F(AudioVectorTest, InsertBeyondEnd) {
for (int i = 0; i < kNewLength; ++i) {
new_array[i] = 100 + i;
}
int insert_position =
rtc::checked_cast<int>(array_length() + 10); // Too large.
int insert_position = checked_cast<int>(array_length() + 10); // Too large.
vec.InsertAt(new_array, kNewLength, insert_position);
// Verify that the vector looks as follows:
// {0, 1, ..., kLength - 1, 100, 101, ..., 100 + kNewLength - 1 }.
@@ -336,7 +335,7 @@ TEST_F(AudioVectorTest, OverwriteBeyondEnd) {
for (int i = 0; i < kNewLength; ++i) {
new_array[i] = 100 + i;
}
int insert_position = rtc::checked_cast<int>(array_length() - 2);
int insert_position = checked_cast<int>(array_length() - 2);
vec.OverwriteAt(new_array, kNewLength, insert_position);
ASSERT_EQ(array_length() - 2u + kNewLength, vec.Size());
// Verify that the vector looks as follows:

View File

@@ -36,17 +36,17 @@ void BufferLevelFilter::Update(size_t buffer_size_samples,
// `buffer_size_samples` is in Q0.
const int64_t filtered_current_level =
(level_factor_ * int64_t{filtered_current_level_} >> 8) +
(256 - level_factor_) * rtc::dchecked_cast<int64_t>(buffer_size_samples);
(256 - level_factor_) * dchecked_cast<int64_t>(buffer_size_samples);
// Account for time-scale operations (accelerate and pre-emptive expand) and
// make sure that the filtered value remains non-negative.
filtered_current_level_ = rtc::saturated_cast<int>(std::max<int64_t>(
filtered_current_level_ = saturated_cast<int>(std::max<int64_t>(
0, filtered_current_level - int64_t{time_stretched_samples} * (1 << 8)));
}
void BufferLevelFilter::SetFilteredBufferLevel(int buffer_size_samples) {
filtered_current_level_ =
rtc::saturated_cast<int>(int64_t{buffer_size_samples} * 256);
saturated_cast<int>(int64_t{buffer_size_samples} * 256);
}
void BufferLevelFilter::SetTargetBufferLevel(int target_buffer_level_ms) {

View File

@@ -234,10 +234,10 @@ NetEq::Operation DecisionLogic::CngOperation(
// The waiting time for this packet will be longer than 1.5
// times the wanted buffer delay. Apply fast-forward to cut the
// waiting time down to the optimal.
noise_fast_forward_ = rtc::saturated_cast<size_t>(noise_fast_forward_ +
excess_waiting_time_samp);
noise_fast_forward_ =
saturated_cast<size_t>(noise_fast_forward_ + excess_waiting_time_samp);
timestamp_diff =
rtc::saturated_cast<int32_t>(timestamp_diff + excess_waiting_time_samp);
saturated_cast<int32_t>(timestamp_diff + excess_waiting_time_samp);
}
if (timestamp_diff < 0 && status.last_mode == NetEq::Mode::kRfc3389Cng) {

View File

@@ -223,7 +223,7 @@ int Expand::Process(AudioMultiVector* output) {
// >= 64 * fs_mult => go from 1 to 0 in about 32 ms.
// temp_shift = getbits(max_lag_) - 5.
int temp_shift =
(31 - WebRtcSpl_NormW32(rtc::dchecked_cast<int32_t>(max_lag_))) - 5;
(31 - WebRtcSpl_NormW32(dchecked_cast<int32_t>(max_lag_))) - 5;
int16_t mix_factor_increment = 256 >> temp_shift;
if (stop_muting_) {
mix_factor_increment = 0;
@@ -314,8 +314,8 @@ int Expand::Process(AudioMultiVector* output) {
: consecutive_expands_ + 1;
expand_duration_samples_ += output->Size();
// Clamp the duration counter at 2 seconds.
expand_duration_samples_ = std::min(expand_duration_samples_,
rtc::dchecked_cast<size_t>(fs_hz_ * 2));
expand_duration_samples_ =
std::min(expand_duration_samples_, dchecked_cast<size_t>(fs_hz_ * 2));
return 0;
}
@@ -743,8 +743,7 @@ void Expand::AnalyzeSignal(int16_t* random_vector) {
// the division.
// Shift the denominator from Q13 to Q5 before the division. The result of
// the division will then be in Q20.
int16_t denom =
rtc::saturated_cast<int16_t>((distortion_lag * slope) >> 8);
int16_t denom = saturated_cast<int16_t>((distortion_lag * slope) >> 8);
int temp_ratio = WebRtcSpl_DivW32W16((slope - 8192) << 12, denom);
if (slope > 14746) {
// slope > 1.8.

View File

@@ -132,7 +132,7 @@ TEST_F(ExpandTest, DelayedPacketOutage) {
}
expand_.SetParametersForNormalAfterExpand();
// Convert `sum_output_len_samples` to milliseconds.
EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples),
EXPECT_EQ(checked_cast<int>(sum_output_len_samples),
statistics_.last_outage_duration_samples());
}
@@ -170,7 +170,7 @@ TEST_F(ExpandTest, CheckOutageStatsAfterReset) {
}
expand_.SetParametersForNormalAfterExpand();
// Convert `sum_output_len_samples` to milliseconds.
EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples),
EXPECT_EQ(checked_cast<int>(sum_output_len_samples),
statistics_.last_outage_duration_samples());
}

View File

@@ -212,7 +212,7 @@ int16_t Merge::SignalScaling(const int16_t* input,
const int16_t* expanded_signal) const {
// Adjust muting factor if new vector is more or less of the BGN energy.
const auto mod_input_length =
SafeMin<size_t>(64 * rtc::dchecked_cast<size_t>(fs_mult_), input_length);
SafeMin<size_t>(64 * dchecked_cast<size_t>(fs_mult_), input_length);
const int16_t expanded_max =
WebRtcSpl_MaxAbsValueW16(expanded_signal, mod_input_length);
int32_t factor =

View File

@@ -75,7 +75,7 @@ class AudioDecoderPlc : public AudioDecoder {
int dec_len = DecodeInternal(nullptr, 2 * 10 * sample_rate_hz_ / 1000,
sample_rate_hz_, decoded.data(), &speech_type);
concealment_audio->AppendData(decoded.data(), dec_len);
concealed_samples_ += rtc::checked_cast<size_t>(dec_len);
concealed_samples_ += checked_cast<size_t>(dec_len);
if (!last_was_plc) {
++concealment_events_;

View File

@@ -213,9 +213,8 @@ int NetEqImpl::GetAudio(AudioFrame* audio_frame,
return kFail;
}
stats_->IncreaseCounter(output_size_samples_, fs_hz_);
RTC_DCHECK_EQ(
audio_frame->sample_rate_hz_,
rtc::dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
RTC_DCHECK_EQ(audio_frame->sample_rate_hz_,
dchecked_cast<int>(audio_frame->samples_per_channel_ * 100));
if (muted != nullptr) {
*muted = audio_frame->muted();
}
@@ -393,7 +392,7 @@ std::optional<NetEq::DecoderFormat> NetEqImpl::GetCurrentDecoderFormat() const {
return DecoderFormat{
/*payload_type=*/*current_rtp_payload_type_,
/*sample_rate_hz=*/di->SampleRateHz(),
/*num_channels=*/rtc::dchecked_cast<int>(di->GetDecoder()->Channels()),
/*num_channels=*/dchecked_cast<int>(di->GetDecoder()->Channels()),
/*sdp_format=*/di->GetFormat()};
}
@@ -437,8 +436,8 @@ std::vector<uint16_t> NetEqImpl::GetNackList(int64_t round_trip_time_ms) const {
int NetEqImpl::SyncBufferSizeMs() const {
MutexLock lock(&mutex_);
return rtc::dchecked_cast<int>(sync_buffer_->FutureLength() /
rtc::CheckedDivExact(fs_hz_, 1000));
return dchecked_cast<int>(sync_buffer_->FutureLength() /
rtc::CheckedDivExact(fs_hz_, 1000));
}
const SyncBuffer* NetEqImpl::sync_buffer_for_test() const {
@@ -1033,7 +1032,7 @@ int NetEqImpl::GetDecision(Operation* operation,
last_mode_ == Mode::kPreemptiveExpandLowEnergy) {
// Subtract (samples_left + output_size_samples_) from sampleMemory.
controller_->AddSampleMemory(
-(samples_left + rtc::dchecked_cast<int>(output_size_samples_)));
-(samples_left + dchecked_cast<int>(output_size_samples_)));
}
// Check if it is time to play a DTMF event.
@@ -1095,7 +1094,7 @@ int NetEqImpl::GetDecision(Operation* operation,
// Check if we already have enough samples in the `sync_buffer_`. If so,
// change decision to normal, unless the decision was merge, accelerate, or
// preemptive expand.
if (samples_left >= rtc::dchecked_cast<int>(output_size_samples_) &&
if (samples_left >= dchecked_cast<int>(output_size_samples_) &&
*operation != Operation::kMerge && *operation != Operation::kAccelerate &&
*operation != Operation::kFastAccelerate &&
*operation != Operation::kPreemptiveExpand) {
@@ -1381,7 +1380,7 @@ int NetEqImpl::DecodeCng(AudioDecoder* decoder,
return 0;
}
while (*decoded_length < rtc::dchecked_cast<int>(output_size_samples_)) {
while (*decoded_length < dchecked_cast<int>(output_size_samples_)) {
const int length = decoder->Decode(
nullptr, 0, fs_hz_,
(decoded_buffer_length_ - *decoded_length) * sizeof(int16_t),
@@ -1436,7 +1435,7 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list,
const auto& result = *opt_result;
*speech_type = result.speech_type;
if (result.num_decoded_samples > 0) {
*decoded_length += rtc::dchecked_cast<int>(result.num_decoded_samples);
*decoded_length += dchecked_cast<int>(result.num_decoded_samples);
// Update `decoder_frame_length_` with number of samples per channel.
decoder_frame_length_ =
result.num_decoded_samples / decoder->Channels();
@@ -1450,7 +1449,7 @@ int NetEqImpl::DecodeLoop(PacketList* packet_list,
packet_list->clear();
break;
}
if (*decoded_length > rtc::dchecked_cast<int>(decoded_buffer_length_)) {
if (*decoded_length > dchecked_cast<int>(decoded_buffer_length_)) {
// Guard against overflow.
RTC_LOG(LS_WARNING) << "Decoded too much.";
packet_list->clear();
@@ -1499,8 +1498,8 @@ void NetEqImpl::DoMerge(int16_t* decoded_buffer,
merge_->Process(decoded_buffer, decoded_length, algorithm_buffer_.get());
// Correction can be negative.
int expand_length_correction =
rtc::dchecked_cast<int>(new_length) -
rtc::dchecked_cast<int>(decoded_length / algorithm_buffer_->Channels());
dchecked_cast<int>(new_length) -
dchecked_cast<int>(decoded_length / algorithm_buffer_->Channels());
// Update in-call and post-call statistics.
if (expand_->Muted() || last_decoded_type_ == AudioDecoder::kComfortNoise) {
@@ -1940,8 +1939,7 @@ int NetEqImpl::ExtractPackets(size_t required_samples,
packet_duration = packet->frame->Duration();
// TODO(ossu): Is this the correct way to track Opus FEC packets?
if (packet->priority.codec_level > 0) {
stats_->SecondaryDecodedSamples(
rtc::dchecked_cast<int>(packet_duration));
stats_->SecondaryDecodedSamples(dchecked_cast<int>(packet_duration));
}
} else if (!has_cng_packet) {
RTC_LOG(LS_WARNING) << "Unknown payload type "
@@ -1988,7 +1986,7 @@ int NetEqImpl::ExtractPackets(size_t required_samples,
packet_buffer_->DiscardAllOldPackets(timestamp_);
}
return rtc::dchecked_cast<int>(extracted_samples);
return dchecked_cast<int>(extracted_samples);
}
void NetEqImpl::UpdatePlcComponents(int fs_hz, size_t channels) {

View File

@@ -495,7 +495,7 @@ TEST_F(NetEqImplTest, VerifyTimestampPropagation) {
decoded[i] = next_value_++;
}
*speech_type = kSpeech;
return rtc::checked_cast<int>(encoded_len);
return checked_cast<int>(encoded_len);
}
void Reset() override { next_value_ = 1; }
@@ -594,7 +594,7 @@ TEST_F(NetEqImplTest, ReorderedPacket) {
.WillRepeatedly(Return(kSampleRateHz));
EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
EXPECT_CALL(mock_decoder, PacketDuration(_, kPayloadLengthBytes))
.WillRepeatedly(Return(rtc::checked_cast<int>(kPayloadLengthSamples)));
.WillRepeatedly(Return(checked_cast<int>(kPayloadLengthSamples)));
int16_t dummy_output[kPayloadLengthSamples] = {0};
// The below expectation will make the mock decoder write
// `kPayloadLengthSamples` zeros to the output array, and mark it as speech.
@@ -603,7 +603,7 @@ TEST_F(NetEqImplTest, ReorderedPacket) {
.WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
dummy_output + kPayloadLengthSamples),
SetArgPointee<4>(AudioDecoder::kSpeech),
Return(rtc::checked_cast<int>(kPayloadLengthSamples))));
Return(checked_cast<int>(kPayloadLengthSamples))));
EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType,
SdpAudioFormat("L16", 8000, 1)));
@@ -652,7 +652,7 @@ TEST_F(NetEqImplTest, ReorderedPacket) {
.WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
dummy_output + kPayloadLengthSamples),
SetArgPointee<4>(AudioDecoder::kSpeech),
Return(rtc::checked_cast<int>(kPayloadLengthSamples))));
Return(checked_cast<int>(kPayloadLengthSamples))));
// Pull audio once.
EXPECT_EQ(NetEq::kOK, neteq_->GetAudio(&output, &muted));
@@ -753,7 +753,7 @@ std::vector<uint8_t> CreateRedPayload(size_t num_payloads,
*payload_ptr |= 0x80;
++payload_ptr;
const int this_offset =
rtc::checked_cast<int>((num_payloads - i - 1) * timestamp_offset);
checked_cast<int>((num_payloads - i - 1) * timestamp_offset);
*payload_ptr = this_offset >> 6;
++payload_ptr;
RTC_DCHECK_LE(payload_size, 1023); // Max length described by 10 bits.
@@ -1108,11 +1108,11 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
.WillRepeatedly(Return(kSampleRateKhz * 1000));
EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
EXPECT_CALL(mock_decoder, PacketDuration(_, kPayloadLengthBytes))
.WillRepeatedly(Return(rtc::checked_cast<int>(kPayloadLengthSamples)));
.WillRepeatedly(Return(checked_cast<int>(kPayloadLengthSamples)));
// Packed duration when asking the decoder for more CNG data (without a new
// packet).
EXPECT_CALL(mock_decoder, PacketDuration(nullptr, 0))
.WillRepeatedly(Return(rtc::checked_cast<int>(kPayloadLengthSamples)));
.WillRepeatedly(Return(checked_cast<int>(kPayloadLengthSamples)));
EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType,
SdpAudioFormat("opus", 48000, 2)));
@@ -1141,7 +1141,7 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
.WillOnce(DoAll(SetArrayArgument<3>(
dummy_output, dummy_output + kPayloadLengthSamples),
SetArgPointee<4>(packets[i].decoder_output_type),
Return(rtc::checked_cast<int>(kPayloadLengthSamples))));
Return(checked_cast<int>(kPayloadLengthSamples))));
}
// Expect comfort noise to be returned by the decoder.
@@ -1150,7 +1150,7 @@ TEST_F(NetEqImplTest, CodecInternalCng) {
.WillOnce(DoAll(SetArrayArgument<3>(dummy_output,
dummy_output + kPayloadLengthSamples),
SetArgPointee<4>(AudioDecoder::kComfortNoise),
Return(rtc::checked_cast<int>(kPayloadLengthSamples))));
Return(checked_cast<int>(kPayloadLengthSamples))));
std::vector<AudioFrame::SpeechType> expected_output = {
AudioFrame::kNormalSpeech, AudioFrame::kCNG, AudioFrame::kNormalSpeech};
@@ -1202,7 +1202,7 @@ TEST_F(NetEqImplTest, UnsupportedDecoder) {
EXPECT_CALL(decoder,
PacketDuration(Pointee(kFirstPayloadValue), kPayloadLengthBytes))
.Times(AtLeast(1))
.WillRepeatedly(Return(rtc::checked_cast<int>(kNetEqMaxFrameSize + 1)));
.WillRepeatedly(Return(checked_cast<int>(kNetEqMaxFrameSize + 1)));
EXPECT_CALL(decoder, DecodeInternal(Pointee(kFirstPayloadValue), _, _, _, _))
.Times(0);
@@ -1219,7 +1219,7 @@ TEST_F(NetEqImplTest, UnsupportedDecoder) {
EXPECT_CALL(decoder,
PacketDuration(Pointee(kSecondPayloadValue), kPayloadLengthBytes))
.Times(AtLeast(1))
.WillRepeatedly(Return(rtc::checked_cast<int>(kNetEqMaxFrameSize)));
.WillRepeatedly(Return(checked_cast<int>(kNetEqMaxFrameSize)));
EXPECT_CALL(decoder, SampleRateHz()).WillRepeatedly(Return(kSampleRateHz));
@@ -1292,7 +1292,7 @@ TEST_F(NetEqImplTest, FloodBufferAndGetNetworkStats) {
for (size_t i = 0; i <= config_.max_packets_in_buffer; ++i) {
EXPECT_EQ(i, packet_buffer_->NumPacketsInBuffer());
EXPECT_EQ(NetEq::kOK, neteq_->InsertPacket(rtp_header, payload));
rtp_header.timestamp += rtc::checked_cast<uint32_t>(kPayloadLengthSamples);
rtp_header.timestamp += checked_cast<uint32_t>(kPayloadLengthSamples);
++rtp_header.sequenceNumber;
}
EXPECT_EQ(1u, packet_buffer_->NumPacketsInBuffer());
@@ -1327,7 +1327,7 @@ TEST_F(NetEqImplTest, DecodedPayloadTooShort) {
.WillRepeatedly(Return(kSampleRateHz));
EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
EXPECT_CALL(mock_decoder, PacketDuration(_, _))
.WillRepeatedly(Return(rtc::checked_cast<int>(kPayloadLengthSamples)));
.WillRepeatedly(Return(checked_cast<int>(kPayloadLengthSamples)));
int16_t dummy_output[kPayloadLengthSamples] = {0};
// The below expectation will make the mock decoder write
// `kPayloadLengthSamples` - 5 zeros to the output array, and mark it as
@@ -1338,7 +1338,7 @@ TEST_F(NetEqImplTest, DecodedPayloadTooShort) {
DoAll(SetArrayArgument<3>(dummy_output,
dummy_output + kPayloadLengthSamples - 5),
SetArgPointee<4>(AudioDecoder::kSpeech),
Return(rtc::checked_cast<int>(kPayloadLengthSamples - 5))));
Return(checked_cast<int>(kPayloadLengthSamples - 5))));
EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType,
SdpAudioFormat("L16", 8000, 1)));
@@ -1392,7 +1392,7 @@ TEST_F(NetEqImplTest, DecodingError) {
.WillRepeatedly(Return(kSampleRateHz));
EXPECT_CALL(mock_decoder, Channels()).WillRepeatedly(Return(1));
EXPECT_CALL(mock_decoder, PacketDuration(_, _))
.WillRepeatedly(Return(rtc::checked_cast<int>(kFrameLengthSamples)));
.WillRepeatedly(Return(checked_cast<int>(kFrameLengthSamples)));
EXPECT_CALL(mock_decoder, ErrorCode()).WillOnce(Return(kDecoderErrorCode));
EXPECT_CALL(mock_decoder, HasDecodePlc()).WillOnce(Return(false));
int16_t dummy_output[kFrameLengthSamples] = {0};
@@ -1407,7 +1407,7 @@ TEST_F(NetEqImplTest, DecodingError) {
DoAll(SetArrayArgument<3>(dummy_output,
dummy_output + kFrameLengthSamples),
SetArgPointee<4>(AudioDecoder::kSpeech),
Return(rtc::checked_cast<int>(kFrameLengthSamples))))
Return(checked_cast<int>(kFrameLengthSamples))))
.RetiresOnSaturation();
// Then mock decoder fails. A common reason for failure can be buffer being
@@ -1425,7 +1425,7 @@ TEST_F(NetEqImplTest, DecodingError) {
DoAll(SetArrayArgument<3>(dummy_output,
dummy_output + kFrameLengthSamples),
SetArgPointee<4>(AudioDecoder::kSpeech),
Return(rtc::checked_cast<int>(kFrameLengthSamples))));
Return(checked_cast<int>(kFrameLengthSamples))));
}
EXPECT_TRUE(neteq_->RegisterPayloadType(kPayloadType,
@@ -1746,7 +1746,7 @@ class Decoder120ms : public AudioDecoder {
decoded[i] = next_value_++;
}
*speech_type = speech_type_;
return rtc::checked_cast<int>(decoded_len);
return checked_cast<int>(decoded_len);
}
void Reset() override { next_value_ = 1; }

View File

@@ -120,8 +120,8 @@ TEST_F(NetEqDecodingTestFaxMode, TestFrameWaitingTimeStatistics) {
for (size_t i = 0; i < num_frames; ++i) {
const uint8_t payload[kPayloadBytes] = {0};
RTPHeader rtp_info;
rtp_info.sequenceNumber = rtc::checked_cast<uint16_t>(i);
rtp_info.timestamp = rtc::checked_cast<uint32_t>(i * kSamples);
rtp_info.sequenceNumber = checked_cast<uint16_t>(i);
rtp_info.timestamp = checked_cast<uint32_t>(i * kSamples);
rtp_info.ssrc = 0x1234; // Just an arbitrary SSRC.
rtp_info.payloadType = 94; // PCM16b WB codec.
rtp_info.markerBit = 0;
@@ -346,7 +346,7 @@ class NetEqBgnTest : public NetEqDecodingTest {
// Next packet.
rtp_info.timestamp +=
rtc::checked_cast<uint32_t>(expected_samples_per_channel);
checked_cast<uint32_t>(expected_samples_per_channel);
rtp_info.sequenceNumber++;
}
@@ -929,11 +929,10 @@ void NetEqDecodingTestFaxMode::TestJitterBufferDelay(bool apply_packet_loss) {
// Check jitter buffer delay.
NetEqLifetimeStatistics stats = neteq_->GetLifetimeStatistics();
EXPECT_EQ(expected_delay,
rtc::checked_cast<int>(stats.jitter_buffer_delay_ms));
EXPECT_EQ(expected_delay, checked_cast<int>(stats.jitter_buffer_delay_ms));
EXPECT_EQ(expected_emitted_count, stats.jitter_buffer_emitted_count);
EXPECT_EQ(expected_target_delay,
rtc::checked_cast<int>(stats.jitter_buffer_target_delay_ms));
checked_cast<int>(stats.jitter_buffer_target_delay_ms));
// In this test, since the packets are inserted with a receive time equal to
// the current clock time, the jitter buffer delay should match the total
// processing delay.
@@ -982,7 +981,7 @@ TEST_F(NetEqDecodingTestFaxMode, TestJitterBufferDelayWithAcceleration) {
EXPECT_EQ(10 * kSamples * 3, stats.jitter_buffer_delay_ms);
EXPECT_EQ(kSamples * 3, stats.jitter_buffer_emitted_count);
EXPECT_EQ(expected_target_delay,
rtc::checked_cast<int>(stats.jitter_buffer_target_delay_ms));
checked_cast<int>(stats.jitter_buffer_target_delay_ms));
}
namespace test {

View File

@@ -43,7 +43,7 @@ class Normal {
expand_(expand),
samples_per_ms_(rtc::CheckedDivExact(fs_hz_, 1000)),
default_win_slope_Q14_(
rtc::dchecked_cast<uint16_t>((1 << 14) / samples_per_ms_)),
dchecked_cast<uint16_t>((1 << 14) / samples_per_ms_)),
statistics_(statistics) {}
virtual ~Normal() {}

View File

@@ -130,7 +130,7 @@ bool RedPayloadSplitter::SplitRed(PacketList* packet_list) {
new_packet.payload_type = new_header.payload_type;
new_packet.sequence_number = red_packet.sequence_number;
new_packet.priority.red_level =
rtc::dchecked_cast<int>((new_headers.size() - 1) - i);
dchecked_cast<int>((new_headers.size() - 1) - i);
new_packet.payload.SetData(payload_ptr, payload_length);
new_packets.push_front(std::move(new_packet));
payload_ptr += payload_length;

View File

@@ -100,7 +100,7 @@ Packet CreateRedPayload(size_t num_payloads,
*payload_ptr |= 0x80;
++payload_ptr;
int this_offset =
rtc::checked_cast<int>((num_payloads - i - 1) * timestamp_offset);
checked_cast<int>((num_payloads - i - 1) * timestamp_offset);
*payload_ptr = this_offset >> 6;
++payload_ptr;
RTC_DCHECK_LE(kPayloadLength, 1023); // Max length described by 10 bits.

View File

@@ -159,7 +159,7 @@ void StatisticsCalculator::ExpandedVoiceSamples(size_t num_samples,
return;
}
expanded_speech_samples_ += num_samples;
ConcealedSamplesCorrection(rtc::dchecked_cast<int>(num_samples), true);
ConcealedSamplesCorrection(dchecked_cast<int>(num_samples), true);
lifetime_stats_.concealment_events += is_new_concealment_event;
}
@@ -169,7 +169,7 @@ void StatisticsCalculator::ExpandedNoiseSamples(size_t num_samples,
return;
}
expanded_noise_samples_ += num_samples;
ConcealedSamplesCorrection(rtc::dchecked_cast<int>(num_samples), false);
ConcealedSamplesCorrection(dchecked_cast<int>(num_samples), false);
lifetime_stats_.concealment_events += is_new_concealment_event;
}

View File

@@ -58,7 +58,7 @@ TEST(SyncBuffer, PushBackAndFlush) {
// Populate `new_data`.
for (size_t channel = 0; channel < kChannels; ++channel) {
for (size_t i = 0; i < kNewLen; ++i) {
new_data[channel][i] = rtc::checked_cast<int16_t>(i);
new_data[channel][i] = checked_cast<int16_t>(i);
}
}
// Push back `new_data` into `sync_buffer`. This operation should pop out
@@ -98,7 +98,7 @@ TEST(SyncBuffer, PushFrontZeros) {
// Populate `new_data`.
for (size_t channel = 0; channel < kChannels; ++channel) {
for (size_t i = 0; i < kNewLen; ++i) {
new_data[channel][i] = rtc::checked_cast<int16_t>(1000 + i);
new_data[channel][i] = checked_cast<int16_t>(1000 + i);
}
}
sync_buffer.PushBack(new_data);
@@ -131,7 +131,7 @@ TEST(SyncBuffer, GetNextAudioInterleaved) {
// Populate `new_data`.
for (size_t channel = 0; channel < kChannels; ++channel) {
for (size_t i = 0; i < kNewLen; ++i) {
new_data[channel][i] = rtc::checked_cast<int16_t>(i);
new_data[channel][i] = checked_cast<int16_t>(i);
}
}
// Push back `new_data` into `sync_buffer`. This operation should pop out

View File

@@ -66,7 +66,7 @@ class NetEqPcm16bQualityTest : public NetEqQualityTest {
payload);
encoded_samples += kFrameSizeSamples;
} while (info.encoded_bytes == 0);
return rtc::checked_cast<int>(info.encoded_bytes);
return checked_cast<int>(info.encoded_bytes);
}
private:

View File

@@ -65,7 +65,7 @@ class NetEqPcmuQualityTest : public NetEqQualityTest {
payload);
encoded_samples += kFrameSizeSamples;
} while (info.encoded_bytes == 0);
return rtc::checked_cast<int>(info.encoded_bytes);
return checked_cast<int>(info.encoded_bytes);
}
private:

View File

@@ -184,7 +184,7 @@ bool TimeStretch::SpeechDetection(int32_t vec1_energy,
// (vec1_energy + vec2_energy) / 16 <= peak_index * background_noise_energy.
// The two sides of the inequality will be denoted `left_side` and
// `right_side`.
int32_t left_side = rtc::saturated_cast<int32_t>(
int32_t left_side = saturated_cast<int32_t>(
(static_cast<int64_t>(vec1_energy) + vec2_energy) / 16);
int32_t right_side;
if (background_noise_.initialized()) {
@@ -196,8 +196,7 @@ bool TimeStretch::SpeechDetection(int32_t vec1_energy,
int right_scale = 16 - WebRtcSpl_NormW32(right_side);
right_scale = std::max(0, right_scale);
left_side = left_side >> right_scale;
right_side =
rtc::dchecked_cast<int32_t>(peak_index) * (right_side >> right_scale);
right_side = dchecked_cast<int32_t>(peak_index) * (right_side >> right_scale);
// Scale `left_side` properly before comparing with `right_side`.
// (`scaling` is the scale factor before energy calculation, thus the scale

View File

@@ -78,9 +78,9 @@ void EncodeNetEqInput::CreatePacket() {
info = encoder_->Encode(rtp_timestamp_, generator_->Generate(num_samples),
&packet_data_->payload);
rtp_timestamp_ += rtc::dchecked_cast<uint32_t>(
num_samples * encoder_->RtpTimestampRateHz() /
encoder_->SampleRateHz());
rtp_timestamp_ +=
dchecked_cast<uint32_t>(num_samples * encoder_->RtpTimestampRateHz() /
encoder_->SampleRateHz());
++num_blocks;
}
packet_data_->header.timestamp = info.encoded_timestamp;

View File

@@ -86,7 +86,7 @@ int FakeDecodeFromFile::DecodeInternal(const uint8_t* encoded,
const int total_samples_to_decode = samples_to_decode * (stereo_ ? 2 : 1);
std::fill_n(decoded, total_samples_to_decode, 0);
*speech_type = kComfortNoise;
return rtc::dchecked_cast<int>(total_samples_to_decode);
return dchecked_cast<int>(total_samples_to_decode);
}
void FakeDecodeFromFile::PrepareEncoded(uint32_t timestamp,
@@ -96,9 +96,9 @@ void FakeDecodeFromFile::PrepareEncoded(uint32_t timestamp,
RTC_CHECK_GE(encoded.size(), 12);
ByteWriter<uint32_t>::WriteLittleEndian(&encoded[0], timestamp);
ByteWriter<uint32_t>::WriteLittleEndian(&encoded[4],
rtc::checked_cast<uint32_t>(samples));
checked_cast<uint32_t>(samples));
ByteWriter<uint32_t>::WriteLittleEndian(
&encoded[8], rtc::checked_cast<uint32_t>(original_payload_size_bytes));
&encoded[8], checked_cast<uint32_t>(original_payload_size_bytes));
}
std::vector<AudioDecoder::ParseResult> FakeDecodeFromFile::ParsePayload(

View File

@@ -23,7 +23,7 @@ TEST(TestInputAudioFile, DuplicateInterleaveSeparateSrcDst) {
static const size_t kChannels = 2;
int16_t input[kSamples];
for (size_t i = 0; i < kSamples; ++i) {
input[i] = rtc::checked_cast<int16_t>(i);
input[i] = checked_cast<int16_t>(i);
}
int16_t output[kSamples * kChannels];
InputAudioFile::DuplicateInterleaved(input, kSamples, kChannels, output);
@@ -42,7 +42,7 @@ TEST(TestInputAudioFile, DuplicateInterleaveSameSrcDst) {
static const size_t kChannels = 5;
int16_t input[kSamples * kChannels];
for (size_t i = 0; i < kSamples; ++i) {
input[i] = rtc::checked_cast<int16_t>(i);
input[i] = checked_cast<int16_t>(i);
}
InputAudioFile::DuplicateInterleaved(input, kSamples, kChannels, input);

View File

@@ -119,10 +119,10 @@ class Packetizer : public AudioPacketizationCallback {
constexpr size_t kRtpHeaderLength = 12;
constexpr size_t kRtpDumpHeaderLength = 8;
const uint16_t length = htons(rtc::checked_cast<uint16_t>(
const uint16_t length = htons(checked_cast<uint16_t>(
kRtpHeaderLength + kRtpDumpHeaderLength + payload_len_bytes));
const uint16_t plen = htons(
rtc::checked_cast<uint16_t>(kRtpHeaderLength + payload_len_bytes));
const uint16_t plen =
htons(checked_cast<uint16_t>(kRtpHeaderLength + payload_len_bytes));
const uint32_t offset = htonl(timestamp / (timestamp_rate_hz_ / 1000));
RTC_CHECK_EQ(fwrite(&length, sizeof(uint16_t), 1, out_file_), 1);
RTC_CHECK_EQ(fwrite(&plen, sizeof(uint16_t), 1, out_file_), 1);

View File

@@ -116,7 +116,7 @@ class AudioStream {
// delay value in milliseconds.
// Example: index=240, frames_per_10ms_buffer=480 => 5ms as output.
int IndexToMilliseconds(size_t index, size_t frames_per_10ms_buffer) {
return rtc::checked_cast<int>(
return checked_cast<int>(
10.0 * (static_cast<double>(index) / frames_per_10ms_buffer) + 0.5);
}

View File

@@ -23,9 +23,9 @@ namespace webrtc {
FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* audio_device_buffer)
: audio_device_buffer_(audio_device_buffer),
playout_samples_per_channel_10ms_(rtc::dchecked_cast<size_t>(
playout_samples_per_channel_10ms_(dchecked_cast<size_t>(
audio_device_buffer->PlayoutSampleRate() * 10 / 1000)),
record_samples_per_channel_10ms_(rtc::dchecked_cast<size_t>(
record_samples_per_channel_10ms_(dchecked_cast<size_t>(
audio_device_buffer->RecordingSampleRate() * 10 / 1000)),
playout_channels_(audio_device_buffer->PlayoutChannels()),
record_channels_(audio_device_buffer->RecordingChannels()) {

View File

@@ -501,7 +501,7 @@ TestAudioDeviceModule::CreateWavFileReader(absl::string_view filename,
bool repeat) {
WavReader reader(filename);
int sampling_frequency_in_hz = reader.sample_rate();
int num_channels = rtc::checked_cast<int>(reader.num_channels());
int num_channels = checked_cast<int>(reader.num_channels());
return std::make_unique<WavFileReader>(filename, sampling_frequency_in_hz,
num_channels, repeat);
}

View File

@@ -27,7 +27,7 @@ void SineWaveGenerator::GenerateNextFrame(AudioFrame* frame) {
for (size_t i = 0; i < frame->samples_per_channel_; ++i) {
for (size_t ch = 0; ch < frame->num_channels_; ++ch) {
frame_data[frame->num_channels_ * i + ch] =
rtc::saturated_cast<int16_t>(amplitude_ * sinf(phase_));
saturated_cast<int16_t>(amplitude_ * sinf(phase_));
}
phase_ += wave_frequency_hz_ * 2 * kPi / frame->sample_rate_hz_;
}

View File

@@ -854,7 +854,7 @@ void WebRtcAecm_UpdateChannel(AecmCore* aecm,
// right shift of 32 is undefined. To avoid that, we
// do this check.
tmpU32no1 =
rtc::dchecked_cast<uint32_t>(
dchecked_cast<uint32_t>(
shiftChFar >= 32 ? 0 : aecm->channelAdapt32[i] >> shiftChFar) *
far_spectrum[i];
}

View File

@@ -513,8 +513,7 @@ int RTC_NO_SANITIZE("signed-integer-overflow") // bugs.webrtc.org/8200
// Far end signal through channel estimate in Q8
// How much can we shift right to preserve resolution
tmp32no1 = echoEst32[i] - aecm->echoFilt[i];
aecm->echoFilt[i] +=
rtc::dchecked_cast<int32_t>((int64_t{tmp32no1} * 50) >> 8);
aecm->echoFilt[i] += dchecked_cast<int32_t>((int64_t{tmp32no1} * 50) >> 8);
zeros32 = WebRtcSpl_NormW32(aecm->echoFilt[i]) + 1;
zeros16 = WebRtcSpl_NormW16(supGain) + 1;

View File

@@ -36,7 +36,7 @@ constexpr float kAttackFirstSubframeInterpolationPower = 8.0f;
void InterpolateFirstSubframe(float last_factor,
float current_factor,
rtc::ArrayView<float> subframe) {
const int n = rtc::dchecked_cast<int>(subframe.size());
const int n = dchecked_cast<int>(subframe.size());
constexpr float p = kAttackFirstSubframeInterpolationPower;
for (int i = 0; i < n; ++i) {
subframe[i] = std::pow(1.f - i / n, p) * (last_factor - current_factor) +

View File

@@ -78,7 +78,7 @@ TEST(RnnVadTest, FeatureExtractionLowHighPitch) {
FeaturesExtractor features_extractor(cpu_features);
std::vector<float> samples(kNumTestDataSize);
std::vector<float> feature_vector(kFeatureVectorSize);
ASSERT_EQ(kFeatureVectorSize, rtc::dchecked_cast<int>(feature_vector.size()));
ASSERT_EQ(kFeatureVectorSize, dchecked_cast<int>(feature_vector.size()));
rtc::ArrayView<float, kFeatureVectorSize> feature_vector_view(
feature_vector.data(), kFeatureVectorSize);

View File

@@ -40,8 +40,8 @@ std::vector<float> PreprocessWeights(rtc::ArrayView<const int8_t> weights,
return GetScaledParams(weights);
}
// Transpose, scale and cast.
const int input_size = rtc::CheckedDivExact(
rtc::dchecked_cast<int>(weights.size()), output_size);
const int input_size =
rtc::CheckedDivExact(dchecked_cast<int>(weights.size()), output_size);
std::vector<float> w(weights.size());
for (int o = 0; o < output_size; ++o) {
for (int i = 0; i < input_size; ++i) {

View File

@@ -25,7 +25,7 @@ std::vector<float> PreprocessGruTensor(rtc::ArrayView<const int8_t> tensor_src,
int output_size) {
// Transpose, cast and scale.
// `n` is the size of the first dimension of the 3-dim tensor `weights`.
const int n = rtc::CheckedDivExact(rtc::dchecked_cast<int>(tensor_src.size()),
const int n = rtc::CheckedDivExact(dchecked_cast<int>(tensor_src.size()),
output_size * kNumGruGates);
const int stride_src = kNumGruGates * output_size;
const int stride_dst = n * output_size;

View File

@@ -31,9 +31,9 @@ void TestGatedRecurrentLayer(
rtc::ArrayView<const float> input_sequence,
rtc::ArrayView<const float> expected_output_sequence) {
const int input_sequence_length = rtc::CheckedDivExact(
rtc::dchecked_cast<int>(input_sequence.size()), gru.input_size());
dchecked_cast<int>(input_sequence.size()), gru.input_size());
const int output_sequence_length = rtc::CheckedDivExact(
rtc::dchecked_cast<int>(expected_output_sequence.size()), gru.size());
dchecked_cast<int>(expected_output_sequence.size()), gru.size());
ASSERT_EQ(input_sequence_length, output_sequence_length)
<< "The test data length is invalid.";
// Feed the GRU layer and check the output at every step.

View File

@@ -67,8 +67,8 @@ class VectorMath {
accumulator = _mm_add_ps(accumulator, high);
float dot_product = _mm_cvtss_f32(accumulator);
// Add the result for the last block if incomplete.
for (int i = incomplete_block_index;
i < rtc::dchecked_cast<int>(x.size()); ++i) {
for (int i = incomplete_block_index; i < dchecked_cast<int>(x.size());
++i) {
dot_product += x[i] * y[i];
}
return dot_product;

View File

@@ -43,8 +43,7 @@ float VectorMath::DotProductAvx2(rtc::ArrayView<const float> x,
low = _mm_add_ss(high, low);
float dot_product = _mm_cvtss_f32(low);
// Add the result for the last block if incomplete.
for (int i = incomplete_block_index; i < rtc::dchecked_cast<int>(x.size());
++i) {
for (int i = incomplete_block_index; i < dchecked_cast<int>(x.size()); ++i) {
dot_product += x[i] * y[i];
}
return dot_product;

View File

@@ -160,7 +160,7 @@ void OpenFileAndWriteMessage(absl::string_view filename,
FILE* file = fopen(std::string(filename).c_str(), "wb");
ASSERT_TRUE(file != NULL);
int32_t size = rtc::checked_cast<int32_t>(msg.ByteSizeLong());
int32_t size = checked_cast<int32_t>(msg.ByteSizeLong());
ASSERT_GT(size, 0);
std::unique_ptr<uint8_t[]> array(new uint8_t[size]);
ASSERT_TRUE(msg.SerializeToArray(array.get(), size));
@@ -673,12 +673,12 @@ void ApmTest::ProcessDelayVerificationTest(int delay_ms,
SafeMin<size_t>(16u, frame_.samples_per_channel() / 10);
const int expected_median =
SafeClamp<int>(delay_ms - system_delay_ms, delay_min, delay_max);
const int expected_median_high = SafeClamp<int>(
expected_median + rtc::dchecked_cast<int>(96 / samples_per_ms), delay_min,
delay_max);
const int expected_median_low = SafeClamp<int>(
expected_median - rtc::dchecked_cast<int>(96 / samples_per_ms), delay_min,
delay_max);
const int expected_median_high =
SafeClamp<int>(expected_median + dchecked_cast<int>(96 / samples_per_ms),
delay_min, delay_max);
const int expected_median_low =
SafeClamp<int>(expected_median - dchecked_cast<int>(96 / samples_per_ms),
delay_min, delay_max);
// Verify delay metrics.
AudioProcessingStats stats = apm_->GetStatistics();
ASSERT_TRUE(stats.delay_median_ms.has_value());

View File

@@ -48,7 +48,7 @@ std::vector<int16_t> CreateInt16Sinusoid(int frequency_hz,
size_t num_samples) {
std::vector<int16_t> x(num_samples);
for (size_t n = 0; n < num_samples; ++n) {
x[n] = rtc::saturated_cast<int16_t>(
x[n] = saturated_cast<int16_t>(
amplitude * std::sin(2 * M_PI * n * frequency_hz / kSampleRateHz));
}
return x;

View File

@@ -142,7 +142,7 @@ void AecDumpBasedSimulator::PrepareProcessStreamCall(
if (msg.has_input_data()) {
int16_t* fwd_frame_data = fwd_frame_.data.data();
for (size_t k = 0; k < in_buf_->num_frames(); ++k) {
fwd_frame_data[k] = rtc::saturated_cast<int16_t>(
fwd_frame_data[k] = saturated_cast<int16_t>(
fwd_frame_data[k] +
static_cast<int16_t>(32767 *
artificial_nearend_buf_->channels()[0][k]));

View File

@@ -107,7 +107,7 @@ void CreateSineWavFile(absl::string_view filepath,
double phase = 0.0;
std::vector<int16_t> samples(params.num_samples);
for (size_t i = 0; i < params.num_samples; ++i) {
samples[i] = rtc::saturated_cast<int16_t>(32767.0f * std::sin(phase));
samples[i] = saturated_cast<int16_t>(32767.0f * std::sin(phase));
phase += phase_step;
}

View File

@@ -165,7 +165,7 @@ void ScaleSignal(rtc::ArrayView<const int16_t> source_samples,
RTC_DCHECK_EQ(source_samples.size(), output_samples.size());
std::transform(source_samples.begin(), source_samples.end(),
output_samples.begin(), [gain_linear](int16_t x) -> int16_t {
return rtc::saturated_cast<int16_t>(x * gain_linear);
return saturated_cast<int16_t>(x * gain_linear);
});
}

View File

@@ -76,7 +76,7 @@ class FakeRecordingDeviceLinear final : public FakeRecordingDeviceWorker {
const float divisor =
(undo_mic_level_ && *undo_mic_level_ > 0) ? *undo_mic_level_ : 255.f;
for (size_t i = 0; i < number_of_samples; ++i) {
data[i] = rtc::saturated_cast<int16_t>(data[i] * mic_level_ / divisor);
data[i] = saturated_cast<int16_t>(data[i] * mic_level_ / divisor);
}
}
void ModifyBufferFloat(ChannelBuffer<float>* buffer) override {
@@ -116,7 +116,7 @@ class FakeRecordingDeviceAgc final : public FakeRecordingDeviceWorker {
const size_t number_of_samples = buffer.size();
int16_t* data = buffer.data();
for (size_t i = 0; i < number_of_samples; ++i) {
data[i] = rtc::saturated_cast<int16_t>(data[i] * scaling_factor);
data[i] = saturated_cast<int16_t>(data[i] * scaling_factor);
}
}
void ModifyBufferFloat(ChannelBuffer<float>* buffer) override {

View File

@@ -54,7 +54,7 @@ void IntervalBudget::UseBudget(size_t bytes) {
}
size_t IntervalBudget::bytes_remaining() const {
return rtc::saturated_cast<size_t>(std::max<int64_t>(0, bytes_remaining_));
return saturated_cast<size_t>(std::max<int64_t>(0, bytes_remaining_));
}
double IntervalBudget::budget_ratio() const {

View File

@@ -272,7 +272,7 @@ TimeDelta PacingController::ExpectedQueueTime() const {
}
size_t PacingController::QueueSizePackets() const {
return rtc::checked_cast<size_t>(packet_queue_.SizeInPackets());
return checked_cast<size_t>(packet_queue_.SizeInPackets());
}
const std::array<int, kNumMediaTypes>&

View File

@@ -42,8 +42,7 @@ inline constexpr int64_t ToNtpUnits(TimeDelta delta) {
// then multiplaction and conversion to seconds are swapped to avoid float
// arithmetic.
// 2^31 us ~= 35.8 minutes.
return (rtc::saturated_cast<int32_t>(delta.us()) * (int64_t{1} << 32)) /
1'000'000;
return (saturated_cast<int32_t>(delta.us()) * (int64_t{1} << 32)) / 1'000'000;
}
// Converts interval from compact ntp (1/2^16 seconds) resolution to TimeDelta.

View File

@@ -77,7 +77,7 @@ void Dlrr::Create(uint8_t* buffer) const {
buffer[0] = kBlockType;
buffer[1] = kReserved;
ByteWriter<uint16_t>::WriteBigEndian(
&buffer[2], rtc::dchecked_cast<uint16_t>(3 * sub_blocks_.size()));
&buffer[2], dchecked_cast<uint16_t>(3 * sub_blocks_.size()));
// Create sub blocks.
uint8_t* write_at = buffer + kBlockHeaderLength;
for (const ReceiveTimeInfo& sub_block : sub_blocks_) {

View File

@@ -113,7 +113,7 @@ void TargetBitrate::Create(uint8_t* buffer) const {
buffer[0] = kBlockType;
buffer[1] = 0; // Reserved.
uint16_t block_length_words =
rtc::dchecked_cast<uint16_t>((BlockLength() / 4) - 1);
dchecked_cast<uint16_t>((BlockLength() / 4) - 1);
ByteWriter<uint16_t>::WriteBigEndian(&buffer[2], block_length_words);
size_t index = kTargetBitrateHeaderSizeBytes;

View File

@@ -740,7 +740,7 @@ TimeDelta RTCPSender::ComputeTimeUntilNextReport(DataRate send_bitrate) {
// The interval between RTCP packets is varied randomly over the
// range [1/2,3/2] times the calculated interval.
int min_interval_int = rtc::dchecked_cast<int>(min_interval.ms());
int min_interval_int = dchecked_cast<int>(min_interval.ms());
TimeDelta time_to_next = TimeDelta::Millis(
random_.Rand(min_interval_int * 1 / 2, min_interval_int * 3 / 2));

View File

@@ -231,7 +231,7 @@ void RtpPacket::SetCsrcs(rtc::ArrayView<const uint32_t> csrcs) {
RTC_DCHECK_LE(csrcs.size(), 0x0fu);
RTC_DCHECK_LE(kFixedHeaderSize + 4 * csrcs.size(), capacity());
payload_offset_ = kFixedHeaderSize + 4 * csrcs.size();
WriteAt(0, (data()[0] & 0xF0) | rtc::dchecked_cast<uint8_t>(csrcs.size()));
WriteAt(0, (data()[0] & 0xF0) | dchecked_cast<uint8_t>(csrcs.size()));
size_t offset = kFixedHeaderSize;
for (uint32_t csrc : csrcs) {
ByteWriter<uint32_t>::WriteBigEndian(WriteAt(offset), csrc);
@@ -327,20 +327,20 @@ rtc::ArrayView<uint8_t> RtpPacket::AllocateRawExtension(int id, size_t length) {
}
if (profile_id == kOneByteExtensionProfileId) {
uint8_t one_byte_header = rtc::dchecked_cast<uint8_t>(id) << 4;
one_byte_header |= rtc::dchecked_cast<uint8_t>(length - 1);
uint8_t one_byte_header = dchecked_cast<uint8_t>(id) << 4;
one_byte_header |= dchecked_cast<uint8_t>(length - 1);
WriteAt(extensions_offset + extensions_size_, one_byte_header);
} else {
// TwoByteHeaderExtension.
uint8_t extension_id = rtc::dchecked_cast<uint8_t>(id);
uint8_t extension_id = dchecked_cast<uint8_t>(id);
WriteAt(extensions_offset + extensions_size_, extension_id);
uint8_t extension_length = rtc::dchecked_cast<uint8_t>(length);
uint8_t extension_length = dchecked_cast<uint8_t>(length);
WriteAt(extensions_offset + extensions_size_ + 1, extension_length);
}
const uint16_t extension_info_offset = rtc::dchecked_cast<uint16_t>(
const uint16_t extension_info_offset = dchecked_cast<uint16_t>(
extensions_offset + extensions_size_ + extension_header_size);
const uint8_t extension_info_length = rtc::dchecked_cast<uint8_t>(length);
const uint8_t extension_info_length = dchecked_cast<uint8_t>(length);
extension_entries_.emplace_back(id, extension_info_length,
extension_info_offset);
@@ -371,7 +371,7 @@ void RtpPacket::PromoteToTwoByteHeaderExtension() {
size_t read_index = extension_entry->offset;
size_t write_index = read_index + write_read_delta;
// Update offset.
extension_entry->offset = rtc::dchecked_cast<uint16_t>(write_index);
extension_entry->offset = dchecked_cast<uint16_t>(write_index);
// Copy data. Use memmove since read/write regions may overlap.
memmove(WriteAt(write_index), data() + read_index, extension_entry->length);
// Rewrite id and length.
@@ -393,8 +393,8 @@ void RtpPacket::PromoteToTwoByteHeaderExtension() {
uint16_t RtpPacket::SetExtensionLengthMaybeAddZeroPadding(
size_t extensions_offset) {
// Update header length field.
uint16_t extensions_words = rtc::dchecked_cast<uint16_t>(
(extensions_size_ + 3) / 4); // Wrap up to 32bit.
uint16_t extensions_words =
dchecked_cast<uint16_t>((extensions_size_ + 3) / 4); // Wrap up to 32bit.
ByteWriter<uint16_t>::WriteBigEndian(WriteAt(extensions_offset - 2),
extensions_words);
// Fill extension padding place with zeroes.
@@ -426,7 +426,7 @@ bool RtpPacket::SetPadding(size_t padding_bytes) {
<< " bytes left in buffer.";
return false;
}
padding_size_ = rtc::dchecked_cast<uint8_t>(padding_bytes);
padding_size_ = dchecked_cast<uint8_t>(padding_bytes);
buffer_.SetSize(payload_offset_ + payload_size_ + padding_size_);
if (padding_size_ > 0) {
size_t padding_offset = payload_offset_ + payload_size_;
@@ -547,7 +547,7 @@ bool RtpPacket::ParseBuffer(const uint8_t* buffer, size_t size) {
size_t offset =
extension_offset + extensions_size_ + extension_header_length;
if (!rtc::IsValueInRangeForNumericType<uint16_t>(offset)) {
if (!IsValueInRangeForNumericType<uint16_t>(offset)) {
RTC_DLOG(LS_WARNING) << "Oversized rtp header extension.";
break;
}

View File

@@ -42,7 +42,7 @@ void RtpPacketReceived::GetHeader(RTPHeader* header) const {
header->timestamp = Timestamp();
header->ssrc = Ssrc();
std::vector<uint32_t> csrcs = Csrcs();
header->numCSRCs = rtc::dchecked_cast<uint8_t>(csrcs.size());
header->numCSRCs = dchecked_cast<uint8_t>(csrcs.size());
for (size_t i = 0; i < csrcs.size(); ++i) {
header->arrOfCSRCs[i] = csrcs[i];
}

View File

@@ -77,7 +77,7 @@ int32_t RTPSenderAudio::RegisterAudioPayload(absl::string_view payload_name,
return 0;
} else if (payload_name == "audio") {
MutexLock lock(&send_audio_mutex_);
encoder_rtp_timestamp_frequency_ = rtc::dchecked_cast<int>(frequency);
encoder_rtp_timestamp_frequency_ = dchecked_cast<int>(frequency);
return 0;
}
return 0;

View File

@@ -319,7 +319,7 @@ bool CalculateObuSizes(ObuInfo* obu_info) {
}
obu_info->payload_offset = it;
obu_info->prefix_size +=
WriteLeb128(rtc::dchecked_cast<uint64_t>(obu_info->payload_size),
WriteLeb128(dchecked_cast<uint64_t>(obu_info->payload_size),
obu_info->prefix.data() + obu_info->prefix_size);
return true;
}

View File

@@ -1377,7 +1377,7 @@ VideoEncoder::EncoderInfo LibvpxVp8Encoder::GetEncoderInfo() const {
for (size_t ti = 0; ti < vpx_configs_[encoder_idx].ts_number_layers;
++ti) {
RTC_DCHECK_GT(vpx_configs_[encoder_idx].ts_rate_decimator[ti], 0);
info.fps_allocation[si].push_back(rtc::saturated_cast<uint8_t>(
info.fps_allocation[si].push_back(saturated_cast<uint8_t>(
EncoderInfo::kMaxFramerateFraction /
vpx_configs_[encoder_idx].ts_rate_decimator[ti] +
0.5));

View File

@@ -1878,8 +1878,8 @@ VideoEncoder::EncoderInfo LibvpxVp9Encoder::GetEncoderInfo() const {
num_temporal_layers_ <= 1 ? 1 : config_->ts_rate_decimator[ti];
RTC_DCHECK_GT(decimator, 0);
info.fps_allocation[si].push_back(
rtc::saturated_cast<uint8_t>(EncoderInfo::kMaxFramerateFraction *
(sl_fps_fraction / decimator)));
saturated_cast<uint8_t>(EncoderInfo::kMaxFramerateFraction *
(sl_fps_fraction / decimator)));
}
}
if (profile_ == VP9Profile::kProfile0) {

View File

@@ -137,7 +137,7 @@ VCMEncodedFrame* VCMReceiver::FrameForDecoding(uint16_t max_wait_time_ms,
static_cast<int32_t>(clock_->TimeInMilliseconds() - start_time_ms);
uint16_t new_max_wait_time =
static_cast<uint16_t>(VCM_MAX(available_wait_time, 0));
uint32_t wait_time_ms = rtc::saturated_cast<uint32_t>(
uint32_t wait_time_ms = saturated_cast<uint32_t>(
timing_
->MaxWaitingTime(Timestamp::Millis(render_time_ms),
clock_->CurrentTime(),

View File

@@ -129,8 +129,8 @@ bool VCMNackFecMethod::ProtectionFactor(
// Adjust FEC with NACK on (for delta frame only)
// table depends on RTT relative to rttMax (NACK Threshold)
_protectionFactorD = rtc::saturated_cast<uint8_t>(
adjustRtt * rtc::saturated_cast<float>(_protectionFactorD));
_protectionFactorD = saturated_cast<uint8_t>(
adjustRtt * saturated_cast<float>(_protectionFactorD));
// update FEC rates after applying adjustment
VCMFecMethod::UpdateProtectionFactorD(_protectionFactorD);
}
@@ -152,9 +152,9 @@ int VCMNackFecMethod::ComputeMaxFramesFec(
// RTP module based on the actual number of packets and the protection factor.
float base_layer_framerate =
parameters->frameRate /
rtc::saturated_cast<float>(1 << (parameters->numLayers - 1));
saturated_cast<float>(1 << (parameters->numLayers - 1));
int max_frames_fec = std::max(
rtc::saturated_cast<int>(
saturated_cast<int>(
2.0f * base_layer_framerate * parameters->rtt / 1000.0f + 0.5f),
1);
// `kUpperLimitFramesFec` is the upper limit on how many frames we
@@ -270,9 +270,9 @@ uint8_t VCMFecMethod::BoostCodeRateKey(uint8_t packetFrameDelta,
}
uint8_t VCMFecMethod::ConvertFECRate(uint8_t codeRateRTP) const {
return rtc::saturated_cast<uint8_t>(
VCM_MIN(255, (0.5 + 255.0 * codeRateRTP /
rtc::saturated_cast<float>(255 - codeRateRTP))));
return saturated_cast<uint8_t>(VCM_MIN(
255,
(0.5 + 255.0 * codeRateRTP / saturated_cast<float>(255 - codeRateRTP))));
}
// Update FEC with protectionFactorD
@@ -289,7 +289,7 @@ bool VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters) {
// FEC PROTECTION SETTINGS: varies with packet loss and bitrate
// No protection if (filtered) packetLoss is 0
uint8_t packetLoss = rtc::saturated_cast<uint8_t>(255 * parameters->lossPr);
uint8_t packetLoss = saturated_cast<uint8_t>(255 * parameters->lossPr);
if (packetLoss == 0) {
_protectionFactorK = 0;
_protectionFactorD = 0;
@@ -300,7 +300,7 @@ bool VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters) {
// first partition size, thresholds, table pars, spatial resoln fac.
// First partition protection: ~ 20%
uint8_t firstPartitionProt = rtc::saturated_cast<uint8_t>(255 * 0.20);
uint8_t firstPartitionProt = saturated_cast<uint8_t>(255 * 0.20);
// Minimum protection level needed to generate one FEC packet for one
// source packet/frame (in RTP sender)
@@ -316,9 +316,9 @@ bool VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters) {
const uint8_t ratePar2 = 49;
// Spatial resolution size, relative to a reference size.
float spatialSizeToRef = rtc::saturated_cast<float>(parameters->codecWidth *
parameters->codecHeight) /
(rtc::saturated_cast<float>(704 * 576));
float spatialSizeToRef =
saturated_cast<float>(parameters->codecWidth * parameters->codecHeight) /
(saturated_cast<float>(704 * 576));
// resolnFac: This parameter will generally increase/decrease the FEC rate
// (for fixed bitRate and packetLoss) based on system size.
// Use a smaller exponent (< 1) to control/soften system size effect.
@@ -327,9 +327,9 @@ bool VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters) {
const int bitRatePerFrame = BitsPerFrame(parameters);
// Average number of packets per frame (source and fec):
const uint8_t avgTotPackets = rtc::saturated_cast<uint8_t>(
1.5f + rtc::saturated_cast<float>(bitRatePerFrame) * 1000.0f /
rtc::saturated_cast<float>(8.0 * _maxPayloadSize));
const uint8_t avgTotPackets = saturated_cast<uint8_t>(
1.5f + saturated_cast<float>(bitRatePerFrame) * 1000.0f /
saturated_cast<float>(8.0 * _maxPayloadSize));
// FEC rate parameters: for P and I frame
uint8_t codeRateDelta = 0;
@@ -339,8 +339,8 @@ bool VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters) {
// The range on the rate index corresponds to rates (bps)
// from ~200k to ~8000k, for 30fps
const uint16_t effRateFecTable =
rtc::saturated_cast<uint16_t>(resolnFac * bitRatePerFrame);
uint8_t rateIndexTable = rtc::saturated_cast<uint8_t>(
saturated_cast<uint16_t>(resolnFac * bitRatePerFrame);
uint8_t rateIndexTable = saturated_cast<uint8_t>(
VCM_MAX(VCM_MIN((effRateFecTable - ratePar1) / ratePar1, ratePar2), 0));
// Restrict packet loss range to 50:
@@ -373,12 +373,12 @@ bool VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters) {
// The boost factor may depend on several factors: ratio of packet
// number of I to P frames, how much protection placed on P frames, etc.
const uint8_t packetFrameDelta =
rtc::saturated_cast<uint8_t>(0.5 + parameters->packetsPerFrame);
saturated_cast<uint8_t>(0.5 + parameters->packetsPerFrame);
const uint8_t packetFrameKey =
rtc::saturated_cast<uint8_t>(0.5 + parameters->packetsPerFrameKey);
saturated_cast<uint8_t>(0.5 + parameters->packetsPerFrameKey);
const uint8_t boostKey = BoostCodeRateKey(packetFrameDelta, packetFrameKey);
rateIndexTable = rtc::saturated_cast<uint8_t>(VCM_MAX(
rateIndexTable = saturated_cast<uint8_t>(VCM_MAX(
VCM_MIN(1 + (boostKey * effRateFecTable - ratePar1) / ratePar1, ratePar2),
0));
uint16_t indexTableKey = rateIndexTable * kPacketLossMax + packetLoss;
@@ -399,7 +399,7 @@ bool VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters) {
// Make sure I frame protection is at least larger than P frame protection,
// and at least as high as filtered packet loss.
codeRateKey = rtc::saturated_cast<uint8_t>(
codeRateKey = saturated_cast<uint8_t>(
VCM_MAX(packetLoss, VCM_MAX(boostKeyProt, codeRateKey)));
// Check limit on amount of protection for I frame: 50% is max.
@@ -420,13 +420,12 @@ bool VCMFecMethod::ProtectionFactor(const VCMProtectionParameters* parameters) {
// for cases of low rates (small #packets) and low protection levels.
float numPacketsFl =
1.0f + (rtc::saturated_cast<float>(bitRatePerFrame) * 1000.0 /
rtc::saturated_cast<float>(8.0 * _maxPayloadSize) +
1.0f + (saturated_cast<float>(bitRatePerFrame) * 1000.0 /
saturated_cast<float>(8.0 * _maxPayloadSize) +
0.5);
const float estNumFecGen =
0.5f +
rtc::saturated_cast<float>(_protectionFactorD * numPacketsFl / 255.0f);
0.5f + saturated_cast<float>(_protectionFactorD * numPacketsFl / 255.0f);
// We reduce cost factor (which will reduce overhead for FEC and
// hybrid method) and not the protectionFactor.
@@ -459,7 +458,7 @@ int VCMFecMethod::BitsPerFrame(const VCMProtectionParameters* parameters) {
if (frameRate < 1.0f)
frameRate = 1.0f;
// Average bits per frame (units of kbits)
return rtc::saturated_cast<int>(adjustmentFactor * bitRate / frameRate);
return saturated_cast<int>(adjustmentFactor * bitRate / frameRate);
}
bool VCMFecMethod::EffectivePacketLoss(
@@ -603,8 +602,8 @@ uint8_t VCMLossProtectionLogic::FilteredLoss(int64_t nowMs,
UpdateMaxLossHistory(lossPr255, nowMs);
// Update the recursive average filter.
_lossPr255.Apply(rtc::saturated_cast<float>(nowMs - _lastPrUpdateT),
rtc::saturated_cast<float>(lossPr255));
_lossPr255.Apply(saturated_cast<float>(nowMs - _lastPrUpdateT),
saturated_cast<float>(lossPr255));
_lastPrUpdateT = nowMs;
// Filtered loss: default is received loss (no filtering).
@@ -614,7 +613,7 @@ uint8_t VCMLossProtectionLogic::FilteredLoss(int64_t nowMs,
case kNoFilter:
break;
case kAvgFilter:
filtered_loss = rtc::saturated_cast<uint8_t>(_lossPr255.filtered() + 0.5);
filtered_loss = saturated_cast<uint8_t>(_lossPr255.filtered() + 0.5);
break;
case kMaxFilter:
filtered_loss = MaxFilteredLossPr(nowMs);
@@ -625,7 +624,7 @@ uint8_t VCMLossProtectionLogic::FilteredLoss(int64_t nowMs,
}
void VCMLossProtectionLogic::UpdateFilteredLossPr(uint8_t packetLossEnc) {
_lossPr = rtc::saturated_cast<float>(packetLossEnc) / 255.0;
_lossPr = saturated_cast<float>(packetLossEnc) / 255.0;
}
void VCMLossProtectionLogic::UpdateBitRate(float bitRate) {
@@ -635,15 +634,14 @@ void VCMLossProtectionLogic::UpdateBitRate(float bitRate) {
void VCMLossProtectionLogic::UpdatePacketsPerFrame(float nPackets,
int64_t nowMs) {
_packetsPerFrame.Apply(
rtc::saturated_cast<float>(nowMs - _lastPacketPerFrameUpdateT), nPackets);
saturated_cast<float>(nowMs - _lastPacketPerFrameUpdateT), nPackets);
_lastPacketPerFrameUpdateT = nowMs;
}
void VCMLossProtectionLogic::UpdatePacketsPerFrameKey(float nPackets,
int64_t nowMs) {
_packetsPerFrameKey.Apply(
rtc::saturated_cast<float>(nowMs - _lastPacketPerFrameUpdateTKey),
nPackets);
saturated_cast<float>(nowMs - _lastPacketPerFrameUpdateTKey), nPackets);
_lastPacketPerFrameUpdateTKey = nowMs;
}

View File

@@ -375,7 +375,7 @@ VideoCodec VideoCodecInitializer::SetupCodec(
GetExperimentalMinVideoBitrate(field_trials, video_codec.codecType);
if (experimental_min_bitrate) {
const int experimental_min_bitrate_kbps =
rtc::saturated_cast<int>(experimental_min_bitrate->kbps());
saturated_cast<int>(experimental_min_bitrate->kbps());
video_codec.minBitrate = experimental_min_bitrate_kbps;
video_codec.simulcastStream[0].minBitrate = experimental_min_bitrate_kbps;
if (video_codec.codecType == kVideoCodecVP9 ||

View File

@@ -744,7 +744,7 @@ index 3745e9cba5..f68cfb94c1 100644
device_names->push_back(rtc::ToUtf8(device.DeviceName));
}
diff --git a/modules/rtp_rtcp/source/rtcp_sender.cc b/modules/rtp_rtcp/source/rtcp_sender.cc
index 55724a850d..6d3ca422cf 100644
index d7ba03d335..5fa32af69b 100644
--- a/modules/rtp_rtcp/source/rtcp_sender.cc
+++ b/modules/rtp_rtcp/source/rtcp_sender.cc
@@ -210,7 +210,7 @@ void RTCPSender::SetRTCPStatus(RtcpMode new_method) {
@@ -834,7 +834,7 @@ index c8ca45f72c..a42dcfd1cf 100644
// Subclasses must defined kId and kUri static constexpr members.
class BaseRtpStringExtension {
diff --git a/modules/rtp_rtcp/source/rtp_packet.cc b/modules/rtp_rtcp/source/rtp_packet.cc
index fb9c3d677b..0ddd4bed7e 100644
index bf8d5e46ae..2277fb95d7 100644
--- a/modules/rtp_rtcp/source/rtp_packet.cc
+++ b/modules/rtp_rtcp/source/rtp_packet.cc
@@ -212,6 +212,10 @@ void RtpPacket::ZeroMutableExtensions() {
@@ -1425,7 +1425,7 @@ index 5ec1fd4a83..e46e050609 100644
int64_t _lastProcessFrameTimeNanos RTC_GUARDED_BY(capture_checker_);
diff --git a/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc
index b5c985be28..2d5729b304 100644
index e976866891..bf4845a687 100644
--- a/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc
+++ b/modules/video_coding/codecs/vp9/libvpx_vp9_encoder.cc
@@ -281,6 +281,7 @@ LibvpxVp9Encoder::LibvpxVp9Encoder(const Environment& env,

View File

@@ -127,7 +127,7 @@ index 2b4ff128de..2ec8936f4f 100644
return -1;
#elif defined(WEBRTC_POSIX)
diff --git a/system_wrappers/source/cpu_info.cc b/system_wrappers/source/cpu_info.cc
index eff720371a..94aed09c48 100644
index 7e53eb78fa..b47a9b4022 100644
--- a/system_wrappers/source/cpu_info.cc
+++ b/system_wrappers/source/cpu_info.cc
@@ -12,7 +12,7 @@

Some files were not shown because too many files have changed in this diff Show More