diff --git a/BUILD.gn b/BUILD.gn
index f143937d31..89f3b71d72 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -98,16 +98,10 @@ rtc_shared_library("libwebrtc") {
     "include/helper.h",
     "src/helper.cc",
     "src/base/portable.cc",
-    "src/internal/custom_audio_state.cc",
-    "src/internal/custom_audio_state.h",
     "src/internal/custom_audio_transport_impl.cc",
     "src/internal/custom_audio_transport_impl.h",
     "src/internal/local_audio_track.cc",
     "src/internal/local_audio_track.h",
-    "src/internal/custom_media_context.cc",
-    "src/internal/custom_media_context.h",
-    "src/internal/custom_webrtc_voice_engine.cc",
-    "src/internal/custom_webrtc_voice_engine.h",
     "src/internal/vcm_capturer.cc",
     "src/internal/vcm_capturer.h",
     "src/internal/video_capturer.cc",
diff --git a/include/rtc_ice_transport.h b/include/rtc_ice_transport.h
index 645f305b2d..698f56cbbe 100644
--- a/include/rtc_ice_transport.h
+++ b/include/rtc_ice_transport.h
@@ -35,8 +35,8 @@ class IceTransportInit final {
   IceTransportInit& operator=(const IceTransportInit&) = delete;
   IceTransportInit& operator=(IceTransportInit&&) = default;
 
-  cricket::PortAllocator* port_allocator() { return port_allocator_; }
-  void set_port_allocator(cricket::PortAllocator* port_allocator) {
+  webrtc::PortAllocator* port_allocator() { return port_allocator_; }
+  void set_port_allocator(webrtc::PortAllocator* port_allocator) {
     port_allocator_ = port_allocator;
   }
 
diff --git a/patchs/custom_audio_source.patch b/patchs/custom_audio_source_m125.patch
similarity index 100%
rename from patchs/custom_audio_source.patch
rename to patchs/custom_audio_source_m125.patch
diff --git a/patchs/custom_audio_source_m137.patch b/patchs/custom_audio_source_m137.patch
new file mode 100644
index 0000000000..f93cab1884
--- /dev/null
+++ b/patchs/custom_audio_source_m137.patch
@@ -0,0 +1,367 @@
+diff --git a/api/audio/audio_device_defines.h b/api/audio/audio_device_defines.h
+index 63eca2a422..52395ae7e6 100644
+--- a/api/audio/audio_device_defines.h
++++ b/api/audio/audio_device_defines.h
+@@ -21,6 +21,8 @@
+ 
+ namespace webrtc {
+ 
++class AudioSender;
++
+ static const int kAdmMaxDeviceNameSize = 128;
+ static const int kAdmMaxFileNameSize = 512;
+ static const int kAdmMaxGuidSize = 128;
+@@ -86,7 +88,13 @@ class AudioTransport {
+                                    int64_t* elapsed_time_ms,
+                                    int64_t* ntp_time_ms) = 0;
+ 
+- protected:
++
++  virtual void UpdateAudioSenders(std::vector<AudioSender*> senders,
++                                  int send_sample_rate_hz,
++                                  size_t send_num_channels) {}
++
++  virtual void SetStereoChannelSwapping(bool enable) {}
++
+   virtual ~AudioTransport() {}
+ };
+ 
+diff --git a/api/create_peerconnection_factory.cc b/api/create_peerconnection_factory.cc
+index ededc00816..1b3ac2786b 100644
+--- a/api/create_peerconnection_factory.cc
++++ b/api/create_peerconnection_factory.cc
+@@ -42,7 +42,8 @@ scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+     scoped_refptr<AudioMixer> audio_mixer,
+     scoped_refptr<AudioProcessing> audio_processing,
+     std::unique_ptr<AudioFrameProcessor> audio_frame_processor,
+-    std::unique_ptr<FieldTrialsView> field_trials) {
++    std::unique_ptr<FieldTrialsView> field_trials,
++    scoped_refptr<AudioTransportFactory> audio_transport_factory) {
+   PeerConnectionFactoryDependencies dependencies;
+   dependencies.network_thread = network_thread;
+   dependencies.worker_thread = worker_thread;
+@@ -67,6 +68,9 @@ scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+         std::make_unique<BuiltinAudioProcessingBuilder>();
+ #endif
+   }
++  if(audio_transport_factory != nullptr) {
++    dependencies.audio_transport_factory = std::move(audio_transport_factory);
++  }
+   dependencies.audio_mixer = std::move(audio_mixer);
+   dependencies.video_encoder_factory = std::move(video_encoder_factory);
+   dependencies.video_decoder_factory = std::move(video_decoder_factory);
+diff --git a/api/create_peerconnection_factory.h b/api/create_peerconnection_factory.h
+index 68311008ae..5c66f477c4 100644
+--- a/api/create_peerconnection_factory.h
++++ b/api/create_peerconnection_factory.h
+@@ -46,7 +46,8 @@ CreatePeerConnectionFactory(
+     scoped_refptr<AudioMixer> audio_mixer,
+     scoped_refptr<AudioProcessing> audio_processing,
+     std::unique_ptr<AudioFrameProcessor> audio_frame_processor = nullptr,
+-    std::unique_ptr<FieldTrialsView> field_trials = nullptr);
++    std::unique_ptr<FieldTrialsView> field_trials = nullptr,
++    scoped_refptr<AudioTransportFactory> audio_transport_factory = nullptr);
+ 
+ }  // namespace webrtc
+ 
+diff --git a/api/enable_media.cc b/api/enable_media.cc
+index 7d6d0cc3df..c8574ddd6f 100644
+--- a/api/enable_media.cc
++++ b/api/enable_media.cc
+@@ -17,6 +17,7 @@
+ #include "api/environment/environment.h"
+ #include "api/peer_connection_interface.h"
+ #include "api/scoped_refptr.h"
++#include "audio/audio_transport_impl.h"
+ #include "call/call.h"
+ #include "call/call_config.h"
+ #include "media/base/media_engine.h"
+@@ -52,7 +53,8 @@ class MediaFactoryImpl : public MediaFactory {
+     auto audio_engine = std::make_unique<WebRtcVoiceEngine>(
+         env, std::move(deps.adm), std::move(deps.audio_encoder_factory),
+         std::move(deps.audio_decoder_factory), std::move(deps.audio_mixer),
+-        std::move(audio_processing), std::move(deps.audio_frame_processor));
++        std::move(audio_processing), std::move(deps.audio_frame_processor),
++        std::move(deps.audio_transport_factory));
+     auto video_engine = std::make_unique<WebRtcVideoEngine>(
+         std::move(deps.video_encoder_factory),
+         std::move(deps.video_decoder_factory), env.field_trials());
+diff --git a/api/peer_connection_interface.h b/api/peer_connection_interface.h
+index e5b3853e11..e41b3aa125 100644
+--- a/api/peer_connection_interface.h
++++ b/api/peer_connection_interface.h
+@@ -125,6 +125,7 @@
+ #include "api/video/video_bitrate_allocator_factory.h"
+ #include "api/video_codecs/video_decoder_factory.h"
+ #include "api/video_codecs/video_encoder_factory.h"
++#include "audio/audio_transport_impl.h"
+ #include "call/rtp_transport_controller_send_factory_interface.h"
+ #include "media/base/media_config.h"
+ // TODO(bugs.webrtc.org/7447): We plan to provide a way to let applications
+@@ -1472,6 +1473,7 @@ struct RTC_EXPORT PeerConnectionFactoryDependencies final {
+   scoped_refptr<AudioEncoderFactory> audio_encoder_factory;
+   scoped_refptr<AudioDecoderFactory> audio_decoder_factory;
+   scoped_refptr<AudioMixer> audio_mixer;
++  scoped_refptr<AudioTransportFactory> audio_transport_factory;
+   // TODO: bugs.webrtc.org/369904700 - Delete `audio_processing` in favor
+   // of `audio_processing_builder`.
+ [[deprecated]] scoped_refptr audio_processing; +diff --git a/audio/audio_receive_stream.cc b/audio/audio_receive_stream.cc +index 344c4b4428..dc4a01cc39 100644 +--- a/audio/audio_receive_stream.cc ++++ b/audio/audio_receive_stream.cc +@@ -456,8 +456,8 @@ const std::string& AudioReceiveStreamImpl::sync_group() const { + return config_.sync_group; + } + +-internal::AudioState* AudioReceiveStreamImpl::audio_state() const { +- auto* audio_state = static_cast(audio_state_.get()); ++webrtc::AudioState* AudioReceiveStreamImpl::audio_state() const { ++ auto* audio_state = static_cast(audio_state_.get()); + RTC_DCHECK(audio_state); + return audio_state; + } +diff --git a/audio/audio_receive_stream.h b/audio/audio_receive_stream.h +index bc248120ef..d8f766c7bc 100644 +--- a/audio/audio_receive_stream.h ++++ b/audio/audio_receive_stream.h +@@ -146,7 +146,7 @@ class AudioReceiveStreamImpl final : public webrtc::AudioReceiveStreamInterface, + const webrtc::AudioReceiveStreamInterface::Config& config); + + private: +- internal::AudioState* audio_state() const; ++ webrtc::AudioState* audio_state() const; + + RTC_NO_UNIQUE_ADDRESS SequenceChecker worker_thread_checker_; + // TODO(bugs.webrtc.org/11993): This checker conceptually represents +diff --git a/audio/audio_state.cc b/audio/audio_state.cc +index 2e1c32b018..67b2ffa1b0 100644 +--- a/audio/audio_state.cc ++++ b/audio/audio_state.cc +@@ -29,9 +29,16 @@ namespace internal { + + AudioState::AudioState(const AudioState::Config& config) + : config_(config), +- audio_transport_(config_.audio_mixer.get(), +- config_.audio_processing.get(), +- config_.async_audio_processing_factory.get()) { ++ audio_transport_( ++ config_.audio_transport_factory ++ ? config_.audio_transport_factory->Create( ++ config_.audio_mixer.get(), ++ config_.audio_processing.get(), ++ config_.async_audio_processing_factory.get()) ++ : std::make_unique( ++ config_.audio_mixer.get(), ++ config_.audio_processing.get(), ++ config_.async_audio_processing_factory.get())) { + RTC_DCHECK(config_.audio_mixer); + RTC_DCHECK(config_.audio_device_module); + } +@@ -48,7 +55,7 @@ AudioProcessing* AudioState::audio_processing() { + } + + AudioTransport* AudioState::audio_transport() { +- return &audio_transport_; ++ return audio_transport_.get(); + } + + void AudioState::SetPlayout(bool enabled) { +@@ -167,7 +174,7 @@ void AudioState::RemoveSendingStream(webrtc::AudioSendStream* stream) { + + void AudioState::SetStereoChannelSwapping(bool enable) { + RTC_DCHECK(thread_checker_.IsCurrent()); +- audio_transport_.SetStereoChannelSwapping(enable); ++ audio_transport_->SetStereoChannelSwapping(enable); + } + + void AudioState::UpdateAudioTransportWithSendingStreams() { +@@ -180,8 +187,8 @@ void AudioState::UpdateAudioTransportWithSendingStreams() { + max_sample_rate_hz = std::max(max_sample_rate_hz, kv.second.sample_rate_hz); + max_num_channels = std::max(max_num_channels, kv.second.num_channels); + } +- audio_transport_.UpdateAudioSenders(std::move(audio_senders), +- max_sample_rate_hz, max_num_channels); ++ audio_transport_->UpdateAudioSenders(std::move(audio_senders), ++ max_sample_rate_hz, max_num_channels); + } + + void AudioState::UpdateNullAudioPollerState() { +@@ -189,7 +196,7 @@ void AudioState::UpdateNullAudioPollerState() { + // disabled. 
+ if (!receiving_streams_.empty() && !playout_enabled_) { + if (!null_audio_poller_.Running()) { +- AudioTransport* audio_transport = &audio_transport_; ++ AudioTransport* audio_transport = audio_transport_.get(); + null_audio_poller_ = RepeatingTaskHandle::Start( + TaskQueueBase::Current(), [audio_transport] { + static constexpr size_t kNumChannels = 1; +diff --git a/audio/audio_state.h b/audio/audio_state.h +index 88aaaa3697..820b2d4884 100644 +--- a/audio/audio_state.h ++++ b/audio/audio_state.h +@@ -21,6 +21,7 @@ + #include "rtc_base/ref_count.h" + #include "rtc_base/task_utils/repeating_task.h" + #include "rtc_base/thread_annotations.h" ++#include "audio/audio_transport_impl.h" + + namespace webrtc { + +@@ -52,13 +53,13 @@ class AudioState : public webrtc::AudioState { + return config_.audio_device_module.get(); + } + +- void AddReceivingStream(webrtc::AudioReceiveStreamInterface* stream); +- void RemoveReceivingStream(webrtc::AudioReceiveStreamInterface* stream); ++ void AddReceivingStream(webrtc::AudioReceiveStreamInterface* stream) override; ++ void RemoveReceivingStream(webrtc::AudioReceiveStreamInterface* stream) override; + + void AddSendingStream(webrtc::AudioSendStream* stream, + int sample_rate_hz, +- size_t num_channels); +- void RemoveSendingStream(webrtc::AudioSendStream* stream); ++ size_t num_channels) override; ++ void RemoveSendingStream(webrtc::AudioSendStream* stream) override; + + private: + void UpdateAudioTransportWithSendingStreams(); +@@ -72,7 +73,7 @@ class AudioState : public webrtc::AudioState { + + // Transports mixed audio from the mixer to the audio device and + // recorded audio to the sending streams. +- AudioTransportImpl audio_transport_; ++ std::unique_ptr audio_transport_; + + // Null audio poller is used to continue polling the audio streams if audio + // playout is disabled so that audio processing still happens and the audio +diff --git a/audio/audio_transport_impl.h b/audio/audio_transport_impl.h +index a240eb0fe5..6229191e1d 100644 +--- a/audio/audio_transport_impl.h ++++ b/audio/audio_transport_impl.h +@@ -84,8 +84,8 @@ class AudioTransportImpl : public AudioTransport { + + void UpdateAudioSenders(std::vector senders, + int send_sample_rate_hz, +- size_t send_num_channels); +- void SetStereoChannelSwapping(bool enable); ++ size_t send_num_channels) override; ++ void SetStereoChannelSwapping(bool enable) override; + + private: + void SendProcessedData(std::unique_ptr audio_frame); +@@ -112,6 +112,15 @@ class AudioTransportImpl : public AudioTransport { + // Converts mixed audio to the audio device output rate. + PushResampler render_resampler_; + }; ++ ++class AudioTransportFactory : public RefCountInterface { ++ public: ++ virtual std::unique_ptr Create( ++ AudioMixer* mixer, ++ AudioProcessing* audio_processing, ++ AsyncAudioProcessing::Factory* async_audio_processing_factory) = 0; ++}; ++ + } // namespace webrtc + + #endif // AUDIO_AUDIO_TRANSPORT_IMPL_H_ +diff --git a/call/audio_state.h b/call/audio_state.h +index d58b7ff97e..88c0e895c5 100644 +--- a/call/audio_state.h ++++ b/call/audio_state.h +@@ -16,10 +16,12 @@ + #include "api/ref_count.h" + #include "api/scoped_refptr.h" + #include "modules/async_audio_processing/async_audio_processing.h" ++#include "audio/audio_transport_impl.h" + + namespace webrtc { + +-class AudioTransport; ++class AudioReceiveStreamInterface; ++class AudioSendStream; + + // AudioState holds the state which must be shared between multiple instances of + // webrtc::Call for audio processing purposes. 
+@@ -40,6 +42,8 @@ class AudioState : public RefCountInterface { + scoped_refptr audio_device_module; + + scoped_refptr async_audio_processing_factory; ++ ++ scoped_refptr audio_transport_factory; + }; + + virtual AudioProcessing* audio_processing() = 0; +@@ -58,6 +62,16 @@ class AudioState : public RefCountInterface { + + virtual void SetStereoChannelSwapping(bool enable) = 0; + ++ virtual void AddReceivingStream(AudioReceiveStreamInterface* stream) = 0; ++ ++ virtual void RemoveReceivingStream(AudioReceiveStreamInterface* stream) = 0; ++ ++ virtual void AddSendingStream(AudioSendStream* stream, ++ int sample_rate_hz, ++ size_t num_channels) = 0; ++ ++ virtual void RemoveSendingStream(AudioSendStream* stream) = 0; ++ + static scoped_refptr Create(const AudioState::Config& config); + + ~AudioState() override {} +diff --git a/media/engine/webrtc_voice_engine.cc b/media/engine/webrtc_voice_engine.cc +index d1a22b5b03..fb57e25459 100644 +--- a/media/engine/webrtc_voice_engine.cc ++++ b/media/engine/webrtc_voice_engine.cc +@@ -458,7 +458,8 @@ WebRtcVoiceEngine::WebRtcVoiceEngine( + scoped_refptr decoder_factory, + scoped_refptr audio_mixer, + scoped_refptr audio_processing, +- std::unique_ptr audio_frame_processor) ++ std::unique_ptr audio_frame_processor, ++ scoped_refptr audio_transport_factory) + : env_(env), + adm_(std::move(adm)), + encoder_factory_(std::move(encoder_factory)), +@@ -466,6 +467,7 @@ WebRtcVoiceEngine::WebRtcVoiceEngine( + audio_mixer_(std::move(audio_mixer)), + apm_(std::move(audio_processing)), + audio_frame_processor_(std::move(audio_frame_processor)), ++ audio_transport_factory_(std::move(audio_transport_factory)), + minimized_remsampling_on_mobile_trial_enabled_( + env_.field_trials().IsEnabled( + "WebRTC-Audio-MinimizeResamplingOnMobile")), +@@ -535,6 +537,9 @@ void WebRtcVoiceEngine::Init() { + } else { + config.audio_mixer = AudioMixerImpl::Create(); + } ++ if(audio_transport_factory_){ ++ config.audio_transport_factory = audio_transport_factory_; ++ } + config.audio_processing = apm_; + config.audio_device_module = adm_; + if (audio_frame_processor_) { +diff --git a/media/engine/webrtc_voice_engine.h b/media/engine/webrtc_voice_engine.h +index 0293bcff9e..d8f72e33f9 100644 +--- a/media/engine/webrtc_voice_engine.h ++++ b/media/engine/webrtc_voice_engine.h +@@ -83,7 +83,8 @@ class WebRtcVoiceEngine final : public VoiceEngineInterface { + scoped_refptr decoder_factory, + scoped_refptr audio_mixer, + scoped_refptr audio_processing, +- std::unique_ptr audio_frame_processor); ++ std::unique_ptr audio_frame_processor, ++ scoped_refptr audio_transport_factory); + + WebRtcVoiceEngine() = delete; + WebRtcVoiceEngine(const WebRtcVoiceEngine&) = delete; +@@ -157,6 +158,8 @@ class WebRtcVoiceEngine final : public VoiceEngineInterface { + scoped_refptr apm_; + // Asynchronous audio processing. + std::unique_ptr audio_frame_processor_; ++ // The audio transport factory. ++ scoped_refptr audio_transport_factory_; + // The primary instance of WebRtc VoiceEngine. + scoped_refptr audio_state_; + std::vector send_codecs_; diff --git a/src/helper.cc b/src/helper.cc index b5ac14eeba..015754d4df 100644 --- a/src/helper.cc +++ b/src/helper.cc @@ -1,14 +1,14 @@ #include "helper.h" -#include "rtc_base/helpers.h" +#include "rtc_base/crypto_random.h" namespace libwebrtc { /** * Generates a random UUID string using the WebRTC library function - * rtc::CreateRandomUuid(). + * webrtc::CreateRandomUuid(). * * @return A string representation of a random UUID. 
*/ -string Helper::CreateRandomUuid() { return rtc::CreateRandomUuid(); } +string Helper::CreateRandomUuid() { return webrtc::CreateRandomUuid(); } } // namespace libwebrtc diff --git a/src/internal/custom_audio_state.cc b/src/internal/custom_audio_state.cc deleted file mode 100644 index 008f4eec56..0000000000 --- a/src/internal/custom_audio_state.cc +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "src/internal/custom_audio_state.h" - -#include -#include -#include -#include - -#include "api/sequence_checker.h" -#include "api/task_queue/task_queue_base.h" -#include "api/units/time_delta.h" -#include "audio/audio_receive_stream.h" -#include "audio/audio_send_stream.h" -#include "modules/audio_device/include/audio_device.h" -#include "rtc_base/checks.h" -#include "rtc_base/logging.h" -#include "src/internal/custom_audio_transport_impl.h" -#include "src/internal/custom_media_context.h" - -namespace webrtc { - -CustomAudioState::CustomAudioState( - const AudioState::Config& config, - std::unique_ptr custom_audio_transport) - : config_(config), audio_transport_(std::move(custom_audio_transport)) { - RTC_DCHECK(config_.audio_mixer); - RTC_DCHECK(config_.audio_device_module); -} - -CustomAudioState::~CustomAudioState() { - RTC_DCHECK_RUN_ON(&thread_checker_); - RTC_DCHECK(receiving_streams_.empty()); - RTC_DCHECK(sending_streams_.empty()); - RTC_DCHECK(!null_audio_poller_.Running()); -} - -AudioProcessing* CustomAudioState::audio_processing() { - return config_.audio_processing.get(); -} - -AudioTransport* CustomAudioState::audio_transport() { - return audio_transport_.get(); -} - -void CustomAudioState::AddReceivingStream( - webrtc::AudioReceiveStreamInterface* stream) { - RTC_DCHECK_RUN_ON(&thread_checker_); - RTC_DCHECK_EQ(0, receiving_streams_.count(stream)); - receiving_streams_.insert(stream); - if (!config_.audio_mixer->AddSource( - static_cast(stream))) { - RTC_DLOG(LS_ERROR) << "Failed to add source to mixer."; - } - - // Make sure playback is initialized; start playing if enabled. - UpdateNullAudioPollerState(); - auto* adm = config_.audio_device_module.get(); - if (!adm->Playing()) { - if (adm->InitPlayout() == 0) { - if (playout_enabled_) { - adm->StartPlayout(); - } - } else { - RTC_DLOG_F(LS_ERROR) << "Failed to initialize playout."; - } - } -} - -void CustomAudioState::RemoveReceivingStream( - webrtc::AudioReceiveStreamInterface* stream) { - RTC_DCHECK_RUN_ON(&thread_checker_); - auto count = receiving_streams_.erase(stream); - RTC_DCHECK_EQ(1, count); - config_.audio_mixer->RemoveSource( - static_cast(stream)); - UpdateNullAudioPollerState(); - if (receiving_streams_.empty()) { - config_.audio_device_module->StopPlayout(); - } -} - -void CustomAudioState::AddSendingStream(webrtc::AudioSendStream* stream, - int sample_rate_hz, - size_t num_channels) { - RTC_DCHECK_RUN_ON(&thread_checker_); - auto& properties = sending_streams_[stream]; - properties.sample_rate_hz = sample_rate_hz; - properties.num_channels = num_channels; - UpdateAudioTransportWithSendingStreams(); - - // Make sure recording is initialized; start recording if enabled. 
- if (ShouldRecord()) { - auto* adm = config_.audio_device_module.get(); - if (!adm->Recording()) { - if (adm->InitRecording() == 0) { - if (recording_enabled_) { - // TODO: Verify if the following windows only logic is still required. -#if defined(WEBRTC_WIN) - if (adm->BuiltInAECIsAvailable() && !adm->Playing()) { - if (!adm->PlayoutIsInitialized()) { - adm->InitPlayout(); - } - adm->StartPlayout(); - } -#endif - adm->StartRecording(); - } - } else { - RTC_DLOG_F(LS_ERROR) << "Failed to initialize recording."; - } - } - } -} - -void CustomAudioState::RemoveSendingStream(webrtc::AudioSendStream* stream) { - RTC_DCHECK_RUN_ON(&thread_checker_); - auto count = sending_streams_.erase(stream); - RTC_DCHECK_EQ(1, count); - UpdateAudioTransportWithSendingStreams(); - - bool should_record = ShouldRecord(); - RTC_LOG(LS_INFO) << "RemoveSendingStream: should_record = " << should_record; - if (!should_record) { - config_.audio_device_module->StopRecording(); - } -} - -void CustomAudioState::SetPlayout(bool enabled) { - RTC_LOG(LS_INFO) << "SetPlayout(" << enabled << ")"; - RTC_DCHECK_RUN_ON(&thread_checker_); - if (playout_enabled_ != enabled) { - playout_enabled_ = enabled; - if (enabled) { - UpdateNullAudioPollerState(); - if (!receiving_streams_.empty()) { - config_.audio_device_module->StartPlayout(); - } - } else { - config_.audio_device_module->StopPlayout(); - UpdateNullAudioPollerState(); - } - } -} - -void CustomAudioState::SetRecording(bool enabled) { - RTC_LOG(LS_INFO) << "SetRecording(" << enabled << ")"; - RTC_DCHECK_RUN_ON(&thread_checker_); - if (recording_enabled_ != enabled) { - recording_enabled_ = enabled; - if (enabled) { - if (ShouldRecord()) { - config_.audio_device_module->StartRecording(); - } - } else { - config_.audio_device_module->StopRecording(); - } - } -} - -void CustomAudioState::SetStereoChannelSwapping(bool enable) { - RTC_DCHECK(thread_checker_.IsCurrent()); - audio_transport_->SetStereoChannelSwapping(enable); -} - -void CustomAudioState::UpdateAudioTransportWithSendingStreams() { - RTC_DCHECK(thread_checker_.IsCurrent()); - std::vector audio_senders; - int max_sample_rate_hz = 8000; - size_t max_num_channels = 1; - for (const auto& kv : sending_streams_) { - audio_senders.push_back(kv.first); - max_sample_rate_hz = std::max(max_sample_rate_hz, kv.second.sample_rate_hz); - max_num_channels = std::max(max_num_channels, kv.second.num_channels); - } - audio_transport_->UpdateAudioSenders(std::move(audio_senders), - max_sample_rate_hz, max_num_channels); -} - -void CustomAudioState::UpdateNullAudioPollerState() { - // Run NullAudioPoller when there are receiving streams and playout is - // disabled. - if (!receiving_streams_.empty() && !playout_enabled_) { - if (!null_audio_poller_.Running()) { - AudioTransport* audio_transport = audio_transport_.get(); - null_audio_poller_ = RepeatingTaskHandle::Start( - TaskQueueBase::Current(), [audio_transport] { - static constexpr size_t kNumChannels = 1; - static constexpr uint32_t kSamplesPerSecond = 48'000; - // 10ms of samples - static constexpr size_t kNumSamples = kSamplesPerSecond / 100; - - // Buffer to hold the audio samples. - int16_t buffer[kNumSamples * kNumChannels]; - - // Output variables from `NeedMorePlayData`. - size_t n_samples; - int64_t elapsed_time_ms; - int64_t ntp_time_ms; - audio_transport->NeedMorePlayData( - kNumSamples, sizeof(int16_t), kNumChannels, kSamplesPerSecond, - buffer, n_samples, &elapsed_time_ms, &ntp_time_ms); - - // Reschedule the next poll iteration. 
- return TimeDelta::Millis(10); - }); - } - } else { - null_audio_poller_.Stop(); - } -} - -void CustomAudioState::OnMuteStreamChanged() { - auto* adm = config_.audio_device_module.get(); - bool should_record = ShouldRecord(); - - RTC_LOG(LS_INFO) << "OnMuteStreamChanged: should_record = " << should_record; - if (should_record && !adm->Recording()) { - if (adm->InitRecording() == 0) { - adm->StartRecording(); - } - } else if (!should_record && adm->Recording()) { - adm->StopRecording(); - } -} - -bool CustomAudioState::ShouldRecord() { - RTC_LOG(LS_INFO) << "ShouldRecord"; - // no streams to send - if (sending_streams_.empty()) { - RTC_LOG(LS_INFO) << "ShouldRecord: send stream = empty"; - return false; - } - - int stream_count = sending_streams_.size(); - - int muted_count = 0; - for (const auto& kv : sending_streams_) { - if (kv.first->GetMuted()) { - muted_count++; - } - } - - RTC_LOG(LS_INFO) << "ShouldRecord: " << muted_count << " muted, " - << stream_count << " sending"; - return muted_count != stream_count; -} - -} // namespace webrtc diff --git a/src/internal/custom_audio_state.h b/src/internal/custom_audio_state.h deleted file mode 100644 index aedde0083a..0000000000 --- a/src/internal/custom_audio_state.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef INTERNAL_CUSTOM_AUDIO_STATE_H_ -#define INTERNAL_CUSTOM_AUDIO_STATE_H_ - -#include -#include - -#include "api/sequence_checker.h" -#include "audio/audio_transport_impl.h" -#include "call/audio_sender.h" -#include "call/audio_state.h" -#include "rtc_base/containers/flat_set.h" -#include "rtc_base/ref_count.h" -#include "rtc_base/task_utils/repeating_task.h" -#include "rtc_base/thread_annotations.h" - -namespace webrtc { - -class CustomAudioTransportImpl; -class AudioSendStream; -class AudioReceiveStreamInterface; - -class CustomAudioState : public webrtc::AudioState { - public: - explicit CustomAudioState( - const AudioState::Config& config, - std::unique_ptr custom_audio_transport); - - CustomAudioState() = delete; - CustomAudioState(const CustomAudioState&) = delete; - CustomAudioState& operator=(const CustomAudioState&) = delete; - - ~CustomAudioState() override; - - AudioProcessing* audio_processing() override; - AudioTransport* audio_transport() override; - - void SetPlayout(bool enabled) override; - void SetRecording(bool enabled) override; - - void SetStereoChannelSwapping(bool enable) override; - - void OnMuteStreamChanged() override; - - AudioDeviceModule* audio_device_module() { - RTC_DCHECK(config_.audio_device_module); - return config_.audio_device_module.get(); - } - - void AddReceivingStream(webrtc::AudioReceiveStreamInterface* stream) override; - void RemoveReceivingStream( - webrtc::AudioReceiveStreamInterface* stream) override; - - void AddSendingStream(webrtc::AudioSendStream* stream, int sample_rate_hz, - size_t num_channels) override; - void RemoveSendingStream(webrtc::AudioSendStream* stream) override; - - private: - void UpdateAudioTransportWithSendingStreams(); - void UpdateNullAudioPollerState() RTC_RUN_ON(&thread_checker_); - - // Returns true when at least 1 stream exists and all streams are not muted. 
- bool ShouldRecord(); - - SequenceChecker thread_checker_; - SequenceChecker process_thread_checker_{SequenceChecker::kDetached}; - const webrtc::AudioState::Config config_; - bool recording_enabled_ = true; - bool playout_enabled_ = true; - - // Transports mixed audio from the mixer to the audio device and - // recorded audio to the sending streams. - std::unique_ptr audio_transport_; - - // Null audio poller is used to continue polling the audio streams if audio - // playout is disabled so that audio processing still happens and the audio - // stats are still updated. - RepeatingTaskHandle null_audio_poller_ RTC_GUARDED_BY(&thread_checker_); - - webrtc::flat_set receiving_streams_; - struct StreamProperties { - int sample_rate_hz = 0; - size_t num_channels = 0; - }; - std::map sending_streams_; -}; - -} // namespace webrtc - -#endif // INTERNAL_CUSTOM_AUDIO_STATE_H_ diff --git a/src/internal/custom_audio_transport_impl.cc b/src/internal/custom_audio_transport_impl.cc index 77631c21c8..b14d136650 100644 --- a/src/internal/custom_audio_transport_impl.cc +++ b/src/internal/custom_audio_transport_impl.cc @@ -5,8 +5,9 @@ namespace webrtc { CustomAudioTransportImpl::CustomAudioTransportImpl( AudioMixer* mixer, AudioProcessing* audio_processing, AsyncAudioProcessing::Factory* async_audio_processing_factory) - : audio_transport_impl_(mixer, audio_processing, - async_audio_processing_factory) {} + : audio_transport_impl_( + std::make_unique( + mixer, audio_processing, async_audio_processing_factory)) {} // TODO(bugs.webrtc.org/13620) Deprecate this function int32_t CustomAudioTransportImpl::RecordedDataIsAvailable( @@ -14,7 +15,7 @@ int32_t CustomAudioTransportImpl::RecordedDataIsAvailable( size_t nChannels, uint32_t samplesPerSec, uint32_t totalDelayMS, int32_t clockDrift, uint32_t currentMicLevel, bool keyPressed, uint32_t& newMicLevel) { - return audio_transport_impl_.RecordedDataIsAvailable( + return audio_transport_impl_->RecordedDataIsAvailable( audioSamples, nSamples, nBytesPerSample, nChannels, samplesPerSec, totalDelayMS, clockDrift, currentMicLevel, keyPressed, newMicLevel); } @@ -23,8 +24,8 @@ int32_t CustomAudioTransportImpl::RecordedDataIsAvailable( const void* audioSamples, size_t nSamples, size_t nBytesPerSample, size_t nChannels, uint32_t samplesPerSec, uint32_t totalDelayMS, int32_t clockDrift, uint32_t currentMicLevel, bool keyPressed, - uint32_t& newMicLevel, absl::optional estimated_capture_time_ns) { - return audio_transport_impl_.RecordedDataIsAvailable( + uint32_t& newMicLevel, std::optional estimated_capture_time_ns) { + return audio_transport_impl_->RecordedDataIsAvailable( audioSamples, nSamples, nBytesPerSample, nChannels, samplesPerSec, totalDelayMS, clockDrift, currentMicLevel, keyPressed, newMicLevel, estimated_capture_time_ns); @@ -34,7 +35,7 @@ int32_t CustomAudioTransportImpl::NeedMorePlayData( size_t nSamples, size_t nBytesPerSample, size_t nChannels, uint32_t samplesPerSec, void* audioSamples, size_t& nSamplesOut, int64_t* elapsed_time_ms, int64_t* ntp_time_ms) { - return audio_transport_impl_.NeedMorePlayData( + return audio_transport_impl_->NeedMorePlayData( nSamples, nBytesPerSample, nChannels, samplesPerSec, audioSamples, nSamplesOut, elapsed_time_ms, ntp_time_ms); } @@ -43,7 +44,7 @@ void CustomAudioTransportImpl::PullRenderData( int bits_per_sample, int sample_rate, size_t number_of_channels, size_t number_of_frames, void* audio_data, int64_t* elapsed_time_ms, int64_t* ntp_time_ms) { - audio_transport_impl_.PullRenderData( + 
audio_transport_impl_->PullRenderData( bits_per_sample, sample_rate, number_of_channels, number_of_frames, audio_data, elapsed_time_ms, ntp_time_ms); } @@ -54,11 +55,11 @@ void CustomAudioTransportImpl::UpdateAudioSenders( if (senders.size() > 0) { std::vector snds = std::vector(); snds.push_back(this); - audio_transport_impl_.UpdateAudioSenders( + audio_transport_impl_->UpdateAudioSenders( std::move(snds), send_sample_rate_hz, send_num_channels); } else { std::vector snds = std::vector(); - audio_transport_impl_.UpdateAudioSenders( + audio_transport_impl_->UpdateAudioSenders( std::move(snds), send_sample_rate_hz, send_num_channels); } } @@ -77,7 +78,7 @@ void CustomAudioTransportImpl::RemoveAudioSender(AudioSender* sender) { } void CustomAudioTransportImpl::SetStereoChannelSwapping(bool enable) { - audio_transport_impl_.SetStereoChannelSwapping(enable); + audio_transport_impl_->SetStereoChannelSwapping(enable); } void CustomAudioTransportImpl::SendAudioData( diff --git a/src/internal/custom_audio_transport_impl.h b/src/internal/custom_audio_transport_impl.h index 80da05103b..c666d0ead1 100644 --- a/src/internal/custom_audio_transport_impl.h +++ b/src/internal/custom_audio_transport_impl.h @@ -20,6 +20,7 @@ class CustomAudioTransportImpl : public AudioTransport, public AudioSender { CustomAudioTransportImpl( AudioMixer* mixer, AudioProcessing* audio_processing, AsyncAudioProcessing::Factory* async_audio_processing_factory); + ~CustomAudioTransportImpl() {} int32_t RecordedDataIsAvailable(const void* audioSamples, size_t nSamples, size_t nBytesPerSample, size_t nChannels, @@ -33,7 +34,7 @@ class CustomAudioTransportImpl : public AudioTransport, public AudioSender { size_t nChannels, uint32_t samplesPerSec, uint32_t totalDelayMS, int32_t clockDrift, uint32_t currentMicLevel, bool keyPressed, uint32_t& newMicLevel, - absl::optional estimated_capture_time_ns) override; + std::optional estimated_capture_time_ns) override; int32_t NeedMorePlayData(size_t nSamples, size_t nBytesPerSample, size_t nChannels, uint32_t samplesPerSec, @@ -48,22 +49,46 @@ class CustomAudioTransportImpl : public AudioTransport, public AudioSender { virtual void UpdateAudioSenders(std::vector senders, int send_sample_rate_hz, - size_t send_num_channels); + size_t send_num_channels) override; void AddAudioSender(AudioSender* sender); void RemoveAudioSender(AudioSender* sender); - void SetStereoChannelSwapping(bool enable); + void SetStereoChannelSwapping(bool enable) override; void SendAudioData(std::unique_ptr audio_frame) override; private: - webrtc::AudioTransportImpl audio_transport_impl_; + std::unique_ptr audio_transport_impl_; mutable Mutex capture_lock_; std::vector audio_senders_ RTC_GUARDED_BY(capture_lock_); }; -} // namespace libwebrtc +class CustomAudioTransportFactory : public AudioTransportFactory { + public: + CustomAudioTransportFactory() = default; + ~CustomAudioTransportFactory() = default; + std::unique_ptr Create( + webrtc::AudioMixer* mixer, webrtc::AudioProcessing* audio_processing, + webrtc::AsyncAudioProcessing::Factory* async_audio_processing_factory) + override { + std::unique_ptr transport = + std::make_unique( + mixer, audio_processing, async_audio_processing_factory); + + audio_transport_impl_ = transport.get(); + return transport; + } + + CustomAudioTransportImpl* audio_transport_impl() const { + return audio_transport_impl_; + } + + private: + CustomAudioTransportImpl* audio_transport_impl_ = nullptr; +}; + +} // namespace webrtc #endif // INTERNAL_CUSTOM_AUDIO_TRANSPORT_STATE_H_ diff --git 
a/src/internal/custom_media_context.cc b/src/internal/custom_media_context.cc deleted file mode 100644 index 415d706dde..0000000000 --- a/src/internal/custom_media_context.cc +++ /dev/null @@ -1,151 +0,0 @@ -#include "src/internal/custom_media_context.h" - -#include - -#include -#include - -#include "api/create_peerconnection_factory.h" -#include "api/enable_media.h" -#include "api/environment/environment.h" -#include "api/peer_connection_interface.h" -#include "api/rtc_event_log/rtc_event_log_factory.h" -#include "api/scoped_refptr.h" -#include "api/task_queue/default_task_queue_factory.h" -#include "api/transport/field_trial_based_config.h" -#include "call/create_call.h" -#include "media/base/media_engine.h" -#include "media/engine/webrtc_media_engine.h" -#include "media/engine/webrtc_video_engine.h" -#include "modules/audio_device/include/audio_device.h" -#include "modules/audio_processing/include/audio_processing.h" -#include "pc/media_factory.h" -#include "rtc_base/thread.h" -#include "src/internal/custom_audio_state.h" -#include "src/internal/custom_audio_transport_impl.h" -#include "src/internal/custom_webrtc_voice_engine.h" -#include "src/internal/local_audio_track.h" - -namespace webrtc { - -class TaskQueueFactory; -class AudioDeviceModule; -using ::cricket::AudioCodec; -using ::cricket::CompositeMediaEngine; -using ::cricket::CustomWebRtcVoiceEngine; -using ::cricket::MediaEngineInterface; -using ::cricket::WebRtcVideoEngine; - -class CustomMediaFactoryImpl : public MediaFactory { - public: - CustomMediaFactoryImpl( - webrtc::CreateAudioStateFactory* create_audio_state_factory) - : create_audio_state_factory_(create_audio_state_factory) { - RTC_DCHECK(create_audio_state_factory_); - } - ~CustomMediaFactoryImpl() override = default; - - std::unique_ptr CreateCall(const CallConfig& config) override { - return webrtc::CreateCall(config); - } - - std::unique_ptr CreateMediaEngine( - const Environment& env, - PeerConnectionFactoryDependencies& deps) override { - auto audio_engine = std::make_unique( - create_audio_state_factory_, &env.task_queue_factory(), deps.adm.get(), - std::move(deps.audio_encoder_factory), - std::move(deps.audio_decoder_factory), std::move(deps.audio_mixer), - std::move(deps.audio_processing), std::move(deps.audio_frame_processor), - env.field_trials()); - auto video_engine = std::make_unique( - std::move(deps.video_encoder_factory), - std::move(deps.video_decoder_factory), env.field_trials()); - return std::make_unique(std::move(audio_engine), - std::move(video_engine)); - } - - private: - webrtc::CreateAudioStateFactory* create_audio_state_factory_ = nullptr; -}; - -rtc::scoped_refptr -CustomMediaContext::CreateAudioSource(cricket::AudioOptions* options, - bool is_custom_source) { - RTC_DCHECK(options); - // if is_custom_source == true, not using the default audio transport, - // you can put costom audio frame via LocalAudioSource::CaptureFrame(...) - // and the audio transport will be null. - // otherwise, use the default audio transport, audio transport will - // put audio frame from your platform adm to your - // LocalAudioSource::SendAudioData(...). - if (rtc::Thread::Current() != signaling_thread_) { - return signaling_thread_->BlockingCall([this, options, is_custom_source] { - return libwebrtc::LocalAudioSource::Create( - options, is_custom_source ? nullptr : audio_transport_); - }); - } - return libwebrtc::LocalAudioSource::Create( - options, is_custom_source ? 
nullptr : audio_transport_); -} - -rtc::scoped_refptr CustomMediaContext::CreateAudioState( - const webrtc::AudioState::Config& config) { - auto audio_transport = std::make_unique( - config.audio_mixer.get(), config.audio_processing.get(), - config.async_audio_processing_factory.get()); - audio_transport_ = audio_transport.get(); - return rtc::make_ref_counted(config, - std::move(audio_transport)); -} - -rtc::scoped_refptr -CustomMediaContext::CreatePeerConnectionFactory( - rtc::Thread* network_thread, rtc::Thread* worker_thread, - rtc::Thread* signaling_thread, - rtc::scoped_refptr default_adm, - rtc::scoped_refptr audio_encoder_factory, - rtc::scoped_refptr audio_decoder_factory, - std::unique_ptr video_encoder_factory, - std::unique_ptr video_decoder_factory, - rtc::scoped_refptr audio_mixer, - rtc::scoped_refptr audio_processing, - std::unique_ptr audio_frame_processor, - std::unique_ptr field_trials, - MediaFactory* media_factory) { - if (!field_trials) { - field_trials = std::make_unique(); - } - - PeerConnectionFactoryDependencies dependencies; - dependencies.network_thread = network_thread; - dependencies.worker_thread = worker_thread; - dependencies.signaling_thread = signaling_thread; - dependencies.task_queue_factory = - CreateDefaultTaskQueueFactory(field_trials.get()); - dependencies.event_log_factory = std::make_unique(); - dependencies.trials = std::move(field_trials); - - if (network_thread) { - // TODO(bugs.webrtc.org/13145): Add an rtc::SocketFactory* argument. - dependencies.socket_factory = network_thread->socketserver(); - } - dependencies.adm = std::move(default_adm); - dependencies.audio_encoder_factory = std::move(audio_encoder_factory); - dependencies.audio_decoder_factory = std::move(audio_decoder_factory); - dependencies.audio_frame_processor = std::move(audio_frame_processor); - if (audio_processing) { - dependencies.audio_processing = std::move(audio_processing); - } else { - dependencies.audio_processing = AudioProcessingBuilder().Create(); - } - dependencies.audio_mixer = std::move(audio_mixer); - dependencies.video_encoder_factory = std::move(video_encoder_factory); - dependencies.video_decoder_factory = std::move(video_decoder_factory); - - dependencies.media_factory = std::make_unique(this); - - return CreateModularPeerConnectionFactory(std::move(dependencies)); -} - -} // namespace webrtc diff --git a/src/internal/custom_media_context.h b/src/internal/custom_media_context.h deleted file mode 100644 index 9c19bc37f9..0000000000 --- a/src/internal/custom_media_context.h +++ /dev/null @@ -1,59 +0,0 @@ -#ifndef INTERNAL_CUSTOM_MEDIA_BRIDGE_FACTORY_H_ -#define INTERNAL_CUSTOM_MEDIA_BRIDGE_FACTORY_H_ - -#include - -#include "api/audio/audio_mixer.h" -#include "api/audio_codecs/audio_decoder_factory.h" -#include "api/audio_codecs/audio_encoder_factory.h" -#include "api/peer_connection_interface.h" -#include "api/ref_count.h" -#include "api/scoped_refptr.h" -#include "api/video_codecs/video_decoder_factory.h" -#include "api/video_codecs/video_encoder_factory.h" -#include "call/audio_state.h" -#include "pc/media_factory.h" -#include "rtc_base/thread.h" -#include "src/internal/custom_webrtc_voice_engine.h" -#include "src/internal/local_audio_track.h" - -namespace webrtc { - -class CustomAudioTransportImpl; - -class CustomMediaContext : public webrtc::CreateAudioStateFactory, - public webrtc::RefCountInterface { - public: - CustomMediaContext(rtc::Thread* signaling_thread) { - RTC_DCHECK(signaling_thread); - signaling_thread_ = signaling_thread; - } - 
rtc::scoped_refptr CreateAudioSource( - cricket::AudioOptions* options, bool is_custom_source = false); - - rtc::scoped_refptr CreateAudioState( - const webrtc::AudioState::Config& config) override; - - rtc::scoped_refptr - CreatePeerConnectionFactory( - rtc::Thread* network_thread, rtc::Thread* worker_thread, - rtc::Thread* signaling_thread, - rtc::scoped_refptr default_adm, - rtc::scoped_refptr audio_encoder_factory, - rtc::scoped_refptr audio_decoder_factory, - std::unique_ptr video_encoder_factory, - std::unique_ptr video_decoder_factory, - rtc::scoped_refptr audio_mixer, - rtc::scoped_refptr audio_processing, - std::unique_ptr audio_frame_processor = nullptr, - std::unique_ptr field_trials = nullptr, - MediaFactory* media_factory = nullptr); - - private: - rtc::Thread* signaling_thread_; - webrtc::CustomAudioTransportImpl* audio_transport_; -}; - -} // namespace webrtc - -#endif // INTERNAL_CUSTOM_MEDIA_BRIDGE_FACTORY_H_ diff --git a/src/internal/custom_webrtc_voice_engine.cc b/src/internal/custom_webrtc_voice_engine.cc deleted file mode 100644 index 0f8f2424bf..0000000000 --- a/src/internal/custom_webrtc_voice_engine.cc +++ /dev/null @@ -1,2674 +0,0 @@ -/* - * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#include "custom_webrtc_voice_engine.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "absl/algorithm/algorithm.h" -#include "absl/algorithm/container.h" -#include "absl/functional/bind_front.h" -#include "absl/strings/match.h" -#include "absl/types/optional.h" -#include "api/audio/audio_frame.h" -#include "api/audio/audio_frame_processor.h" -#include "api/audio_codecs/audio_codec_pair_id.h" -#include "api/audio_codecs/audio_encoder.h" -#include "api/call/audio_sink.h" -#include "api/field_trials_view.h" -#include "api/make_ref_counted.h" -#include "api/media_types.h" -#include "api/priority.h" -#include "api/rtp_headers.h" -#include "api/rtp_parameters.h" -#include "api/rtp_transceiver_direction.h" -#include "api/task_queue/pending_task_safety_flag.h" -#include "api/transport/bitrate_settings.h" -#include "api/units/data_rate.h" -#include "api/units/time_delta.h" -#include "api/units/timestamp.h" -#include "call/audio_receive_stream.h" -#include "call/packet_receiver.h" -#include "call/rtp_config.h" -#include "call/rtp_transport_controller_send_interface.h" -#include "media/base/audio_source.h" -#include "media/base/codec.h" -#include "media/base/media_constants.h" -#include "media/base/stream_params.h" -#include "media/engine/adm_helpers.h" -#include "media/engine/payload_type_mapper.h" -#include "media/engine/webrtc_media_engine.h" -#include "modules/async_audio_processing/async_audio_processing.h" -#include "modules/audio_mixer/audio_mixer_impl.h" -#include "modules/audio_processing/include/audio_processing.h" -#include "modules/audio_processing/include/audio_processing_statistics.h" -#include "modules/rtp_rtcp/include/report_block_data.h" -#include "modules/rtp_rtcp/include/rtp_header_extension_map.h" -#include "modules/rtp_rtcp/source/rtp_packet_received.h" -#include "modules/rtp_rtcp/source/rtp_util.h" -#include "rtc_base/checks.h" -#include 
"rtc_base/dscp.h" -#include "rtc_base/experiments/struct_parameters_parser.h" -#include "rtc_base/logging.h" -#include "rtc_base/race_checker.h" -#include "rtc_base/string_encode.h" -#include "rtc_base/strings/audio_format_to_string.h" -#include "rtc_base/strings/string_builder.h" -#include "rtc_base/strings/string_format.h" -#include "rtc_base/thread_annotations.h" -#include "rtc_base/time_utils.h" -#include "rtc_base/trace_event.h" -#include "src/internal/custom_audio_state.h" -#include "src/internal/custom_media_context.h" -#include "system_wrappers/include/metrics.h" - -namespace cricket { -namespace { - -using ::webrtc::ParseRtpSsrc; - -constexpr size_t kMaxUnsignaledRecvStreams = 4; - -constexpr int kNackRtpHistoryMs = 5000; - -const int kMinTelephoneEventCode = 0; // RFC4733 (Section 2.3.1) -const int kMaxTelephoneEventCode = 255; - -const int kMinPayloadType = 0; -const int kMaxPayloadType = 127; - -class ProxySink : public webrtc::AudioSinkInterface { - public: - explicit ProxySink(AudioSinkInterface* sink) : sink_(sink) { - RTC_DCHECK(sink); - } - - void OnData(const Data& audio) override { sink_->OnData(audio); } - - private: - webrtc::AudioSinkInterface* sink_; -}; - -bool ValidateStreamParams(const StreamParams& sp) { - if (sp.ssrcs.empty()) { - RTC_DLOG(LS_ERROR) << "No SSRCs in stream parameters: " << sp.ToString(); - return false; - } - if (sp.ssrcs.size() > 1) { - RTC_DLOG(LS_ERROR) << "Multiple SSRCs in stream parameters: " - << sp.ToString(); - return false; - } - return true; -} - -// Dumps an AudioCodec in RFC 2327-ish format. -std::string ToString(const AudioCodec& codec) { - rtc::StringBuilder ss; - ss << codec.name << "/" << codec.clockrate << "/" << codec.channels; - if (!codec.params.empty()) { - ss << " {"; - for (const auto& param : codec.params) { - ss << " " << param.first << "=" << param.second; - } - ss << " }"; - } - ss << " (" << codec.id << ")"; - return ss.Release(); -} - -bool IsCodec(const AudioCodec& codec, const char* ref_name) { - return absl::EqualsIgnoreCase(codec.name, ref_name); -} - -absl::optional FindCodec(const std::vector& codecs, - const AudioCodec& codec) { - for (const AudioCodec& c : codecs) { - if (c.Matches(codec)) { - return c; - } - } - return absl::nullopt; -} - -bool VerifyUniquePayloadTypes(const std::vector& codecs) { - if (codecs.empty()) { - return true; - } - std::vector payload_types; - absl::c_transform(codecs, std::back_inserter(payload_types), - [](const AudioCodec& codec) { return codec.id; }); - absl::c_sort(payload_types); - return absl::c_adjacent_find(payload_types) == payload_types.end(); -} - -absl::optional GetAudioNetworkAdaptorConfig( - const AudioOptions& options) { - if (options.audio_network_adaptor && *options.audio_network_adaptor && - options.audio_network_adaptor_config) { - // Turn on audio network adaptor only when `options_.audio_network_adaptor` - // equals true and `options_.audio_network_adaptor_config` has a value. - return options.audio_network_adaptor_config; - } - return absl::nullopt; -} - -// Returns its smallest positive argument. If neither argument is positive, -// returns an arbitrary nonpositive value. -int MinPositive(int a, int b) { - if (a <= 0) { - return b; - } - if (b <= 0) { - return a; - } - return std::min(a, b); -} - -// `max_send_bitrate_bps` is the bitrate from "b=" in SDP. -// `rtp_max_bitrate_bps` is the bitrate from RtpSender::SetParameters. 
-absl::optional ComputeSendBitrate(int max_send_bitrate_bps, - absl::optional rtp_max_bitrate_bps, - const webrtc::AudioCodecSpec& spec) { - // If application-configured bitrate is set, take minimum of that and SDP - // bitrate. - const int bps = rtp_max_bitrate_bps - ? MinPositive(max_send_bitrate_bps, *rtp_max_bitrate_bps) - : max_send_bitrate_bps; - if (bps <= 0) { - return spec.info.default_bitrate_bps; - } - - if (bps < spec.info.min_bitrate_bps) { - // If codec is not multi-rate and `bps` is less than the fixed bitrate then - // fail. If codec is not multi-rate and `bps` exceeds or equal the fixed - // bitrate then ignore. - RTC_LOG(LS_ERROR) << "Failed to set codec " << spec.format.name - << " to bitrate " << bps - << " bps" - ", requires at least " - << spec.info.min_bitrate_bps << " bps."; - return absl::nullopt; - } - - if (spec.info.HasFixedBitrate()) { - return spec.info.default_bitrate_bps; - } else { - // If codec is multi-rate then just set the bitrate. - return std::min(bps, spec.info.max_bitrate_bps); - } -} - -bool IsEnabled(const webrtc::FieldTrialsView& config, absl::string_view trial) { - return absl::StartsWith(config.Lookup(trial), "Enabled"); -} - -struct AdaptivePtimeConfig { - bool enabled = false; - webrtc::DataRate min_payload_bitrate = webrtc::DataRate::KilobitsPerSec(16); - // Value is chosen to ensure FEC can be encoded, see LBRR_WB_MIN_RATE_BPS in - // libopus. - webrtc::DataRate min_encoder_bitrate = webrtc::DataRate::KilobitsPerSec(16); - bool use_slow_adaptation = true; - - absl::optional audio_network_adaptor_config; - - std::unique_ptr Parser() { - return webrtc::StructParametersParser::Create( // - "enabled", &enabled, // - "min_payload_bitrate", &min_payload_bitrate, // - "min_encoder_bitrate", &min_encoder_bitrate, // - "use_slow_adaptation", &use_slow_adaptation); - } - - explicit AdaptivePtimeConfig(const webrtc::FieldTrialsView& trials) { - Parser()->Parse(trials.Lookup("WebRTC-Audio-AdaptivePtime")); - } -}; - -// TODO(tommi): Constructing a receive stream could be made simpler. -// Move some of this boiler plate code into the config structs themselves. -webrtc::AudioReceiveStreamInterface::Config BuildReceiveStreamConfig( - uint32_t remote_ssrc, uint32_t local_ssrc, bool use_nack, - bool enable_non_sender_rtt, const std::vector& stream_ids, - const std::vector& extensions, - webrtc::Transport* rtcp_send_transport, - const rtc::scoped_refptr& decoder_factory, - const std::map& decoder_map, - absl::optional codec_pair_id, - size_t jitter_buffer_max_packets, bool jitter_buffer_fast_accelerate, - int jitter_buffer_min_delay_ms, - rtc::scoped_refptr frame_decryptor, - const webrtc::CryptoOptions& crypto_options, - rtc::scoped_refptr frame_transformer) { - webrtc::AudioReceiveStreamInterface::Config config; - config.rtp.remote_ssrc = remote_ssrc; - config.rtp.local_ssrc = local_ssrc; - config.rtp.nack.rtp_history_ms = use_nack ? 
kNackRtpHistoryMs : 0; - if (!stream_ids.empty()) { - config.sync_group = stream_ids[0]; - } - config.rtcp_send_transport = rtcp_send_transport; - config.enable_non_sender_rtt = enable_non_sender_rtt; - config.decoder_factory = decoder_factory; - config.decoder_map = decoder_map; - config.codec_pair_id = codec_pair_id; - config.jitter_buffer_max_packets = jitter_buffer_max_packets; - config.jitter_buffer_fast_accelerate = jitter_buffer_fast_accelerate; - config.jitter_buffer_min_delay_ms = jitter_buffer_min_delay_ms; - config.frame_decryptor = std::move(frame_decryptor); - config.crypto_options = crypto_options; - config.frame_transformer = std::move(frame_transformer); - return config; -} - -// Utility function to check if RED codec and its parameters match a codec spec. -bool CheckRedParameters( - const AudioCodec& red_codec, - const webrtc::AudioSendStream::Config::SendCodecSpec& send_codec_spec) { - if (red_codec.clockrate != send_codec_spec.format.clockrate_hz || - red_codec.channels != send_codec_spec.format.num_channels) { - return false; - } - - // Check the FMTP line for the empty parameter which should match - // /[/...] - auto red_parameters = red_codec.params.find(""); - if (red_parameters == red_codec.params.end()) { - RTC_LOG(LS_WARNING) << "audio/RED missing fmtp parameters."; - return false; - } - std::vector redundant_payloads = - rtc::split(red_parameters->second, '/'); - // 32 is chosen as a maximum upper bound for consistency with the - // red payload splitter. - if (redundant_payloads.size() < 2 || redundant_payloads.size() > 32) { - return false; - } - for (auto pt : redundant_payloads) { - if (pt != rtc::ToString(send_codec_spec.payload_type)) { - return false; - } - } - return true; -} - -} // namespace - -CustomWebRtcVoiceEngine::CustomWebRtcVoiceEngine( - webrtc::CreateAudioStateFactory* create_audio_state_factory, - webrtc::TaskQueueFactory* task_queue_factory, - webrtc::AudioDeviceModule* adm, - const rtc::scoped_refptr& encoder_factory, - const rtc::scoped_refptr& decoder_factory, - rtc::scoped_refptr audio_mixer, - rtc::scoped_refptr audio_processing, - std::unique_ptr audio_frame_processor, - const webrtc::FieldTrialsView& trials) - : create_audio_state_factory_(create_audio_state_factory), - task_queue_factory_(task_queue_factory), - adm_(adm), - encoder_factory_(encoder_factory), - decoder_factory_(decoder_factory), - audio_mixer_(audio_mixer), - apm_(audio_processing), - audio_frame_processor_(std::move(audio_frame_processor)), - minimized_remsampling_on_mobile_trial_enabled_( - IsEnabled(trials, "WebRTC-Audio-MinimizeResamplingOnMobile")) { - RTC_LOG(LS_INFO) << "CustomWebRtcVoiceEngine::CustomWebRtcVoiceEngine"; - RTC_DCHECK(decoder_factory); - RTC_DCHECK(encoder_factory); - // The rest of our initialization will happen in Init. -} - -CustomWebRtcVoiceEngine::~CustomWebRtcVoiceEngine() { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - RTC_LOG(LS_INFO) << "CustomWebRtcVoiceEngine::~CustomWebRtcVoiceEngine"; - if (initialized_) { - StopAecDump(); - - // Stop AudioDevice. - adm()->StopPlayout(); - adm()->StopRecording(); - adm()->RegisterAudioCallback(nullptr); - adm()->Terminate(); - } -} - -void CustomWebRtcVoiceEngine::Init() { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - RTC_LOG(LS_INFO) << "CustomWebRtcVoiceEngine::Init"; - - // TaskQueue expects to be created/destroyed on the same thread. 
- RTC_DCHECK(!low_priority_worker_queue_); - low_priority_worker_queue_ = task_queue_factory_->CreateTaskQueue( - "rtc-low-prio", webrtc::TaskQueueFactory::Priority::LOW); - - // Load our audio codec lists. - RTC_LOG(LS_VERBOSE) << "Supported send codecs in order of preference:"; - send_codecs_ = CollectCodecs(encoder_factory_->GetSupportedEncoders()); - for (const AudioCodec& codec : send_codecs_) { - RTC_LOG(LS_VERBOSE) << ToString(codec); - } - - RTC_LOG(LS_VERBOSE) << "Supported recv codecs in order of preference:"; - recv_codecs_ = CollectCodecs(decoder_factory_->GetSupportedDecoders()); - for (const AudioCodec& codec : recv_codecs_) { - RTC_LOG(LS_VERBOSE) << ToString(codec); - } - -#if defined(WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE) - // No ADM supplied? Create a default one. - if (!adm_) { - adm_ = webrtc::AudioDeviceModule::Create( - webrtc::AudioDeviceModule::kPlatformDefaultAudio, task_queue_factory_); - } -#endif // WEBRTC_INCLUDE_INTERNAL_AUDIO_DEVICE - RTC_CHECK(adm()); - webrtc::adm_helpers::Init(adm()); - - // Set up AudioState. - { - webrtc::AudioState::Config config; - if (audio_mixer_) { - config.audio_mixer = audio_mixer_; - } else { - config.audio_mixer = webrtc::AudioMixerImpl::Create(); - } - config.audio_processing = apm_; - config.audio_device_module = adm_; - if (audio_frame_processor_) { - config.async_audio_processing_factory = - rtc::make_ref_counted( - std::move(audio_frame_processor_), *task_queue_factory_); - } - audio_state_ = create_audio_state_factory_->CreateAudioState(config); - } - - // Connect the ADM to our audio path. - adm()->RegisterAudioCallback(audio_state()->audio_transport()); - - // Set default engine options. - { - AudioOptions options; - options.echo_cancellation = true; - options.auto_gain_control = true; -#if defined(WEBRTC_IOS) - // On iOS, VPIO provides built-in NS. - options.noise_suppression = false; -#else - options.noise_suppression = true; -#endif - options.highpass_filter = true; - options.stereo_swapping = false; - options.audio_jitter_buffer_max_packets = 200; - options.audio_jitter_buffer_fast_accelerate = false; - options.audio_jitter_buffer_min_delay_ms = 0; - ApplyOptions(options); - } - initialized_ = true; -} - -rtc::scoped_refptr CustomWebRtcVoiceEngine::GetAudioState() - const { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - return audio_state_; -} - -std::unique_ptr -CustomWebRtcVoiceEngine::CreateSendChannel( - webrtc::Call* call, const MediaConfig& config, const AudioOptions& options, - const webrtc::CryptoOptions& crypto_options, - webrtc::AudioCodecPairId codec_pair_id) { - return std::make_unique( - this, config, options, crypto_options, call, codec_pair_id); -} - -std::unique_ptr -CustomWebRtcVoiceEngine::CreateReceiveChannel( - webrtc::Call* call, const MediaConfig& config, const AudioOptions& options, - const webrtc::CryptoOptions& crypto_options, - webrtc::AudioCodecPairId codec_pair_id) { - return std::make_unique( - this, config, options, crypto_options, call, codec_pair_id); -} - -void CustomWebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - RTC_LOG(LS_INFO) << "CustomWebRtcVoiceEngine::ApplyOptions: " - << options_in.ToString(); - AudioOptions options = options_in; // The options are modified below. - - // Set and adjust echo canceller options. - // Use desktop AEC by default, when not using hardware AEC. 
- bool use_mobile_software_aec = false; - -#if defined(WEBRTC_IOS) && !TARGET_OS_SIMULATOR - if (options.ios_force_software_aec_HACK && - *options.ios_force_software_aec_HACK) { - // EC may be forced on for a device known to have non-functioning platform - // AEC. - options.echo_cancellation = true; - RTC_LOG(LS_WARNING) - << "Force software AEC on iOS. May conflict with platform AEC."; - } else { - // On iOS, VPIO provides built-in EC. - options.echo_cancellation = false; - RTC_LOG(LS_INFO) << "Always disable AEC on iOS. Use built-in instead."; - } -#elif defined(WEBRTC_ANDROID) - use_mobile_software_aec = true; -#endif - -// Set and adjust gain control options. -#if defined(WEBRTC_IOS) && !TARGET_OS_SIMULATOR - // On iOS, VPIO provides built-in AGC. - options.auto_gain_control = false; - RTC_LOG(LS_INFO) << "Always disable AGC on iOS. Use built-in instead."; -#endif - -#if defined(WEBRTC_IOS) || defined(WEBRTC_ANDROID) - // Turn off the gain control if specified by the field trial. - // The purpose of the field trial is to reduce the amount of resampling - // performed inside the audio processing module on mobile platforms by - // whenever possible turning off the fixed AGC mode and the high-pass filter. - // (https://bugs.chromium.org/p/webrtc/issues/detail?id=6181). - if (minimized_remsampling_on_mobile_trial_enabled_) { - options.auto_gain_control = false; - RTC_LOG(LS_INFO) << "Disable AGC according to field trial."; - if (!(options.noise_suppression.value_or(false) || - options.echo_cancellation.value_or(false))) { - // If possible, turn off the high-pass filter. - RTC_LOG(LS_INFO) - << "Disable high-pass filter in response to field trial."; - options.highpass_filter = false; - } - } -#endif - - if (options.echo_cancellation) { - // Check if platform supports built-in EC. Currently only supported on - // Android and in combination with Java based audio layer. - // TODO(henrika): investigate possibility to support built-in EC also - // in combination with Open SL ES audio. - const bool built_in_aec = adm()->BuiltInAECIsAvailable(); - if (built_in_aec) { - // Built-in EC exists on this device. Enable/Disable it according to the - // echo_cancellation audio option. - const bool enable_built_in_aec = *options.echo_cancellation; - if (adm()->EnableBuiltInAEC(enable_built_in_aec) == 0 && - enable_built_in_aec) { - // Disable internal software EC if built-in EC is enabled, - // i.e., replace the software EC with the built-in EC. - options.echo_cancellation = false; - RTC_LOG(LS_INFO) - << "Disabling EC since built-in EC will be used instead"; - } - } - } - - if (options.auto_gain_control) { - bool built_in_agc_avaliable = adm()->BuiltInAGCIsAvailable(); - if (built_in_agc_avaliable) { - if (adm()->EnableBuiltInAGC(*options.auto_gain_control) == 0 && - *options.auto_gain_control) { - // Disable internal software AGC if built-in AGC is enabled, - // i.e., replace the software AGC with the built-in AGC. - options.auto_gain_control = false; - RTC_LOG(LS_INFO) - << "Disabling AGC since built-in AGC will be used instead"; - } - } - } - - if (options.noise_suppression) { - if (adm()->BuiltInNSIsAvailable()) { - bool builtin_ns = *options.noise_suppression; - if (adm()->EnableBuiltInNS(builtin_ns) == 0 && builtin_ns) { - // Disable internal software NS if built-in NS is enabled, - // i.e., replace the software NS with the built-in NS. 
- options.noise_suppression = false; - RTC_LOG(LS_INFO) - << "Disabling NS since built-in NS will be used instead"; - } - } - } - - if (options.stereo_swapping) { - audio_state()->SetStereoChannelSwapping(*options.stereo_swapping); - } - - if (options.audio_jitter_buffer_max_packets) { - audio_jitter_buffer_max_packets_ = - std::max(20, *options.audio_jitter_buffer_max_packets); - } - if (options.audio_jitter_buffer_fast_accelerate) { - audio_jitter_buffer_fast_accelerate_ = - *options.audio_jitter_buffer_fast_accelerate; - } - if (options.audio_jitter_buffer_min_delay_ms) { - audio_jitter_buffer_min_delay_ms_ = - *options.audio_jitter_buffer_min_delay_ms; - } - - webrtc::AudioProcessing* ap = apm(); - if (!ap) { - return; - } - - webrtc::AudioProcessing::Config apm_config = ap->GetConfig(); - - if (options.echo_cancellation) { - apm_config.echo_canceller.enabled = *options.echo_cancellation; - apm_config.echo_canceller.mobile_mode = use_mobile_software_aec; - } - - if (options.auto_gain_control) { - const bool enabled = *options.auto_gain_control; - apm_config.gain_controller1.enabled = enabled; -#if defined(WEBRTC_IOS) || defined(WEBRTC_ANDROID) - apm_config.gain_controller1.mode = - apm_config.gain_controller1.kFixedDigital; -#else - apm_config.gain_controller1.mode = - apm_config.gain_controller1.kAdaptiveAnalog; -#endif - } - - if (options.highpass_filter) { - apm_config.high_pass_filter.enabled = *options.highpass_filter; - } - - if (options.noise_suppression) { - const bool enabled = *options.noise_suppression; - apm_config.noise_suppression.enabled = enabled; - apm_config.noise_suppression.level = - webrtc::AudioProcessing::Config::NoiseSuppression::Level::kHigh; - } - - ap->ApplyConfig(apm_config); -} - -const std::vector& CustomWebRtcVoiceEngine::send_codecs() const { - RTC_DCHECK(signal_thread_checker_.IsCurrent()); - return send_codecs_; -} - -const std::vector& CustomWebRtcVoiceEngine::recv_codecs() const { - RTC_DCHECK(signal_thread_checker_.IsCurrent()); - return recv_codecs_; -} - -std::vector -CustomWebRtcVoiceEngine::GetRtpHeaderExtensions() const { - RTC_DCHECK(signal_thread_checker_.IsCurrent()); - std::vector result; - // id is *not* incremented for non-default extensions, UsedIds needs to - // resolve conflicts. 
-  int id = 1;
-  for (const auto& uri : {webrtc::RtpExtension::kAudioLevelUri,
-                          webrtc::RtpExtension::kAbsSendTimeUri,
-                          webrtc::RtpExtension::kTransportSequenceNumberUri,
-                          webrtc::RtpExtension::kMidUri}) {
-    result.emplace_back(uri, id++, webrtc::RtpTransceiverDirection::kSendRecv);
-  }
-  for (const auto& uri : {webrtc::RtpExtension::kAbsoluteCaptureTimeUri}) {
-    result.emplace_back(uri, id, webrtc::RtpTransceiverDirection::kStopped);
-  }
-  return result;
-}
-
-bool CustomWebRtcVoiceEngine::StartAecDump(webrtc::FileWrapper file,
-                                           int64_t max_size_bytes) {
-  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-
-  webrtc::AudioProcessing* ap = apm();
-  if (!ap) {
-    RTC_LOG(LS_WARNING)
-        << "Attempting to start aecdump when no audio processing module is "
-           "present, hence no aecdump is started.";
-    return false;
-  }
-
-  return ap->CreateAndAttachAecDump(file.Release(), max_size_bytes,
-                                    low_priority_worker_queue_.get());
-}
-
-void CustomWebRtcVoiceEngine::StopAecDump() {
-  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-  webrtc::AudioProcessing* ap = apm();
-  if (ap) {
-    ap->DetachAecDump();
-  } else {
-    RTC_LOG(LS_WARNING) << "Attempting to stop aecdump when no audio "
-                           "processing module is present";
-  }
-}
-
-absl::optional<webrtc::AudioDeviceModule::Stats>
-CustomWebRtcVoiceEngine::GetAudioDeviceStats() {
-  return adm()->GetStats();
-}
-
-webrtc::AudioDeviceModule* CustomWebRtcVoiceEngine::adm() {
-  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-  RTC_DCHECK(adm_);
-  return adm_.get();
-}
-
-webrtc::AudioProcessing* CustomWebRtcVoiceEngine::apm() const {
-  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-  return apm_.get();
-}
-
-webrtc::AudioState* CustomWebRtcVoiceEngine::audio_state() {
-  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-  RTC_DCHECK(audio_state_);
-  return audio_state_.get();
-}
-
-std::vector<AudioCodec> CustomWebRtcVoiceEngine::CollectCodecs(
-    const std::vector<webrtc::AudioCodecSpec>& specs) const {
-  PayloadTypeMapper mapper;
-  std::vector<AudioCodec> out;
-
-  // Only generate CN payload types for these clockrates:
-  std::map<int, bool, std::greater<int>> generate_cn = {
-      {8000, false}, {16000, false}, {32000, false}};
-  // Only generate telephone-event payload types for these clockrates:
-  std::map<int, bool, std::greater<int>> generate_dtmf = {
-      {8000, false}, {16000, false}, {32000, false}, {48000, false}};
-
-  auto map_format = [&mapper](const webrtc::SdpAudioFormat& format,
-                              std::vector<AudioCodec>* out) {
-    absl::optional<AudioCodec> opt_codec = mapper.ToAudioCodec(format);
-    if (opt_codec) {
-      if (out) {
-        out->push_back(*opt_codec);
-      }
-    } else {
-      RTC_LOG(LS_ERROR) << "Unable to assign payload type to format: "
-                        << rtc::ToString(format);
-    }
-
-    return opt_codec;
-  };
-
-  for (const auto& spec : specs) {
-    // We need to do some extra stuff before adding the main codecs to out.
-    absl::optional<AudioCodec> opt_codec = map_format(spec.format, nullptr);
-    if (opt_codec) {
-      AudioCodec& codec = *opt_codec;
-      if (spec.info.supports_network_adaption) {
-        codec.AddFeedbackParam(
-            FeedbackParam(kRtcpFbParamTransportCc, kParamValueEmpty));
-      }
-
-      if (spec.info.allow_comfort_noise) {
-        // Generate a CN entry if the decoder allows it and we support the
-        // clockrate.
-        auto cn = generate_cn.find(spec.format.clockrate_hz);
-        if (cn != generate_cn.end()) {
-          cn->second = true;
-        }
-      }
-
-      // Generate a telephone-event entry if we support the clockrate.
- auto dtmf = generate_dtmf.find(spec.format.clockrate_hz); - if (dtmf != generate_dtmf.end()) { - dtmf->second = true; - } - - out.push_back(codec); - - if (codec.name == kOpusCodecName) { - std::string red_fmtp = - rtc::ToString(codec.id) + "/" + rtc::ToString(codec.id); - map_format({kRedCodecName, 48000, 2, {{"", red_fmtp}}}, &out); - } - } - } - - // Add CN codecs after "proper" audio codecs. - for (const auto& cn : generate_cn) { - if (cn.second) { - map_format({kCnCodecName, cn.first, 1}, &out); - } - } - - // Add telephone-event codecs last. - for (const auto& dtmf : generate_dtmf) { - if (dtmf.second) { - map_format({kDtmfCodecName, dtmf.first, 1}, &out); - } - } - - return out; -} - -// --------------------------------- WebRtcVoiceSendChannel2 ------------------ - -class WebRtcVoiceSendChannel2::WebRtcAudioSendStream - : public AudioSource::Sink { - public: - WebRtcAudioSendStream( - uint32_t ssrc, const std::string& mid, const std::string& c_name, - const std::string track_id, - const absl::optional& - send_codec_spec, - bool extmap_allow_mixed, - const std::vector& extensions, - int max_send_bitrate_bps, int rtcp_report_interval_ms, - const absl::optional& audio_network_adaptor_config, - webrtc::Call* call, webrtc::Transport* send_transport, - const rtc::scoped_refptr& encoder_factory, - const absl::optional codec_pair_id, - rtc::scoped_refptr frame_encryptor, - const webrtc::CryptoOptions& crypto_options) - : adaptive_ptime_config_(call->trials()), - call_(call), - config_(send_transport), - max_send_bitrate_bps_(max_send_bitrate_bps), - rtp_parameters_(CreateRtpParametersWithOneEncoding()) { - RTC_DCHECK(call); - RTC_DCHECK(encoder_factory); - config_.rtp.ssrc = ssrc; - config_.rtp.mid = mid; - config_.rtp.c_name = c_name; - config_.rtp.extmap_allow_mixed = extmap_allow_mixed; - config_.rtp.extensions = extensions; - config_.has_dscp = - rtp_parameters_.encodings[0].network_priority != webrtc::Priority::kLow; - config_.encoder_factory = encoder_factory; - config_.codec_pair_id = codec_pair_id; - config_.track_id = track_id; - config_.frame_encryptor = frame_encryptor; - config_.crypto_options = crypto_options; - config_.rtcp_report_interval_ms = rtcp_report_interval_ms; - rtp_parameters_.encodings[0].ssrc = ssrc; - rtp_parameters_.rtcp.cname = c_name; - rtp_parameters_.header_extensions = extensions; - - audio_network_adaptor_config_from_options_ = audio_network_adaptor_config; - UpdateAudioNetworkAdaptorConfig(); - - if (send_codec_spec) { - UpdateSendCodecSpec(*send_codec_spec); - } - - stream_ = call_->CreateAudioSendStream(config_); - } - - WebRtcAudioSendStream() = delete; - WebRtcAudioSendStream(const WebRtcAudioSendStream&) = delete; - WebRtcAudioSendStream& operator=(const WebRtcAudioSendStream&) = delete; - - ~WebRtcAudioSendStream() override { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - ClearSource(); - call_->DestroyAudioSendStream(stream_); - } - - void SetSendCodecSpec( - const webrtc::AudioSendStream::Config::SendCodecSpec& send_codec_spec) { - UpdateSendCodecSpec(send_codec_spec); - ReconfigureAudioSendStream(nullptr); - } - - void SetRtpExtensions(const std::vector& extensions) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - config_.rtp.extensions = extensions; - rtp_parameters_.header_extensions = extensions; - ReconfigureAudioSendStream(nullptr); - } - - void SetExtmapAllowMixed(bool extmap_allow_mixed) { - config_.rtp.extmap_allow_mixed = extmap_allow_mixed; - ReconfigureAudioSendStream(nullptr); - } - - void SetMid(const std::string& mid) { - 
RTC_DCHECK_RUN_ON(&worker_thread_checker_); - if (config_.rtp.mid == mid) { - return; - } - config_.rtp.mid = mid; - ReconfigureAudioSendStream(nullptr); - } - - void SetFrameEncryptor( - rtc::scoped_refptr frame_encryptor) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - config_.frame_encryptor = frame_encryptor; - ReconfigureAudioSendStream(nullptr); - } - - void SetAudioNetworkAdaptorConfig( - const absl::optional& audio_network_adaptor_config) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - if (audio_network_adaptor_config_from_options_ == - audio_network_adaptor_config) { - return; - } - audio_network_adaptor_config_from_options_ = audio_network_adaptor_config; - UpdateAudioNetworkAdaptorConfig(); - UpdateAllowedBitrateRange(); - ReconfigureAudioSendStream(nullptr); - } - - bool SetMaxSendBitrate(int bps) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - RTC_DCHECK(config_.send_codec_spec); - RTC_DCHECK(audio_codec_spec_); - auto send_rate = ComputeSendBitrate( - bps, rtp_parameters_.encodings[0].max_bitrate_bps, *audio_codec_spec_); - - if (!send_rate) { - return false; - } - - max_send_bitrate_bps_ = bps; - - if (send_rate != config_.send_codec_spec->target_bitrate_bps) { - config_.send_codec_spec->target_bitrate_bps = send_rate; - ReconfigureAudioSendStream(nullptr); - } - return true; - } - - bool SendTelephoneEvent(int payload_type, int payload_freq, int event, - int duration_ms) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - RTC_DCHECK(stream_); - return stream_->SendTelephoneEvent(payload_type, payload_freq, event, - duration_ms); - } - - void SetSend(bool send) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - send_ = send; - UpdateSendState(); - } - - void SetMuted(bool muted) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - RTC_DCHECK(stream_); - stream_->SetMuted(muted); - muted_ = muted; - } - - bool muted() const { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - return muted_; - } - - webrtc::AudioSendStream::Stats GetStats(bool has_remote_tracks) const { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - RTC_DCHECK(stream_); - return stream_->GetStats(has_remote_tracks); - } - - // Starts the sending by setting ourselves as a sink to the AudioSource to - // get data callbacks. - // This method is called on the libjingle worker thread. - // TODO(xians): Make sure Start() is called only once. - void SetSource(AudioSource* source) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - RTC_DCHECK(source); - if (source_) { - RTC_DCHECK(source_ == source); - return; - } - source->SetSink(this); - source_ = source; - UpdateSendState(); - } - - // Stops sending by setting the sink of the AudioSource to nullptr. No data - // callback will be received after this method. - // This method is called on the libjingle worker thread. - void ClearSource() { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - if (source_) { - source_->SetSink(nullptr); - source_ = nullptr; - } - UpdateSendState(); - } - - // AudioSource::Sink implementation. - // This method is called on the audio thread. 
-  void OnData(const void* audio_data, int bits_per_sample, int sample_rate,
-              size_t number_of_channels, size_t number_of_frames,
-              absl::optional<int64_t> absolute_capture_timestamp_ms) override {
-    TRACE_EVENT_BEGIN2("webrtc", "WebRtcAudioSendStream::OnData", "sample_rate",
-                       sample_rate, "number_of_frames", number_of_frames);
-    RTC_DCHECK_EQ(16, bits_per_sample);
-    RTC_CHECK_RUNS_SERIALIZED(&audio_capture_race_checker_);
-    RTC_DCHECK(stream_);
-    std::unique_ptr<webrtc::AudioFrame> audio_frame(new webrtc::AudioFrame());
-    audio_frame->UpdateFrame(
-        audio_frame->timestamp_, static_cast<const int16_t*>(audio_data),
-        number_of_frames, sample_rate, audio_frame->speech_type_,
-        audio_frame->vad_activity_, number_of_channels);
-    // TODO(bugs.webrtc.org/10739): add dcheck that
-    // `absolute_capture_timestamp_ms` always receives a value.
-    if (absolute_capture_timestamp_ms) {
-      audio_frame->set_absolute_capture_timestamp_ms(
-          *absolute_capture_timestamp_ms);
-    }
-    stream_->SendAudioData(std::move(audio_frame));
-    TRACE_EVENT_END1("webrtc", "WebRtcAudioSendStream::OnData",
-                     "number_of_channels", number_of_channels);
-  }
-
-  // Callback from the `source_` when it is going away. In case Start() has
-  // never been called, this callback won't be triggered.
-  void OnClose() override {
-    RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-    // Set `source_` to nullptr to make sure no more callback will get into
-    // the source.
-    source_ = nullptr;
-    UpdateSendState();
-  }
-
-  const webrtc::RtpParameters& rtp_parameters() const {
-    return rtp_parameters_;
-  }
-
-  webrtc::RTCError SetRtpParameters(const webrtc::RtpParameters& parameters,
-                                    webrtc::SetParametersCallback callback) {
-    webrtc::RTCError error = CheckRtpParametersInvalidModificationAndValues(
-        rtp_parameters_, parameters);
-    if (!error.ok()) {
-      return webrtc::InvokeSetParametersCallback(callback, error);
-    }
-
-    absl::optional<int> send_rate;
-    if (audio_codec_spec_) {
-      send_rate = ComputeSendBitrate(max_send_bitrate_bps_,
-                                     parameters.encodings[0].max_bitrate_bps,
-                                     *audio_codec_spec_);
-      if (!send_rate) {
-        return webrtc::InvokeSetParametersCallback(
-            callback, webrtc::RTCError(webrtc::RTCErrorType::INTERNAL_ERROR));
-      }
-    }
-
-    const absl::optional<int> old_rtp_max_bitrate =
-        rtp_parameters_.encodings[0].max_bitrate_bps;
-    double old_priority = rtp_parameters_.encodings[0].bitrate_priority;
-    webrtc::Priority old_dscp = rtp_parameters_.encodings[0].network_priority;
-    bool old_adaptive_ptime = rtp_parameters_.encodings[0].adaptive_ptime;
-    rtp_parameters_ = parameters;
-    config_.bitrate_priority = rtp_parameters_.encodings[0].bitrate_priority;
-    config_.has_dscp = (rtp_parameters_.encodings[0].network_priority !=
-                        webrtc::Priority::kLow);
-
-    bool reconfigure_send_stream =
-        (rtp_parameters_.encodings[0].max_bitrate_bps != old_rtp_max_bitrate) ||
-        (rtp_parameters_.encodings[0].bitrate_priority != old_priority) ||
-        (rtp_parameters_.encodings[0].network_priority != old_dscp) ||
-        (rtp_parameters_.encodings[0].adaptive_ptime != old_adaptive_ptime);
-    if (rtp_parameters_.encodings[0].max_bitrate_bps != old_rtp_max_bitrate) {
-      // Update the bitrate range.
-      if (send_rate) {
-        config_.send_codec_spec->target_bitrate_bps = send_rate;
-      }
-    }
-    if (reconfigure_send_stream) {
-      // Changing adaptive_ptime may update the audio network adaptor config
-      // used.
- UpdateAudioNetworkAdaptorConfig(); - UpdateAllowedBitrateRange(); - ReconfigureAudioSendStream(std::move(callback)); - } else { - webrtc::InvokeSetParametersCallback(callback, webrtc::RTCError::OK()); - } - - rtp_parameters_.rtcp.cname = config_.rtp.c_name; - rtp_parameters_.rtcp.reduced_size = false; - - // parameters.encodings[0].active could have changed. - UpdateSendState(); - return webrtc::RTCError::OK(); - } - - void SetEncoderToPacketizerFrameTransformer( - rtc::scoped_refptr frame_transformer) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - config_.frame_transformer = std::move(frame_transformer); - ReconfigureAudioSendStream(nullptr); - } - - private: - void UpdateSendState() { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - RTC_DCHECK(stream_); - RTC_DCHECK_EQ(1UL, rtp_parameters_.encodings.size()); - // Stream can be started without |source_| being set. - if (send_ && rtp_parameters_.encodings[0].active) { - stream_->Start(); - } else { - stream_->Stop(); - } - } - - void UpdateAllowedBitrateRange() { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - // The order of precedence, from lowest to highest is: - // - a reasonable default of 32kbps min/max - // - fixed target bitrate from codec spec - // - lower min bitrate if adaptive ptime is enabled - const int kDefaultBitrateBps = 32000; - config_.min_bitrate_bps = kDefaultBitrateBps; - config_.max_bitrate_bps = kDefaultBitrateBps; - - if (config_.send_codec_spec && - config_.send_codec_spec->target_bitrate_bps) { - config_.min_bitrate_bps = *config_.send_codec_spec->target_bitrate_bps; - config_.max_bitrate_bps = *config_.send_codec_spec->target_bitrate_bps; - } - - if (rtp_parameters_.encodings[0].adaptive_ptime) { - config_.min_bitrate_bps = std::min( - config_.min_bitrate_bps, - static_cast(adaptive_ptime_config_.min_encoder_bitrate.bps())); - } - } - - void UpdateSendCodecSpec( - const webrtc::AudioSendStream::Config::SendCodecSpec& send_codec_spec) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - config_.send_codec_spec = send_codec_spec; - auto info = - config_.encoder_factory->QueryAudioEncoder(send_codec_spec.format); - RTC_DCHECK(info); - // If a specific target bitrate has been set for the stream, use that as - // the new default bitrate when computing send bitrate. - if (send_codec_spec.target_bitrate_bps) { - info->default_bitrate_bps = std::max( - info->min_bitrate_bps, - std::min(info->max_bitrate_bps, *send_codec_spec.target_bitrate_bps)); - } - - audio_codec_spec_.emplace( - webrtc::AudioCodecSpec{send_codec_spec.format, *info}); - - config_.send_codec_spec->target_bitrate_bps = ComputeSendBitrate( - max_send_bitrate_bps_, rtp_parameters_.encodings[0].max_bitrate_bps, - *audio_codec_spec_); - - UpdateAllowedBitrateRange(); - - // Encoder will only use two channels if the stereo parameter is set. 
- const auto& it = send_codec_spec.format.parameters.find("stereo"); - if (it != send_codec_spec.format.parameters.end() && it->second == "1") { - num_encoded_channels_ = 2; - } else { - num_encoded_channels_ = 1; - } - } - - void UpdateAudioNetworkAdaptorConfig() { - if (adaptive_ptime_config_.enabled || - rtp_parameters_.encodings[0].adaptive_ptime) { - config_.audio_network_adaptor_config = - adaptive_ptime_config_.audio_network_adaptor_config; - return; - } - config_.audio_network_adaptor_config = - audio_network_adaptor_config_from_options_; - } - - void ReconfigureAudioSendStream(webrtc::SetParametersCallback callback) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - RTC_DCHECK(stream_); - stream_->Reconfigure(config_, std::move(callback)); - } - - int NumPreferredChannels() const override { return num_encoded_channels_; } - - const AdaptivePtimeConfig adaptive_ptime_config_; - webrtc::SequenceChecker worker_thread_checker_; - rtc::RaceChecker audio_capture_race_checker_; - webrtc::Call* call_ = nullptr; - webrtc::AudioSendStream::Config config_; - // The stream is owned by WebRtcAudioSendStream and may be reallocated if - // configuration changes. - webrtc::AudioSendStream* stream_ = nullptr; - - // Raw pointer to AudioSource owned by LocalAudioTrackHandler. - // PeerConnection will make sure invalidating the pointer before the object - // goes away. - AudioSource* source_ = nullptr; - bool send_ = false; - bool muted_ = false; - int max_send_bitrate_bps_; - webrtc::RtpParameters rtp_parameters_; - absl::optional audio_codec_spec_; - // TODO(webrtc:11717): Remove this once audio_network_adaptor in AudioOptions - // has been removed. - absl::optional audio_network_adaptor_config_from_options_; - std::atomic num_encoded_channels_{-1}; -}; - -WebRtcVoiceSendChannel2::WebRtcVoiceSendChannel2( - CustomWebRtcVoiceEngine* engine, const MediaConfig& config, - const AudioOptions& options, const webrtc::CryptoOptions& crypto_options, - webrtc::Call* call, webrtc::AudioCodecPairId codec_pair_id) - : MediaChannelUtil(call->network_thread(), config.enable_dscp), - worker_thread_(call->worker_thread()), - engine_(engine), - call_(call), - audio_config_(config.audio), - codec_pair_id_(codec_pair_id), - crypto_options_(crypto_options) { - RTC_LOG(LS_VERBOSE) << "WebRtcVoiceSendChannel2::WebRtcVoiceSendChannel2"; - RTC_DCHECK(call); - SetOptions(options); -} - -WebRtcVoiceSendChannel2::~WebRtcVoiceSendChannel2() { - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_DLOG(LS_VERBOSE) << "WebRtcVoiceSendChannel2::~WebRtcVoiceSendChannel2"; - // TODO(solenberg): Should be able to delete the streams directly, without - // going through RemoveNnStream(), once stream objects handle - // all (de)configuration. - while (!send_streams_.empty()) { - RemoveSendStream(send_streams_.begin()->first); - } -} - -bool WebRtcVoiceSendChannel2::SetOptions(const AudioOptions& options) { - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_LOG(LS_INFO) << "Setting voice channel options: " << options.ToString(); - - // We retain all of the existing options, and apply the given ones - // on top. This means there is no way to "clear" options such that - // they go back to the engine default. - options_.SetAll(options); - engine()->ApplyOptions(options_); - - absl::optional audio_network_adaptor_config = - GetAudioNetworkAdaptorConfig(options_); - for (auto& it : send_streams_) { - it.second->SetAudioNetworkAdaptorConfig(audio_network_adaptor_config); - } - - RTC_LOG(LS_INFO) << "Set voice send channel options. 
Current options: " - << options_.ToString(); - return true; -} - -bool WebRtcVoiceSendChannel2::SetSenderParameters( - const AudioSenderParameter& params) { - TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::SetSenderParameters"); - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetSenderParameters: " - << params.ToString(); - // TODO(pthatcher): Refactor this to be more clean now that we have - // all the information at once. - - // Finding if the RtpParameters force a specific codec - absl::optional force_codec; - if (send_streams_.size() == 1) { - // Since audio simulcast is not supported, currently, only PlanB - // has multiple tracks and we don't care about getting the - // functionality working there properly. - auto rtp_parameters = send_streams_.begin()->second->rtp_parameters(); - if (rtp_parameters.encodings[0].codec) { - auto matched_codec = - absl::c_find_if(params.codecs, [&](auto negotiated_codec) { - return negotiated_codec.MatchesRtpCodec( - *rtp_parameters.encodings[0].codec); - }); - if (matched_codec != params.codecs.end()) { - force_codec = *matched_codec; - } else { - // The requested codec has been negotiated away, we clear it from the - // parameters. - for (auto& encoding : rtp_parameters.encodings) { - encoding.codec.reset(); - } - send_streams_.begin()->second->SetRtpParameters(rtp_parameters, - nullptr); - } - } - } - - if (!SetSendCodecs(params.codecs, force_codec)) { - return false; - } - - if (!ValidateRtpExtensions(params.extensions, send_rtp_extensions_)) { - return false; - } - - if (ExtmapAllowMixed() != params.extmap_allow_mixed) { - SetExtmapAllowMixed(params.extmap_allow_mixed); - for (auto& it : send_streams_) { - it.second->SetExtmapAllowMixed(params.extmap_allow_mixed); - } - } - - std::vector filtered_extensions = FilterRtpExtensions( - params.extensions, webrtc::RtpExtension::IsSupportedForAudio, true, - call_->trials()); - if (send_rtp_extensions_ != filtered_extensions) { - send_rtp_extensions_.swap(filtered_extensions); - for (auto& it : send_streams_) { - it.second->SetRtpExtensions(send_rtp_extensions_); - } - } - if (!params.mid.empty()) { - mid_ = params.mid; - for (auto& it : send_streams_) { - it.second->SetMid(params.mid); - } - } - - if (send_codec_spec_ && !SetMaxSendBitrate(params.max_bandwidth_bps)) { - return false; - } - return SetOptions(params.options); -} - -absl::optional WebRtcVoiceSendChannel2::GetSendCodec() const { - if (send_codec_spec_) { - return CreateAudioCodec(send_codec_spec_->format); - } - return absl::nullopt; -} - -// Utility function called from SetSenderParameters() to extract current send -// codec settings from the given list of codecs (originally from SDP). Both send -// and receive streams may be reconfigured based on the new settings. -bool WebRtcVoiceSendChannel2::SetSendCodecs( - const std::vector& codecs, absl::optional preferred_codec) { - RTC_DCHECK_RUN_ON(worker_thread_); - dtmf_payload_type_ = absl::nullopt; - dtmf_payload_freq_ = -1; - - // Validate supplied codecs list. - for (const Codec& codec : codecs) { - // TODO(solenberg): Validate more aspects of input - that payload types - // don't overlap, remove redundant/unsupported codecs etc - - // the same way it is done for RtpHeaderExtensions. 
- if (codec.id < kMinPayloadType || codec.id > kMaxPayloadType) { - RTC_LOG(LS_WARNING) << "Codec payload type out of range: " - << ToString(codec); - return false; - } - } - - // Find PT of telephone-event codec with lowest clockrate, as a fallback, in - // case we don't have a DTMF codec with a rate matching the send codec's, or - // if this function returns early. - std::vector dtmf_codecs; - for (const Codec& codec : codecs) { - if (IsCodec(codec, kDtmfCodecName)) { - dtmf_codecs.push_back(codec); - if (!dtmf_payload_type_ || codec.clockrate < dtmf_payload_freq_) { - dtmf_payload_type_ = codec.id; - dtmf_payload_freq_ = codec.clockrate; - } - } - } - - // Scan through the list to figure out the codec to use for sending. - absl::optional - send_codec_spec; - webrtc::BitrateConstraints bitrate_config; - absl::optional voice_codec_info; - size_t send_codec_position = 0; - for (const Codec& voice_codec : codecs) { - if (!(IsCodec(voice_codec, kCnCodecName) || - IsCodec(voice_codec, kDtmfCodecName) || - IsCodec(voice_codec, kRedCodecName)) && - (!preferred_codec || preferred_codec->Matches(voice_codec))) { - webrtc::SdpAudioFormat format(voice_codec.name, voice_codec.clockrate, - voice_codec.channels, voice_codec.params); - - voice_codec_info = engine()->encoder_factory_->QueryAudioEncoder(format); - if (!voice_codec_info) { - RTC_LOG(LS_WARNING) << "Unknown codec " << ToString(voice_codec); - continue; - } - - send_codec_spec = webrtc::AudioSendStream::Config::SendCodecSpec( - voice_codec.id, format); - if (voice_codec.bitrate > 0) { - send_codec_spec->target_bitrate_bps = voice_codec.bitrate; - } - send_codec_spec->transport_cc_enabled = HasTransportCc(voice_codec); - send_codec_spec->nack_enabled = HasNack(voice_codec); - send_codec_spec->enable_non_sender_rtt = HasRrtr(voice_codec); - bitrate_config = GetBitrateConfigForCodec(voice_codec); - break; - } - send_codec_position++; - } - - if (!send_codec_spec) { - // No codecs in common, bail out early. - return true; - } - - RTC_DCHECK(voice_codec_info); - if (voice_codec_info->allow_comfort_noise) { - // Loop through the codecs list again to find the CN codec. - // TODO(solenberg): Break out into a separate function? - for (const Codec& cn_codec : codecs) { - if (IsCodec(cn_codec, kCnCodecName) && - cn_codec.clockrate == send_codec_spec->format.clockrate_hz && - cn_codec.channels == voice_codec_info->num_channels) { - if (cn_codec.channels != 1) { - RTC_LOG(LS_WARNING) - << "CN #channels " << cn_codec.channels << " not supported."; - } else if (cn_codec.clockrate != 8000 && cn_codec.clockrate != 16000 && - cn_codec.clockrate != 32000) { - RTC_LOG(LS_WARNING) - << "CN frequency " << cn_codec.clockrate << " not supported."; - } else { - send_codec_spec->cng_payload_type = cn_codec.id; - } - break; - } - } - - // Find the telephone-event PT exactly matching the preferred send codec. - for (const Codec& dtmf_codec : dtmf_codecs) { - if (dtmf_codec.clockrate == send_codec_spec->format.clockrate_hz) { - dtmf_payload_type_ = dtmf_codec.id; - dtmf_payload_freq_ = dtmf_codec.clockrate; - break; - } - } - } - - // Loop through the codecs to find the RED codec that matches opus - // with respect to clockrate and number of channels. - // RED codec needs to be negotiated before the actual codec they - // reference. 
- for (size_t i = 0; i < send_codec_position; ++i) { - const Codec& red_codec = codecs[i]; - if (IsCodec(red_codec, kRedCodecName) && - CheckRedParameters(red_codec, *send_codec_spec)) { - send_codec_spec->red_payload_type = red_codec.id; - break; - } - } - - if (send_codec_spec_ != send_codec_spec) { - send_codec_spec_ = std::move(send_codec_spec); - // Apply new settings to all streams. - for (const auto& kv : send_streams_) { - kv.second->SetSendCodecSpec(*send_codec_spec_); - } - } else { - // If the codec isn't changing, set the start bitrate to -1 which means - // "unchanged" so that BWE isn't affected. - bitrate_config.start_bitrate_bps = -1; - } - call_->GetTransportControllerSend()->SetSdpBitrateParameters(bitrate_config); - - send_codecs_ = codecs; - - if (send_codec_changed_callback_) { - send_codec_changed_callback_(); - } - - return true; -} - -void WebRtcVoiceSendChannel2::SetSend(bool send) { - TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::SetSend"); - if (send_ == send) { - return; - } - - // Apply channel specific options. - if (send) { - engine()->ApplyOptions(options_); - - // Initialize the ADM for recording (this may take time on some platforms, - // e.g. Android). - if (options_.init_recording_on_send.value_or(true) && - // InitRecording() may return an error if the ADM is already recording. - !engine()->adm()->RecordingIsInitialized() && - !engine()->adm()->Recording()) { - if (engine()->adm()->InitRecording() != 0) { - RTC_LOG(LS_WARNING) << "Failed to initialize recording"; - } - } - } - - // Change the settings on each send channel. - for (auto& kv : send_streams_) { - kv.second->SetSend(send); - } - - send_ = send; -} - -bool WebRtcVoiceSendChannel2::SetAudioSend(uint32_t ssrc, bool enable, - const AudioOptions* options, - AudioSource* source) { - RTC_DCHECK_RUN_ON(worker_thread_); - // TODO(solenberg): The state change should be fully rolled back if any one of - // these calls fail. 
- if (!SetLocalSource(ssrc, source)) { - return false; - } - if (!MuteStream(ssrc, !enable)) { - return false; - } - if (enable && options) { - return SetOptions(*options); - } - return true; -} - -bool WebRtcVoiceSendChannel2::AddSendStream(const StreamParams& sp) { - TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::AddSendStream"); - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_LOG(LS_INFO) << "AddSendStream: " << sp.ToString(); - - uint32_t ssrc = sp.first_ssrc(); - RTC_DCHECK(0 != ssrc); - - if (send_streams_.find(ssrc) != send_streams_.end()) { - RTC_LOG(LS_ERROR) << "Stream already exists with ssrc " << ssrc; - return false; - } - - absl::optional audio_network_adaptor_config = - GetAudioNetworkAdaptorConfig(options_); - WebRtcAudioSendStream* stream = new WebRtcAudioSendStream( - ssrc, mid_, sp.cname, sp.id, send_codec_spec_, ExtmapAllowMixed(), - send_rtp_extensions_, max_send_bitrate_bps_, - audio_config_.rtcp_report_interval_ms, audio_network_adaptor_config, - call_, transport(), engine()->encoder_factory_, codec_pair_id_, nullptr, - crypto_options_); - send_streams_.insert(std::make_pair(ssrc, stream)); - if (ssrc_list_changed_callback_) { - std::set ssrcs_in_use; - for (auto it : send_streams_) { - ssrcs_in_use.insert(it.first); - } - ssrc_list_changed_callback_(ssrcs_in_use); - } - - send_streams_[ssrc]->SetSend(send_); - return true; -} - -bool WebRtcVoiceSendChannel2::RemoveSendStream(uint32_t ssrc) { - TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::RemoveSendStream"); - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_LOG(LS_INFO) << "RemoveSendStream: " << ssrc; - - auto it = send_streams_.find(ssrc); - if (it == send_streams_.end()) { - RTC_LOG(LS_WARNING) << "Try to remove stream with ssrc " << ssrc - << " which doesn't exist."; - return false; - } - - it->second->SetSend(false); - - // TODO(solenberg): If we're removing the receiver_reports_ssrc_ stream, find - // the first active send stream and use that instead, reassociating receive - // streams. - - delete it->second; - send_streams_.erase(it); - if (send_streams_.empty()) { - SetSend(false); - } - return true; -} - -void WebRtcVoiceSendChannel2::SetSsrcListChangedCallback( - absl::AnyInvocable&)> callback) { - ssrc_list_changed_callback_ = std::move(callback); -} - -bool WebRtcVoiceSendChannel2::SetLocalSource(uint32_t ssrc, - AudioSource* source) { - auto it = send_streams_.find(ssrc); - if (it == send_streams_.end()) { - if (source) { - // Return an error if trying to set a valid source with an invalid ssrc. - RTC_LOG(LS_ERROR) << "SetLocalSource failed with ssrc " << ssrc; - return false; - } - - // The channel likely has gone away, do nothing. - return true; - } - - if (source) { - it->second->SetSource(source); - } else { - it->second->ClearSource(); - } - - return true; -} - -bool WebRtcVoiceSendChannel2::CanInsertDtmf() { - return dtmf_payload_type_.has_value() && send_; -} - -void WebRtcVoiceSendChannel2::SetFrameEncryptor( - uint32_t ssrc, - rtc::scoped_refptr frame_encryptor) { - RTC_DCHECK_RUN_ON(worker_thread_); - auto matching_stream = send_streams_.find(ssrc); - if (matching_stream != send_streams_.end()) { - matching_stream->second->SetFrameEncryptor(frame_encryptor); - } -} - -bool WebRtcVoiceSendChannel2::InsertDtmf(uint32_t ssrc, int event, - int duration) { - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_LOG(LS_INFO) << "WebRtcVoiceMediaChannel::InsertDtmf"; - if (!CanInsertDtmf()) { - return false; - } - - // Figure out which WebRtcAudioSendStream to send the event on. - auto it = ssrc != 0 ? 
send_streams_.find(ssrc) : send_streams_.begin(); - if (it == send_streams_.end()) { - RTC_LOG(LS_WARNING) << "The specified ssrc " << ssrc << " is not in use."; - return false; - } - if (event < kMinTelephoneEventCode || event > kMaxTelephoneEventCode) { - RTC_LOG(LS_WARNING) << "DTMF event code " << event << " out of range."; - return false; - } - RTC_DCHECK_NE(-1, dtmf_payload_freq_); - return it->second->SendTelephoneEvent(*dtmf_payload_type_, dtmf_payload_freq_, - event, duration); -} - -void WebRtcVoiceSendChannel2::OnPacketSent(const rtc::SentPacket& sent_packet) { - RTC_DCHECK_RUN_ON(&network_thread_checker_); - // TODO(tommi): We shouldn't need to go through call_ to deliver this - // notification. We should already have direct access to - // video_send_delay_stats_ and transport_send_ptr_ via `stream_`. - // So we should be able to remove OnSentPacket from Call and handle this per - // channel instead. At the moment Call::OnSentPacket calls OnSentPacket for - // the video stats, which we should be able to skip. - call_->OnSentPacket(sent_packet); -} - -void WebRtcVoiceSendChannel2::OnNetworkRouteChanged( - absl::string_view transport_name, const rtc::NetworkRoute& network_route) { - RTC_DCHECK_RUN_ON(&network_thread_checker_); - - call_->OnAudioTransportOverheadChanged(network_route.packet_overhead); - - worker_thread_->PostTask(SafeTask( - task_safety_.flag(), - [this, name = std::string(transport_name), route = network_route] { - RTC_DCHECK_RUN_ON(worker_thread_); - call_->GetTransportControllerSend()->OnNetworkRouteChanged(name, route); - })); -} - -bool WebRtcVoiceSendChannel2::MuteStream(uint32_t ssrc, bool muted) { - RTC_DCHECK_RUN_ON(worker_thread_); - const auto it = send_streams_.find(ssrc); - if (it == send_streams_.end()) { - RTC_LOG(LS_WARNING) << "The specified ssrc " << ssrc << " is not in use."; - return false; - } - it->second->SetMuted(muted); - - // TODO(solenberg): - // We set the AGC to mute state only when all the channels are muted. - // This implementation is not ideal, instead we should signal the AGC when - // the mic channel is muted/unmuted. We can't do it today because there - // is no good way to know which stream is mapping to the mic channel. - bool all_muted = muted; - for (const auto& kv : send_streams_) { - all_muted = all_muted && kv.second->muted(); - } - webrtc::AudioProcessing* ap = engine()->apm(); - if (ap) { - ap->set_output_will_be_muted(all_muted); - } - - // Notfy the AudioState that the mute state has updated. - engine_->audio_state()->OnMuteStreamChanged(); - - return true; -} - -bool WebRtcVoiceSendChannel2::SetMaxSendBitrate(int bps) { - RTC_LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetMaxSendBitrate."; - max_send_bitrate_bps_ = bps; - bool success = true; - for (const auto& kv : send_streams_) { - if (!kv.second->SetMaxSendBitrate(max_send_bitrate_bps_)) { - success = false; - } - } - return success; -} - -void WebRtcVoiceSendChannel2::OnReadyToSend(bool ready) { - RTC_DCHECK_RUN_ON(&network_thread_checker_); - RTC_LOG(LS_VERBOSE) << "OnReadyToSend: " << (ready ? "Ready." : "Not ready."); - call_->SignalChannelNetworkState( - webrtc::MediaType::AUDIO, - ready ? webrtc::kNetworkUp : webrtc::kNetworkDown); -} - -bool WebRtcVoiceSendChannel2::GetStats(VoiceMediaSendInfo* info) { - TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::GetSendStats"); - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_DCHECK(info); - - // Get SSRC and stats for each sender. 
- // With separate send and receive channels, we expect GetStats to be called on - // both, and accumulate info, but only one channel (the send one) should have - // senders. - RTC_DCHECK(info->senders.size() == 0U || send_streams_.size() == 0); - for (const auto& stream : send_streams_) { - webrtc::AudioSendStream::Stats stats = stream.second->GetStats(false); - VoiceSenderInfo sinfo; - sinfo.add_ssrc(stats.local_ssrc); - sinfo.payload_bytes_sent = stats.payload_bytes_sent; - sinfo.header_and_padding_bytes_sent = stats.header_and_padding_bytes_sent; - sinfo.retransmitted_bytes_sent = stats.retransmitted_bytes_sent; - sinfo.packets_sent = stats.packets_sent; - sinfo.total_packet_send_delay = stats.total_packet_send_delay; - sinfo.retransmitted_packets_sent = stats.retransmitted_packets_sent; - sinfo.packets_lost = stats.packets_lost; - sinfo.fraction_lost = stats.fraction_lost; - sinfo.nacks_received = stats.nacks_received; - sinfo.target_bitrate = stats.target_bitrate_bps; - sinfo.codec_name = stats.codec_name; - sinfo.codec_payload_type = stats.codec_payload_type; - sinfo.jitter_ms = stats.jitter_ms; - sinfo.rtt_ms = stats.rtt_ms; - sinfo.audio_level = stats.audio_level; - sinfo.total_input_energy = stats.total_input_energy; - sinfo.total_input_duration = stats.total_input_duration; - sinfo.ana_statistics = stats.ana_statistics; - sinfo.apm_statistics = stats.apm_statistics; - sinfo.report_block_datas = std::move(stats.report_block_datas); - - auto encodings = stream.second->rtp_parameters().encodings; - if (!encodings.empty()) { - sinfo.active = encodings[0].active; - } - - info->senders.push_back(sinfo); - } - - FillSendCodecStats(info); - - return true; -} - -void WebRtcVoiceSendChannel2::FillSendCodecStats( - VoiceMediaSendInfo* voice_media_info) { - for (const auto& sender : voice_media_info->senders) { - auto codec = absl::c_find_if(send_codecs_, [&sender](const AudioCodec& c) { - return sender.codec_payload_type && *sender.codec_payload_type == c.id; - }); - if (codec != send_codecs_.end()) { - voice_media_info->send_codecs.insert( - std::make_pair(codec->id, codec->ToCodecParameters())); - } - } -} - -void WebRtcVoiceSendChannel2::SetEncoderToPacketizerFrameTransformer( - uint32_t ssrc, - rtc::scoped_refptr frame_transformer) { - RTC_DCHECK_RUN_ON(worker_thread_); - auto matching_stream = send_streams_.find(ssrc); - if (matching_stream == send_streams_.end()) { - RTC_LOG(LS_INFO) << "Attempting to set frame transformer for SSRC:" << ssrc - << " which doesn't exist."; - return; - } - matching_stream->second->SetEncoderToPacketizerFrameTransformer( - std::move(frame_transformer)); -} - -webrtc::RtpParameters WebRtcVoiceSendChannel2::GetRtpSendParameters( - uint32_t ssrc) const { - RTC_DCHECK_RUN_ON(worker_thread_); - auto it = send_streams_.find(ssrc); - if (it == send_streams_.end()) { - RTC_LOG(LS_WARNING) << "Attempting to get RTP send parameters for stream " - "with ssrc " - << ssrc << " which doesn't exist."; - return webrtc::RtpParameters(); - } - - webrtc::RtpParameters rtp_params = it->second->rtp_parameters(); - // Need to add the common list of codecs to the send stream-specific - // RTP parameters. 
- for (const AudioCodec& codec : send_codecs_) { - rtp_params.codecs.push_back(codec.ToCodecParameters()); - } - return rtp_params; -} - -webrtc::RTCError WebRtcVoiceSendChannel2::SetRtpSendParameters( - uint32_t ssrc, const webrtc::RtpParameters& parameters, - webrtc::SetParametersCallback callback) { - RTC_DCHECK_RUN_ON(worker_thread_); - auto it = send_streams_.find(ssrc); - if (it == send_streams_.end()) { - RTC_LOG(LS_WARNING) << "Attempting to set RTP send parameters for stream " - "with ssrc " - << ssrc << " which doesn't exist."; - return webrtc::InvokeSetParametersCallback( - callback, webrtc::RTCError(webrtc::RTCErrorType::INTERNAL_ERROR)); - } - - // TODO(deadbeef): Handle setting parameters with a list of codecs in a - // different order (which should change the send codec). - webrtc::RtpParameters current_parameters = GetRtpSendParameters(ssrc); - if (current_parameters.codecs != parameters.codecs) { - RTC_DLOG(LS_ERROR) << "Using SetParameters to change the set of codecs " - "is not currently supported."; - return webrtc::InvokeSetParametersCallback( - callback, webrtc::RTCError(webrtc::RTCErrorType::INTERNAL_ERROR)); - } - - if (!parameters.encodings.empty()) { - // Note that these values come from: - // https://tools.ietf.org/html/draft-ietf-tsvwg-rtcweb-qos-16#section-5 - rtc::DiffServCodePoint new_dscp = rtc::DSCP_DEFAULT; - switch (parameters.encodings[0].network_priority) { - case webrtc::Priority::kVeryLow: - new_dscp = rtc::DSCP_CS1; - break; - case webrtc::Priority::kLow: - new_dscp = rtc::DSCP_DEFAULT; - break; - case webrtc::Priority::kMedium: - new_dscp = rtc::DSCP_EF; - break; - case webrtc::Priority::kHigh: - new_dscp = rtc::DSCP_EF; - break; - } - SetPreferredDscp(new_dscp); - - absl::optional send_codec = GetSendCodec(); - // Since we validate that all layers have the same value, we can just check - // the first layer. - // TODO(orphis): Support mixed-codec simulcast - if (parameters.encodings[0].codec && send_codec && - !send_codec->MatchesRtpCodec(*parameters.encodings[0].codec)) { - RTC_LOG(LS_VERBOSE) << "Trying to change codec to " - << parameters.encodings[0].codec->name; - auto matched_codec = - absl::c_find_if(send_codecs_, [&](auto negotiated_codec) { - return negotiated_codec.MatchesRtpCodec( - *parameters.encodings[0].codec); - }); - - if (matched_codec == send_codecs_.end()) { - return webrtc::InvokeSetParametersCallback( - callback, webrtc::RTCError( - webrtc::RTCErrorType::INVALID_MODIFICATION, - "Attempted to use an unsupported codec for layer 0")); - } - - SetSendCodecs(send_codecs_, *matched_codec); - } - } - - // TODO(minyue): The following legacy actions go into - // `WebRtcAudioSendStream::SetRtpParameters()` which is called at the end, - // though there are two difference: - // 1. `WebRtcVoiceMediaChannel::SetChannelSendParameters()` only calls - // `SetSendCodec` while `WebRtcAudioSendStream::SetRtpParameters()` calls - // `SetSendCodecs`. The outcome should be the same. - // 2. AudioSendStream can be recreated. - - // Codecs are handled at the WebRtcVoiceMediaChannel level. 
- webrtc::RtpParameters reduced_params = parameters; - reduced_params.codecs.clear(); - return it->second->SetRtpParameters(reduced_params, std::move(callback)); -} - -// -------------------------- WebRtcVoiceReceiveChannel2 ---------------------- - -class WebRtcVoiceReceiveChannel2::WebRtcAudioReceiveStream { - public: - WebRtcAudioReceiveStream(webrtc::AudioReceiveStreamInterface::Config config, - webrtc::Call* call) - : call_(call), stream_(call_->CreateAudioReceiveStream(config)) { - RTC_DCHECK(call); - RTC_DCHECK(stream_); - } - - WebRtcAudioReceiveStream() = delete; - WebRtcAudioReceiveStream(const WebRtcAudioReceiveStream&) = delete; - WebRtcAudioReceiveStream& operator=(const WebRtcAudioReceiveStream&) = delete; - - ~WebRtcAudioReceiveStream() { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - call_->DestroyAudioReceiveStream(stream_); - } - - webrtc::AudioReceiveStreamInterface& stream() { - RTC_DCHECK(stream_); - return *stream_; - } - - void SetFrameDecryptor( - rtc::scoped_refptr frame_decryptor) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - stream_->SetFrameDecryptor(std::move(frame_decryptor)); - } - - void SetUseNack(bool use_nack) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - stream_->SetNackHistory(use_nack ? kNackRtpHistoryMs : 0); - } - - void SetNonSenderRttMeasurement(bool enabled) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - stream_->SetNonSenderRttMeasurement(enabled); - } - - // Set a new payload type -> decoder map. - void SetDecoderMap(const std::map& decoder_map) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - stream_->SetDecoderMap(decoder_map); - } - - webrtc::AudioReceiveStreamInterface::Stats GetStats( - bool get_and_clear_legacy_stats) const { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - return stream_->GetStats(get_and_clear_legacy_stats); - } - - void SetRawAudioSink(std::unique_ptr sink) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - // Need to update the stream's sink first; once raw_audio_sink_ is - // reassigned, whatever was in there before is destroyed. 
- stream_->SetSink(sink.get()); - raw_audio_sink_ = std::move(sink); - } - - void SetOutputVolume(double volume) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - stream_->SetGain(volume); - } - - void SetPlayout(bool playout) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - if (playout) { - stream_->Start(); - } else { - stream_->Stop(); - } - } - - bool SetBaseMinimumPlayoutDelayMs(int delay_ms) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - if (stream_->SetBaseMinimumPlayoutDelayMs(delay_ms)) return true; - - RTC_LOG(LS_ERROR) << "Failed to SetBaseMinimumPlayoutDelayMs" - " on AudioReceiveStreamInterface on SSRC=" - << stream_->remote_ssrc() - << " with delay_ms=" << delay_ms; - return false; - } - - int GetBaseMinimumPlayoutDelayMs() const { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - return stream_->GetBaseMinimumPlayoutDelayMs(); - } - - std::vector GetSources() { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - return stream_->GetSources(); - } - - void SetDepacketizerToDecoderFrameTransformer( - rtc::scoped_refptr frame_transformer) { - RTC_DCHECK_RUN_ON(&worker_thread_checker_); - stream_->SetDepacketizerToDecoderFrameTransformer(frame_transformer); - } - - private: - webrtc::SequenceChecker worker_thread_checker_; - webrtc::Call* call_ = nullptr; - webrtc::AudioReceiveStreamInterface* const stream_ = nullptr; - std::unique_ptr raw_audio_sink_ - RTC_GUARDED_BY(worker_thread_checker_); -}; - -WebRtcVoiceReceiveChannel2::WebRtcVoiceReceiveChannel2( - CustomWebRtcVoiceEngine* engine, const MediaConfig& config, - const AudioOptions& options, const webrtc::CryptoOptions& crypto_options, - webrtc::Call* call, webrtc::AudioCodecPairId codec_pair_id) - : MediaChannelUtil(call->network_thread(), config.enable_dscp), - worker_thread_(call->worker_thread()), - engine_(engine), - call_(call), - audio_config_(config.audio), - codec_pair_id_(codec_pair_id), - crypto_options_(crypto_options) { - RTC_LOG(LS_VERBOSE) - << "WebRtcVoiceReceiveChannel2::WebRtcVoiceReceiveChannel2"; - RTC_DCHECK(call); - SetOptions(options); -} - -WebRtcVoiceReceiveChannel2::~WebRtcVoiceReceiveChannel2() { - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_DLOG(LS_VERBOSE) - << "WebRtcVoiceReceiveChannel2::~WebRtcVoiceReceiveChannel2"; - // TODO(solenberg): Should be able to delete the streams directly, without - // going through RemoveNnStream(), once stream objects handle - // all (de)configuration. - while (!recv_streams_.empty()) { - RemoveRecvStream(recv_streams_.begin()->first); - } -} - -bool WebRtcVoiceReceiveChannel2::SetReceiverParameters( - const AudioReceiverParameters& params) { - TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::SetReceiverParameters"); - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetReceiverParameters: " - << params.ToString(); - // TODO(pthatcher): Refactor this to be more clean now that we have - // all the information at once. 
- - if (!SetRecvCodecs(params.codecs)) { - return false; - } - - if (!ValidateRtpExtensions(params.extensions, recv_rtp_extensions_)) { - return false; - } - std::vector filtered_extensions = FilterRtpExtensions( - params.extensions, webrtc::RtpExtension::IsSupportedForAudio, false, - call_->trials()); - if (recv_rtp_extensions_ != filtered_extensions) { - recv_rtp_extensions_.swap(filtered_extensions); - recv_rtp_extension_map_ = - webrtc::RtpHeaderExtensionMap(recv_rtp_extensions_); - } - return true; -} - -webrtc::RtpParameters WebRtcVoiceReceiveChannel2::GetRtpReceiverParameters( - uint32_t ssrc) const { - RTC_DCHECK_RUN_ON(worker_thread_); - webrtc::RtpParameters rtp_params; - auto it = recv_streams_.find(ssrc); - if (it == recv_streams_.end()) { - RTC_LOG(LS_WARNING) - << "Attempting to get RTP receive parameters for stream " - "with ssrc " - << ssrc << " which doesn't exist."; - return webrtc::RtpParameters(); - } - rtp_params.encodings.emplace_back(); - rtp_params.encodings.back().ssrc = it->second->stream().remote_ssrc(); - rtp_params.header_extensions = recv_rtp_extensions_; - - for (const AudioCodec& codec : recv_codecs_) { - rtp_params.codecs.push_back(codec.ToCodecParameters()); - } - return rtp_params; -} - -webrtc::RtpParameters -WebRtcVoiceReceiveChannel2::GetDefaultRtpReceiveParameters() const { - RTC_DCHECK_RUN_ON(worker_thread_); - webrtc::RtpParameters rtp_params; - if (!default_sink_) { - // Getting parameters on a default, unsignaled audio receive stream but - // because we've not configured to receive such a stream, `encodings` is - // empty. - return rtp_params; - } - rtp_params.encodings.emplace_back(); - - for (const AudioCodec& codec : recv_codecs_) { - rtp_params.codecs.push_back(codec.ToCodecParameters()); - } - return rtp_params; -} - -bool WebRtcVoiceReceiveChannel2::SetOptions(const AudioOptions& options) { - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_LOG(LS_INFO) << "Setting voice channel options: " << options.ToString(); - - // We retain all of the existing options, and apply the given ones - // on top. This means there is no way to "clear" options such that - // they go back to the engine default. - options_.SetAll(options); - engine()->ApplyOptions(options_); - - RTC_LOG(LS_INFO) << "Set voice receive channel options. Current options: " - << options_.ToString(); - return true; -} - -bool WebRtcVoiceReceiveChannel2::SetRecvCodecs( - const std::vector& codecs) { - RTC_DCHECK_RUN_ON(worker_thread_); - - // Set the payload types to be used for incoming media. - RTC_LOG(LS_INFO) << "Setting receive voice codecs."; - - if (!VerifyUniquePayloadTypes(codecs)) { - RTC_LOG(LS_ERROR) << "Codec payload types overlap."; - return false; - } - - // Create a payload type -> SdpAudioFormat map with all the decoders. Fail - // unless the factory claims to support all decoders. - std::map decoder_map; - for (const AudioCodec& codec : codecs) { - // Log a warning if a codec's payload type is changing. This used to be - // treated as an error. It's abnormal, but not really illegal. 
- absl::optional old_codec = FindCodec(recv_codecs_, codec); - if (old_codec && old_codec->id != codec.id) { - RTC_LOG(LS_WARNING) << codec.name << " mapped to a second payload type (" - << codec.id << ", was already mapped to " - << old_codec->id << ")"; - } - auto format = AudioCodecToSdpAudioFormat(codec); - if (!IsCodec(codec, kCnCodecName) && !IsCodec(codec, kDtmfCodecName) && - !IsCodec(codec, kRedCodecName) && - !engine()->decoder_factory_->IsSupportedDecoder(format)) { - RTC_LOG(LS_ERROR) << "Unsupported codec: " << rtc::ToString(format); - return false; - } - // We allow adding new codecs but don't allow changing the payload type of - // codecs that are already configured since we might already be receiving - // packets with that payload type. See RFC3264, Section 8.3.2. - // TODO(deadbeef): Also need to check for clashes with previously mapped - // payload types, and not just currently mapped ones. For example, this - // should be illegal: - // 1. {100: opus/48000/2, 101: ISAC/16000} - // 2. {100: opus/48000/2} - // 3. {100: opus/48000/2, 101: ISAC/32000} - // Though this check really should happen at a higher level, since this - // conflict could happen between audio and video codecs. - auto existing = decoder_map_.find(codec.id); - if (existing != decoder_map_.end() && !existing->second.Matches(format)) { - RTC_LOG(LS_ERROR) << "Attempting to use payload type " << codec.id - << " for " << codec.name - << ", but it is already used for " - << existing->second.name; - return false; - } - decoder_map.insert({codec.id, std::move(format)}); - } - - if (decoder_map == decoder_map_) { - // There's nothing new to configure. - return true; - } - - bool playout_enabled = playout_; - // Receive codecs can not be changed while playing. So we temporarily - // pause playout. - SetPlayout(false); - RTC_DCHECK(!playout_); - - decoder_map_ = std::move(decoder_map); - for (auto& kv : recv_streams_) { - kv.second->SetDecoderMap(decoder_map_); - } - - recv_codecs_ = codecs; - - SetPlayout(playout_enabled); - RTC_DCHECK_EQ(playout_, playout_enabled); - - return true; -} - -void WebRtcVoiceReceiveChannel2::SetReceiveNackEnabled(bool enabled) { - // Check if the NACK status has changed on the - // preferred send codec, and in that case reconfigure all receive streams. - if (recv_nack_enabled_ != enabled) { - RTC_LOG(LS_INFO) << "Changing NACK status on receive streams."; - recv_nack_enabled_ = enabled; - for (auto& kv : recv_streams_) { - kv.second->SetUseNack(recv_nack_enabled_); - } - } -} - -void WebRtcVoiceReceiveChannel2::SetReceiveNonSenderRttEnabled(bool enabled) { - // Check if the receive-side RTT status has changed on the preferred send - // codec, in that case reconfigure all receive streams. 
- if (enable_non_sender_rtt_ != enabled) { - RTC_LOG(LS_INFO) << "Changing receive-side RTT status on receive streams."; - enable_non_sender_rtt_ = enabled; - for (auto& kv : recv_streams_) { - kv.second->SetNonSenderRttMeasurement(enable_non_sender_rtt_); - } - } -} - -void WebRtcVoiceReceiveChannel2::SetPlayout(bool playout) { - TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::SetPlayout"); - RTC_DCHECK_RUN_ON(worker_thread_); - if (playout_ == playout) { - return; - } - - for (const auto& kv : recv_streams_) { - kv.second->SetPlayout(playout); - } - playout_ = playout; -} - -bool WebRtcVoiceReceiveChannel2::AddRecvStream(const StreamParams& sp) { - TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::AddRecvStream"); - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_LOG(LS_INFO) << "AddRecvStream: " << sp.ToString(); - - if (!sp.has_ssrcs()) { - // This is a StreamParam with unsignaled SSRCs. Store it, so it can be used - // later when we know the SSRCs on the first packet arrival. - unsignaled_stream_params_ = sp; - return true; - } - - if (!ValidateStreamParams(sp)) { - return false; - } - - const uint32_t ssrc = sp.first_ssrc(); - - // If this stream was previously received unsignaled, we promote it, possibly - // updating the sync group if stream ids have changed. - if (MaybeDeregisterUnsignaledRecvStream(ssrc)) { - auto stream_ids = sp.stream_ids(); - std::string sync_group = stream_ids.empty() ? std::string() : stream_ids[0]; - call_->OnUpdateSyncGroup(recv_streams_[ssrc]->stream(), - std::move(sync_group)); - return true; - } - - if (recv_streams_.find(ssrc) != recv_streams_.end()) { - RTC_LOG(LS_ERROR) << "Stream already exists with ssrc " << ssrc; - return false; - } - - // Create a new channel for receiving audio data. - auto config = BuildReceiveStreamConfig( - ssrc, receiver_reports_ssrc_, recv_nack_enabled_, enable_non_sender_rtt_, - sp.stream_ids(), recv_rtp_extensions_, transport(), - engine()->decoder_factory_, decoder_map_, codec_pair_id_, - engine()->audio_jitter_buffer_max_packets_, - engine()->audio_jitter_buffer_fast_accelerate_, - engine()->audio_jitter_buffer_min_delay_ms_, unsignaled_frame_decryptor_, - crypto_options_, unsignaled_frame_transformer_); - - recv_streams_.insert(std::make_pair( - ssrc, new WebRtcAudioReceiveStream(std::move(config), call_))); - recv_streams_[ssrc]->SetPlayout(playout_); - - return true; -} - -bool WebRtcVoiceReceiveChannel2::RemoveRecvStream(uint32_t ssrc) { - TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::RemoveRecvStream"); - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_LOG(LS_INFO) << "RemoveRecvStream: " << ssrc; - - const auto it = recv_streams_.find(ssrc); - if (it == recv_streams_.end()) { - RTC_LOG(LS_WARNING) << "Try to remove stream with ssrc " << ssrc - << " which doesn't exist."; - return false; - } - - MaybeDeregisterUnsignaledRecvStream(ssrc); - - it->second->SetRawAudioSink(nullptr); - delete it->second; - recv_streams_.erase(it); - return true; -} - -void WebRtcVoiceReceiveChannel2::ResetUnsignaledRecvStream() { - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_LOG(LS_INFO) << "ResetUnsignaledRecvStream."; - unsignaled_stream_params_ = StreamParams(); - // Create a copy since RemoveRecvStream will modify `unsignaled_recv_ssrcs_`. 
- std::vector to_remove = unsignaled_recv_ssrcs_; - for (uint32_t ssrc : to_remove) { - RemoveRecvStream(ssrc); - } -} - -absl::optional WebRtcVoiceReceiveChannel2::GetUnsignaledSsrc() const { - if (unsignaled_recv_ssrcs_.empty()) { - return absl::nullopt; - } - // In the event of multiple unsignaled ssrcs, the last in the vector will be - // the most recent one (the one forwarded to the MediaStreamTrack). - return unsignaled_recv_ssrcs_.back(); -} - -void WebRtcVoiceReceiveChannel2::ChooseReceiverReportSsrc( - const std::set& choices) { - // Don't change SSRC if set is empty. Note that this differs from - // the behavior of video. - if (choices.empty()) { - return; - } - if (choices.find(receiver_reports_ssrc_) != choices.end()) { - return; - } - uint32_t ssrc = *(choices.begin()); - receiver_reports_ssrc_ = ssrc; - for (auto& kv : recv_streams_) { - call_->OnLocalSsrcUpdated(kv.second->stream(), ssrc); - } -} - -// Not implemented. -// TODO(https://crbug.com/webrtc/12676): Implement a fix for the unsignalled -// SSRC race that can happen when an m= section goes from receiving to not -// receiving. -void WebRtcVoiceReceiveChannel2::OnDemuxerCriteriaUpdatePending() {} -void WebRtcVoiceReceiveChannel2::OnDemuxerCriteriaUpdateComplete() {} - -bool WebRtcVoiceReceiveChannel2::SetOutputVolume(uint32_t ssrc, double volume) { - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_LOG(LS_INFO) << rtc::StringFormat("WRVMC::%s({ssrc=%u}, {volume=%.2f})", - __func__, ssrc, volume); - const auto it = recv_streams_.find(ssrc); - if (it == recv_streams_.end()) { - RTC_LOG(LS_WARNING) << rtc::StringFormat( - "WRVMC::%s => (WARNING: no receive stream for SSRC %u)", __func__, - ssrc); - return false; - } - it->second->SetOutputVolume(volume); - RTC_LOG(LS_INFO) << rtc::StringFormat( - "WRVMC::%s => (stream with SSRC %u now uses volume %.2f)", __func__, ssrc, - volume); - return true; -} - -bool WebRtcVoiceReceiveChannel2::SetDefaultOutputVolume(double volume) { - RTC_DCHECK_RUN_ON(worker_thread_); - default_recv_volume_ = volume; - for (uint32_t ssrc : unsignaled_recv_ssrcs_) { - const auto it = recv_streams_.find(ssrc); - if (it == recv_streams_.end()) { - RTC_LOG(LS_WARNING) << "SetDefaultOutputVolume: no recv stream " << ssrc; - return false; - } - it->second->SetOutputVolume(volume); - RTC_LOG(LS_INFO) << "SetDefaultOutputVolume() to " << volume - << " for recv stream with ssrc " << ssrc; - } - return true; -} - -bool WebRtcVoiceReceiveChannel2::SetBaseMinimumPlayoutDelayMs(uint32_t ssrc, - int delay_ms) { - RTC_DCHECK_RUN_ON(worker_thread_); - std::vector ssrcs(1, ssrc); - // SSRC of 0 represents the default receive stream. - if (ssrc == 0) { - default_recv_base_minimum_delay_ms_ = delay_ms; - ssrcs = unsignaled_recv_ssrcs_; - } - for (uint32_t ssrc : ssrcs) { - const auto it = recv_streams_.find(ssrc); - if (it == recv_streams_.end()) { - RTC_LOG(LS_WARNING) << "SetBaseMinimumPlayoutDelayMs: no recv stream " - << ssrc; - return false; - } - it->second->SetBaseMinimumPlayoutDelayMs(delay_ms); - RTC_LOG(LS_INFO) << "SetBaseMinimumPlayoutDelayMs() to " << delay_ms - << " for recv stream with ssrc " << ssrc; - } - return true; -} - -absl::optional WebRtcVoiceReceiveChannel2::GetBaseMinimumPlayoutDelayMs( - uint32_t ssrc) const { - // SSRC of 0 represents the default receive stream. 
- if (ssrc == 0) { - return default_recv_base_minimum_delay_ms_; - } - - const auto it = recv_streams_.find(ssrc); - - if (it != recv_streams_.end()) { - return it->second->GetBaseMinimumPlayoutDelayMs(); - } - return absl::nullopt; -} - -void WebRtcVoiceReceiveChannel2::SetFrameDecryptor( - uint32_t ssrc, - rtc::scoped_refptr frame_decryptor) { - RTC_DCHECK_RUN_ON(worker_thread_); - auto matching_stream = recv_streams_.find(ssrc); - if (matching_stream != recv_streams_.end()) { - matching_stream->second->SetFrameDecryptor(frame_decryptor); - } - // Handle unsignaled frame decryptors. - if (ssrc == 0) { - unsignaled_frame_decryptor_ = frame_decryptor; - } -} - -void WebRtcVoiceReceiveChannel2::OnPacketReceived( - const webrtc::RtpPacketReceived& packet) { - RTC_DCHECK_RUN_ON(&network_thread_checker_); - - // TODO(bugs.webrtc.org/11993): This code is very similar to what - // WebRtcVideoChannel::OnPacketReceived does. For maintainability and - // consistency it would be good to move the interaction with - // call_->Receiver() to a common implementation and provide a callback on - // the worker thread for the exception case (DELIVERY_UNKNOWN_SSRC) and - // how retry is attempted. - worker_thread_->PostTask( - SafeTask(task_safety_.flag(), [this, packet = packet]() mutable { - RTC_DCHECK_RUN_ON(worker_thread_); - - // TODO(bugs.webrtc.org/7135): extensions in `packet` is currently set - // in RtpTransport and does not neccessarily include extensions specific - // to this channel/MID. Also see comment in - // BaseChannel::MaybeUpdateDemuxerAndRtpExtensions_w. - // It would likely be good if extensions where merged per BUNDLE and - // applied directly in RtpTransport::DemuxPacket; - packet.IdentifyExtensions(recv_rtp_extension_map_); - if (!packet.arrival_time().IsFinite()) { - packet.set_arrival_time(webrtc::Timestamp::Micros(rtc::TimeMicros())); - } - - call_->Receiver()->DeliverRtpPacket( - webrtc::MediaType::AUDIO, std::move(packet), - absl::bind_front( - &WebRtcVoiceReceiveChannel2::MaybeCreateDefaultReceiveStream, - this)); - })); -} - -bool WebRtcVoiceReceiveChannel2::MaybeCreateDefaultReceiveStream( - const webrtc::RtpPacketReceived& packet) { - // Create an unsignaled receive stream for this previously not received - // ssrc. If there already is N unsignaled receive streams, delete the - // oldest. See: https://bugs.chromium.org/p/webrtc/issues/detail?id=5208 - uint32_t ssrc = packet.Ssrc(); - RTC_DCHECK(!absl::c_linear_search(unsignaled_recv_ssrcs_, ssrc)); - - // Add new stream. - StreamParams sp = unsignaled_stream_params_; - sp.ssrcs.push_back(ssrc); - RTC_LOG(LS_INFO) << "Creating unsignaled receive stream for SSRC=" << ssrc; - if (!AddRecvStream(sp)) { - RTC_LOG(LS_WARNING) << "Could not create unsignaled receive stream."; - return false; - } - unsignaled_recv_ssrcs_.push_back(ssrc); - RTC_HISTOGRAM_COUNTS_LINEAR("WebRTC.Audio.NumOfUnsignaledStreams", - unsignaled_recv_ssrcs_.size(), 1, 100, 101); - - // Remove oldest unsignaled stream, if we have too many. 
- if (unsignaled_recv_ssrcs_.size() > kMaxUnsignaledRecvStreams) { - uint32_t remove_ssrc = unsignaled_recv_ssrcs_.front(); - RTC_DLOG(LS_INFO) << "Removing unsignaled receive stream with SSRC=" - << remove_ssrc; - RemoveRecvStream(remove_ssrc); - } - RTC_DCHECK_GE(kMaxUnsignaledRecvStreams, unsignaled_recv_ssrcs_.size()); - - SetOutputVolume(ssrc, default_recv_volume_); - SetBaseMinimumPlayoutDelayMs(ssrc, default_recv_base_minimum_delay_ms_); - - // The default sink can only be attached to one stream at a time, so we hook - // it up to the *latest* unsignaled stream we've seen, in order to support - // the case where the SSRC of one unsignaled stream changes. - if (default_sink_) { - for (uint32_t drop_ssrc : unsignaled_recv_ssrcs_) { - auto it = recv_streams_.find(drop_ssrc); - it->second->SetRawAudioSink(nullptr); - } - std::unique_ptr proxy_sink( - new ProxySink(default_sink_.get())); - SetRawAudioSink(ssrc, std::move(proxy_sink)); - } - return true; -} - -bool WebRtcVoiceReceiveChannel2::GetStats(VoiceMediaReceiveInfo* info, - bool get_and_clear_legacy_stats) { - TRACE_EVENT0("webrtc", "WebRtcVoiceMediaChannel::GetReceiveStats"); - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_DCHECK(info); - - // Get SSRC and stats for each receiver. - RTC_DCHECK_EQ(info->receivers.size(), 0U); - for (const auto& stream : recv_streams_) { - uint32_t ssrc = stream.first; - // When SSRCs are unsignaled, there's only one audio MediaStreamTrack, but - // multiple RTP streams can be received over time (if the SSRC changes for - // whatever reason). We only want the RTCMediaStreamTrackStats to represent - // the stats for the most recent stream (the one whose audio is actually - // routed to the MediaStreamTrack), so here we ignore any unsignaled SSRCs - // except for the most recent one (last in the vector). This is somewhat of - // a hack, and means you don't get *any* stats for these inactive streams, - // but it's slightly better than the previous behavior, which was "highest - // SSRC wins". 
- // See: https://bugs.chromium.org/p/webrtc/issues/detail?id=8158 - if (!unsignaled_recv_ssrcs_.empty()) { - auto end_it = --unsignaled_recv_ssrcs_.end(); - if (absl::linear_search(unsignaled_recv_ssrcs_.begin(), end_it, ssrc)) { - continue; - } - } - webrtc::AudioReceiveStreamInterface::Stats stats = - stream.second->GetStats(get_and_clear_legacy_stats); - VoiceReceiverInfo rinfo; - rinfo.add_ssrc(stats.remote_ssrc); - rinfo.payload_bytes_received = stats.payload_bytes_received; - rinfo.header_and_padding_bytes_received = - stats.header_and_padding_bytes_received; - rinfo.packets_received = stats.packets_received; - rinfo.fec_packets_received = stats.fec_packets_received; - rinfo.fec_packets_discarded = stats.fec_packets_discarded; - rinfo.packets_lost = stats.packets_lost; - rinfo.packets_discarded = stats.packets_discarded; - rinfo.codec_name = stats.codec_name; - rinfo.codec_payload_type = stats.codec_payload_type; - rinfo.jitter_ms = stats.jitter_ms; - rinfo.jitter_buffer_ms = stats.jitter_buffer_ms; - rinfo.jitter_buffer_preferred_ms = stats.jitter_buffer_preferred_ms; - rinfo.delay_estimate_ms = stats.delay_estimate_ms; - rinfo.audio_level = stats.audio_level; - rinfo.total_output_energy = stats.total_output_energy; - rinfo.total_samples_received = stats.total_samples_received; - rinfo.total_output_duration = stats.total_output_duration; - rinfo.concealed_samples = stats.concealed_samples; - rinfo.silent_concealed_samples = stats.silent_concealed_samples; - rinfo.concealment_events = stats.concealment_events; - rinfo.jitter_buffer_delay_seconds = stats.jitter_buffer_delay_seconds; - rinfo.jitter_buffer_emitted_count = stats.jitter_buffer_emitted_count; - rinfo.jitter_buffer_target_delay_seconds = - stats.jitter_buffer_target_delay_seconds; - rinfo.jitter_buffer_minimum_delay_seconds = - stats.jitter_buffer_minimum_delay_seconds; - rinfo.inserted_samples_for_deceleration = - stats.inserted_samples_for_deceleration; - rinfo.removed_samples_for_acceleration = - stats.removed_samples_for_acceleration; - rinfo.expand_rate = stats.expand_rate; - rinfo.speech_expand_rate = stats.speech_expand_rate; - rinfo.secondary_decoded_rate = stats.secondary_decoded_rate; - rinfo.secondary_discarded_rate = stats.secondary_discarded_rate; - rinfo.accelerate_rate = stats.accelerate_rate; - rinfo.preemptive_expand_rate = stats.preemptive_expand_rate; - rinfo.delayed_packet_outage_samples = stats.delayed_packet_outage_samples; - rinfo.decoding_calls_to_silence_generator = - stats.decoding_calls_to_silence_generator; - rinfo.decoding_calls_to_neteq = stats.decoding_calls_to_neteq; - rinfo.decoding_normal = stats.decoding_normal; - rinfo.decoding_plc = stats.decoding_plc; - rinfo.decoding_codec_plc = stats.decoding_codec_plc; - rinfo.decoding_cng = stats.decoding_cng; - rinfo.decoding_plc_cng = stats.decoding_plc_cng; - rinfo.decoding_muted_output = stats.decoding_muted_output; - rinfo.capture_start_ntp_time_ms = stats.capture_start_ntp_time_ms; - rinfo.last_packet_received = stats.last_packet_received; - rinfo.estimated_playout_ntp_timestamp_ms = - stats.estimated_playout_ntp_timestamp_ms; - rinfo.jitter_buffer_flushes = stats.jitter_buffer_flushes; - rinfo.relative_packet_arrival_delay_seconds = - stats.relative_packet_arrival_delay_seconds; - rinfo.interruption_count = stats.interruption_count; - rinfo.total_interruption_duration_ms = stats.total_interruption_duration_ms; - rinfo.last_sender_report_timestamp_ms = - stats.last_sender_report_timestamp_ms; - rinfo.last_sender_report_remote_timestamp_ms = - 
stats.last_sender_report_remote_timestamp_ms; - rinfo.sender_reports_packets_sent = stats.sender_reports_packets_sent; - rinfo.sender_reports_bytes_sent = stats.sender_reports_bytes_sent; - rinfo.sender_reports_reports_count = stats.sender_reports_reports_count; - rinfo.round_trip_time = stats.round_trip_time; - rinfo.round_trip_time_measurements = stats.round_trip_time_measurements; - rinfo.total_round_trip_time = stats.total_round_trip_time; - - if (recv_nack_enabled_) { - rinfo.nacks_sent = stats.nacks_sent; - } - - info->receivers.push_back(rinfo); - } - - FillReceiveCodecStats(info); - - info->device_underrun_count = engine_->adm()->GetPlayoutUnderrunCount(); - - return true; -} - -void WebRtcVoiceReceiveChannel2::FillReceiveCodecStats( - VoiceMediaReceiveInfo* voice_media_info) { - for (const auto& receiver : voice_media_info->receivers) { - auto codec = - absl::c_find_if(recv_codecs_, [&receiver](const AudioCodec& c) { - return receiver.codec_payload_type && - *receiver.codec_payload_type == c.id; - }); - if (codec != recv_codecs_.end()) { - voice_media_info->receive_codecs.insert( - std::make_pair(codec->id, codec->ToCodecParameters())); - } - } -} - -void WebRtcVoiceReceiveChannel2::SetRawAudioSink( - uint32_t ssrc, std::unique_ptr sink) { - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::SetRawAudioSink: ssrc:" - << ssrc << " " << (sink ? "(ptr)" : "NULL"); - const auto it = recv_streams_.find(ssrc); - if (it == recv_streams_.end()) { - RTC_LOG(LS_WARNING) << "SetRawAudioSink: no recv stream " << ssrc; - return; - } - it->second->SetRawAudioSink(std::move(sink)); -} - -void WebRtcVoiceReceiveChannel2::SetDefaultRawAudioSink( - std::unique_ptr sink) { - RTC_DCHECK_RUN_ON(worker_thread_); - RTC_LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::SetDefaultRawAudioSink:"; - if (!unsignaled_recv_ssrcs_.empty()) { - std::unique_ptr proxy_sink( - sink ? new ProxySink(sink.get()) : nullptr); - SetRawAudioSink(unsignaled_recv_ssrcs_.back(), std::move(proxy_sink)); - } - default_sink_ = std::move(sink); -} - -std::vector WebRtcVoiceReceiveChannel2::GetSources( - uint32_t ssrc) const { - auto it = recv_streams_.find(ssrc); - if (it == recv_streams_.end()) { - RTC_LOG(LS_ERROR) << "Attempting to get contributing sources for SSRC:" - << ssrc << " which doesn't exist."; - return std::vector(); - } - return it->second->GetSources(); -} - -void WebRtcVoiceReceiveChannel2::SetDepacketizerToDecoderFrameTransformer( - uint32_t ssrc, - rtc::scoped_refptr frame_transformer) { - RTC_DCHECK_RUN_ON(worker_thread_); - if (ssrc == 0) { - // If the receiver is unsignaled, save the frame transformer and set it when - // the stream is associated with an ssrc. 
- unsignaled_frame_transformer_ = std::move(frame_transformer); - return; - } - - auto matching_stream = recv_streams_.find(ssrc); - if (matching_stream == recv_streams_.end()) { - RTC_LOG(LS_INFO) << "Attempting to set frame transformer for SSRC:" << ssrc - << " which doesn't exist."; - return; - } - matching_stream->second->SetDepacketizerToDecoderFrameTransformer( - std::move(frame_transformer)); -} - -bool WebRtcVoiceReceiveChannel2::MaybeDeregisterUnsignaledRecvStream( - uint32_t ssrc) { - RTC_DCHECK_RUN_ON(worker_thread_); - auto it = absl::c_find(unsignaled_recv_ssrcs_, ssrc); - if (it != unsignaled_recv_ssrcs_.end()) { - unsignaled_recv_ssrcs_.erase(it); - return true; - } - return false; -} -} // namespace cricket diff --git a/src/internal/custom_webrtc_voice_engine.h b/src/internal/custom_webrtc_voice_engine.h deleted file mode 100644 index fee5ecb816..0000000000 --- a/src/internal/custom_webrtc_voice_engine.h +++ /dev/null @@ -1,519 +0,0 @@ -/* - * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved. - * - * Use of this source code is governed by a BSD-style license - * that can be found in the LICENSE file in the root of the source - * tree. An additional intellectual property rights grant can be found - * in the file PATENTS. All contributing project authors may - * be found in the AUTHORS file in the root of the source tree. - */ - -#ifndef INTERNAL_CUSTOM_MEDIA_ENGINE_WEBRTC_VOICE_ENGINE_H_ -#define INTERNAL_CUSTOM_MEDIA_ENGINE_WEBRTC_VOICE_ENGINE_H_ - -#include -#include - -#include -#include -#include -#include -#include -#include - -#include "absl/functional/any_invocable.h" -#include "absl/strings/string_view.h" -#include "absl/types/optional.h" -#include "api/audio/audio_frame_processor.h" -#include "api/audio/audio_mixer.h" -#include "api/audio_codecs/audio_codec_pair_id.h" -#include "api/audio_codecs/audio_decoder_factory.h" -#include "api/audio_codecs/audio_encoder_factory.h" -#include "api/audio_codecs/audio_format.h" -#include "api/audio_options.h" -#include "api/call/audio_sink.h" -#include "api/call/transport.h" -#include "api/crypto/crypto_options.h" -#include "api/crypto/frame_decryptor_interface.h" -#include "api/crypto/frame_encryptor_interface.h" -#include "api/field_trials_view.h" -#include "api/frame_transformer_interface.h" -#include "api/rtc_error.h" -#include "api/rtp_parameters.h" -#include "api/rtp_sender_interface.h" -#include "api/scoped_refptr.h" -#include "api/sequence_checker.h" -#include "api/task_queue/pending_task_safety_flag.h" -#include "api/task_queue/task_queue_base.h" -#include "api/task_queue/task_queue_factory.h" -#include "api/transport/rtp/rtp_source.h" -#include "call/audio_send_stream.h" -#include "call/audio_state.h" -#include "call/call.h" -#include "media/base/codec.h" -#include "media/base/media_channel.h" -#include "media/base/media_channel_impl.h" -#include "media/base/media_config.h" -#include "media/base/media_engine.h" -#include "media/base/rtp_utils.h" -#include "media/base/stream_params.h" -#include "modules/async_audio_processing/async_audio_processing.h" -#include "modules/audio_device/include/audio_device.h" -#include "modules/audio_processing/include/audio_processing.h" -#include "modules/rtp_rtcp/include/rtp_header_extension_map.h" -#include "modules/rtp_rtcp/source/rtp_packet_received.h" -#include "rtc_base/buffer.h" -#include "rtc_base/network/sent_packet.h" -#include "rtc_base/network_route.h" -#include "rtc_base/system/file_wrapper.h" - -namespace webrtc { -class AudioFrameProcessor; - -class 
CreateAudioStateFactory { - public: - virtual ~CreateAudioStateFactory() = default; - virtual rtc::scoped_refptr CreateAudioState( - const webrtc::AudioState::Config& config) = 0; -}; -} // namespace webrtc - -namespace cricket { - -class AudioSource; - -// CustomWebRtcVoiceEngine is a class to be used with CompositeMediaEngine. -// It uses the WebRtc VoiceEngine library for audio handling. -class CustomWebRtcVoiceEngine final : public VoiceEngineInterface { - friend class WebRtcVoiceSendChannel2; - friend class WebRtcVoiceReceiveChannel2; - - public: - CustomWebRtcVoiceEngine( - webrtc::CreateAudioStateFactory* create_audio_state_factory, - webrtc::TaskQueueFactory* task_queue_factory, - webrtc::AudioDeviceModule* adm, - const rtc::scoped_refptr& encoder_factory, - const rtc::scoped_refptr& decoder_factory, - rtc::scoped_refptr audio_mixer, - rtc::scoped_refptr audio_processing, - std::unique_ptr owned_audio_frame_processor, - const webrtc::FieldTrialsView& trials); - - CustomWebRtcVoiceEngine() = delete; - CustomWebRtcVoiceEngine(const CustomWebRtcVoiceEngine&) = delete; - CustomWebRtcVoiceEngine& operator=(const CustomWebRtcVoiceEngine&) = delete; - - ~CustomWebRtcVoiceEngine() override; - - // Does initialization that needs to occur on the worker thread. - void Init() override; - rtc::scoped_refptr GetAudioState() const override; - - std::unique_ptr CreateSendChannel( - webrtc::Call* call, const MediaConfig& config, - const AudioOptions& options, const webrtc::CryptoOptions& crypto_options, - webrtc::AudioCodecPairId codec_pair_id) override; - - std::unique_ptr CreateReceiveChannel( - webrtc::Call* call, const MediaConfig& config, - const AudioOptions& options, const webrtc::CryptoOptions& crypto_options, - webrtc::AudioCodecPairId codec_pair_id) override; - - const std::vector& send_codecs() const override; - const std::vector& recv_codecs() const override; - std::vector GetRtpHeaderExtensions() - const override; - - // Starts AEC dump using an existing file. A maximum file size in bytes can be - // specified. When the maximum file size is reached, logging is stopped and - // the file is closed. If max_size_bytes is set to <= 0, no limit will be - // used. - bool StartAecDump(webrtc::FileWrapper file, int64_t max_size_bytes) override; - - // Stops AEC dump. - void StopAecDump() override; - - absl::optional GetAudioDeviceStats() - override; - // Moved to public so WebRtcVoiceMediaChannel can access it. - webrtc::AudioState* audio_state(); - - private: - // Every option that is "set" will be applied. Every option not "set" will be - // ignored. This allows us to selectively turn on and off different options - // easily at any time. - void ApplyOptions(const AudioOptions& options); - - webrtc::CreateAudioStateFactory* create_audio_state_factory_; - - webrtc::TaskQueueFactory* const task_queue_factory_; - std::unique_ptr - low_priority_worker_queue_; - - webrtc::AudioDeviceModule* adm(); - webrtc::AudioProcessing* apm() const; - - std::vector CollectCodecs( - const std::vector& specs) const; - - webrtc::SequenceChecker signal_thread_checker_{ - webrtc::SequenceChecker::kDetached}; - webrtc::SequenceChecker worker_thread_checker_{ - webrtc::SequenceChecker::kDetached}; - - // The audio device module. - rtc::scoped_refptr adm_; - rtc::scoped_refptr encoder_factory_; - rtc::scoped_refptr decoder_factory_; - rtc::scoped_refptr audio_mixer_; - // The audio processing module. - rtc::scoped_refptr apm_; - // Asynchronous audio processing. 
- std::unique_ptr audio_frame_processor_; - // The primary instance of WebRtc VoiceEngine. - rtc::scoped_refptr audio_state_; - std::vector send_codecs_; - std::vector recv_codecs_; - bool is_dumping_aec_ = false; - bool initialized_ = false; - - // Jitter buffer settings for new streams. - size_t audio_jitter_buffer_max_packets_ = 200; - bool audio_jitter_buffer_fast_accelerate_ = false; - int audio_jitter_buffer_min_delay_ms_ = 0; - - const bool minimized_remsampling_on_mobile_trial_enabled_; -}; - -class WebRtcVoiceSendChannel2 final : public MediaChannelUtil, - public VoiceMediaSendChannelInterface { - public: - WebRtcVoiceSendChannel2(CustomWebRtcVoiceEngine* engine, - const MediaConfig& config, - const AudioOptions& options, - const webrtc::CryptoOptions& crypto_options, - webrtc::Call* call, - webrtc::AudioCodecPairId codec_pair_id); - - WebRtcVoiceSendChannel2() = delete; - WebRtcVoiceSendChannel2(const WebRtcVoiceSendChannel2&) = delete; - WebRtcVoiceSendChannel2& operator=(const WebRtcVoiceSendChannel2&) = delete; - - ~WebRtcVoiceSendChannel2() override; - - MediaType media_type() const override { return MEDIA_TYPE_AUDIO; } - VideoMediaSendChannelInterface* AsVideoSendChannel() override { - RTC_CHECK_NOTREACHED(); - return nullptr; - } - VoiceMediaSendChannelInterface* AsVoiceSendChannel() override { return this; } - - absl::optional GetSendCodec() const override; - - // Functions imported from MediaChannelUtil - void SetInterface(MediaChannelNetworkInterface* iface) override { - MediaChannelUtil::SetInterface(iface); - } - - bool HasNetworkInterface() const override { - return MediaChannelUtil::HasNetworkInterface(); - } - void SetExtmapAllowMixed(bool extmap_allow_mixed) override { - MediaChannelUtil::SetExtmapAllowMixed(extmap_allow_mixed); - } - bool ExtmapAllowMixed() const override { - return MediaChannelUtil::ExtmapAllowMixed(); - } - - const AudioOptions& options() const { return options_; } - - bool SetSenderParameters(const AudioSenderParameter& params) override; - webrtc::RtpParameters GetRtpSendParameters(uint32_t ssrc) const override; - webrtc::RTCError SetRtpSendParameters( - uint32_t ssrc, const webrtc::RtpParameters& parameters, - webrtc::SetParametersCallback callback) override; - - void SetSend(bool send) override; - bool SetAudioSend(uint32_t ssrc, bool enable, const AudioOptions* options, - AudioSource* source) override; - bool AddSendStream(const StreamParams& sp) override; - bool RemoveSendStream(uint32_t ssrc) override; - - void SetSsrcListChangedCallback( - absl::AnyInvocable&)> callback) override; - - // E2EE Frame API - // Set a frame encryptor to a particular ssrc that will intercept all - // outgoing audio payloads frames and attempt to encrypt them and forward the - // result to the packetizer. - void SetFrameEncryptor(uint32_t ssrc, - rtc::scoped_refptr - frame_encryptor) override; - - bool CanInsertDtmf() override; - bool InsertDtmf(uint32_t ssrc, int event, int duration) override; - - void OnPacketSent(const rtc::SentPacket& sent_packet) override; - void OnNetworkRouteChanged(absl::string_view transport_name, - const rtc::NetworkRoute& network_route) override; - void OnReadyToSend(bool ready) override; - bool GetStats(VoiceMediaSendInfo* info) override; - - // Sets a frame transformer between encoder and packetizer, to transform - // encoded frames before sending them out the network. 
- void SetEncoderToPacketizerFrameTransformer( - uint32_t ssrc, - rtc::scoped_refptr frame_transformer) - override; - - bool SenderNackEnabled() const override { - if (!send_codec_spec_) { - return false; - } - return send_codec_spec_->nack_enabled; - } - bool SenderNonSenderRttEnabled() const override { - if (!send_codec_spec_) { - return false; - } - return send_codec_spec_->enable_non_sender_rtt; - } - bool SendCodecHasNack() const override { return SenderNackEnabled(); } - - void SetSendCodecChangedCallback( - absl::AnyInvocable callback) override { - send_codec_changed_callback_ = std::move(callback); - } - - private: - bool SetOptions(const AudioOptions& options); - bool SetSendCodecs(const std::vector& codecs, - absl::optional preferred_codec); - bool SetLocalSource(uint32_t ssrc, AudioSource* source); - bool MuteStream(uint32_t ssrc, bool mute); - - CustomWebRtcVoiceEngine* engine() { return engine_; } - bool SetMaxSendBitrate(int bps); - void SetupRecording(); - - webrtc::TaskQueueBase* const worker_thread_; - webrtc::ScopedTaskSafety task_safety_; - webrtc::SequenceChecker network_thread_checker_{ - webrtc::SequenceChecker::kDetached}; - - CustomWebRtcVoiceEngine* const engine_ = nullptr; - std::vector send_codecs_; - - int max_send_bitrate_bps_ = 0; - AudioOptions options_; - absl::optional dtmf_payload_type_; - int dtmf_payload_freq_ = -1; - bool enable_non_sender_rtt_ = false; - bool send_ = false; - webrtc::Call* const call_ = nullptr; - - const MediaConfig::Audio audio_config_; - - class WebRtcAudioSendStream; - - std::map send_streams_; - std::vector send_rtp_extensions_; - std::string mid_; - - absl::optional - send_codec_spec_; - - // TODO(kwiberg): Per-SSRC codec pair IDs? - const webrtc::AudioCodecPairId codec_pair_id_; - - // Per peer connection crypto options that last for the lifetime of the peer - // connection. - const webrtc::CryptoOptions crypto_options_; - rtc::scoped_refptr - unsignaled_frame_transformer_; - - void FillSendCodecStats(VoiceMediaSendInfo* voice_media_info); - - // Callback invoked whenever the send codec changes. - // TODO(bugs.webrtc.org/13931): Remove again when coupling isn't needed. - absl::AnyInvocable send_codec_changed_callback_; - // Callback invoked whenever the list of SSRCs changes. 
- absl::AnyInvocable&)> - ssrc_list_changed_callback_; -}; - -class WebRtcVoiceReceiveChannel2 final - : public MediaChannelUtil, - public VoiceMediaReceiveChannelInterface { - public: - WebRtcVoiceReceiveChannel2(CustomWebRtcVoiceEngine* engine, - const MediaConfig& config, - const AudioOptions& options, - const webrtc::CryptoOptions& crypto_options, - webrtc::Call* call, - webrtc::AudioCodecPairId codec_pair_id); - - WebRtcVoiceReceiveChannel2() = delete; - WebRtcVoiceReceiveChannel2(const WebRtcVoiceReceiveChannel2&) = delete; - WebRtcVoiceReceiveChannel2& operator=(const WebRtcVoiceReceiveChannel2&) = - delete; - - ~WebRtcVoiceReceiveChannel2() override; - - MediaType media_type() const override { return MEDIA_TYPE_AUDIO; } - - VideoMediaReceiveChannelInterface* AsVideoReceiveChannel() override { - RTC_CHECK_NOTREACHED(); - return nullptr; - } - VoiceMediaReceiveChannelInterface* AsVoiceReceiveChannel() override { - return this; - } - - const AudioOptions& options() const { return options_; } - - void SetInterface(MediaChannelNetworkInterface* iface) override { - MediaChannelUtil::SetInterface(iface); - } - bool SetReceiverParameters(const AudioReceiverParameters& params) override; - webrtc::RtpParameters GetRtpReceiverParameters(uint32_t ssrc) const override; - webrtc::RtpParameters GetDefaultRtpReceiveParameters() const override; - - void SetPlayout(bool playout) override; - bool AddRecvStream(const StreamParams& sp) override; - bool RemoveRecvStream(uint32_t ssrc) override; - void ResetUnsignaledRecvStream() override; - absl::optional GetUnsignaledSsrc() const override; - - void ChooseReceiverReportSsrc(const std::set& choices) override; - - void OnDemuxerCriteriaUpdatePending() override; - void OnDemuxerCriteriaUpdateComplete() override; - - // E2EE Frame API - // Set a frame decryptor to a particular ssrc that will intercept all - // incoming audio payloads and attempt to decrypt them before forwarding the - // result. - void SetFrameDecryptor(uint32_t ssrc, - rtc::scoped_refptr - frame_decryptor) override; - - bool SetOutputVolume(uint32_t ssrc, double volume) override; - // Applies the new volume to current and future unsignaled streams. - bool SetDefaultOutputVolume(double volume) override; - - bool SetBaseMinimumPlayoutDelayMs(uint32_t ssrc, int delay_ms) override; - absl::optional GetBaseMinimumPlayoutDelayMs( - uint32_t ssrc) const override; - - void OnPacketReceived(const webrtc::RtpPacketReceived& packet) override; - bool GetStats(VoiceMediaReceiveInfo* info, - bool get_and_clear_legacy_stats) override; - - // Set the audio sink for an existing stream. - void SetRawAudioSink( - uint32_t ssrc, std::unique_ptr sink) override; - // Will set the audio sink on the latest unsignaled stream, future or - // current. Only one stream at a time will use the sink. 
- void SetDefaultRawAudioSink( - std::unique_ptr sink) override; - - std::vector GetSources(uint32_t ssrc) const override; - - void SetDepacketizerToDecoderFrameTransformer( - uint32_t ssrc, - rtc::scoped_refptr frame_transformer) - override; - - void SetReceiveNackEnabled(bool enabled) override; - void SetReceiveNonSenderRttEnabled(bool enabled) override; - - private: - bool SetOptions(const AudioOptions& options); - bool SetRecvCodecs(const std::vector& codecs); - bool SetLocalSource(uint32_t ssrc, AudioSource* source); - bool MuteStream(uint32_t ssrc, bool mute); - - CustomWebRtcVoiceEngine* engine() { return engine_; } - void SetupRecording(); - - // Expected to be invoked once per packet that belongs to this channel that - // can not be demuxed. Returns true if a default receive stream has been - // created. - bool MaybeCreateDefaultReceiveStream(const webrtc::RtpPacketReceived& packet); - // Check if 'ssrc' is an unsignaled stream, and if so mark it as not being - // unsignaled anymore (i.e. it is now removed, or signaled), and return true. - bool MaybeDeregisterUnsignaledRecvStream(uint32_t ssrc); - - webrtc::TaskQueueBase* const worker_thread_; - webrtc::ScopedTaskSafety task_safety_; - webrtc::SequenceChecker network_thread_checker_{ - webrtc::SequenceChecker::kDetached}; - - CustomWebRtcVoiceEngine* const engine_ = nullptr; - - // TODO(kwiberg): decoder_map_ and recv_codecs_ store the exact same - // information, in slightly different formats. Eliminate recv_codecs_. - std::map decoder_map_; - std::vector recv_codecs_; - - AudioOptions options_; - bool recv_nack_enabled_ = false; - bool enable_non_sender_rtt_ = false; - bool playout_ = false; - webrtc::Call* const call_ = nullptr; - - const MediaConfig::Audio audio_config_; - - // Queue of unsignaled SSRCs; oldest at the beginning. - std::vector unsignaled_recv_ssrcs_; - - // This is a stream param that comes from the remote description, but wasn't - // signaled with any a=ssrc lines. It holds the information that was signaled - // before the unsignaled receive stream is created when the first packet is - // received. - StreamParams unsignaled_stream_params_; - - // Volume for unsignaled streams, which may be set before the stream exists. - double default_recv_volume_ = 1.0; - - // Delay for unsignaled streams, which may be set before the stream exists. - int default_recv_base_minimum_delay_ms_ = 0; - - // Sink for latest unsignaled stream - may be set before the stream exists. - std::unique_ptr default_sink_; - // Default SSRC to use for RTCP receiver reports in case of no signaled - // send streams. See: https://code.google.com/p/webrtc/issues/detail?id=4740 - // and https://code.google.com/p/chromium/issues/detail?id=547661 - uint32_t receiver_reports_ssrc_ = 0xFA17FA17u; - - std::string mid_; - - class WebRtcAudioReceiveStream; - - std::map recv_streams_; - std::vector recv_rtp_extensions_; - webrtc::RtpHeaderExtensionMap recv_rtp_extension_map_; - - absl::optional - send_codec_spec_; - - // TODO(kwiberg): Per-SSRC codec pair IDs? - const webrtc::AudioCodecPairId codec_pair_id_; - - // Per peer connection crypto options that last for the lifetime of the peer - // connection. - const webrtc::CryptoOptions crypto_options_; - // Unsignaled streams have an option to have a frame decryptor set on them. 
- rtc::scoped_refptr - unsignaled_frame_decryptor_; - rtc::scoped_refptr - unsignaled_frame_transformer_; - - void FillReceiveCodecStats(VoiceMediaReceiveInfo* voice_media_info); -}; - -} // namespace cricket - -#endif // INTERNAL_CUSTOM_MEDIA_ENGINE_WEBRTC_VOICE_ENGINE_H_ diff --git a/src/internal/desktop_capturer.h b/src/internal/desktop_capturer.h index 390f73c16e..be024c87d8 100644 --- a/src/internal/desktop_capturer.h +++ b/src/internal/desktop_capturer.h @@ -30,10 +30,10 @@ namespace libwebrtc { class ScreenCapturerTrackSource : public webrtc::VideoTrackSource { public: - static rtc::scoped_refptr Create( + static webrtc::scoped_refptr Create( scoped_refptr capturer) { if (capturer) { - return rtc::make_ref_counted(capturer); + return webrtc::make_ref_counted(capturer); } return nullptr; } @@ -44,7 +44,7 @@ class ScreenCapturerTrackSource : public webrtc::VideoTrackSource { virtual ~ScreenCapturerTrackSource() { capturer_->Stop(); } private: - rtc::VideoSourceInterface* source() override { + webrtc::VideoSourceInterface* source() override { return static_cast(capturer_.get()); } diff --git a/src/internal/local_audio_track.cc b/src/internal/local_audio_track.cc index e7cf09ff65..1069ae26c8 100644 --- a/src/internal/local_audio_track.cc +++ b/src/internal/local_audio_track.cc @@ -4,15 +4,15 @@ using webrtc::MediaSourceInterface; namespace libwebrtc { -rtc::scoped_refptr LocalAudioSource::Create( - const cricket::AudioOptions* audio_options, +webrtc::scoped_refptr LocalAudioSource::Create( + const webrtc::AudioOptions* audio_options, webrtc::CustomAudioTransportImpl* audio_transport) { - auto source = rtc::make_ref_counted(audio_transport); + auto source = webrtc::make_ref_counted(audio_transport); source->Initialize(audio_options); return source; } -void LocalAudioSource::Initialize(const cricket::AudioOptions* audio_options) { +void LocalAudioSource::Initialize(const webrtc::AudioOptions* audio_options) { if (!audio_options) return; options_ = *audio_options; diff --git a/src/internal/local_audio_track.h b/src/internal/local_audio_track.h index a8592a6df3..37fcca034c 100644 --- a/src/internal/local_audio_track.h +++ b/src/internal/local_audio_track.h @@ -15,14 +15,14 @@ using namespace webrtc; class LocalAudioSource : public Notifier, AudioSender { public: // Creates an instance of CustomLocalAudioSource. 
- static rtc::scoped_refptr Create( - const cricket::AudioOptions* audio_options, + static webrtc::scoped_refptr Create( + const webrtc::AudioOptions* audio_options, webrtc::CustomAudioTransportImpl* audio_transport); SourceState state() const override { return kLive; } bool remote() const override { return false; } - const cricket::AudioOptions options() const override { return options_; } + const webrtc::AudioOptions options() const override { return options_; } void AddSink(AudioTrackSinkInterface* sink) override { webrtc::MutexLock lock(&sink_lock_); @@ -70,10 +70,10 @@ class LocalAudioSource : public Notifier, AudioSender { } private: - void Initialize(const cricket::AudioOptions* audio_options); + void Initialize(const webrtc::AudioOptions* audio_options); mutable webrtc::Mutex sink_lock_; std::vector sinks_ RTC_GUARDED_BY(sink_lock_); - cricket::AudioOptions options_; + webrtc::AudioOptions options_; webrtc::CustomAudioTransportImpl* audio_transport_; }; } // namespace libwebrtc diff --git a/src/internal/vcm_capturer.cc b/src/internal/vcm_capturer.cc index 2362d71fc5..a72bc63e64 100644 --- a/src/internal/vcm_capturer.cc +++ b/src/internal/vcm_capturer.cc @@ -21,7 +21,7 @@ namespace webrtc { namespace internal { -VcmCapturer::VcmCapturer(rtc::Thread* worker_thread) +VcmCapturer::VcmCapturer(webrtc::Thread* worker_thread) : vcm_(nullptr), worker_thread_(worker_thread) {} bool VcmCapturer::Init(size_t width, size_t height, size_t target_fps, @@ -56,7 +56,7 @@ bool VcmCapturer::Init(size_t width, size_t height, size_t target_fps, return true; } -std::shared_ptr VcmCapturer::Create(rtc::Thread* worker_thread, +std::shared_ptr VcmCapturer::Create(webrtc::Thread* worker_thread, size_t width, size_t height, size_t target_fps, size_t capture_device_index) { @@ -110,8 +110,8 @@ void VcmCapturer::OnFrame(const VideoFrame& frame) { VideoCapturer::OnFrame(frame); } -rtc::scoped_refptr CapturerTrackSource::Create( - rtc::Thread* worker_thread) { +webrtc::scoped_refptr CapturerTrackSource::Create( + webrtc::Thread* worker_thread) { const size_t kWidth = 640; const size_t kHeight = 480; const size_t kFps = 30; @@ -125,8 +125,8 @@ rtc::scoped_refptr CapturerTrackSource::Create( for (int i = 0; i < num_devices; ++i) { capturer = VcmCapturer::Create(worker_thread, kWidth, kHeight, kFps, i); if (capturer) { - return rtc::scoped_refptr( - new rtc::RefCountedObject(capturer)); + return webrtc::scoped_refptr( + new webrtc::RefCountedObject(capturer)); } } diff --git a/src/internal/vcm_capturer.h b/src/internal/vcm_capturer.h index 4649e30711..57b3f6e2b4 100644 --- a/src/internal/vcm_capturer.h +++ b/src/internal/vcm_capturer.h @@ -22,13 +22,13 @@ namespace webrtc { namespace internal { class VcmCapturer : public VideoCapturer, - public rtc::VideoSinkInterface { + public webrtc::VideoSinkInterface { public: - static std::shared_ptr Create(rtc::Thread* worker_thread, + static std::shared_ptr Create(webrtc::Thread* worker_thread, size_t width, size_t height, size_t target_fps, size_t capture_device_index); - VcmCapturer(rtc::Thread* worker_thread); + VcmCapturer(webrtc::Thread* worker_thread); virtual ~VcmCapturer(); @@ -45,22 +45,22 @@ class VcmCapturer : public VideoCapturer, size_t capture_device_index); void Destroy(); - rtc::scoped_refptr vcm_; - rtc::Thread* worker_thread_ = nullptr; + webrtc::scoped_refptr vcm_; + webrtc::Thread* worker_thread_ = nullptr; VideoCaptureCapability capability_; }; class CapturerTrackSource : public webrtc::VideoTrackSource { public: - static rtc::scoped_refptr Create( - 
rtc::Thread* worker_thread); + static webrtc::scoped_refptr Create( + webrtc::Thread* worker_thread); public: explicit CapturerTrackSource(std::shared_ptr capturer) : VideoTrackSource(/*remote=*/false), capturer_(capturer) {} private: - rtc::VideoSourceInterface* source() override { + webrtc::VideoSourceInterface* source() override { return capturer_.get(); } std::shared_ptr capturer_; diff --git a/src/internal/video_capturer.cc b/src/internal/video_capturer.cc index 623fce43ba..20021d95f5 100644 --- a/src/internal/video_capturer.cc +++ b/src/internal/video_capturer.cc @@ -38,7 +38,7 @@ void VideoCapturer::OnFrame(const VideoFrame& frame) { if (out_height != frame.height() || out_width != frame.width()) { // Video adapter has requested a down-scale. Allocate a new buffer and // return scaled version. - rtc::scoped_refptr scaled_buffer = + webrtc::scoped_refptr scaled_buffer = I420Buffer::Create(out_width, out_height); scaled_buffer->ScaleFrom(*frame.video_frame_buffer()->ToI420()); broadcaster_.OnFrame(VideoFrame::Builder() @@ -53,23 +53,23 @@ void VideoCapturer::OnFrame(const VideoFrame& frame) { } } -rtc::VideoSinkWants VideoCapturer::GetSinkWants() { +webrtc::VideoSinkWants VideoCapturer::GetSinkWants() { return broadcaster_.wants(); } -void VideoCapturer::AddOrUpdateSink(rtc::VideoSinkInterface* sink, - const rtc::VideoSinkWants& wants) { +void VideoCapturer::AddOrUpdateSink(webrtc::VideoSinkInterface* sink, + const webrtc::VideoSinkWants& wants) { broadcaster_.AddOrUpdateSink(sink, wants); UpdateVideoAdapter(); } -void VideoCapturer::RemoveSink(rtc::VideoSinkInterface* sink) { +void VideoCapturer::RemoveSink(webrtc::VideoSinkInterface* sink) { broadcaster_.RemoveSink(sink); UpdateVideoAdapter(); } void VideoCapturer::UpdateVideoAdapter() { - rtc::VideoSinkWants wants = broadcaster_.wants(); + webrtc::VideoSinkWants wants = broadcaster_.wants(); if (0 < wants.resolutions.size()) { auto size = wants.resolutions.at(0); diff --git a/src/internal/video_capturer.h b/src/internal/video_capturer.h index c643329c50..f28371af91 100644 --- a/src/internal/video_capturer.h +++ b/src/internal/video_capturer.h @@ -25,7 +25,7 @@ namespace webrtc { namespace internal { -class VideoCapturer : public rtc::VideoSourceInterface { +class VideoCapturer : public webrtc::VideoSourceInterface { public: VideoCapturer(); virtual ~VideoCapturer(); @@ -36,19 +36,19 @@ class VideoCapturer : public rtc::VideoSourceInterface { virtual void StopCapture() {} - void AddOrUpdateSink(rtc::VideoSinkInterface* sink, - const rtc::VideoSinkWants& wants) override; - void RemoveSink(rtc::VideoSinkInterface* sink) override; + void AddOrUpdateSink(webrtc::VideoSinkInterface* sink, + const webrtc::VideoSinkWants& wants) override; + void RemoveSink(webrtc::VideoSinkInterface* sink) override; protected: void OnFrame(const VideoFrame& frame); - rtc::VideoSinkWants GetSinkWants(); + webrtc::VideoSinkWants GetSinkWants(); private: void UpdateVideoAdapter(); - rtc::VideoBroadcaster broadcaster_; - cricket::VideoAdapter video_adapter_; + webrtc::VideoBroadcaster broadcaster_; + webrtc::VideoAdapter video_adapter_; }; } // namespace internal } // namespace webrtc diff --git a/src/libwebrtc.cc b/src/libwebrtc.cc index a2e03243dc..522c506a3c 100644 --- a/src/libwebrtc.cc +++ b/src/libwebrtc.cc @@ -13,7 +13,7 @@ static bool g_is_initialized = false; // Initializes SSL, if not initialized. 
bool LibWebRTC::Initialize() { if (!g_is_initialized) { - rtc::InitializeSSL(); + webrtc::InitializeSSL(); g_is_initialized = true; } return g_is_initialized; @@ -21,8 +21,8 @@ bool LibWebRTC::Initialize() { // Stops and cleans up the threads and SSL. void LibWebRTC::Terminate() { - rtc::ThreadManager::Instance()->SetCurrentThread(NULL); - rtc::CleanupSSL(); + webrtc::ThreadManager::Instance()->SetCurrentThread(NULL); + webrtc::CleanupSSL(); // Resets the static variable g_is_initialized to false. g_is_initialized = false; diff --git a/src/rtc_audio_device_impl.cc b/src/rtc_audio_device_impl.cc index ebc91e0e22..bd3e3383c0 100644 --- a/src/rtc_audio_device_impl.cc +++ b/src/rtc_audio_device_impl.cc @@ -5,10 +5,10 @@ namespace libwebrtc { AudioDeviceImpl::AudioDeviceImpl( - rtc::scoped_refptr audio_device_module, - rtc::Thread* worker_thread) + webrtc::scoped_refptr audio_device_module, + webrtc::Thread* worker_thread) : audio_device_module_(audio_device_module), worker_thread_(worker_thread) { - audio_device_module_->SetAudioDeviceSink(this); + audio_device_module_->SetObserver(this); } AudioDeviceImpl::~AudioDeviceImpl() { diff --git a/src/rtc_audio_device_impl.h b/src/rtc_audio_device_impl.h index d2cae4a76a..2dd713cbb6 100644 --- a/src/rtc_audio_device_impl.h +++ b/src/rtc_audio_device_impl.h @@ -8,11 +8,11 @@ #include "rtc_base/thread.h" namespace libwebrtc { -class AudioDeviceImpl : public RTCAudioDevice, public webrtc::AudioDeviceSink { +class AudioDeviceImpl : public RTCAudioDevice, public webrtc::AudioDeviceObserver { public: AudioDeviceImpl( - rtc::scoped_refptr audio_device_module, - rtc::Thread* worker_thread); + webrtc::scoped_refptr audio_device_module, + webrtc::Thread* worker_thread); virtual ~AudioDeviceImpl(); @@ -45,8 +45,8 @@ class AudioDeviceImpl : public RTCAudioDevice, public webrtc::AudioDeviceSink { void OnDevicesUpdated() override; private: - rtc::scoped_refptr audio_device_module_; - rtc::Thread* worker_thread_ = nullptr; + webrtc::scoped_refptr audio_device_module_; + webrtc::Thread* worker_thread_ = nullptr; OnDeviceChangeCallback listener_ = nullptr; }; diff --git a/src/rtc_audio_processing_impl.cc b/src/rtc_audio_processing_impl.cc index ffed8051e4..d0b460eae0 100644 --- a/src/rtc_audio_processing_impl.cc +++ b/src/rtc_audio_processing_impl.cc @@ -1,5 +1,7 @@ #include "rtc_audio_processing_impl.h" +#include "api/audio/builtin_audio_processing_builder.h" +#include "api/environment/environment_factory.h" #include "modules/audio_processing/audio_buffer.h" #include "modules/audio_processing/ns/ns_common.h" #include "rtc_base/logging.h" @@ -81,10 +83,10 @@ RTCAudioProcessingImpl::RTCAudioProcessingImpl() { std::unique_ptr render_pre_processor( render_pre_processor_); - apm_ = webrtc::AudioProcessingBuilder() + apm_ = webrtc::BuiltinAudioProcessingBuilder() .SetCapturePostProcessing(std::move(capture_post_processor)) .SetRenderPreProcessing(std::move(render_pre_processor)) - .Create(); + .Build(webrtc::CreateEnvironment()); webrtc::AudioProcessing::Config config; apm_->ApplyConfig(config); diff --git a/src/rtc_audio_processing_impl.h b/src/rtc_audio_processing_impl.h index 574c3e881f..d85c1b42ef 100644 --- a/src/rtc_audio_processing_impl.h +++ b/src/rtc_audio_processing_impl.h @@ -20,14 +20,14 @@ class RTCAudioProcessingImpl : public RTCAudioProcessing { void SetRenderPreProcessing( RTCAudioProcessing::CustomProcessing* render_pre_processing) override; - virtual rtc::scoped_refptr GetAudioProcessing() { + virtual webrtc::scoped_refptr GetAudioProcessing() { return 
apm_; } private: CustomProcessingAdapter* capture_post_processor_; CustomProcessingAdapter* render_pre_processor_; - rtc::scoped_refptr apm_; + webrtc::scoped_refptr apm_; }; } // namespace libwebrtc diff --git a/src/rtc_audio_source_impl.cc b/src/rtc_audio_source_impl.cc index e75513af0f..b075456a3d 100644 --- a/src/rtc_audio_source_impl.cc +++ b/src/rtc_audio_source_impl.cc @@ -3,7 +3,7 @@ namespace libwebrtc { RTCAudioSourceImpl::RTCAudioSourceImpl( - rtc::scoped_refptr rtc_audio_source, + webrtc::scoped_refptr rtc_audio_source, SourceType source_type) : rtc_audio_source_(rtc_audio_source), source_type_(source_type) { RTC_LOG(LS_INFO) << __FUNCTION__ << ": ctor "; diff --git a/src/rtc_audio_source_impl.h b/src/rtc_audio_source_impl.h index d295755241..cd8b012e54 100644 --- a/src/rtc_audio_source_impl.h +++ b/src/rtc_audio_source_impl.h @@ -18,7 +18,7 @@ namespace libwebrtc { class RTCAudioSourceImpl : public RTCAudioSource { public: RTCAudioSourceImpl( - rtc::scoped_refptr rtc_audio_source, + webrtc::scoped_refptr rtc_audio_source, SourceType source_type); void CaptureFrame(const void* audio_data, int bits_per_sample, @@ -34,12 +34,12 @@ class RTCAudioSourceImpl : public RTCAudioSource { virtual ~RTCAudioSourceImpl(); - rtc::scoped_refptr rtc_audio_source() { + webrtc::scoped_refptr rtc_audio_source() { return rtc_audio_source_; } private: - rtc::scoped_refptr rtc_audio_source_; + webrtc::scoped_refptr rtc_audio_source_; SourceType source_type_; }; diff --git a/src/rtc_audio_track_impl.cc b/src/rtc_audio_track_impl.cc index e8e1f5da30..1716b60797 100644 --- a/src/rtc_audio_track_impl.cc +++ b/src/rtc_audio_track_impl.cc @@ -3,7 +3,7 @@ namespace libwebrtc { AudioTrackImpl::AudioTrackImpl( - rtc::scoped_refptr audio_track) + webrtc::scoped_refptr audio_track) : rtc_track_(audio_track) { RTC_LOG(LS_INFO) << __FUNCTION__ << ": ctor "; id_ = rtc_track_->id(); diff --git a/src/rtc_audio_track_impl.h b/src/rtc_audio_track_impl.h index 6bd89dd4ad..50cdc56bed 100644 --- a/src/rtc_audio_track_impl.h +++ b/src/rtc_audio_track_impl.h @@ -32,7 +32,7 @@ class AudioTrackSinkAdapter : public webrtc::AudioTrackSinkInterface { class AudioTrackImpl : public RTCAudioTrack { public: - AudioTrackImpl(rtc::scoped_refptr audio_track); + AudioTrackImpl(webrtc::scoped_refptr audio_track); virtual ~AudioTrackImpl(); @@ -68,7 +68,7 @@ class AudioTrackImpl : public RTCAudioTrack { sinks_.erase(it); } - rtc::scoped_refptr rtc_track() { + webrtc::scoped_refptr rtc_track() { return rtc_track_; } @@ -78,7 +78,7 @@ class AudioTrackImpl : public RTCAudioTrack { private: void RemoveSinks(); - rtc::scoped_refptr rtc_track_; + webrtc::scoped_refptr rtc_track_; std::map> sinks_; webrtc::Mutex mutex_; string id_, kind_; diff --git a/src/rtc_data_channel_impl.cc b/src/rtc_data_channel_impl.cc index 41de9ef481..22cc93ad21 100644 --- a/src/rtc_data_channel_impl.cc +++ b/src/rtc_data_channel_impl.cc @@ -3,7 +3,7 @@ namespace libwebrtc { RTCDataChannelImpl::RTCDataChannelImpl( - rtc::scoped_refptr rtc_data_channel) + webrtc::scoped_refptr rtc_data_channel) : rtc_data_channel_(rtc_data_channel), crit_sect_(new webrtc::Mutex()) { rtc_data_channel_->RegisterObserver(this); label_ = rtc_data_channel_->label(); @@ -15,7 +15,7 @@ RTCDataChannelImpl::~RTCDataChannelImpl() { void RTCDataChannelImpl::Send(const uint8_t* data, uint32_t size, bool binary /*= false*/) { - rtc::CopyOnWriteBuffer copyOnWriteBuffer(data, size); + webrtc::CopyOnWriteBuffer copyOnWriteBuffer(data, size); webrtc::DataBuffer buffer(copyOnWriteBuffer, binary); 
rtc_data_channel_->Send(buffer); } diff --git a/src/rtc_data_channel_impl.h b/src/rtc_data_channel_impl.h index a433a36bce..8e4b10591a 100644 --- a/src/rtc_data_channel_impl.h +++ b/src/rtc_data_channel_impl.h @@ -12,7 +12,7 @@ class RTCDataChannelImpl : public RTCDataChannel, public webrtc::DataChannelObserver { public: RTCDataChannelImpl( - rtc::scoped_refptr rtc_data_channel); + webrtc::scoped_refptr rtc_data_channel); virtual void Send(const uint8_t* data, uint32_t size, bool binary = false) override; @@ -31,7 +31,7 @@ class RTCDataChannelImpl : public RTCDataChannel, virtual RTCDataChannelState state() override; - rtc::scoped_refptr rtc_data_channel() { + webrtc::scoped_refptr rtc_data_channel() { return rtc_data_channel_; } @@ -43,7 +43,7 @@ class RTCDataChannelImpl : public RTCDataChannel, virtual void OnMessage(const webrtc::DataBuffer& buffer) override; private: - rtc::scoped_refptr rtc_data_channel_; + webrtc::scoped_refptr rtc_data_channel_; RTCDataChannelObserver* observer_ = nullptr; std::unique_ptr crit_sect_; RTCDataChannelState state_; diff --git a/src/rtc_desktop_capturer_impl.cc b/src/rtc_desktop_capturer_impl.cc index 93dec6d38d..07edb03898 100644 --- a/src/rtc_desktop_capturer_impl.cc +++ b/src/rtc_desktop_capturer_impl.cc @@ -29,8 +29,8 @@ enum { kCaptureDelay = 33, kCaptureMessageId = 1000 }; RTCDesktopCapturerImpl::RTCDesktopCapturerImpl( DesktopType type, webrtc::DesktopCapturer::SourceId source_id, - rtc::Thread* signaling_thread, scoped_refptr source) - : thread_(rtc::Thread::Create()), + webrtc::Thread* signaling_thread, scoped_refptr source) + : thread_(webrtc::Thread::Create()), source_id_(source_id), signaling_thread_(signaling_thread), source_(source) { @@ -201,7 +201,7 @@ void RTCDesktopCapturerImpl::OnCaptureResult( #endif width, height, libyuv::kRotate0, libyuv::FOURCC_ARGB); - OnFrame(webrtc::VideoFrame(i420_buffer_, 0, rtc::TimeMillis(), + OnFrame(webrtc::VideoFrame(i420_buffer_, 0, webrtc::TimeMillis(), webrtc::kVideoRotation_0)); } #ifdef WEBRTC_WIN diff --git a/src/rtc_desktop_capturer_impl.h b/src/rtc_desktop_capturer_impl.h index 7ae63f546b..cde964be2e 100644 --- a/src/rtc_desktop_capturer_impl.h +++ b/src/rtc_desktop_capturer_impl.h @@ -37,7 +37,7 @@ class RTCDesktopCapturerImpl : public RTCDesktopCapturer, public: RTCDesktopCapturerImpl(DesktopType type, webrtc::DesktopCapturer::SourceId source_id, - rtc::Thread* signaling_thread, + webrtc::Thread* signaling_thread, scoped_refptr source); ~RTCDesktopCapturerImpl(); @@ -67,8 +67,8 @@ class RTCDesktopCapturerImpl : public RTCDesktopCapturer, void CaptureFrame(); webrtc::DesktopCaptureOptions options_; std::unique_ptr capturer_; - std::unique_ptr thread_; - rtc::scoped_refptr i420_buffer_; + std::unique_ptr thread_; + webrtc::scoped_refptr i420_buffer_; CaptureState capture_state_ = CS_STOPPED; DesktopType type_; webrtc::DesktopCapturer::SourceId source_id_; @@ -76,7 +76,7 @@ class RTCDesktopCapturerImpl : public RTCDesktopCapturer, uint32_t capture_delay_ = 1000; // 1s webrtc::DesktopCapturer::Result result_ = webrtc::DesktopCapturer::Result::SUCCESS; - rtc::Thread* signaling_thread_ = nullptr; + webrtc::Thread* signaling_thread_ = nullptr; scoped_refptr source_; uint32_t x_ = 0; uint32_t y_ = 0; diff --git a/src/rtc_desktop_device_impl.cc b/src/rtc_desktop_device_impl.cc index ee3f462ef0..225f4452d2 100644 --- a/src/rtc_desktop_device_impl.cc +++ b/src/rtc_desktop_device_impl.cc @@ -7,7 +7,7 @@ namespace libwebrtc { -RTCDesktopDeviceImpl::RTCDesktopDeviceImpl(rtc::Thread* signaling_thread) 
+RTCDesktopDeviceImpl::RTCDesktopDeviceImpl(webrtc::Thread* signaling_thread) : signaling_thread_(signaling_thread) {} RTCDesktopDeviceImpl::~RTCDesktopDeviceImpl() {} diff --git a/src/rtc_desktop_device_impl.h b/src/rtc_desktop_device_impl.h index 8f3bcc524d..ae7ac43257 100644 --- a/src/rtc_desktop_device_impl.h +++ b/src/rtc_desktop_device_impl.h @@ -16,7 +16,7 @@ namespace libwebrtc { class RTCDesktopDeviceImpl : public RTCDesktopDevice { public: - RTCDesktopDeviceImpl(rtc::Thread* signaling_thread); + RTCDesktopDeviceImpl(webrtc::Thread* signaling_thread); ~RTCDesktopDeviceImpl(); scoped_refptr CreateDesktopCapturer( @@ -26,7 +26,7 @@ class RTCDesktopDeviceImpl : public RTCDesktopDevice { DesktopType type) override; private: - rtc::Thread* signaling_thread_ = nullptr; + webrtc::Thread* signaling_thread_ = nullptr; std::map> desktop_media_lists_; }; diff --git a/src/rtc_desktop_media_list_impl.cc b/src/rtc_desktop_media_list_impl.cc index 68aa783834..a777bcb1b8 100644 --- a/src/rtc_desktop_media_list_impl.cc +++ b/src/rtc_desktop_media_list_impl.cc @@ -30,8 +30,8 @@ namespace libwebrtc { RTCDesktopMediaListImpl::RTCDesktopMediaListImpl(DesktopType type, - rtc::Thread* signaling_thread) - : thread_(rtc::Thread::Create()), + webrtc::Thread* signaling_thread) + : thread_(webrtc::Thread::Create()), type_(type), signaling_thread_(signaling_thread) { RTC_DCHECK(thread_); diff --git a/src/rtc_desktop_media_list_impl.h b/src/rtc_desktop_media_list_impl.h index bfd6561bc7..230a03eec5 100644 --- a/src/rtc_desktop_media_list_impl.h +++ b/src/rtc_desktop_media_list_impl.h @@ -63,7 +63,7 @@ class MediaSourceImpl : public MediaSource { private: std::vector thumbnail_; - rtc::scoped_refptr i420_buffer_; + webrtc::scoped_refptr i420_buffer_; RTCDesktopMediaListImpl* mediaList_; DesktopType type_; }; @@ -73,7 +73,7 @@ class RTCDesktopMediaListImpl : public RTCDesktopMediaList { enum CaptureState { CS_RUNNING, CS_STOPPED, CS_FAILED }; public: - RTCDesktopMediaListImpl(DesktopType type, rtc::Thread* signaling_thread); + RTCDesktopMediaListImpl(DesktopType type, webrtc::Thread* signaling_thread); virtual ~RTCDesktopMediaListImpl(); @@ -120,11 +120,11 @@ class RTCDesktopMediaListImpl : public RTCDesktopMediaList { std::unique_ptr callback_; webrtc::DesktopCaptureOptions options_; std::unique_ptr capturer_; - std::unique_ptr thread_; + std::unique_ptr thread_; std::vector> sources_; MediaListObserver* observer_ = nullptr; DesktopType type_; - rtc::Thread* signaling_thread_ = nullptr; + webrtc::Thread* signaling_thread_ = nullptr; }; } // namespace libwebrtc diff --git a/src/rtc_dtls_transport_impl.cc b/src/rtc_dtls_transport_impl.cc index 0d29e9c64f..56581616cd 100644 --- a/src/rtc_dtls_transport_impl.cc +++ b/src/rtc_dtls_transport_impl.cc @@ -38,7 +38,7 @@ RTCDtlsTransportInformationImpl::dtls_transport_information() { } RTCDtlsTransportImpl::RTCDtlsTransportImpl( - rtc::scoped_refptr dtls_transport) + webrtc::scoped_refptr dtls_transport) : dtls_transport_(dtls_transport), observer_(nullptr) {} scoped_refptr @@ -67,7 +67,7 @@ void RTCDtlsTransportImpl::OnError(webrtc::RTCError error) { } } -rtc::scoped_refptr +webrtc::scoped_refptr RTCDtlsTransportImpl::dtls_transport() { return dtls_transport_; } diff --git a/src/rtc_dtls_transport_impl.h b/src/rtc_dtls_transport_impl.h index ea2f3ae148..37c870dc69 100644 --- a/src/rtc_dtls_transport_impl.h +++ b/src/rtc_dtls_transport_impl.h @@ -32,7 +32,7 @@ class RTCDtlsTransportImpl : public RTCDtlsTransport, public webrtc::DtlsTransportObserverInterface { public: 
RTCDtlsTransportImpl( - rtc::scoped_refptr dtls_transport); + webrtc::scoped_refptr dtls_transport); virtual scoped_refptr GetInformation() override; @@ -45,10 +45,10 @@ class RTCDtlsTransportImpl : public RTCDtlsTransport, virtual void OnError(webrtc::RTCError error) override; - rtc::scoped_refptr dtls_transport(); + webrtc::scoped_refptr dtls_transport(); private: - rtc::scoped_refptr dtls_transport_; + webrtc::scoped_refptr dtls_transport_; RTCDtlsTransportObserver* observer_; }; diff --git a/src/rtc_dtmf_sender_impl.cc b/src/rtc_dtmf_sender_impl.cc index f603157a10..ad81e3a32c 100644 --- a/src/rtc_dtmf_sender_impl.cc +++ b/src/rtc_dtmf_sender_impl.cc @@ -3,10 +3,10 @@ namespace libwebrtc { RTCDtmfSenderImpl::RTCDtmfSenderImpl( - rtc::scoped_refptr dtmf_sender) + webrtc::scoped_refptr dtmf_sender) : dtmf_sender_(dtmf_sender), observer_(nullptr) {} -rtc::scoped_refptr +webrtc::scoped_refptr RTCDtmfSenderImpl::dtmf_sender() { return dtmf_sender_; } diff --git a/src/rtc_dtmf_sender_impl.h b/src/rtc_dtmf_sender_impl.h index 9f762e5ed8..74a2078cc9 100644 --- a/src/rtc_dtmf_sender_impl.h +++ b/src/rtc_dtmf_sender_impl.h @@ -14,7 +14,7 @@ class RTCDtmfSenderImpl : public RTCDtmfSender, public webrtc::DtmfSenderObserverInterface { public: RTCDtmfSenderImpl( - rtc::scoped_refptr dtmf_sender); + webrtc::scoped_refptr dtmf_sender); virtual void RegisterObserver(RTCDtmfSenderObserver* observer) override; virtual void UnregisterObserver() override; @@ -33,10 +33,10 @@ class RTCDtmfSenderImpl : public RTCDtmfSender, virtual void OnToneChange(const std::string& tone) override; - rtc::scoped_refptr dtmf_sender(); + webrtc::scoped_refptr dtmf_sender(); private: - rtc::scoped_refptr dtmf_sender_; + webrtc::scoped_refptr dtmf_sender_; RTCDtmfSenderObserver* observer_; }; diff --git a/src/rtc_frame_cryptor_impl.cc b/src/rtc_frame_cryptor_impl.cc index b720e44368..b69f0f5aec 100644 --- a/src/rtc_frame_cryptor_impl.cc +++ b/src/rtc_frame_cryptor_impl.cc @@ -42,7 +42,7 @@ RTCFrameCryptorImpl::RTCFrameCryptorImpl( key_index_(0), key_provider_(key_provider), sender_(sender), - observer_(rtc::make_ref_counted()) { + observer_(webrtc::make_ref_counted()) { auto factoryImpl = static_cast(factory.get()); auto keyImpl = static_cast(key_provider.get()); RTCRtpSenderImpl* impl = static_cast(sender.get()); @@ -50,7 +50,7 @@ RTCFrameCryptorImpl::RTCFrameCryptorImpl( impl->rtc_rtp_sender()->track()->kind() == "audio" ? webrtc::FrameCryptorTransformer::MediaType::kAudioFrame : webrtc::FrameCryptorTransformer::MediaType::kVideoFrame; - e2ee_transformer_ = rtc::scoped_refptr( + e2ee_transformer_ = webrtc::scoped_refptr( new webrtc::FrameCryptorTransformer( factoryImpl->signaling_thread(), participant_id_.std_string(), mediaType, AlgorithmToFrameCryptorAlgorithm(algorithm), @@ -72,7 +72,7 @@ RTCFrameCryptorImpl::RTCFrameCryptorImpl( key_provider_(key_provider), receiver_(receiver), observer_( - rtc::make_ref_counted()) { + webrtc::make_ref_counted()) { auto factoryImpl = static_cast(factory.get()); auto keyImpl = static_cast(key_provider.get()); RTCRtpReceiverImpl* impl = static_cast(receiver.get()); @@ -80,7 +80,7 @@ RTCFrameCryptorImpl::RTCFrameCryptorImpl( impl->rtp_receiver()->track()->kind() == "audio" ? 
webrtc::FrameCryptorTransformer::MediaType::kAudioFrame : webrtc::FrameCryptorTransformer::MediaType::kVideoFrame; - e2ee_transformer_ = rtc::scoped_refptr( + e2ee_transformer_ = webrtc::scoped_refptr( new webrtc::FrameCryptorTransformer( factoryImpl->signaling_thread(), participant_id_.std_string(), mediaType, AlgorithmToFrameCryptorAlgorithm(algorithm), diff --git a/src/rtc_frame_cryptor_impl.h b/src/rtc_frame_cryptor_impl.h index b44d887d31..5397affb0a 100644 --- a/src/rtc_frame_cryptor_impl.h +++ b/src/rtc_frame_cryptor_impl.h @@ -23,7 +23,7 @@ class DefaultKeyProviderImpl : public KeyProvider { rtc_options.discard_frame_when_cryptor_not_ready = options->discard_frame_when_cryptor_not_ready; impl_ = - new rtc::RefCountedObject(rtc_options); + new webrtc::RefCountedObject(rtc_options); } ~DefaultKeyProviderImpl() {} @@ -59,10 +59,10 @@ class DefaultKeyProviderImpl : public KeyProvider { impl_->SetSifTrailer(trailer.std_vector()); } - rtc::scoped_refptr rtc_key_provider() { return impl_; } + webrtc::scoped_refptr rtc_key_provider() { return impl_; } private: - rtc::scoped_refptr impl_; + webrtc::scoped_refptr impl_; }; class RTCFrameCryptorObserverAdapter @@ -116,11 +116,11 @@ class RTCFrameCryptorImpl : public RTCFrameCryptor { mutable webrtc::Mutex mutex_; bool enabled_; int key_index_; - rtc::scoped_refptr e2ee_transformer_; + webrtc::scoped_refptr e2ee_transformer_; scoped_refptr key_provider_; scoped_refptr sender_; scoped_refptr receiver_; - rtc::scoped_refptr observer_; + webrtc::scoped_refptr observer_; }; } // namespace libwebrtc diff --git a/src/rtc_media_stream_impl.cc b/src/rtc_media_stream_impl.cc index f5d6072a4b..95ba8e77cd 100644 --- a/src/rtc_media_stream_impl.cc +++ b/src/rtc_media_stream_impl.cc @@ -9,7 +9,7 @@ namespace libwebrtc { MediaStreamImpl::MediaStreamImpl( - rtc::scoped_refptr rtc_media_stream) + webrtc::scoped_refptr rtc_media_stream) : rtc_media_stream_(rtc_media_stream) { rtc_media_stream_->RegisterObserver(this); diff --git a/src/rtc_media_stream_impl.h b/src/rtc_media_stream_impl.h index 011efc90af..13d924ebd2 100644 --- a/src/rtc_media_stream_impl.h +++ b/src/rtc_media_stream_impl.h @@ -18,18 +18,18 @@ class WebRTCStatsCollectorCallback : public webrtc::RTCStatsCollectorCallback { : success_(success), failure_(failure) {} ~WebRTCStatsCollectorCallback() {} - static rtc::scoped_refptr Create( + static webrtc::scoped_refptr Create( OnStatsCollectorSuccess success, OnStatsCollectorFailure failure) { - rtc::scoped_refptr rtc_stats_observer = - rtc::scoped_refptr( - new rtc::RefCountedObject(success, + webrtc::scoped_refptr rtc_stats_observer = + webrtc::scoped_refptr( + new webrtc::RefCountedObject(success, failure)); rtc_stats_observer->AddRef(); return rtc_stats_observer; } virtual void OnStatsDelivered( - const rtc::scoped_refptr& report) override; + const webrtc::scoped_refptr& report) override; private: OnStatsCollectorSuccess success_; @@ -186,7 +186,7 @@ class MediaStreamImpl : public RTCMediaStream, public webrtc::ObserverInterface { public: MediaStreamImpl( - rtc::scoped_refptr rtc_media_stream); + webrtc::scoped_refptr rtc_media_stream); ~MediaStreamImpl(); @@ -217,7 +217,7 @@ class MediaStreamImpl : public RTCMediaStream, virtual void OnChanged() override; public: - rtc::scoped_refptr rtc_media_stream() { + webrtc::scoped_refptr rtc_media_stream() { return rtc_media_stream_; } @@ -226,8 +226,8 @@ class MediaStreamImpl : public RTCMediaStream, } private: - rtc::scoped_refptr rtc_media_stream_; - rtc::scoped_refptr rtc_peerconnection_; + 
webrtc::scoped_refptr rtc_media_stream_; + webrtc::scoped_refptr rtc_peerconnection_; std::vector> audio_tracks_; std::vector> video_tracks_; RTCPeerConnectionObserver* observer_ = nullptr; diff --git a/src/rtc_peerconnection_factory_impl.cc b/src/rtc_peerconnection_factory_impl.cc index b9388d6ba3..653aa4bea9 100644 --- a/src/rtc_peerconnection_factory_impl.cc +++ b/src/rtc_peerconnection_factory_impl.cc @@ -24,8 +24,6 @@ #endif #include -#include "src/internal/custom_media_context.h" - namespace libwebrtc { #if defined(USE_INTEL_MEDIA_SDK) @@ -49,23 +47,20 @@ RTCPeerConnectionFactoryImpl::RTCPeerConnectionFactoryImpl() {} RTCPeerConnectionFactoryImpl::~RTCPeerConnectionFactoryImpl() {} bool RTCPeerConnectionFactoryImpl::Initialize() { - worker_thread_ = rtc::Thread::Create(); + worker_thread_ = webrtc::Thread::Create(); worker_thread_->SetName("worker_thread", nullptr); RTC_CHECK(worker_thread_->Start()) << "Failed to start thread"; - signaling_thread_ = rtc::Thread::Create(); + signaling_thread_ = webrtc::Thread::Create(); signaling_thread_->SetName("signaling_thread", nullptr); RTC_CHECK(signaling_thread_->Start()) << "Failed to start thread"; - custom_media_context_ = rtc::make_ref_counted( - signaling_thread_.get()); - - network_thread_ = rtc::Thread::CreateWithSocketServer(); + network_thread_ = webrtc::Thread::CreateWithSocketServer(); network_thread_->SetName("network_thread", nullptr); RTC_CHECK(network_thread_->Start()) << "Failed to start thread"; if (!audio_device_module_) { task_queue_factory_ = webrtc::CreateDefaultTaskQueueFactory(); - worker_thread_->BlockingCall([=] { CreateAudioDeviceModule_w(); }); + worker_thread_->BlockingCall([&] { CreateAudioDeviceModule_w(); }); } if (!audio_processing_impl_) { @@ -74,20 +69,26 @@ bool RTCPeerConnectionFactoryImpl::Initialize() { }); } + if (!audio_transport_factory_) { + worker_thread_->BlockingCall([this] { + audio_transport_factory_ = + webrtc::make_ref_counted(); + }); + } + if (!rtc_peerconnection_factory_) { - rtc_peerconnection_factory_ = - custom_media_context_->CreatePeerConnectionFactory( - network_thread_.get(), worker_thread_.get(), - signaling_thread_.get(), audio_device_module_, - webrtc::CreateBuiltinAudioEncoderFactory(), - webrtc::CreateBuiltinAudioDecoderFactory(), + rtc_peerconnection_factory_ = CreatePeerConnectionFactory( + network_thread_.get(), worker_thread_.get(), signaling_thread_.get(), + audio_device_module_, webrtc::CreateBuiltinAudioEncoderFactory(), + webrtc::CreateBuiltinAudioDecoderFactory(), #if defined(USE_INTEL_MEDIA_SDK) - CreateIntelVideoEncoderFactory(), CreateIntelVideoDecoderFactory(), + CreateIntelVideoEncoderFactory(), CreateIntelVideoDecoderFactory(), #else - webrtc::CreateBuiltinVideoEncoderFactory(), - webrtc::CreateBuiltinVideoDecoderFactory(), + webrtc::CreateBuiltinVideoEncoderFactory(), + webrtc::CreateBuiltinVideoDecoderFactory(), #endif - nullptr, audio_processing_impl_->GetAudioProcessing(), nullptr); + nullptr, audio_processing_impl_->GetAudioProcessing(), nullptr, nullptr, + audio_transport_factory_); } if (!rtc_peerconnection_factory_.get()) { @@ -177,11 +178,35 @@ scoped_refptr RTCPeerConnectionFactoryImpl::GetVideoDevice() { return video_device_impl_; } +webrtc::scoped_refptr +RTCPeerConnectionFactoryImpl::CreateAudioSourceWithOptions( + webrtc::AudioOptions* options, bool is_custom_source) { + RTC_DCHECK(options); + // if is_custom_source == true, not using the default audio transport, + // you can put costom audio frame via LocalAudioSource::CaptureFrame(...) 
+ // and the audio transport will be null. + // otherwise, use the default audio transport, audio transport will + // put audio frame from your platform adm to your + // LocalAudioSource::SendAudioData(...). + if (webrtc::Thread::Current() != signaling_thread_.get()) { + return signaling_thread_->BlockingCall([this, options, is_custom_source] { + return libwebrtc::LocalAudioSource::Create( + options, is_custom_source + ? nullptr + : audio_transport_factory_->audio_transport_impl()); + }); + } + return libwebrtc::LocalAudioSource::Create( + options, is_custom_source + ? nullptr + : audio_transport_factory_->audio_transport_impl()); +} + scoped_refptr RTCPeerConnectionFactoryImpl::CreateAudioSource( const string audio_source_label, RTCAudioSource::SourceType source_type) { - auto options = cricket::AudioOptions(); - rtc::scoped_refptr rtc_source_track = - custom_media_context_->CreateAudioSource(&options); + auto options = webrtc::AudioOptions(); + webrtc::scoped_refptr rtc_source_track = + CreateAudioSourceWithOptions(&options); scoped_refptr source = scoped_refptr( new RefCountedObject(rtc_source_track, source_type)); return source; @@ -201,7 +226,7 @@ RTCPeerConnectionFactoryImpl::GetDesktopDevice() { scoped_refptr RTCPeerConnectionFactoryImpl::CreateVideoSource( scoped_refptr capturer, const string video_source_label, scoped_refptr constraints) { - if (rtc::Thread::Current() != signaling_thread_.get()) { + if (webrtc::Thread::Current() != signaling_thread_.get()) { scoped_refptr source = signaling_thread_->BlockingCall( [this, capturer, video_source_label, constraints] { return CreateVideoSource_s( @@ -221,9 +246,9 @@ scoped_refptr RTCPeerConnectionFactoryImpl::CreateVideoSource_s( static_cast(capturer.get()); /*RTCMediaConstraintsImpl* media_constraints = static_cast(constraints.get());*/ - rtc::scoped_refptr rtc_source_track = - rtc::scoped_refptr( - new rtc::RefCountedObject( + webrtc::scoped_refptr rtc_source_track = + webrtc::scoped_refptr( + new webrtc::RefCountedObject( capturer_impl->video_capturer())); scoped_refptr source = scoped_refptr( new RefCountedObject(rtc_source_track)); @@ -234,7 +259,7 @@ scoped_refptr RTCPeerConnectionFactoryImpl::CreateVideoSource_s( scoped_refptr RTCPeerConnectionFactoryImpl::CreateDesktopSource( scoped_refptr capturer, const string video_source_label, scoped_refptr constraints) { - if (rtc::Thread::Current() != signaling_thread_.get()) { + if (webrtc::Thread::Current() != signaling_thread_.get()) { scoped_refptr source = signaling_thread_->BlockingCall( [this, capturer, video_source_label, constraints] { return CreateDesktopSource_d( @@ -251,9 +276,9 @@ scoped_refptr RTCPeerConnectionFactoryImpl::CreateDesktopSource_d( scoped_refptr capturer, const char* video_source_label, scoped_refptr constraints) { - rtc::scoped_refptr rtc_source_track = - rtc::scoped_refptr( - new rtc::RefCountedObject(capturer)); + webrtc::scoped_refptr rtc_source_track = + webrtc::scoped_refptr( + new webrtc::RefCountedObject(capturer)); scoped_refptr source = scoped_refptr( new RefCountedObject(rtc_source_track)); @@ -264,7 +289,7 @@ RTCPeerConnectionFactoryImpl::CreateDesktopSource_d( scoped_refptr RTCPeerConnectionFactoryImpl::CreateStream( const string stream_id) { - rtc::scoped_refptr rtc_stream = + webrtc::scoped_refptr rtc_stream = rtc_peerconnection_factory_->CreateLocalMediaStream( to_std_string(stream_id)); @@ -278,7 +303,7 @@ scoped_refptr RTCPeerConnectionFactoryImpl::CreateVideoTrack( scoped_refptr source, const string track_id) { scoped_refptr source_adapter( 
static_cast(source.get())); - rtc::scoped_refptr rtc_video_track = + webrtc::scoped_refptr rtc_video_track = rtc_peerconnection_factory_->CreateVideoTrack( source_adapter->rtc_source_track(), track_id.std_string()); @@ -302,7 +327,7 @@ scoped_refptr RTCPeerConnectionFactoryImpl::CreateAudioTrack( RTCAudioSourceImpl* source_impl = static_cast(source.get()); - rtc::scoped_refptr audio_track( + webrtc::scoped_refptr audio_track( rtc_peerconnection_factory_->CreateAudioTrack( to_std_string(track_id), source_impl->rtc_audio_source().get())); @@ -314,7 +339,7 @@ scoped_refptr RTCPeerConnectionFactoryImpl::CreateAudioTrack( scoped_refptr RTCPeerConnectionFactoryImpl::GetRtpSenderCapabilities( RTCMediaType media_type) { - if (rtc::Thread::Current() != signaling_thread_.get()) { + if (webrtc::Thread::Current() != signaling_thread_.get()) { scoped_refptr capabilities = signaling_thread_->BlockingCall([this, media_type] { return GetRtpSenderCapabilities(media_type); @@ -322,13 +347,13 @@ RTCPeerConnectionFactoryImpl::GetRtpSenderCapabilities( return capabilities; } - cricket::MediaType type = cricket::MediaType::MEDIA_TYPE_AUDIO; + webrtc::MediaType type = webrtc::MediaType::AUDIO; switch (media_type) { case RTCMediaType::AUDIO: - type = cricket::MediaType::MEDIA_TYPE_AUDIO; + type = webrtc::MediaType::AUDIO; break; case RTCMediaType::VIDEO: - type = cricket::MediaType::MEDIA_TYPE_VIDEO; + type = webrtc::MediaType::VIDEO; break; default: break; @@ -342,20 +367,20 @@ RTCPeerConnectionFactoryImpl::GetRtpSenderCapabilities( scoped_refptr RTCPeerConnectionFactoryImpl::GetRtpReceiverCapabilities( RTCMediaType media_type) { - if (rtc::Thread::Current() != signaling_thread_.get()) { + if (webrtc::Thread::Current() != signaling_thread_.get()) { scoped_refptr capabilities = signaling_thread_->BlockingCall([this, media_type] { return GetRtpSenderCapabilities(media_type); }); return capabilities; } - cricket::MediaType type = cricket::MediaType::MEDIA_TYPE_AUDIO; + webrtc::MediaType type = webrtc::MediaType::AUDIO; switch (media_type) { case RTCMediaType::AUDIO: - type = cricket::MediaType::MEDIA_TYPE_AUDIO; + type = webrtc::MediaType::AUDIO; break; case RTCMediaType::VIDEO: - type = cricket::MediaType::MEDIA_TYPE_VIDEO; + type = webrtc::MediaType::VIDEO; break; default: break; diff --git a/src/rtc_peerconnection_factory_impl.h b/src/rtc_peerconnection_factory_impl.h index 4c62b3703c..49f1d776da 100644 --- a/src/rtc_peerconnection_factory_impl.h +++ b/src/rtc_peerconnection_factory_impl.h @@ -19,7 +19,8 @@ #include "src/internal/desktop_capturer.h" #endif -#include "src/internal/custom_media_context.h" +#include "src/internal/custom_audio_transport_impl.h" +#include "src/internal/local_audio_track.h" namespace libwebrtc { @@ -68,7 +69,7 @@ class RTCPeerConnectionFactoryImpl : public RTCPeerConnectionFactory { virtual scoped_refptr CreateStream( const string stream_id) override; - rtc::scoped_refptr + webrtc::scoped_refptr peer_connection_factory() { return rtc_peerconnection_factory_; } @@ -79,13 +80,17 @@ class RTCPeerConnectionFactoryImpl : public RTCPeerConnectionFactory { scoped_refptr GetRtpReceiverCapabilities( RTCMediaType media_type) override; - rtc::Thread* signaling_thread() { return signaling_thread_.get(); } + webrtc::Thread* signaling_thread() { return signaling_thread_.get(); } protected: void CreateAudioDeviceModule_w(); void DestroyAudioDeviceModule_w(); + webrtc::scoped_refptr + CreateAudioSourceWithOptions(webrtc::AudioOptions* options, + bool is_custom_source = false); + scoped_refptr 
CreateVideoSource_s( scoped_refptr capturer, const char* video_source_label, scoped_refptr constraints); @@ -96,13 +101,12 @@ class RTCPeerConnectionFactoryImpl : public RTCPeerConnectionFactory { scoped_refptr constraints); #endif private: - std::unique_ptr worker_thread_; - std::unique_ptr signaling_thread_; - std::unique_ptr network_thread_; - rtc::scoped_refptr custom_media_context_; - rtc::scoped_refptr + std::unique_ptr worker_thread_; + std::unique_ptr signaling_thread_; + std::unique_ptr network_thread_; + webrtc::scoped_refptr rtc_peerconnection_factory_; - rtc::scoped_refptr audio_device_module_; + webrtc::scoped_refptr audio_device_module_; scoped_refptr audio_device_impl_; scoped_refptr audio_processing_impl_; scoped_refptr video_device_impl_; @@ -111,6 +115,8 @@ class RTCPeerConnectionFactoryImpl : public RTCPeerConnectionFactory { #endif std::list> peerconnections_; std::unique_ptr task_queue_factory_; + webrtc::scoped_refptr + audio_transport_factory_; }; } // namespace libwebrtc diff --git a/src/rtc_peerconnection_impl.cc b/src/rtc_peerconnection_impl.cc index f2ddf48072..b0bee9ffda 100644 --- a/src/rtc_peerconnection_impl.cc +++ b/src/rtc_peerconnection_impl.cc @@ -5,6 +5,7 @@ #include #include "api/data_channel_interface.h" +#include "api/jsep.h" #include "pc/media_session.h" #include "rtc_base/logging.h" #include "rtc_data_channel_impl.h" @@ -15,7 +16,7 @@ #include "rtc_rtp_sender_impl.h" #include "rtc_rtp_transceiver_impl.h" -using rtc::Thread; +using webrtc::Thread; static std::map @@ -132,13 +133,37 @@ static std::map Create( OnSetSdpSuccess success_callback, OnSetSdpFailure failure_callback) { - return new rtc::RefCountedObject( + return webrtc::make_ref_counted( success_callback, failure_callback); } + virtual void OnSetLocalDescriptionComplete(webrtc::RTCError error) override { + RTC_LOG(LS_INFO) << __FUNCTION__; + if (error.ok()) { + success_callback_(); + } else { + failure_callback_(error.message()); + } + } + + virtual void OnSetRemoteDescriptionComplete(webrtc::RTCError error) override { + RTC_LOG(LS_INFO) << __FUNCTION__; + if (error.ok()) { + success_callback_(); + } else { + failure_callback_(error.message()); + } + } + virtual void OnSuccess() { RTC_LOG(LS_INFO) << __FUNCTION__; success_callback_(); @@ -148,13 +173,6 @@ class SetSessionDescriptionObserverProxy failure_callback_(error.message()); } - protected: - SetSessionDescriptionObserverProxy(OnSetSdpSuccess success_callback, - OnSetSdpFailure failure_callback) - : success_callback_(success_callback), - failure_callback_(failure_callback) {} - ~SetSessionDescriptionObserverProxy() {} - private: OnSetSdpSuccess success_callback_; OnSetSdpFailure failure_callback_; @@ -166,7 +184,7 @@ class CreateSessionDescriptionObserverProxy static CreateSessionDescriptionObserverProxy* Create( OnSdpCreateSuccess success_callback, OnSdpCreateFailure failure_callback) { - return new rtc::RefCountedObject( + return new webrtc::RefCountedObject( success_callback, failure_callback); } @@ -195,7 +213,7 @@ class CreateSessionDescriptionObserverProxy RTCPeerConnectionImpl::RTCPeerConnectionImpl( const RTCConfiguration& configuration, scoped_refptr constraints, - rtc::scoped_refptr + webrtc::scoped_refptr peer_connection_factory) : rtc_peerconnection_factory_(peer_connection_factory), configuration_(configuration), @@ -211,8 +229,8 @@ RTCPeerConnectionImpl::~RTCPeerConnectionImpl() { } void RTCPeerConnectionImpl::OnAddTrack( - rtc::scoped_refptr receiver, - const std::vector>& + webrtc::scoped_refptr receiver, + const 
std::vector>& streams) { if (nullptr != observer_) { std::vector> out_streams; @@ -226,7 +244,7 @@ void RTCPeerConnectionImpl::OnAddTrack( } void RTCPeerConnectionImpl::OnTrack( - rtc::scoped_refptr transceiver) { + webrtc::scoped_refptr transceiver) { if (nullptr != observer_) { observer_->OnTrack( new RefCountedObject(transceiver)); @@ -234,7 +252,7 @@ void RTCPeerConnectionImpl::OnTrack( } void RTCPeerConnectionImpl::OnRemoveTrack( - rtc::scoped_refptr receiver) { + webrtc::scoped_refptr receiver) { if (nullptr != observer_) { observer_->OnRemoveTrack( new RefCountedObject(receiver)); @@ -243,7 +261,7 @@ void RTCPeerConnectionImpl::OnRemoveTrack( // Called when a remote stream is added void RTCPeerConnectionImpl::OnAddStream( - rtc::scoped_refptr stream) { + webrtc::scoped_refptr stream) { RTC_LOG(LS_INFO) << __FUNCTION__ << " " << stream->id(); scoped_refptr remote_stream = scoped_refptr( @@ -259,7 +277,7 @@ void RTCPeerConnectionImpl::OnAddStream( } void RTCPeerConnectionImpl::OnRemoveStream( - rtc::scoped_refptr stream) { + webrtc::scoped_refptr stream) { RTC_LOG(LS_INFO) << __FUNCTION__ << " " << stream->id(); MediaStreamImpl* recv_stream = nullptr; @@ -282,7 +300,7 @@ void RTCPeerConnectionImpl::OnRemoveStream( } void RTCPeerConnectionImpl::OnDataChannel( - rtc::scoped_refptr rtc_data_channel) { + webrtc::scoped_refptr rtc_data_channel) { data_channel_ = scoped_refptr( new RefCountedObject(rtc_data_channel)); @@ -454,9 +472,9 @@ scoped_refptr RTCPeerConnectionImpl::CreateDataChannel( init.protocol = to_std_string(dataChannelDict->protocol); init.reliable = dataChannelDict->reliable; - webrtc::RTCErrorOr> result = - rtc_peerconnection_->CreateDataChannelOrError(to_std_string(label), - &init); + webrtc::RTCErrorOr> + result = rtc_peerconnection_->CreateDataChannelOrError( + to_std_string(label), &init); if (!result.ok()) { RTC_LOG(LS_ERROR) << "CreateDataChannel failed: " @@ -477,8 +495,13 @@ void RTCPeerConnectionImpl::SetLocalDescription(const string sdp, OnSetSdpSuccess success, OnSetSdpFailure failure) { webrtc::SdpParseError error; - webrtc::SessionDescriptionInterface* session_description( - webrtc::CreateSessionDescription(to_std_string(type), to_std_string(sdp), + std::optional maybe_type = + webrtc::SdpTypeFromString(to_std_string(type)); + if (!maybe_type) { + return; + } + std::unique_ptr session_description( + webrtc::CreateSessionDescription(*maybe_type, to_std_string(sdp), &error)); if (!session_description) { @@ -487,10 +510,11 @@ void RTCPeerConnectionImpl::SetLocalDescription(const string sdp, failure(error.c_str()); return; } - - rtc_peerconnection_->SetLocalDescription( - SetSessionDescriptionObserverProxy::Create(success, failure), - session_description); + webrtc::scoped_refptr observer = + webrtc::make_ref_counted(success, + failure); + rtc_peerconnection_->SetLocalDescription(std::move(session_description), + observer); } void RTCPeerConnectionImpl::SetRemoteDescription(const string sdp, @@ -499,9 +523,14 @@ void RTCPeerConnectionImpl::SetRemoteDescription(const string sdp, OnSetSdpFailure failure) { RTC_LOG(LS_INFO) << " Received session description :" << to_std_string(sdp); webrtc::SdpParseError error; - webrtc::SessionDescriptionInterface* session_description( - webrtc::CreateSessionDescription(to_std_string(type), to_std_string(sdp), - &error)); + webrtc::SdpParseError sdp_error; + std::optional maybe_type = + webrtc::SdpTypeFromString(type.std_string()); + if (!maybe_type) { + return; + } + std::unique_ptr session_description( + 
webrtc::CreateSessionDescription(*maybe_type, sdp.std_string(), &error)); if (!session_description) { std::string error = "Can't parse received session description message."; @@ -510,18 +539,19 @@ void RTCPeerConnectionImpl::SetRemoteDescription(const string sdp, return; } - cricket::MediaContentDescription* content_desc = + webrtc::MediaContentDescription* content_desc = session_description->description()->GetContentDescriptionByName("video"); - cricket::MediaContentDescription* media_content_desc = - (cricket::MediaContentDescription*)content_desc; + webrtc::MediaContentDescription* media_content_desc = + (webrtc::MediaContentDescription*)content_desc; if (media_content_desc && configuration_.local_video_bandwidth > 0) media_content_desc->set_bandwidth(configuration_.local_video_bandwidth * 1000); - - rtc_peerconnection_->SetRemoteDescription( - SetSessionDescriptionObserverProxy::Create(success, failure), - session_description); + webrtc::scoped_refptr + observer = webrtc::make_ref_counted( + success, failure); + rtc_peerconnection_->SetRemoteDescription(std::move(session_description), + observer); return; } @@ -639,7 +669,7 @@ void RTCPeerConnectionImpl::Close() { int RTCPeerConnectionImpl::AddStream(scoped_refptr stream) { MediaStreamImpl* send_stream = static_cast(stream.get()); - rtc::scoped_refptr rtc_media_stream = + webrtc::scoped_refptr rtc_media_stream = send_stream->rtc_media_stream(); send_stream->RegisterRTCPeerConnectionObserver(observer_); @@ -659,7 +689,7 @@ int RTCPeerConnectionImpl::AddStream(scoped_refptr stream) { int RTCPeerConnectionImpl::RemoveStream(scoped_refptr stream) { MediaStreamImpl* send_stream = static_cast(stream.get()); - rtc::scoped_refptr rtc_media_stream = + webrtc::scoped_refptr rtc_media_stream = send_stream->rtc_media_stream(); if (std::find(local_streams_.begin(), local_streams_.end(), stream) == @@ -688,7 +718,7 @@ scoped_refptr RTCPeerConnectionImpl::CreateLocalMediaStream( bool RTCPeerConnectionImpl::GetStats(scoped_refptr sender, OnStatsCollectorSuccess success, OnStatsCollectorFailure failure) { - rtc::scoped_refptr rtc_callback = + webrtc::scoped_refptr rtc_callback = WebRTCStatsCollectorCallback::Create(success, failure); if (!rtc_peerconnection_.get() || !rtc_peerconnection_factory_.get()) { webrtc::MutexLock cs(callback_crt_sec_.get()); @@ -703,7 +733,7 @@ bool RTCPeerConnectionImpl::GetStats(scoped_refptr sender, bool RTCPeerConnectionImpl::GetStats(scoped_refptr receiver, OnStatsCollectorSuccess success, OnStatsCollectorFailure failure) { - rtc::scoped_refptr rtc_callback = + webrtc::scoped_refptr rtc_callback = WebRTCStatsCollectorCallback::Create(success, failure); if (!rtc_peerconnection_.get() || !rtc_peerconnection_factory_.get()) { webrtc::MutexLock cs(callback_crt_sec_.get()); @@ -717,7 +747,7 @@ bool RTCPeerConnectionImpl::GetStats(scoped_refptr receiver, void RTCPeerConnectionImpl::GetStats(OnStatsCollectorSuccess success, OnStatsCollectorFailure failure) { - rtc::scoped_refptr rtc_callback = + webrtc::scoped_refptr rtc_callback = WebRTCStatsCollectorCallback::Create(success, failure); if (!rtc_peerconnection_.get() || !rtc_peerconnection_factory_.get()) { webrtc::MutexLock cs(callback_crt_sec_.get()); @@ -733,7 +763,7 @@ scoped_refptr RTCPeerConnectionImpl::AddTransceiver( RTCRtpTransceiverInitImpl* initImpl = static_cast(init.get()); - webrtc::RTCErrorOr> + webrtc::RTCErrorOr> errorOr; std::string kind = to_std_string(track->kind()); if (0 == kind.compare(webrtc::MediaStreamTrackInterface::kVideoKind)) { @@ -755,7 +785,7 @@ 
scoped_refptr RTCPeerConnectionImpl::AddTransceiver( scoped_refptr RTCPeerConnectionImpl::AddTransceiver( scoped_refptr track) { - webrtc::RTCErrorOr> + webrtc::RTCErrorOr> errorOr; std::string kind = to_std_string(track->kind()); if (0 == kind.compare(webrtc::MediaStreamTrackInterface::kVideoKind)) { @@ -775,14 +805,12 @@ scoped_refptr RTCPeerConnectionImpl::AddTransceiver( scoped_refptr RTCPeerConnectionImpl::AddTransceiver( RTCMediaType media_type) { - webrtc::RTCErrorOr> + webrtc::RTCErrorOr> errorOr; if (media_type == RTCMediaType::AUDIO) { - errorOr = rtc_peerconnection_->AddTransceiver( - cricket::MediaType::MEDIA_TYPE_AUDIO); + errorOr = rtc_peerconnection_->AddTransceiver(webrtc::MediaType::AUDIO); } else if (media_type == RTCMediaType::VIDEO) { - errorOr = rtc_peerconnection_->AddTransceiver( - cricket::MediaType::MEDIA_TYPE_VIDEO); + errorOr = rtc_peerconnection_->AddTransceiver(webrtc::MediaType::VIDEO); } if (errorOr.ok()) { return new RefCountedObject(errorOr.value()); @@ -795,14 +823,14 @@ scoped_refptr RTCPeerConnectionImpl::AddTransceiver( RTCMediaType media_type, scoped_refptr init) { RTCRtpTransceiverInitImpl* initImpl = static_cast(init.get()); - webrtc::RTCErrorOr> + webrtc::RTCErrorOr> errorOr; if (media_type == RTCMediaType::AUDIO) { errorOr = rtc_peerconnection_->AddTransceiver( - cricket::MediaType::MEDIA_TYPE_AUDIO, initImpl->rtp_transceiver_init()); + webrtc::MediaType::AUDIO, initImpl->rtp_transceiver_init()); } else if (media_type == RTCMediaType::VIDEO) { errorOr = rtc_peerconnection_->AddTransceiver( - cricket::MediaType::MEDIA_TYPE_VIDEO, initImpl->rtp_transceiver_init()); + webrtc::MediaType::VIDEO, initImpl->rtp_transceiver_init()); } if (errorOr.ok()) { return new RefCountedObject(errorOr.value()); @@ -813,7 +841,7 @@ scoped_refptr RTCPeerConnectionImpl::AddTransceiver( scoped_refptr RTCPeerConnectionImpl::AddTrack( scoped_refptr track, vector streamIds) { - webrtc::RTCErrorOr> errorOr; + webrtc::RTCErrorOr> errorOr; std::vector stream_ids; for (auto id : streamIds.std_vector()) { @@ -894,7 +922,7 @@ RTCIceGatheringState RTCPeerConnectionImpl::ice_gathering_state() { } void WebRTCStatsCollectorCallback::OnStatsDelivered( - const rtc::scoped_refptr& report) { + const webrtc::scoped_refptr& report) { webrtc::RTCStatsReport::ConstIterator iter = report->begin(); std::vector> reports; while (iter != report->end()) { diff --git a/src/rtc_peerconnection_impl.h b/src/rtc_peerconnection_impl.h index 7920bd5fc3..c9c5d72fef 100644 --- a/src/rtc_peerconnection_impl.h +++ b/src/rtc_peerconnection_impl.h @@ -139,31 +139,31 @@ class RTCPeerConnectionImpl : public RTCPeerConnection, RTCPeerConnectionImpl( const RTCConfiguration& configuration, scoped_refptr constraints, - rtc::scoped_refptr + webrtc::scoped_refptr peer_connection_factory); protected: ~RTCPeerConnectionImpl(); virtual void OnAddTrack( - rtc::scoped_refptr receiver, - const std::vector>& + webrtc::scoped_refptr receiver, + const std::vector>& streams) override; virtual void OnTrack( - rtc::scoped_refptr transceiver) override; + webrtc::scoped_refptr transceiver) override; virtual void OnRemoveTrack( - rtc::scoped_refptr receiver) override; + webrtc::scoped_refptr receiver) override; virtual void OnAddStream( - rtc::scoped_refptr stream) override; + webrtc::scoped_refptr stream) override; virtual void OnRemoveStream( - rtc::scoped_refptr stream) override; + webrtc::scoped_refptr stream) override; virtual void OnDataChannel( - rtc::scoped_refptr data_channel) override; + webrtc::scoped_refptr data_channel) 
override; virtual void OnRenegotiationNeeded() override; @@ -183,9 +183,9 @@ class RTCPeerConnectionImpl : public RTCPeerConnection, webrtc::PeerConnectionInterface::SignalingState new_state) override; protected: - rtc::scoped_refptr + webrtc::scoped_refptr rtc_peerconnection_factory_; - rtc::scoped_refptr rtc_peerconnection_; + webrtc::scoped_refptr rtc_peerconnection_; const RTCConfiguration& configuration_; scoped_refptr constraints_; webrtc::PeerConnectionInterface::RTCOfferAnswerOptions offer_answer_options_; diff --git a/src/rtc_rtp_capabilities_impl.cc b/src/rtc_rtp_capabilities_impl.cc index ce502f609d..0f1594ea44 100644 --- a/src/rtc_rtp_capabilities_impl.cc +++ b/src/rtc_rtp_capabilities_impl.cc @@ -94,13 +94,13 @@ string RTCRtpCodecCapabilityImpl::mime_type() const { void RTCRtpCodecCapabilityImpl::set_mime_type(const string& mime_type) { std::vector mime_type_split = split(mime_type.std_string(), "/"); rtp_codec_capability_.name = mime_type_split[1]; - cricket::MediaType kind = cricket::MEDIA_TYPE_AUDIO; + webrtc::MediaType kind = webrtc::MediaType::ANY; if (mime_type_split[0] == "audio") { - kind = cricket::MEDIA_TYPE_AUDIO; + kind = webrtc::MediaType::AUDIO; } else if (mime_type_split[0] == "video") { - kind = cricket::MEDIA_TYPE_VIDEO; - } else { - kind = cricket::MEDIA_TYPE_DATA; + kind = webrtc::MediaType::VIDEO; + } else if (mime_type_split[0] == "data") { + kind = webrtc::MediaType::DATA; } rtp_codec_capability_.kind = kind; } diff --git a/src/rtc_rtp_parameters_impl.cc b/src/rtc_rtp_parameters_impl.cc index ac4aec8a25..0dc33c55a6 100644 --- a/src/rtc_rtp_parameters_impl.cc +++ b/src/rtc_rtp_parameters_impl.cc @@ -356,7 +356,7 @@ RTCMediaType RTCRtpCodecParametersImpl::kind() { } void RTCRtpCodecParametersImpl::set_kind(RTCMediaType value) { - rtp_codec_parameters_.kind = static_cast(value); + rtp_codec_parameters_.kind = static_cast(value); } int RTCRtpCodecParametersImpl::payload_type() { diff --git a/src/rtc_rtp_receiver_impl.cc b/src/rtc_rtp_receiver_impl.cc index 615d96509f..0298fb1a69 100644 --- a/src/rtc_rtp_receiver_impl.cc +++ b/src/rtc_rtp_receiver_impl.cc @@ -9,33 +9,33 @@ namespace libwebrtc { RTCRtpReceiverImpl::RTCRtpReceiverImpl( - rtc::scoped_refptr rtp_receiver) + webrtc::scoped_refptr rtp_receiver) : rtp_receiver_(rtp_receiver), observer_(nullptr) {} -rtc::scoped_refptr +webrtc::scoped_refptr RTCRtpReceiverImpl::rtp_receiver() { return rtp_receiver_; } -void RTCRtpReceiverImpl::OnFirstPacketReceived(cricket::MediaType media_type) { +void RTCRtpReceiverImpl::OnFirstPacketReceived(webrtc::MediaType media_type) { if (nullptr != observer_) { observer_->OnFirstPacketReceived(static_cast(media_type)); } } scoped_refptr RTCRtpReceiverImpl::track() const { - rtc::scoped_refptr track = + webrtc::scoped_refptr track = rtp_receiver_->track(); if (nullptr == track.get()) { return scoped_refptr(); } if (track->kind() == webrtc::MediaStreamTrackInterface::kVideoKind) { return scoped_refptr(new RefCountedObject( - rtc::scoped_refptr( + webrtc::scoped_refptr( static_cast(track.get())))); } else if (track->kind() == webrtc::MediaStreamTrackInterface::kAudioKind) { return scoped_refptr(new RefCountedObject( - rtc::scoped_refptr( + webrtc::scoped_refptr( static_cast(track.get())))); } return scoped_refptr(); diff --git a/src/rtc_rtp_receiver_impl.h b/src/rtc_rtp_receiver_impl.h index 986956ce14..f9fbfacd1b 100644 --- a/src/rtc_rtp_receiver_impl.h +++ b/src/rtc_rtp_receiver_impl.h @@ -1,6 +1,7 @@ #ifndef LIB_WEBRTC_RTC_RTP_RECEIVER_IMPL_HXX #define 
LIB_WEBRTC_RTC_RTP_RECEIVER_IMPL_HXX +#include "api/media_types.h" #include "api/rtp_receiver_interface.h" #include "rtc_rtp_receiver.h" @@ -9,7 +10,7 @@ class RTCRtpReceiverImpl : public RTCRtpReceiver, webrtc::RtpReceiverObserverInterface { public: RTCRtpReceiverImpl( - rtc::scoped_refptr rtp_receiver); + webrtc::scoped_refptr rtp_receiver); virtual scoped_refptr track() const override; virtual scoped_refptr dtls_transport() const override; @@ -22,13 +23,13 @@ class RTCRtpReceiverImpl : public RTCRtpReceiver, scoped_refptr parameters) override; virtual void SetObserver(RTCRtpReceiverObserver* observer) override; virtual void SetJitterBufferMinimumDelay(double delay_seconds) override; - rtc::scoped_refptr rtp_receiver(); + webrtc::scoped_refptr rtp_receiver(); private: - rtc::scoped_refptr rtp_receiver_; + webrtc::scoped_refptr rtp_receiver_; RTCRtpReceiverObserver* observer_; - virtual void OnFirstPacketReceived(cricket::MediaType media_type) override; + virtual void OnFirstPacketReceived(webrtc::MediaType media_type) override; }; // namespace libwebrtc diff --git a/src/rtc_rtp_sender_impl.cc b/src/rtc_rtp_sender_impl.cc index 5785b9c370..b218e89af6 100644 --- a/src/rtc_rtp_sender_impl.cc +++ b/src/rtc_rtp_sender_impl.cc @@ -8,7 +8,7 @@ namespace libwebrtc { RTCRtpSenderImpl::RTCRtpSenderImpl( - rtc::scoped_refptr rtp_sender) + webrtc::scoped_refptr rtp_sender) : rtp_sender_(rtp_sender) {} bool RTCRtpSenderImpl::set_track(scoped_refptr track) { @@ -28,7 +28,7 @@ bool RTCRtpSenderImpl::set_track(scoped_refptr track) { } scoped_refptr RTCRtpSenderImpl::track() const { - rtc::scoped_refptr track = + webrtc::scoped_refptr track = rtp_sender_->track(); if (nullptr == track.get()) { @@ -37,12 +37,12 @@ scoped_refptr RTCRtpSenderImpl::track() const { if (track->kind() == webrtc::MediaStreamTrackInterface::kVideoKind) { return scoped_refptr(new RefCountedObject( - rtc::scoped_refptr( + webrtc::scoped_refptr( static_cast(track.get())))); } else if (track->kind() == webrtc::MediaStreamTrackInterface::kAudioKind) { return scoped_refptr(new RefCountedObject( - rtc::scoped_refptr( - rtc::scoped_refptr( + webrtc::scoped_refptr( + webrtc::scoped_refptr( static_cast(track.get()))))); } return scoped_refptr(); diff --git a/src/rtc_rtp_sender_impl.h b/src/rtc_rtp_sender_impl.h index a8660a3af3..2008006043 100644 --- a/src/rtc_rtp_sender_impl.h +++ b/src/rtc_rtp_sender_impl.h @@ -9,7 +9,7 @@ namespace libwebrtc { class RTCRtpSenderImpl : public RTCRtpSender { public: - RTCRtpSenderImpl(rtc::scoped_refptr rtp_sender); + RTCRtpSenderImpl(webrtc::scoped_refptr rtp_sender); virtual bool set_track(scoped_refptr track) override; virtual scoped_refptr track() const override; @@ -26,12 +26,12 @@ class RTCRtpSenderImpl : public RTCRtpSender { const scoped_refptr parameters) override; virtual scoped_refptr dtmf_sender() const override; - rtc::scoped_refptr rtc_rtp_sender() { + webrtc::scoped_refptr rtc_rtp_sender() { return rtp_sender_; } private: - rtc::scoped_refptr rtp_sender_; + webrtc::scoped_refptr rtp_sender_; }; } // namespace libwebrtc diff --git a/src/rtc_rtp_transceiver_impl.cc b/src/rtc_rtp_transceiver_impl.cc index 301d78691f..5802b622c7 100644 --- a/src/rtc_rtp_transceiver_impl.cc +++ b/src/rtc_rtp_transceiver_impl.cc @@ -69,10 +69,10 @@ void RTCRtpTransceiverInitImpl::set_send_encodings( } RTCRtpTransceiverImpl::RTCRtpTransceiverImpl( - rtc::scoped_refptr rtp_transceiver) + webrtc::scoped_refptr rtp_transceiver) : rtp_transceiver_(rtp_transceiver) {} -rtc::scoped_refptr +webrtc::scoped_refptr 
libwebrtc::RTCRtpTransceiverImpl::rtp_transceiver() { return rtp_transceiver_; } diff --git a/src/rtc_rtp_transceiver_impl.h b/src/rtc_rtp_transceiver_impl.h index d04297d011..f4cac3e948 100644 --- a/src/rtc_rtp_transceiver_impl.h +++ b/src/rtc_rtp_transceiver_impl.h @@ -31,7 +31,7 @@ class RTCRtpTransceiverInitImpl : public RTCRtpTransceiverInit { class RTCRtpTransceiverImpl : public RTCRtpTransceiver { public: RTCRtpTransceiverImpl( - rtc::scoped_refptr rtp_transceiver); + webrtc::scoped_refptr rtp_transceiver); virtual RTCMediaType media_type() const override; virtual const string mid() const override; @@ -49,10 +49,10 @@ class RTCRtpTransceiverImpl : public RTCRtpTransceiver { virtual void SetCodecPreferences( vector> codecs) override; virtual const string transceiver_id() const override; - rtc::scoped_refptr rtp_transceiver(); + webrtc::scoped_refptr rtp_transceiver(); private: - rtc::scoped_refptr rtp_transceiver_; + webrtc::scoped_refptr rtp_transceiver_; }; } // namespace libwebrtc diff --git a/src/rtc_session_description_impl.cc b/src/rtc_session_description_impl.cc index b2345116b8..8290ad9240 100644 --- a/src/rtc_session_description_impl.cc +++ b/src/rtc_session_description_impl.cc @@ -1,12 +1,18 @@ #include "rtc_session_description_impl.h" +#include "api/jsep.h" + namespace libwebrtc { scoped_refptr RTCSessionDescription::Create( const string type, const string sdp, SdpParseError* error) { webrtc::SdpParseError sdp_error; + std::optional maybe_type = webrtc::SdpTypeFromString(to_std_string(type)); + if (!maybe_type) { + return nullptr; + } std::unique_ptr rtc_description( - webrtc::CreateSessionDescription(to_std_string(type), to_std_string(sdp), + webrtc::CreateSessionDescription(*maybe_type, to_std_string(sdp), &sdp_error)); error->description = sdp_error.description; error->line = sdp_error.line; diff --git a/src/rtc_video_device_impl.cc b/src/rtc_video_device_impl.cc index 20fb103eb3..9f059a3a57 100644 --- a/src/rtc_video_device_impl.cc +++ b/src/rtc_video_device_impl.cc @@ -4,7 +4,7 @@ namespace libwebrtc { -RTCVideoDeviceImpl::RTCVideoDeviceImpl(rtc::Thread* worker_thread) +RTCVideoDeviceImpl::RTCVideoDeviceImpl(webrtc::Thread* worker_thread) : device_info_(webrtc::VideoCaptureFactory::CreateDeviceInfo()), worker_thread_(worker_thread) {} diff --git a/src/rtc_video_device_impl.h b/src/rtc_video_device_impl.h index 9ff22be32d..2f28cb5509 100644 --- a/src/rtc_video_device_impl.h +++ b/src/rtc_video_device_impl.h @@ -38,7 +38,7 @@ class RTCVideoCapturerImpl : public RTCVideoCapturer { class RTCVideoDeviceImpl : public RTCVideoDevice { public: - RTCVideoDeviceImpl(rtc::Thread* worker_thread); + RTCVideoDeviceImpl(webrtc::Thread* worker_thread); public: uint32_t NumberOfDevices() override; @@ -55,7 +55,7 @@ class RTCVideoDeviceImpl : public RTCVideoDevice { private: std::unique_ptr device_info_; - rtc::Thread* worker_thread_ = nullptr; + webrtc::Thread* worker_thread_ = nullptr; }; } // namespace libwebrtc diff --git a/src/rtc_video_frame_impl.cc b/src/rtc_video_frame_impl.cc index 3c14db17a3..a692ec19b5 100644 --- a/src/rtc_video_frame_impl.cc +++ b/src/rtc_video_frame_impl.cc @@ -9,11 +9,11 @@ namespace libwebrtc { VideoFrameBufferImpl::VideoFrameBufferImpl( - rtc::scoped_refptr frame_buffer) + webrtc::scoped_refptr frame_buffer) : buffer_(frame_buffer) {} VideoFrameBufferImpl::VideoFrameBufferImpl( - rtc::scoped_refptr frame_buffer) + webrtc::scoped_refptr frame_buffer) : buffer_(frame_buffer) {} VideoFrameBufferImpl::~VideoFrameBufferImpl() {} @@ -56,10 +56,10 @@ int 
VideoFrameBufferImpl::StrideV() const { int VideoFrameBufferImpl::ConvertToARGB(Type type, uint8_t* dst_buffer, int dst_stride, int dest_width, int dest_height) { - rtc::scoped_refptr i420 = + webrtc::scoped_refptr i420 = webrtc::I420Buffer::Rotate(*buffer_.get(), rotation_); - rtc::scoped_refptr dest = + webrtc::scoped_refptr dest = webrtc::I420Buffer::Create(dest_width, dest_height); dest->ScaleFrom(*i420.get()); @@ -127,7 +127,7 @@ scoped_refptr RTCVideoFrame::Create(int width, int height, const uint8_t* data_u = buffer + size_y; const uint8_t* data_v = buffer + size_y + size_u; - rtc::scoped_refptr i420_buffer = webrtc::I420Buffer::Copy( + webrtc::scoped_refptr i420_buffer = webrtc::I420Buffer::Copy( width, height, data_y, stride_y, data_u, stride_uv, data_v, stride_uv); scoped_refptr frame = @@ -139,7 +139,7 @@ scoped_refptr RTCVideoFrame::Create(int width, int height, scoped_refptr RTCVideoFrame::Create( int width, int height, const uint8_t* data_y, int stride_y, const uint8_t* data_u, int stride_u, const uint8_t* data_v, int stride_v) { - rtc::scoped_refptr i420_buffer = webrtc::I420Buffer::Copy( + webrtc::scoped_refptr i420_buffer = webrtc::I420Buffer::Copy( width, height, data_y, stride_y, data_u, stride_u, data_v, stride_v); scoped_refptr frame = diff --git a/src/rtc_video_frame_impl.h b/src/rtc_video_frame_impl.h index 3af6845abb..e8cbbca626 100644 --- a/src/rtc_video_frame_impl.h +++ b/src/rtc_video_frame_impl.h @@ -12,8 +12,8 @@ namespace libwebrtc { class VideoFrameBufferImpl : public RTCVideoFrame { public: VideoFrameBufferImpl( - rtc::scoped_refptr frame_buffer); - VideoFrameBufferImpl(rtc::scoped_refptr frame_buffer); + webrtc::scoped_refptr frame_buffer); + VideoFrameBufferImpl(webrtc::scoped_refptr frame_buffer); virtual ~VideoFrameBufferImpl(); @@ -39,9 +39,9 @@ class VideoFrameBufferImpl : public RTCVideoFrame { int ConvertToARGB(Type type, uint8_t* dst_argb, int dst_stride_argb, int dest_width, int dest_height) override; - rtc::scoped_refptr buffer() { return buffer_; } + webrtc::scoped_refptr buffer() { return buffer_; } - // System monotonic clock, same timebase as rtc::TimeMicros(). + // System monotonic clock, same timebase as webrtc::TimeMicros(). 
int64_t timestamp_us() const { return timestamp_us_; } void set_timestamp_us(int64_t timestamp_us) { timestamp_us_ = timestamp_us; } @@ -52,7 +52,7 @@ class VideoFrameBufferImpl : public RTCVideoFrame { void set_rotation(webrtc::VideoRotation rotation) { rotation_ = rotation; } private: - rtc::scoped_refptr buffer_; + webrtc::scoped_refptr buffer_; int64_t timestamp_us_ = 0; webrtc::VideoRotation rotation_ = webrtc::kVideoRotation_0; }; diff --git a/src/rtc_video_sink_adapter.cc b/src/rtc_video_sink_adapter.cc index bd099e5c6b..4ae373f0ec 100644 --- a/src/rtc_video_sink_adapter.cc +++ b/src/rtc_video_sink_adapter.cc @@ -7,9 +7,9 @@ namespace libwebrtc { VideoSinkAdapter::VideoSinkAdapter( - rtc::scoped_refptr track) + webrtc::scoped_refptr track) : rtc_track_(track), crt_sec_(new webrtc::Mutex()) { - rtc_track_->AddOrUpdateSink(this, rtc::VideoSinkWants()); + rtc_track_->AddOrUpdateSink(this, webrtc::VideoSinkWants()); RTC_LOG(LS_INFO) << __FUNCTION__ << ": ctor " << (void*)this; } @@ -56,11 +56,11 @@ void VideoSinkAdapter::RemoveRenderer( } void VideoSinkAdapter::AddRenderer( - rtc::VideoSinkInterface* renderer) { - rtc_track_->AddOrUpdateSink(renderer, rtc::VideoSinkWants()); + webrtc::VideoSinkInterface* renderer) { + rtc_track_->AddOrUpdateSink(renderer, webrtc::VideoSinkWants()); } void VideoSinkAdapter::RemoveRenderer( - rtc::VideoSinkInterface* renderer) { + webrtc::VideoSinkInterface* renderer) { rtc_track_->RemoveSink(renderer); } diff --git a/src/rtc_video_sink_adapter.h b/src/rtc_video_sink_adapter.h index e58bd895df..ec510248a0 100644 --- a/src/rtc_video_sink_adapter.h +++ b/src/rtc_video_sink_adapter.h @@ -9,10 +9,10 @@ namespace libwebrtc { -class VideoSinkAdapter : public rtc::VideoSinkInterface, +class VideoSinkAdapter : public webrtc::VideoSinkInterface, public RefCountInterface { public: - VideoSinkAdapter(rtc::scoped_refptr track); + VideoSinkAdapter(webrtc::scoped_refptr track); ~VideoSinkAdapter() override; virtual void AddRenderer( @@ -22,15 +22,15 @@ class VideoSinkAdapter : public rtc::VideoSinkInterface, RTCVideoRenderer>* renderer); virtual void AddRenderer( - rtc::VideoSinkInterface* renderer); + webrtc::VideoSinkInterface* renderer); virtual void RemoveRenderer( - rtc::VideoSinkInterface* renderer); + webrtc::VideoSinkInterface* renderer); protected: // VideoSinkInterface implementation void OnFrame(const webrtc::VideoFrame& frame) override; - rtc::scoped_refptr rtc_track_; + webrtc::scoped_refptr rtc_track_; std::unique_ptr crt_sec_; std::vector>*> renderers_; }; diff --git a/src/rtc_video_source_impl.cc b/src/rtc_video_source_impl.cc index ef4ec9c5a6..b1044b40c3 100644 --- a/src/rtc_video_source_impl.cc +++ b/src/rtc_video_source_impl.cc @@ -7,7 +7,7 @@ namespace libwebrtc { RTCVideoSourceImpl::RTCVideoSourceImpl( - rtc::scoped_refptr rtc_source_track) + webrtc::scoped_refptr rtc_source_track) : rtc_source_track_(rtc_source_track) { RTC_LOG(LS_INFO) << __FUNCTION__ << ": ctor "; } diff --git a/src/rtc_video_source_impl.h b/src/rtc_video_source_impl.h index 568355701c..9eebf97378 100644 --- a/src/rtc_video_source_impl.h +++ b/src/rtc_video_source_impl.h @@ -14,16 +14,16 @@ namespace libwebrtc { class RTCVideoSourceImpl : public RTCVideoSource { public: RTCVideoSourceImpl( - rtc::scoped_refptr video_source_track); + webrtc::scoped_refptr video_source_track); virtual ~RTCVideoSourceImpl(); - virtual rtc::scoped_refptr + virtual webrtc::scoped_refptr rtc_source_track() { return rtc_source_track_; } private: - rtc::scoped_refptr rtc_source_track_; + 
webrtc::scoped_refptr rtc_source_track_; }; } // namespace libwebrtc diff --git a/src/rtc_video_track_impl.cc b/src/rtc_video_track_impl.cc index 329fecb4a5..0c3d78b3fd 100644 --- a/src/rtc_video_track_impl.cc +++ b/src/rtc_video_track_impl.cc @@ -6,7 +6,7 @@ namespace libwebrtc { VideoTrackImpl::VideoTrackImpl( - rtc::scoped_refptr rtc_track) + webrtc::scoped_refptr rtc_track) : rtc_track_(rtc_track), video_sink_(new RefCountedObject(rtc_track)) { RTC_LOG(LS_INFO) << __FUNCTION__ << ": ctor "; diff --git a/src/rtc_video_track_impl.h b/src/rtc_video_track_impl.h index c090cf7502..4b083e7bbd 100644 --- a/src/rtc_video_track_impl.h +++ b/src/rtc_video_track_impl.h @@ -10,11 +10,11 @@ namespace libwebrtc { -typedef std::vector> VideoSourceVector; +typedef std::vector> VideoSourceVector; class VideoTrackImpl : public RTCVideoTrack { public: - VideoTrackImpl(rtc::scoped_refptr rtc_track); + VideoTrackImpl(webrtc::scoped_refptr rtc_track); virtual ~VideoTrackImpl(); @@ -34,7 +34,7 @@ class VideoTrackImpl : public RTCVideoTrack { return rtc_track_->set_enabled(enable); } - virtual rtc::scoped_refptr rtc_track() { + virtual webrtc::scoped_refptr rtc_track() { return rtc_track_; } @@ -43,7 +43,7 @@ class VideoTrackImpl : public RTCVideoTrack { } private: - rtc::scoped_refptr rtc_track_; + webrtc::scoped_refptr rtc_track_; scoped_refptr video_source_; scoped_refptr video_sink_; string id_, kind_; diff --git a/src/win/codecutils.cc b/src/win/codecutils.cc index 6d256158bd..efc8cdfbc7 100644 --- a/src/win/codecutils.cc +++ b/src/win/codecutils.cc @@ -20,10 +20,10 @@ webrtc::SdpVideoFormat CreateH264Format(webrtc::H264Profile profile, webrtc::H264ProfileLevelIdToString( webrtc::H264ProfileLevelId(profile, level)); return webrtc::SdpVideoFormat( - cricket::kH264CodecName, - {{cricket::kH264FmtpProfileLevelId, *profile_string}, - {cricket::kH264FmtpLevelAsymmetryAllowed, "1"}, - {cricket::kH264FmtpPacketizationMode, packetization_mode}}); + webrtc::kH264CodecName, + {{webrtc::kH264FmtpProfileLevelId, *profile_string}, + {webrtc::kH264FmtpLevelAsymmetryAllowed, "1"}, + {webrtc::kH264FmtpPacketizationMode, packetization_mode}}); } std::vector CodecUtils::SupportedH264Codecs() { @@ -39,14 +39,14 @@ std::vector CodecUtils::SupportedH264Codecs() { webrtc::VideoCodecType CodecUtils::ConvertSdpFormatToCodecType( webrtc::SdpVideoFormat format) { - if (absl::EqualsIgnoreCase(format.name, cricket::kVp8CodecName)) { + if (absl::EqualsIgnoreCase(format.name, webrtc::kVp8CodecName)) { return webrtc::kVideoCodecVP8; - } else if (absl::EqualsIgnoreCase(format.name, cricket::kVp9CodecName)) { + } else if (absl::EqualsIgnoreCase(format.name, webrtc::kVp9CodecName)) { return webrtc::kVideoCodecVP9; - } else if (absl::EqualsIgnoreCase(format.name, cricket::kH264CodecName)) { + } else if (absl::EqualsIgnoreCase(format.name, webrtc::kH264CodecName)) { return webrtc::kVideoCodecH264; - } else if (absl::EqualsIgnoreCase(format.name, cricket::kAv1CodecName)) { + } else if (absl::EqualsIgnoreCase(format.name, webrtc::kAv1CodecName)) { return webrtc::kVideoCodecAV1; } else { return webrtc::kVideoCodecGeneric; diff --git a/src/win/d3d11_manager.h b/src/win/d3d11_manager.h index 4a18333887..570b5fd41c 100644 --- a/src/win/d3d11_manager.h +++ b/src/win/d3d11_manager.h @@ -17,7 +17,7 @@ namespace owt { namespace base { -class D3D11Manager : public rtc::RefCountInterface { +class D3D11Manager : public webrtc::RefCountInterface { public: D3D11Manager() : manager_(nullptr), diff --git a/src/win/mediautils.cc b/src/win/mediautils.cc 
index 1505ac96dd..ade1ad6066 100644 --- a/src/win/mediautils.cc +++ b/src/win/mediautils.cc @@ -136,7 +136,7 @@ bool MediaUtils::GetH264TemporalInfo(uint8_t* buffer, size_t buffer_length, } absl::optional StringToAV1Profile(const std::string& str) { - const absl::optional i = rtc::StringToNumber(str); + const absl::optional i = webrtc::StringToNumber(str); if (!i.has_value()) return absl::nullopt; switch (i.value()) { @@ -154,7 +154,7 @@ absl::optional StringToAV1Profile(const std::string& str) { absl::optional StringToH265Profile(const std::string& str) { #ifdef OWT_USE_MSDK - const absl::optional i = rtc::StringToNumber(str); + const absl::optional i = webrtc::StringToNumber(str); if (!i.has_value()) return absl::nullopt; // See ISO/IEC-23008-2 section A.3.5. we use the general_profile_idc // as the profile-id per RFC 7798. diff --git a/src/win/msdkvideodecoder.cc b/src/win/msdkvideodecoder.cc index 6d69e30cad..5365f178f9 100644 --- a/src/win/msdkvideodecoder.cc +++ b/src/win/msdkvideodecoder.cc @@ -37,9 +37,9 @@ int32_t MSDKVideoDecoder::Release() { MSDKVideoDecoder::MSDKVideoDecoder() : width_(0), height_(0) - //,decoder_thread_(new rtc::Thread(rtc::SocketServer::CreateDefault())) + //,decoder_thread_(new webrtc::Thread(webrtc::SocketServer::CreateDefault())) , - decoder_thread_(rtc::Thread::Create()) { + decoder_thread_(webrtc::Thread::Create()) { decoder_thread_->SetName("MSDKVideoDecoderThread", nullptr); RTC_CHECK(decoder_thread_->Start()) << "Failed to start MSDK video decoder thread"; @@ -63,7 +63,7 @@ MSDKVideoDecoder::~MSDKVideoDecoder() { void MSDKVideoDecoder::CheckOnCodecThread() { RTC_CHECK(decoder_thread_.get() == - rtc::ThreadManager::Instance()->CurrentThread()) + webrtc::ThreadManager::Instance()->CurrentThread()) << "Running on wrong thread!"; } @@ -367,7 +367,7 @@ int32_t MSDKVideoDecoder::Decode(const webrtc::EncodedImage& inputImage, m_pmfx_allocator_->GetFrameHDL(dxMemId, (mfxHDL*)&pair); #if 0 - rtc::scoped_refptr cropped_buffer = + webrtc::scoped_refptr cropped_buffer = WrapI420Buffer(frame_info.Width, frame_info.Height, av_frame_->data[kYPlaneIndex], av_frame_->linesize[kYPlaneIndex], @@ -392,8 +392,8 @@ int32_t MSDKVideoDecoder::Decode(const webrtc::EncodedImage& inputImage, // TODO(johny): we should extend the buffer structure to include // not only the CropW|CropH value, but also the CropX|CropY for the // renderer to correctly setup the video processor input view. 
-    rtc::scoped_refptr buffer =
-        new rtc::RefCountedObject(
+    webrtc::scoped_refptr buffer =
+        new webrtc::RefCountedObject(
             (void*)surface_handle_.get(), frame_info.CropW, frame_info.CropH);
     webrtc::VideoFrame decoded_frame(buffer, inputImage.Timestamp(), 0,
@@ -494,7 +494,7 @@ int32_t MSDKVideoDecoder::RegisterDecodeCompleteCallback(
 }
 std::unique_ptr MSDKVideoDecoder::Create(
-    cricket::VideoCodec format) {
+    webrtc::VideoCodec format) {
   return absl::make_unique();
 }
diff --git a/src/win/msdkvideodecoder.h b/src/win/msdkvideodecoder.h
index 7abd44288a..19cb0db22f 100644
--- a/src/win/msdkvideodecoder.h
+++ b/src/win/msdkvideodecoder.h
@@ -55,7 +55,7 @@ class MSDKVideoDecoder : public webrtc::VideoDecoder {
   explicit MSDKVideoDecoder();
   virtual ~MSDKVideoDecoder();
-  static std::unique_ptr Create(cricket::VideoCodec format);
+  static std::unique_ptr Create(webrtc::VideoCodec format);
   bool Configure(const Settings& settings) override;
@@ -110,7 +110,7 @@ class MSDKVideoDecoder : public webrtc::VideoDecoder {
   bool inited_;
   int width_;
   int height_;
-  std::unique_ptr
+  std::unique_ptr
       decoder_thread_;  // Thread on which the decoder will be working on.
   webrtc::VideoDecoder::Settings settings_;
diff --git a/src/win/msdkvideodecoderfactory.cc b/src/win/msdkvideodecoderfactory.cc
index 42517be850..92a3dd9474 100644
--- a/src/win/msdkvideodecoderfactory.cc
+++ b/src/win/msdkvideodecoderfactory.cc
@@ -70,35 +70,35 @@ MSDKVideoDecoderFactory::CreateVideoDecoder(
     else if (codec == webrtc::kVideoCodecVP9)
       vp9_hw = false;
   }
-  if (absl::EqualsIgnoreCase(format.name, cricket::kVp9CodecName) && !vp9_hw) {
+  if (absl::EqualsIgnoreCase(format.name, webrtc::kVp9CodecName) && !vp9_hw) {
     return webrtc::VP9Decoder::Create();
-  } else if (absl::EqualsIgnoreCase(format.name, cricket::kVp8CodecName) &&
+  } else if (absl::EqualsIgnoreCase(format.name, webrtc::kVp8CodecName) &&
             !vp8_hw) {
     RTC_LOG(LS_ERROR) << "Not supporting HW VP8 decoder. Requesting SW decoding.";
     return webrtc::VP8Decoder::Create();
-  } else if (absl::EqualsIgnoreCase(format.name, cricket::kH264CodecName) &&
+  } else if (absl::EqualsIgnoreCase(format.name, webrtc::kH264CodecName) &&
             !h264_hw) {
     return webrtc::H264Decoder::Create();
-  } else if (absl::EqualsIgnoreCase(format.name, cricket::kAv1CodecName) &&
+  } else if (absl::EqualsIgnoreCase(format.name, webrtc::kAv1CodecName) &&
             !av1_hw) {
     return webrtc::CreateLibaomAv1Decoder();
   }
-  return MSDKVideoDecoder::Create(cricket::VideoCodec(format));
+  return MSDKVideoDecoder::Create(webrtc::VideoCodec(format));
 }
 std::vector
 MSDKVideoDecoderFactory::GetSupportedFormats() const {
   std::vector supported_codecs;
-  supported_codecs.push_back(webrtc::SdpVideoFormat(cricket::kVp8CodecName));
+  supported_codecs.push_back(webrtc::SdpVideoFormat(webrtc::kVp8CodecName));
   for (const webrtc::SdpVideoFormat& format : webrtc::SupportedVP9Codecs())
     supported_codecs.push_back(format);
   for (const webrtc::SdpVideoFormat& format :
        owt::base::CodecUtils::SupportedH264Codecs())
     supported_codecs.push_back(format);
   if (webrtc::kIsLibaomAv1DecoderSupported) {
-    supported_codecs.push_back(webrtc::SdpVideoFormat(cricket::kAv1CodecName));
+    supported_codecs.push_back(webrtc::SdpVideoFormat(webrtc::kAv1CodecName));
   }
   return supported_codecs;
 }
diff --git a/src/win/msdkvideoencoder.cc b/src/win/msdkvideoencoder.cc
index 913ed85585..050b878c88 100644
--- a/src/win/msdkvideoencoder.cc
+++ b/src/win/msdkvideoencoder.cc
@@ -28,12 +28,12 @@ using namespace rtc;
 namespace owt {
 namespace base {
-MSDKVideoEncoder::MSDKVideoEncoder(const cricket::VideoCodec& format)
+MSDKVideoEncoder::MSDKVideoEncoder(const webrtc::VideoCodec& format)
     : callback_(nullptr),
       bitrate_(0),
       width_(0),
       height_(0),
-      encoder_thread_(rtc::Thread::Create()),
+      encoder_thread_(webrtc::Thread::Create()),
       inited_(false) {
   m_penc_surfaces_ = nullptr;
   m_frames_processed_ = 0;
@@ -55,9 +55,9 @@ MSDKVideoEncoder::MSDKVideoEncoder(const cricket::VideoCodec& format)
   if (!encoder_dump_file_name_.empty()) {
     enable_bitstream_dump_ = true;
     char filename_buffer[256];
-    rtc::SimpleStringBuilder ssb(filename_buffer);
+    webrtc::SimpleStringBuilder ssb(filename_buffer);
     ssb << encoder_dump_file_name_ << "/webrtc_send_stream_"
-        << rtc::TimeMicros() << ".ivf";
+        << webrtc::TimeMicros() << ".ivf";
     dump_writer_ = webrtc::IvfFileWriter::Wrap(
         webrtc::FileWrapper::OpenWriteOnly(ssb.str()),
         /* byte_limit= */ 100000000);
@@ -94,7 +94,7 @@ int MSDKVideoEncoder::InitEncode(const webrtc::VideoCodec* codec_settings,
   codec_type_ = codec_settings->codecType;
   // return encoder_thread_->Invoke(
   //     RTC_FROM_HERE,
-  //     rtc::Bind(&MSDKVideoEncoder::InitEncodeOnEncoderThread, this,
+  //     webrtc::Bind(&MSDKVideoEncoder::InitEncodeOnEncoderThread, this,
   //     codec_settings, number_of_cores, max_payload_size));
   return encoder_thread_->Invoke(
       RTC_FROM_HERE, [this, codec_settings, number_of_cores, max_payload_size] {
@@ -421,7 +421,7 @@ int MSDKVideoEncoder::Encode(
   ptr = pData.Y + pInfo.CropX + pInfo.CropY * pData.Pitch;
   if (MFX_FOURCC_NV12 == pInfo.FourCC) {
-    rtc::scoped_refptr buffer(
+    webrtc::scoped_refptr buffer(
        input_image.video_frame_buffer()->ToI420());
     libyuv::I420ToNV12(buffer->DataY(), buffer->StrideY(), buffer->DataU(),
@@ -432,7 +432,7 @@ int MSDKVideoEncoder::Encode(
     return WEBRTC_VIDEO_CODEC_ERROR;
   } else if (MFX_FOURCC_P010 == pInfo.FourCC) {
     // Source is always I420.
-    rtc::scoped_refptr buffer(
+    webrtc::scoped_refptr buffer(
        input_image.video_frame_buffer()->ToI420());
     libyuv::I420ToI010(buffer->DataY(), buffer->StrideY(), buffer->DataU(),
                        buffer->StrideU(), buffer->DataV(), buffer->StrideV(),
@@ -704,7 +704,7 @@ uint32_t MaxSizeOfKeyframeAsPercentage(uint32_t optimal_buffer_size,
 }
 std::unique_ptr MSDKVideoEncoder::Create(
-    cricket::VideoCodec format) {
+    webrtc::VideoCodec format) {
   return absl::make_unique(format);
 }
diff --git a/src/win/msdkvideoencoder.h b/src/win/msdkvideoencoder.h
index 931faecdce..c620177311 100644
--- a/src/win/msdkvideoencoder.h
+++ b/src/win/msdkvideoencoder.h
@@ -34,10 +34,10 @@ enum MemType {
 /// Encoder with Intel MediaSDK as the backend.
 class MSDKVideoEncoder : public webrtc::VideoEncoder {
  public:
-  explicit MSDKVideoEncoder(const cricket::VideoCodec& codec);
+  explicit MSDKVideoEncoder(const webrtc::VideoCodec& codec);
   virtual ~MSDKVideoEncoder();
-  static std::unique_ptr Create(cricket::VideoCodec format);
+  static std::unique_ptr Create(webrtc::VideoCodec format);
   int InitEncode(const webrtc::VideoCodec* codec_settings,
                  int number_of_cores,
                  size_t max_payload_size) override;
   int Encode(const webrtc::VideoFrame& input_image,
@@ -67,7 +67,7 @@ class MSDKVideoEncoder : public webrtc::VideoEncoder {
   int32_t height_;
   uint32_t frame_rate;
   webrtc::VideoCodecType codec_type_;
-  cricket::VideoCodec rtp_codec_parameters_;
+  webrtc::VideoCodec rtp_codec_parameters_;
   uint8_t num_temporal_layers_ = 1;
   MFXVideoSession* m_mfx_session_;
@@ -86,7 +86,7 @@ class MSDKVideoEncoder : public webrtc::VideoEncoder {
   mfxFrameAllocResponse m_enc_response_;
   mfxFrameSurface1* m_penc_surfaces_;  // frames array for encoder
   mfxU32 m_frames_processed_;
-  std::unique_ptr encoder_thread_;
+  std::unique_ptr encoder_thread_;
   std::atomic inited_;
   std::unique_ptr dump_writer_;
diff --git a/src/win/msdkvideoencoderfactory.cc b/src/win/msdkvideoencoderfactory.cc
index a244cc3558..e7ddef5264 100644
--- a/src/win/msdkvideoencoderfactory.cc
+++ b/src/win/msdkvideoencoderfactory.cc
@@ -54,15 +54,15 @@ MSDKVideoEncoderFactory::CreateVideoEncoder(
       vp9_hw = false;
   }
   // VP8 encoding will always use SW impl.
-  if (absl::EqualsIgnoreCase(format.name, cricket::kVp8CodecName) && !vp8_hw)
+  if (absl::EqualsIgnoreCase(format.name, webrtc::kVp8CodecName) && !vp8_hw)
     return webrtc::VP8Encoder::Create();
   // VP9 encoding will only be enabled on ICL+;
-  else if (absl::EqualsIgnoreCase(format.name, cricket::kVp9CodecName))
-    return webrtc::VP9Encoder::Create(cricket::VideoCodec(format));
+  else if (absl::EqualsIgnoreCase(format.name, webrtc::kVp9CodecName))
+    return webrtc::VP9Encoder::Create(webrtc::VideoCodec(format));
   // TODO: Replace with AV1 HW encoder post ADL.
-  else if (absl::EqualsIgnoreCase(format.name, cricket::kAv1CodecName))
+  else if (absl::EqualsIgnoreCase(format.name, webrtc::kAv1CodecName))
     return webrtc::CreateLibaomAv1Encoder();
-  return MSDKVideoEncoder::Create(cricket::VideoCodec(format));
+  return MSDKVideoEncoder::Create(webrtc::VideoCodec(format));
 }
 std::vector
@@ -73,11 +73,11 @@ MSDKVideoEncoderFactory::GetSupportedFormats() const {
   for (const webrtc::SdpVideoFormat& format :
        owt::base::CodecUtils::SupportedH264Codecs())
     supported_codecs.push_back(format);
-  supported_codecs.push_back(webrtc::SdpVideoFormat(cricket::kVp8CodecName));
+  supported_codecs.push_back(webrtc::SdpVideoFormat(webrtc::kVp8CodecName));
   for (const webrtc::SdpVideoFormat& format : webrtc::SupportedVP9Codecs())
     supported_codecs.push_back(format);
   if (webrtc::kIsLibaomAv1EncoderSupported) {
-    supported_codecs.push_back(webrtc::SdpVideoFormat(cricket::kAv1CodecName));
+    supported_codecs.push_back(webrtc::SdpVideoFormat(webrtc::kAv1CodecName));
   }
   return supported_codecs;
diff --git a/src/win/nativehandlebuffer.h b/src/win/nativehandlebuffer.h
index 507e75a92d..8626a7cc2f 100644
--- a/src/win/nativehandlebuffer.h
+++ b/src/win/nativehandlebuffer.h
@@ -17,7 +17,7 @@ class NativeHandleBuffer : public VideoFrameBuffer {
   Type type() const override { return Type::kNative; }
   int width() const override { return width_; }
   int height() const override { return height_; }
-  rtc::scoped_refptr ToI420() override {
+  webrtc::scoped_refptr ToI420() override {
     RTC_NOTREACHED();
     return nullptr;
   }