add vad code.

Author: luocai
Date: 2024-09-06 18:26:45 +08:00
Parent: 35bf68338f
Commit: 2bed1dacf2
93 changed files with 12362 additions and 2 deletions

View File: api/audio/audio_frame.cc

@@ -0,0 +1,235 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/audio/audio_frame.h"
#include <string.h>
#include <cstdint>
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/audio/audio_view.h"
#include "api/audio/channel_layout.h"
#include "api/rtp_packet_infos.h"
#include "rtc_base/checks.h"
#include "rtc_base/time_utils.h"
namespace webrtc {
AudioFrame::AudioFrame() {
// Visual Studio doesn't like this in the class definition.
static_assert(sizeof(data_) == kMaxDataSizeBytes, "kMaxDataSizeBytes");
}
AudioFrame::AudioFrame(int sample_rate_hz,
size_t num_channels,
ChannelLayout layout /*= CHANNEL_LAYOUT_UNSUPPORTED*/)
: samples_per_channel_(SampleRateToDefaultChannelSize(sample_rate_hz)),
sample_rate_hz_(sample_rate_hz),
num_channels_(num_channels),
channel_layout_(layout == CHANNEL_LAYOUT_UNSUPPORTED
? GuessChannelLayout(num_channels)
: layout) {
RTC_DCHECK_LE(num_channels_, kMaxConcurrentChannels);
RTC_DCHECK_GT(sample_rate_hz_, 0);
RTC_DCHECK_GT(samples_per_channel_, 0u);
}
void AudioFrame::Reset() {
ResetWithoutMuting();
muted_ = true;
}
void AudioFrame::ResetWithoutMuting() {
// TODO(wu): Zero is a valid value for `timestamp_`. We should initialize
// to an invalid value, or add a new member to indicate invalidity.
timestamp_ = 0;
elapsed_time_ms_ = -1;
ntp_time_ms_ = -1;
samples_per_channel_ = 0;
sample_rate_hz_ = 0;
num_channels_ = 0;
channel_layout_ = CHANNEL_LAYOUT_NONE;
speech_type_ = kUndefined;
vad_activity_ = kVadUnknown;
profile_timestamp_ms_ = 0;
packet_infos_ = RtpPacketInfos();
absolute_capture_timestamp_ms_ = absl::nullopt;
}
void AudioFrame::UpdateFrame(uint32_t timestamp,
const int16_t* data,
size_t samples_per_channel,
int sample_rate_hz,
SpeechType speech_type,
VADActivity vad_activity,
size_t num_channels) {
RTC_CHECK_LE(num_channels, kMaxConcurrentChannels);
timestamp_ = timestamp;
samples_per_channel_ = samples_per_channel;
sample_rate_hz_ = sample_rate_hz;
speech_type_ = speech_type;
vad_activity_ = vad_activity;
num_channels_ = num_channels;
channel_layout_ = GuessChannelLayout(num_channels);
if (channel_layout_ != CHANNEL_LAYOUT_UNSUPPORTED) {
RTC_DCHECK_EQ(num_channels, ChannelLayoutToChannelCount(channel_layout_));
}
const size_t length = samples_per_channel * num_channels;
RTC_CHECK_LE(length, data_.size());
if (data != nullptr) {
memcpy(data_.data(), data, sizeof(int16_t) * length);
muted_ = false;
} else {
muted_ = true;
}
}
void AudioFrame::CopyFrom(const AudioFrame& src) {
if (this == &src)
return;
if (muted_ && !src.muted()) {
    // TODO: bugs.webrtc.org/5647 - Since `data_` may still be uninitialized
    // (we don't initialize data_ as part of construction), we clear the full
    // buffer here before copying over new values. If we don't, msan might
    // complain in some tests. Consider locking down construction, avoiding
    // the default constructor and preferring construction that initializes
    // all state.
ClearSamples(data_);
}
timestamp_ = src.timestamp_;
elapsed_time_ms_ = src.elapsed_time_ms_;
ntp_time_ms_ = src.ntp_time_ms_;
packet_infos_ = src.packet_infos_;
muted_ = src.muted();
samples_per_channel_ = src.samples_per_channel_;
sample_rate_hz_ = src.sample_rate_hz_;
speech_type_ = src.speech_type_;
vad_activity_ = src.vad_activity_;
num_channels_ = src.num_channels_;
channel_layout_ = src.channel_layout_;
absolute_capture_timestamp_ms_ = src.absolute_capture_timestamp_ms();
auto data = src.data_view();
RTC_CHECK_LE(data.size(), data_.size());
if (!muted_ && !data.empty()) {
memcpy(&data_[0], &data[0], sizeof(int16_t) * data.size());
}
}
void AudioFrame::UpdateProfileTimeStamp() {
profile_timestamp_ms_ = rtc::TimeMillis();
}
int64_t AudioFrame::ElapsedProfileTimeMs() const {
if (profile_timestamp_ms_ == 0) {
// Profiling has not been activated.
return -1;
}
return rtc::TimeSince(profile_timestamp_ms_);
}
const int16_t* AudioFrame::data() const {
return muted_ ? zeroed_data().begin() : data_.data();
}
InterleavedView<const int16_t> AudioFrame::data_view() const {
// If you get a nullptr from `data_view()`, it's likely because the
// samples_per_channel_ and/or num_channels_ members haven't been properly
// set. Since `data_view()` returns an InterleavedView<> (which internally
  // uses rtc::ArrayView<>), it inherits ArrayView's behavior of returning
  // nullptr from data() when the view size is 0. So, even when an AudioFrame
  // is muted and we want to return `zeroed_data()`, if samples_per_channel_
  // or num_channels_ is 0, the view will point to nullptr.
return InterleavedView<const int16_t>(muted_ ? &zeroed_data()[0] : &data_[0],
samples_per_channel_, num_channels_);
}
int16_t* AudioFrame::mutable_data() {
// TODO: bugs.webrtc.org/5647 - Can we skip zeroing the buffer?
// Consider instead if we should rather zero the buffer when `muted_` is set
// to `true`.
if (muted_) {
ClearSamples(data_);
muted_ = false;
}
return &data_[0];
}
InterleavedView<int16_t> AudioFrame::mutable_data(size_t samples_per_channel,
size_t num_channels) {
const size_t total_samples = samples_per_channel * num_channels;
RTC_CHECK_LE(total_samples, data_.size());
RTC_CHECK_LE(num_channels, kMaxConcurrentChannels);
// Sanity check for valid argument values during development.
// If `samples_per_channel` is < `num_channels` but larger than 0,
// then chances are the order of arguments is incorrect.
RTC_DCHECK((samples_per_channel == 0 && num_channels == 0) ||
num_channels <= samples_per_channel)
<< "samples_per_channel=" << samples_per_channel
<< "num_channels=" << num_channels;
// TODO: bugs.webrtc.org/5647 - Can we skip zeroing the buffer?
// Consider instead if we should rather zero the whole buffer when `muted_` is
// set to `true`.
if (muted_) {
ClearSamples(data_, total_samples);
muted_ = false;
}
samples_per_channel_ = samples_per_channel;
num_channels_ = num_channels;
return InterleavedView<int16_t>(&data_[0], samples_per_channel, num_channels);
}
void AudioFrame::Mute() {
muted_ = true;
}
bool AudioFrame::muted() const {
return muted_;
}
void AudioFrame::SetLayoutAndNumChannels(ChannelLayout layout,
size_t num_channels) {
channel_layout_ = layout;
num_channels_ = num_channels;
#if RTC_DCHECK_IS_ON
// Do a sanity check that the layout and num_channels match.
  // If this lookup yields 0u, the layout is likely CHANNEL_LAYOUT_DISCRETE.
  auto expected_num_channels = ChannelLayoutToChannelCount(layout);
  if (expected_num_channels) {  // Skip the check when the count is unknown.
RTC_DCHECK_EQ(expected_num_channels, num_channels_);
}
#endif
RTC_CHECK_LE(samples_per_channel_ * num_channels_, data_.size());
}
void AudioFrame::SetSampleRateAndChannelSize(int sample_rate) {
sample_rate_hz_ = sample_rate;
// We could call `AudioProcessing::GetFrameSize()` here, but that requires
// adding a dependency on the ":audio_processing" build target, which can
// complicate the dependency tree. Some refactoring is probably in order to
// get some consistency around this since there are many places across the
// code that assume this default buffer size.
samples_per_channel_ = SampleRateToDefaultChannelSize(sample_rate_hz_);
}
// static
rtc::ArrayView<const int16_t> AudioFrame::zeroed_data() {
static int16_t* null_data = new int16_t[kMaxDataSizeSamples]();
return rtc::ArrayView<const int16_t>(null_data, kMaxDataSizeSamples);
}
} // namespace webrtc

View File: api/audio/audio_frame.h

@@ -0,0 +1,230 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_AUDIO_AUDIO_FRAME_H_
#define API_AUDIO_AUDIO_FRAME_H_
#include <stddef.h>
#include <stdint.h>
#include <array>
#include "absl/types/optional.h"
#include "api/array_view.h"
#include "api/audio/audio_view.h"
#include "api/audio/channel_layout.h"
#include "api/rtp_packet_infos.h"
#include "rtc_base/checks.h"
namespace webrtc {
// Default webrtc buffer size in milliseconds.
constexpr size_t kDefaultAudioBufferLengthMs = 10u;
// Default total number of audio buffers per second based on the default length.
constexpr size_t kDefaultAudioBuffersPerSec =
1000u / kDefaultAudioBufferLengthMs;
// Returns the number of samples a buffer needs to hold for ~10ms of a single
// audio channel at a given sample rate.
// See also `AudioProcessing::GetFrameSize()`.
inline size_t SampleRateToDefaultChannelSize(size_t sample_rate) {
// Basic sanity check. 192kHz is the highest supported input sample rate.
RTC_DCHECK_LE(sample_rate, 192000);
return sample_rate / kDefaultAudioBuffersPerSec;
}
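// For example (a quick sanity check, not normative): at 48000 Hz this yields
// 48000 / 100 = 480 samples, i.e. one 10 ms buffer of a single channel.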
/////////////////////////////////////////////////////////////////////
/* This class holds up to 120 ms of super-wideband (32 kHz) stereo audio. It
* allows for adding and subtracting frames while keeping track of the resulting
* states.
*
* Notes
* - This is a de-facto api, not designed for external use. The AudioFrame class
* is in need of overhaul or even replacement, and anyone depending on it
* should be prepared for that.
* - The total number of samples is samples_per_channel_ * num_channels_.
* - Stereo data is interleaved starting with the left channel.
*/
class AudioFrame {
public:
// Using constexpr here causes linker errors unless the variable also has an
// out-of-class definition, which is impractical in this header-only class.
// (This makes no sense because it compiles as an enum value, which we most
// certainly cannot take the address of, just fine.) C++17 introduces inline
// variables which should allow us to switch to constexpr and keep this a
// header-only class.
enum : size_t {
// Stereo, 32 kHz, 120 ms (2 * 32 * 120)
// Stereo, 192 kHz, 20 ms (2 * 192 * 20)
kMaxDataSizeSamples = 7680,
kMaxDataSizeBytes = kMaxDataSizeSamples * sizeof(int16_t),
};
enum VADActivity { kVadActive = 0, kVadPassive = 1, kVadUnknown = 2 };
enum SpeechType {
kNormalSpeech = 0,
kPLC = 1,
kCNG = 2,
kPLCCNG = 3,
kCodecPLC = 5,
kUndefined = 4
};
AudioFrame();
// Construct an audio frame with frame length properties and channel
// information. `samples_per_channel()` will be initialized to a 10ms buffer
// size and if `layout` is not specified (default value of
// CHANNEL_LAYOUT_UNSUPPORTED is set), then the channel layout is derived
// (guessed) from `num_channels`.
AudioFrame(int sample_rate_hz,
size_t num_channels,
ChannelLayout layout = CHANNEL_LAYOUT_UNSUPPORTED);
AudioFrame(const AudioFrame&) = delete;
AudioFrame& operator=(const AudioFrame&) = delete;
// Resets all members to their default state.
void Reset();
// Same as Reset(), but leaves mute state unchanged. Muting a frame requires
// the buffer to be zeroed on the next call to mutable_data(). Callers
// intending to write to the buffer immediately after Reset() can instead use
// ResetWithoutMuting() to skip this wasteful zeroing.
void ResetWithoutMuting();
// TODO: b/335805780 - Accept InterleavedView.
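  // Illustrative sketch of a call (the `frame` and `samples` names and all
  // values are made up):
  //
  //   int16_t samples[480 * 2] = {0};  // 10 ms of interleaved 48 kHz stereo.
  //   frame.UpdateFrame(/*timestamp=*/0, samples, /*samples_per_channel=*/480,
  //                     /*sample_rate_hz=*/48000, AudioFrame::kNormalSpeech,
  //                     AudioFrame::kVadActive, /*num_channels=*/2);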
void UpdateFrame(uint32_t timestamp,
const int16_t* data,
size_t samples_per_channel,
int sample_rate_hz,
SpeechType speech_type,
VADActivity vad_activity,
size_t num_channels = 1);
void CopyFrom(const AudioFrame& src);
// Sets a wall-time clock timestamp in milliseconds to be used for profiling
// of time between two points in the audio chain.
// Example:
// t0: UpdateProfileTimeStamp()
// t1: ElapsedProfileTimeMs() => t1 - t0 [msec]
void UpdateProfileTimeStamp();
// Returns the time difference between now and when UpdateProfileTimeStamp()
// was last called. Returns -1 if UpdateProfileTimeStamp() has not yet been
// called.
int64_t ElapsedProfileTimeMs() const;
// data() returns a zeroed static buffer if the frame is muted.
// TODO: b/335805780 - Return InterleavedView.
const int16_t* data() const;
// Returns a read-only view of all the valid samples held by the AudioFrame.
// For a muted AudioFrame, the samples will all be 0.
InterleavedView<const int16_t> data_view() const;
  // mutable_data() always returns a non-static buffer; the first call to
  // mutable_data() zeros the buffer and marks the frame as unmuted.
// TODO: b/335805780 - Return an InterleavedView.
int16_t* mutable_data();
// Grants write access to the audio buffer. The size of the returned writable
// view is determined by the `samples_per_channel` and `num_channels`
// dimensions which the function checks for correctness and stores in the
// internal member variables; `samples_per_channel()` and `num_channels()`
// respectively.
// If the state is currently muted, the returned view will be zeroed out.
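  // Illustrative sketch (assuming an existing AudioFrame `frame`): request a
  // writable 10 ms stereo view at 48 kHz:
  //
  //   InterleavedView<int16_t> view = frame.mutable_data(480, 2);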
InterleavedView<int16_t> mutable_data(size_t samples_per_channel,
size_t num_channels);
// Prefer to mute frames using AudioFrameOperations::Mute.
void Mute();
// Frame is muted by default.
bool muted() const;
size_t max_16bit_samples() const { return data_.size(); }
size_t samples_per_channel() const { return samples_per_channel_; }
size_t num_channels() const { return num_channels_; }
ChannelLayout channel_layout() const { return channel_layout_; }
// Sets the `channel_layout` property as well as `num_channels`.
void SetLayoutAndNumChannels(ChannelLayout layout, size_t num_channels);
int sample_rate_hz() const { return sample_rate_hz_; }
void set_absolute_capture_timestamp_ms(
int64_t absolute_capture_time_stamp_ms) {
absolute_capture_timestamp_ms_ = absolute_capture_time_stamp_ms;
}
absl::optional<int64_t> absolute_capture_timestamp_ms() const {
return absolute_capture_timestamp_ms_;
}
// Sets the sample_rate_hz and samples_per_channel properties based on a
// given sample rate and calculates a default 10ms samples_per_channel value.
void SetSampleRateAndChannelSize(int sample_rate);
// RTP timestamp of the first sample in the AudioFrame.
uint32_t timestamp_ = 0;
// Time since the first frame in milliseconds.
// -1 represents an uninitialized value.
int64_t elapsed_time_ms_ = -1;
// NTP time of the estimated capture time in local timebase in milliseconds.
// -1 represents an uninitialized value.
int64_t ntp_time_ms_ = -1;
size_t samples_per_channel_ = 0;
int sample_rate_hz_ = 0;
size_t num_channels_ = 0;
SpeechType speech_type_ = kUndefined;
VADActivity vad_activity_ = kVadUnknown;
// Monotonically increasing timestamp intended for profiling of audio frames.
// Typically used for measuring elapsed time between two different points in
// the audio path. No lock is used to save resources and we are thread safe
// by design.
// TODO(nisse@webrtc.org): consider using absl::optional.
int64_t profile_timestamp_ms_ = 0;
// Information about packets used to assemble this audio frame. This is needed
// by `SourceTracker` when the frame is delivered to the RTCRtpReceiver's
// MediaStreamTrack, in order to implement getContributingSources(). See:
// https://w3c.github.io/webrtc-pc/#dom-rtcrtpreceiver-getcontributingsources
//
// TODO(bugs.webrtc.org/10757):
// Note that this information might not be fully accurate since we currently
// don't have a proper way to track it across the audio sync buffer. The
// sync buffer is the small sample-holding buffer located after the audio
// decoder and before where samples are assembled into output frames.
//
// `RtpPacketInfos` may also be empty if the audio samples did not come from
// RTP packets. E.g. if the audio were locally generated by packet loss
// concealment, comfort noise generation, etc.
RtpPacketInfos packet_infos_;
private:
// A permanently zeroed out buffer to represent muted frames. This is a
// header-only class, so the only way to avoid creating a separate zeroed
// buffer per translation unit is to wrap a static in an inline function.
static rtc::ArrayView<const int16_t> zeroed_data();
std::array<int16_t, kMaxDataSizeSamples> data_;
bool muted_ = true;
ChannelLayout channel_layout_ = CHANNEL_LAYOUT_NONE;
// Absolute capture timestamp when this audio frame was originally captured.
// This is only valid for audio frames captured on this machine. The absolute
// capture timestamp of a received frame is found in `packet_infos_`.
// This timestamp MUST be based on the same clock as rtc::TimeMillis().
absl::optional<int64_t> absolute_capture_timestamp_ms_;
};
} // namespace webrtc
#endif // API_AUDIO_AUDIO_FRAME_H_

View File: api/function_view.h

@@ -0,0 +1,131 @@
/*
* Copyright 2016 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_FUNCTION_VIEW_H_
#define API_FUNCTION_VIEW_H_
#include <cstddef>
#include <type_traits>
#include <utility>
#include "rtc_base/checks.h"
// Just like std::function, FunctionView will wrap any callable and hide its
// actual type, exposing only its signature. But unlike std::function,
// FunctionView doesn't own its callable---it just points to it. Thus, it's a
// good choice mainly as a function argument when the callable argument will
// not be called again once the function has returned.
//
// Its constructors are implicit, so that callers won't have to convert lambdas
// and other callables to FunctionView<Blah(Blah, Blah)> explicitly. This is
// safe because FunctionView is only a reference to the real callable.
//
// Example use:
//
// void SomeFunction(rtc::FunctionView<int(int)> index_transform);
// ...
// SomeFunction([](int i) { return 2 * i + 1; });
//
// Note: FunctionView is tiny (essentially just two pointers) and trivially
// copyable, so it's probably cheaper to pass it by value than by const
// reference.
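//
// Because a FunctionView can be empty (null), callers may also test it before
// invoking it, e.g.:
//
//   void MaybeTransform(rtc::FunctionView<int(int)> f, int i) {
//     if (f) {
//       f(i);
//     }
//   }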
namespace rtc {
template <typename T>
class FunctionView; // Undefined.
template <typename RetT, typename... ArgT>
class FunctionView<RetT(ArgT...)> final {
public:
// Constructor for lambdas and other callables; it accepts every type of
// argument except those noted in its enable_if call.
template <
typename F,
typename std::enable_if<
// Not for function pointers; we have another constructor for that
// below.
!std::is_function<typename std::remove_pointer<
typename std::remove_reference<F>::type>::type>::value &&
// Not for nullptr; we have another constructor for that below.
!std::is_same<std::nullptr_t,
typename std::remove_cv<F>::type>::value &&
// Not for FunctionView objects; we have another constructor for that
// (the implicitly declared copy constructor).
!std::is_same<FunctionView,
typename std::remove_cv<typename std::remove_reference<
F>::type>::type>::value>::type* = nullptr>
FunctionView(F&& f)
: call_(CallVoidPtr<typename std::remove_reference<F>::type>) {
f_.void_ptr = &f;
}
// Constructor that accepts function pointers. If the argument is null, the
// result is an empty FunctionView.
template <
typename F,
typename std::enable_if<std::is_function<typename std::remove_pointer<
typename std::remove_reference<F>::type>::type>::value>::type* =
nullptr>
FunctionView(F&& f)
: call_(f ? CallFunPtr<typename std::remove_pointer<F>::type> : nullptr) {
f_.fun_ptr = reinterpret_cast<void (*)()>(f);
}
// Constructor that accepts nullptr. It creates an empty FunctionView.
template <typename F,
typename std::enable_if<std::is_same<
std::nullptr_t,
typename std::remove_cv<F>::type>::value>::type* = nullptr>
FunctionView(F&& f) : call_(nullptr) {}
// Default constructor. Creates an empty FunctionView.
FunctionView() : call_(nullptr) {}
RetT operator()(ArgT... args) const {
RTC_DCHECK(call_);
return call_(f_, std::forward<ArgT>(args)...);
}
// Returns true if we have a function, false if we don't (i.e., we're null).
explicit operator bool() const { return !!call_; }
private:
union VoidUnion {
void* void_ptr;
void (*fun_ptr)();
};
template <typename F>
static RetT CallVoidPtr(VoidUnion vu, ArgT... args) {
return (*static_cast<F*>(vu.void_ptr))(std::forward<ArgT>(args)...);
}
template <typename F>
static RetT CallFunPtr(VoidUnion vu, ArgT... args) {
return (reinterpret_cast<typename std::add_pointer<F>::type>(vu.fun_ptr))(
std::forward<ArgT>(args)...);
}
// A pointer to the callable thing, with type information erased. It's a
// union because we have to use separate types depending on if the callable
// thing is a function pointer or something else.
VoidUnion f_;
// Pointer to a dispatch function that knows the type of the callable thing
// that's stored in f_, and how to call it. A FunctionView object is empty
// (null) iff call_ is null.
RetT (*call_)(VoidUnion, ArgT...);
};
} // namespace rtc
#endif // API_FUNCTION_VIEW_H_

View File: api/make_ref_counted.h

@@ -0,0 +1,170 @@
/*
* Copyright 2022 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_MAKE_REF_COUNTED_H_
#define API_MAKE_REF_COUNTED_H_
#include <type_traits>
#include <utility>
#include "rtc_base/ref_counted_object.h"
namespace webrtc {
namespace webrtc_make_ref_counted_internal {
// Determines if the given class has AddRef and Release methods.
template <typename T>
class HasAddRefAndRelease {
private:
template <typename C,
decltype(std::declval<C>().AddRef())* = nullptr,
decltype(std::declval<C>().Release())* = nullptr>
static int Test(int);
template <typename>
static char Test(...);
public:
static constexpr bool value = std::is_same_v<decltype(Test<T>(0)), int>;
};
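// For example, HasAddRefAndRelease<webrtc::RefCountedBase>::value is true,
// while it is false for a type with neither method (e.g. a plain struct).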
} // namespace webrtc_make_ref_counted_internal
// General utilities for constructing a reference counted class and the
// appropriate reference count implementation for that class.
//
// These utilities select either the `RefCountedObject` implementation or
// `FinalRefCountedObject` depending on whether the to-be-shared class is
// derived from the RefCountInterface interface or not (respectively).
// `make_ref_counted`:
//
// Use this when you want to construct a reference counted object of type T and
// get a `scoped_refptr<>` back. Example:
//
// auto p = make_ref_counted<Foo>("bar", 123);
//
// For a class that inherits from RefCountInterface, this is equivalent to:
//
// auto p = scoped_refptr<Foo>(new RefCountedObject<Foo>("bar", 123));
//
// If the class does not inherit from RefCountInterface, but does have
// AddRef/Release methods (so a T* is convertible to rtc::scoped_refptr), this
// is equivalent to just
//
// auto p = scoped_refptr<Foo>(new Foo("bar", 123));
//
// Otherwise, the example is equivalent to:
//
// auto p = scoped_refptr<FinalRefCountedObject<Foo>>(
// new FinalRefCountedObject<Foo>("bar", 123));
//
// In these cases, `make_ref_counted` reduces the amount of boilerplate code
// but also helps with the most commonly intended usage of RefCountedObject,
// whereby the reference-counting methods are virtual and designed to satisfy
// the need of an interface. When such a need does not exist, it is more
// efficient to use the `FinalRefCountedObject` template, which does not add
// the vtable overhead.
//
// Note that in some cases, using RefCountedObject directly may still be what's
// needed.
// `make_ref_counted` for abstract classes that are convertible to
// RefCountInterface. The is_abstract requirement rejects classes that inherit
// both RefCountInterface and RefCountedObject, which is a discouraged
// pattern, and would result in double inheritance of RefCountedObject if this
// template was applied.
template <
typename T,
typename... Args,
typename std::enable_if<std::is_convertible_v<T*, RefCountInterface*> &&
std::is_abstract_v<T>,
T>::type* = nullptr>
scoped_refptr<T> make_ref_counted(Args&&... args) {
return scoped_refptr<T>(new RefCountedObject<T>(std::forward<Args>(args)...));
}
// `make_ref_counted` for complete classes that are not convertible to
// RefCountInterface and already carry a ref count.
template <
typename T,
typename... Args,
typename std::enable_if<
!std::is_convertible_v<T*, RefCountInterface*> &&
webrtc_make_ref_counted_internal::HasAddRefAndRelease<T>::value,
T>::type* = nullptr>
scoped_refptr<T> make_ref_counted(Args&&... args) {
return scoped_refptr<T>(new T(std::forward<Args>(args)...));
}
// `make_ref_counted` for complete classes that are not convertible to
// RefCountInterface and have no ref count of their own.
template <
typename T,
typename... Args,
typename std::enable_if<
!std::is_convertible_v<T*, RefCountInterface*> &&
!webrtc_make_ref_counted_internal::HasAddRefAndRelease<T>::value,
T>::type* = nullptr>
scoped_refptr<FinalRefCountedObject<T>> make_ref_counted(Args&&... args) {
return scoped_refptr<FinalRefCountedObject<T>>(
new FinalRefCountedObject<T>(std::forward<Args>(args)...));
}
} // namespace webrtc
// Backwards compatible aliases.
// TODO: https://issues.webrtc.org/42225969 - deprecate and remove.
namespace rtc {
// This doesn't work:
// template <typename T, typename... Args>
// using make_ref_counted(Args&&... args) =
// webrtc::make_ref_counted<T>(Args&&... args);
// Instead, reproduce the templates.
template <typename T,
typename... Args,
typename std::enable_if<
std::is_convertible_v<T*, webrtc::RefCountInterface*> &&
std::is_abstract_v<T>,
T>::type* = nullptr>
scoped_refptr<T> make_ref_counted(Args&&... args) {
return webrtc::scoped_refptr<T>(
new webrtc::RefCountedObject<T>(std::forward<Args>(args)...));
}
// `make_ref_counted` for complete classes that are not convertible to
// RefCountInterface and already carry a ref count.
template <typename T,
typename... Args,
typename std::enable_if<
!std::is_convertible_v<T*, webrtc::RefCountInterface*> &&
webrtc::webrtc_make_ref_counted_internal::HasAddRefAndRelease<
T>::value,
T>::type* = nullptr>
scoped_refptr<T> make_ref_counted(Args&&... args) {
return webrtc::scoped_refptr<T>(new T(std::forward<Args>(args)...));
}
// `make_ref_counted` for complete classes that are not convertible to
// RefCountInterface and have no ref count of their own.
template <typename T,
typename... Args,
typename std::enable_if<
!std::is_convertible_v<T*, webrtc::RefCountInterface*> &&
!webrtc::webrtc_make_ref_counted_internal::
HasAddRefAndRelease<T>::value,
T>::type* = nullptr>
scoped_refptr<webrtc::FinalRefCountedObject<T>> make_ref_counted(
Args&&... args) {
return webrtc::scoped_refptr<FinalRefCountedObject<T>>(
new webrtc::FinalRefCountedObject<T>(std::forward<Args>(args)...));
}
} // namespace rtc
#endif // API_MAKE_REF_COUNTED_H_

View File: api/ref_counted_base.h

@@ -0,0 +1,106 @@
/*
* Copyright 2017 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_REF_COUNTED_BASE_H_
#define API_REF_COUNTED_BASE_H_
#include <type_traits>
#include "rtc_base/ref_counter.h"
namespace webrtc {
class RefCountedBase {
public:
RefCountedBase() = default;
RefCountedBase(const RefCountedBase&) = delete;
RefCountedBase& operator=(const RefCountedBase&) = delete;
void AddRef() const { ref_count_.IncRef(); }
RefCountReleaseStatus Release() const {
const auto status = ref_count_.DecRef();
if (status == RefCountReleaseStatus::kDroppedLastRef) {
delete this;
}
return status;
}
protected:
// Provided for internal webrtc subclasses for corner cases where it's
// necessary to know whether or not a reference is exclusively held.
bool HasOneRef() const { return ref_count_.HasOneRef(); }
virtual ~RefCountedBase() = default;
private:
mutable webrtc::webrtc_impl::RefCounter ref_count_{0};
};
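// Illustrative sketch of using RefCountedBase (`Foo` is a made-up type):
//
//   class Foo : public webrtc::RefCountedBase {
//    public:
//     void Bar();
//   };
//
//   // Holds one reference; the object deletes itself when the last
//   // reference is released.
//   rtc::scoped_refptr<Foo> foo(new Foo());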
// Template based version of `RefCountedBase` for simple implementations that do
// not need (or want) destruction via virtual destructor or the overhead of a
// vtable.
//
// To use:
// struct MyInt : public rtc::RefCountedNonVirtual<MyInt> {
// int foo_ = 0;
// };
//
// rtc::scoped_refptr<MyInt> my_int(new MyInt());
//
// sizeof(MyInt) on a 32 bit system would then be 8, int + refcount and no
// vtable generated.
template <typename T>
class RefCountedNonVirtual {
public:
RefCountedNonVirtual() = default;
RefCountedNonVirtual(const RefCountedNonVirtual&) = delete;
RefCountedNonVirtual& operator=(const RefCountedNonVirtual&) = delete;
void AddRef() const { ref_count_.IncRef(); }
RefCountReleaseStatus Release() const {
// If you run into this assert, T has virtual methods. There are two
// options:
// 1) The class doesn't actually need virtual methods, the type is complete
// so the virtual attribute(s) can be removed.
// 2) The virtual methods are a part of the design of the class. In this
// case you can consider using `RefCountedBase` instead or alternatively
// use `rtc::RefCountedObject`.
static_assert(!std::is_polymorphic<T>::value,
"T has virtual methods. RefCountedBase is a better fit.");
const auto status = ref_count_.DecRef();
if (status == RefCountReleaseStatus::kDroppedLastRef) {
delete static_cast<const T*>(this);
}
return status;
}
protected:
// Provided for internal webrtc subclasses for corner cases where it's
// necessary to know whether or not a reference is exclusively held.
bool HasOneRef() const { return ref_count_.HasOneRef(); }
~RefCountedNonVirtual() = default;
private:
mutable webrtc::webrtc_impl::RefCounter ref_count_{0};
};
} // namespace webrtc
// Backwards compatible aliases.
// TODO: https://issues.webrtc.org/42225969 - deprecate and remove.
namespace rtc {
using RefCountedBase = webrtc::RefCountedBase;
template <typename T>
using RefCountedNonVirtual = webrtc::RefCountedNonVirtual<T>;
} // namespace rtc
#endif // API_REF_COUNTED_BASE_H_

View File: api/rtp_headers.cc

@@ -0,0 +1,58 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/rtp_headers.h"
namespace webrtc {
AudioLevel::AudioLevel() : voice_activity_(false), audio_level_(0) {}
AudioLevel::AudioLevel(bool voice_activity, int audio_level)
: voice_activity_(voice_activity), audio_level_(audio_level) {
RTC_CHECK_GE(audio_level, 0);
RTC_CHECK_LE(audio_level, 127);
}
RTPHeaderExtension::RTPHeaderExtension()
: hasTransmissionTimeOffset(false),
transmissionTimeOffset(0),
hasAbsoluteSendTime(false),
absoluteSendTime(0),
hasTransportSequenceNumber(false),
transportSequenceNumber(0),
hasVideoRotation(false),
videoRotation(kVideoRotation_0),
hasVideoContentType(false),
videoContentType(VideoContentType::UNSPECIFIED),
has_video_timing(false) {}
RTPHeaderExtension::RTPHeaderExtension(const RTPHeaderExtension& other) =
default;
RTPHeaderExtension& RTPHeaderExtension::operator=(
const RTPHeaderExtension& other) = default;
RTPHeader::RTPHeader()
: markerBit(false),
payloadType(0),
sequenceNumber(0),
timestamp(0),
ssrc(0),
numCSRCs(0),
arrOfCSRCs(),
paddingLength(0),
headerLength(0),
extension() {}
RTPHeader::RTPHeader(const RTPHeader& other) = default;
RTPHeader& RTPHeader::operator=(const RTPHeader& other) = default;
} // namespace webrtc

View File: api/rtp_headers.h

@@ -0,0 +1,208 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_RTP_HEADERS_H_
#define API_RTP_HEADERS_H_
#include <stddef.h>
#include <stdint.h>
#include <string>
#include "absl/types/optional.h"
#include "api/units/timestamp.h"
#include "api/video/color_space.h"
#include "api/video/video_content_type.h"
#include "api/video/video_rotation.h"
#include "api/video/video_timing.h"
#include "rtc_base/checks.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
struct FeedbackRequest {
// Determines whether the recv delta as specified in
// https://tools.ietf.org/html/draft-holmer-rmcat-transport-wide-cc-extensions-01
// should be included.
bool include_timestamps;
// Include feedback of received packets in the range [sequence_number -
// sequence_count + 1, sequence_number]. That is, no feedback will be sent if
// sequence_count is zero.
int sequence_count;
};
// The Absolute Capture Time extension is used to stamp RTP packets with an NTP
// timestamp showing when the first audio or video frame in a packet was
// originally captured. The intent of this extension is to provide a way to
// accomplish audio-to-video synchronization when RTCP-terminating intermediate
// systems (e.g. mixers) are involved. See:
// http://www.webrtc.org/experiments/rtp-hdrext/abs-capture-time
struct AbsoluteCaptureTime {
// Absolute capture timestamp is the NTP timestamp of when the first frame in
// a packet was originally captured. This timestamp MUST be based on the same
// clock as the clock used to generate NTP timestamps for RTCP sender reports
// on the capture system.
//
  // It's not always possible to do an NTP clock readout at the exact moment
// when a media frame is captured. A capture system MAY postpone the readout
// until a more convenient time. A capture system SHOULD have known delays
// (e.g. from hardware buffers) subtracted from the readout to make the final
// timestamp as close to the actual capture time as possible.
//
// This field is encoded as a 64-bit unsigned fixed-point number with the high
// 32 bits for the timestamp in seconds and low 32 bits for the fractional
// part. This is also known as the UQ32.32 format and is what the RTP
// specification defines as the canonical format to represent NTP timestamps.
uint64_t absolute_capture_timestamp;
  // Estimated capture clock offset is the sender's estimate of the offset
  // between its own NTP clock and the capture system's NTP clock. The sender
  // is here defined as the system that owns the NTP clock used to generate
  // the NTP timestamps for the RTCP sender reports on this stream. The sender
  // system is typically either the capture system or a mixer.
  //
  // This field is encoded as a 64-bit two's complement signed fixed-point
  // number with the high 32 bits for the seconds and low 32 bits for the
  // fractional part. It's intended to make it easy for a receiver that knows
  // how to estimate the sender system's NTP clock to also estimate the
  // capture system's NTP clock:
//
// Capture NTP Clock = Sender NTP Clock + Capture Clock Offset
absl::optional<int64_t> estimated_capture_clock_offset;
};
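// As a worked example of the UQ32.32 format used by AbsoluteCaptureTime above:
// 1.5 seconds is encoded as (uint64_t{1} << 32) | (uint64_t{1} << 31), since
// the high 32 bits hold the integer seconds and the low 32 bits hold the
// fraction (1 << 31 == 0.5).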
// The audio level extension is used to indicate the voice activity and the
// audio level of the payload in the RTP stream. See:
// https://tools.ietf.org/html/rfc6464#section-3.
class AudioLevel {
public:
AudioLevel();
AudioLevel(bool voice_activity, int audio_level);
AudioLevel(const AudioLevel& other) = default;
AudioLevel& operator=(const AudioLevel& other) = default;
// Flag indicating whether the encoder believes the audio packet contains
// voice activity.
bool voice_activity() const { return voice_activity_; }
// Audio level in -dBov. Values range from 0 to 127, representing 0 to -127
// dBov. 127 represents digital silence.
int level() const { return audio_level_; }
private:
bool voice_activity_;
int audio_level_;
};
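// Example use of AudioLevel above (illustrative values): a packet carrying
// voiced audio at -30 dBov could be represented as:
//
//   AudioLevel level(/*voice_activity=*/true, /*audio_level=*/30);
//   // level.level() == 30, i.e. -30 dBov.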
inline bool operator==(const AbsoluteCaptureTime& lhs,
const AbsoluteCaptureTime& rhs) {
return (lhs.absolute_capture_timestamp == rhs.absolute_capture_timestamp) &&
(lhs.estimated_capture_clock_offset ==
rhs.estimated_capture_clock_offset);
}
inline bool operator!=(const AbsoluteCaptureTime& lhs,
const AbsoluteCaptureTime& rhs) {
return !(lhs == rhs);
}
struct RTPHeaderExtension {
RTPHeaderExtension();
RTPHeaderExtension(const RTPHeaderExtension& other);
RTPHeaderExtension& operator=(const RTPHeaderExtension& other);
static constexpr int kAbsSendTimeFraction = 18;
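  // For example, an `absoluteSendTime` of (1 << kAbsSendTimeFraction)
  // represents exactly one second, so the function below would return
  // Timestamp::Micros(1000000).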
Timestamp GetAbsoluteSendTimestamp() const {
RTC_DCHECK(hasAbsoluteSendTime);
RTC_DCHECK(absoluteSendTime < (1ul << 24));
return Timestamp::Micros((absoluteSendTime * 1000000ll) /
(1 << kAbsSendTimeFraction));
}
bool hasTransmissionTimeOffset;
int32_t transmissionTimeOffset;
bool hasAbsoluteSendTime;
uint32_t absoluteSendTime;
absl::optional<AbsoluteCaptureTime> absolute_capture_time;
bool hasTransportSequenceNumber;
uint16_t transportSequenceNumber;
absl::optional<FeedbackRequest> feedback_request;
// Audio Level includes both level in dBov and voiced/unvoiced bit. See:
// https://tools.ietf.org/html/rfc6464#section-3
absl::optional<AudioLevel> audio_level() const { return audio_level_; }
void set_audio_level(absl::optional<AudioLevel> audio_level) {
audio_level_ = audio_level;
}
// For Coordination of Video Orientation. See
// http://www.etsi.org/deliver/etsi_ts/126100_126199/126114/12.07.00_60/
// ts_126114v120700p.pdf
bool hasVideoRotation;
VideoRotation videoRotation;
  // TODO(ilnik): Refactor this and the one above to be absl::optional and
  // remove the corresponding bool flags.
bool hasVideoContentType;
VideoContentType videoContentType;
bool has_video_timing;
VideoSendTiming video_timing;
VideoPlayoutDelay playout_delay;
// For identification of a stream when ssrc is not signaled. See
// https://tools.ietf.org/html/rfc8852
std::string stream_id;
std::string repaired_stream_id;
// For identifying the media section used to interpret this RTP packet. See
// https://tools.ietf.org/html/rfc8843
std::string mid;
absl::optional<ColorSpace> color_space;
private:
absl::optional<AudioLevel> audio_level_;
};
enum { kRtpCsrcSize = 15 }; // RFC 3550 page 13
struct RTC_EXPORT RTPHeader {
RTPHeader();
RTPHeader(const RTPHeader& other);
RTPHeader& operator=(const RTPHeader& other);
bool markerBit;
uint8_t payloadType;
uint16_t sequenceNumber;
uint32_t timestamp;
uint32_t ssrc;
uint8_t numCSRCs;
uint32_t arrOfCSRCs[kRtpCsrcSize];
size_t paddingLength;
size_t headerLength;
RTPHeaderExtension extension;
};
// RTCP mode to use. Compound mode is described by RFC 4585 and reduced-size
// RTCP mode is described by RFC 5506.
enum class RtcpMode { kOff, kCompound, kReducedSize };
enum NetworkState {
kNetworkUp,
kNetworkDown,
};
} // namespace webrtc
#endif // API_RTP_HEADERS_H_

View File: api/rtp_packet_info.cc

@@ -0,0 +1,63 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/rtp_packet_info.h"
#include <stddef.h>
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>
#include "api/rtp_headers.h"
#include "api/units/timestamp.h"
namespace webrtc {
RtpPacketInfo::RtpPacketInfo()
: ssrc_(0), rtp_timestamp_(0), receive_time_(Timestamp::MinusInfinity()) {}
RtpPacketInfo::RtpPacketInfo(uint32_t ssrc,
std::vector<uint32_t> csrcs,
uint32_t rtp_timestamp,
Timestamp receive_time)
: ssrc_(ssrc),
csrcs_(std::move(csrcs)),
rtp_timestamp_(rtp_timestamp),
receive_time_(receive_time) {}
RtpPacketInfo::RtpPacketInfo(const RTPHeader& rtp_header,
Timestamp receive_time)
: ssrc_(rtp_header.ssrc),
rtp_timestamp_(rtp_header.timestamp),
receive_time_(receive_time) {
const auto& extension = rtp_header.extension;
const auto csrcs_count = std::min<size_t>(rtp_header.numCSRCs, kRtpCsrcSize);
csrcs_.assign(&rtp_header.arrOfCSRCs[0], &rtp_header.arrOfCSRCs[csrcs_count]);
if (extension.audio_level()) {
audio_level_ = extension.audio_level()->level();
}
absolute_capture_time_ = extension.absolute_capture_time;
}
bool operator==(const RtpPacketInfo& lhs, const RtpPacketInfo& rhs) {
return (lhs.ssrc() == rhs.ssrc()) && (lhs.csrcs() == rhs.csrcs()) &&
(lhs.rtp_timestamp() == rhs.rtp_timestamp()) &&
(lhs.receive_time() == rhs.receive_time()) &&
(lhs.audio_level() == rhs.audio_level()) &&
(lhs.absolute_capture_time() == rhs.absolute_capture_time()) &&
(lhs.local_capture_clock_offset() == rhs.local_capture_clock_offset());
}
} // namespace webrtc

View File: api/rtp_packet_info.h

@@ -0,0 +1,117 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_RTP_PACKET_INFO_H_
#define API_RTP_PACKET_INFO_H_
#include <cstdint>
#include <utility>
#include <vector>
#include "absl/types/optional.h"
#include "api/rtp_headers.h"
#include "api/units/time_delta.h"
#include "api/units/timestamp.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
//
// Structure to hold information about a received `RtpPacket`. It is primarily
// used to carry per-packet information from when a packet is received until
// the information is passed to `SourceTracker`.
//
class RTC_EXPORT RtpPacketInfo {
public:
RtpPacketInfo();
RtpPacketInfo(uint32_t ssrc,
std::vector<uint32_t> csrcs,
uint32_t rtp_timestamp,
Timestamp receive_time);
RtpPacketInfo(const RTPHeader& rtp_header, Timestamp receive_time);
RtpPacketInfo(const RtpPacketInfo& other) = default;
RtpPacketInfo(RtpPacketInfo&& other) = default;
RtpPacketInfo& operator=(const RtpPacketInfo& other) = default;
RtpPacketInfo& operator=(RtpPacketInfo&& other) = default;
uint32_t ssrc() const { return ssrc_; }
void set_ssrc(uint32_t value) { ssrc_ = value; }
const std::vector<uint32_t>& csrcs() const { return csrcs_; }
void set_csrcs(std::vector<uint32_t> value) { csrcs_ = std::move(value); }
uint32_t rtp_timestamp() const { return rtp_timestamp_; }
void set_rtp_timestamp(uint32_t value) { rtp_timestamp_ = value; }
Timestamp receive_time() const { return receive_time_; }
void set_receive_time(Timestamp value) { receive_time_ = value; }
absl::optional<uint8_t> audio_level() const { return audio_level_; }
RtpPacketInfo& set_audio_level(absl::optional<uint8_t> value) {
audio_level_ = value;
return *this;
}
const absl::optional<AbsoluteCaptureTime>& absolute_capture_time() const {
return absolute_capture_time_;
}
RtpPacketInfo& set_absolute_capture_time(
const absl::optional<AbsoluteCaptureTime>& value) {
absolute_capture_time_ = value;
return *this;
}
const absl::optional<TimeDelta>& local_capture_clock_offset() const {
return local_capture_clock_offset_;
}
RtpPacketInfo& set_local_capture_clock_offset(
absl::optional<TimeDelta> value) {
local_capture_clock_offset_ = value;
return *this;
}
private:
// Fields from the RTP header:
// https://tools.ietf.org/html/rfc3550#section-5.1
uint32_t ssrc_;
std::vector<uint32_t> csrcs_;
uint32_t rtp_timestamp_;
// Local `webrtc::Clock`-based timestamp of when the packet was received.
Timestamp receive_time_;
// Fields from the Audio Level header extension:
// https://tools.ietf.org/html/rfc6464#section-3
absl::optional<uint8_t> audio_level_;
// Fields from the Absolute Capture Time header extension:
// http://www.webrtc.org/experiments/rtp-hdrext/abs-capture-time
absl::optional<AbsoluteCaptureTime> absolute_capture_time_;
// Clock offset between the local clock and the capturer's clock.
// Do not confuse with `AbsoluteCaptureTime::estimated_capture_clock_offset`
// which instead represents the clock offset between a remote sender and the
// capturer. The following holds:
// Capture's NTP Clock = Local NTP Clock + Local-Capture Clock Offset
absl::optional<TimeDelta> local_capture_clock_offset_;
};
bool operator==(const RtpPacketInfo& lhs, const RtpPacketInfo& rhs);
inline bool operator!=(const RtpPacketInfo& lhs, const RtpPacketInfo& rhs) {
return !(lhs == rhs);
}
} // namespace webrtc
#endif // API_RTP_PACKET_INFO_H_

View File: api/rtp_packet_infos.h

@@ -0,0 +1,130 @@
/*
* Copyright (c) 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_RTP_PACKET_INFOS_H_
#define API_RTP_PACKET_INFOS_H_
#include <utility>
#include <vector>
#include "api/make_ref_counted.h"
#include "api/ref_counted_base.h"
#include "api/rtp_packet_info.h"
#include "api/scoped_refptr.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Semi-immutable structure to hold information about packets used to assemble
// an audio or video frame. Uses internal reference counting to make it very
// cheap to copy.
//
// We should ideally just use `std::vector<RtpPacketInfo>` and have it
// `std::move()`-ed as the per-packet information is transferred from one object
// to another. But moving the info, instead of copying it, is not easily done
// for the current video code.
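//
// Illustrative sketch (values are made up): build the infos once, then copy
// the wrapper cheaply; copies share the internal reference-counted storage.
//
//   std::vector<RtpPacketInfo> infos;
//   infos.emplace_back(/*ssrc=*/123, std::vector<uint32_t>{},
//                      /*rtp_timestamp=*/0, Timestamp::Millis(0));
//   RtpPacketInfos packet_infos(std::move(infos));
//   RtpPacketInfos copy = packet_infos;  // Copies a scoped_refptr, not the
//                                        // underlying vector.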
class RTC_EXPORT RtpPacketInfos {
public:
using vector_type = std::vector<RtpPacketInfo>;
using value_type = vector_type::value_type;
using size_type = vector_type::size_type;
using difference_type = vector_type::difference_type;
using const_reference = vector_type::const_reference;
using const_pointer = vector_type::const_pointer;
using const_iterator = vector_type::const_iterator;
using const_reverse_iterator = vector_type::const_reverse_iterator;
using reference = const_reference;
using pointer = const_pointer;
using iterator = const_iterator;
using reverse_iterator = const_reverse_iterator;
RtpPacketInfos() {}
explicit RtpPacketInfos(const vector_type& entries)
: data_(Data::Create(entries)) {}
explicit RtpPacketInfos(vector_type&& entries)
: data_(Data::Create(std::move(entries))) {}
RtpPacketInfos(const RtpPacketInfos& other) = default;
RtpPacketInfos(RtpPacketInfos&& other) = default;
RtpPacketInfos& operator=(const RtpPacketInfos& other) = default;
RtpPacketInfos& operator=(RtpPacketInfos&& other) = default;
const_reference operator[](size_type pos) const { return entries()[pos]; }
const_reference at(size_type pos) const { return entries().at(pos); }
const_reference front() const { return entries().front(); }
const_reference back() const { return entries().back(); }
const_iterator begin() const { return entries().begin(); }
const_iterator end() const { return entries().end(); }
const_reverse_iterator rbegin() const { return entries().rbegin(); }
const_reverse_iterator rend() const { return entries().rend(); }
const_iterator cbegin() const { return entries().cbegin(); }
const_iterator cend() const { return entries().cend(); }
const_reverse_iterator crbegin() const { return entries().crbegin(); }
const_reverse_iterator crend() const { return entries().crend(); }
bool empty() const { return entries().empty(); }
size_type size() const { return entries().size(); }
private:
class Data final : public rtc::RefCountedNonVirtual<Data> {
public:
static rtc::scoped_refptr<Data> Create(const vector_type& entries) {
// Performance optimization for the empty case.
if (entries.empty()) {
return nullptr;
}
return rtc::make_ref_counted<Data>(entries);
}
static rtc::scoped_refptr<Data> Create(vector_type&& entries) {
// Performance optimization for the empty case.
if (entries.empty()) {
return nullptr;
}
return rtc::make_ref_counted<Data>(std::move(entries));
}
const vector_type& entries() const { return entries_; }
explicit Data(const vector_type& entries) : entries_(entries) {}
explicit Data(vector_type&& entries) : entries_(std::move(entries)) {}
~Data() = default;
private:
const vector_type entries_;
};
static const vector_type& empty_entries() {
static const vector_type& value = *new vector_type();
return value;
}
const vector_type& entries() const {
if (data_ != nullptr) {
return data_->entries();
} else {
return empty_entries();
}
}
rtc::scoped_refptr<Data> data_;
};
} // namespace webrtc
#endif // API_RTP_PACKET_INFOS_H_

View File: api/sequence_checker.h

@@ -0,0 +1,141 @@
/*
* Copyright 2019 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_SEQUENCE_CHECKER_H_
#define API_SEQUENCE_CHECKER_H_
#include "api/task_queue/task_queue_base.h"
#include "rtc_base/checks.h"
#include "rtc_base/synchronization/sequence_checker_internal.h"
#include "rtc_base/thread_annotations.h"
namespace webrtc {
// SequenceChecker is a helper class used to help verify that some methods
// of a class are called on the same task queue or thread. A SequenceChecker
// is bound to a task queue if the object is created on a task queue, or to a
// thread otherwise.
//
// Example:
// class MyClass {
// public:
// void Foo() {
// RTC_DCHECK_RUN_ON(&sequence_checker_);
// ... (do stuff) ...
// }
//
// private:
// SequenceChecker sequence_checker_;
// }
//
// In Release mode, IsCurrent will always return true.
class RTC_LOCKABLE SequenceChecker
#if RTC_DCHECK_IS_ON
: public webrtc_sequence_checker_internal::SequenceCheckerImpl {
using Impl = webrtc_sequence_checker_internal::SequenceCheckerImpl;
#else
: public webrtc_sequence_checker_internal::SequenceCheckerDoNothing {
using Impl = webrtc_sequence_checker_internal::SequenceCheckerDoNothing;
#endif
public:
enum InitialState : bool { kDetached = false, kAttached = true };
  // TODO(tommi): We could maybe join these two ctors and have fewer factory
  // functions. At the moment they're separate to minimize code changes when
  // we added the second ctor, as well as to avoid having unnecessary code in
  // SequenceChecker that must only run for the SequenceCheckerImpl
  // implementation.
// In theory we could have something like:
//
// SequenceChecker(InitialState initial_state = kAttached,
// TaskQueueBase* attached_queue = TaskQueueBase::Current());
//
// But the problem with that is having the call to `Current()` exist for
// `SequenceCheckerDoNothing`.
explicit SequenceChecker(InitialState initial_state = kAttached)
: Impl(initial_state) {}
explicit SequenceChecker(TaskQueueBase* attached_queue)
: Impl(attached_queue) {}
// Returns true if sequence checker is attached to the current sequence.
bool IsCurrent() const { return Impl::IsCurrent(); }
// Detaches checker from sequence to which it is attached. Next attempt
// to do a check with this checker will result in attaching this checker
// to the sequence on which check was performed.
void Detach() { Impl::Detach(); }
};
} // namespace webrtc
// The RTC_RUN_ON/RTC_GUARDED_BY/RTC_DCHECK_RUN_ON macros allow annotating
// which thread or task queue a variable may be accessed from.
// Using tooling designed to check mutexes, every access to an annotated
// variable is checked at compile time, and a run-time DCHECK verifies that
// the thread/task queue is correct.
//
// class SequenceCheckerExample {
// public:
// int CalledFromPacer() RTC_RUN_ON(pacer_sequence_checker_) {
// return var2_;
// }
//
// void CallMeFromPacer() {
// RTC_DCHECK_RUN_ON(&pacer_sequence_checker_)
// << "Should be called from pacer";
// CalledFromPacer();
// }
//
// private:
// int pacer_var_ RTC_GUARDED_BY(pacer_sequence_checker_);
// SequenceChecker pacer_sequence_checker_;
// };
//
// class TaskQueueExample {
// public:
// class Encoder {
// public:
//    webrtc::TaskQueueBase& Queue() { return encoder_queue_; }
// void Encode() {
// RTC_DCHECK_RUN_ON(&encoder_queue_);
// DoSomething(var_);
// }
//
// private:
//    webrtc::TaskQueueBase& encoder_queue_;
// Frame var_ RTC_GUARDED_BY(encoder_queue_);
// };
//
// void Encode() {
// // Will fail at runtime when DCHECK is enabled:
// // encoder_->Encode();
// // Will work:
// rtc::scoped_refptr<Encoder> encoder = encoder_;
// encoder_->Queue().PostTask([encoder] { encoder->Encode(); });
// }
//
// private:
// rtc::scoped_refptr<Encoder> encoder_;
// }
// Documents that a function is expected to be called from the same
// thread/task queue.
#define RTC_RUN_ON(x) \
RTC_THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(x))
// Checks current code is running on the desired sequence.
//
// First statement validates it is running on the sequence `x`.
// Second statement annotates for the thread safety analyzer the check was done.
// Such annotation has to be attached to a function, and that function has to be
// called. Thus current implementation creates a noop lambda and calls it.
#define RTC_DCHECK_RUN_ON(x) \
RTC_DCHECK((x)->IsCurrent()) \
<< webrtc::webrtc_sequence_checker_internal::ExpectationToString(x); \
[]() RTC_ASSERT_EXCLUSIVE_LOCK(x) {}()
#endif // API_SEQUENCE_CHECKER_H_

View File: api/video/color_space.cc

@@ -0,0 +1,269 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video/color_space.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include "absl/types/optional.h"
#include "api/video/hdr_metadata.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
namespace {
// Try to convert `enum_value` into the enum class T. `enum_bitmask` is
// created by the function below. Returns true if the conversion was
// successful, false otherwise.
template <typename T>
bool SetFromUint8(uint8_t enum_value, uint64_t enum_bitmask, T* out) {
if ((enum_value < 64) && ((enum_bitmask >> enum_value) & 1)) {
*out = static_cast<T>(enum_value);
return true;
}
return false;
}
// This function serves as an assert for the constexpr function below. It's on
// purpose not declared as constexpr so that it causes a build problem if enum
// values of 64 or above are used. The bitmask and the code generating it would
// have to be extended if the standard is updated to include enum values >= 64.
int EnumMustBeLessThan64() {
return -1;
}
template <typename T, size_t N>
constexpr int MakeMask(const int index, const int length, T (&values)[N]) {
return length > 1
? (MakeMask(index, 1, values) +
MakeMask(index + 1, length - 1, values))
: (static_cast<uint8_t>(values[index]) < 64
? (uint64_t{1} << static_cast<uint8_t>(values[index]))
: EnumMustBeLessThan64());
}
// Create a bitmask where each bit corresponds to one potential enum value.
// `values` should be an array listing all possible enum values. The bit is set
// to one if the corresponding enum exists. Only works for enums with values
// less than 64.
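// For example, for an enum whose valid values are {0, 2, 5}, the resulting
// bitmask is 0b100101 (decimal 37), so SetFromUint8() would accept 0, 2 and 5
// and reject e.g. 3.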
template <typename T, size_t N>
constexpr uint64_t CreateEnumBitmask(T (&values)[N]) {
return MakeMask(0, N, values);
}
bool SetChromaSitingFromUint8(uint8_t enum_value,
ColorSpace::ChromaSiting* chroma_siting) {
constexpr ColorSpace::ChromaSiting kChromaSitings[] = {
ColorSpace::ChromaSiting::kUnspecified,
ColorSpace::ChromaSiting::kCollocated, ColorSpace::ChromaSiting::kHalf};
constexpr uint64_t enum_bitmask = CreateEnumBitmask(kChromaSitings);
return SetFromUint8(enum_value, enum_bitmask, chroma_siting);
}
} // namespace
ColorSpace::ColorSpace() = default;
ColorSpace::ColorSpace(const ColorSpace& other) = default;
ColorSpace::ColorSpace(ColorSpace&& other) = default;
ColorSpace& ColorSpace::operator=(const ColorSpace& other) = default;
ColorSpace::ColorSpace(PrimaryID primaries,
TransferID transfer,
MatrixID matrix,
RangeID range)
: ColorSpace(primaries,
transfer,
matrix,
range,
ChromaSiting::kUnspecified,
ChromaSiting::kUnspecified,
nullptr) {}
ColorSpace::ColorSpace(PrimaryID primaries,
TransferID transfer,
MatrixID matrix,
RangeID range,
ChromaSiting chroma_siting_horz,
ChromaSiting chroma_siting_vert,
const HdrMetadata* hdr_metadata)
: primaries_(primaries),
transfer_(transfer),
matrix_(matrix),
range_(range),
chroma_siting_horizontal_(chroma_siting_horz),
chroma_siting_vertical_(chroma_siting_vert),
hdr_metadata_(hdr_metadata ? absl::make_optional(*hdr_metadata)
: absl::nullopt) {}
ColorSpace::PrimaryID ColorSpace::primaries() const {
return primaries_;
}
ColorSpace::TransferID ColorSpace::transfer() const {
return transfer_;
}
ColorSpace::MatrixID ColorSpace::matrix() const {
return matrix_;
}
ColorSpace::RangeID ColorSpace::range() const {
return range_;
}
ColorSpace::ChromaSiting ColorSpace::chroma_siting_horizontal() const {
return chroma_siting_horizontal_;
}
ColorSpace::ChromaSiting ColorSpace::chroma_siting_vertical() const {
return chroma_siting_vertical_;
}
const HdrMetadata* ColorSpace::hdr_metadata() const {
return hdr_metadata_ ? &*hdr_metadata_ : nullptr;
}
#define PRINT_ENUM_CASE(TYPE, NAME) \
case TYPE::NAME: \
ss << #NAME; \
break;
std::string ColorSpace::AsString() const {
char buf[1024];
rtc::SimpleStringBuilder ss(buf);
ss << "{primaries:";
switch (primaries_) {
PRINT_ENUM_CASE(PrimaryID, kBT709)
PRINT_ENUM_CASE(PrimaryID, kUnspecified)
PRINT_ENUM_CASE(PrimaryID, kBT470M)
PRINT_ENUM_CASE(PrimaryID, kBT470BG)
PRINT_ENUM_CASE(PrimaryID, kSMPTE170M)
PRINT_ENUM_CASE(PrimaryID, kSMPTE240M)
PRINT_ENUM_CASE(PrimaryID, kFILM)
PRINT_ENUM_CASE(PrimaryID, kBT2020)
PRINT_ENUM_CASE(PrimaryID, kSMPTEST428)
PRINT_ENUM_CASE(PrimaryID, kSMPTEST431)
PRINT_ENUM_CASE(PrimaryID, kSMPTEST432)
PRINT_ENUM_CASE(PrimaryID, kJEDECP22)
}
ss << ", transfer:";
switch (transfer_) {
PRINT_ENUM_CASE(TransferID, kBT709)
PRINT_ENUM_CASE(TransferID, kUnspecified)
PRINT_ENUM_CASE(TransferID, kGAMMA22)
PRINT_ENUM_CASE(TransferID, kGAMMA28)
PRINT_ENUM_CASE(TransferID, kSMPTE170M)
PRINT_ENUM_CASE(TransferID, kSMPTE240M)
PRINT_ENUM_CASE(TransferID, kLINEAR)
PRINT_ENUM_CASE(TransferID, kLOG)
PRINT_ENUM_CASE(TransferID, kLOG_SQRT)
PRINT_ENUM_CASE(TransferID, kIEC61966_2_4)
PRINT_ENUM_CASE(TransferID, kBT1361_ECG)
PRINT_ENUM_CASE(TransferID, kIEC61966_2_1)
PRINT_ENUM_CASE(TransferID, kBT2020_10)
PRINT_ENUM_CASE(TransferID, kBT2020_12)
PRINT_ENUM_CASE(TransferID, kSMPTEST2084)
PRINT_ENUM_CASE(TransferID, kSMPTEST428)
PRINT_ENUM_CASE(TransferID, kARIB_STD_B67)
}
ss << ", matrix:";
switch (matrix_) {
PRINT_ENUM_CASE(MatrixID, kRGB)
PRINT_ENUM_CASE(MatrixID, kBT709)
PRINT_ENUM_CASE(MatrixID, kUnspecified)
PRINT_ENUM_CASE(MatrixID, kFCC)
PRINT_ENUM_CASE(MatrixID, kBT470BG)
PRINT_ENUM_CASE(MatrixID, kSMPTE170M)
PRINT_ENUM_CASE(MatrixID, kSMPTE240M)
PRINT_ENUM_CASE(MatrixID, kYCOCG)
PRINT_ENUM_CASE(MatrixID, kBT2020_NCL)
PRINT_ENUM_CASE(MatrixID, kBT2020_CL)
PRINT_ENUM_CASE(MatrixID, kSMPTE2085)
PRINT_ENUM_CASE(MatrixID, kCDNCLS)
PRINT_ENUM_CASE(MatrixID, kCDCLS)
PRINT_ENUM_CASE(MatrixID, kBT2100_ICTCP)
}
ss << ", range:";
switch (range_) {
PRINT_ENUM_CASE(RangeID, kInvalid)
PRINT_ENUM_CASE(RangeID, kLimited)
PRINT_ENUM_CASE(RangeID, kFull)
PRINT_ENUM_CASE(RangeID, kDerived)
}
ss << "}";
return ss.str();
}
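// Example output of AsString() (illustrative): a full-range BT.709 color
// space prints as
//
//   {primaries:kBT709, transfer:kBT709, matrix:kBT709, range:kFull}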
#undef PRINT_ENUM_CASE
bool ColorSpace::set_primaries_from_uint8(uint8_t enum_value) {
constexpr PrimaryID kPrimaryIds[] = {
PrimaryID::kBT709, PrimaryID::kUnspecified, PrimaryID::kBT470M,
PrimaryID::kBT470BG, PrimaryID::kSMPTE170M, PrimaryID::kSMPTE240M,
PrimaryID::kFILM, PrimaryID::kBT2020, PrimaryID::kSMPTEST428,
PrimaryID::kSMPTEST431, PrimaryID::kSMPTEST432, PrimaryID::kJEDECP22};
constexpr uint64_t enum_bitmask = CreateEnumBitmask(kPrimaryIds);
return SetFromUint8(enum_value, enum_bitmask, &primaries_);
}
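// Illustrative behavior of the setters above and below (hypothetical caller
// code): values listed in T-REC H.273 are accepted, gaps are rejected.
//
//   ColorSpace cs;
//   cs.set_primaries_from_uint8(9);  // Returns true, primaries() == kBT2020.
//   cs.set_primaries_from_uint8(3);  // Returns false, 3 is not a PrimaryID.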
bool ColorSpace::set_transfer_from_uint8(uint8_t enum_value) {
constexpr TransferID kTransferIds[] = {
TransferID::kBT709, TransferID::kUnspecified,
TransferID::kGAMMA22, TransferID::kGAMMA28,
TransferID::kSMPTE170M, TransferID::kSMPTE240M,
TransferID::kLINEAR, TransferID::kLOG,
TransferID::kLOG_SQRT, TransferID::kIEC61966_2_4,
TransferID::kBT1361_ECG, TransferID::kIEC61966_2_1,
TransferID::kBT2020_10, TransferID::kBT2020_12,
TransferID::kSMPTEST2084, TransferID::kSMPTEST428,
TransferID::kARIB_STD_B67};
constexpr uint64_t enum_bitmask = CreateEnumBitmask(kTransferIds);
return SetFromUint8(enum_value, enum_bitmask, &transfer_);
}
bool ColorSpace::set_matrix_from_uint8(uint8_t enum_value) {
constexpr MatrixID kMatrixIds[] = {
MatrixID::kRGB, MatrixID::kBT709, MatrixID::kUnspecified,
MatrixID::kFCC, MatrixID::kBT470BG, MatrixID::kSMPTE170M,
MatrixID::kSMPTE240M, MatrixID::kYCOCG, MatrixID::kBT2020_NCL,
MatrixID::kBT2020_CL, MatrixID::kSMPTE2085, MatrixID::kCDNCLS,
MatrixID::kCDCLS, MatrixID::kBT2100_ICTCP};
constexpr uint64_t enum_bitmask = CreateEnumBitmask(kMatrixIds);
return SetFromUint8(enum_value, enum_bitmask, &matrix_);
}
bool ColorSpace::set_range_from_uint8(uint8_t enum_value) {
constexpr RangeID kRangeIds[] = {RangeID::kInvalid, RangeID::kLimited,
RangeID::kFull, RangeID::kDerived};
constexpr uint64_t enum_bitmask = CreateEnumBitmask(kRangeIds);
return SetFromUint8(enum_value, enum_bitmask, &range_);
}
bool ColorSpace::set_chroma_siting_horizontal_from_uint8(uint8_t enum_value) {
return SetChromaSitingFromUint8(enum_value, &chroma_siting_horizontal_);
}
bool ColorSpace::set_chroma_siting_vertical_from_uint8(uint8_t enum_value) {
return SetChromaSitingFromUint8(enum_value, &chroma_siting_vertical_);
}
void ColorSpace::set_hdr_metadata(const HdrMetadata* hdr_metadata) {
hdr_metadata_ =
hdr_metadata ? absl::make_optional(*hdr_metadata) : absl::nullopt;
}
} // namespace webrtc

View File

@ -0,0 +1,181 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_COLOR_SPACE_H_
#define API_VIDEO_COLOR_SPACE_H_
#include <stdint.h>
#include <string>
#include "absl/types/optional.h"
#include "api/video/hdr_metadata.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// This class represents color information as specified in T-REC H.273,
// available from https://www.itu.int/rec/T-REC-H.273.
//
// WebRTC's supported codecs:
// - VP9 supports color profiles, see VP9 Bitstream & Decoding Process
// Specification Version 0.6 Section 7.2.2 "Color config semantics" available
// from https://www.webmproject.org.
// - VP8 only supports BT.601, see
// https://tools.ietf.org/html/rfc6386#section-9.2
// - H264 uses the exact same representation as T-REC H.273. See T-REC-H.264
// E.2.1, "VUI parameters semantics", available from
// https://www.itu.int/rec/T-REC-H.264.
class RTC_EXPORT ColorSpace {
public:
enum class PrimaryID : uint8_t {
// The indices are equal to the values specified in T-REC H.273 Table 2.
kBT709 = 1,
kUnspecified = 2,
kBT470M = 4,
kBT470BG = 5,
kSMPTE170M = 6, // Identical to BT601
kSMPTE240M = 7,
kFILM = 8,
kBT2020 = 9,
kSMPTEST428 = 10,
kSMPTEST431 = 11,
kSMPTEST432 = 12,
kJEDECP22 = 22, // Identical to EBU3213-E
// When adding/removing entries here, please make sure to do the
// corresponding change to kPrimaryIds.
};
enum class TransferID : uint8_t {
// The indices are equal to the values specified in T-REC H.273 Table 3.
kBT709 = 1,
kUnspecified = 2,
kGAMMA22 = 4,
kGAMMA28 = 5,
kSMPTE170M = 6,
kSMPTE240M = 7,
kLINEAR = 8,
kLOG = 9,
kLOG_SQRT = 10,
kIEC61966_2_4 = 11,
kBT1361_ECG = 12,
kIEC61966_2_1 = 13,
kBT2020_10 = 14,
kBT2020_12 = 15,
kSMPTEST2084 = 16,
kSMPTEST428 = 17,
kARIB_STD_B67 = 18,
// When adding/removing entries here, please make sure to do the
// corresponding change to kTransferIds.
};
enum class MatrixID : uint8_t {
// The indices are equal to the values specified in T-REC H.273 Table 4.
kRGB = 0,
kBT709 = 1,
kUnspecified = 2,
kFCC = 4,
kBT470BG = 5,
kSMPTE170M = 6,
kSMPTE240M = 7,
kYCOCG = 8,
kBT2020_NCL = 9,
kBT2020_CL = 10,
kSMPTE2085 = 11,
kCDNCLS = 12,
kCDCLS = 13,
kBT2100_ICTCP = 14,
// When adding/removing entries here, please make sure to do the
// corresponding change to kMatrixIds.
};
enum class RangeID {
// The indices are equal to the values specified at
// https://www.webmproject.org/docs/container/#colour for the element Range.
kInvalid = 0,
// Limited Rec. 709 color range with RGB values ranging from 16 to 235.
kLimited = 1,
// Full RGB color range with RGB values from 0 to 255.
kFull = 2,
// Range is defined by MatrixCoefficients/TransferCharacteristics.
kDerived = 3,
// When adding/removing entries here, please make sure to do the
// corresponding change to kRangeIds.
};
enum class ChromaSiting {
// Chroma siting specifies how chroma is subsampled relative to the luma
// samples in a YUV video frame.
// The indices are equal to the values specified at
// https://www.webmproject.org/docs/container/#colour for the element
// ChromaSitingVert and ChromaSitingHorz.
kUnspecified = 0,
kCollocated = 1,
kHalf = 2,
// When adding/removing entries here, please make sure to do the
// corresponding change to kChromaSitings.
};
ColorSpace();
ColorSpace(const ColorSpace& other);
ColorSpace(ColorSpace&& other);
ColorSpace& operator=(const ColorSpace& other);
ColorSpace(PrimaryID primaries,
TransferID transfer,
MatrixID matrix,
RangeID range);
ColorSpace(PrimaryID primaries,
TransferID transfer,
MatrixID matrix,
RangeID range,
ChromaSiting chroma_siting_horizontal,
ChromaSiting chroma_siting_vertical,
const HdrMetadata* hdr_metadata);
friend bool operator==(const ColorSpace& lhs, const ColorSpace& rhs) {
return lhs.primaries_ == rhs.primaries_ && lhs.transfer_ == rhs.transfer_ &&
lhs.matrix_ == rhs.matrix_ && lhs.range_ == rhs.range_ &&
lhs.chroma_siting_horizontal_ == rhs.chroma_siting_horizontal_ &&
lhs.chroma_siting_vertical_ == rhs.chroma_siting_vertical_ &&
lhs.hdr_metadata_ == rhs.hdr_metadata_;
}
friend bool operator!=(const ColorSpace& lhs, const ColorSpace& rhs) {
return !(lhs == rhs);
}
PrimaryID primaries() const;
TransferID transfer() const;
MatrixID matrix() const;
RangeID range() const;
ChromaSiting chroma_siting_horizontal() const;
ChromaSiting chroma_siting_vertical() const;
const HdrMetadata* hdr_metadata() const;
std::string AsString() const;
bool set_primaries_from_uint8(uint8_t enum_value);
bool set_transfer_from_uint8(uint8_t enum_value);
bool set_matrix_from_uint8(uint8_t enum_value);
bool set_range_from_uint8(uint8_t enum_value);
bool set_chroma_siting_horizontal_from_uint8(uint8_t enum_value);
bool set_chroma_siting_vertical_from_uint8(uint8_t enum_value);
void set_hdr_metadata(const HdrMetadata* hdr_metadata);
private:
PrimaryID primaries_ = PrimaryID::kUnspecified;
TransferID transfer_ = TransferID::kUnspecified;
MatrixID matrix_ = MatrixID::kUnspecified;
RangeID range_ = RangeID::kInvalid;
ChromaSiting chroma_siting_horizontal_ = ChromaSiting::kUnspecified;
ChromaSiting chroma_siting_vertical_ = ChromaSiting::kUnspecified;
absl::optional<HdrMetadata> hdr_metadata_;
};
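// A minimal usage sketch (hypothetical values, for illustration only):
//
//   ColorSpace color_space(ColorSpace::PrimaryID::kBT709,
//                          ColorSpace::TransferID::kBT709,
//                          ColorSpace::MatrixID::kBT709,
//                          ColorSpace::RangeID::kLimited);
//   std::string description = color_space.AsString();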
} // namespace webrtc
#endif // API_VIDEO_COLOR_SPACE_H_

View File

@ -0,0 +1,21 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video/hdr_metadata.h"
namespace webrtc {
HdrMasteringMetadata::Chromaticity::Chromaticity() = default;
HdrMasteringMetadata::HdrMasteringMetadata() = default;
HdrMetadata::HdrMetadata() = default;
} // namespace webrtc

View File

@ -0,0 +1,105 @@
/*
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_HDR_METADATA_H_
#define API_VIDEO_HDR_METADATA_H_
namespace webrtc {
// SMPTE ST 2086 mastering metadata,
// see https://ieeexplore.ieee.org/document/8353899.
struct HdrMasteringMetadata {
struct Chromaticity {
Chromaticity();
bool operator==(const Chromaticity& rhs) const {
return x == rhs.x && y == rhs.y;
}
bool Validate() const {
return x >= 0.0 && x <= 1.0 && y >= 0.0 && y <= 1.0;
}
// xy chromaticity coordinates must be calculated as specified in ISO
// 11664-3:2012 Section 7, and must be specified with four decimal places.
// The x coordinate should be in the range [0.0001, 0.7400] and the y
// coordinate should be in the range [0.0001, 0.8400]. Valid range [0.0000,
// 1.0000].
float x = 0.0f;
float y = 0.0f;
};
HdrMasteringMetadata();
bool operator==(const HdrMasteringMetadata& rhs) const {
return ((primary_r == rhs.primary_r) && (primary_g == rhs.primary_g) &&
(primary_b == rhs.primary_b) && (white_point == rhs.white_point) &&
(luminance_max == rhs.luminance_max) &&
(luminance_min == rhs.luminance_min));
}
bool Validate() const {
return luminance_max >= 0.0 && luminance_max <= 20000.0 &&
luminance_min >= 0.0 && luminance_min <= 5.0 &&
primary_r.Validate() && primary_g.Validate() &&
primary_b.Validate() && white_point.Validate();
}
// The nominal primaries of the mastering display.
Chromaticity primary_r;
Chromaticity primary_g;
Chromaticity primary_b;
// The nominal chromaticity of the white point of the mastering display.
Chromaticity white_point;
// The nominal maximum display luminance of the mastering display. Specified
// in the unit candela/m2. The value should be in the range [5, 10000] with
// zero decimal places. Valid range [0, 20000].
float luminance_max = 0.0f;
// The nominal minimum display luminance of the mastering display. Specified
// in the unit candela/m2. The value should be in the range [0.0001, 5.0000]
// with four decimal places. Valid range [0.0000, 5.0000].
float luminance_min = 0.0f;
};
// High dynamic range (HDR) metadata common for HDR10 and WebM/VP9-based HDR
// formats. This struct replicates the HDRMetadata struct defined in
// https://cs.chromium.org/chromium/src/media/base/hdr_metadata.h
struct HdrMetadata {
HdrMetadata();
bool operator==(const HdrMetadata& rhs) const {
return (
(max_content_light_level == rhs.max_content_light_level) &&
(max_frame_average_light_level == rhs.max_frame_average_light_level) &&
(mastering_metadata == rhs.mastering_metadata));
}
bool Validate() const {
return max_content_light_level >= 0 && max_content_light_level <= 20000 &&
max_frame_average_light_level >= 0 &&
max_frame_average_light_level <= 20000 &&
mastering_metadata.Validate();
}
HdrMasteringMetadata mastering_metadata;
// Max content light level (CLL), i.e. maximum brightness level present in the
// stream, in nits. 1 nit = 1 candela/m2. Valid range [0, 20000].
int max_content_light_level = 0;
// Max frame-average light level (FALL), i.e. maximum average brightness of
// the brightest frame in the stream, in nits. Valid range [0, 20000].
int max_frame_average_light_level = 0;
};
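// A minimal population sketch with HDR10-like values (hypothetical numbers,
// for illustration only):
//
//   HdrMetadata hdr;
//   hdr.mastering_metadata.primary_r.x = 0.708f;  // BT.2020 red primary.
//   hdr.mastering_metadata.primary_r.y = 0.292f;
//   hdr.mastering_metadata.luminance_max = 1000.0f;  // 1000 nits peak.
//   hdr.mastering_metadata.luminance_min = 0.005f;
//   hdr.max_content_light_level = 1000;        // MaxCLL in nits.
//   hdr.max_frame_average_light_level = 400;   // MaxFALL in nits.
//   bool valid = hdr.Validate();               // true for these values.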
} // namespace webrtc
#endif // API_VIDEO_HDR_METADATA_H_

View File

@ -0,0 +1,45 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video/video_content_type.h"
#include <cstdint>
#include "rtc_base/checks.h"
namespace webrtc {
namespace videocontenttypehelpers {
namespace {
static constexpr uint8_t kScreenshareBitsSize = 1;
static constexpr uint8_t kScreenshareBitsMask =
(1u << kScreenshareBitsSize) - 1;
} // namespace
bool IsScreenshare(const VideoContentType& content_type) {
// Ensure no bits apart from the screenshare bit are set.
// This CHECK is a temporary measure to detect code that introduces
// values according to old versions.
RTC_CHECK((static_cast<uint8_t>(content_type) & ~kScreenshareBitsMask) == 0);
return (static_cast<uint8_t>(content_type) & kScreenshareBitsMask) > 0;
}
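// Illustrative behavior (hypothetical caller code):
//
//   IsScreenshare(VideoContentType::SCREENSHARE);  // true, bit 0 is set.
//   IsScreenshare(VideoContentType::UNSPECIFIED);  // false.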
bool IsValidContentType(uint8_t value) {
// Only the screenshare bit is allowed.
// However, due to previous usage of the next 5 bits, we allow
// the lower 6 bits to be set.
return value < (1 << 6);
}
const char* ToString(const VideoContentType& content_type) {
return IsScreenshare(content_type) ? "screen" : "realtime";
}
} // namespace videocontenttypehelpers
} // namespace webrtc

View File

@ -0,0 +1,36 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_VIDEO_CONTENT_TYPE_H_
#define API_VIDEO_VIDEO_CONTENT_TYPE_H_
#include <stdint.h>
namespace webrtc {
// VideoContentType stored as a single byte, which is sent over the network
// in the rtp-hdrext/video-content-type extension.
// Only the lowest bit is used, per the enum.
enum class VideoContentType : uint8_t {
UNSPECIFIED = 0,
SCREENSHARE = 1,
};
namespace videocontenttypehelpers {
bool IsScreenshare(const VideoContentType& content_type);
bool IsValidContentType(uint8_t value);
const char* ToString(const VideoContentType& content_type);
} // namespace videocontenttypehelpers
} // namespace webrtc
#endif // API_VIDEO_VIDEO_CONTENT_TYPE_H_

View File

@ -0,0 +1,26 @@
/*
* Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_VIDEO_ROTATION_H_
#define API_VIDEO_VIDEO_ROTATION_H_
namespace webrtc {
// enum for clockwise rotation.
enum VideoRotation {
kVideoRotation_0 = 0,
kVideoRotation_90 = 90,
kVideoRotation_180 = 180,
kVideoRotation_270 = 270
};
} // namespace webrtc
#endif // API_VIDEO_VIDEO_ROTATION_H_

View File

@ -0,0 +1,124 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "api/video/video_timing.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include "api/array_view.h"
#include "api/units/time_delta.h"
#include "rtc_base/logging.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/strings/string_builder.h"
namespace webrtc {
uint16_t VideoSendTiming::GetDeltaCappedMs(int64_t base_ms, int64_t time_ms) {
if (time_ms < base_ms) {
RTC_DLOG(LS_ERROR) << "Delta " << (time_ms - base_ms)
<< "ms expected to be positive";
}
return rtc::saturated_cast<uint16_t>(time_ms - base_ms);
}
uint16_t VideoSendTiming::GetDeltaCappedMs(TimeDelta delta) {
if (delta < TimeDelta::Zero()) {
RTC_DLOG(LS_ERROR) << "Delta " << delta.ms()
<< "ms expected to be positive";
}
return rtc::saturated_cast<uint16_t>(delta.ms());
}
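// Worked example for the overloads above (illustrative only): a delta of
// 69000 ms does not fit in 16 bits, so saturated_cast caps it.
//
//   VideoSendTiming::GetDeltaCappedMs(1000, 70000);  // Returns 65535.
//   VideoSendTiming::GetDeltaCappedMs(1000, 31000);  // Returns 30000.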
TimingFrameInfo::TimingFrameInfo()
: rtp_timestamp(0),
capture_time_ms(-1),
encode_start_ms(-1),
encode_finish_ms(-1),
packetization_finish_ms(-1),
pacer_exit_ms(-1),
network_timestamp_ms(-1),
network2_timestamp_ms(-1),
receive_start_ms(-1),
receive_finish_ms(-1),
decode_start_ms(-1),
decode_finish_ms(-1),
render_time_ms(-1),
flags(VideoSendTiming::kNotTriggered) {}
int64_t TimingFrameInfo::EndToEndDelay() const {
return capture_time_ms >= 0 ? decode_finish_ms - capture_time_ms : -1;
}
bool TimingFrameInfo::IsLongerThan(const TimingFrameInfo& other) const {
int64_t other_delay = other.EndToEndDelay();
return other_delay == -1 || EndToEndDelay() > other_delay;
}
bool TimingFrameInfo::operator<(const TimingFrameInfo& other) const {
return other.IsLongerThan(*this);
}
bool TimingFrameInfo::operator<=(const TimingFrameInfo& other) const {
return !IsLongerThan(other);
}
bool TimingFrameInfo::IsOutlier() const {
return !IsInvalid() && (flags & VideoSendTiming::kTriggeredBySize);
}
bool TimingFrameInfo::IsTimerTriggered() const {
return !IsInvalid() && (flags & VideoSendTiming::kTriggeredByTimer);
}
bool TimingFrameInfo::IsInvalid() const {
return flags == VideoSendTiming::kInvalid;
}
std::string TimingFrameInfo::ToString() const {
if (IsInvalid()) {
return "";
}
char buf[1024];
rtc::SimpleStringBuilder sb(buf);
sb << rtp_timestamp << ',' << capture_time_ms << ',' << encode_start_ms << ','
<< encode_finish_ms << ',' << packetization_finish_ms << ','
<< pacer_exit_ms << ',' << network_timestamp_ms << ','
<< network2_timestamp_ms << ',' << receive_start_ms << ','
<< receive_finish_ms << ',' << decode_start_ms << ',' << decode_finish_ms
<< ',' << render_time_ms << ',' << IsOutlier() << ','
<< IsTimerTriggered();
return sb.str();
}
VideoPlayoutDelay::VideoPlayoutDelay(TimeDelta min, TimeDelta max)
: min_(std::clamp(min, TimeDelta::Zero(), kMax)),
max_(std::clamp(max, min_, kMax)) {
if (!(TimeDelta::Zero() <= min && min <= max && max <= kMax)) {
RTC_LOG(LS_ERROR) << "Invalid video playout delay: [" << min << "," << max
<< "]. Clamped to [" << this->min() << "," << this->max()
<< "]";
}
}
bool VideoPlayoutDelay::Set(TimeDelta min, TimeDelta max) {
if (TimeDelta::Zero() <= min && min <= max && max <= kMax) {
min_ = min;
max_ = max;
return true;
}
return false;
}
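// Illustrative behavior of the clamping constructor above (hypothetical
// values): a negative minimum is clamped to zero, and the maximum is kept no
// smaller than the clamped minimum.
//
//   VideoPlayoutDelay delay(TimeDelta::Millis(-10), TimeDelta::Seconds(1));
//   // delay.min() == TimeDelta::Zero(), delay.max() == TimeDelta::Seconds(1),
//   // and an error is logged because the input was out of range.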
} // namespace webrtc

View File

@ -0,0 +1,150 @@
/*
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef API_VIDEO_VIDEO_TIMING_H_
#define API_VIDEO_VIDEO_TIMING_H_
#include <stdint.h>
#include <limits>
#include <string>
#include "api/units/time_delta.h"
#include "rtc_base/system/rtc_export.h"
namespace webrtc {
// Video timing timestamps in ms counted from capture_time_ms of a frame.
// This structure represents data sent in the video-timing RTP header
// extension.
struct RTC_EXPORT VideoSendTiming {
enum TimingFrameFlags : uint8_t {
kNotTriggered = 0, // Timing info valid, but not to be transmitted.
// Used on send-side only.
kTriggeredByTimer = 1 << 0, // Frame marked for tracing by periodic timer.
kTriggeredBySize = 1 << 1, // Frame marked for tracing due to size.
kInvalid = std::numeric_limits<uint8_t>::max() // Invalid, ignore!
};
// Returns |time_ms - base_ms|, capped at the maximum 16-bit value. Used to
// fill this data structure, as the
// https://webrtc.org/experiments/rtp-hdrext/video-timing/ extension stores
// 16-bit deltas of timestamps from packet capture time.
static uint16_t GetDeltaCappedMs(int64_t base_ms, int64_t time_ms);
static uint16_t GetDeltaCappedMs(TimeDelta delta);
uint16_t encode_start_delta_ms;
uint16_t encode_finish_delta_ms;
uint16_t packetization_finish_delta_ms;
uint16_t pacer_exit_delta_ms;
uint16_t network_timestamp_delta_ms;
uint16_t network2_timestamp_delta_ms;
uint8_t flags = TimingFrameFlags::kInvalid;
};
// Used to report precise timings of 'timing frames'. Contains all important
// timestamps for the lifetime of that specific frame. Reported as a string via
// GetStats(). Only the frame that took the longest between two GetStats calls
// is reported.
struct RTC_EXPORT TimingFrameInfo {
TimingFrameInfo();
// Returns end-to-end delay of a frame, if sender and receiver timestamps are
// synchronized, -1 otherwise.
int64_t EndToEndDelay() const;
// Returns true if the current frame took longer to process than the `other`
// frame. If the other frame's clocks are not synchronized, the current frame
// is always preferred.
bool IsLongerThan(const TimingFrameInfo& other) const;
// Returns true if flags are set to indicate this frame was marked for tracing
// due to its size being outside some limit.
bool IsOutlier() const;
// Returns true if flags are set to indicate this frame was marked for tracing
// due to the periodic timer.
bool IsTimerTriggered() const;
// Returns true if the timing data is marked as invalid, in which case it
// should be ignored.
bool IsInvalid() const;
std::string ToString() const;
bool operator<(const TimingFrameInfo& other) const;
bool operator<=(const TimingFrameInfo& other) const;
uint32_t rtp_timestamp; // Identifier of a frame.
// All timestamps below are in the local monotonic clock of the receiver.
// If the sender clock is not yet estimated, sender timestamps
// (capture_time_ms ... pacer_exit_ms) are negative values, but still
// correct relative to each other.
int64_t capture_time_ms;  // Capture time of a frame.
int64_t encode_start_ms; // Encode start time.
int64_t encode_finish_ms; // Encode completion time.
int64_t packetization_finish_ms; // Time when frame was passed to pacer.
int64_t pacer_exit_ms; // Time when last packet was pushed out of pacer.
// Two in-network RTP processor timestamps: meaning is application specific.
int64_t network_timestamp_ms;
int64_t network2_timestamp_ms;
int64_t receive_start_ms; // First received packet time.
int64_t receive_finish_ms; // Last received packet time.
int64_t decode_start_ms; // Decode start time.
int64_t decode_finish_ms; // Decode completion time.
int64_t render_time_ms;  // Proposed render time to ensure smooth playback.
uint8_t flags; // Flags indicating validity and/or why tracing was triggered.
};
// Minimum and maximum playout delay values from capture to render.
// These are best effort values.
//
// min = max = 0 indicates that the receiver should try and render
// frame as soon as possible.
//
// min = x, max = y indicates that the receiver is free to adapt
// in the range (x, y) based on network jitter.
// This class ensures the invariant 0 <= min <= max <= kMax.
class RTC_EXPORT VideoPlayoutDelay {
public:
// Maximum supported value for the delay limit.
static constexpr TimeDelta kMax = TimeDelta::Millis(10) * 0xFFF;
// Creates delay limits that indicate the receiver should try to render the
// frame as soon as possible.
static VideoPlayoutDelay Minimal() {
return VideoPlayoutDelay(TimeDelta::Zero(), TimeDelta::Zero());
}
// Creates valid, but unspecified limits.
VideoPlayoutDelay() = default;
VideoPlayoutDelay(const VideoPlayoutDelay&) = default;
VideoPlayoutDelay& operator=(const VideoPlayoutDelay&) = default;
VideoPlayoutDelay(TimeDelta min, TimeDelta max);
bool Set(TimeDelta min, TimeDelta max);
TimeDelta min() const { return min_; }
TimeDelta max() const { return max_; }
friend bool operator==(const VideoPlayoutDelay& lhs,
const VideoPlayoutDelay& rhs) {
return lhs.min_ == rhs.min_ && lhs.max_ == rhs.max_;
}
private:
TimeDelta min_ = TimeDelta::Zero();
TimeDelta max_ = kMax;
};
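// A minimal usage sketch (hypothetical values, for illustration only):
//
//   VideoPlayoutDelay delay;  // Valid, but unspecified limits.
//   if (!delay.Set(TimeDelta::Millis(200), TimeDelta::Millis(500))) {
//     // Out-of-range limits were rejected; `delay` keeps its old values.
//   }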
} // namespace webrtc
#endif // API_VIDEO_VIDEO_TIMING_H_