Backed out 21 changesets (bug 1867542) for causing Gtest failures at MediaDataEncoderTest.InvalidSize CLOSED TREE
Backed out changeset a092c177e959 (bug 1867542)
Backed out changeset 4a7b2f9ad22b (bug 1867542)
Backed out changeset 6ea19cc6a14b (bug 1867542)
Backed out changeset 46c83c6cf9bb (bug 1867542)
Backed out changeset 7af4d3fe37ba (bug 1867542)
Backed out changeset afb09d4a02f9 (bug 1867542)
Backed out changeset 63f403a0f1ff (bug 1867542)
Backed out changeset e87e1bb6bcf8 (bug 1867542)
Backed out changeset b56ec46251a9 (bug 1867542)
Backed out changeset 1e79f4a01297 (bug 1867542)
Backed out changeset ce2e32e30761 (bug 1867542)
Backed out changeset 7abe1f239b4f (bug 1867542)
Backed out changeset 94f6b561b4d5 (bug 1867542)
Backed out changeset 5f7689ac0357 (bug 1867542)
Backed out changeset 21d7101ba9a3 (bug 1867542)
Backed out changeset ba45280ca500 (bug 1867542)
Backed out changeset 8a0c3faaeb96 (bug 1867542)
Backed out changeset b31389120219 (bug 1867542)
Backed out changeset af86ae97ca08 (bug 1867542)
Backed out changeset 4534cfd92775 (bug 1867542)
Backed out changeset 0f9376cb7b68 (bug 1867542)

parent 62da5887eb
commit 75710f9718
26 changed files with 838 additions and 1136 deletions
@@ -10,18 +10,21 @@
#include "mozilla/AbstractThread.h"
#include "mozilla/SpinEventLoopUntil.h"
#include "mozilla/media/MediaUtils.h"  // For media::Await
#include "nsMimeTypes.h"
#include "PEMFactory.h"
#include "TimeUnits.h"
#include "VideoUtils.h"
#include "VPXDecoder.h"
#include <algorithm>

#define RUN_IF_SUPPORTED(codecType, test)     \
  do {                                        \
    RefPtr<PEMFactory> f(new PEMFactory());   \
    if (f->SupportsCodec(codecType)) {        \
      test();                                 \
    }                                         \
#include <fstream>

#define RUN_IF_SUPPORTED(mimeType, test)                   \
  do {                                                     \
    RefPtr<PEMFactory> f(new PEMFactory());                \
    if (f->SupportsMimeType(nsLiteralCString(mimeType))) { \
      test();                                              \
    }                                                      \
  } while (0)
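Both versions of RUN_IF_SUPPORTED wrap their body in the standard `do { ... } while (0)` idiom so that the multi-statement macro behaves as a single statement. A minimal sketch of why the wrapper matters (hypothetical macros, not from this patch):

#define BAD(x) LogValue(x); Bump(x)  // expands to two statements
#define GOOD(x) do { LogValue(x); Bump(x); } while (0)

// if (ok) BAD(1); else Fail();   // broken: only LogValue(1) is guarded by
//                                // 'ok', and the 'else' no longer parses.
// if (ok) GOOD(1); else Fail();  // one statement; consumes the trailing ';'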

#define BLOCK_SIZE 64

@@ -30,9 +33,10 @@
#define NUM_FRAMES 150UL
#define FRAME_RATE 30
#define FRAME_DURATION (1000000 / FRAME_RATE)
#define BIT_RATE (1000 * 1000)  // 1Mbps
#define BIT_RATE_MODE MediaDataEncoder::BitrateMode::Variable
#define BIT_RATE (1000 * 1000)  // 1Mbps
#define KEYFRAME_INTERVAL FRAME_RATE  // 1 keyframe per second
#define VIDEO_VP8 "video/vp8"
#define VIDEO_VP9 "video/vp9"

using namespace mozilla;

@@ -81,12 +85,10 @@ class MediaDataEncoderTest : public testing::Test {
    img->CopyData(mYUV);
    RefPtr<MediaData> frame = VideoData::CreateFromImage(
        kImageSize, 0,
        media::TimeUnit::FromMicroseconds(AssertedCast<int64_t>(aIndex) *
                                          FRAME_DURATION),
        media::TimeUnit::FromMicroseconds(aIndex * FRAME_DURATION),
        media::TimeUnit::FromMicroseconds(FRAME_DURATION), img,
        (aIndex & 0xF) == 0,
        media::TimeUnit::FromMicroseconds(AssertedCast<int64_t>(aIndex) *
                                          FRAME_DURATION));
        media::TimeUnit::FromMicroseconds(aIndex * FRAME_DURATION));
    return frame.forget();
  }

@@ -119,10 +121,10 @@ class MediaDataEncoderTest : public testing::Test {
  void Draw(const size_t aIndex) {
    auto ySize = mYUV.YDataSize();
    DrawChessboard(mYUV.mYChannel, ySize.width, ySize.height, aIndex << 1);
    int16_t color = AssertedCast<int16_t>(mYUV.mCbChannel[0] + mColorStep);
    int16_t color = mYUV.mCbChannel[0] + mColorStep;
    if (color > 255 || color < 0) {
      mColorStep = AssertedCast<int16_t>(-mColorStep);
      color = AssertedCast<int16_t>(mYUV.mCbChannel[0] + mColorStep);
      mColorStep = -mColorStep;
      color = mYUV.mCbChannel[0] + mColorStep;
    }

    size_t size = (mYUV.mCrChannel - mYUV.mCbChannel);

@@ -138,33 +140,42 @@ class MediaDataEncoderTest : public testing::Test {

template <typename T>
already_AddRefed<MediaDataEncoder> CreateVideoEncoder(
    CodecType aCodec, MediaDataEncoder::Usage aUsage,
    const char* aMimeType, MediaDataEncoder::Usage aUsage,
    MediaDataEncoder::PixelFormat aPixelFormat, int32_t aWidth, int32_t aHeight,
    const Maybe<T>& aSpecific) {
  RefPtr<PEMFactory> f(new PEMFactory());

  if (!f->SupportsCodec(aCodec)) {
  if (!f->SupportsMimeType(nsCString(aMimeType))) {
    return nullptr;
  }

  VideoInfo videoInfo(aWidth, aHeight);
  videoInfo.mMimeType = nsCString(aMimeType);
  const RefPtr<TaskQueue> taskQueue(
      TaskQueue::Create(GetMediaThreadPool(MediaThreadType::PLATFORM_ENCODER),
                        "TestMediaDataEncoder"));

  RefPtr<MediaDataEncoder> e;
#ifdef MOZ_WIDGET_ANDROID
  const MediaDataEncoder::HardwarePreference pref =
      MediaDataEncoder::HardwarePreference::None;
  const bool hardwareNotAllowed = false;
#else
  const MediaDataEncoder::HardwarePreference pref =
      MediaDataEncoder::HardwarePreference::None;
  const bool hardwareNotAllowed = true;
#endif
  e = f->CreateEncoder(
      EncoderConfig(aCodec, gfx::IntSize{aWidth, aHeight}, aUsage, aPixelFormat,
                    aPixelFormat, FRAME_RATE /* FPS */,
                    KEYFRAME_INTERVAL /* keyframe interval */,
                    BIT_RATE /* bitrate */, BIT_RATE_MODE, pref, aSpecific),
      taskQueue);
  if (aSpecific) {
    e = f->CreateEncoder(
        CreateEncoderParams(videoInfo /* track info */, aUsage, taskQueue,
                            aPixelFormat, FRAME_RATE /* FPS */,
                            KEYFRAME_INTERVAL /* keyframe interval */,
                            BIT_RATE /* bitrate */, aSpecific.value()),
        hardwareNotAllowed);
  } else {
    e = f->CreateEncoder(
        CreateEncoderParams(videoInfo /* track info */, aUsage, taskQueue,
                            aPixelFormat, FRAME_RATE /* FPS */,
                            KEYFRAME_INTERVAL /* keyframe interval */,
                            BIT_RATE /* bitrate */),
        hardwareNotAllowed);
  }

  return e.forget();
}

@@ -174,13 +185,14 @@ static already_AddRefed<MediaDataEncoder> CreateH264Encoder(
    MediaDataEncoder::PixelFormat aPixelFormat =
        MediaDataEncoder::PixelFormat::YUV420P,
    int32_t aWidth = WIDTH, int32_t aHeight = HEIGHT,
    const Maybe<H264Specific>& aSpecific =
        Some(H264Specific(H264_PROFILE_BASE))) {
  return CreateVideoEncoder(CodecType::H264, aUsage, aPixelFormat, aWidth,
                            aHeight, aSpecific);
    const Maybe<MediaDataEncoder::H264Specific>& aSpecific =
        Some(MediaDataEncoder::H264Specific(
            MediaDataEncoder::H264Specific::ProfileLevel::BaselineAutoLevel))) {
  return CreateVideoEncoder(VIDEO_MP4, aUsage, aPixelFormat, aWidth, aHeight,
                            aSpecific);
}

void WaitForShutdown(const RefPtr<MediaDataEncoder>& aEncoder) {
void WaitForShutdown(RefPtr<MediaDataEncoder> aEncoder) {
  MOZ_ASSERT(aEncoder);

  Maybe<bool> result;

@@ -198,14 +210,14 @@ void WaitForShutdown(const RefPtr<MediaDataEncoder>& aEncoder) {
}

TEST_F(MediaDataEncoderTest, H264Create) {
  RUN_IF_SUPPORTED(CodecType::H264, []() {
  RUN_IF_SUPPORTED(VIDEO_MP4, []() {
    RefPtr<MediaDataEncoder> e = CreateH264Encoder();
    EXPECT_TRUE(e);
    WaitForShutdown(e);
  });
}

static bool EnsureInit(const RefPtr<MediaDataEncoder>& aEncoder) {
static bool EnsureInit(RefPtr<MediaDataEncoder> aEncoder) {
  if (!aEncoder) {
    return false;
  }

@@ -217,17 +229,18 @@ static bool EnsureInit(const RefPtr<MediaDataEncoder>& aEncoder) {
        EXPECT_EQ(TrackInfo::TrackType::kVideoTrack, t);
        succeeded = true;
      },
      [&succeeded](const MediaResult& r) { succeeded = false; });
      [&succeeded](MediaResult r) { succeeded = false; });
  return succeeded;
}

TEST_F(MediaDataEncoderTest, H264Inits) {
  RUN_IF_SUPPORTED(CodecType::H264, []() {
    // w/o codec specific: should fail for h264.
  RUN_IF_SUPPORTED(VIDEO_MP4, []() {
    // w/o codec specific.
    RefPtr<MediaDataEncoder> e = CreateH264Encoder(
        MediaDataEncoder::Usage::Realtime,
        MediaDataEncoder::PixelFormat::YUV420P, WIDTH, HEIGHT, Nothing());
    EXPECT_FALSE(e);
    EXPECT_TRUE(EnsureInit(e));
    WaitForShutdown(e);

    // w/ codec specific
    e = CreateH264Encoder();

@@ -237,7 +250,7 @@ TEST_F(MediaDataEncoderTest, H264Inits) {
}

static MediaDataEncoder::EncodedData Encode(
    const RefPtr<MediaDataEncoder>& aEncoder, const size_t aNumFrames,
    const RefPtr<MediaDataEncoder> aEncoder, const size_t aNumFrames,
    MediaDataEncoderTest::FrameSource& aSource) {
  MediaDataEncoder::EncodedData output;
  bool succeeded;

@@ -250,7 +263,7 @@ static MediaDataEncoder::EncodedData Encode(
        output.AppendElements(std::move(encoded));
        succeeded = true;
      },
      [&succeeded](const MediaResult& r) { succeeded = false; });
      [&succeeded](MediaResult r) { succeeded = false; });
  EXPECT_TRUE(succeeded);
  if (!succeeded) {
    return output;

@@ -266,7 +279,7 @@ static MediaDataEncoder::EncodedData Encode(
        output.AppendElements(std::move(encoded));
        succeeded = true;
      },
      [&succeeded](const MediaResult& r) { succeeded = false; });
      [&succeeded](MediaResult r) { succeeded = false; });
  EXPECT_TRUE(succeeded);
  if (!succeeded) {
    return output;

@@ -277,7 +290,7 @@ static MediaDataEncoder::EncodedData Encode(
}

TEST_F(MediaDataEncoderTest, H264Encodes) {
  RUN_IF_SUPPORTED(CodecType::H264, [this]() {
  RUN_IF_SUPPORTED(VIDEO_MP4, [this]() {
    // Encode one frame and output in AnnexB format.
    RefPtr<MediaDataEncoder> e = CreateH264Encoder();
    EnsureInit(e);

@@ -309,28 +322,33 @@ TEST_F(MediaDataEncoderTest, H264Encodes) {
  });
}

#ifndef DEBUG  // Zero width or height will assert/crash in debug builds.
TEST_F(MediaDataEncoderTest, InvalidSize) {
  RUN_IF_SUPPORTED(CodecType::H264, []() {
  RUN_IF_SUPPORTED(VIDEO_MP4, []() {
    RefPtr<MediaDataEncoder> e0x0 =
        CreateH264Encoder(MediaDataEncoder::Usage::Realtime,
                          MediaDataEncoder::PixelFormat::YUV420P, 0, 0);
    EXPECT_NE(e0x0, nullptr);
    EXPECT_FALSE(EnsureInit(e0x0));

    RefPtr<MediaDataEncoder> e0x1 =
        CreateH264Encoder(MediaDataEncoder::Usage::Realtime,
                          MediaDataEncoder::PixelFormat::YUV420P, 0, 1);
    EXPECT_NE(e0x1, nullptr);
    EXPECT_FALSE(EnsureInit(e0x1));

    RefPtr<MediaDataEncoder> e1x0 =
        CreateH264Encoder(MediaDataEncoder::Usage::Realtime,
                          MediaDataEncoder::PixelFormat::YUV420P, 1, 0);
    EXPECT_NE(e1x0, nullptr);
    EXPECT_FALSE(EnsureInit(e1x0));
  });
}
#endif

#ifdef MOZ_WIDGET_ANDROID
TEST_F(MediaDataEncoderTest, AndroidNotSupportedSize) {
  RUN_IF_SUPPORTED(CodecType::H264, []() {
  RUN_IF_SUPPORTED(VIDEO_MP4, []() {
    RefPtr<MediaDataEncoder> e =
        CreateH264Encoder(MediaDataEncoder::Usage::Realtime,
                          MediaDataEncoder::PixelFormat::YUV420P, 1, 1);

@@ -345,9 +363,10 @@ static already_AddRefed<MediaDataEncoder> CreateVP8Encoder(
    MediaDataEncoder::PixelFormat aPixelFormat =
        MediaDataEncoder::PixelFormat::YUV420P,
    int32_t aWidth = WIDTH, int32_t aHeight = HEIGHT,
    const Maybe<VP8Specific>& aSpecific = Some(VP8Specific())) {
  return CreateVideoEncoder(CodecType::VP8, aUsage, aPixelFormat, aWidth,
                            aHeight, aSpecific);
    const Maybe<MediaDataEncoder::VPXSpecific::VP8>& aSpecific =
        Some(MediaDataEncoder::VPXSpecific::VP8())) {
  return CreateVideoEncoder(VIDEO_VP8, aUsage, aPixelFormat, aWidth, aHeight,
                            aSpecific);
}

static already_AddRefed<MediaDataEncoder> CreateVP9Encoder(

@@ -355,13 +374,14 @@ static already_AddRefed<MediaDataEncoder> CreateVP9Encoder(
    MediaDataEncoder::PixelFormat aPixelFormat =
        MediaDataEncoder::PixelFormat::YUV420P,
    int32_t aWidth = WIDTH, int32_t aHeight = HEIGHT,
    const Maybe<VP9Specific>& aSpecific = Some(VP9Specific())) {
  return CreateVideoEncoder(CodecType::VP9, aUsage, aPixelFormat, aWidth,
                            aHeight, aSpecific);
    const Maybe<MediaDataEncoder::VPXSpecific::VP9>& aSpecific =
        Some(MediaDataEncoder::VPXSpecific::VP9())) {
  return CreateVideoEncoder(VIDEO_VP9, aUsage, aPixelFormat, aWidth, aHeight,
                            aSpecific);
}

TEST_F(MediaDataEncoderTest, VP8Create) {
  RUN_IF_SUPPORTED(CodecType::VP8, []() {
  RUN_IF_SUPPORTED(VIDEO_VP8, []() {
    RefPtr<MediaDataEncoder> e = CreateVP8Encoder();
    EXPECT_TRUE(e);
    WaitForShutdown(e);

@@ -369,7 +389,7 @@ TEST_F(MediaDataEncoderTest, VP8Create) {
}

TEST_F(MediaDataEncoderTest, VP8Inits) {
  RUN_IF_SUPPORTED(CodecType::VP8, []() {
  RUN_IF_SUPPORTED(VIDEO_VP8, []() {
    // w/o codec specific.
    RefPtr<MediaDataEncoder> e = CreateVP8Encoder(
        MediaDataEncoder::Usage::Realtime,

@@ -385,7 +405,7 @@ TEST_F(MediaDataEncoderTest, VP8Inits) {
}

TEST_F(MediaDataEncoderTest, VP8Encodes) {
  RUN_IF_SUPPORTED(CodecType::VP8, [this]() {
  RUN_IF_SUPPORTED(VIDEO_VP8, [this]() {
    // Encode one VPX frame.
    RefPtr<MediaDataEncoder> e = CreateVP8Encoder();
    EnsureInit(e);

@@ -419,7 +439,7 @@ TEST_F(MediaDataEncoderTest, VP8Encodes) {
}

TEST_F(MediaDataEncoderTest, VP9Create) {
  RUN_IF_SUPPORTED(CodecType::VP9, []() {
  RUN_IF_SUPPORTED(VIDEO_VP9, []() {
    RefPtr<MediaDataEncoder> e = CreateVP9Encoder();
    EXPECT_TRUE(e);
    WaitForShutdown(e);

@@ -427,7 +447,7 @@ TEST_F(MediaDataEncoderTest, VP9Create) {
}

TEST_F(MediaDataEncoderTest, VP9Inits) {
  RUN_IF_SUPPORTED(CodecType::VP9, []() {
  RUN_IF_SUPPORTED(VIDEO_VP9, []() {
    // w/o codec specific.
    RefPtr<MediaDataEncoder> e = CreateVP9Encoder(
        MediaDataEncoder::Usage::Realtime,

@@ -443,7 +463,7 @@ TEST_F(MediaDataEncoderTest, VP9Inits) {
}

TEST_F(MediaDataEncoderTest, VP9Encodes) {
  RUN_IF_SUPPORTED(CodecType::VP9, [this]() {
  RUN_IF_SUPPORTED(VIDEO_VP9, [this]() {
    RefPtr<MediaDataEncoder> e = CreateVP9Encoder();
    EnsureInit(e);
    MediaDataEncoder::EncodedData output = Encode(e, 1UL, mData);

@@ -6,8 +6,6 @@

#include "PEMFactory.h"

#include "PlatformEncoderModule.h"

#ifdef MOZ_APPLEMEDIA
#  include "AppleEncoderModule.h"
#endif

@@ -24,152 +22,53 @@ namespace mozilla {

LazyLogModule sPEMLog("PlatformEncoderModule");

#define LOGE(fmt, ...)                       \
  MOZ_LOG(sPEMLog, mozilla::LogLevel::Error, \
          ("[PEMFactory] %s: " fmt, __func__, ##__VA_ARGS__))
#define LOG(fmt, ...)                        \
  MOZ_LOG(sPEMLog, mozilla::LogLevel::Debug, \
          ("[PEMFactory] %s: " fmt, __func__, ##__VA_ARGS__))

PEMFactory::PEMFactory() {
#ifdef MOZ_APPLEMEDIA
  RefPtr<PlatformEncoderModule> m(new AppleEncoderModule());
  mCurrentPEMs.AppendElement(m);
  mModules.AppendElement(m);
#endif

#ifdef MOZ_WIDGET_ANDROID
  mCurrentPEMs.AppendElement(new AndroidEncoderModule());
  mModules.AppendElement(new AndroidEncoderModule());
#endif

#ifdef XP_WIN
  mCurrentPEMs.AppendElement(new WMFEncoderModule());
  mModules.AppendElement(new WMFEncoderModule());
#endif
}

bool PEMFactory::SupportsMimeType(const nsACString& aMimeType) const {
  for (auto m : mModules) {
    if (m->SupportsMimeType(aMimeType)) {
      return true;
    }
  }
  return false;
}

already_AddRefed<MediaDataEncoder> PEMFactory::CreateEncoder(
    const EncoderConfig& aConfig, const RefPtr<TaskQueue>& aTaskQueue) {
  RefPtr<PlatformEncoderModule> m = FindPEM(aConfig);
    const CreateEncoderParams& aParams, const bool aHardwareNotAllowed) {
  const TrackInfo& info = aParams.mConfig;
  RefPtr<PlatformEncoderModule> m = FindPEM(info);
  if (!m) {
    return nullptr;
  }

  return aConfig.IsVideo() ? m->CreateVideoEncoder(aConfig, aTaskQueue)
                           : nullptr;
}

RefPtr<PlatformEncoderModule::CreateEncoderPromise>
PEMFactory::CreateEncoderAsync(const EncoderConfig& aConfig,
                               const RefPtr<TaskQueue>& aTaskQueue) {
  return CheckAndMaybeCreateEncoder(aConfig, 0, aTaskQueue);
}

RefPtr<PlatformEncoderModule::CreateEncoderPromise>
PEMFactory::CheckAndMaybeCreateEncoder(const EncoderConfig& aConfig,
                                       uint32_t aIndex,
                                       const RefPtr<TaskQueue>& aTaskQueue) {
  for (uint32_t i = aIndex; i < mCurrentPEMs.Length(); i++) {
    if (!mCurrentPEMs[i]->Supports(aConfig)) {
      continue;
    }
    return CreateEncoderWithPEM(mCurrentPEMs[i], aConfig, aTaskQueue)
        ->Then(
            GetCurrentSerialEventTarget(), __func__,
            [](RefPtr<MediaDataEncoder>&& aEncoder) {
              return PlatformEncoderModule::CreateEncoderPromise::
                  CreateAndResolve(std::move(aEncoder), __func__);
            },
            [self = RefPtr{this}, i, config = aConfig, aTaskQueue,
             &aConfig](const MediaResult& aError) mutable {
              // Try the next PEM.
              return self->CheckAndMaybeCreateEncoder(aConfig, i + 1,
                                                      aTaskQueue);
            });
  }
  return PlatformEncoderModule::CreateEncoderPromise::CreateAndReject(
      MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                  nsPrintfCString("Error no encoder found for %d",
                                  static_cast<int>(aConfig.mCodec))
                      .get()),
      __func__);
}
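The deleted CheckAndMaybeCreateEncoder implements its fallback by recursing from the promise's reject handler: each rejection retries creation starting at the next module index. Stripped of MozPromise, the control flow amounts to the following synchronous sketch (hypothetical Factory/Config/Encoder types with assumed Supports()/Create() shapes, not part of the patch):

#include <optional>
#include <vector>

std::optional<Encoder> CreateWithFallback(const std::vector<Factory>& aPEMs,
                                          const Config& aConfig,
                                          size_t aIndex = 0) {
  for (size_t i = aIndex; i < aPEMs.size(); i++) {
    if (!aPEMs[i].Supports(aConfig)) {
      continue;  // this module cannot handle the config at all
    }
    if (std::optional<Encoder> e = aPEMs[i].Create(aConfig)) {
      return e;  // the resolve path
    }
    // The reject path: retry with the remaining modules.
    return CreateWithFallback(aPEMs, aConfig, i + 1);
  }
  return std::nullopt;  // no module left; overall failure
}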

RefPtr<PlatformEncoderModule::CreateEncoderPromise>
PEMFactory::CreateEncoderWithPEM(PlatformEncoderModule* aPEM,
                                 const EncoderConfig& aConfig,
                                 const RefPtr<TaskQueue>& aTaskQueue) {
  MOZ_ASSERT(aPEM);
  MediaResult result = NS_OK;

  if (aConfig.IsAudio()) {
    return aPEM->AsyncCreateEncoder(aConfig, aTaskQueue)
        ->Then(
            GetCurrentSerialEventTarget(), __func__,
            [config = aConfig](RefPtr<MediaDataEncoder>&& aEncoder) {
              RefPtr<MediaDataEncoder> decoder = std::move(aEncoder);
              return PlatformEncoderModule::CreateEncoderPromise::
                  CreateAndResolve(decoder, __func__);
            },
            [](const MediaResult& aError) {
              return PlatformEncoderModule::CreateEncoderPromise::
                  CreateAndReject(aError, __func__);
            });
  }

  if (!aConfig.IsVideo()) {
    return PlatformEncoderModule::CreateEncoderPromise::CreateAndReject(
        MediaResult(
            NS_ERROR_DOM_MEDIA_FATAL_ERR,
            RESULT_DETAIL(
                "Encoder configuration error, expected audio or video.")),
        __func__);
  }

  return aPEM->AsyncCreateEncoder(aConfig, aTaskQueue);
}

bool PEMFactory::Supports(const EncoderConfig& aConfig) const {
  RefPtr<PlatformEncoderModule> found;
  for (const auto& m : mCurrentPEMs) {
    if (m->Supports(aConfig)) {
      // TODO name
      LOG("Checking if %s supports codec %d: yes", m->GetName(),
          static_cast<int>(aConfig.mCodec));
      return true;
    }
    LOG("Checking if %s supports codec %d: no", m->GetName(),
        static_cast<int>(aConfig.mCodec));
  }
  return false;
}

bool PEMFactory::SupportsCodec(CodecType aCodec) const {
  for (const auto& m : mCurrentPEMs) {
    if (m->SupportsCodec(aCodec)) {
      // TODO name
      LOG("Checking if %s supports codec %d: yes", m->GetName(),
          static_cast<int>(aCodec));
      return true;
    }
    LOG("Checking if %s supports codec %d: no", m->GetName(),
        static_cast<int>(aCodec));
  }
  LOG("No PEM support %d", static_cast<int>(aCodec));
  return false;
  return info.IsVideo() ? m->CreateVideoEncoder(aParams, aHardwareNotAllowed)
                        : nullptr;
}

already_AddRefed<PlatformEncoderModule> PEMFactory::FindPEM(
    const EncoderConfig& aConfig) const {
    const TrackInfo& aTrackInfo) const {
  RefPtr<PlatformEncoderModule> found;
  for (const auto& m : mCurrentPEMs) {
    if (m->Supports(aConfig)) {
  for (auto m : mModules) {
    if (m->SupportsMimeType(aTrackInfo.mMimeType)) {
      found = m;
      break;
    }
  }

  return found.forget();
}

}  // namespace mozilla

#undef LOGE
#undef LOG

@@ -11,8 +11,6 @@

namespace mozilla {

using PEMCreateEncoderPromise = PlatformEncoderModule::CreateEncoderPromise;

class PEMFactory final {
 public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(PEMFactory)

@@ -24,28 +22,17 @@ class PEMFactory final {
  // instance. It's expected that there will be multiple
  // PlatformEncoderModules alive at the same time.
  already_AddRefed<MediaDataEncoder> CreateEncoder(
      const EncoderConfig& aConfig, const RefPtr<TaskQueue>& aTaskQueue);
      const CreateEncoderParams& aParams, const bool aHardwareNotAllowed);

  RefPtr<PlatformEncoderModule::CreateEncoderPromise> CreateEncoderAsync(
      const EncoderConfig& aConfig, const RefPtr<TaskQueue>& aTaskQueue);

  bool Supports(const EncoderConfig& aConfig) const;
  bool SupportsCodec(CodecType aCodec) const;
  bool SupportsMimeType(const nsACString& aMimeType) const;

 private:
  RefPtr<PlatformEncoderModule::CreateEncoderPromise>
  CheckAndMaybeCreateEncoder(const EncoderConfig& aConfig, uint32_t aIndex,
                             const RefPtr<TaskQueue>& aTaskQueue);

  RefPtr<PlatformEncoderModule::CreateEncoderPromise> CreateEncoderWithPEM(
      PlatformEncoderModule* aPEM, const EncoderConfig& aConfig,
      const RefPtr<TaskQueue>& aTaskQueue);
  virtual ~PEMFactory() = default;
  // Returns the first PEM in our list supporting the codec.
  // Returns the first PEM in our list supporting the mimetype.
  already_AddRefed<PlatformEncoderModule> FindPEM(
      const EncoderConfig& aConfig) const;
      const TrackInfo& aTrackInfo) const;

  nsTArray<RefPtr<PlatformEncoderModule>> mCurrentPEMs;
  nsTArray<RefPtr<PlatformEncoderModule>> mModules;
};

}  // namespace mozilla
@@ -7,6 +7,8 @@
#if !defined(PlatformDecoderModule_h_)
#  define PlatformDecoderModule_h_

#  include <queue>

#  include "DecoderDoctorLogger.h"
#  include "GMPCrashHelper.h"
#  include "MediaCodecsSupport.h"

@@ -123,7 +125,7 @@ struct MOZ_STACK_CLASS CreateDecoderParams final {
  CreateDecoderParams(const CreateDecoderParams& aParams) = default;

  MOZ_IMPLICIT CreateDecoderParams(const CreateDecoderParamsForAsync& aParams)
      : mConfig(*aParams.mConfig),
      : mConfig(*aParams.mConfig.get()),
        mImageContainer(aParams.mImageContainer),
        mKnowsCompositor(aParams.mKnowsCompositor),
        mCrashHelper(aParams.mCrashHelper),

@@ -194,7 +196,7 @@ struct MOZ_STACK_CLASS CreateDecoderParams final {
    mUseNullDecoder = aUseNullDecoder;
  }
  void Set(NoWrapper aNoWrapper) { mNoWrapper = aNoWrapper; }
  void Set(const OptionSet& aOptions) { mOptions = aOptions; }
  void Set(OptionSet aOptions) { mOptions = aOptions; }
  void Set(VideoFrameRate aRate) { mRate = aRate; }
  void Set(layers::KnowsCompositor* aKnowsCompositor) {
    if (aKnowsCompositor) {

@@ -283,7 +285,7 @@ struct MOZ_STACK_CLASS SupportDecoderParams final {
    mUseNullDecoder = aUseNullDecoder;
  }
  void Set(media::NoWrapper aNoWrapper) { mNoWrapper = aNoWrapper; }
  void Set(const media::OptionSet& aOptions) { mOptions = aOptions; }
  void Set(media::OptionSet aOptions) { mOptions = aOptions; }
  void Set(media::VideoFrameRate aRate) { mRate = aRate; }
  void Set(layers::KnowsCompositor* aKnowsCompositor) {
    if (aKnowsCompositor) {

@@ -437,11 +439,13 @@ class MediaDataDecoder : public DecoderDoctorLifeLogger<MediaDataDecoder> {
  virtual ~MediaDataDecoder() = default;

 public:
  using TrackType = TrackInfo::TrackType;
  using DecodedData = nsTArray<RefPtr<MediaData>>;
  using InitPromise = MozPromise<TrackType, MediaResult, true>;
  using DecodePromise = MozPromise<DecodedData, MediaResult, true>;
  using FlushPromise = MozPromise<bool, MediaResult, true>;
  typedef TrackInfo::TrackType TrackType;
  typedef nsTArray<RefPtr<MediaData>> DecodedData;
  typedef MozPromise<TrackType, MediaResult, /* IsExclusive = */ true>
      InitPromise;
  typedef MozPromise<DecodedData, MediaResult, /* IsExclusive = */ true>
      DecodePromise;
  typedef MozPromise<bool, MediaResult, /* IsExclusive = */ true> FlushPromise;

  NS_INLINE_DECL_PURE_VIRTUAL_REFCOUNTING
@@ -1,97 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "PlatformEncoderModule.h"
#include "nsPrintfCString.h"
#include "mozilla/ToString.h"

namespace mozilla {

RefPtr<PlatformEncoderModule::CreateEncoderPromise>
PlatformEncoderModule::AsyncCreateEncoder(const EncoderConfig& aEncoderConfig,
                                          const RefPtr<TaskQueue>& aTaskQueue) {
  RefPtr<MediaDataEncoder> encoder;
  MediaResult result = NS_OK;
  if (aEncoderConfig.IsAudio()) {
    encoder = CreateAudioEncoder(aEncoderConfig, aTaskQueue);
  } else if (aEncoderConfig.IsVideo()) {
    encoder = CreateVideoEncoder(aEncoderConfig, aTaskQueue);
  }
  if (!encoder) {
    if (NS_FAILED(result)) {
      return CreateEncoderPromise::CreateAndReject(result, __func__);
    }
    return CreateEncoderPromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                    nsPrintfCString("Error creating encoder for %d",
                                    static_cast<int>(aEncoderConfig.mCodec))
                        .get()),
        __func__);
  }
  return CreateEncoderPromise::CreateAndResolve(encoder, __func__);
}

template <typename T>
nsCString MaybeToString(const Maybe<T>& aMaybe) {
  return nsPrintfCString(
      "%s", aMaybe.isSome() ? ToString(aMaybe.value()).c_str() : "nothing");
}

struct ConfigurationChangeToString {
  nsCString operator()(const DimensionsChange& aDimensionsChange) {
    return nsPrintfCString("Dimensions: %dx%d", aDimensionsChange.get().width,
                           aDimensionsChange.get().height);
  }
  nsCString operator()(const DisplayDimensionsChange& aDisplayDimensionChange) {
    if (aDisplayDimensionChange.get().isNothing()) {
      return nsCString("Display dimensions: nothing");
    }
    gfx::IntSize displayDimensions = aDisplayDimensionChange.get().value();
    return nsPrintfCString("Display dimensions: %dx%d", displayDimensions.width,
                           displayDimensions.height);
  }
  nsCString operator()(const BitrateChange& aBitrateChange) {
    if (aBitrateChange.get().isSome()) {
      return nsLiteralCString("Bitrate: nothing");
    }
    return nsPrintfCString("Bitrate: %skbps",
                           MaybeToString(aBitrateChange.get()).get());
  }
  nsCString operator()(const FramerateChange& aFramerateChange) {
    if (aFramerateChange.get().isNothing()) {
      return nsCString("Framerate: nothing");
    }
    return nsPrintfCString("Framerate: %lfHz", aFramerateChange.get().value());
  }
  nsCString operator()(const BitrateModeChange& aBitrateModeChange) {
    return nsPrintfCString(
        "Bitrate mode: %s",
        aBitrateModeChange.get() == MediaDataEncoder::BitrateMode::Constant
            ? "Constant"
            : "Variable");
  }
  nsCString operator()(const UsageChange& aUsageChange) {
    return nsPrintfCString(
        "Usage mode: %s",
        aUsageChange.get() == MediaDataEncoder::Usage::Realtime ? "Realtime"
                                                                : "Recoding");
  }
  nsCString operator()(const ContentHintChange& aContentHintChange) {
    return nsPrintfCString("Content hint: %s",
                           MaybeToString(aContentHintChange.get()).get());
  }
};

nsString EncoderConfigurationChangeList::ToString() const {
  nsString rv(
      NS_LITERAL_STRING_FROM_CSTRING("EncoderConfigurationChangeList:"_ns));
  for (const EncoderConfigurationItem& change : mChanges) {
    nsCString str = change.match(ConfigurationChangeToString());
    rv.AppendPrintf("- %s\n", str.get());
  }
  return rv;
}
}  // namespace mozilla
@@ -11,7 +11,6 @@
#  include "MediaData.h"
#  include "MediaInfo.h"
#  include "MediaResult.h"
#  include "VPXDecoder.h"
#  include "mozilla/Attributes.h"
#  include "mozilla/Maybe.h"
#  include "mozilla/MozPromise.h"

@@ -19,111 +18,30 @@
#  include "mozilla/TaskQueue.h"
#  include "mozilla/dom/ImageBitmapBinding.h"
#  include "nsISupportsImpl.h"
#  include "VideoUtils.h"
#  include "VPXDecoder.h"

namespace mozilla {

class MediaDataEncoder;
class EncoderConfig;
struct EncoderConfigurationChangeList;

enum class CodecType {
  _BeginVideo_,
  H264,
  VP8,
  VP9,
  _EndVideo_,
  _BeginAudio_ = _EndVideo_,
  Opus,
  G722,
  _EndAudio_,
  Unknown,
};

struct H264Specific final {
  const H264_PROFILE mProfile;

  explicit H264Specific(H264_PROFILE aProfile) : mProfile(aProfile) {}
};

struct OpusSpecific final {
  enum class Application { Voip, Audio, RestricedLowDelay };

  const Application mApplication;
  const uint8_t mComplexity;  // from 0-10

  OpusSpecific(const Application aApplication, const uint8_t aComplexity)
      : mApplication(aApplication), mComplexity(aComplexity) {
    MOZ_ASSERT(mComplexity <= 10);
  }
};

enum class VPXComplexity { Normal, High, Higher, Max };
struct VP8Specific {
  VP8Specific() = default;
  // Ignore webrtc::VideoCodecVP8::errorConcealmentOn,
  // for it's always false in the codebase (except libwebrtc test cases).
  VP8Specific(const VPXComplexity aComplexity, const bool aResilience,
              const uint8_t aNumTemporalLayers, const bool aDenoising,
              const bool aAutoResize, const bool aFrameDropping)
      : mComplexity(aComplexity),
        mResilience(aResilience),
        mNumTemporalLayers(aNumTemporalLayers),
        mDenoising(aDenoising),
        mAutoResize(aAutoResize),
        mFrameDropping(aFrameDropping) {}
  const VPXComplexity mComplexity{VPXComplexity::Normal};
  const bool mResilience{true};
  const uint8_t mNumTemporalLayers{1};
  const bool mDenoising{true};
  const bool mAutoResize{false};
  const bool mFrameDropping{false};
};

struct VP9Specific : public VP8Specific {
  VP9Specific() = default;
  VP9Specific(const VPXComplexity aComplexity, const bool aResilience,
              const uint8_t aNumTemporalLayers, const bool aDenoising,
              const bool aAutoResize, const bool aFrameDropping,
              const bool aAdaptiveQp, const uint8_t aNumSpatialLayers,
              const bool aFlexible)
      : VP8Specific(aComplexity, aResilience, aNumTemporalLayers, aDenoising,
                    aAutoResize, aFrameDropping),
        mAdaptiveQp(aAdaptiveQp),
        mNumSpatialLayers(aNumSpatialLayers),
        mFlexible(aFlexible) {}
  const bool mAdaptiveQp{true};
  const uint8_t mNumSpatialLayers{1};
  const bool mFlexible{false};
};
struct CreateEncoderParams;

class PlatformEncoderModule {
 public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(PlatformEncoderModule)

  virtual already_AddRefed<MediaDataEncoder> CreateVideoEncoder(
      const EncoderConfig& aConfig, const RefPtr<TaskQueue>& aTaskQueue) const {
      const CreateEncoderParams& aParams,
      const bool aHardwareNotAllowed) const {
    return nullptr;
  };

  virtual already_AddRefed<MediaDataEncoder> CreateAudioEncoder(
      const EncoderConfig& aConfig, const RefPtr<TaskQueue>& aTaskQueue) const {
      const CreateEncoderParams& aParams) const {
    return nullptr;
  };

  using CreateEncoderPromise = MozPromise<RefPtr<MediaDataEncoder>, MediaResult,
                                          /* IsExclusive = */ true>;

  // Indicates if the PlatformDecoderModule supports encoding of a codec.
  virtual bool Supports(const EncoderConfig& aConfig) const = 0;
  virtual bool SupportsCodec(CodecType aCodecType) const = 0;

  // Returns a readable name for this Platform Encoder Module
  virtual const char* GetName() const = 0;

  // Asychronously create an encoder
  RefPtr<PlatformEncoderModule::CreateEncoderPromise> AsyncCreateEncoder(
      const EncoderConfig& aEncoderConfig, const RefPtr<TaskQueue>& aTaskQueue);
  // Indicates if the PlatformDecoderModule supports encoding of aMimeType.
  virtual bool SupportsMimeType(const nsACString& aMimeType) const = 0;

 protected:
  PlatformEncoderModule() = default;

@@ -135,13 +53,108 @@ class MediaDataEncoder {
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaDataEncoder)

  enum class Usage {
    Realtime,  // Low latency prefered
    Record
    Realtime,  // For WebRTC
    Record     // For MediaRecoder
  };
  using PixelFormat = dom::ImageBitmapFormat;
  enum BitrateMode { Constant, Variable };

  enum class HardwarePreference { RequireHardware, RequireSoftware, None };
  enum class CodecType {
    _BeginVideo_,
    H264,
    VP8,
    VP9,
    _EndVideo_,
    _BeginAudio_ = _EndVideo_,
    Opus,
    G722,
    _EndAudio_,
    Unknown,
  };

  struct H264Specific final {
    enum class ProfileLevel { BaselineAutoLevel, MainAutoLevel };

    const ProfileLevel mProfileLevel;

    explicit H264Specific(const ProfileLevel aProfileLevel)
        : mProfileLevel(aProfileLevel) {}
  };

  struct OpusSpecific final {
    enum class Application { Voip, Audio, RestricedLowDelay };

    const Application mApplication;
    const uint8_t mComplexity;  // from 0-10

    OpusSpecific(const Application aApplication, const uint8_t aComplexity)
        : mApplication(aApplication), mComplexity(aComplexity) {
      MOZ_ASSERT(mComplexity <= 10);
    }
  };

  // From webrtc::VideoCodecVP8. mResilience is a boolean value because while
  // VP8ResilienceMode has 3 values, kResilientFrames is not supported.
#  define VPX_COMMON_SETTINGS         \
    const Complexity mComplexity;     \
    const bool mResilience;           \
    const uint8_t mNumTemporalLayers; \
    const bool mDenoising;            \
    const bool mAutoResize;           \
    const bool mFrameDropping;

  // See webrtc::VideoEncoder::GetDefaultVp(8|9)Settings().
#  define VPX_COMMON_DEFAULTS(resize)                                          \
    mComplexity(Complexity::Normal), mResilience(true), mNumTemporalLayers(1), \
        mDenoising(true), mAutoResize(resize), mFrameDropping(0)

  struct VPXSpecific final {
    enum class Complexity { Normal, High, Higher, Max };
    struct VP8 final {
      VPX_COMMON_SETTINGS
      // Ignore webrtc::VideoCodecVP8::errorConcealmentOn,
      // for it's always false in the codebase (except libwebrtc test cases).

      VP8() : VPX_COMMON_DEFAULTS(false /* auto resize */) {}
      VP8(const Complexity aComplexity, const bool aResilience,
          const uint8_t aNumTemporalLayers, const bool aDenoising,
          const bool aAutoResize, const bool aFrameDropping)
          : mComplexity(aComplexity),
            mResilience(aResilience),
            mNumTemporalLayers(aNumTemporalLayers),
            mDenoising(aDenoising),
            mAutoResize(aAutoResize),
            mFrameDropping(aFrameDropping) {}
    };

    struct VP9 final {
      VPX_COMMON_SETTINGS
      // From webrtc::VideoCodecVP9.
      bool mAdaptiveQp;
      uint8_t mNumSpatialLayers;
      bool mFlexible;

      VP9()
          : VPX_COMMON_DEFAULTS(true /* auto resize */),
            mAdaptiveQp(true),
            mNumSpatialLayers(1),
            mFlexible(false) {}
      VP9(const Complexity aComplexity, const bool aResilience,
          const uint8_t aNumTemporalLayers, const bool aDenoising,
          const bool aAutoResize, const bool aFrameDropping,
          const bool aAdaptiveQp, const uint8_t aNumSpatialLayers,
          const bool aFlexible)
          : mComplexity(aComplexity),
            mResilience(aResilience),
            mNumTemporalLayers(aNumTemporalLayers),
            mDenoising(aDenoising),
            mAutoResize(aAutoResize),
            mFrameDropping(aFrameDropping),
            mAdaptiveQp(aAdaptiveQp),
            mNumSpatialLayers(aNumSpatialLayers),
            mFlexible(aFlexible) {}
    };

    VPXSpecific() = delete;
  };

  static bool IsVideo(const CodecType aCodec) {
    return aCodec > CodecType::_BeginVideo_ && aCodec < CodecType::_EndVideo_;

@@ -150,13 +163,15 @@ class MediaDataEncoder {
    return aCodec > CodecType::_BeginAudio_ && aCodec < CodecType::_EndAudio_;
  }
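IsVideo and IsAudio above rely on the sentinel enumerators: because enumerators are laid out contiguously, a category test reduces to a range check. A self-contained sketch of the idiom (hypothetical enum, not from the tree):

enum class Kind { BeginA, A1, A2, EndA, BeginB = EndA, B1, EndB };

constexpr bool IsA(Kind k) {  // true exactly for A1 and A2
  return k > Kind::BeginA && k < Kind::EndA;
}
static_assert(IsA(Kind::A1) && !IsA(Kind::B1), "range check works");

A new enumerator inserted between a pair of sentinels automatically joins that category.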

  using PixelFormat = dom::ImageBitmapFormat;
  // Sample rate for audio, framerate for video, and bitrate for both.
  using Rate = uint32_t;

  using InitPromise =
      MozPromise<TrackInfo::TrackType, MediaResult, /* IsExclusive = */ true>;
  using EncodedData = nsTArray<RefPtr<MediaRawData>>;
  using EncodePromise =
      MozPromise<EncodedData, MediaResult, /* IsExclusive = */ true>;
  using ReconfigurationPromise =
      MozPromise<bool, MediaResult, /* IsExclusive = */ true>;

  // Initialize the encoder. It should be ready to encode once the returned
  // promise resolves. The encoder should do any initialization here, rather

@@ -172,12 +187,6 @@ class MediaDataEncoder {
  // or empty when there is none available yet.
  virtual RefPtr<EncodePromise> Encode(const MediaData* aSample) = 0;

  // Attempt to reconfigure the encoder on the fly. This can fail if the
  // underlying PEM doesn't support this type of reconfiguration.
  virtual RefPtr<ReconfigurationPromise> Reconfigure(
      const RefPtr<const EncoderConfigurationChangeList>&
          aConfigurationChanges) = 0;

  // Causes all complete samples in the pipeline that can be encoded to be
  // output. It indicates that there is no more input sample to insert.
  // This function is asynchronous.

@@ -195,7 +204,7 @@ class MediaDataEncoder {
  // The ShutdownPromise must only ever be resolved.
  virtual RefPtr<ShutdownPromise> Shutdown() = 0;

  virtual RefPtr<GenericPromise> SetBitrate(uint32_t aBitsPerSec) {
  virtual RefPtr<GenericPromise> SetBitrate(Rate aBitsPerSec) {
    return GenericPromise::CreateAndResolve(true, __func__);
  }

@@ -213,147 +222,185 @@ class MediaDataEncoder {
  friend class PlatformEncoderModule;

 protected:
  template <typename T>
  struct BaseConfig {
    const CodecType mCodecType;
    const Usage mUsage;
    const Rate mBitsPerSec;
    Maybe<T> mCodecSpecific;

    void SetCodecSpecific(const T& aCodecSpecific) {
      mCodecSpecific.emplace(aCodecSpecific);
    }

   protected:
    BaseConfig(const CodecType aCodecType, const Usage aUsage,
               const Rate aBitsPerSec)
        : mCodecType(aCodecType), mUsage(aUsage), mBitsPerSec(aBitsPerSec) {}

    virtual ~BaseConfig() = default;
  };

  template <typename T>
  struct VideoConfig final : public BaseConfig<T> {
    const gfx::IntSize mSize;
    const PixelFormat mSourcePixelFormat;
    const uint8_t mFramerate;
    const size_t mKeyframeInterval;

    VideoConfig(const CodecType aCodecType, const Usage aUsage,
                const gfx::IntSize& aSize, const PixelFormat aSourcePixelFormat,
                const uint8_t aFramerate, const size_t aKeyframeInterval,
                const Rate aBitrate)
        : BaseConfig<T>(aCodecType, aUsage, aBitrate),
          mSize(aSize),
          mSourcePixelFormat(aSourcePixelFormat),
          mFramerate(aFramerate),
          mKeyframeInterval(aKeyframeInterval) {}
  };

  template <typename T>
  struct AudioConfig final : public BaseConfig<T> {
    const uint8_t mNumChannels;
    const Rate mSampleRate;

    AudioConfig(const CodecType aCodecType, const Usage aUsage,
                const Rate aBitrate, const Rate aSampleRate,
                const uint8_t aNumChannels)
        : BaseConfig<T>(aCodecType, aUsage, aBitrate),
          mNumChannels(aNumChannels),
          mSampleRate(aSampleRate) {}
  };

  virtual ~MediaDataEncoder() = default;

 public:
  using H264Config = VideoConfig<H264Specific>;
  using VP8Config = VideoConfig<VPXSpecific::VP8>;
  using VP9Config = VideoConfig<VPXSpecific::VP9>;
};

class EncoderConfig final {
 public:
  using CodecSpecific =
      Variant<H264Specific, OpusSpecific, VP8Specific, VP9Specific>;
struct MOZ_STACK_CLASS CreateEncoderParams final {
  union CodecSpecific {
    MediaDataEncoder::H264Specific mH264;
    MediaDataEncoder::OpusSpecific mOpus;
    MediaDataEncoder::VPXSpecific::VP8 mVP8;
    MediaDataEncoder::VPXSpecific::VP9 mVP9;

  EncoderConfig(const EncoderConfig& aConfig)
      : mCodec(aConfig.mCodec),
        mSize(aConfig.mSize),
        mUsage(aConfig.mUsage),
        mHardwarePreference(aConfig.mHardwarePreference),
        mPixelFormat(aConfig.mPixelFormat),
        mSourcePixelFormat(aConfig.mSourcePixelFormat),
        mFramerate(aConfig.mFramerate),
        mKeyframeInterval(aConfig.mKeyframeInterval),
        mBitrate(aConfig.mBitrate),
        mBitrateMode(aConfig.mBitrateMode) {}
    explicit CodecSpecific(const MediaDataEncoder::H264Specific&& aH264)
        : mH264(aH264) {}
    explicit CodecSpecific(const MediaDataEncoder::OpusSpecific&& aOpus)
        : mOpus(aOpus) {}
    explicit CodecSpecific(const MediaDataEncoder::VPXSpecific::VP8&& aVP8)
        : mVP8(aVP8) {}
    explicit CodecSpecific(const MediaDataEncoder::VPXSpecific::VP9&& aVP9)
        : mVP9(aVP9) {}
  };

  template <typename... Ts>
  EncoderConfig(const CodecType aCodecType, gfx::IntSize aSize,
                const MediaDataEncoder::Usage aUsage,
                const MediaDataEncoder::PixelFormat aPixelFormat,
                const MediaDataEncoder::PixelFormat aSourcePixelFormat,
                const uint8_t aFramerate, const size_t aKeyframeInterval,
                const uint32_t aBitrate,
                const MediaDataEncoder::BitrateMode aBitrateMode,
                const MediaDataEncoder::HardwarePreference aHardwarePreference,
                const Maybe<CodecSpecific>& aCodecSpecific)
      : mCodec(aCodecType),
        mSize(aSize),
  CreateEncoderParams(const TrackInfo& aConfig,
                      const MediaDataEncoder::Usage aUsage,
                      const RefPtr<TaskQueue> aTaskQueue,
                      const MediaDataEncoder::PixelFormat aPixelFormat,
                      const uint8_t aFramerate, const size_t aKeyframeInterval,
                      const MediaDataEncoder::Rate aBitrate)
      : mConfig(aConfig),
        mUsage(aUsage),
        mHardwarePreference(aHardwarePreference),
        mTaskQueue(aTaskQueue),
        mPixelFormat(aPixelFormat),
        mSourcePixelFormat(aSourcePixelFormat),
        mFramerate(aFramerate),
        mKeyframeInterval(aKeyframeInterval),
        mBitrate(aBitrate),
        mBitrateMode(aBitrateMode),
        mCodecSpecific(aCodecSpecific) {}

  static CodecType CodecTypeForMime(const nsACString& aMimeType) {
    if (MP4Decoder::IsH264(aMimeType)) {
      return CodecType::H264;
    }
    if (VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP8)) {
      return CodecType::VP8;
    }
    if (VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP9)) {
      return CodecType::VP9;
    }
    MOZ_ASSERT_UNREACHABLE("Unsupported Mimetype");
    return CodecType::Unknown;
        mBitrate(aBitrate) {
    MOZ_ASSERT(mTaskQueue);
  }

  bool IsVideo() const {
    return mCodec > CodecType::_BeginVideo_ && mCodec < CodecType::_EndVideo_;
  template <typename... Ts>
  CreateEncoderParams(const TrackInfo& aConfig,
                      const MediaDataEncoder::Usage aUsage,
                      const RefPtr<TaskQueue> aTaskQueue,
                      const MediaDataEncoder::PixelFormat aPixelFormat,
                      const uint8_t aFramerate, const size_t aKeyframeInterval,
                      const MediaDataEncoder::Rate aBitrate,
                      const Ts&&... aCodecSpecific)
      : mConfig(aConfig),
        mUsage(aUsage),
        mTaskQueue(aTaskQueue),
        mPixelFormat(aPixelFormat),
        mFramerate(aFramerate),
        mKeyframeInterval(aKeyframeInterval),
        mBitrate(aBitrate) {
    MOZ_ASSERT(mTaskQueue);
    SetCodecSpecific(std::forward<const Ts>(aCodecSpecific)...);
  }

  bool IsAudio() const {
    return mCodec > CodecType::_BeginAudio_ && mCodec < CodecType::_EndAudio_;
  }

  CodecType mCodec;
  gfx::IntSize mSize;
  MediaDataEncoder::Usage mUsage;
  MediaDataEncoder::HardwarePreference mHardwarePreference;
  MediaDataEncoder::PixelFormat mPixelFormat;
  MediaDataEncoder::PixelFormat mSourcePixelFormat;
  uint8_t mFramerate{};
  size_t mKeyframeInterval{};
  uint32_t mBitrate{};
  MediaDataEncoder::BitrateMode mBitrateMode{};
  Maybe<CodecSpecific> mCodecSpecific;
};

// Wrap a type to make it unique. This allows using ergonomically in the Variant
// below. Simply aliasing with `using` isn't enough, because typedefs in C++
// don't produce strong types, so two integer variants result in
// the same type, making it ambiguous to the Variant code.
// T is the type to be wrapped. Phantom is a type that is only used to
// disambiguate and should be unique in the program.
template <typename T, typename Phantom>
class StrongTypedef {
 public:
  explicit StrongTypedef(T const& value) : mValue(value) {}
  explicit StrongTypedef(T&& value) : mValue(std::move(value)) {}
  T& get() { return mValue; }
  T const& get() const { return mValue; }

 private:
  T mValue;
};
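A short illustration of the ambiguity that comment describes (hypothetical aliases, not from the patch): plain `using` aliases of the same underlying type collapse into one Variant alternative, while phantom-tagged wrappers stay distinct:

// using BitrateA = uint32_t; using FramerateA = uint32_t;
// Variant<BitrateA, FramerateA> is Variant<uint32_t, uint32_t>: ambiguous.

using Bitrate = StrongTypedef<uint32_t, struct BitrateTag>;
using Framerate = StrongTypedef<uint32_t, struct FramerateTag>;
// Variant<Bitrate, Framerate> holds two distinct types, so constructing
// Bitrate(800u) or Framerate(30u) selects the alternative unambiguously.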

// Dimensions of the video frames
using DimensionsChange =
    StrongTypedef<gfx::IntSize, struct DimensionsChangeType>;
// Expected display size of the encoded frames, can influence encoding
using DisplayDimensionsChange =
    StrongTypedef<Maybe<gfx::IntSize>, struct DisplayDimensionsChangeType>;
// If present, the bitrate in kbps of the encoded stream. If absent, let the
// platform decide.
using BitrateChange = StrongTypedef<Maybe<uint32_t>, struct BitrateChangeType>;
// If present, the expected framerate of the output video stream. If absent,
// infer from the input frames timestamp.
using FramerateChange =
    StrongTypedef<Maybe<double>, struct FramerateChangeType>;
// The bitrate mode (variable, constant) of the encoding
using BitrateModeChange =
    StrongTypedef<MediaDataEncoder::BitrateMode, struct BitrateModeChangeType>;
// The usage for the encoded stream, this influence latency, ordering, etc.
using UsageChange =
    StrongTypedef<MediaDataEncoder::Usage, struct UsageChangeType>;
// If present, the expected content of the video frames (screen, movie, etc.).
// The value the string can have isn't decided just yet. When absent, the
// encoder uses generic settings.
using ContentHintChange =
    StrongTypedef<Maybe<nsString>, struct ContentHintTypeType>;

// A change to a parameter of an encoder instance.
using EncoderConfigurationItem =
    Variant<DimensionsChange, DisplayDimensionsChange, BitrateModeChange,
            BitrateChange, FramerateChange, UsageChange, ContentHintChange>;

// A list of changes to an encoder configuration, that _might_ be able to change
// on the fly. Not all encoder modules can adjust their configuration on the
// fly.
struct EncoderConfigurationChangeList {
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(EncoderConfigurationChangeList)
  bool Empty() const { return mChanges.IsEmpty(); }
  template <typename T>
  void Push(const T& aItem) {
    mChanges.AppendElement(aItem);
  void SetCodecSpecific(const T&& aCodecSpecific) {
    mCodecSpecific.emplace(std::forward<const T>(aCodecSpecific));
  }
  nsString ToString() const;

  nsTArray<EncoderConfigurationItem> mChanges;
  const MediaDataEncoder::H264Config ToH264Config() const {
    const VideoInfo* info = mConfig.GetAsVideoInfo();
    MOZ_ASSERT(info);

    auto config = MediaDataEncoder::H264Config(
        MediaDataEncoder::CodecType::H264, mUsage, info->mImage, mPixelFormat,
        mFramerate, mKeyframeInterval, mBitrate);
    if (mCodecSpecific) {
      config.SetCodecSpecific(mCodecSpecific.ref().mH264);
    }

    return config;
  }

  const MediaDataEncoder::VP8Config ToVP8Config() const {
    const VideoInfo* info = mConfig.GetAsVideoInfo();
    MOZ_ASSERT(info);

    auto config = MediaDataEncoder::VP8Config(
        CodecTypeForMime(info->mMimeType), mUsage, info->mImage, mPixelFormat,
        mFramerate, mKeyframeInterval, mBitrate);
    if (mCodecSpecific) {
      config.SetCodecSpecific(mCodecSpecific.ref().mVP8);
    }
    return config;
  }

  const MediaDataEncoder::VP9Config ToVP9Config() const {
    const VideoInfo* info = mConfig.GetAsVideoInfo();
    MOZ_ASSERT(info);

    auto config = MediaDataEncoder::VP9Config(
        CodecTypeForMime(info->mMimeType), mUsage, info->mImage, mPixelFormat,
        mFramerate, mKeyframeInterval, mBitrate);
    if (mCodecSpecific) {
      config.SetCodecSpecific(mCodecSpecific.ref().mVP9);
    }
    return config;
  }

  static MediaDataEncoder::CodecType CodecTypeForMime(
      const nsACString& aMimeType) {
    if (MP4Decoder::IsH264(aMimeType)) {
      return MediaDataEncoder::CodecType::H264;
    } else if (VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP8)) {
      return MediaDataEncoder::CodecType::VP8;
    } else if (VPXDecoder::IsVPX(aMimeType, VPXDecoder::VP9)) {
      return MediaDataEncoder::CodecType::VP9;
    } else {
      MOZ_ASSERT_UNREACHABLE("Unsupported Mimetype");
      return MediaDataEncoder::CodecType::Unknown;
    }
  }

  const TrackInfo& mConfig;
  const MediaDataEncoder::Usage mUsage;
  const RefPtr<TaskQueue> mTaskQueue;
  const MediaDataEncoder::PixelFormat mPixelFormat;
  const uint8_t mFramerate;
  const size_t mKeyframeInterval;
  const MediaDataEncoder::Rate mBitrate;
  Maybe<CodecSpecific> mCodecSpecific;

 private:
  ~EncoderConfigurationChangeList() = default;
};
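For context, this is roughly how the removed change-list API is driven; the type and method names come from the declarations above, but the call site itself is a hypothetical sketch:

auto changes = MakeRefPtr<EncoderConfigurationChangeList>();
changes->Push(BitrateChange(Some(800u)));  // retarget to ~800 kbps
changes->Push(UsageChange(MediaDataEncoder::Usage::Realtime));
if (!changes->Empty()) {
  // Resolves on success; rejects if the PEM cannot reconfigure on the fly.
  encoder->Reconfigure(changes);
}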

}  // namespace mozilla
@@ -3,6 +3,7 @@
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "H264.h"
#include <cmath>
#include <limits>
#include "AnnexB.h"
#include "BitReader.h"

@@ -10,6 +11,7 @@
#include "BufferReader.h"
#include "ByteStreamsUtils.h"
#include "ByteWriter.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/PodOperations.h"
#include "mozilla/ResultExtensions.h"
#include "mozilla/Try.h"

@@ -297,7 +299,7 @@ class SPSNAL {
  MOZ_ASSERT(mLength / 8 <= mDecodedNAL->Length());

  if (memcmp(mDecodedNAL->Elements(), aOther.mDecodedNAL->Elements(),
             mLength / 8) != 0) {
             mLength / 8)) {
    return false;
  }

@@ -643,14 +645,14 @@ bool H264::DecodeSPS(const mozilla::MediaByteBuffer* aSPS, SPSData& aDest) {
  // Determine display size.
  if (aDest.sample_ratio > 1.0) {
    // Increase the intrinsic width
    aDest.display_width = ConditionDimension(
        AssertedCast<float>(aDest.pic_width) * aDest.sample_ratio);
    aDest.display_width =
        ConditionDimension(aDest.pic_width * aDest.sample_ratio);
    aDest.display_height = aDest.pic_height;
  } else {
    // Increase the intrinsic height
    aDest.display_width = aDest.pic_width;
    aDest.display_height = ConditionDimension(
        AssertedCast<float>(aDest.pic_height) / aDest.sample_ratio);
    aDest.display_height =
        ConditionDimension(aDest.pic_height / aDest.sample_ratio);
  }

  aDest.valid = true;

@@ -911,8 +913,6 @@ uint32_t H264::ComputeMaxRefFrames(const mozilla::MediaByteBuffer* aExtraData) {
      case 4:
        nalLen = reader.ReadU32().unwrapOr(0);
        break;
      default:
        MOZ_ASSERT_UNREACHABLE("NAL length is up to 4 bytes");
    }
    if (!nalLen) {
      continue;

@@ -921,12 +921,11 @@ uint32_t H264::ComputeMaxRefFrames(const mozilla::MediaByteBuffer* aExtraData) {
    if (!p) {
      return FrameType::INVALID;
    }
    int8_t nalType = AssertedCast<int8_t>(*p & 0x1f);
    int8_t nalType = *p & 0x1f;
    if (nalType == H264_NAL_IDR_SLICE) {
      // IDR NAL.
      return FrameType::I_FRAME;
    }
    if (nalType == H264_NAL_SEI) {
    } else if (nalType == H264_NAL_SEI) {
      RefPtr<mozilla::MediaByteBuffer> decodedNAL = DecodeNALUnit(p, nalLen);
      SEIRecoveryData data;
      if (DecodeRecoverySEI(decodedNAL, data)) {

@@ -1000,8 +999,6 @@ uint32_t H264::ComputeMaxRefFrames(const mozilla::MediaByteBuffer* aExtraData) {
        Unused << reader.ReadU32().map(
            [&](uint32_t x) mutable { return nalLen = x; });
        break;
      default:
        MOZ_ASSERT_UNREACHABLE("NAL length size is at most 4 bytes");
    }
    const uint8_t* p = reader.Read(nalLen);
    if (!p) {

@@ -1315,7 +1312,7 @@ void H264::WriteExtraData(MediaByteBuffer* aDestExtraData,
  if (byteBuffer[0] != 1) {
    return mozilla::Err(NS_ERROR_FAILURE);
  }
  AVCCConfig avcc{};
  AVCCConfig avcc;
  avcc.mConfigurationVersion = byteBuffer[0];
  avcc.mAVCProfileIndication = byteBuffer[1];
  avcc.mProfileCompatibility = byteBuffer[2];
@@ -53,7 +53,7 @@ struct SPSData {
   gfx::YUVColorSpace ColorSpace() const;
   gfx::ColorDepth ColorDepth() const;

-  bool valid = {};
+  bool valid;

   /* Decoded Members */
   /*
@@ -61,58 +61,58 @@ struct SPSData {
     pic_width = ((pic_width_in_mbs_minus1 + 1) * 16)
                 - (frame_crop_left_offset + frame_crop_right_offset) * 2
   */
-  uint32_t pic_width = {};
+  uint32_t pic_width;
   /*
     pic_height is the decoded height according to:
     pic_height = (2 - frame_mbs_only_flag) * ((pic_height_in_map_units_minus1 +
                  1) * 16)
                 - (frame_crop_top_offset + frame_crop_bottom_offset) * 2
   */
-  uint32_t pic_height = {};
+  uint32_t pic_height;

-  bool interlaced = {};
+  bool interlaced;

   /*
     Displayed size.
     display_width and display_height are adjusted according to the display
     sample aspect ratio.
   */
-  uint32_t display_width = {};
-  uint32_t display_height = {};
+  uint32_t display_width;
+  uint32_t display_height;

-  float sample_ratio = {};
+  float sample_ratio;

-  uint32_t crop_left = {};
-  uint32_t crop_right = {};
-  uint32_t crop_top = {};
-  uint32_t crop_bottom = {};
+  uint32_t crop_left;
+  uint32_t crop_right;
+  uint32_t crop_top;
+  uint32_t crop_bottom;

   /*
     H264 decoding parameters according to ITU-T H.264 (T-REC-H.264-201402-I/en)
     http://www.itu.int/rec/T-REC-H.264-201402-I/en
   */

-  bool constraint_set0_flag = {};
-  bool constraint_set1_flag = {};
-  bool constraint_set2_flag = {};
-  bool constraint_set3_flag = {};
-  bool constraint_set4_flag = {};
-  bool constraint_set5_flag = {};
+  bool constraint_set0_flag;
+  bool constraint_set1_flag;
+  bool constraint_set2_flag;
+  bool constraint_set3_flag;
+  bool constraint_set4_flag;
+  bool constraint_set5_flag;

   /*
     profile_idc and level_idc indicate the profile and level to which the coded
     video sequence conforms when the SVC sequence parameter set is the active
     SVC sequence parameter set.
   */
-  uint8_t profile_idc = {};
-  uint8_t level_idc = {};
+  uint8_t profile_idc;
+  uint8_t level_idc;

   /*
     seq_parameter_set_id identifies the sequence parameter set that is referred
     to by the picture parameter set. The value of seq_parameter_set_id shall be
     in the range of 0 to 31, inclusive.
   */
-  uint8_t seq_parameter_set_id = {};
+  uint8_t seq_parameter_set_id;

   /*
     chroma_format_idc specifies the chroma sampling relative to the luma
@@ -122,7 +122,7 @@ struct SPSData {
     When profile_idc is equal to 183, chroma_format_idc shall be equal to 0
     (4:0:0 chroma format).
   */
-  uint8_t chroma_format_idc = {};
+  uint8_t chroma_format_idc;

   /*
     bit_depth_luma_minus8 specifies the bit depth of the samples of the luma
@@ -133,7 +133,7 @@ struct SPSData {
     When bit_depth_luma_minus8 is not present, it shall be inferred to be equal
     to 0. bit_depth_luma_minus8 shall be in the range of 0 to 6, inclusive.
   */
-  uint8_t bit_depth_luma_minus8 = {};
+  uint8_t bit_depth_luma_minus8;

   /*
     bit_depth_chroma_minus8 specifies the bit depth of the samples of the chroma
@@ -145,7 +145,7 @@ struct SPSData {
     equal to 0. bit_depth_chroma_minus8 shall be in the range of 0 to 6,
     inclusive.
   */
-  uint8_t bit_depth_chroma_minus8 = {};
+  uint8_t bit_depth_chroma_minus8;

   /*
     separate_colour_plane_flag equal to 1 specifies that the three colour
@@ -158,7 +158,7 @@ struct SPSData {
     that each use the monochrome coding syntax. In this case, each colour plane
     is associated with a specific colour_plane_id value.
   */
-  bool separate_colour_plane_flag = {};
+  bool separate_colour_plane_flag;

   /*
     seq_scaling_matrix_present_flag equal to 1 specifies that the flags
@@ -170,7 +170,7 @@ struct SPSData {
     i = 6..11. When seq_scaling_matrix_present_flag is not present, it shall be
     inferred to be equal to 0.
   */
-  bool seq_scaling_matrix_present_flag = {};
+  bool seq_scaling_matrix_present_flag;

   /*
     log2_max_frame_num_minus4 specifies the value of the variable
@@ -180,14 +180,14 @@ struct SPSData {
     MaxFrameNum = 2^( log2_max_frame_num_minus4 + 4 ). The value of
     log2_max_frame_num_minus4 shall be in the range of 0 to 12, inclusive.
   */
-  uint8_t log2_max_frame_num = {};
+  uint8_t log2_max_frame_num;

   /*
     pic_order_cnt_type specifies the method to decode picture order
     count (as specified in subclause 8.2.1). The value of
     pic_order_cnt_type shall be in the range of 0 to 2, inclusive.
   */
-  uint8_t pic_order_cnt_type = {};
+  uint8_t pic_order_cnt_type;

   /*
     log2_max_pic_order_cnt_lsb_minus4 specifies the value of the
@@ -200,7 +200,7 @@ struct SPSData {
     The value of log2_max_pic_order_cnt_lsb_minus4 shall be in
     the range of 0 to 12, inclusive.
   */
-  uint8_t log2_max_pic_order_cnt_lsb = {};
+  uint8_t log2_max_pic_order_cnt_lsb;

   /*
     delta_pic_order_always_zero_flag equal to 1 specifies that
@@ -208,7 +208,7 @@ struct SPSData {
     not present in the slice headers of the sequence and shall
     be inferred to be equal to 0.
   */
-  bool delta_pic_order_always_zero_flag = {};
+  bool delta_pic_order_always_zero_flag;

   /*
     offset_for_non_ref_pic is used to calculate the picture
@@ -216,7 +216,7 @@ struct SPSData {
     8.2.1. The value of offset_for_non_ref_pic shall be in the
     range of -2^31 to 2^31 - 1, inclusive.
   */
-  int8_t offset_for_non_ref_pic = {};
+  int8_t offset_for_non_ref_pic;

   /*
     offset_for_top_to_bottom_field is used to calculate the
@@ -224,7 +224,7 @@ struct SPSData {
     subclause 8.2.1. The value of offset_for_top_to_bottom_field
     shall be in the range of -2^31 to 2^31 - 1, inclusive.
   */
-  int8_t offset_for_top_to_bottom_field = {};
+  int8_t offset_for_top_to_bottom_field;

   /*
     max_num_ref_frames specifies the maximum number of short-term and
@@ -236,7 +236,7 @@ struct SPSData {
     max_num_ref_frames shall be in the range of 0 to MaxDpbFrames (as
     specified in subclause A.3.1 or A.3.2), inclusive.
   */
-  uint32_t max_num_ref_frames = {};
+  uint32_t max_num_ref_frames;

   /*
     gaps_in_frame_num_value_allowed_flag specifies the allowed
@@ -244,20 +244,20 @@ struct SPSData {
     decoding process in case of an inferred gap between values of
     frame_num as specified in subclause 8.2.5.2.
   */
-  bool gaps_in_frame_num_allowed_flag = {};
+  bool gaps_in_frame_num_allowed_flag;

   /*
     pic_width_in_mbs_minus1 plus 1 specifies the width of each
     decoded picture in units of macroblocks. 16 macroblocks in a row
   */
-  uint32_t pic_width_in_mbs = {};
+  uint32_t pic_width_in_mbs;

   /*
     pic_height_in_map_units_minus1 plus 1 specifies the height in
     slice group map units of a decoded frame or field. 16
     macroblocks in each column.
   */
-  uint32_t pic_height_in_map_units = {};
+  uint32_t pic_height_in_map_units;

   /*
     frame_mbs_only_flag equal to 0 specifies that coded pictures of
@@ -266,7 +266,7 @@ struct SPSData {
     coded picture of the coded video sequence is a coded frame
     containing only frame macroblocks.
   */
-  bool frame_mbs_only_flag = {};
+  bool frame_mbs_only_flag;

   /*
     mb_adaptive_frame_field_flag equal to 0 specifies no
@@ -276,7 +276,7 @@ struct SPSData {
     macroblocks within frames. When mb_adaptive_frame_field_flag
     is not present, it shall be inferred to be equal to 0.
   */
-  bool mb_adaptive_frame_field_flag = {};
+  bool mb_adaptive_frame_field_flag;

   /*
     direct_8x8_inference_flag specifies the method used in the derivation
@@ -284,7 +284,7 @@ struct SPSData {
     as specified in clause 8.4.1.2. When frame_mbs_only_flag is equal to 0,
     direct_8x8_inference_flag shall be equal to 1.
   */
-  bool direct_8x8_inference_flag = {};
+  bool direct_8x8_inference_flag;

   /*
     frame_cropping_flag equal to 1 specifies that the frame cropping
@@ -292,11 +292,11 @@ struct SPSData {
     set. frame_cropping_flag equal to 0 specifies that the frame
     cropping offset parameters are not present.
   */
-  bool frame_cropping_flag = {};
-  uint32_t frame_crop_left_offset = {};
-  uint32_t frame_crop_right_offset = {};
-  uint32_t frame_crop_top_offset = {};
-  uint32_t frame_crop_bottom_offset = {};
+  bool frame_cropping_flag;
+  uint32_t frame_crop_left_offset;
+  uint32_t frame_crop_right_offset;
+  uint32_t frame_crop_top_offset;
+  uint32_t frame_crop_bottom_offset;

   // VUI Parameters

@@ -307,14 +307,14 @@ struct SPSData {
     the vui_parameters( ) syntax structure as specified in Annex E
     is not present.
   */
-  bool vui_parameters_present_flag = {};
+  bool vui_parameters_present_flag;

   /*
     aspect_ratio_info_present_flag equal to 1 specifies that
     aspect_ratio_idc is present. aspect_ratio_info_present_flag
     equal to 0 specifies that aspect_ratio_idc is not present.
   */
-  bool aspect_ratio_info_present_flag = {};
+  bool aspect_ratio_info_present_flag;

   /*
     aspect_ratio_idc specifies the value of the sample aspect
@@ -325,9 +325,9 @@ struct SPSData {
     present, aspect_ratio_idc value shall be inferred to be
     equal to 0.
   */
-  uint8_t aspect_ratio_idc = {};
-  uint32_t sar_width = {};
-  uint32_t sar_height = {};
+  uint8_t aspect_ratio_idc;
+  uint32_t sar_width;
+  uint32_t sar_height;

   /*
     video_signal_type_present_flag equal to 1 specifies that video_format,
@@ -335,7 +335,7 @@ struct SPSData {
     video_signal_type_present_flag equal to 0, specify that video_format,
     video_full_range_flag and colour_description_present_flag are not present.
   */
-  bool video_signal_type_present_flag = {};
+  bool video_signal_type_present_flag;

   /*
     overscan_info_present_flag equal to 1 specifies that the
@@ -343,7 +343,7 @@ struct SPSData {
     equal to 0 or is not present, the preferred display method for the video
     signal is unspecified (Unspecified).
   */
-  bool overscan_info_present_flag = {};
+  bool overscan_info_present_flag;
   /*
     overscan_appropriate_flag equal to 1 indicates that the cropped decoded
     pictures output are suitable for display using overscan.
@@ -351,7 +351,7 @@ struct SPSData {
     pictures output contain visually important information in the entire region
     out to the edges of the cropping rectangle of the picture
   */
-  bool overscan_appropriate_flag = {};
+  bool overscan_appropriate_flag;

   /*
     video_format indicates the representation of the pictures as specified in
@@ -360,7 +360,7 @@ struct SPSData {
     element is not present, video_format value shall be inferred to be equal
     to 5. (Unspecified video format)
   */
-  uint8_t video_format = {};
+  uint8_t video_format;

   /*
     video_full_range_flag indicates the black level and range of the luma and
@@ -369,7 +369,7 @@ struct SPSData {
     When the video_full_range_flag syntax element is not present, the value of
     video_full_range_flag shall be inferred to be equal to 0.
   */
-  bool video_full_range_flag = {};
+  bool video_full_range_flag;

   /*
     colour_description_present_flag equal to 1 specifies that colour_primaries,
@@ -377,7 +377,7 @@ struct SPSData {
     colour_description_present_flag equal to 0 specifies that colour_primaries,
     transfer_characteristics and matrix_coefficients are not present.
   */
-  bool colour_description_present_flag = {};
+  bool colour_description_present_flag;

   /*
     colour_primaries indicates the chromaticity coordinates of the source
@@ -387,7 +387,7 @@ struct SPSData {
     colour_primaries shall be inferred to be equal to 2 (the chromaticity is
     unspecified or is determined by the application).
   */
-  uint8_t colour_primaries = {};
+  uint8_t colour_primaries;

   /*
     transfer_characteristics indicates the opto-electronic transfer
@@ -399,21 +399,21 @@ struct SPSData {
     (the transfer characteristics are unspecified or are determined by the
     application).
   */
-  uint8_t transfer_characteristics = {};
+  uint8_t transfer_characteristics;

-  uint8_t matrix_coefficients = {};
-  bool chroma_loc_info_present_flag = {};
+  uint8_t matrix_coefficients;
+  bool chroma_loc_info_present_flag;
   /*
     The value of chroma_sample_loc_type_top_field and
     chroma_sample_loc_type_bottom_field shall be in the range of 0 to 5,
     inclusive
   */
-  uint8_t chroma_sample_loc_type_top_field = {};
-  uint8_t chroma_sample_loc_type_bottom_field = {};
+  uint8_t chroma_sample_loc_type_top_field;
+  uint8_t chroma_sample_loc_type_bottom_field;

-  bool scaling_matrix_present = {};
-  uint8_t scaling_matrix4x4[6][16] = {};
-  uint8_t scaling_matrix8x8[6][64] = {};
+  bool scaling_matrix_present;
+  uint8_t scaling_matrix4x4[6][16];
+  uint8_t scaling_matrix8x8[6][64];

   SPSData();
 };
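All of the SPSData hunks above follow one pattern: the C++11 default member initializers (= {}) are dropped and zeroing moves back to the out-of-line SPSData() constructor. A toy illustration of the two styles (not the real struct):

#include <cstdint>

struct WithInitializers {
  uint32_t pic_width = {};  // value-initialized to 0 on every construction
};

struct WithConstructor {
  uint32_t pic_width;                  // indeterminate unless a ctor sets it
  WithConstructor() : pic_width(0) {}  // the SPSData() approach
};

Both end up zeroed; the difference is only where the zeroing is written down, and that members added later must be remembered in the constructor.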
@@ -8,12 +8,18 @@
 #include "H264.h"
 #include "MediaData.h"
 #include "MediaInfo.h"
+#include "SimpleMap.h"

 #include "ImageContainer.h"
+#include "libyuv/convert_from.h"
 #include "mozilla/Logging.h"
 #include "mozilla/ResultVariant.h"

+#include "nsMimeTypes.h"
+
+#include "libyuv.h"
+
 namespace mozilla {
 using media::TimeUnit;

 extern LazyLogModule sPEMLog;
 #define AND_ENC_LOG(arg, ...) \
@@ -32,23 +38,24 @@ extern LazyLogModule sPEMLog;
   }                                                                    \
 } while (0)

-RefPtr<MediaDataEncoder::InitPromise> AndroidDataEncoder::Init() {
+template <typename ConfigType>
+RefPtr<MediaDataEncoder::InitPromise> AndroidDataEncoder<ConfigType>::Init() {
   // Sanity-check the input size for Android software encoder fails to do it.
   if (mConfig.mSize.width == 0 || mConfig.mSize.height == 0) {
     return InitPromise::CreateAndReject(NS_ERROR_ILLEGAL_VALUE, __func__);
   }

   return InvokeAsync(mTaskQueue, this, __func__,
-                     &AndroidDataEncoder::ProcessInit);
+                     &AndroidDataEncoder<ConfigType>::ProcessInit);
 }

-static const char* MimeTypeOf(CodecType aCodec) {
+static const char* MimeTypeOf(MediaDataEncoder::CodecType aCodec) {
   switch (aCodec) {
-    case CodecType::H264:
+    case MediaDataEncoder::CodecType::H264:
       return "video/avc";
-    case CodecType::VP8:
+    case MediaDataEncoder::CodecType::VP8:
       return "video/x-vnd.on2.vp8";
-    case CodecType::VP9:
+    case MediaDataEncoder::CodecType::VP9:
       return "video/x-vnd.on2.vp9";
     default:
       return "";
@@ -57,10 +64,11 @@ static const char* MimeTypeOf(CodecType aCodec) {
 using FormatResult = Result<java::sdk::MediaFormat::LocalRef, MediaResult>;

-FormatResult ToMediaFormat(const EncoderConfig& aConfig) {
+template <typename ConfigType>
+FormatResult ToMediaFormat(const ConfigType& aConfig) {
   nsresult rv = NS_OK;
   java::sdk::MediaFormat::LocalRef format;
-  rv = java::sdk::MediaFormat::CreateVideoFormat(MimeTypeOf(aConfig.mCodec),
+  rv = java::sdk::MediaFormat::CreateVideoFormat(MimeTypeOf(aConfig.mCodecType),
                                                  aConfig.mSize.width,
                                                  aConfig.mSize.height, &format);
   NS_ENSURE_SUCCESS(
@@ -73,7 +81,7 @@ FormatResult ToMediaFormat(const EncoderConfig& aConfig) {
                                      "fail to set bitrate mode")));

   rv = format->SetInteger(java::sdk::MediaFormat::KEY_BIT_RATE,
-                          AssertedCast<int>(aConfig.mBitrate));
+                          aConfig.mBitsPerSec);
   NS_ENSURE_SUCCESS(rv, FormatResult(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                                                  "fail to set bitrate")));

@@ -91,8 +99,8 @@ FormatResult ToMediaFormat(const EncoderConfig& aConfig) {
   // Ensure interval >= 1. A negative value means no key frames are
   // requested after the first frame. A zero value means a stream
   // containing all key frames is requested.
-  int32_t intervalInSec = AssertedCast<int32_t>(
-      std::max<size_t>(1, aConfig.mKeyframeInterval / aConfig.mFramerate));
+  int32_t intervalInSec =
+      std::max<size_t>(1, aConfig.mKeyframeInterval / aConfig.mFramerate);
   rv = format->SetInteger(java::sdk::MediaFormat::KEY_I_FRAME_INTERVAL,
                           intervalInSec);
   NS_ENSURE_SUCCESS(rv,
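MediaCodec's KEY_I_FRAME_INTERVAL is expressed in seconds while mKeyframeInterval is a frame count, hence the divide-by-framerate and the clamp to at least 1. The same arithmetic in isolation (assumes a non-zero frame rate; names shortened):

#include <algorithm>
#include <cstddef>
#include <cstdint>

// e.g. 150 frames at 30 fps -> request a keyframe every 5 seconds.
static int32_t KeyframeIntervalSeconds(size_t aIntervalFrames,
                                       size_t aFramesPerSecond) {
  return static_cast<int32_t>(
      std::max<size_t>(1, aIntervalFrames / aFramesPerSecond));
}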
@@ -102,7 +110,9 @@ FormatResult ToMediaFormat(const EncoderConfig& aConfig) {
   return format;
 }

-RefPtr<MediaDataEncoder::InitPromise> AndroidDataEncoder::ProcessInit() {
+template <typename ConfigType>
+RefPtr<MediaDataEncoder::InitPromise>
+AndroidDataEncoder<ConfigType>::ProcessInit() {
   AssertOnTaskQueue();
   MOZ_ASSERT(!mJavaEncoder);

@@ -113,7 +123,7 @@ RefPtr<MediaDataEncoder::InitPromise> AndroidDataEncoder::ProcessInit() {
   }
   mInputBufferInfo = bufferInfo;

-  FormatResult result = ToMediaFormat(mConfig);
+  FormatResult result = ToMediaFormat<ConfigType>(mConfig);
   if (result.isErr()) {
     return InitPromise::CreateAndReject(result.unwrapErr(), __func__);
   }
@@ -147,18 +157,20 @@ RefPtr<MediaDataEncoder::InitPromise> AndroidDataEncoder::ProcessInit() {
   return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
 }

-RefPtr<MediaDataEncoder::EncodePromise> AndroidDataEncoder::Encode(
+template <typename ConfigType>
+RefPtr<MediaDataEncoder::EncodePromise> AndroidDataEncoder<ConfigType>::Encode(
     const MediaData* aSample) {
   RefPtr<AndroidDataEncoder> self = this;
   MOZ_ASSERT(aSample != nullptr);

   RefPtr<const MediaData> sample(aSample);
-  return InvokeAsync(mTaskQueue, __func__,
-                     [self, sample]() { return self->ProcessEncode(sample); });
+  return InvokeAsync(mTaskQueue, __func__, [self, sample]() {
+    return self->ProcessEncode(std::move(sample));
+  });
 }

 static jni::ByteBuffer::LocalRef ConvertI420ToNV12Buffer(
-    RefPtr<const VideoData>& aSample, RefPtr<MediaByteBuffer>& aYUVBuffer,
+    RefPtr<const VideoData> aSample, RefPtr<MediaByteBuffer>& aYUVBuffer,
     int aStride, int aYPlaneHeight) {
   const layers::PlanarYCbCrImage* image = aSample->mImage->AsPlanarYCbCrImage();
   MOZ_ASSERT(image);
@@ -191,8 +203,9 @@ static jni::ByteBuffer::LocalRef ConvertI420ToNV12Buffer(
   return jni::ByteBuffer::New(aYUVBuffer->Elements(), aYUVBuffer->Length());
 }

-RefPtr<MediaDataEncoder::EncodePromise> AndroidDataEncoder::ProcessEncode(
-    const RefPtr<const MediaData>& aSample) {
+template <typename ConfigType>
+RefPtr<MediaDataEncoder::EncodePromise>
+AndroidDataEncoder<ConfigType>::ProcessEncode(RefPtr<const MediaData> aSample) {
   AssertOnTaskQueue();

   REJECT_IF_ERROR();
@@ -210,11 +223,11 @@ RefPtr<MediaDataEncoder::EncodePromise> AndroidDataEncoder::ProcessEncode(
   }

   if (aSample->mKeyframe) {
-    mInputBufferInfo->Set(0, AssertedCast<int32_t>(mYUVBuffer->Length()),
+    mInputBufferInfo->Set(0, mYUVBuffer->Length(),
                           aSample->mTime.ToMicroseconds(),
                           java::sdk::MediaCodec::BUFFER_FLAG_SYNC_FRAME);
   } else {
-    mInputBufferInfo->Set(0, AssertedCast<int32_t>(mYUVBuffer->Length()),
+    mInputBufferInfo->Set(0, mYUVBuffer->Length(),
                           aSample->mTime.ToMicroseconds(), 0);
   }

@@ -223,8 +236,9 @@ RefPtr<MediaDataEncoder::EncodePromise> AndroidDataEncoder::ProcessEncode(
   if (mEncodedData.Length() > 0) {
     EncodedData pending = std::move(mEncodedData);
     return EncodePromise::CreateAndResolve(std::move(pending), __func__);
-  }
-  return EncodePromise::CreateAndResolve(EncodedData(), __func__);
+  } else {
+    return EncodePromise::CreateAndResolve(EncodedData(), __func__);
+  }
 }

 class AutoRelease final {
@@ -266,7 +280,8 @@ static RefPtr<MediaByteBuffer> ExtractCodecConfig(
   return avcc;
 }

-void AndroidDataEncoder::ProcessOutput(
+template <typename ConfigType>
+void AndroidDataEncoder<ConfigType>::ProcessOutput(
     java::Sample::GlobalRef&& aSample,
     java::SampleBuffer::GlobalRef&& aBuffer) {
   if (!mTaskQueue->IsCurrentThreadIn()) {
@@ -314,16 +329,9 @@ void AndroidDataEncoder::ProcessOutput(
                 mConfig.mUsage == Usage::Realtime);
     return;
   }
-  RefPtr<MediaRawData> output;
-  if (mConfig.mCodec == CodecType::H264) {
-    output = GetOutputDataH264(
-        aBuffer, offset, size,
-        !!(flags & java::sdk::MediaCodec::BUFFER_FLAG_KEY_FRAME));
-  } else {
-    output = GetOutputData(
-        aBuffer, offset, size,
-        !!(flags & java::sdk::MediaCodec::BUFFER_FLAG_KEY_FRAME));
-  }
+  RefPtr<MediaRawData> output =
+      GetOutputData(aBuffer, offset, size,
+                    !!(flags & java::sdk::MediaCodec::BUFFER_FLAG_KEY_FRAME));
   output->mEOS = isEOS;
   output->mTime = media::TimeUnit::FromMicroseconds(presentationTimeUs);
   mEncodedData.AppendElement(std::move(output));
@@ -338,7 +346,8 @@ void AndroidDataEncoder::ProcessOutput(
   }
 }

-RefPtr<MediaRawData> AndroidDataEncoder::GetOutputData(
+template <typename ConfigType>
+RefPtr<MediaRawData> AndroidDataEncoder<ConfigType>::GetOutputData(
     java::SampleBuffer::Param aBuffer, const int32_t aOffset,
     const int32_t aSize, const bool aIsKeyFrame) {
   // Copy frame data from Java buffer.
@@ -356,8 +365,10 @@ RefPtr<MediaRawData> AndroidDataEncoder::GetOutputData(
   return output;
 }

-// AVC/H.264 frame can be in avcC or Annex B and needs extra conversion steps.
-RefPtr<MediaRawData> AndroidDataEncoder::GetOutputDataH264(
+// AVC/H.264 frame can be in avcC or Annex B and needs extra convertion steps.
+template <>
+RefPtr<MediaRawData>
+AndroidDataEncoder<MediaDataEncoder::H264Config>::GetOutputData(
     java::SampleBuffer::Param aBuffer, const int32_t aOffset,
     const int32_t aSize, const bool aIsKeyFrame) {
   auto output = MakeRefPtr<MediaRawData>();
@@ -397,12 +408,16 @@ RefPtr<MediaRawData> AndroidDataEncoder::GetOutputDataH264(
   return output;
 }

-RefPtr<MediaDataEncoder::EncodePromise> AndroidDataEncoder::Drain() {
+template <typename ConfigType>
+RefPtr<MediaDataEncoder::EncodePromise>
+AndroidDataEncoder<ConfigType>::Drain() {
   return InvokeAsync(mTaskQueue, this, __func__,
-                     &AndroidDataEncoder::ProcessDrain);
+                     &AndroidDataEncoder<ConfigType>::ProcessDrain);
 }

-RefPtr<MediaDataEncoder::EncodePromise> AndroidDataEncoder::ProcessDrain() {
+template <typename ConfigType>
+RefPtr<MediaDataEncoder::EncodePromise>
+AndroidDataEncoder<ConfigType>::ProcessDrain() {
   AssertOnTaskQueue();
   MOZ_ASSERT(mJavaEncoder);
   MOZ_ASSERT(mDrainPromise.IsEmpty());
@@ -431,12 +446,14 @@ RefPtr<MediaDataEncoder::EncodePromise> AndroidDataEncoder::ProcessDrain() {
   }
 }

-RefPtr<ShutdownPromise> AndroidDataEncoder::Shutdown() {
+template <typename ConfigType>
+RefPtr<ShutdownPromise> AndroidDataEncoder<ConfigType>::Shutdown() {
   return InvokeAsync(mTaskQueue, this, __func__,
-                     &AndroidDataEncoder::ProcessShutdown);
+                     &AndroidDataEncoder<ConfigType>::ProcessShutdown);
 }

-RefPtr<ShutdownPromise> AndroidDataEncoder::ProcessShutdown() {
+template <typename ConfigType>
+RefPtr<ShutdownPromise> AndroidDataEncoder<ConfigType>::ProcessShutdown() {
   AssertOnTaskQueue();
   if (mJavaEncoder) {
     mJavaEncoder->Release();
@@ -454,18 +471,24 @@ RefPtr<ShutdownPromise> AndroidDataEncoder::ProcessShutdown() {
   return ShutdownPromise::CreateAndResolve(true, __func__);
 }

-RefPtr<GenericPromise> AndroidDataEncoder::SetBitrate(uint32_t aBitsPerSec) {
+template <typename ConfigType>
+RefPtr<GenericPromise> AndroidDataEncoder<ConfigType>::SetBitrate(
+    const MediaDataEncoder::Rate aBitsPerSec) {
   RefPtr<AndroidDataEncoder> self(this);
   return InvokeAsync(mTaskQueue, __func__, [self, aBitsPerSec]() {
-    self->mJavaEncoder->SetBitrate(AssertedCast<int>(aBitsPerSec));
+    self->mJavaEncoder->SetBitrate(aBitsPerSec);
     return GenericPromise::CreateAndResolve(true, __func__);
   });
+
+  return nullptr;
 }

-void AndroidDataEncoder::Error(const MediaResult& aError) {
+template <typename ConfigType>
+void AndroidDataEncoder<ConfigType>::Error(const MediaResult& aError) {
   if (!mTaskQueue->IsCurrentThreadIn()) {
     nsresult rv = mTaskQueue->Dispatch(NewRunnableMethod<MediaResult>(
-        "AndroidDataEncoder::Error", this, &AndroidDataEncoder::Error, aError));
+        "AndroidDataEncoder::Error", this,
+        &AndroidDataEncoder<ConfigType>::Error, aError));
     MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
     Unused << rv;
     return;
@@ -475,21 +498,25 @@ void AndroidDataEncoder::Error(const MediaResult& aError) {
   mError = Some(aError);
 }

-void AndroidDataEncoder::CallbacksSupport::HandleInput(int64_t aTimestamp,
-                                                       bool aProcessed) {}
+template <typename ConfigType>
+void AndroidDataEncoder<ConfigType>::CallbacksSupport::HandleInput(
+    int64_t aTimestamp, bool aProcessed) {}

-void AndroidDataEncoder::CallbacksSupport::HandleOutput(
+template <typename ConfigType>
+void AndroidDataEncoder<ConfigType>::CallbacksSupport::HandleOutput(
    java::Sample::Param aSample, java::SampleBuffer::Param aBuffer) {
   MutexAutoLock lock(mMutex);
   if (mEncoder) {
-    mEncoder->ProcessOutput(aSample, aBuffer);
+    mEncoder->ProcessOutput(std::move(aSample), std::move(aBuffer));
   }
 }

-void AndroidDataEncoder::CallbacksSupport::HandleOutputFormatChanged(
-    java::sdk::MediaFormat::Param aFormat) {}
+template <typename ConfigType>
+void AndroidDataEncoder<ConfigType>::CallbacksSupport::
+    HandleOutputFormatChanged(java::sdk::MediaFormat::Param aFormat) {}

-void AndroidDataEncoder::CallbacksSupport::HandleError(
+template <typename ConfigType>
+void AndroidDataEncoder<ConfigType>::CallbacksSupport::HandleError(
     const MediaResult& aError) {
   MutexAutoLock lock(mMutex);
   if (mEncoder) {
@@ -497,6 +524,10 @@ void AndroidDataEncoder::CallbacksSupport::HandleError(
   }
 }

+// Force compiler to generate code.
+template class AndroidDataEncoder<MediaDataEncoder::H264Config>;
+template class AndroidDataEncoder<MediaDataEncoder::VP8Config>;
+template class AndroidDataEncoder<MediaDataEncoder::VP9Config>;
 }  // namespace mozilla

 #undef AND_ENC_LOG
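The template class lines restored at the end of the file are explicit instantiation definitions: since the member functions are defined in the .cpp rather than the header, the compiler must be told to emit code for each config type that other translation units will link against. A minimal standalone example of the pattern (toy names):

// widget.h: declaration only.
template <typename Config>
class Widget {
 public:
  void Init(const Config& aConfig);
};

// widget.cpp: member definitions plus explicit instantiations.
struct ConfigA {};
struct ConfigB {};

template <typename Config>
void Widget<Config>::Init(const Config& /* aConfig */) { /* ... */ }

template class Widget<ConfigA>;  // emit Widget<ConfigA>::Init here
template class Widget<ConfigB>;  // emit Widget<ConfigB>::Init here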
@@ -7,18 +7,20 @@
 #include "MediaData.h"
 #include "PlatformEncoderModule.h"
+#include "TimeUnits.h"

 #include "JavaCallbacksSupport.h"

 #include "mozilla/Maybe.h"
+#include "mozilla/Monitor.h"
 #include "mozilla/Mutex.h"

 namespace mozilla {

+template <typename ConfigType>
 class AndroidDataEncoder final : public MediaDataEncoder {
  public:
-  AndroidDataEncoder(const EncoderConfig& aConfig,
-                     const RefPtr<TaskQueue>& aTaskQueue)
+  AndroidDataEncoder(const ConfigType& aConfig, RefPtr<TaskQueue> aTaskQueue)
       : mConfig(aConfig), mTaskQueue(aTaskQueue) {
     MOZ_ASSERT(mConfig.mSize.width > 0 && mConfig.mSize.height > 0);
     MOZ_ASSERT(mTaskQueue);
@@ -27,14 +29,7 @@ class AndroidDataEncoder final : public MediaDataEncoder {
   RefPtr<EncodePromise> Encode(const MediaData* aSample) override;
   RefPtr<EncodePromise> Drain() override;
   RefPtr<ShutdownPromise> Shutdown() override;
-  RefPtr<GenericPromise> SetBitrate(uint32_t aBitsPerSec) override;
-  RefPtr<ReconfigurationPromise> Reconfigure(
-      const RefPtr<const EncoderConfigurationChangeList>& aConfigurationChanges)
-      override {
-    // General reconfiguration interface not implemented right now
-    return MediaDataEncoder::ReconfigurationPromise::CreateAndReject(
-        NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
-  };
+  RefPtr<GenericPromise> SetBitrate(const Rate aBitsPerSec) override;

   nsCString GetDescriptionName() const override { return "Android Encoder"_ns; }

@@ -67,7 +62,7 @@ class AndroidDataEncoder final : public MediaDataEncoder {

   // Methods only called on mTaskQueue.
   RefPtr<InitPromise> ProcessInit();
-  RefPtr<EncodePromise> ProcessEncode(const RefPtr<const MediaData>& aSample);
+  RefPtr<EncodePromise> ProcessEncode(RefPtr<const MediaData> aSample);
   RefPtr<EncodePromise> ProcessDrain();
   RefPtr<ShutdownPromise> ProcessShutdown();
   void ProcessInput();
@@ -76,17 +71,13 @@ class AndroidDataEncoder final : public MediaDataEncoder {
   RefPtr<MediaRawData> GetOutputData(java::SampleBuffer::Param aBuffer,
                                      const int32_t aOffset, const int32_t aSize,
                                      const bool aIsKeyFrame);
-  RefPtr<MediaRawData> GetOutputDataH264(java::SampleBuffer::Param aBuffer,
-                                         const int32_t aOffset,
-                                         const int32_t aSize,
-                                         const bool aIsKeyFrame);
   void Error(const MediaResult& aError);

   void AssertOnTaskQueue() const {
     MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
   }

-  EncoderConfig mConfig;
+  const ConfigType mConfig;

   RefPtr<TaskQueue> mTaskQueue;

@@ -108,8 +99,8 @@ class AndroidDataEncoder final : public MediaDataEncoder {
   // SPS/PPS NALUs for realtime usage, avcC otherwise.
   RefPtr<MediaByteBuffer> mConfigData;

-  enum class DrainState { DRAINABLE, DRAINING, DRAINED };
-  DrainState mDrainState = DrainState::DRAINABLE;
+  enum class DrainState { DRAINED, DRAINABLE, DRAINING };
+  DrainState mDrainState;

   Maybe<MediaResult> mError;
 };
@@ -5,6 +5,8 @@
 #include "AndroidEncoderModule.h"

 #include "AndroidDataEncoder.h"
+#include "MP4Decoder.h"
+#include "VPXDecoder.h"

 #include "mozilla/Logging.h"
 #include "mozilla/java/HardwareCodecCapabilityUtilsWrappers.h"
@@ -16,27 +18,38 @@ extern LazyLogModule sPEMLog;
     sPEMLog, mozilla::LogLevel::Debug, \
     ("AndroidEncoderModule(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))

-bool AndroidEncoderModule::SupportsCodec(CodecType aCodec) const {
-  return (aCodec == CodecType::H264 &&
+bool AndroidEncoderModule::SupportsMimeType(const nsACString& aMimeType) const {
+  return (MP4Decoder::IsH264(aMimeType) &&
           java::HardwareCodecCapabilityUtils::HasHWH264(true /* encoder */)) ||
-         (aCodec == CodecType::VP8 &&
+         (VPXDecoder::IsVP8(aMimeType) &&
          java::HardwareCodecCapabilityUtils::HasHWVP8(true /* encoder */)) ||
-         (aCodec == CodecType::VP9 &&
+         (VPXDecoder::IsVP9(aMimeType) &&
          java::HardwareCodecCapabilityUtils::HasHWVP9(true /* encoder */));
 }

-bool AndroidEncoderModule::Supports(const EncoderConfig& aConfig) const {
-  // No deep check here for now
-  return SupportsCodec(aConfig.mCodec);
-}
-
 already_AddRefed<MediaDataEncoder> AndroidEncoderModule::CreateVideoEncoder(
-    const EncoderConfig& aConfig, const RefPtr<TaskQueue>& aTaskQueue) const {
-  if (!Supports(aConfig)) {
-    AND_PEM_LOG("Unsupported codec type: %d", static_cast<int>(aConfig.mCodec));
-    return nullptr;
+    const CreateEncoderParams& aParams, const bool aHardwareNotAllowed) const {
+  // TODO: extend AndroidDataEncoder and Java codec to accept this option.
+  MOZ_ASSERT(!aHardwareNotAllowed);
+
+  RefPtr<MediaDataEncoder> encoder;
+  switch (CreateEncoderParams::CodecTypeForMime(aParams.mConfig.mMimeType)) {
+    case MediaDataEncoder::CodecType::H264:
+      return MakeRefPtr<AndroidDataEncoder<MediaDataEncoder::H264Config>>(
+                 aParams.ToH264Config(), aParams.mTaskQueue)
+          .forget();
+    case MediaDataEncoder::CodecType::VP8:
+      return MakeRefPtr<AndroidDataEncoder<MediaDataEncoder::VP8Config>>(
+                 aParams.ToVP8Config(), aParams.mTaskQueue)
+          .forget();
+    case MediaDataEncoder::CodecType::VP9:
+      return MakeRefPtr<AndroidDataEncoder<MediaDataEncoder::VP9Config>>(
+                 aParams.ToVP9Config(), aParams.mTaskQueue)
+          .forget();
+    default:
+      AND_PEM_LOG("Unsupported MIME type:%s", aParams.mConfig.mMimeType.get());
+      return nullptr;
   }
-  return MakeRefPtr<AndroidDataEncoder>(aConfig, aTaskQueue).forget();
 }

 } // namespace mozilla
@@ -11,17 +11,11 @@ namespace mozilla {

 class AndroidEncoderModule final : public PlatformEncoderModule {
  public:
-  AndroidEncoderModule() = default;
-  virtual ~AndroidEncoderModule() = default;
-  // aCodec is the full codec string
-  bool Supports(const EncoderConfig& aConfig) const override;
-  bool SupportsCodec(CodecType aCodec) const override;
-
-  const char* GetName() const override { return "Android Encoder Module"; }
+  bool SupportsMimeType(const nsACString& aMimeType) const override;

   already_AddRefed<MediaDataEncoder> CreateVideoEncoder(
-      const EncoderConfig& aConfig,
-      const RefPtr<TaskQueue>& aTaskQueue) const override;
+      const CreateEncoderParams& aParams,
+      const bool aHardwareNotAllowed) const override;
 };

 } // namespace mozilla
@@ -13,6 +13,7 @@
 #include "MP4Decoder.h"
 #include "VideoUtils.h"
+#include "VPXDecoder.h"
 #include "mozilla/DebugOnly.h"
 #include "mozilla/Logging.h"
 #include "mozilla/StaticPrefs_media.h"
 #include "mozilla/gfx/gfxVars.h"
@@ -173,7 +174,11 @@ bool AppleDecoderModule::IsVideoSupported(
   }
   int profile = aConfig.mExtraData->ElementAt(4);

-  return profile == 0 || profile == 2;
+  if (profile != 0 && profile != 2) {
+    return false;
+  }
+
+  return true;
 }

 /* static */
@@ -7,63 +7,19 @@
 #include "AppleEncoderModule.h"

 #include "AppleVTEncoder.h"
-#include "VideoUtils.h"
+#include "MP4Decoder.h"

 namespace mozilla {

-extern LazyLogModule sPEMLog;
-#define LOGE(fmt, ...)                                                \
-  MOZ_LOG(sPEMLog, mozilla::LogLevel::Error,                          \
-          ("[AppleEncoderModule] %s: " fmt, __func__, ##__VA_ARGS__))
-#define LOGD(fmt, ...)                                                \
-  MOZ_LOG(sPEMLog, mozilla::LogLevel::Debug,                          \
-          ("[AppleEncoderModule] %s: " fmt, __func__, ##__VA_ARGS__))
-
-bool AppleEncoderModule::SupportsCodec(CodecType aCodec) const {
-  return aCodec == CodecType::H264;
-}
-
-bool AppleEncoderModule::Supports(const EncoderConfig& aConfig) const {
-  if (aConfig.mCodec == CodecType::H264) {
-    if (!aConfig.mCodecSpecific ||
-        !aConfig.mCodecSpecific->is<H264Specific>()) {
-      LOGE(
-          "Asking for support codec for h264 without h264 specific config, "
-          "error.");
-      return false;
-    }
-    H264Specific specific = aConfig.mCodecSpecific->as<H264Specific>();
-    int width = aConfig.mSize.width;
-    int height = aConfig.mSize.height;
-    if (width % 2 || !width) {
-      LOGE("Invalid width of %d for h264", width);
-      return false;
-    }
-    if (height % 2 || !height) {
-      LOGE("Invalid height of %d for h264", height);
-      return false;
-    }
-    if (specific.mProfile != H264_PROFILE_BASE &&
-        specific.mProfile != H264_PROFILE_MAIN) {
-      LOGE("Invalid profile of %d for h264", specific.mProfile);
-      return false;
-    }
-    if (width > 4096 || height > 4096) {
-      LOGE("Invalid dimensions of %dx%d for h264 encoding", width, height);
-      return false;
-    }
-    return true;
-  }
-  return false;
+bool AppleEncoderModule::SupportsMimeType(const nsACString& aMimeType) const {
+  return MP4Decoder::IsH264(aMimeType);
 }

 already_AddRefed<MediaDataEncoder> AppleEncoderModule::CreateVideoEncoder(
-    const EncoderConfig& aConfig, const RefPtr<TaskQueue>& aTaskQueue) const {
-  RefPtr<MediaDataEncoder> encoder(new AppleVTEncoder(aConfig, aTaskQueue));
+    const CreateEncoderParams& aParams, const bool aHardwareNotAllowed) const {
+  RefPtr<MediaDataEncoder> encoder(new AppleVTEncoder(
+      aParams.ToH264Config(), aParams.mTaskQueue, aHardwareNotAllowed));
   return encoder.forget();
 }

-#undef LOGE
-#undef LOGD
-
 } // namespace mozilla
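The removed Supports() above doubles as documentation of what VideoToolbox H.264 encoding accepts: non-zero even dimensions, at most 4096 on each axis, and Baseline or Main profile. Restated as a compact standalone check (toy types, not the Gecko API):

enum class H264Profile { Baseline, Main, High };

struct VideoSize {
  int width;
  int height;
};

// Pre-flight validation mirroring the constraints listed above.
static bool IsSupportedH264Config(VideoSize aSize, H264Profile aProfile) {
  const bool evenNonZero = aSize.width > 0 && aSize.height > 0 &&
                           aSize.width % 2 == 0 && aSize.height % 2 == 0;
  const bool withinLimit = aSize.width <= 4096 && aSize.height <= 4096;
  const bool profileOk =
      aProfile == H264Profile::Baseline || aProfile == H264Profile::Main;
  return evenNonZero && withinLimit && profileOk;
}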
@@ -12,16 +12,14 @@
 namespace mozilla {
 class AppleEncoderModule final : public PlatformEncoderModule {
  public:
-  virtual ~AppleEncoderModule() = default;
+  AppleEncoderModule() {}
+  virtual ~AppleEncoderModule() {}

-  bool Supports(const EncoderConfig& aConfig) const override;
-  bool SupportsCodec(CodecType aCodec) const override;
-
-  const char* GetName() const override { return "Apple Encoder Module"; }
+  bool SupportsMimeType(const nsACString& aMimeType) const override;

   already_AddRefed<MediaDataEncoder> CreateVideoEncoder(
-      const EncoderConfig& aConfig,
-      const RefPtr<TaskQueue>& aTaskQueue) const override;
+      const CreateEncoderParams& aParams,
+      const bool aHardwareNotAllowed) const override;
 };

 } // namespace mozilla
@@ -20,10 +20,12 @@
 #include "VPXDecoder.h"
 #include "VideoUtils.h"
+#include "gfxMacUtils.h"
 #include "gfxPlatform.h"
+#include "mozilla/ArrayUtils.h"
 #include "mozilla/Logging.h"
 #include "mozilla/TaskQueue.h"
 #include "mozilla/gfx/gfxVars.h"
 #include "nsThreadUtils.h"

 #define LOG(...) DDMOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, __VA_ARGS__)
 #define LOGEX(_this, ...) \
@@ -35,7 +37,7 @@ using namespace layers;

 AppleVTDecoder::AppleVTDecoder(const VideoInfo& aConfig,
                                layers::ImageContainer* aImageContainer,
-                               const CreateDecoderParams::OptionSet& aOptions,
+                               CreateDecoderParams::OptionSet aOptions,
                                layers::KnowsCompositor* aKnowsCompositor,
                                Maybe<TrackingId> aTrackingId)
     : mExtraData(aConfig.mExtraData),
@@ -334,8 +336,7 @@ static void PlatformCallback(void* decompressionOutputRefCon,
     NS_WARNING("VideoToolbox decoder returned an error");
     decoder->OnDecodeError(status);
     return;
-  }
-  if (!image) {
+  } else if (!image) {
     NS_WARNING("VideoToolbox decoder returned no data");
   } else if (flags & kVTDecodeInfo_FrameDropped) {
     NS_WARNING(" ...frame tagged as dropped...");
@@ -605,8 +606,7 @@ MediaResult AppleVTDecoder::InitializeSession() {
       mStreamType == StreamType::H264
           ? kCMVideoCodecType_H264
           : CMVideoCodecType(AppleDecoderModule::kCMVideoCodecType_VP9),
-      AssertedCast<int32_t>(mPictureWidth),
-      AssertedCast<int32_t>(mPictureHeight), extensions, &mFormat);
+      mPictureWidth, mPictureHeight, extensions, &mFormat);
   if (rv != noErr) {
     return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                        RESULT_DETAIL("Couldn't create format description!"));
@@ -652,9 +652,8 @@ MediaResult AppleVTDecoder::InitializeSession() {
 }

 CFDictionaryRef AppleVTDecoder::CreateDecoderExtensions() {
-  AutoCFRelease<CFDataRef> data =
-      CFDataCreate(kCFAllocatorDefault, mExtraData->Elements(),
-                   AssertedCast<CFIndex>(mExtraData->Length()));
+  AutoCFRelease<CFDataRef> data = CFDataCreate(
+      kCFAllocatorDefault, mExtraData->Elements(), mExtraData->Length());

   const void* atomsKey[1];
   atomsKey[0] = mStreamType == StreamType::H264 ? CFSTR("avcC") : CFSTR("vpcC");
@@ -31,7 +31,7 @@ class AppleVTDecoder final : public MediaDataDecoder,

   AppleVTDecoder(const VideoInfo& aConfig,
                  layers::ImageContainer* aImageContainer,
-                 const CreateDecoderParams::OptionSet& aOptions,
+                 CreateDecoderParams::OptionSet aOptions,
                  layers::KnowsCompositor* aKnowsCompositor,
                  Maybe<TrackingId> aTrackingId);
@@ -14,14 +14,16 @@
 #include "AnnexB.h"
 #include "H264.h"

+#include "libyuv.h"
+
 #include "AppleUtils.h"

 namespace mozilla {
 extern LazyLogModule sPEMLog;
-#define LOGE(fmt, ...)                                                \
+#define VTENC_LOGE(fmt, ...)                                          \
   MOZ_LOG(sPEMLog, mozilla::LogLevel::Error,                          \
           ("[AppleVTEncoder] %s: " fmt, __func__, ##__VA_ARGS__))
-#define LOGD(fmt, ...)                                                \
+#define VTENC_LOGD(fmt, ...)                                          \
   MOZ_LOG(sPEMLog, mozilla::LogLevel::Debug,                          \
           ("[AppleVTEncoder] %s: " fmt, __func__, ##__VA_ARGS__))

@@ -42,18 +44,18 @@ static void FrameCallback(void* aEncoder, void* aFrameRefCon, OSStatus aStatus,
                           VTEncodeInfoFlags aInfoFlags,
                           CMSampleBufferRef aSampleBuffer) {
   if (aStatus != noErr || !aSampleBuffer) {
-    LOGE("VideoToolbox encoder returned no data status=%d sample=%p", aStatus,
-         aSampleBuffer);
+    VTENC_LOGE("VideoToolbox encoder returned no data status=%d sample=%p",
+               aStatus, aSampleBuffer);
     aSampleBuffer = nullptr;
   } else if (aInfoFlags & kVTEncodeInfo_FrameDropped) {
-    LOGE("frame tagged as dropped");
+    VTENC_LOGE("frame tagged as dropped");
     return;
   }
   (static_cast<AppleVTEncoder*>(aEncoder))->OutputFrame(aSampleBuffer);
 }

 static bool SetAverageBitrate(VTCompressionSessionRef& aSession,
-                              uint32_t aBitsPerSec) {
+                              MediaDataEncoder::Rate aBitsPerSec) {
   int64_t bps(aBitsPerSec);
   AutoCFRelease<CFNumberRef> bitrate(
       CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &bps));
@@ -62,68 +64,24 @@ static bool SetAverageBitrate(VTCompressionSessionRef& aSession,
                               bitrate) == noErr;
 }

-static bool SetConstantBitrate(VTCompressionSessionRef& aSession,
-                               uint32_t aBitsPerSec) {
-  int64_t bps(aBitsPerSec);
-  AutoCFRelease<CFNumberRef> bitrate(
-      CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &bps));
-  // Not available before macOS 13 -- this will fail cleanly when not supported
-  // but the symbol kVTCompressionPropertyKey_ConstantBitRate isn't available
-  // return VTSessionSetProperty(aSession,
-  // bitrate) == noErr;
-
-  if (__builtin_available(macos 13.0, *)) {
-    return VTSessionSetProperty(aSession,
-                                kVTCompressionPropertyKey_ConstantBitRate,
-                                bitrate) == noErr;
-  }
-  return false;
-}
-
-static bool SetBitrateAndMode(VTCompressionSessionRef& aSession,
-                              MediaDataEncoder::BitrateMode aBitrateMode,
-                              uint32_t aBitsPerSec) {
-  if (aBitrateMode == MediaDataEncoder::BitrateMode::Variable) {
-    return SetAverageBitrate(aSession, aBitsPerSec);
-  }
-  return SetConstantBitrate(aSession, aBitsPerSec);
-}
-
-static bool SetFrameRate(VTCompressionSessionRef& aSession, int64_t aFPS) {
-  AutoCFRelease<CFNumberRef> framerate(
-      CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &framerate));
-  return VTSessionSetProperty(aSession,
-                              kVTCompressionPropertyKey_ExpectedFrameRate,
-                              framerate) == noErr;
-}
-
-static bool SetRealtime(VTCompressionSessionRef& aSession, bool aEnabled) {
-  if (aEnabled) {
-    return VTSessionSetProperty(aSession, kVTCompressionPropertyKey_RealTime,
-                                kCFBooleanTrue) == noErr &&
-           VTSessionSetProperty(aSession,
-                                kVTCompressionPropertyKey_AllowFrameReordering,
-                                kCFBooleanFalse) == noErr;
-  }
+static bool SetRealtimeProperties(VTCompressionSessionRef& aSession) {
   return VTSessionSetProperty(aSession, kVTCompressionPropertyKey_RealTime,
-                              kCFBooleanFalse) == noErr &&
+                              kCFBooleanTrue) == noErr &&
          VTSessionSetProperty(aSession,
                               kVTCompressionPropertyKey_AllowFrameReordering,
-                              kCFBooleanTrue) == noErr;
+                              kCFBooleanFalse) == noErr;
 }

 static bool SetProfileLevel(VTCompressionSessionRef& aSession,
-                            H264_PROFILE aValue) {
+                            AppleVTEncoder::H264Specific::ProfileLevel aValue) {
   CFStringRef profileLevel = nullptr;
   switch (aValue) {
-    case H264_PROFILE::H264_PROFILE_BASE:
+    case AppleVTEncoder::H264Specific::ProfileLevel::BaselineAutoLevel:
       profileLevel = kVTProfileLevel_H264_Baseline_AutoLevel;
       break;
-    case H264_PROFILE::H264_PROFILE_MAIN:
+    case AppleVTEncoder::H264Specific::ProfileLevel::MainAutoLevel:
       profileLevel = kVTProfileLevel_H264_Main_AutoLevel;
       break;
     default:
-      LOGE("Profile %d not handled", static_cast<int>(aValue));
   }

   return profileLevel ? VTSessionSetProperty(
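Two notes on the helpers removed in the hunk above. First, the removed SetFrameRate passes &framerate (the CFNumberRef it is in the middle of creating) to CFNumberCreate instead of &aFPS, which reads like a latent bug rather than intent. Second, SetConstantBitrate guards kVTCompressionPropertyKey_ConstantBitRate with a runtime availability check, because the key only exists on macOS 13 and later. A minimal sketch of that guard pattern (error handling elided):

#include <VideoToolbox/VideoToolbox.h>

// Try a property that only exists on newer macOS; report false otherwise so
// the caller can fall back (e.g. to average-bitrate mode).
static bool TrySetConstantBitrate(VTCompressionSessionRef aSession,
                                  CFNumberRef aBitrate) {
  if (__builtin_available(macOS 13.0, *)) {
    return VTSessionSetProperty(aSession,
                                kVTCompressionPropertyKey_ConstantBitRate,
                                aBitrate) == noErr;
  }
  return false;
}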
@ -136,7 +94,6 @@ RefPtr<MediaDataEncoder::InitPromise> AppleVTEncoder::Init() {
|
|||
MOZ_ASSERT(!mInited, "Cannot initialize encoder again without shutting down");
|
||||
|
||||
if (mConfig.mSize.width == 0 || mConfig.mSize.height == 0) {
|
||||
LOGE("width or height 0 in encoder init");
|
||||
return InitPromise::CreateAndReject(NS_ERROR_ILLEGAL_VALUE, __func__);
|
||||
}
|
||||
|
||||
|
|
@ -144,7 +101,6 @@ RefPtr<MediaDataEncoder::InitPromise> AppleVTEncoder::Init() {
|
|||
AutoCFRelease<CFDictionaryRef> srcBufferAttr(
|
||||
BuildSourceImageBufferAttributes());
|
||||
if (!srcBufferAttr) {
|
||||
LOGE("Failed to create source buffer attr");
|
||||
return InitPromise::CreateAndReject(
|
||||
MediaResult(NS_ERROR_DOM_MEDIA_NOT_SUPPORTED_ERR,
|
||||
"fail to create source buffer attributes"),
|
||||
|
|
@ -157,44 +113,36 @@ RefPtr<MediaDataEncoder::InitPromise> AppleVTEncoder::Init() {
|
|||
&FrameCallback, this /* outputCallbackRefCon */, &mSession);
|
||||
|
||||
if (status != noErr) {
|
||||
LOGE("Failed to create compression session");
|
||||
return InitPromise::CreateAndReject(
|
||||
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
|
||||
"fail to create encoder session"),
|
||||
__func__);
|
||||
}
|
||||
|
||||
if (mConfig.mUsage == Usage::Realtime && !SetRealtime(mSession, true)) {
|
||||
LOGE("fail to configurate realtime properties");
|
||||
if (!SetAverageBitrate(mSession, mConfig.mBitsPerSec)) {
|
||||
return InitPromise::CreateAndReject(
|
||||
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
|
||||
"fail to configurate average bitrate"),
|
||||
__func__);
|
||||
}
|
||||
|
||||
if (mConfig.mBitrate) {
|
||||
if (!SetBitrateAndMode(mSession, mConfig.mBitrateMode, mConfig.mBitrate)) {
|
||||
LOGE("failed to set bitrate to %d and mode to %s", mConfig.mBitrate,
|
||||
mConfig.mBitrateMode == MediaDataEncoder::BitrateMode::Constant
|
||||
? "constant"
|
||||
: "variable");
|
||||
return InitPromise::CreateAndReject(
|
||||
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
|
||||
"fail to configurate bitrate"),
|
||||
__func__);
|
||||
}
|
||||
if (mConfig.mUsage == Usage::Realtime && !SetRealtimeProperties(mSession)) {
|
||||
VTENC_LOGE("fail to configurate realtime properties");
|
||||
return InitPromise::CreateAndReject(
|
||||
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
|
||||
"fail to configurate average bitrate"),
|
||||
__func__);
|
||||
}
|
||||
|
||||
int64_t interval =
|
||||
mConfig.mKeyframeInterval > std::numeric_limits<int64_t>::max()
|
||||
? std::numeric_limits<int64_t>::max()
|
||||
: AssertedCast<int64_t>(mConfig.mKeyframeInterval);
|
||||
: mConfig.mKeyframeInterval;
|
||||
AutoCFRelease<CFNumberRef> cf(
|
||||
CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &interval));
|
||||
if (VTSessionSetProperty(mSession,
|
||||
kVTCompressionPropertyKey_MaxKeyFrameInterval,
|
||||
cf) != noErr) {
|
||||
LOGE("Failed to set max keyframe interval");
|
||||
return InitPromise::CreateAndReject(
|
||||
MediaResult(
|
||||
NS_ERROR_DOM_MEDIA_FATAL_ERR,
|
||||
|
|
@ -204,13 +152,12 @@ RefPtr<MediaDataEncoder::InitPromise> AppleVTEncoder::Init() {
|
|||
}
|
||||
|
||||
if (mConfig.mCodecSpecific) {
|
||||
const H264Specific& specific = mConfig.mCodecSpecific->as<H264Specific>();
|
||||
if (!SetProfileLevel(mSession, specific.mProfile)) {
|
||||
LOGE("Failed to set profile level");
|
||||
const H264Specific& specific = mConfig.mCodecSpecific.ref();
|
||||
if (!SetProfileLevel(mSession, specific.mProfileLevel)) {
|
||||
return InitPromise::CreateAndReject(
|
||||
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
|
||||
nsPrintfCString("fail to configurate profile level:%d",
|
||||
int(specific.mProfile))),
|
||||
int(specific.mProfileLevel))),
|
||||
__func__);
|
||||
}
|
||||
}
|
||||
|
|
@ -220,7 +167,6 @@ RefPtr<MediaDataEncoder::InitPromise> AppleVTEncoder::Init() {
|
|||
mSession, kVTCompressionPropertyKey_UsingHardwareAcceleratedVideoEncoder,
|
||||
kCFAllocatorDefault, &isUsingHW);
|
||||
mIsHardwareAccelerated = status == noErr && isUsingHW == kCFBooleanTrue;
|
||||
LOGD("Using hw acceleration: %s", mIsHardwareAccelerated ? "yes" : "no");
|
||||
if (isUsingHW) {
|
||||
CFRelease(isUsingHW);
|
||||
}
|
||||
|
|
@ -255,7 +201,7 @@ static Maybe<OSType> MapPixelFormat(MediaDataEncoder::PixelFormat aFormat) {
|
|||
CFDictionaryRef AppleVTEncoder::BuildSourceImageBufferAttributes() {
|
||||
Maybe<OSType> fmt = MapPixelFormat(mConfig.mSourcePixelFormat);
|
||||
if (fmt.isNothing()) {
|
||||
LOGE("unsupported source pixel format");
|
||||
VTENC_LOGE("unsupported source pixel format");
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
|
|
@ -295,7 +241,7 @@ static size_t GetNumParamSets(CMFormatDescriptionRef aDescription) {
|
|||
OSStatus status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
|
||||
aDescription, 0, nullptr, nullptr, &numParamSets, nullptr);
|
||||
if (status != noErr) {
|
||||
LOGE("Cannot get number of parameter sets from format description");
|
||||
VTENC_LOGE("Cannot get number of parameter sets from format description");
|
||||
}
|
||||
|
||||
return numParamSets;
|
||||
|
|
@ -310,7 +256,7 @@ static size_t GetParamSet(CMFormatDescriptionRef aDescription, size_t aIndex,
|
|||
if (CMVideoFormatDescriptionGetH264ParameterSetAtIndex(
|
||||
aDescription, aIndex, aDataPtr, &length, nullptr, &headerSize) !=
|
||||
noErr) {
|
||||
LOGE("failed to get parameter set from format description");
|
||||
VTENC_LOGE("fail to get parameter set from format description");
|
||||
return 0;
|
||||
}
|
||||
MOZ_ASSERT(headerSize == sizeof(kNALUStart), "Only support 4 byte header");
|
||||
|
|
@ -330,11 +276,11 @@ static bool WriteSPSPPS(MediaRawData* aDst,
|
|||
return false;
|
||||
}
|
||||
if (!writer->Append(kNALUStart, sizeof(kNALUStart))) {
|
||||
LOGE("Cannot write NAL unit start code");
|
||||
VTENC_LOGE("Cannot write NAL unit start code");
|
||||
return false;
|
||||
}
|
||||
if (!writer->Append(data, length)) {
|
||||
LOGE("Cannot write parameter set");
|
||||
VTENC_LOGE("Cannot write parameter set");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
|
@ -347,19 +293,19 @@ static RefPtr<MediaByteBuffer> extractAvcc(
|
|||
aDescription,
|
||||
kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms);
|
||||
if (!list) {
|
||||
LOGE("fail to get atoms");
|
||||
VTENC_LOGE("fail to get atoms");
|
||||
return nullptr;
|
||||
}
|
||||
CFDataRef avcC = static_cast<CFDataRef>(
|
||||
CFDictionaryGetValue(static_cast<CFDictionaryRef>(list), CFSTR("avcC")));
|
||||
if (!avcC) {
|
||||
LOGE("fail to extract avcC");
|
||||
VTENC_LOGE("fail to extract avcC");
|
||||
return nullptr;
|
||||
}
|
||||
CFIndex length = CFDataGetLength(avcC);
|
||||
const UInt8* bytes = CFDataGetBytePtr(avcC);
|
||||
if (length <= 0 || !bytes) {
|
||||
LOGE("empty avcC");
|
||||
VTENC_LOGE("empty avcC");
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
|
|
@ -377,7 +323,7 @@ bool AppleVTEncoder::WriteExtraData(MediaRawData* aDst, CMSampleBufferRef aSrc,
|
|||
aDst->mKeyframe = true;
|
||||
CMFormatDescriptionRef desc = CMSampleBufferGetFormatDescription(aSrc);
|
||||
if (!desc) {
|
||||
LOGE("fail to get format description from sample");
|
||||
VTENC_LOGE("fail to get format description from sample");
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
@ -403,14 +349,14 @@ static bool WriteNALUs(MediaRawData* aDst, CMSampleBufferRef aSrc,
|
|||
size_t srcRemaining = CMSampleBufferGetTotalSampleSize(aSrc);
|
||||
CMBlockBufferRef block = CMSampleBufferGetDataBuffer(aSrc);
|
||||
if (!block) {
|
||||
LOGE("Cannot get block buffer frome sample");
|
||||
VTENC_LOGE("Cannot get block buffer frome sample");
|
||||
return false;
|
||||
}
|
||||
UniquePtr<MediaRawDataWriter> writer(aDst->CreateWriter());
|
||||
size_t writtenLength = aDst->Size();
|
||||
// Ensure capacity.
|
||||
if (!writer->SetSize(writtenLength + srcRemaining)) {
|
||||
LOGE("Cannot allocate buffer");
|
||||
VTENC_LOGE("Cannot allocate buffer");
|
||||
return false;
|
||||
}
|
||||
size_t readLength = 0;
|
||||
|
|
@ -421,7 +367,7 @@ static bool WriteNALUs(MediaRawData* aDst, CMSampleBufferRef aSrc,
|
|||
if (CMBlockBufferCopyDataBytes(block, readLength, sizeof(unitSizeBytes),
|
||||
reinterpret_cast<uint32_t*>(
|
||||
unitSizeBytes)) != kCMBlockBufferNoErr) {
|
||||
LOGE("Cannot copy unit size bytes");
|
||||
VTENC_LOGE("Cannot copy unit size bytes");
|
||||
return false;
|
||||
}
|
||||
size_t unitSize =
|
||||
|
|
@ -442,7 +388,7 @@ static bool WriteNALUs(MediaRawData* aDst, CMSampleBufferRef aSrc,
|
|||
if (CMBlockBufferCopyDataBytes(block, readLength, unitSize,
|
||||
writer->Data() + writtenLength) !=
|
||||
kCMBlockBufferNoErr) {
|
||||
LOGE("Cannot copy unit data");
|
||||
VTENC_LOGE("Cannot copy unit data");
|
||||
return false;
|
||||
}
|
||||
readLength += unitSize;
|
||||
|
|
@ -454,7 +400,6 @@ static bool WriteNALUs(MediaRawData* aDst, CMSampleBufferRef aSrc,
|
|||
}
|
||||
|
||||
void AppleVTEncoder::OutputFrame(CMSampleBufferRef aBuffer) {
|
||||
LOGD("::OutputFrame");
|
||||
RefPtr<MediaRawData> output(new MediaRawData());
|
||||
|
||||
bool asAnnexB = mConfig.mUsage == Usage::Realtime;
|
||||
|
|
@ -477,13 +422,11 @@ void AppleVTEncoder::ProcessOutput(RefPtr<MediaRawData>&& aOutput) {
|
|||
Unused << rv;
|
||||
return;
|
||||
}
|
||||
LOGD("::ProcessOutput (%zu bytes)", !aOutput.get() ? 0 : aOutput->Size());
|
||||
AssertOnTaskQueue();
|
||||
|
||||
if (aOutput) {
|
||||
mEncodedData.AppendElement(std::move(aOutput));
|
||||
} else {
|
||||
LOGE("::ProcessOutput: fatal error");
|
||||
mError = NS_ERROR_DOM_MEDIA_FATAL_ERR;
|
||||
}
|
||||
}
|
||||
|
|
@ -498,16 +441,8 @@ RefPtr<MediaDataEncoder::EncodePromise> AppleVTEncoder::Encode(
|
|||
std::move(sample));
|
||||
}
|
||||
|
||||
RefPtr<MediaDataEncoder::ReconfigurationPromise> AppleVTEncoder::Reconfigure(
|
||||
const RefPtr<const EncoderConfigurationChangeList>& aConfigurationChanges) {
|
||||
return InvokeAsync<const RefPtr<const EncoderConfigurationChangeList>&>(
|
||||
mTaskQueue, this, __func__, &AppleVTEncoder::ProcessReconfigure,
|
||||
aConfigurationChanges);
|
||||
}
|
||||
|
||||
RefPtr<MediaDataEncoder::EncodePromise> AppleVTEncoder::ProcessEncode(
|
||||
const RefPtr<const VideoData>& aSample) {
|
||||
LOGD("::ProcessEncode");
|
||||
RefPtr<const VideoData> aSample) {
|
||||
AssertOnTaskQueue();
|
||||
MOZ_ASSERT(mSession);
|
||||
|
||||
|
|
@@ -538,7 +473,6 @@ RefPtr<MediaDataEncoder::EncodePromise> AppleVTEncoder::ProcessEncode(
       CMTimeMake(aSample->mDuration.ToMicroseconds(), USECS_PER_S), frameProps,
       nullptr /* sourceFrameRefcon */, &info);
   if (status != noErr) {
-    LOGE("VTCompressionSessionEncodeFrame error");
     return EncodePromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                                           __func__);
   }

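The submit path above centers on VTCompressionSessionEncodeFrame(). A hedged, self-contained sketch of that call, assuming a session created with an output callback; SubmitFrame and its parameters are illustrative, not this class's API:

#include <CoreMedia/CoreMedia.h>
#include <VideoToolbox/VideoToolbox.h>

// Hand one pixel buffer to the compression session, with pts/duration in
// microseconds; a keyframe can be forced via per-frame properties.
static bool SubmitFrame(VTCompressionSessionRef aSession,
                        CVImageBufferRef aImage, int64_t aPtsUs,
                        int64_t aDurationUs, bool aForceKeyframe) {
  CFDictionaryRef frameProps = nullptr;
  if (aForceKeyframe) {
    const void* keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};
    const void* values[] = {kCFBooleanTrue};
    frameProps = CFDictionaryCreate(kCFAllocatorDefault, keys, values, 1,
                                    &kCFTypeDictionaryKeyCallBacks,
                                    &kCFTypeDictionaryValueCallBacks);
  }
  VTEncodeInfoFlags info = 0;
  OSStatus status = VTCompressionSessionEncodeFrame(
      aSession, aImage, CMTimeMake(aPtsUs, 1000000),
      CMTimeMake(aDurationUs, 1000000), frameProps,
      nullptr /* sourceFrameRefcon */, &info);
  if (frameProps) {
    CFRelease(frameProps);
  }
  return status == noErr;  // Encoded output arrives via the output callback.
}
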
@@ -546,51 +480,6 @@ RefPtr<MediaDataEncoder::EncodePromise> AppleVTEncoder::ProcessEncode(
   return EncodePromise::CreateAndResolve(std::move(mEncodedData), __func__);
 }
 
-RefPtr<MediaDataEncoder::ReconfigurationPromise>
-AppleVTEncoder::ProcessReconfigure(
-    const RefPtr<const EncoderConfigurationChangeList>& aConfigurationChanges) {
-  bool ok = false;
-  for (const auto& confChange : aConfigurationChanges->mChanges) {
-    ok |= confChange.match(
-        // Not supported yet
-        [&](const DimensionsChange& aChange) -> bool { return false; },
-        [&](const DisplayDimensionsChange& aChange) -> bool { return false; },
-        [&](const BitrateModeChange& aChange) -> bool {
-          mConfig.mBitrateMode = aChange.get();
-          return SetBitrateAndMode(mSession, mConfig.mBitrateMode,
-                                   mConfig.mBitrate);
-        },
-        [&](const BitrateChange& aChange) -> bool {
-          mConfig.mBitrate = aChange.get().refOr(0);
-          // 0 is the default in AppleVTEncoder: the encoder chooses the bitrate
-          // based on the content.
-          return SetBitrateAndMode(mSession, mConfig.mBitrateMode,
-                                   mConfig.mBitrate);
-        },
-        [&](const FramerateChange& aChange) -> bool {
-          // 0 means default, in VideoToolbox, and is valid, perform some light
-          // sanitation on other values.
-          double fps = aChange.get().refOr(0);
-          if (std::isnan(fps) || fps < 0 ||
-              int64_t(fps) > std::numeric_limits<int32_t>::max()) {
-            LOGE("Invalid fps of %lf", fps);
-            return false;
-          }
-          return SetFrameRate(mSession, AssertedCast<int64_t>(fps));
-        },
-        [&](const UsageChange& aChange) -> bool {
-          mConfig.mUsage = aChange.get();
-          return SetRealtime(mSession, aChange.get() == Usage::Realtime);
-        },
-        [&](const ContentHintChange& aChange) -> bool { return false; });
-  };
-  using P = MediaDataEncoder::ReconfigurationPromise;
-  if (ok) {
-    return P::CreateAndResolve(true, __func__);
-  }
-  return P::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
-}
-
 static size_t NumberOfPlanes(MediaDataEncoder::PixelFormat aPixelFormat) {
   switch (aPixelFormat) {
     case MediaDataEncoder::PixelFormat::RGBA32:

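The removed ProcessReconfigure() dispatches on a mozilla::Variant of change types through match(). The same shape with std::variant/std::visit, as a standalone sketch; the change structs are stand-ins, and success is accumulated with |= exactly as above, so one applied change is enough to resolve:

#include <cstdint>
#include <type_traits>
#include <variant>
#include <vector>

// Illustrative stand-ins for the configuration-change types matched above.
struct BitrateChange { uint32_t mBitsPerSec; };
struct FramerateChange { double mFps; };
using ConfigChange = std::variant<BitrateChange, FramerateChange>;

// Apply a list of changes; each handler may refuse, and the result is OR'd
// together, mirroring the `ok |= confChange.match(...)` loop.
static bool ApplyChanges(const std::vector<ConfigChange>& aChanges) {
  bool ok = false;
  for (const auto& change : aChanges) {
    ok |= std::visit(
        [](auto&& c) -> bool {
          using T = std::decay_t<decltype(c)>;
          if constexpr (std::is_same_v<T, BitrateChange>) {
            return c.mBitsPerSec > 0;  // e.g. push to the encoder session.
          } else {
            return c.mFps >= 0;  // Reject nonsensical framerates.
          }
        },
        change);
  }
  return ok;
}
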
@@ -605,18 +494,13 @@ static size_t NumberOfPlanes(MediaDataEncoder::PixelFormat aPixelFormat) {
     case MediaDataEncoder::PixelFormat::YUV420SP_NV12:
       return 2;
     default:
-      LOGE("Unsupported input pixel format");
+      VTENC_LOGE("Unsupported input pixel format");
       return 0;
   }
 }
 
 using namespace layers;
 
-static void ReleaseImageInterleaved(void* aReleaseRef,
-                                    const void* aBaseAddress) {
-  (static_cast<Image*>(aReleaseRef))->Release();
-}
-
 static void ReleaseImage(void* aImageGrip, const void* aDataPtr,
                          size_t aDataSize, size_t aNumOfPlanes,
                          const void** aPlanes) {

@@ -626,91 +510,61 @@ static void ReleaseImage(void* aImageGrip, const void* aDataPtr,
 CVPixelBufferRef AppleVTEncoder::CreateCVPixelBuffer(const Image* aSource) {
   AssertOnTaskQueue();
 
-  if (aSource->GetFormat() == ImageFormat::PLANAR_YCBCR) {
-    PlanarYCbCrImage* image = const_cast<Image*>(aSource)->AsPlanarYCbCrImage();
-    if (!image || !image->GetData()) {
-      return nullptr;
-    }
+  // TODO: support types other than YUV
+  PlanarYCbCrImage* image = const_cast<Image*>(aSource)->AsPlanarYCbCrImage();
+  if (!image || !image->GetData()) {
+    return nullptr;
+  }
 
-    OSType format = MapPixelFormat(mConfig.mSourcePixelFormat).ref();
-    size_t numPlanes = NumberOfPlanes(mConfig.mSourcePixelFormat);
-    const PlanarYCbCrImage::Data* yuv = image->GetData();
-    if (!yuv) {
-      return nullptr;
-    }
-    auto ySize = yuv->YDataSize();
-    auto cbcrSize = yuv->CbCrDataSize();
-    void* addresses[3] = {};
-    size_t widths[3] = {};
-    size_t heights[3] = {};
-    size_t strides[3] = {};
-    switch (numPlanes) {
-      case 3:
-        addresses[2] = yuv->mCrChannel;
-        widths[2] = cbcrSize.width;
-        heights[2] = cbcrSize.height;
-        strides[2] = yuv->mCbCrStride;
-        [[fallthrough]];
-      case 2:
-        addresses[1] = yuv->mCbChannel;
-        widths[1] = cbcrSize.width;
-        heights[1] = cbcrSize.height;
-        strides[1] = yuv->mCbCrStride;
-        [[fallthrough]];
-      case 1:
-        addresses[0] = yuv->mYChannel;
-        widths[0] = ySize.width;
-        heights[0] = ySize.height;
-        strides[0] = yuv->mYStride;
-        break;
-      default:
-        return nullptr;
-    }
+  OSType format = MapPixelFormat(mConfig.mSourcePixelFormat).ref();
+  size_t numPlanes = NumberOfPlanes(mConfig.mSourcePixelFormat);
+  const PlanarYCbCrImage::Data* yuv = image->GetData();
+  if (!yuv) {
+    return nullptr;
+  }
+  auto ySize = yuv->YDataSize();
+  auto cbcrSize = yuv->CbCrDataSize();
+  void* addresses[3] = {};
+  size_t widths[3] = {};
+  size_t heights[3] = {};
+  size_t strides[3] = {};
+  switch (numPlanes) {
+    case 3:
+      addresses[2] = yuv->mCrChannel;
+      widths[2] = cbcrSize.width;
+      heights[2] = cbcrSize.height;
+      strides[2] = yuv->mCbCrStride;
+      [[fallthrough]];
+    case 2:
+      addresses[1] = yuv->mCbChannel;
+      widths[1] = cbcrSize.width;
+      heights[1] = cbcrSize.height;
+      strides[1] = yuv->mCbCrStride;
+      [[fallthrough]];
+    case 1:
+      addresses[0] = yuv->mYChannel;
+      widths[0] = ySize.width;
+      heights[0] = ySize.height;
+      strides[0] = yuv->mYStride;
+      break;
+    default:
+      return nullptr;
+  }
 
-    CVPixelBufferRef buffer = nullptr;
-    image->AddRef();  // Grip input buffers.
-    CVReturn rv = CVPixelBufferCreateWithPlanarBytes(
-        kCFAllocatorDefault, yuv->mPictureRect.width, yuv->mPictureRect.height,
-        format, nullptr /* dataPtr */, 0 /* dataSize */, numPlanes, addresses,
-        widths, heights, strides, ReleaseImage /* releaseCallback */,
-        image /* releaseRefCon */, nullptr /* pixelBufferAttributes */,
-        &buffer);
-    if (rv == kCVReturnSuccess) {
-      return buffer;
-      // |image| will be released in |ReleaseImage()|.
-    }
-    LOGE("CVPIxelBufferCreateWithPlanarBytes error");
-  }
-  if (aSource->GetFormat() == ImageFormat::MOZ2D_SURFACE) {
-    Image* source = const_cast<Image*>(aSource);
-    RefPtr<gfx::SourceSurface> surface = source->GetAsSourceSurface();
-    RefPtr<gfx::DataSourceSurface> dataSurface = surface->GetDataSurface();
-    gfx::DataSourceSurface::ScopedMap map(dataSurface,
-                                          gfx::DataSourceSurface::READ);
-    if (NS_WARN_IF(!map.IsMapped())) {
-      LOGE("Error scopedmap");
-      return nullptr;
-    }
-    OSType format = MapPixelFormat(mConfig.mSourcePixelFormat).ref();
-    CVPixelBufferRef buffer = nullptr;
-    source->AddRef();
-
-    CVReturn rv = CVPixelBufferCreateWithBytes(
-        kCFAllocatorDefault, aSource->GetSize().Width(),
-        aSource->GetSize().Height(), format, map.GetData(), map.GetStride(),
-        ReleaseImageInterleaved, source, nullptr, &buffer);
-    if (rv == kCVReturnSuccess) {
-      return buffer;
-      // |source| will be released in |ReleaseImageInterleaved()|.
-    }
-    LOGE("CVPIxelBufferCreateWithBytes error");
-    source->Release();
-    return nullptr;
-  }
-  LOGE("Image conversion not implemented in AppleVTEncoder");
-  return nullptr;
+  CVPixelBufferRef buffer = nullptr;
+  image->AddRef();  // Grip input buffers.
+  CVReturn rv = CVPixelBufferCreateWithPlanarBytes(
+      kCFAllocatorDefault, yuv->mPictureRect.width, yuv->mPictureRect.height,
+      format, nullptr /* dataPtr */, 0 /* dataSize */, numPlanes, addresses,
+      widths, heights, strides, ReleaseImage /* releaseCallback */,
+      image /* releaseRefCon */, nullptr /* pixelBufferAttributes */, &buffer);
+  if (rv == kCVReturnSuccess) {
+    return buffer;
+    // |image| will be released in |ReleaseImage()|.
+  } else {
+    image->Release();
+    return nullptr;
+  }
 }
 
 RefPtr<MediaDataEncoder::EncodePromise> AppleVTEncoder::Drain() {

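CVPixelBufferCreateWithPlanarBytes() wraps existing planes without copying; the release callback is how the |image| grip above is eventually dropped. A minimal sketch for an NV12 frame, with a hypothetical refcounted Frame type standing in for layers::Image and half-size chroma assumed:

#include <CoreVideo/CoreVideo.h>

struct Frame;              // Hypothetical refcounted frame type.
void AddRefFrame(Frame*);  // Stand-ins for the AddRef()/Release() pair
void ReleaseFrame(Frame*);  // used on layers::Image above.

// Matches CVPixelBufferReleasePlanarBytesCallback: invoked once CoreVideo is
// done with the wrapped planes.
static void ReleasePlanes(void* aRefCon, const void* /*aDataPtr*/,
                          size_t /*aDataSize*/, size_t /*aNumPlanes*/,
                          const void** /*aPlanes*/) {
  ReleaseFrame(static_cast<Frame*>(aRefCon));
}

static CVPixelBufferRef WrapNV12(Frame* aFrame, void* aY, void* aCbCr,
                                 size_t aWidth, size_t aHeight,
                                 size_t aYStride, size_t aCbCrStride) {
  void* planes[2] = {aY, aCbCr};
  size_t widths[2] = {aWidth, aWidth / 2};
  size_t heights[2] = {aHeight, aHeight / 2};
  size_t strides[2] = {aYStride, aCbCrStride};
  AddRefFrame(aFrame);  // Grip the input, as the code above does.
  CVPixelBufferRef buffer = nullptr;
  CVReturn rv = CVPixelBufferCreateWithPlanarBytes(
      kCFAllocatorDefault, aWidth, aHeight,
      kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange, nullptr /* dataPtr */,
      0 /* dataSize */, 2, planes, widths, heights, strides, ReleasePlanes,
      aFrame /* releaseRefCon */, nullptr /* pixelBufferAttributes */,
      &buffer);
  if (rv != kCVReturnSuccess) {
    ReleaseFrame(aFrame);  // On failure the callback never fires.
    return nullptr;
  }
  return buffer;
}
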
@@ -729,7 +583,6 @@ RefPtr<MediaDataEncoder::EncodePromise> AppleVTEncoder::ProcessDrain() {
   OSStatus status =
       VTCompressionSessionCompleteFrames(mSession, kCMTimeIndefinite);
   if (status != noErr) {
-    LOGE("VTCompressionSessionCompleteFrames error");
     return EncodePromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                                           __func__);
   }

@@ -760,19 +613,16 @@ RefPtr<ShutdownPromise> AppleVTEncoder::ProcessShutdown() {
   return ShutdownPromise::CreateAndResolve(true, __func__);
 }
 
-RefPtr<GenericPromise> AppleVTEncoder::SetBitrate(uint32_t aBitsPerSec) {
+RefPtr<GenericPromise> AppleVTEncoder::SetBitrate(
+    MediaDataEncoder::Rate aBitsPerSec) {
   RefPtr<AppleVTEncoder> self = this;
   return InvokeAsync(mTaskQueue, __func__, [self, aBitsPerSec]() {
     MOZ_ASSERT(self->mSession);
-    bool rv = SetBitrateAndMode(self->mSession, self->mConfig.mBitrateMode,
-                                aBitsPerSec);
-    return rv ? GenericPromise::CreateAndResolve(true, __func__)
-              : GenericPromise::CreateAndReject(
-                    NS_ERROR_DOM_MEDIA_NOT_SUPPORTED_ERR, __func__);
+    return SetAverageBitrate(self->mSession, aBitsPerSec)
+               ? GenericPromise::CreateAndResolve(true, __func__)
+               : GenericPromise::CreateAndReject(
+                     NS_ERROR_DOM_MEDIA_NOT_SUPPORTED_ERR, __func__);
   });
 }
 
-#undef LOGE
-#undef LOGD
-
 } // namespace mozilla

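SetAverageBitrate() is not shown in these hunks; on VideoToolbox the likely mechanism is VTSessionSetProperty with kVTCompressionPropertyKey_AverageBitRate. A sketch under that assumption — the helper's name and shape are a guess, not the file's actual implementation:

#include <VideoToolbox/VideoToolbox.h>

// Push a new average bitrate to a live compression session.
static bool SetAverageBitrate(VTCompressionSessionRef aSession,
                              int32_t aBitsPerSec) {
  CFNumberRef bitrate =
      CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &aBitsPerSec);
  OSStatus status = VTSessionSetProperty(
      aSession, kVTCompressionPropertyKey_AverageBitRate, bitrate);
  CFRelease(bitrate);
  return status == noErr;
}
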
@@ -11,6 +11,7 @@
 #include <VideoToolbox/VideoToolbox.h>
 
 #include "PlatformEncoderModule.h"
+#include "TimeUnits.h"
 
 namespace mozilla {
 

@@ -20,13 +21,13 @@ class Image;
 
 class AppleVTEncoder final : public MediaDataEncoder {
  public:
-  AppleVTEncoder(const EncoderConfig& aConfig,
-                 const RefPtr<TaskQueue>& aTaskQueue)
+  using Config = H264Config;
+
+  AppleVTEncoder(const Config& aConfig, RefPtr<TaskQueue> aTaskQueue,
+                 const bool aHwardwareNotAllowed)
       : mConfig(aConfig),
         mTaskQueue(aTaskQueue),
-        mHardwareNotAllowed(
-            aConfig.mHardwarePreference ==
-            MediaDataEncoder::HardwarePreference::RequireSoftware),
+        mHardwareNotAllowed(aHwardwareNotAllowed),
         mFramesCompleted(false),
         mError(NS_OK),
         mSession(nullptr) {

@@ -36,12 +37,9 @@ class AppleVTEncoder final : public MediaDataEncoder {
 
   RefPtr<InitPromise> Init() override;
   RefPtr<EncodePromise> Encode(const MediaData* aSample) override;
-  RefPtr<ReconfigurationPromise> Reconfigure(
-      const RefPtr<const EncoderConfigurationChangeList>& aConfigurationChanges)
-      override;
   RefPtr<EncodePromise> Drain() override;
   RefPtr<ShutdownPromise> Shutdown() override;
-  RefPtr<GenericPromise> SetBitrate(uint32_t aBitsPerSec) override;
+  RefPtr<GenericPromise> SetBitrate(Rate aBitsPerSec) override;
 
   nsCString GetDescriptionName() const override {
     MOZ_ASSERT(mSession);

@@ -53,10 +51,7 @@ class AppleVTEncoder final : public MediaDataEncoder {
 
  private:
   virtual ~AppleVTEncoder() { MOZ_ASSERT(!mSession); }
-  RefPtr<EncodePromise> ProcessEncode(const RefPtr<const VideoData>& aSample);
-  RefPtr<ReconfigurationPromise> ProcessReconfigure(
-      const RefPtr<const EncoderConfigurationChangeList>&
-          aConfigurationChanges);
+  RefPtr<EncodePromise> ProcessEncode(RefPtr<const VideoData> aSample);
   void ProcessOutput(RefPtr<MediaRawData>&& aOutput);
   void ResolvePromise();
   RefPtr<EncodePromise> ProcessDrain();

@@ -68,7 +63,7 @@ class AppleVTEncoder final : public MediaDataEncoder {
                   const bool aAsAnnexB);
   void AssertOnTaskQueue() { MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); }
 
-  EncoderConfig mConfig;
+  const Config mConfig;
   const RefPtr<TaskQueue> mTaskQueue;
   const bool mHardwareNotAllowed;
   // Access only in mTaskQueue.

@@ -42,7 +42,6 @@ UNIFIED_SOURCES += [
     "PDMFactory.cpp",
     "PEMFactory.cpp",
     "PlatformDecoderModule.cpp",
-    "PlatformEncoderModule.cpp",
     "wrappers/AudioTrimmer.cpp",
     "wrappers/MediaChangeMonitor.cpp",
     "wrappers/MediaDataDecoderProxy.cpp",

@@ -23,13 +23,13 @@ namespace mozilla {
 
 extern LazyLogModule sPEMLog;
 
-static const GUID CodecToSubtype(CodecType aCodec) {
+static const GUID CodecToSubtype(MediaDataEncoder::CodecType aCodec) {
   switch (aCodec) {
-    case CodecType::H264:
+    case MediaDataEncoder::CodecType::H264:
       return MFVideoFormat_H264;
-    case CodecType::VP8:
+    case MediaDataEncoder::CodecType::VP8:
       return MFVideoFormat_VP80;
-    case CodecType::VP9:
+    case MediaDataEncoder::CodecType::VP9:
       return MFVideoFormat_VP90;
     default:
       MOZ_ASSERT(false, "Unsupported codec");

@@ -37,7 +37,7 @@ static const GUID CodecToSubtype(CodecType aCodec) {
   }
 }
 
-bool CanCreateWMFEncoder(CodecType aCodec) {
+bool CanCreateWMFEncoder(MediaDataEncoder::CodecType aCodec) {
   bool canCreate = false;
   mscom::EnsureMTA([&]() {
     if (!wmf::MediaFoundationInitializer::HasInitialized()) {

@@ -81,18 +81,35 @@ static already_AddRefed<MediaByteBuffer> ParseH264Parameters(
   return avcc.forget();
 }
 
-static uint32_t GetProfile(H264_PROFILE aProfileLevel) {
+static uint32_t GetProfile(
+    MediaDataEncoder::H264Specific::ProfileLevel aProfileLevel) {
   switch (aProfileLevel) {
-    case H264_PROFILE_BASE:
+    case MediaDataEncoder::H264Specific::ProfileLevel::BaselineAutoLevel:
       return eAVEncH264VProfile_Base;
-    case H264_PROFILE_MAIN:
+    case MediaDataEncoder::H264Specific::ProfileLevel::MainAutoLevel:
       return eAVEncH264VProfile_Main;
     default:
       return eAVEncH264VProfile_unknown;
   }
 }
 
-already_AddRefed<IMFMediaType> CreateInputType(EncoderConfig& aConfig) {
+template <typename Config>
+HRESULT SetMediaTypes(RefPtr<MFTEncoder>& aEncoder, Config& aConfig) {
+  RefPtr<IMFMediaType> inputType = CreateInputType(aConfig);
+  if (!inputType) {
+    return E_FAIL;
+  }
+
+  RefPtr<IMFMediaType> outputType = CreateOutputType(aConfig);
+  if (!outputType) {
+    return E_FAIL;
+  }
+
+  return aEncoder->SetMediaTypes(inputType, outputType);
+}
+
+template <typename Config>
+already_AddRefed<IMFMediaType> CreateInputType(Config& aConfig) {
   RefPtr<IMFMediaType> type;
   return SUCCEEDED(wmf::MFCreateMediaType(getter_AddRefs(type))) &&
          SUCCEEDED(

@@ -109,12 +126,14 @@ already_AddRefed<IMFMediaType> CreateInputType(EncoderConfig& aConfig) {
              : nullptr;
 }
 
-already_AddRefed<IMFMediaType> CreateOutputType(EncoderConfig& aConfig) {
+template <typename Config>
+already_AddRefed<IMFMediaType> CreateOutputType(Config& aConfig) {
   RefPtr<IMFMediaType> type;
   if (FAILED(wmf::MFCreateMediaType(getter_AddRefs(type))) ||
       FAILED(type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video)) ||
-      FAILED(type->SetGUID(MF_MT_SUBTYPE, CodecToSubtype(aConfig.mCodec))) ||
-      FAILED(type->SetUINT32(MF_MT_AVG_BITRATE, aConfig.mBitrate)) ||
+      FAILED(
+          type->SetGUID(MF_MT_SUBTYPE, CodecToSubtype(aConfig.mCodecType))) ||
+      FAILED(type->SetUINT32(MF_MT_AVG_BITRATE, aConfig.mBitsPerSec)) ||
       FAILED(type->SetUINT32(MF_MT_INTERLACE_MODE,
                              MFVideoInterlace_Progressive)) ||
       FAILED(

@@ -123,32 +142,24 @@ already_AddRefed<IMFMediaType> CreateOutputType(EncoderConfig& aConfig) {
                              aConfig.mSize.height))) {
     return nullptr;
   }
-  if (aConfig.mCodecSpecific) {
-    if (aConfig.mCodecSpecific->is<H264Specific>()) {
-      if (FAILED(type->SetUINT32(
-              MF_MT_MPEG2_PROFILE,
-              GetProfile(
-                  aConfig.mCodecSpecific->as<H264Specific>().mProfile)))) {
-        return nullptr;
-      }
-    }
+  if (aConfig.mCodecSpecific &&
+      FAILED(SetCodecSpecific(type, aConfig.mCodecSpecific.ref()))) {
+    return nullptr;
   }
 
   return type.forget();
 }
 
-HRESULT SetMediaTypes(RefPtr<MFTEncoder>& aEncoder, EncoderConfig& aConfig) {
-  RefPtr<IMFMediaType> inputType = CreateInputType(aConfig);
-  if (!inputType) {
-    return E_FAIL;
-  }
+template <typename T>
+HRESULT SetCodecSpecific(IMFMediaType* aOutputType, const T& aSpecific) {
+  return S_OK;
+}
 
-  RefPtr<IMFMediaType> outputType = CreateOutputType(aConfig);
-  if (!outputType) {
-    return E_FAIL;
-  }
-
-  return aEncoder->SetMediaTypes(inputType, outputType);
+template <>
+HRESULT SetCodecSpecific(IMFMediaType* aOutputType,
+                         const MediaDataEncoder::H264Specific& aSpecific) {
+  return aOutputType->SetUINT32(MF_MT_MPEG2_PROFILE,
+                                GetProfile(aSpecific.mProfileLevel));
+}
 
 } // namespace mozilla

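The SetCodecSpecific() pair restored above is the classic "permissive primary template plus one specialization" dispatch: codecs without extra attributes hit the no-op, H264 hits the specialization. A toy, self-contained version of the same pattern; all types and values here are illustrative:

#include <cstdint>
#include <cstdio>

struct H264Specific { uint32_t mProfileLevel; };  // Illustrative stand-ins.
struct VP8Specific {};

// Permissive primary template: nothing to set for most codecs.
template <typename T>
int SetCodecSpecific(const T&) {
  return 0;
}

// Full specialization: only H264 carries a profile to write.
template <>
int SetCodecSpecific(const H264Specific& aSpecific) {
  std::printf("profile %u\n", aSpecific.mProfileLevel);
  return 0;
}

int main() {
  SetCodecSpecific(VP8Specific{});     // Resolves to the primary template.
  SetCodecSpecific(H264Specific{77});  // Resolves to the specialization.
}

The overload set is resolved at compile time, so adding a codec with extra attributes means adding one more specialization, not touching the callers.
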
@@ -11,23 +11,32 @@
 namespace mozilla {
 extern LazyLogModule sPEMLog;
 
-bool WMFEncoderModule::SupportsCodec(CodecType aCodecType) const {
-  return CanCreateWMFEncoder(aCodecType);
-}
-
-bool WMFEncoderModule::Supports(const EncoderConfig& aConfig) const {
-  if (aConfig.mCodec == CodecType::H264 &&
-      (aConfig.mCodecSpecific.isNothing() ||
-       !aConfig.mCodecSpecific->is<H264Specific>())) {
-    return false;
-  }
-  return SupportsCodec(aConfig.mCodec);
+bool WMFEncoderModule::SupportsMimeType(const nsACString& aMimeType) const {
+  return CanCreateWMFEncoder(CreateEncoderParams::CodecTypeForMime(aMimeType));
 }
 
 already_AddRefed<MediaDataEncoder> WMFEncoderModule::CreateVideoEncoder(
-    const EncoderConfig& aConfig, const RefPtr<TaskQueue>& aTaskQueue) const {
-  RefPtr<MediaDataEncoder> encoder(
-      new WMFMediaDataEncoder(aConfig, aTaskQueue));
+    const CreateEncoderParams& aParams, const bool aHardwareNotAllowed) const {
+  MediaDataEncoder::CodecType codec =
+      CreateEncoderParams::CodecTypeForMime(aParams.mConfig.mMimeType);
+  RefPtr<MediaDataEncoder> encoder;
+  switch (codec) {
+    case MediaDataEncoder::CodecType::H264:
+      encoder = new WMFMediaDataEncoder<MediaDataEncoder::H264Config>(
+          aParams.ToH264Config(), aParams.mTaskQueue, aHardwareNotAllowed);
+      break;
+    case MediaDataEncoder::CodecType::VP8:
+      encoder = new WMFMediaDataEncoder<MediaDataEncoder::VP8Config>(
+          aParams.ToVP8Config(), aParams.mTaskQueue, aHardwareNotAllowed);
+      break;
+    case MediaDataEncoder::CodecType::VP9:
+      encoder = new WMFMediaDataEncoder<MediaDataEncoder::VP9Config>(
+          aParams.ToVP9Config(), aParams.mTaskQueue, aHardwareNotAllowed);
+      break;
+    default:
+      // Do nothing.
+      break;
+  }
   return encoder.forget();
 }

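CreateVideoEncoder() now picks a distinct template instantiation per codec at runtime. The dispatch shape, reduced to a standalone sketch with illustrative types (not the gecko classes):

#include <memory>

struct H264Config {};  // Per-codec config tags.
struct VP8Config {};
enum class Codec { H264, VP8, Unknown };

struct EncoderBase { virtual ~EncoderBase() = default; };
template <typename Config>
struct Encoder : EncoderBase {};

// Map a runtime codec id onto a compile-time instantiation; unknown codecs
// fall through to nullptr, which the caller must handle.
std::unique_ptr<EncoderBase> CreateEncoder(Codec aCodec) {
  switch (aCodec) {
    case Codec::H264:
      return std::make_unique<Encoder<H264Config>>();
    case Codec::VP8:
      return std::make_unique<Encoder<VP8Config>>();
    default:
      return nullptr;
  }
}
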
@@ -12,14 +12,11 @@
 
 namespace mozilla {
 class WMFEncoderModule final : public PlatformEncoderModule {
  public:
-  virtual bool Supports(const EncoderConfig& aConfig) const override;
-  virtual bool SupportsCodec(CodecType aCodec) const override;
-
-  const char* GetName() const override { return "WMF Encoder Module"; }
+  bool SupportsMimeType(const nsACString& aMimeType) const override;
 
   already_AddRefed<MediaDataEncoder> CreateVideoEncoder(
-      const EncoderConfig& aConfig,
-      const RefPtr<TaskQueue>& aTaskQueue) const override;
+      const CreateEncoderParams& aParams,
+      const bool aHardwareNotAllowed) const override;
 };
 
 } // namespace mozilla

@@ -16,22 +16,20 @@
 
 namespace mozilla {
 
+template <typename ConfigType>
 class WMFMediaDataEncoder final : public MediaDataEncoder {
  public:
-  WMFMediaDataEncoder(const EncoderConfig& aConfig,
-                      const RefPtr<TaskQueue>& aTaskQueue)
+  WMFMediaDataEncoder(const ConfigType& aConfig, RefPtr<TaskQueue> aTaskQueue,
+                      const bool aHardwareNotAllowed)
       : mConfig(aConfig),
         mTaskQueue(aTaskQueue),
-        mHardwareNotAllowed(aConfig.mHardwarePreference ==
-                                HardwarePreference::RequireSoftware ||
-                            aConfig.mHardwarePreference ==
-                                HardwarePreference::None) {
+        mHardwareNotAllowed(aHardwareNotAllowed) {
     MOZ_ASSERT(mTaskQueue);
   }
 
   RefPtr<InitPromise> Init() override {
     return InvokeAsync(mTaskQueue, this, __func__,
-                       &WMFMediaDataEncoder::ProcessInit);
+                       &WMFMediaDataEncoder<ConfigType>::ProcessInit);
   }
   RefPtr<EncodePromise> Encode(const MediaData* aSample) override {
     MOZ_ASSERT(aSample);

@@ -62,7 +60,7 @@ class WMFMediaDataEncoder final : public MediaDataEncoder {
       return ShutdownPromise::CreateAndResolve(true, __func__);
     });
   }
-  RefPtr<GenericPromise> SetBitrate(uint32_t aBitsPerSec) override {
+  RefPtr<GenericPromise> SetBitrate(Rate aBitsPerSec) override {
     return InvokeAsync(
         mTaskQueue, __func__,
         [self = RefPtr<WMFMediaDataEncoder>(this), aBitsPerSec]() {

@@ -74,16 +72,8 @@ class WMFMediaDataEncoder final : public MediaDataEncoder {
         });
   }
 
-  RefPtr<ReconfigurationPromise> Reconfigure(
-      const RefPtr<const EncoderConfigurationChangeList>& aConfigurationChanges)
-      override {
-    // General reconfiguration interface not implemented right now
-    return MediaDataEncoder::ReconfigurationPromise::CreateAndReject(
-        NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__);
-  };
-
   nsCString GetDescriptionName() const override {
-    return MFTEncoder::GetFriendlyName(CodecToSubtype(mConfig.mCodec));
+    return MFTEncoder::GetFriendlyName(CodecToSubtype(mConfig.mCodecType));
   }
 
  private:

@@ -145,13 +135,13 @@ class WMFMediaDataEncoder final : public MediaDataEncoder {
   }
 
   HRESULT InitMFTEncoder(RefPtr<MFTEncoder>& aEncoder) {
-    HRESULT hr = aEncoder->Create(CodecToSubtype(mConfig.mCodec));
+    HRESULT hr = aEncoder->Create(CodecToSubtype(mConfig.mCodecType));
     NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
     hr = SetMediaTypes(aEncoder, mConfig);
     NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
-    hr = aEncoder->SetModes(mConfig.mBitrate);
+    hr = aEncoder->SetModes(mConfig.mBitsPerSec);
     NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
     return S_OK;

@@ -292,7 +282,7 @@ class WMFMediaDataEncoder final : public MediaDataEncoder {
 
   bool WriteFrameData(RefPtr<MediaRawData>& aDest, LockBuffer& aSrc,
                       bool aIsKeyframe) {
-    if (mConfig.mCodec == CodecType::H264) {
+    if (std::is_same_v<ConfigType, MediaDataEncoder::H264Config>) {
       size_t prependLength = 0;
       RefPtr<MediaByteBuffer> avccHeader;
       if (aIsKeyframe && mConfigData) {

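With the encoder templated on its config type, the codec check in WriteFrameData() becomes a compile-time type test instead of a runtime codec field. A minimal sketch of the idea (the restored code uses a plain if on std::is_same_v, which also works because both branches type-check for every ConfigType; if constexpr just makes the pruning explicit):

#include <type_traits>

struct H264Config {};  // Illustrative stand-ins for the gecko config types.
struct VP8Config {};

template <typename ConfigType>
bool NeedsAvccHandling() {
  // The codec is fixed by the template argument, so the test folds to a
  // constant per instantiation.
  if constexpr (std::is_same_v<ConfigType, H264Config>) {
    return true;   // H264 output gets the AVCC/Annex B treatment above.
  } else {
    return false;  // VP8/VP9 payloads are written as-is.
  }
}
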
@@ -334,7 +324,7 @@ class WMFMediaDataEncoder final : public MediaDataEncoder {
 
   void AssertOnTaskQueue() { MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); }
 
-  EncoderConfig mConfig;
+  const ConfigType mConfig;
   const RefPtr<TaskQueue> mTaskQueue;
   const bool mHardwareNotAllowed;
   RefPtr<MFTEncoder> mEncoder;

@@ -12,10 +12,12 @@
 #include "modules/video_coding/utility/vp8_header_parser.h"
 #include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
+#include "mozilla/Maybe.h"
 #include "mozilla/Span.h"
 #include "mozilla/gfx/Point.h"
 #include "mozilla/media/MediaUtils.h"
 #include "mozilla/StaticPrefs_media.h"
 #include "PEMFactory.h"
 #include "system_wrappers/include/clock.h"
 #include "VideoUtils.h"
 
 namespace mozilla {

@@ -34,26 +36,28 @@ extern LazyLogModule sPEMLog;
 
 using namespace media;
 using namespace layers;
+using MimeTypeResult = Maybe<nsLiteralCString>;
 
-CodecType ConvertWebrtcCodecTypeToCodecType(
+static MimeTypeResult ConvertWebrtcCodecTypeToMimeType(
     const webrtc::VideoCodecType& aType) {
   switch (aType) {
     case webrtc::VideoCodecType::kVideoCodecVP8:
-      return CodecType::VP8;
+      return Some("video/vp8"_ns);
     case webrtc::VideoCodecType::kVideoCodecVP9:
-      return CodecType::VP9;
+      return Some("video/vp9"_ns);
     case webrtc::VideoCodecType::kVideoCodecH264:
-      return CodecType::H264;
+      return Some("video/avc"_ns);
     default:
       MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unsupported codec type");
   }
+  return Nothing();
 }
 
 bool WebrtcMediaDataEncoder::CanCreate(
     const webrtc::VideoCodecType aCodecType) {
   auto factory = MakeRefPtr<PEMFactory>();
-  CodecType type = ConvertWebrtcCodecTypeToCodecType(aCodecType);
-  return factory->SupportsCodec(type);
+  MimeTypeResult mimeType = ConvertWebrtcCodecTypeToMimeType(aCodecType);
+  return mimeType ? factory->SupportsMimeType(mimeType.ref()) : false;
 }
 
 static const char* PacketModeStr(const webrtc::CodecSpecificInfo& aInfo) {

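The restored mapping routes every supported webrtc codec to a mime-type string wrapped in a Maybe. The same shape with std::optional, as a standalone sketch; the enum mirrors webrtc::VideoCodecType but is illustrative:

#include <optional>
#include <string_view>

enum class VideoCodecType { kVP8, kVP9, kH264, kGeneric };

// Unsupported codecs yield no mime type; the caller decides how to fail.
static std::optional<std::string_view> MimeTypeFor(VideoCodecType aType) {
  switch (aType) {
    case VideoCodecType::kVP8:
      return "video/vp8";
    case VideoCodecType::kVP9:
      return "video/vp9";
    case VideoCodecType::kH264:
      return "video/avc";
    default:
      return std::nullopt;
  }
}
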
@@ -72,7 +76,7 @@ static const char* PacketModeStr(const webrtc::CodecSpecificInfo& aInfo) {
   }
 }
 
-static H264_PROFILE ConvertProfileLevel(
+static MediaDataEncoder::H264Specific::ProfileLevel ConvertProfileLevel(
     const webrtc::SdpVideoFormat::Parameters& aParameters) {
   const absl::optional<webrtc::H264ProfileLevelId> profileLevel =
       webrtc::ParseSdpForH264ProfileLevelId(aParameters);

@@ -80,21 +84,22 @@
       (profileLevel->profile == webrtc::H264Profile::kProfileBaseline ||
        profileLevel->profile ==
            webrtc::H264Profile::kProfileConstrainedBaseline)) {
-    return H264_PROFILE::H264_PROFILE_BASE;
+    return MediaDataEncoder::H264Specific::ProfileLevel::BaselineAutoLevel;
   }
-  return H264_PROFILE::H264_PROFILE_MAIN;
+  return MediaDataEncoder::H264Specific::ProfileLevel::MainAutoLevel;
 }
 
-static VPXComplexity MapComplexity(webrtc::VideoCodecComplexity aComplexity) {
+static MediaDataEncoder::VPXSpecific::Complexity MapComplexity(
+    webrtc::VideoCodecComplexity aComplexity) {
   switch (aComplexity) {
     case webrtc::VideoCodecComplexity::kComplexityNormal:
-      return VPXComplexity::Normal;
+      return MediaDataEncoder::VPXSpecific::Complexity::Normal;
     case webrtc::VideoCodecComplexity::kComplexityHigh:
-      return VPXComplexity::High;
+      return MediaDataEncoder::VPXSpecific::Complexity::High;
     case webrtc::VideoCodecComplexity::kComplexityHigher:
-      return VPXComplexity::Higher;
+      return MediaDataEncoder::VPXSpecific::Complexity::Higher;
     case webrtc::VideoCodecComplexity::kComplexityMax:
-      return VPXComplexity::Max;
+      return MediaDataEncoder::VPXSpecific::Complexity::Max;
     default:
       MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Bad complexity value");
   }

@@ -193,6 +198,14 @@ int32_t WebrtcMediaDataEncoder::InitEncode(
 
 bool WebrtcMediaDataEncoder::SetupConfig(
     const webrtc::VideoCodec* aCodecSettings) {
+  MimeTypeResult mimeType =
+      ConvertWebrtcCodecTypeToMimeType(aCodecSettings->codecType);
+  if (!mimeType) {
+    LOG("Get incorrect mime type");
+    return false;
+  }
+  mInfo = VideoInfo(aCodecSettings->width, aCodecSettings->height);
+  mInfo.mMimeType = mimeType.extract();
   mMaxFrameRate = aCodecSettings->maxFramerate;
   // Those bitrates in codec setting are all kbps, so we have to covert them to
   // bps.

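The comment restored above is the whole contract: webrtc::VideoCodec carries kilobits per second, while the platform encoders expect bits per second, so every bitrate crossing that boundary is scaled by 1000. As a worked one-liner:

#include <cstdint>

// kbps (webrtc settings) -> bps (platform encoder config).
constexpr uint32_t KbpsToBps(uint32_t aKbps) { return aKbps * 1000; }

static_assert(KbpsToBps(2500) == 2500000);  // e.g. a 2.5 Mbps target.
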
@@ -231,33 +244,34 @@ already_AddRefed<MediaDataEncoder> WebrtcMediaDataEncoder::CreateEncoder(
     MOZ_ASSERT_UNREACHABLE("Unsupported codec type");
     return nullptr;
   }
 
-  CodecType type;
-  Maybe<EncoderConfig::CodecSpecific> specific;
+  CreateEncoderParams params(
+      mInfo, MediaDataEncoder::Usage::Realtime,
+      TaskQueue::Create(GetMediaThreadPool(MediaThreadType::PLATFORM_ENCODER),
+                        "WebrtcMediaDataEncoder::mEncoder"),
+      MediaDataEncoder::PixelFormat::YUV420P, aCodecSettings->maxFramerate,
+      keyframeInterval, mBitrateAdjuster.GetTargetBitrateBps());
   switch (aCodecSettings->codecType) {
     case webrtc::VideoCodecType::kVideoCodecH264: {
-      type = CodecType::H264;
-      specific.emplace(H264Specific(ConvertProfileLevel(mFormatParams)));
+      params.SetCodecSpecific(
+          MediaDataEncoder::H264Specific(ConvertProfileLevel(mFormatParams)));
       break;
     }
     case webrtc::VideoCodecType::kVideoCodecVP8: {
-      type = CodecType::VP8;
       const webrtc::VideoCodecVP8& vp8 = aCodecSettings->VP8();
       const webrtc::VideoCodecComplexity complexity =
           aCodecSettings->GetVideoEncoderComplexity();
       const bool frameDropEnabled = aCodecSettings->GetFrameDropEnabled();
-      specific.emplace(VP8Specific(MapComplexity(complexity), false,
-                                   vp8.numberOfTemporalLayers, vp8.denoisingOn,
-                                   vp8.automaticResizeOn, frameDropEnabled));
+      params.SetCodecSpecific(MediaDataEncoder::VPXSpecific::VP8(
+          MapComplexity(complexity), false, vp8.numberOfTemporalLayers,
+          vp8.denoisingOn, vp8.automaticResizeOn, frameDropEnabled));
       break;
     }
     case webrtc::VideoCodecType::kVideoCodecVP9: {
-      type = CodecType::VP9;
       const webrtc::VideoCodecVP9& vp9 = aCodecSettings->VP9();
       const webrtc::VideoCodecComplexity complexity =
           aCodecSettings->GetVideoEncoderComplexity();
       const bool frameDropEnabled = aCodecSettings->GetFrameDropEnabled();
-      specific.emplace(VP9Specific(
+      params.SetCodecSpecific(MediaDataEncoder::VPXSpecific::VP9(
           MapComplexity(complexity), false, vp9.numberOfTemporalLayers,
           vp9.denoisingOn, vp9.automaticResizeOn, frameDropEnabled,
           vp9.adaptiveQpMode, vp9.numberOfSpatialLayers, vp9.flexibleMode));

@@ -266,14 +280,7 @@ already_AddRefed<MediaDataEncoder> WebrtcMediaDataEncoder::CreateEncoder(
     default:
       MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unsupported codec type");
   }
-  EncoderConfig config(
-      type, {aCodecSettings->width, aCodecSettings->height},
-      MediaDataEncoder::Usage::Realtime, MediaDataEncoder::PixelFormat::YUV420P,
-      MediaDataEncoder::PixelFormat::YUV420P, aCodecSettings->maxFramerate,
-      keyframeInterval, mBitrateAdjuster.GetTargetBitrateBps(),
-      MediaDataEncoder::BitrateMode::Variable,
-      MediaDataEncoder::HardwarePreference::None, specific);
-  return mFactory->CreateEncoder(config, mTaskQueue);
+  return mFactory->CreateEncoder(params, swOnly);
 }
 
 WebrtcVideoEncoder::EncoderInfo WebrtcMediaDataEncoder::GetEncoderInfo() const {

@@ -464,7 +471,7 @@ int32_t WebrtcMediaDataEncoder::Encode(
           self->mBitrateAdjuster.Update(image.size());
         }
       },
-      [self = RefPtr<WebrtcMediaDataEncoder>(this)](const MediaResult& aError) {
+      [self = RefPtr<WebrtcMediaDataEncoder>(this)](const MediaResult aError) {
         self->mError = aError;
       });
   return WEBRTC_VIDEO_CODEC_OK;

|
|
@ -37,7 +37,7 @@ class WebrtcMediaDataEncoder : public RefCountedWebrtcVideoEncoder {
|
|||
int32_t Shutdown() override;
|
||||
|
||||
int32_t Encode(
|
||||
const webrtc::VideoFrame& aInputFrame,
|
||||
const webrtc::VideoFrame& aFrame,
|
||||
const std::vector<webrtc::VideoFrameType>* aFrameTypes) override;
|
||||
|
||||
int32_t SetRates(
|
||||
|
|
@@ -68,9 +68,9 @@ class WebrtcMediaDataEncoder : public RefCountedWebrtcVideoEncoder {
   webrtc::SdpVideoFormat::Parameters mFormatParams;
   webrtc::CodecSpecificInfo mCodecSpecific;
   webrtc::BitrateAdjuster mBitrateAdjuster;
-  uint32_t mMaxFrameRate = {0};
-  uint32_t mMinBitrateBps = {0};
-  uint32_t mMaxBitrateBps = {0};
+  uint32_t mMaxFrameRate;
+  uint32_t mMinBitrateBps;
+  uint32_t mMaxBitrateBps;
 };
 
 } // namespace mozilla