Bug 1873663 - Remove legacy audio decoders (vorbis, opus, pcm). r=media-playback-reviewers,alwu

In addition, this removes prefs to decode mp3 using anything else than ffmpeg.
More patches to rationalize our decoder usage will follow.

Differential Revision: https://phabricator.services.mozilla.com/D198024
This commit is contained in:
Paul Adenot 2024-01-12 09:18:35 +00:00
parent 8188f3b1ee
commit caabfb8e4a
25 changed files with 46 additions and 1227 deletions

View file

@ -1216,4 +1216,13 @@ bool OnCellularConnection() {
return false;
}
// Returns true for the WAVE/PCM MIME types the platform accepts.
bool IsWaveMimetype(const nsACString& aMimeType) {
  // Bare x-wav is accepted without a codec parameter.
  if (aMimeType.EqualsLiteral("audio/x-wav")) {
    return true;
  }
  // "audio/wave" is only accepted with an explicit WAVE format tag from the
  // supported set (1, 3, 6, 7, 65534).
  return aMimeType.EqualsLiteral("audio/wave; codecs=1") ||
         aMimeType.EqualsLiteral("audio/wave; codecs=3") ||
         aMimeType.EqualsLiteral("audio/wave; codecs=6") ||
         aMimeType.EqualsLiteral("audio/wave; codecs=7") ||
         aMimeType.EqualsLiteral("audio/wave; codecs=65534");
}
} // end namespace mozilla

View file

@ -576,6 +576,8 @@ inline gfx::YUVColorSpace DefaultColorSpace(const gfx::IntSize& aSize) {
: gfx::YUVColorSpace::BT709;
}
bool IsWaveMimetype(const nsACString& aMimeType);
} // end namespace mozilla
#endif

View file

@ -8,14 +8,11 @@
#ifdef MOZ_AV1
# include "AOMDecoder.h"
#endif
#include "OpusDecoder.h"
#include "RemoteAudioDecoder.h"
#include "RemoteDecoderManagerChild.h"
#include "RemoteMediaDataDecoder.h"
#include "RemoteVideoDecoder.h"
#include "VideoUtils.h"
#include "VorbisDecoder.h"
#include "WAVDecoder.h"
#include "gfxConfig.h"
#include "mozilla/RemoteDecodeUtils.h"
@ -73,7 +70,7 @@ RemoteDecoderModule::AsyncCreateDecoder(const CreateDecoderParams& aParams) {
// that IsDefaultPlaybackDeviceMono provides. We want to avoid calls
// to IsDefaultPlaybackDeviceMono on RDD because initializing audio
// backends on RDD will be blocked by the sandbox.
if (OpusDataDecoder::IsOpus(aParams.mConfig.mMimeType) &&
if (aParams.mConfig.mMimeType.Equals("audio/opus") &&
IsDefaultPlaybackDeviceMono()) {
CreateDecoderParams params = aParams;
params.mOptions += CreateDecoderParams::Option::DefaultPlaybackDeviceMono;

View file

@ -7,9 +7,7 @@
#include "MediaSourceDemuxer.h"
#include "MediaSourceUtils.h"
#include "OpusDecoder.h"
#include "SourceBufferList.h"
#include "VorbisDecoder.h"
#include "VideoUtils.h"
#include "nsPrintfCString.h"
@ -277,9 +275,9 @@ MediaSourceTrackDemuxer::MediaSourceTrackDemuxer(MediaSourceDemuxer* aParent,
mManager(aManager),
mReset(true),
mPreRoll(TimeUnit::FromMicroseconds(
OpusDataDecoder::IsOpus(mParent->GetTrackInfo(mType)->mMimeType) ||
VorbisDataDecoder::IsVorbis(
mParent->GetTrackInfo(mType)->mMimeType)
mParent->GetTrackInfo(mType)->mMimeType.EqualsLiteral("audio/opus") ||
mParent->GetTrackInfo(mType)->mMimeType.EqualsLiteral(
"audio/vorbis")
? 80000
: mParent->GetTrackInfo(mType)->mMimeType.EqualsLiteral(
"audio/mp4a-latm")

View file

@ -13,8 +13,6 @@
#include "MP4Metadata.h"
#include "mozilla/Logging.h"
// OpusDecoder header is really needed only by MP4 in rust
#include "OpusDecoder.h"
#include "mp4parse.h"
#define LOG(...) \

View file

@ -17,7 +17,6 @@
#include "OggCodecState.h"
#include "OggRLBox.h"
#include "OpusDecoder.h"
#include "OpusParser.h"
#include "VideoUtils.h"
#include "XiphExtradata.h"

View file

@ -10,15 +10,13 @@
#endif
#include "MediaCodecsSupport.h"
#include "MP4Decoder.h"
#include "OpusDecoder.h"
#include "PlatformDecoderModule.h"
#include "TheoraDecoder.h"
#include "VPXDecoder.h"
#include "VorbisDecoder.h"
#include "WAVDecoder.h"
#include "mozilla/AppShutdown.h"
#include "mozilla/gfx/gfxVars.h"
#include "nsTHashMap.h"
#include "VideoUtils.h"
using MediaCodecsSupport = mozilla::media::MediaCodecsSupport;
@ -263,16 +261,16 @@ MediaCodec MCSInfo::GetMediaCodecFromMimeType(const nsACString& aMimeType) {
if (MP4Decoder::IsAAC(aMimeType)) {
return MediaCodec::AAC;
}
if (VorbisDataDecoder::IsVorbis(aMimeType)) {
if (aMimeType.EqualsLiteral("audio/vorbis")) {
return MediaCodec::Vorbis;
}
if (aMimeType.EqualsLiteral("audio/flac")) {
return MediaCodec::FLAC;
}
if (WaveDataDecoder::IsWave(aMimeType)) {
if (IsWaveMimetype(aMimeType)) {
return MediaCodec::Wave;
}
if (OpusDataDecoder::IsOpus(aMimeType)) {
if (aMimeType.EqualsLiteral("audio/opus")) {
return MediaCodec::Opus;
}
if (aMimeType.EqualsLiteral("audio/mpeg")) {

View file

@ -19,12 +19,9 @@
#include "MP4Decoder.h"
#include "MediaChangeMonitor.h"
#include "MediaInfo.h"
#include "OpusDecoder.h"
#include "TheoraDecoder.h"
#include "VPXDecoder.h"
#include "VideoUtils.h"
#include "VorbisDecoder.h"
#include "WAVDecoder.h"
#include "mozilla/ClearOnShutdown.h"
#include "mozilla/RemoteDecodeUtils.h"
#include "mozilla/RemoteDecoderManagerChild.h"
@ -882,16 +879,16 @@ DecodeSupportSet PDMFactory::SupportsMimeType(
if (aMimeType.EqualsLiteral("audio/mpeg")) {
return MCSInfo::GetDecodeSupportSet(MediaCodec::MP3, aSupported);
}
if (OpusDataDecoder::IsOpus(aMimeType)) {
if (aMimeType.EqualsLiteral("audio/opus")) {
return MCSInfo::GetDecodeSupportSet(MediaCodec::Opus, aSupported);
}
if (VorbisDataDecoder::IsVorbis(aMimeType)) {
if (aMimeType.EqualsLiteral("audio/vorbis")) {
return MCSInfo::GetDecodeSupportSet(MediaCodec::Vorbis, aSupported);
}
if (aMimeType.EqualsLiteral("audio/flac")) {
return MCSInfo::GetDecodeSupportSet(MediaCodec::FLAC, aSupported);
}
if (WaveDataDecoder::IsWave(aMimeType)) {
if (IsWaveMimetype(aMimeType)) {
return MCSInfo::GetDecodeSupportSet(MediaCodec::Wave, aSupported);
}
}

View file

@ -6,11 +6,8 @@
#include "AgnosticDecoderModule.h"
#include "OpusDecoder.h"
#include "TheoraDecoder.h"
#include "VPXDecoder.h"
#include "VorbisDecoder.h"
#include "WAVDecoder.h"
#include "mozilla/Logging.h"
#include "mozilla/StaticPrefs_media.h"
#include "VideoUtils.h"
@ -132,11 +129,7 @@ media::DecodeSupportSet AgnosticDecoderModule::Supports(
(AOMDecoder::IsAV1(mimeType) && IsAvailable(DecoderType::AV1)) ||
#endif
(VPXDecoder::IsVPX(mimeType) && IsAvailable(DecoderType::VPX)) ||
(TheoraDecoder::IsTheora(mimeType) && IsAvailable(DecoderType::Theora)) ||
(VorbisDataDecoder::IsVorbis(mimeType) &&
IsAvailable(DecoderType::Vorbis)) ||
(WaveDataDecoder::IsWave(mimeType) && IsAvailable(DecoderType::Wave)) ||
(OpusDataDecoder::IsOpus(mimeType) && IsAvailable(DecoderType::Opus));
(TheoraDecoder::IsTheora(mimeType) && IsAvailable(DecoderType::Theora));
MOZ_LOG(sPDMLog, LogLevel::Debug,
("Agnostic decoder %s requested type '%s'",
supports ? "supports" : "rejects", mimeType.BeginReading()));
@ -180,22 +173,7 @@ already_AddRefed<MediaDataDecoder> AgnosticDecoderModule::CreateVideoDecoder(
already_AddRefed<MediaDataDecoder> AgnosticDecoderModule::CreateAudioDecoder(
const CreateDecoderParams& aParams) {
if (Supports(SupportDecoderParams(aParams), nullptr /* diagnostic */)
.isEmpty()) {
return nullptr;
}
RefPtr<MediaDataDecoder> m;
const TrackInfo& config = aParams.mConfig;
if (VorbisDataDecoder::IsVorbis(config.mMimeType)) {
m = new VorbisDataDecoder(aParams);
} else if (OpusDataDecoder::IsOpus(config.mMimeType)) {
m = new OpusDataDecoder(aParams);
} else if (WaveDataDecoder::IsWave(config.mMimeType)) {
m = new WaveDataDecoder(aParams);
}
return m.forget();
return nullptr;
}
/* static */

View file

@ -1,360 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "OpusDecoder.h"
#include <inttypes.h> // For PRId64
#include "OpusParser.h"
#include "TimeUnits.h"
#include "VideoUtils.h"
#include "VorbisDecoder.h" // For VorbisLayout
#include "mozilla/EndianUtils.h"
#include "mozilla/PodOperations.h"
#include "mozilla/SyncRunnable.h"
#include <opus/opus.h>
extern "C" {
#include <opus/opus_multistream.h>
}
#define OPUS_DEBUG(arg, ...) \
DDMOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, "::%s: " arg, __func__, \
##__VA_ARGS__)
namespace mozilla {
// Construct an Opus decoder for the audio track described by aParams.
// Real setup (header parsing, libopus decoder creation) happens in Init().
OpusDataDecoder::OpusDataDecoder(const CreateDecoderParams& aParams)
    : mInfo(aParams.AudioConfig()),
      mOpusDecoder(nullptr),
      mSkip(0),
      mDecodedHeader(false),
      mPaddingDiscarded(false),
      mFrames(0),
      mChannelMap(AudioConfig::ChannelLayout::UNKNOWN_MAP),
      // Caller may pre-declare that playback is downmixed to mono; Init()
      // uses this to disable stereo phase inversion.
      mDefaultPlaybackDeviceMono(aParams.mOptions.contains(
          CreateDecoderParams::Option::DefaultPlaybackDeviceMono)) {}
OpusDataDecoder::~OpusDataDecoder() {
  // Release the libopus multistream decoder, if Init() ever created one.
  if (!mOpusDecoder) {
    return;
  }
  opus_multistream_decoder_destroy(mOpusDecoder);
  mOpusDecoder = nullptr;
}
// Nothing asynchronous to tear down; the libopus state itself is released by
// the destructor, so this resolves immediately.
RefPtr<ShutdownPromise> OpusDataDecoder::Shutdown() {
  // mThread may not be set if Init hasn't been called first.
  MOZ_ASSERT(!mThread || mThread->IsOnCurrentThread());
  return ShutdownPromise::CreateAndResolve(true, __func__);
}
// Parse the Opus header from the codec specific data and create the libopus
// multistream decoder. Resolves with kAudioTrack on success, rejects with
// NS_ERROR_DOM_MEDIA_FATAL_ERR on any setup failure.
RefPtr<MediaDataDecoder::InitPromise> OpusDataDecoder::Init() {
  mThread = GetCurrentSerialEventTarget();
  if (!mInfo.mCodecSpecificConfig.is<OpusCodecSpecificData>()) {
    MOZ_ASSERT_UNREACHABLE();
    OPUS_DEBUG("Opus decoder got non-opus codec specific data");
    return InitPromise::CreateAndReject(
        MediaResult(
            NS_ERROR_DOM_MEDIA_FATAL_ERR,
            RESULT_DETAIL("Opus decoder got non-opus codec specific data!")),
        __func__);
  }
  const OpusCodecSpecificData opusCodecSpecificData =
      mInfo.mCodecSpecificConfig.as<OpusCodecSpecificData>();
  RefPtr<MediaByteBuffer> opusHeaderBlob =
      opusCodecSpecificData.mHeadersBinaryBlob;
  size_t length = opusHeaderBlob->Length();
  uint8_t* p = opusHeaderBlob->Elements();
  // DecodeHeader populates mOpusParser and mMappingTable.
  if (NS_FAILED(DecodeHeader(p, length))) {
    OPUS_DEBUG("Error decoding header!");
    return InitPromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                    RESULT_DETAIL("Error decoding header!")),
        __func__);
  }

  MOZ_ASSERT(mMappingTable.Length() >= uint32_t(mOpusParser->mChannels));
  int r;
  mOpusDecoder = opus_multistream_decoder_create(
      mOpusParser->mRate, mOpusParser->mChannels, mOpusParser->mStreams,
      mOpusParser->mCoupledStreams, mMappingTable.Elements(), &r);

  if (!mOpusDecoder) {
    OPUS_DEBUG("Error creating decoder!");
    return InitPromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                    RESULT_DETAIL("Error creating decoder!")),
        __func__);
  }

  // Opus has a special feature for stereo coding where it represent wide
  // stereo channels by 180-degree out of phase. This improves quality, but
  // needs to be disabled when the output is downmixed to mono. Playback number
  // of channels are set in AudioSink, using the same method
  // `DecideAudioPlaybackChannels()`, and triggers downmix if needed.
  if (mDefaultPlaybackDeviceMono || DecideAudioPlaybackChannels(mInfo) == 1) {
    opus_multistream_decoder_ctl(mOpusDecoder,
                                 OPUS_SET_PHASE_INVERSION_DISABLED(1));
  }

  // Arm the pre-skip trimming for the first packets.
  mSkip = mOpusParser->mPreSkip;
  mPaddingDiscarded = false;

  // The following mismatches are logged but not fatal; codec values win.
  if (opusCodecSpecificData.mContainerCodecDelayFrames !=
      mOpusParser->mPreSkip) {
    NS_WARNING(
        "Invalid Opus header: container CodecDelay and Opus pre-skip do not "
        "match!");
  }
  OPUS_DEBUG("Opus preskip in extradata: %" PRId64 " frames",
             opusCodecSpecificData.mContainerCodecDelayFrames);

  if (mInfo.mRate != (uint32_t)mOpusParser->mRate) {
    NS_WARNING("Invalid Opus header: container and codec rate do not match!");
  }
  if (mInfo.mChannels != (uint32_t)mOpusParser->mChannels) {
    NS_WARNING(
        "Invalid Opus header: container and codec channels do not match!");
  }

  // r holds the status from opus_multistream_decoder_create above.
  return r == OPUS_OK
             ? InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__)
             : InitPromise::CreateAndReject(
                   MediaResult(
                       NS_ERROR_DOM_MEDIA_FATAL_ERR,
                       RESULT_DETAIL(
                           "could not create opus multistream decoder!")),
                   __func__);
}
// Parse the Opus identification header (aData/aLength) and build the channel
// mapping table that will be handed to libopus, remapping Vorbis channel
// order to SMPTE order when possible. Must be called exactly once, before
// the decoder is created.
nsresult OpusDataDecoder::DecodeHeader(const unsigned char* aData,
                                       size_t aLength) {
  MOZ_ASSERT(!mOpusParser);
  MOZ_ASSERT(!mOpusDecoder);
  MOZ_ASSERT(!mDecodedHeader);
  mDecodedHeader = true;

  mOpusParser = MakeUnique<OpusParser>();
  if (!mOpusParser->DecodeHeader(const_cast<unsigned char*>(aData), aLength)) {
    return NS_ERROR_FAILURE;
  }
  int channels = mOpusParser->mChannels;

  mMappingTable.SetLength(channels);
  AudioConfig::ChannelLayout vorbisLayout(
      channels, VorbisDataDecoder::VorbisLayout(channels));
  if (vorbisLayout.IsValid()) {
    mChannelMap = vorbisLayout.Map();

    AudioConfig::ChannelLayout smpteLayout(
        AudioConfig::ChannelLayout::SMPTEDefault(vorbisLayout));

    AutoTArray<uint8_t, 8> map;
    map.SetLength(channels);
    // Mapping family 1 uses Vorbis channel order; convert it to SMPTE via
    // the layout mapping table when one exists.
    if (mOpusParser->mChannelMapping == 1 &&
        vorbisLayout.MappingTable(smpteLayout, &map)) {
      for (int i = 0; i < channels; i++) {
        mMappingTable[i] = mOpusParser->mMappingTable[map[i]];
      }
    } else {
      // Use Opus set channel mapping and return channels as-is.
      PodCopy(mMappingTable.Elements(), mOpusParser->mMappingTable, channels);
    }
  } else {
    // Create a dummy mapping table so that channel ordering stay the same
    // during decoding.
    for (int i = 0; i < channels; i++) {
      mMappingTable[i] = i;
    }
  }

  return NS_OK;
}
// Decode one Opus packet into PCM, handling pre-skip trimming, optional
// header gain, and timestamp reconstruction (timestamps are shifted back by
// the codec delay and forward by frames already produced for this block).
RefPtr<MediaDataDecoder::DecodePromise> OpusDataDecoder::Decode(
    MediaRawData* aSample) {
  MOZ_ASSERT(mThread->IsOnCurrentThread());
  PROCESS_DECODE_LOG(aSample);
  uint32_t channels = mOpusParser->mChannels;

  if (mPaddingDiscarded) {
    // Discard padding should be used only on the final packet, so
    // decoding after a padding discard is invalid.
    OPUS_DEBUG("Opus error, discard padding on interstitial packet");
    return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                    RESULT_DETAIL("Discard padding on interstitial packet")),
        __func__);
  }

  if (!mLastFrameTime ||
      mLastFrameTime.ref() != aSample->mTime.ToMicroseconds()) {
    // We are starting a new block.
    mFrames = 0;
    mLastFrameTime = Some(aSample->mTime.ToMicroseconds());
  }

  // Maximum value is 63*2880, so there's no chance of overflow.
  int frames_number =
      opus_packet_get_nb_frames(aSample->Data(), aSample->Size());
  if (frames_number <= 0) {
    OPUS_DEBUG("Invalid packet header: r=%d length=%zu", frames_number,
               aSample->Size());
    return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                    RESULT_DETAIL("Invalid packet header: r=%d length=%u",
                                  frames_number, uint32_t(aSample->Size()))),
        __func__);
  }

  int samples = opus_packet_get_samples_per_frame(
      aSample->Data(), opus_int32(mOpusParser->mRate));

  // A valid Opus packet must be between 2.5 and 120 ms long (48kHz).
  CheckedInt32 totalFrames =
      CheckedInt32(frames_number) * CheckedInt32(samples);
  if (!totalFrames.isValid()) {
    return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                    RESULT_DETAIL("Frames count overflow")),
        __func__);
  }

  int frames = totalFrames.value();
  if (frames < 120 || frames > 5760) {
    OPUS_DEBUG("Invalid packet frames: %d", frames);
    return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                    RESULT_DETAIL("Invalid packet frames:%d", frames)),
        __func__);
  }

  AlignedAudioBuffer buffer(frames * channels);
  if (!buffer) {
    return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
  }

  // Decode to the appropriate sample type.
#ifdef MOZ_SAMPLE_TYPE_FLOAT32
  int ret = opus_multistream_decode_float(mOpusDecoder, aSample->Data(),
                                          aSample->Size(), buffer.get(), frames,
                                          false);
#else
  int ret =
      opus_multistream_decode(mOpusDecoder, aSample->Data(), aSample->Size(),
                              buffer.get(), frames, false);
#endif
  if (ret < 0) {
    return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                    RESULT_DETAIL("Opus decoding error:%d", ret)),
        __func__);
  }
  NS_ASSERTION(ret == frames, "Opus decoded too few audio samples");
  auto startTime = aSample->mTime;

  OPUS_DEBUG("Decoding frames: [%lf, %lf]", aSample->mTime.ToSeconds(),
             aSample->GetEndTime().ToSeconds());

  // Trim the initial frames while the decoder is settling.
  if (mSkip > 0) {
    int32_t skipFrames = std::min<int32_t>(mSkip, frames);
    int32_t keepFrames = frames - skipFrames;
    OPUS_DEBUG("Opus decoder trimming %d of %d frames", skipFrames, frames);
    PodMove(buffer.get(), buffer.get() + skipFrames * channels,
            keepFrames * channels);
    startTime = startTime + media::TimeUnit(skipFrames, mOpusParser->mRate);
    frames = keepFrames;
    mSkip -= skipFrames;
    // NOTE(review): sample time/duration are adjusted at a hard-coded
    // 48000 Hz while startTime above uses mOpusParser->mRate — confirm these
    // are always equal for Opus streams.
    aSample->mTime += media::TimeUnit(skipFrames, 48000);
    aSample->mDuration -= media::TimeUnit(skipFrames, 48000);
    OPUS_DEBUG("Adjusted frame after trimming pre-roll: [%lf, %lf]",
               aSample->mTime.ToSeconds(), aSample->GetEndTime().ToSeconds());
  }

  // Apply the header gain if one was specified.
#ifdef MOZ_SAMPLE_TYPE_FLOAT32
  if (mOpusParser->mGain != 1.0f) {
    float gain = mOpusParser->mGain;
    uint32_t samples = frames * channels;
    for (uint32_t i = 0; i < samples; i++) {
      buffer[i] *= gain;
    }
  }
#else
  if (mOpusParser->mGain_Q16 != 65536) {
    // Fixed-point (Q16) gain with rounding, clipped to 15-bit range.
    int64_t gain_Q16 = mOpusParser->mGain_Q16;
    uint32_t samples = frames * channels;
    for (uint32_t i = 0; i < samples; i++) {
      int32_t val = static_cast<int32_t>((gain_Q16 * buffer[i] + 32768) >> 16);
      buffer[i] = static_cast<AudioDataValue>(MOZ_CLIP_TO_15(val));
    }
  }
#endif

  auto duration = media::TimeUnit(frames, mOpusParser->mRate);
  if (!duration.IsValid()) {
    return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                    RESULT_DETAIL("Overflow converting WebM audio duration")),
        __func__);
  }
  // Shift back by pre-skip (codec delay), forward by frames already produced
  // for this block.
  auto time = startTime -
              media::TimeUnit(mOpusParser->mPreSkip, mOpusParser->mRate) +
              media::TimeUnit(mFrames, mOpusParser->mRate);
  if (!time.IsValid()) {
    return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                    RESULT_DETAIL("Overflow shifting tstamp by codec delay")),
        __func__);
  };

  mFrames += frames;
  mTotalFrames += frames;
  OPUS_DEBUG("Total frames so far: %" PRId64, mTotalFrames);

  if (!frames) {
    // Everything was trimmed away; nothing to output for this packet.
    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
  }

  // Trim extra allocated frames.
  buffer.SetLength(frames * channels);

  return DecodePromise::CreateAndResolve(
      DecodedData{new AudioData(aSample->mOffset, time, std::move(buffer),
                                mOpusParser->mChannels, mOpusParser->mRate,
                                mChannelMap)},
      __func__);
}
RefPtr<MediaDataDecoder::DecodePromise> OpusDataDecoder::Drain() {
  MOZ_ASSERT(mThread->IsOnCurrentThread());
  // No frames are held back between Decode() calls, so there is nothing to
  // flush out at end-of-stream.
  DecodedData nothing;
  return DecodePromise::CreateAndResolve(std::move(nothing), __func__);
}
// Reset decoder state ahead of a seek/discontinuity. Removes a redundant
// MOZ_ASSERT(mOpusDecoder) that immediately followed the !mOpusDecoder
// early-return guard and could never fire.
RefPtr<MediaDataDecoder::FlushPromise> OpusDataDecoder::Flush() {
  MOZ_ASSERT(mThread->IsOnCurrentThread());
  if (!mOpusDecoder) {
    // Init() never created a decoder; nothing to reset.
    return FlushPromise::CreateAndResolve(true, __func__);
  }

  // Reset the decoder and re-arm pre-skip trimming for the next segment.
  opus_multistream_decoder_ctl(mOpusDecoder, OPUS_RESET_STATE);
  mSkip = mOpusParser->mPreSkip;
  mPaddingDiscarded = false;
  mLastFrameTime.reset();
  return FlushPromise::CreateAndResolve(true, __func__);
}
/* static */
bool OpusDataDecoder::IsOpus(const nsACString& aMimeType) {
return aMimeType.EqualsLiteral("audio/opus");
}
} // namespace mozilla
#undef OPUS_DEBUG

View file

@ -1,70 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined(OpusDecoder_h_)
# define OpusDecoder_h_
# include "PlatformDecoderModule.h"
# include "mozilla/Maybe.h"
# include "nsTArray.h"
struct OpusMSDecoder;
namespace mozilla {
class OpusParser;
DDLoggedTypeDeclNameAndBase(OpusDataDecoder, MediaDataDecoder);
// MediaDataDecoder decoding Opus audio via libopus' multistream API.
// Init() captures a serial event target; all later entry points assert they
// run on it.
class OpusDataDecoder final : public MediaDataDecoder,
                              public DecoderDoctorLifeLogger<OpusDataDecoder> {
 public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(OpusDataDecoder, final);

  explicit OpusDataDecoder(const CreateDecoderParams& aParams);

  RefPtr<InitPromise> Init() override;
  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
  RefPtr<DecodePromise> Drain() override;
  RefPtr<FlushPromise> Flush() override;
  RefPtr<ShutdownPromise> Shutdown() override;
  nsCString GetDescriptionName() const override {
    return "opus audio decoder"_ns;
  }
  nsCString GetCodecName() const override { return "opus"_ns; }

  // Return true if mimetype is Opus
  static bool IsOpus(const nsACString& aMimeType);

 private:
  ~OpusDataDecoder();
  // Parses the Opus identification header and builds mMappingTable /
  // mChannelMap; must run before the decoder is created.
  nsresult DecodeHeader(const unsigned char* aData, size_t aLength);

  const AudioInfo mInfo;
  // Serial event target captured in Init().
  nsCOMPtr<nsISerialEventTarget> mThread;

  // Opus decoder state
  UniquePtr<OpusParser> mOpusParser;
  OpusMSDecoder* mOpusDecoder;

  uint16_t mSkip;  // Samples left to trim before playback.
  bool mDecodedHeader;

  // Opus padding should only be discarded on the final packet. Once this
  // is set to true, if the reader attempts to decode any further packets it
  // will raise an error so we can indicate that the file is invalid.
  bool mPaddingDiscarded;
  int64_t mFrames;
  // Running count of frames produced over the decoder's lifetime (debug).
  int64_t mTotalFrames = 0;
  Maybe<int64_t> mLastFrameTime;
  // Channel re-ordering table handed to libopus (possibly SMPTE-remapped).
  AutoTArray<uint8_t, 8> mMappingTable;
  AudioConfig::ChannelLayout::ChannelMap mChannelMap;
  // True when playback is known to be mono; disables stereo phase inversion.
  bool mDefaultPlaybackDeviceMono;
};
} // namespace mozilla
#endif

View file

@ -1,363 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "VorbisDecoder.h"
#include "VideoUtils.h"
#include "XiphExtradata.h"
#include "mozilla/Logging.h"
#include "mozilla/PodOperations.h"
#include "mozilla/SyncRunnable.h"
#undef LOG
#define LOG(type, msg) MOZ_LOG(sPDMLog, type, msg)
namespace mozilla {
// Builds an ogg_packet wrapping (not copying) the supplied payload.
ogg_packet InitVorbisPacket(const unsigned char* aData, size_t aLength,
                            bool aBOS, bool aEOS, int64_t aGranulepos,
                            int64_t aPacketNo) {
  ogg_packet result;
  result.b_o_s = aBOS;
  result.e_o_s = aEOS;
  result.bytes = aLength;
  // ogg_packet wants a mutable pointer but the data is never written to.
  result.packet = const_cast<unsigned char*>(aData);
  result.granulepos = aGranulepos;
  result.packetno = aPacketNo;
  return result;
}
// Construct a Vorbis decoder for the audio track described by aParams.
// libvorbis state is fully initialized only in Init().
VorbisDataDecoder::VorbisDataDecoder(const CreateDecoderParams& aParams)
    : mInfo(aParams.AudioConfig()), mPacketCount(0), mFrames(0) {
  // Zero these member vars to avoid crashes in Vorbis clear functions when
  // destructor is called before |Init|.
  PodZero(&mVorbisBlock);
  PodZero(&mVorbisDsp);
  PodZero(&mVorbisInfo);
  PodZero(&mVorbisComment);
}
VorbisDataDecoder::~VorbisDataDecoder() {
  // Clear in reverse of Init()'s setup order: the block was initialized from
  // the DSP state, which was initialized from the info struct.
  vorbis_block_clear(&mVorbisBlock);
  vorbis_dsp_clear(&mVorbisDsp);
  vorbis_info_clear(&mVorbisInfo);
  vorbis_comment_clear(&mVorbisComment);
}
// Nothing asynchronous to tear down; libvorbis state is cleared by the
// destructor, so this resolves immediately.
RefPtr<ShutdownPromise> VorbisDataDecoder::Shutdown() {
  // mThread may not be set if Init hasn't been called first.
  MOZ_ASSERT(!mThread || mThread->IsOnCurrentThread());
  return ShutdownPromise::CreateAndResolve(true, __func__);
}
// Initialize libvorbis state from the codec specific data (the three Xiph
// header packets) and validate the resulting configuration against the
// container-provided AudioInfo. Fixes two error-message typos
// ("could not get decode" and "Systhesis").
RefPtr<MediaDataDecoder::InitPromise> VorbisDataDecoder::Init() {
  mThread = GetCurrentSerialEventTarget();
  vorbis_info_init(&mVorbisInfo);
  vorbis_comment_init(&mVorbisComment);
  PodZero(&mVorbisDsp);
  PodZero(&mVorbisBlock);

  AutoTArray<unsigned char*, 4> headers;
  AutoTArray<size_t, 4> headerLens;
  MOZ_ASSERT(mInfo.mCodecSpecificConfig.is<VorbisCodecSpecificData>(),
             "Vorbis decoder should get vorbis codec specific data");
  RefPtr<MediaByteBuffer> vorbisHeaderBlob =
      GetAudioCodecSpecificBlob(mInfo.mCodecSpecificConfig);
  // Split the Xiph-laced extradata blob into the individual header packets.
  if (!XiphExtradataToHeaders(headers, headerLens, vorbisHeaderBlob->Elements(),
                              vorbisHeaderBlob->Length())) {
    LOG(LogLevel::Warning, ("VorbisDecoder: could not get vorbis header"));
    return InitPromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                    RESULT_DETAIL("Could not get vorbis header.")),
        __func__);
  }
  for (size_t i = 0; i < headers.Length(); i++) {
    if (NS_FAILED(DecodeHeader(headers[i], headerLens[i]))) {
      LOG(LogLevel::Warning,
          ("VorbisDecoder: could not decode vorbis header"));
      return InitPromise::CreateAndReject(
          MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                      RESULT_DETAIL("Could not decode vorbis header.")),
          __func__);
    }
  }

  // All three header packets (identification, comment, setup) are required.
  MOZ_ASSERT(mPacketCount == 3);

  int r = vorbis_synthesis_init(&mVorbisDsp, &mVorbisInfo);
  if (r) {
    LOG(LogLevel::Warning, ("VorbisDecoder: could not init vorbis decoder"));
    return InitPromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                    RESULT_DETAIL("Synthesis init fail.")),
        __func__);
  }

  r = vorbis_block_init(&mVorbisDsp, &mVorbisBlock);
  if (r) {
    LOG(LogLevel::Warning, ("VorbisDecoder: could not init vorbis block"));
    return InitPromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                    RESULT_DETAIL("Block init fail.")),
        __func__);
  }

  // Mismatches between container and codec values are logged but not fatal.
  if (mInfo.mRate != (uint32_t)mVorbisDsp.vi->rate) {
    LOG(LogLevel::Warning, ("VorbisDecoder: Invalid Vorbis header: container "
                            "and codec rate do not match!"));
  }
  if (mInfo.mChannels != (uint32_t)mVorbisDsp.vi->channels) {
    LOG(LogLevel::Warning, ("VorbisDecoder: Invalid Vorbis header: container "
                            "and codec channels do not match!"));
  }

  AudioConfig::ChannelLayout layout(mVorbisDsp.vi->channels);
  if (!layout.IsValid()) {
    LOG(LogLevel::Warning,
        ("VorbisDecoder: Invalid Vorbis header: invalid channel layout!"));
    return InitPromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                    RESULT_DETAIL("Invalid audio layout.")),
        __func__);
  }

  return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
}
// Feed one of the three Vorbis header packets to libvorbis.
nsresult VorbisDataDecoder::DecodeHeader(const unsigned char* aData,
                                         size_t aLength) {
  // Only the very first header packet carries the beginning-of-stream flag.
  const bool isFirstPacket = mPacketCount == 0;
  ogg_packet packet =
      InitVorbisPacket(aData, aLength, isFirstPacket, false, 0, mPacketCount++);
  MOZ_ASSERT(mPacketCount <= 3);

  if (vorbis_synthesis_headerin(&mVorbisInfo, &mVorbisComment, &packet) != 0) {
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
// Decode one Vorbis packet; may produce several AudioData chunks because
// vorbis_synthesis_pcmout can return the PCM in multiple batches.
// Fix: the two LOG(...) warnings after vorbis_synthesis /
// vorbis_synthesis_blockin failures were placed AFTER the return statement
// and were unreachable; they now run before rejecting.
RefPtr<MediaDataDecoder::DecodePromise> VorbisDataDecoder::Decode(
    MediaRawData* aSample) {
  MOZ_ASSERT(mThread->IsOnCurrentThread());
  PROCESS_DECODE_LOG(aSample);

  const unsigned char* aData = aSample->Data();
  size_t aLength = aSample->Size();
  int64_t aOffset = aSample->mOffset;

  // All three header packets must have been consumed by Init().
  MOZ_ASSERT(mPacketCount >= 3);

  if (!mLastFrameTime ||
      mLastFrameTime.ref() != aSample->mTime.ToMicroseconds()) {
    // We are starting a new block.
    mFrames = 0;
    mLastFrameTime = Some(aSample->mTime.ToMicroseconds());
  }

  ogg_packet pkt =
      InitVorbisPacket(aData, aLength, false, aSample->mEOS,
                       aSample->mTimecode.ToMicroseconds(), mPacketCount++);

  int err = vorbis_synthesis(&mVorbisBlock, &pkt);
  if (err) {
    LOG(LogLevel::Warning, ("vorbis_synthesis returned an error"));
    return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                    RESULT_DETAIL("vorbis_synthesis:%d", err)),
        __func__);
  }

  err = vorbis_synthesis_blockin(&mVorbisDsp, &mVorbisBlock);
  if (err) {
    LOG(LogLevel::Warning, ("vorbis_synthesis_blockin returned an error"));
    return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                    RESULT_DETAIL("vorbis_synthesis_blockin:%d", err)),
        __func__);
  }

  float** pcm = nullptr;
  int32_t frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
  if (frames == 0) {
    return DecodePromise::CreateAndResolve(DecodedData(), __func__);
  }

  DecodedData results;
  while (frames > 0) {
    uint32_t channels = mVorbisDsp.vi->channels;
    uint32_t rate = mVorbisDsp.vi->rate;
    AlignedAudioBuffer buffer(frames * channels);
    if (!buffer) {
      LOG(LogLevel::Warning, ("VorbisDecoder: cannot allocate buffer"));
      return DecodePromise::CreateAndReject(
          MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
    }
    // Interleave: libvorbis hands back one plane of floats per channel.
    for (uint32_t j = 0; j < channels; ++j) {
      float* channel = pcm[j];
      for (uint32_t i = 0; i < uint32_t(frames); ++i) {
        buffer[i * channels + j] = channel[i];
      }
    }

    auto duration = media::TimeUnit(frames, rate);
    if (!duration.IsValid()) {
      LOG(LogLevel::Warning, ("VorbisDecoder: invalid packet duration"));
      return DecodePromise::CreateAndReject(
          MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                      RESULT_DETAIL("Overflow converting audio duration")),
          __func__);
    }
    auto total_duration = media::TimeUnit(mFrames, rate);
    if (!total_duration.IsValid()) {
      LOG(LogLevel::Warning, ("VorbisDecoder: invalid total duration"));
      return DecodePromise::CreateAndReject(
          MediaResult(
              NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
              RESULT_DETAIL("Overflow converting audio total_duration")),
          __func__);
    }

    auto time = total_duration + aSample->mTime;
    if (!time.IsValid()) {
      LOG(LogLevel::Warning, ("VorbisDecoder: invalid sample time"));
      return DecodePromise::CreateAndReject(
          MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
                      RESULT_DETAIL(
                          "Overflow adding total_duration and aSample->mTime")),
          __func__);
    }

    if (!mAudioConverter) {
      // Lazily build a converter from Vorbis channel order to SMPTE order.
      const AudioConfig::ChannelLayout layout =
          AudioConfig::ChannelLayout(channels, VorbisLayout(channels));
      AudioConfig in(layout, channels, rate);
      AudioConfig out(AudioConfig::ChannelLayout::SMPTEDefault(layout),
                      channels, rate);
      mAudioConverter = MakeUnique<AudioConverter>(in, out);
    }
    MOZ_ASSERT(mAudioConverter->CanWorkInPlace());
    AudioSampleBuffer data(std::move(buffer));
    data = mAudioConverter->Process(std::move(data));

    RefPtr<AudioData> audio =
        new AudioData(aOffset, time, data.Forget(), channels, rate,
                      mAudioConverter->OutputConfig().Layout().Map());
    MOZ_DIAGNOSTIC_ASSERT(duration == audio->mDuration, "must be equal");
    results.AppendElement(std::move(audio));
    mFrames += frames;

    // Tell libvorbis how many frames we consumed, then ask for more.
    err = vorbis_synthesis_read(&mVorbisDsp, frames);
    if (err) {
      LOG(LogLevel::Warning, ("VorbisDecoder: vorbis_synthesis_read"));
      return DecodePromise::CreateAndReject(
          MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
                      RESULT_DETAIL("vorbis_synthesis_read:%d", err)),
          __func__);
    }

    frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
  }

  return DecodePromise::CreateAndResolve(std::move(results), __func__);
}
RefPtr<MediaDataDecoder::DecodePromise> VorbisDataDecoder::Drain() {
  MOZ_ASSERT(mThread->IsOnCurrentThread());
  // Decode() already pulls every available frame out of libvorbis per
  // packet, so there is nothing left to emit at end-of-stream.
  DecodedData nothing;
  return DecodePromise::CreateAndResolve(std::move(nothing), __func__);
}
RefPtr<MediaDataDecoder::FlushPromise> VorbisDataDecoder::Flush() {
  MOZ_ASSERT(mThread->IsOnCurrentThread());
  // A failed vorbis_synthesis_restart is deliberately ignored: it is not
  // fatal, and it fails whenever ResetDecode arrives before any vorbis data
  // has been read.
  (void)vorbis_synthesis_restart(&mVorbisDsp);
  // The next sample will be treated as the start of a new block.
  mLastFrameTime.reset();
  return FlushPromise::CreateAndResolve(true, __func__);
}
/* static */
bool VorbisDataDecoder::IsVorbis(const nsACString& aMimeType) {
return aMimeType.EqualsLiteral("audio/vorbis");
}
/* static */
// Returns the Vorbis-specified channel ordering for aChannels (1-8) as a
// pointer to a static table, or nullptr for unsupported channel counts.
// The tables must not be reordered: downstream code relies on exact Vorbis
// order when remapping to SMPTE.
const AudioConfig::Channel* VorbisDataDecoder::VorbisLayout(
    uint32_t aChannels) {
  // From https://www.xiph.org/vorbis/doc/Vorbis_I_spec.html
  // Section 4.3.9.
  typedef AudioConfig::Channel Channel;

  switch (aChannels) {
    case 1:  // the stream is monophonic
    {
      static const Channel config[] = {AudioConfig::CHANNEL_FRONT_CENTER};
      return config;
    }
    case 2:  // the stream is stereo. channel order: left, right
    {
      static const Channel config[] = {AudioConfig::CHANNEL_FRONT_LEFT,
                                       AudioConfig::CHANNEL_FRONT_RIGHT};
      return config;
    }
    case 3:  // the stream is a 1d-surround encoding. channel order: left,
             // center, right
    {
      static const Channel config[] = {AudioConfig::CHANNEL_FRONT_LEFT,
                                       AudioConfig::CHANNEL_FRONT_CENTER,
                                       AudioConfig::CHANNEL_FRONT_RIGHT};
      return config;
    }
    case 4:  // the stream is quadraphonic surround. channel order: front left,
             // front right, rear left, rear right
    {
      static const Channel config[] = {
          AudioConfig::CHANNEL_FRONT_LEFT, AudioConfig::CHANNEL_FRONT_RIGHT,
          AudioConfig::CHANNEL_BACK_LEFT, AudioConfig::CHANNEL_BACK_RIGHT};
      return config;
    }
    case 5:  // the stream is five-channel surround. channel order: front left,
             // center, front right, rear left, rear right
    {
      static const Channel config[] = {
          AudioConfig::CHANNEL_FRONT_LEFT, AudioConfig::CHANNEL_FRONT_CENTER,
          AudioConfig::CHANNEL_FRONT_RIGHT, AudioConfig::CHANNEL_BACK_LEFT,
          AudioConfig::CHANNEL_BACK_RIGHT};
      return config;
    }
    case 6:  // the stream is 5.1 surround. channel order: front left, center,
             // front right, rear left, rear right, LFE
    {
      static const Channel config[] = {
          AudioConfig::CHANNEL_FRONT_LEFT, AudioConfig::CHANNEL_FRONT_CENTER,
          AudioConfig::CHANNEL_FRONT_RIGHT, AudioConfig::CHANNEL_BACK_LEFT,
          AudioConfig::CHANNEL_BACK_RIGHT, AudioConfig::CHANNEL_LFE};
      return config;
    }
    case 7:  // surround. channel order: front left, center, front right, side
             // left, side right, rear center, LFE
    {
      static const Channel config[] = {
          AudioConfig::CHANNEL_FRONT_LEFT, AudioConfig::CHANNEL_FRONT_CENTER,
          AudioConfig::CHANNEL_FRONT_RIGHT, AudioConfig::CHANNEL_SIDE_LEFT,
          AudioConfig::CHANNEL_SIDE_RIGHT, AudioConfig::CHANNEL_BACK_CENTER,
          AudioConfig::CHANNEL_LFE};
      return config;
    }
    case 8:  // the stream is 7.1 surround. channel order: front left, center,
             // front right, side left, side right, rear left, rear right, LFE
    {
      static const Channel config[] = {
          AudioConfig::CHANNEL_FRONT_LEFT, AudioConfig::CHANNEL_FRONT_CENTER,
          AudioConfig::CHANNEL_FRONT_RIGHT, AudioConfig::CHANNEL_SIDE_LEFT,
          AudioConfig::CHANNEL_SIDE_RIGHT, AudioConfig::CHANNEL_BACK_LEFT,
          AudioConfig::CHANNEL_BACK_RIGHT, AudioConfig::CHANNEL_LFE};
      return config;
    }
    default:
      // Channel counts outside 1-8 have no defined Vorbis layout.
      return nullptr;
  }
}
} // namespace mozilla
#undef LOG

View file

@ -1,62 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined(VorbisDecoder_h_)
# define VorbisDecoder_h_
# include "AudioConverter.h"
# include "PlatformDecoderModule.h"
# include "mozilla/Maybe.h"
# include <vorbis/codec.h>
namespace mozilla {
DDLoggedTypeDeclNameAndBase(VorbisDataDecoder, MediaDataDecoder);
// Audio decoder for Vorbis streams, implemented on top of libvorbis
// (<vorbis/codec.h>). One instance decodes a single demuxed audio track.
class VorbisDataDecoder final
    : public MediaDataDecoder,
      public DecoderDoctorLifeLogger<VorbisDataDecoder> {
 public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VorbisDataDecoder, final);
  explicit VorbisDataDecoder(const CreateDecoderParams& aParams);
  // MediaDataDecoder:
  RefPtr<InitPromise> Init() override;
  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
  RefPtr<DecodePromise> Drain() override;
  RefPtr<FlushPromise> Flush() override;
  RefPtr<ShutdownPromise> Shutdown() override;
  nsCString GetDescriptionName() const override {
    return "vorbis audio decoder"_ns;
  }
  nsCString GetCodecName() const override { return "vorbis"_ns; }
  // Return true if mimetype is Vorbis
  static bool IsVorbis(const nsACString& aMimeType);
  // Maps a channel count to the canonical Vorbis speaker ordering; returns
  // nullptr for channel counts with no defined layout.
  static const AudioConfig::Channel* VorbisLayout(uint32_t aChannels);
 private:
  ~VorbisDataDecoder();
  // Feeds one Vorbis header packet to libvorbis.
  nsresult DecodeHeader(const unsigned char* aData, size_t aLength);
  const AudioInfo mInfo;                    // Track parameters from the demuxer.
  nsCOMPtr<nsISerialEventTarget> mThread;   // Serial target bound in Init().
  // Vorbis decoder state
  vorbis_info mVorbisInfo;
  vorbis_comment mVorbisComment;
  vorbis_dsp_state mVorbisDsp;
  vorbis_block mVorbisBlock;
  int64_t mPacketCount;            // Number of packets processed so far.
  int64_t mFrames;                 // Running count of decoded frames.
  Maybe<int64_t> mLastFrameTime;   // Set once a frame has been produced.
  // Converts decoded output to the expected channel layout when needed;
  // created lazily -- see the implementation for when it is instantiated.
  UniquePtr<AudioConverter> mAudioConverter;
};
} // namespace mozilla
#endif

View file

@ -1,162 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "WAVDecoder.h"
#include "AudioSampleFormat.h"
#include "BufferReader.h"
#include "VideoUtils.h"
#include "mozilla/Casting.h"
#include "mozilla/SyncRunnable.h"
namespace mozilla {
// Expands one G.711 A-law companded byte to a linear 16-bit PCM sample.
// The encoder XORs every other bit with 0x55, then packs sign/segment/step;
// this reverses that transform.
int16_t DecodeALawSample(uint8_t aValue) {
  const uint8_t toggled = aValue ^ 0x55;  // undo the even-bit inversion
  const bool negative = (toggled & 0x80) != 0;
  const uint8_t segment = (toggled >> 4) & 0x07;  // 3-bit exponent
  const uint8_t step = toggled & 0x0F;            // 4-bit mantissa
  int16_t magnitude = static_cast<int16_t>(step << 4);
  if (segment == 0) {
    magnitude += 8;  // lowest segment: linear, half-step offset
  } else {
    magnitude += 0x108;  // implicit leading bit plus half-step offset
    if (segment > 1) {
      magnitude <<= segment - 1;
    }
  }
  return negative ? -magnitude : magnitude;
}
// Expands one G.711 mu-law companded byte to a linear 16-bit PCM sample.
// mu-law bytes are transmitted bit-inverted; undo that, then unpack the
// sign/segment/step fields and apply the logarithmic expansion.
int16_t DecodeULawSample(uint8_t aValue) {
  const uint8_t inverted = static_cast<uint8_t>(aValue ^ 0xFF);
  const bool negative = (inverted & 0x80) != 0;
  const uint8_t segment = (inverted >> 4) & 0x07;  // 3-bit exponent
  const uint8_t step = inverted & 0x0F;            // 4-bit mantissa
  // (2 << (segment + 1)) in the original form == (4 << segment).
  const int16_t magnitude =
      static_cast<int16_t>((33 + 2 * step) * (4 << segment) - 33);
  return negative ? -magnitude : magnitude;
}
// WAVE needs no codec state; just capture the stream's audio parameters
// (channels, rate, bit depth, profile) for use during Decode().
WaveDataDecoder::WaveDataDecoder(const CreateDecoderParams& aParams)
    : mInfo(aParams.AudioConfig()) {}
// Nothing to tear down -- this decoder holds no codec resources -- so
// shutdown resolves immediately.
RefPtr<ShutdownPromise> WaveDataDecoder::Shutdown() {
  // mThread may not be set if Init hasn't been called first.
  MOZ_ASSERT(!mThread || mThread->IsOnCurrentThread());
  return ShutdownPromise::CreateAndResolve(true, __func__);
}
// Binds the decoder to the calling serial event target (later calls assert
// they run there) and resolves right away -- no codec setup is required.
RefPtr<MediaDataDecoder::InitPromise> WaveDataDecoder::Init() {
  mThread = GetCurrentSerialEventTarget();
  return InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__);
}
// Converts one packet of WAVE audio into an AudioData block. WAVE carries
// raw (or companded) samples, so "decoding" is a per-sample format
// conversion; the promise resolves synchronously. mProfile selects the
// conversion: 3 = IEEE float, 6 = A-law, 7 = mu-law, anything else is
// treated as integer PCM at mBitDepth bits.
RefPtr<MediaDataDecoder::DecodePromise> WaveDataDecoder::Decode(
    MediaRawData* aSample) {
  MOZ_ASSERT(mThread->IsOnCurrentThread());
  size_t aLength = aSample->Size();
  BufferReader aReader(aSample->Data(), aLength);
  int64_t aOffset = aSample->mOffset;
  // Whole frames in this packet (mBitDepth is bits per single sample).
  int32_t frames = aLength * 8 / mInfo.mBitDepth / mInfo.mChannels;
  AlignedAudioBuffer buffer(frames * mInfo.mChannels);
  if (!buffer) {
    return DecodePromise::CreateAndReject(
        MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
  }
  // Samples are interleaved: frame-major, channel-minor. Any read failure
  // (truncated packet) rejects the whole decode.
  for (int i = 0; i < frames; ++i) {
    for (unsigned int j = 0; j < mInfo.mChannels; ++j) {
      if (mInfo.mProfile == 3) {  // IEEE Float Data
        auto res = aReader.ReadLEU32();
        if (res.isErr()) {
          return DecodePromise::CreateAndReject(
              MediaResult(res.unwrapErr(), __func__), __func__);
        }
        // Reinterpret the little-endian 32-bit word as a float.
        float sample = BitwiseCast<float>(res.unwrap());
        buffer[i * mInfo.mChannels + j] =
            FloatToAudioSample<AudioDataValue>(sample);
      } else if (mInfo.mProfile == 6) {  // ALAW Data
        auto res = aReader.ReadU8();
        if (res.isErr()) {
          return DecodePromise::CreateAndReject(
              MediaResult(res.unwrapErr(), __func__), __func__);
        }
        int16_t decoded = DecodeALawSample(res.unwrap());
        buffer[i * mInfo.mChannels + j] =
            IntegerToAudioSample<AudioDataValue>(decoded);
      } else if (mInfo.mProfile == 7) {  // ULAW Data
        auto res = aReader.ReadU8();
        if (res.isErr()) {
          return DecodePromise::CreateAndReject(
              MediaResult(res.unwrapErr(), __func__), __func__);
        }
        int16_t decoded = DecodeULawSample(res.unwrap());
        buffer[i * mInfo.mChannels + j] =
            IntegerToAudioSample<AudioDataValue>(decoded);
      } else {  // PCM Data
        // NOTE(review): only 8/16/24-bit PCM are handled; other depths
        // (e.g. 32-bit int) silently leave buffer entries at their initial
        // contents -- confirm upstream demuxer rejects such streams.
        if (mInfo.mBitDepth == 8) {
          auto res = aReader.ReadU8();
          if (res.isErr()) {
            return DecodePromise::CreateAndReject(
                MediaResult(res.unwrapErr(), __func__), __func__);
          }
          // 8-bit WAVE PCM is unsigned, centered at 128.
          buffer[i * mInfo.mChannels + j] =
              UInt8bitToAudioSample<AudioDataValue>(res.unwrap());
        } else if (mInfo.mBitDepth == 16) {
          auto res = aReader.ReadLE16();
          if (res.isErr()) {
            return DecodePromise::CreateAndReject(
                MediaResult(res.unwrapErr(), __func__), __func__);
          }
          buffer[i * mInfo.mChannels + j] =
              IntegerToAudioSample<AudioDataValue>(res.unwrap());
        } else if (mInfo.mBitDepth == 24) {
          auto res = aReader.ReadLE24();
          if (res.isErr()) {
            return DecodePromise::CreateAndReject(
                MediaResult(res.unwrapErr(), __func__), __func__);
          }
          buffer[i * mInfo.mChannels + j] =
              Int24bitToAudioSample<AudioDataValue>(res.unwrap());
        }
      }
    }
  }
  // Timestamp comes straight from the demuxed sample; duration is implied
  // by the frame count and rate.
  return DecodePromise::CreateAndResolve(
      DecodedData{new AudioData(aOffset, aSample->mTime, std::move(buffer),
                                mInfo.mChannels, mInfo.mRate)},
      __func__);
}
// Decode() never buffers samples, so draining produces no extra output.
RefPtr<MediaDataDecoder::DecodePromise> WaveDataDecoder::Drain() {
  MOZ_ASSERT(mThread->IsOnCurrentThread());
  return DecodePromise::CreateAndResolve(DecodedData(), __func__);
}
// Stateless decoder: there is nothing to discard, so flush always succeeds.
RefPtr<MediaDataDecoder::FlushPromise> WaveDataDecoder::Flush() {
  MOZ_ASSERT(mThread->IsOnCurrentThread());
  return FlushPromise::CreateAndResolve(true, __func__);
}
/* static */
bool WaveDataDecoder::IsWave(const nsACString& aMimeType) {
// Some WebAudio uses "audio/x-wav",
// WAVdemuxer uses "audio/wave; codecs=aNum".
return aMimeType.EqualsLiteral("audio/x-wav") ||
aMimeType.EqualsLiteral("audio/wave; codecs=1") ||
aMimeType.EqualsLiteral("audio/wave; codecs=3") ||
aMimeType.EqualsLiteral("audio/wave; codecs=6") ||
aMimeType.EqualsLiteral("audio/wave; codecs=7") ||
aMimeType.EqualsLiteral("audio/wave; codecs=65534");
}
} // namespace mozilla
#undef LOG

View file

@ -1,44 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined(WaveDecoder_h_)
# define WaveDecoder_h_
# include "PlatformDecoderModule.h"
namespace mozilla {
DDLoggedTypeDeclNameAndBase(WaveDataDecoder, MediaDataDecoder);
// Pass-through decoder for WAVE audio. Sample conversion happens inline in
// Decode(); no external codec library is involved and no state is buffered
// between packets.
class WaveDataDecoder final : public MediaDataDecoder,
                              public DecoderDoctorLifeLogger<WaveDataDecoder> {
 public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WaveDataDecoder, final);
  explicit WaveDataDecoder(const CreateDecoderParams& aParams);
  // Return true if mimetype is Wave
  static bool IsWave(const nsACString& aMimeType);
  // MediaDataDecoder:
  RefPtr<InitPromise> Init() override;
  RefPtr<DecodePromise> Decode(MediaRawData* aSample) override;
  RefPtr<DecodePromise> Drain() override;
  RefPtr<FlushPromise> Flush() override;
  RefPtr<ShutdownPromise> Shutdown() override;
  nsCString GetDescriptionName() const override {
    return "wave audio decoder"_ns;
  }
  nsCString GetCodecName() const override { return "wave"_ns; }
 private:
  ~WaveDataDecoder() = default;
  const AudioInfo mInfo;                    // Stream parameters from creation.
  nsCOMPtr<nsISerialEventTarget> mThread;   // Serial target bound in Init().
};
} // namespace mozilla
#endif

View file

@ -1,4 +1,4 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
//* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
@ -10,11 +10,9 @@
# include "AOMDecoder.h"
#endif
#include "MediaInfo.h"
#include "OpusDecoder.h"
#include "RemoteDataDecoder.h"
#include "TheoraDecoder.h"
#include "VPXDecoder.h"
#include "VorbisDecoder.h"
#include "mozilla/ClearOnShutdown.h"
#include "mozilla/Components.h"
#include "mozilla/StaticPrefs_media.h"
@ -101,37 +99,25 @@ DecodeSupportSet AndroidDecoderModule::SupportsMimeType(
}
break;
// Prefer the ffvpx mp3 software decoder if available.
case MediaCodec::MP3:
if (StaticPrefs::media_ffvpx_mp3_enabled()) {
return media::DecodeSupportSet{};
}
if (sSupportedCodecs &&
sSupportedCodecs->contains(MediaCodecsSupport::MP3SoftwareDecode)) {
return DecodeSupport::SoftwareDecode;
}
return media::DecodeSupportSet{};
// Prefer the gecko decoder for theora/opus/vorbis; stagefright crashes
// on content demuxed from mp4.
// Not all android devices support FLAC/theora even when they say they do.
case MediaCodec::Theora:
SLOG("Rejecting video of type %s", aMimeType.Data());
return media::DecodeSupportSet{};
// Always use our own software decoder (in ffvpx) for audio except for AAC
case MediaCodec::MP3:
[[fallthrough]];
case MediaCodec::Opus:
[[fallthrough]];
case MediaCodec::Vorbis:
[[fallthrough]];
case MediaCodec::Wave:
[[fallthrough]];
case MediaCodec::FLAC:
SLOG("Rejecting audio of type %s", aMimeType.Data());
return media::DecodeSupportSet{};
// When checking "audio/x-wav", CreateDecoder can cause a JNI ERROR by
// Accessing a stale local reference leading to a SIGSEGV crash.
// To avoid this we check for wav types here.
case MediaCodec::Wave:
return media::DecodeSupportSet{};
// H264 always reports software decode
case MediaCodec::H264:
return DecodeSupport::SoftwareDecode;

View file

@ -81,9 +81,7 @@ already_AddRefed<MediaDataDecoder> AppleDecoderModule::CreateAudioDecoder(
DecodeSupportSet AppleDecoderModule::SupportsMimeType(
const nsACString& aMimeType, DecoderDoctorDiagnostics* aDiagnostics) const {
bool checkSupport = (aMimeType.EqualsLiteral("audio/mpeg") &&
!StaticPrefs::media_ffvpx_mp3_enabled()) ||
aMimeType.EqualsLiteral("audio/mp4a-latm") ||
bool checkSupport = aMimeType.EqualsLiteral("audio/mp4a-latm") ||
MP4Decoder::IsH264(aMimeType) ||
VPXDecoder::IsVP9(aMimeType);
DecodeSupportSet supportType{};

View file

@ -425,41 +425,23 @@ MediaResult FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
AVCodecID FFmpegAudioDecoder<LIBAV_VER>::GetCodecId(const nsACString& aMimeType,
const AudioInfo& aInfo) {
if (aMimeType.EqualsLiteral("audio/mpeg")) {
if (aMimeType.EqualsLiteral("audio/mp4a-latm")) {
return AV_CODEC_ID_AAC;
}
#ifdef FFVPX_VERSION
if (!StaticPrefs::media_ffvpx_mp3_enabled()) {
return AV_CODEC_ID_NONE;
}
#endif
if (aMimeType.EqualsLiteral("audio/mpeg")) {
return AV_CODEC_ID_MP3;
}
if (aMimeType.EqualsLiteral("audio/flac")) {
return AV_CODEC_ID_FLAC;
}
if (aMimeType.EqualsLiteral("audio/mp4a-latm")) {
return AV_CODEC_ID_AAC;
}
if (aMimeType.EqualsLiteral("audio/vorbis")) {
#ifdef FFVPX_VERSION
if (!StaticPrefs::media_ffvpx_vorbis_enabled()) {
return AV_CODEC_ID_NONE;
}
#endif
return AV_CODEC_ID_VORBIS;
}
#ifdef FFVPX_VERSION
if (aMimeType.EqualsLiteral("audio/opus")) {
if (!StaticPrefs::media_ffvpx_opus_enabled()) {
return AV_CODEC_ID_NONE;
}
return AV_CODEC_ID_OPUS;
}
#endif
#ifdef FFVPX_VERSION
if (aMimeType.Find("wav") != kNotFound) {
if (!StaticPrefs::media_ffvpx_wav_enabled()) {
return AV_CODEC_ID_NONE;
}
if (aMimeType.EqualsLiteral("audio/x-wav") ||
aMimeType.EqualsLiteral("audio/wave; codecs=1") ||
aMimeType.EqualsLiteral("audio/wave; codecs=65534")) {

View file

@ -8,11 +8,8 @@ EXPORTS += [
"agnostic/AgnosticDecoderModule.h",
"agnostic/BlankDecoderModule.h",
"agnostic/DummyMediaDataDecoder.h",
"agnostic/OpusDecoder.h",
"agnostic/TheoraDecoder.h",
"agnostic/VorbisDecoder.h",
"agnostic/VPXDecoder.h",
"agnostic/WAVDecoder.h",
"AllocationPolicy.h",
"MediaCodecsSupport.h",
"MediaTelemetryConstants.h",
@ -32,11 +29,8 @@ UNIFIED_SOURCES += [
"agnostic/BlankDecoderModule.cpp",
"agnostic/DummyMediaDataDecoder.cpp",
"agnostic/NullDecoderModule.cpp",
"agnostic/OpusDecoder.cpp",
"agnostic/TheoraDecoder.cpp",
"agnostic/VorbisDecoder.cpp",
"agnostic/VPXDecoder.cpp",
"agnostic/WAVDecoder.cpp",
"AllocationPolicy.cpp",
"MediaCodecsSupport.cpp",
"PDMFactory.cpp",

View file

@ -303,12 +303,9 @@ bool WMFDecoderModule::CanCreateMFTDecoder(const WMFStreamType& aType) {
return false;
}
break;
// Always use ffvpx for mp3
case WMFStreamType::MP3:
// Prefer ffvpx mp3 decoder over WMF.
if (StaticPrefs::media_ffvpx_mp3_enabled()) {
return false;
}
break;
return false;
default:
break;
}

View file

@ -16,9 +16,7 @@
# include "AOMDecoder.h"
#endif
#include "MP4Decoder.h"
#include "OpusDecoder.h"
#include "VideoUtils.h"
#include "VorbisDecoder.h"
#include "VPXDecoder.h"
#include "mozilla/ArrayUtils.h"
#include "mozilla/CheckedInt.h"
@ -117,10 +115,10 @@ WMFStreamType GetStreamTypeFromMimeType(const nsCString& aMimeType) {
if (aMimeType.EqualsLiteral("audio/mpeg")) {
return WMFStreamType::MP3;
}
if (OpusDataDecoder::IsOpus(aMimeType)) {
if (aMimeType.EqualsLiteral("audio/opus")) {
return WMFStreamType::OPUS;
}
if (VorbisDataDecoder::IsVorbis(aMimeType)) {
if (aMimeType.EqualsLiteral("audio/vorbis")) {
return WMFStreamType::VORBIS;
}
return WMFStreamType::Unknown;
@ -304,9 +302,9 @@ GUID AudioMimeTypeToMediaFoundationSubtype(const nsACString& aMimeType) {
return MFAudioFormat_MP3;
} else if (MP4Decoder::IsAAC(aMimeType)) {
return MFAudioFormat_AAC;
} else if (VorbisDataDecoder::IsVorbis(aMimeType)) {
} else if (aMimeType.EqualsLiteral("audio/vorbis")) {
return MFAudioFormat_Vorbis;
} else if (OpusDataDecoder::IsOpus(aMimeType)) {
} else if (aMimeType.EqualsLiteral("audio/opus")) {
return MFAudioFormat_Opus;
}
NS_WARNING("Unsupport audio mimetype");

View file

@ -12,10 +12,4 @@
using namespace mozilla;
TEST(CanCreateMFTDecoder, NoIPC)
{
const auto ffvpxMP3Pref = StaticPrefs::GetPrefName_media_ffvpx_mp3_enabled();
const bool ffvpxMP3WasOn = Preferences::GetBool(ffvpxMP3Pref);
Preferences::SetBool(ffvpxMP3Pref, false);
EXPECT_TRUE(WMFDecoderModule::CanCreateMFTDecoder(WMFStreamType::MP3));
Preferences::SetBool(ffvpxMP3Pref, ffvpxMP3WasOn);
}
{ EXPECT_TRUE(WMFDecoderModule::CanCreateMFTDecoder(WMFStreamType::H264)); }

View file

@ -9,7 +9,6 @@
#ifdef MOZ_AV1
# include "AOMDecoder.h"
#endif
#include "OpusDecoder.h"
#include "VPXDecoder.h"
#include "WebMDemuxer.h"
#include "WebMBufferedParser.h"

View file

@ -139,20 +139,12 @@ function audioTestData() {
decoder: "ffvpx audio decoder",
},
WINNT: {
process: SpecialPowers.getBoolPref("media.ffvpx.mp3.enabled")
? "Utility Generic"
: "Utility WMF",
decoder: SpecialPowers.getBoolPref("media.ffvpx.mp3.enabled")
? "ffvpx audio decoder"
: "wmf audio decoder",
process: "Utility Generic",
decoder: "ffvpx audio decoder",
},
Darwin: {
process: SpecialPowers.getBoolPref("media.ffvpx.mp3.enabled")
? "Utility Generic"
: "Utility AppleMedia",
decoder: SpecialPowers.getBoolPref("media.ffvpx.mp3.enabled")
? "ffvpx audio decoder"
: "apple coremedia decoder",
process: "Utility Generic",
decoder: "ffvpx audio decoder",
},
},
},

View file

@ -10100,42 +10100,6 @@
#endif
mirror: always
- name: media.ffvpx.mp3.enabled
type: RelaxedAtomicBool
#ifdef MOZ_FFVPX
value: true
#else
value: false
#endif
mirror: always
- name: media.ffvpx.vorbis.enabled
type: RelaxedAtomicBool
#ifdef MOZ_FFVPX
value: true
#else
value: false
#endif
mirror: always
- name: media.ffvpx.opus.enabled
type: RelaxedAtomicBool
#ifdef MOZ_FFVPX
value: true
#else
value: false
#endif
mirror: always
- name: media.ffvpx.wav.enabled
type: RelaxedAtomicBool
#ifdef MOZ_FFVPX
value: true
#else
value: false
#endif
mirror: always
# Set to true in marionette tests to disable the sanity test
# which would lead to unnecessary start of the RDD process.
- name: media.sanity-test.disabled