Backed out 57 changesets (bug 1839389, bug 1840869, bug 1840399, bug 1840402, bug 1823953, bug 1828912, bug 1826382, bug 1837160, bug 1839391, bug 1833654) for causing build bustages in ogg_<something> CLOSED TREE

Backed out changeset 61356e1447e3 (bug 1823953)
Backed out changeset 85785505b6d6 (bug 1823953)
Backed out changeset 46a61cbfe8a8 (bug 1833654)
Backed out changeset 83e3de80337b (bug 1833654)
Backed out changeset 1a10c12874ac (bug 1840399)
Backed out changeset 6b087145b67f (bug 1833654)
Backed out changeset b9ac857ad43f (bug 1840399)
Backed out changeset 4b841e8dd033 (bug 1823953)
Backed out changeset 650e35803834 (bug 1823953)
Backed out changeset c11b58ac0709 (bug 1823953)
Backed out changeset c0249c90bc31 (bug 1823953)
Backed out changeset 8929288d5aec (bug 1823953)
Backed out changeset 828792b886bd (bug 1823953)
Backed out changeset 873f1d4a8875 (bug 1840869)
Backed out changeset a25abd05302c (bug 1823953)
Backed out changeset d4b1eb442c36 (bug 1840399)
Backed out changeset c25509d72a96 (bug 1840399)
Backed out changeset 0f72a0626a28 (bug 1840402)
Backed out changeset 82e7574364ce (bug 1840399)
Backed out changeset 93073105f063 (bug 1840399)
Backed out changeset 56ec8e3405e9 (bug 1840399)
Backed out changeset ff15dad37ab8 (bug 1840399)
Backed out changeset 0655ebd61eda (bug 1840399)
Backed out changeset 7bca1ae06c7d (bug 1828912)
Backed out changeset 8a5a849cfe5f (bug 1828912)
Backed out changeset 3d8422a2038a (bug 1828912)
Backed out changeset f08ee5de9370 (bug 1823953)
Backed out changeset a4eb210620ff (bug 1823953)
Backed out changeset aa8914cd55be (bug 1839391)
Backed out changeset 3ea1f43e4024 (bug 1823953)
Backed out changeset 3efe02ffa1c8 (bug 1826382)
Backed out changeset 81c4553ec23d (bug 1839391)
Backed out changeset 130894e4a781 (bug 1839391)
Backed out changeset 9a0247b0fc85 (bug 1839391)
Backed out changeset 11a923064382 (bug 1839391)
Backed out changeset 98ffb66160c3 (bug 1837160)
Backed out changeset a80dda9a220a (bug 1837160)
Backed out changeset 251b4ef97a2b (bug 1837160)
Backed out changeset 7372632eb32f (bug 1837160)
Backed out changeset c5d54bc3ee26 (bug 1839389)
Backed out changeset b232ec1bbc2d (bug 1833654)
Backed out changeset fc7ba125c2fe (bug 1833654)
Backed out changeset 8a47f6882e61 (bug 1823953)
Backed out changeset e29810541b53 (bug 1828912)
Backed out changeset bcf10730c8c9 (bug 1828912)
Backed out changeset 8df8290b6c33 (bug 1826382)
Backed out changeset 2811d12803cf (bug 1826382)
Backed out changeset 3fc718561ec9 (bug 1826382)
Backed out changeset 7827183776e1 (bug 1823953)
Backed out changeset a3eb5f228d9a (bug 1826382)
Backed out changeset 3113ad2e0987 (bug 1823953)
Backed out changeset 4b1dc01525af (bug 1823953)
Backed out changeset f7f4a7585ceb (bug 1823953)
Backed out changeset 93042f1becec (bug 1823953)
Backed out changeset b9ca30a0a066 (bug 1823953)
Backed out changeset 1000c4a6a92a (bug 1823953)
Backed out changeset 05dc13775fd6 (bug 1823953)
Cristian Tuns 2023-08-01 09:37:39 -04:00
parent 6798545271
commit 5d6b51256b
165 changed files with 9843 additions and 6505 deletions

View file

@ -1368,6 +1368,7 @@ media/libpng/
media/libsoundtouch/
media/libspeex_resampler/
media/libtheora/
media/libtremor/
media/libvorbis/
media/libvpx/
media/libwebp/

View file

@ -22,4 +22,5 @@
# changes to stick? As of bug 928195, this shouldn't be necessary! Please
# don't change CLOBBER for WebIDL changes any more.
Modified system_headers to use in bug 1823953.
Merge day clobber 2023-07-31

View file

@ -151,7 +151,6 @@
@BINPATH@/@DLL_PREFIX@mozsqlite3@DLL_SUFFIX@
#endif
@BINPATH@/@DLL_PREFIX@lgpllibs@DLL_SUFFIX@
@BINPATH@/@DLL_PREFIX@gkcodecs@DLL_SUFFIX@
#ifdef MOZ_FFVPX
@BINPATH@/@DLL_PREFIX@mozavutil@DLL_SUFFIX@
@BINPATH@/@DLL_PREFIX@mozavcodec@DLL_SUFFIX@
@ -457,7 +456,6 @@ i686/xul.dll
i686/nss3.dll
i686/mozglue.dll
i686/lgpllibs.dll
i686/gkcodecs.dll
i686/msvcp140.dll
i686/vcruntime140.dll
i686/vcruntime140_1.dll

View file

@ -1,63 +0,0 @@
ogg_calloc_func
ogg_free_func
ogg_malloc_func
ogg_realloc_func
ogg_set_mem_functions
ogg_stream_clear
ogg_stream_eos
ogg_stream_flush
ogg_stream_init
ogg_stream_packetin
ogg_stream_pageout
opus_decode
opus_decoder_create
opus_decoder_ctl
opus_decoder_destroy
opus_encode
opus_encode_float
opus_encoder_create
opus_encoder_ctl
opus_encoder_destroy
opus_get_version_string
opus_multistream_decode
opus_multistream_decode_float
opus_multistream_decoder_create
opus_multistream_decoder_ctl
opus_multistream_decoder_destroy
opus_multistream_encode
opus_multistream_encoder_create
opus_multistream_encoder_ctl
opus_multistream_encoder_destroy
opus_packet_get_nb_channels
opus_packet_get_nb_frames
opus_packet_get_samples_per_frame
opus_packet_parse
opus_strerror
vorbis_block_clear
vorbis_block_init
vorbis_comment_clear
vorbis_comment_init
vorbis_dsp_clear
vorbis_info_clear
vorbis_info_init
vorbis_packet_blocksize
vorbis_synthesis
vorbis_synthesis_blockin
vorbis_synthesis_headerin
vorbis_synthesis_init
vorbis_synthesis_pcmout
vorbis_synthesis_read
vorbis_synthesis_restart
th_info_init
th_setup_free
th_decode_free
th_comment_clear
th_decode_headerin
th_packet_isheader
th_granule_frame
th_decode_packetin
th_decode_ycbcr_out
th_comment_init
th_packet_iskeyframe
th_info_clear
th_decode_alloc

View file

@ -1,16 +0,0 @@
# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# The gkcodecs library contains code from third-party libraries implementing
# encoding and decoding for particular audio codecs.
#
# They are compiled into a separate shared library so they can be made
# available both from libxul (when decoding using the codec integration layer
# Gecko provides) and from ffmpeg (when decoding through ffmpeg).
GeckoSharedLibrary("gkcodecs", linkage=None)
SHARED_LIBRARY_NAME = "gkcodecs"
SYMBOLS_FILE = "gkcodecs.symbols"

View file

@ -7,7 +7,6 @@
external_dirs = []
DIRS += [
"gkcodecs",
"lgpllibs",
"rlbox",
"sqlite",
@ -39,6 +38,9 @@ external_dirs += ["modules/xz-embedded"]
if CONFIG["MOZ_VORBIS"]:
external_dirs += ["media/libvorbis"]
if CONFIG["MOZ_TREMOR"]:
external_dirs += ["media/libtremor"]
if not CONFIG["MOZ_SYSTEM_LIBVPX"]:
external_dirs += ["media/libvpx"]

View file

@ -1012,11 +1012,6 @@ system_headers = [
'wx/xrc/xmlres.h',
'xlocale.h',
'zmouse.h',
'vorbis/codec.h',
'opus/opus.h',
'opus/opus_multistream.h',
'ogg/ogg.h',
'theora/theoradec.h'
]
if CONFIG['MOZ_X11']:

View file

@ -210,7 +210,13 @@ class AudioConfig {
FORMAT_S24,
FORMAT_S32,
FORMAT_FLT,
# if defined(MOZ_SAMPLE_TYPE_FLOAT32)
FORMAT_DEFAULT = FORMAT_FLT
# elif defined(MOZ_SAMPLE_TYPE_S16)
FORMAT_DEFAULT = FORMAT_S16
# else
# error "Not supported audio type"
# endif
};
AudioConfig(const ChannelLayout& aChannelLayout, uint32_t aRate,
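The preprocessor block above fixes the default sample format at build time; a minimal sketch of relying on that choice (hypothetical check, assuming the enumerators are public members of AudioConfig; not part of this patch):

// Sketch: on a MOZ_SAMPLE_TYPE_FLOAT32 build the default resolves at
// compile time, so the invariant can be asserted statically.
static_assert(AudioConfig::FORMAT_DEFAULT == AudioConfig::FORMAT_FLT,
              "float builds default to 32-bit float samples");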

View file

@ -25,8 +25,12 @@ enum AudioSampleFormat {
AUDIO_FORMAT_S16,
// Signed 32-bit float samples
AUDIO_FORMAT_FLOAT32,
// The format used for output by AudioStream.
// The format used for output by AudioStream.
#ifdef MOZ_SAMPLE_TYPE_S16
AUDIO_OUTPUT_FORMAT = AUDIO_FORMAT_S16
#else
AUDIO_OUTPUT_FORMAT = AUDIO_FORMAT_FLOAT32
#endif
};
enum { MAX_AUDIO_SAMPLE_SIZE = sizeof(float) };
@ -37,15 +41,15 @@ class AudioSampleTraits;
template <>
class AudioSampleTraits<AUDIO_FORMAT_FLOAT32> {
public:
using Type = float;
typedef float Type;
};
template <>
class AudioSampleTraits<AUDIO_FORMAT_S16> {
public:
using Type = int16_t;
typedef int16_t Type;
};
using AudioDataValue = AudioSampleTraits<AUDIO_OUTPUT_FORMAT>::Type;
typedef AudioSampleTraits<AUDIO_OUTPUT_FORMAT>::Type AudioDataValue;
template <typename T>
class AudioSampleTypeToFormat;
@ -70,11 +74,9 @@ class AudioSampleTypeToFormat<short> {
* http://blog.bjornroche.com/2009/12/linearity-and-dynamic-range-in-int.html
*/
inline float AudioSampleToFloat(float aValue) { return aValue; }
inline float AudioSampleToFloat(int16_t aValue) {
return static_cast<float>(aValue) / 32768.0f;
}
inline float AudioSampleToFloat(int16_t aValue) { return aValue / 32768.0f; }
inline float AudioSampleToFloat(int32_t aValue) {
return static_cast<float>(aValue) / (float)(1U << 31);
return aValue / (float)(1U << 31);
}
template <typename T>
@ -96,8 +98,7 @@ T UInt8bitToAudioSample(uint8_t aValue);
template <>
inline float UInt8bitToAudioSample<float>(uint8_t aValue) {
return static_cast<float>(aValue) * (static_cast<float>(2) / UINT8_MAX) -
static_cast<float>(1);
return aValue * (static_cast<float>(2) / UINT8_MAX) - static_cast<float>(1);
}
template <>
inline int16_t UInt8bitToAudioSample<int16_t>(uint8_t aValue) {
@ -109,7 +110,7 @@ T IntegerToAudioSample(int16_t aValue);
template <>
inline float IntegerToAudioSample<float>(int16_t aValue) {
return static_cast<float>(aValue) / 32768.0f;
return aValue / 32768.0f;
}
template <>
inline int16_t IntegerToAudioSample<int16_t>(int16_t aValue) {
@ -121,7 +122,7 @@ T Int24bitToAudioSample(int32_t aValue);
template <>
inline float Int24bitToAudioSample<float>(int32_t aValue) {
return static_cast<float>(aValue) / static_cast<float>(1 << 23);
return aValue / static_cast<float>(1 << 23);
}
template <>
inline int16_t Int24bitToAudioSample<int16_t>(int32_t aValue) {
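The helpers in this hunk all apply the same fixed scaling; a standalone sketch of the int16-to-float mapping they implement (plain C++, illustrative only, not part of the patch):

#include <cstdint>
#include <cstdio>

// Sketch of the AudioSampleToFloat(int16_t) scaling: dividing by 32768
// maps INT16_MIN exactly to -1.0f and INT16_MAX to just under +1.0f.
int main() {
  int16_t samples[] = {-32768, 0, 32767};
  for (int16_t s : samples) {
    float f = static_cast<float>(s) / 32768.0f;
    std::printf("%6d -> %f\n", s, f);  // -1.000000, 0.000000, 0.999969
  }
  return 0;
}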

View file

@ -7,7 +7,6 @@
#include "GraphDriver.h"
#include "AudioNodeEngine.h"
#include "cubeb/cubeb.h"
#include "mozilla/dom/AudioContext.h"
#include "mozilla/dom/AudioDeviceInfo.h"
#include "mozilla/dom/BaseAudioContextBinding.h"
@ -592,7 +591,14 @@ void AudioCallbackDriver::Init() {
"This is blocking and should never run on the main thread.");
output.rate = mSampleRate;
#ifdef MOZ_SAMPLE_TYPE_S16
MOZ_ASSERT(AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_S16);
output.format = CUBEB_SAMPLE_S16NE;
#else
MOZ_ASSERT(AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_FLOAT32);
output.format = CUBEB_SAMPLE_FLOAT32NE;
#endif
if (!mOutputChannelCount) {
LOG(LogLevel::Warning, ("Output number of channels is 0."));

View file

@ -59,11 +59,10 @@ class TrackInfoSharedPtr;
// becomes:
// AlignedFloatBuffer buffer(samples);
// if (!buffer) { return NS_ERROR_OUT_OF_MEMORY; }
class InflatableShortBuffer;
template <typename Type, int Alignment = 32>
class AlignedBuffer {
public:
friend InflatableShortBuffer;
AlignedBuffer()
: mData(nullptr), mLength(0), mBuffer(nullptr), mCapacity(0) {}
@ -84,7 +83,7 @@ class AlignedBuffer {
AlignedBuffer(const AlignedBuffer& aOther)
: AlignedBuffer(aOther.Data(), aOther.Length()) {}
AlignedBuffer(AlignedBuffer&& aOther) noexcept
AlignedBuffer(AlignedBuffer&& aOther)
: mData(aOther.mData),
mLength(aOther.mLength),
mBuffer(std::move(aOther.mBuffer)),
@ -94,7 +93,7 @@ class AlignedBuffer {
aOther.mCapacity = 0;
}
AlignedBuffer& operator=(AlignedBuffer&& aOther) noexcept {
AlignedBuffer& operator=(AlignedBuffer&& aOther) {
this->~AlignedBuffer();
new (this) AlignedBuffer(std::move(aOther));
return *this;
@ -247,41 +246,10 @@ class AlignedBuffer {
size_t mCapacity{}; // in bytes
};
using AlignedByteBuffer = AlignedBuffer<uint8_t>;
using AlignedFloatBuffer = AlignedBuffer<float>;
using AlignedShortBuffer = AlignedBuffer<int16_t>;
using AlignedAudioBuffer = AlignedBuffer<AudioDataValue>;
// A buffer in which int16_t audio can be written to, and then converted to
// float32 audio without reallocating.
// This class is useful when an API hands out int16_t audio but the samples
// need to be immediately converted to f32.
class InflatableShortBuffer {
public:
explicit InflatableShortBuffer(size_t aElementCount)
: mBuffer(aElementCount * 2) {}
AlignedFloatBuffer Inflate() {
// Convert the data from int16_t to f32 in place, in the same buffer.
// This works because the buffer in fact has twice the needed capacity,
// and the loop goes backward.
float* output = reinterpret_cast<float*>(mBuffer.mData);
for (size_t i = Length() - 1; i--;) {
output[i] = AudioSampleToFloat(mBuffer.mData[i]);
}
AlignedFloatBuffer rv;
rv.mBuffer = std::move(mBuffer.mBuffer);
rv.mCapacity = mBuffer.mCapacity;
rv.mLength = Length();
rv.mData = output;
return rv;
}
size_t Length() const { return mBuffer.mLength / 2; }
int16_t* get() const { return mBuffer.get(); }
explicit operator bool() const { return mBuffer.mData != nullptr; }
protected:
AlignedShortBuffer mBuffer;
};
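The class removed above relies on an in-place widening pass; a self-contained sketch of the same trick (hypothetical names, assuming the caller allocated room for the float output up front):

#include <cstddef>
#include <cstdint>

// Sketch: a buffer sized for N floats first holds N int16_t samples in its
// front half, then is widened in place. Walking backwards means out[i]
// (bytes 4i..4i+3) is written only after in[i] (bytes 2i..2i+1) and all
// later input samples have been read, so no unread input is clobbered.
void InflateInPlace(void* aBuffer, size_t aSampleCount) {
  int16_t* in = static_cast<int16_t*>(aBuffer);
  float* out = static_cast<float*>(aBuffer);
  for (size_t i = aSampleCount; i-- > 0;) {
    out[i] = static_cast<float>(in[i]) / 32768.0f;
  }
}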
typedef AlignedBuffer<uint8_t> AlignedByteBuffer;
typedef AlignedBuffer<float> AlignedFloatBuffer;
typedef AlignedBuffer<int16_t> AlignedShortBuffer;
typedef AlignedBuffer<AudioDataValue> AlignedAudioBuffer;
// Container that holds media samples.
class MediaData {
@ -448,16 +416,16 @@ class VideoInfo;
// Holds a decoded video frame, in YCbCr format. These are queued in the reader.
class VideoData : public MediaData {
public:
using IntRect = gfx::IntRect;
using IntSize = gfx::IntSize;
using ColorDepth = gfx::ColorDepth;
using ColorRange = gfx::ColorRange;
using YUVColorSpace = gfx::YUVColorSpace;
using ColorSpace2 = gfx::ColorSpace2;
using ChromaSubsampling = gfx::ChromaSubsampling;
using ImageContainer = layers::ImageContainer;
using Image = layers::Image;
using PlanarYCbCrImage = layers::PlanarYCbCrImage;
typedef gfx::IntRect IntRect;
typedef gfx::IntSize IntSize;
typedef gfx::ColorDepth ColorDepth;
typedef gfx::ColorRange ColorRange;
typedef gfx::YUVColorSpace YUVColorSpace;
typedef gfx::ColorSpace2 ColorSpace2;
typedef gfx::ChromaSubsampling ChromaSubsampling;
typedef layers::ImageContainer ImageContainer;
typedef layers::Image Image;
typedef layers::PlanarYCbCrImage PlanarYCbCrImage;
static const Type sType = Type::VIDEO_DATA;
static const char* sTypeName;
@ -475,7 +443,7 @@ class VideoData : public MediaData {
uint32_t mSkip;
};
Plane mPlanes[3]{};
Plane mPlanes[3];
YUVColorSpace mYUVColorSpace = YUVColorSpace::Identity;
ColorSpace2 mColorPrimaries = ColorSpace2::UNKNOWN;
ColorDepth mColorDepth = ColorDepth::COLOR_8;
@ -689,6 +657,10 @@ class MediaRawData final : public MediaData {
// Indicates that this is the last packet of the stream.
bool mEOS = false;
// Indicate to the audio decoder that mDiscardPadding frames should be
// trimmed.
uint32_t mDiscardPadding = 0;
RefPtr<TrackInfoSharedPtr> mTrackInfo;
// May contain the original start time and duration of the frames.

View file

@ -56,7 +56,7 @@ mozilla::LazyLogModule gMediaDemuxerLog("MediaDemuxer");
namespace mozilla {
using MediaDataDecoderID = void*;
typedef void* MediaDataDecoderID;
/**
* This class tracks shutdown promises to ensure all decoders are shut down
@ -73,7 +73,7 @@ class MediaFormatReader::ShutdownPromisePool {
RefPtr<ShutdownPromise> Shutdown();
// Track a shutdown promise.
void Track(const RefPtr<ShutdownPromise>& aPromise);
void Track(RefPtr<ShutdownPromise> aPromise);
// Shut down a decoder and track its shutdown promise.
void ShutdownDecoder(already_AddRefed<MediaDataDecoder> aDecoder) {
@ -96,7 +96,7 @@ RefPtr<ShutdownPromise> MediaFormatReader::ShutdownPromisePool::Shutdown() {
}
void MediaFormatReader::ShutdownPromisePool::Track(
const RefPtr<ShutdownPromise>& aPromise) {
RefPtr<ShutdownPromise> aPromise) {
MOZ_DIAGNOSTIC_ASSERT(!mShutdown);
MOZ_DIAGNOSTIC_ASSERT(!mPromises.Contains(aPromise));
mPromises.Insert(aPromise);
@ -1069,7 +1069,7 @@ bool MediaFormatReader::ResolveSetCDMPromiseIfDone(TrackType aTrack) {
if (mSetCDMForTracks.isEmpty()) {
LOGV("%s : Done ", __func__);
mSetCDMPromise.Resolve(/* aResolveValue = */ true, __func__);
mSetCDMPromise.Resolve(/* aIgnored = */ true, __func__);
if (HasAudio()) {
ScheduleUpdate(TrackInfo::kAudioTrack);
}
@ -1135,8 +1135,7 @@ RefPtr<SetCDMPromise> MediaFormatReader::SetCDMProxy(CDMProxy* aProxy) {
// 3) A null cdm proxy is set
// the promise can be resolved directly.
mSetCDMForTracks.clear();
return SetCDMPromise::CreateAndResolve(/* aResolveValue = */ true,
__func__);
return SetCDMPromise::CreateAndResolve(/* aIgnored = */ true, __func__);
}
RefPtr<SetCDMPromise> p = mSetCDMPromise.Ensure(__func__);
@ -1560,16 +1559,16 @@ void MediaFormatReader::DoDemuxVideo() {
p->Then(
OwnerThread(), __func__,
[self, perfRecorder(std::move(perfRecorder))](
const RefPtr<MediaTrackDemuxer::SamplesHolder>& aSamples) mutable {
RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples) mutable {
perfRecorder.Record();
self->OnVideoDemuxCompleted(aSamples);
self->OnVideoDemuxCompleted(std::move(aSamples));
},
[self](const MediaResult& aError) { self->OnVideoDemuxFailed(aError); })
->Track(mVideo.mDemuxRequest);
}
void MediaFormatReader::OnVideoDemuxCompleted(
const RefPtr<MediaTrackDemuxer::SamplesHolder>& aSamples) {
RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples) {
AUTO_PROFILER_LABEL("MediaFormatReader::OnVideoDemuxCompleted",
MEDIA_PLAYBACK);
LOGV("%zu video samples demuxed (sid:%d)", aSamples->GetSamples().Length(),
@ -1655,16 +1654,16 @@ void MediaFormatReader::DoDemuxAudio() {
p->Then(
OwnerThread(), __func__,
[self, perfRecorder(std::move(perfRecorder))](
const RefPtr<MediaTrackDemuxer::SamplesHolder>& aSamples) mutable {
RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples) mutable {
perfRecorder.Record();
self->OnAudioDemuxCompleted(aSamples);
self->OnAudioDemuxCompleted(std::move(aSamples));
},
[self](const MediaResult& aError) { self->OnAudioDemuxFailed(aError); })
->Track(mAudio.mDemuxRequest);
}
void MediaFormatReader::OnAudioDemuxCompleted(
const RefPtr<MediaTrackDemuxer::SamplesHolder>& aSamples) {
RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples) {
LOGV("%zu audio samples demuxed (sid:%d)", aSamples->GetSamples().Length(),
aSamples->GetSamples()[0]->mTrackInfo
? aSamples->GetSamples()[0]->mTrackInfo->GetID()
@ -1685,7 +1684,7 @@ void MediaFormatReader::NotifyNewOutput(
DDLOG(DDLogCategory::Log,
aTrack == TrackInfo::kAudioTrack ? "decoded_audio" : "decoded_video",
"no output samples");
} else {
} else
for (auto&& sample : aResults) {
if (DecoderDoctorLogger::IsDDLoggingEnabled()) {
switch (sample->mType) {
@ -1760,7 +1759,6 @@ void MediaFormatReader::NotifyNewOutput(
decoder.mNumOfConsecutiveUtilityCrashes = 0;
}
}
}
LOG("Done processing new %s samples", TrackTypeToStr(aTrack));
if (!aResults.IsEmpty()) {
@ -2685,7 +2683,7 @@ RefPtr<MediaFormatReader::WaitForDataPromise> MediaFormatReader::WaitForData(
return p;
}
nsresult MediaFormatReader::ResetDecode(const TrackSet& aTracks) {
nsresult MediaFormatReader::ResetDecode(TrackSet aTracks) {
AUTO_PROFILER_LABEL("MediaFormatReader::ResetDecode", MEDIA_PLAYBACK);
MOZ_ASSERT(OnTaskQueue());
LOGV("");
@ -3306,34 +3304,29 @@ void MediaFormatReader::GetDebugInfo(dom::MediaFormatReaderDebugInfo& aInfo) {
aInfo.mAudioState.mWaitingPromise = !mAudio.mWaitingPromise.IsEmpty();
aInfo.mAudioState.mHasDemuxRequest = mAudio.mDemuxRequest.Exists();
aInfo.mAudioState.mDemuxQueueSize =
AssertedCast<int>(mAudio.mQueuedSamples.Length());
uint32_t(mAudio.mQueuedSamples.Length());
aInfo.mAudioState.mHasDecoder = mAudio.mDecodeRequest.Exists();
aInfo.mAudioState.mTimeTreshold =
mAudio.mTimeThreshold ? mAudio.mTimeThreshold.ref().Time().ToSeconds()
: -1.0;
aInfo.mAudioState.mTimeTresholdHasSeeked =
mAudio.mTimeThreshold ? mAudio.mTimeThreshold.ref().mHasSeeked : false;
aInfo.mAudioState.mNumSamplesInput =
AssertedCast<int64_t>(mAudio.mNumSamplesInput);
aInfo.mAudioState.mNumSamplesOutput =
AssertedCast<int64_t>(mAudio.mNumSamplesOutput);
aInfo.mAudioState.mQueueSize =
AssertedCast<int32_t>(size_t(mAudio.mSizeOfQueue));
aInfo.mAudioState.mPending = AssertedCast<int>(mAudio.mOutput.Length());
aInfo.mAudioState.mNumSamplesInput = mAudio.mNumSamplesInput;
aInfo.mAudioState.mNumSamplesOutput = mAudio.mNumSamplesOutput;
aInfo.mAudioState.mQueueSize = size_t(mAudio.mSizeOfQueue);
aInfo.mAudioState.mPending = mAudio.mOutput.Length();
aInfo.mAudioState.mWaitingForData = mAudio.mWaitingForData;
aInfo.mAudioState.mDemuxEOS = mAudio.mDemuxEOS;
aInfo.mAudioState.mDrainState = int32_t(mAudio.mDrainState);
aInfo.mAudioState.mWaitingForKey = mAudio.mWaitingForKey;
aInfo.mAudioState.mLastStreamSourceID =
AssertedCast<int64_t>(mAudio.mLastStreamSourceID);
aInfo.mAudioState.mLastStreamSourceID = mAudio.mLastStreamSourceID;
}
CopyUTF8toUTF16(audioDecoderName, aInfo.mAudioDecoderName);
CopyUTF8toUTF16(audioType, aInfo.mAudioType);
aInfo.mAudioChannels = AssertedCast<int32_t>(audioInfo.mChannels);
aInfo.mAudioChannels = audioInfo.mChannels;
aInfo.mAudioRate = audioInfo.mRate;
aInfo.mAudioFramesDecoded =
AssertedCast<int64_t>(mAudio.mNumSamplesOutputTotal);
aInfo.mAudioFramesDecoded = mAudio.mNumSamplesOutputTotal;
VideoInfo videoInfo;
if (HasVideo()) {
@ -3346,26 +3339,22 @@ void MediaFormatReader::GetDebugInfo(dom::MediaFormatReaderDebugInfo& aInfo) {
aInfo.mVideoState.mWaitingPromise = !mVideo.mWaitingPromise.IsEmpty();
aInfo.mVideoState.mHasDemuxRequest = mVideo.mDemuxRequest.Exists();
aInfo.mVideoState.mDemuxQueueSize =
AssertedCast<int32_t>(mVideo.mQueuedSamples.Length());
uint32_t(mVideo.mQueuedSamples.Length());
aInfo.mVideoState.mHasDecoder = mVideo.mDecodeRequest.Exists();
aInfo.mVideoState.mTimeTreshold =
mVideo.mTimeThreshold ? mVideo.mTimeThreshold.ref().Time().ToSeconds()
: -1.0;
aInfo.mVideoState.mTimeTresholdHasSeeked =
mVideo.mTimeThreshold ? mVideo.mTimeThreshold.ref().mHasSeeked : false;
aInfo.mVideoState.mNumSamplesInput =
AssertedCast<int64_t>(mVideo.mNumSamplesInput);
aInfo.mVideoState.mNumSamplesOutput =
AssertedCast<int64_t>(mVideo.mNumSamplesOutput);
aInfo.mVideoState.mQueueSize =
AssertedCast<int32_t>(size_t(mVideo.mSizeOfQueue));
aInfo.mVideoState.mPending = AssertedCast<int32_t>(mVideo.mOutput.Length());
aInfo.mVideoState.mNumSamplesInput = mVideo.mNumSamplesInput;
aInfo.mVideoState.mNumSamplesOutput = mVideo.mNumSamplesOutput;
aInfo.mVideoState.mQueueSize = size_t(mVideo.mSizeOfQueue);
aInfo.mVideoState.mPending = mVideo.mOutput.Length();
aInfo.mVideoState.mWaitingForData = mVideo.mWaitingForData;
aInfo.mVideoState.mDemuxEOS = mVideo.mDemuxEOS;
aInfo.mVideoState.mDrainState = int32_t(mVideo.mDrainState);
aInfo.mVideoState.mWaitingForKey = mVideo.mWaitingForKey;
aInfo.mVideoState.mLastStreamSourceID =
AssertedCast<int64_t>(mVideo.mLastStreamSourceID);
aInfo.mVideoState.mLastStreamSourceID = mVideo.mLastStreamSourceID;
}
CopyUTF8toUTF16(videoDecoderName, aInfo.mVideoDecoderName);
@ -3376,19 +3365,14 @@ void MediaFormatReader::GetDebugInfo(dom::MediaFormatReaderDebugInfo& aInfo) {
videoInfo.mDisplay.height < 0 ? 0 : videoInfo.mDisplay.height;
aInfo.mVideoRate = mVideo.mMeanRate.Mean();
aInfo.mVideoHardwareAccelerated = VideoIsHardwareAccelerated();
aInfo.mVideoNumSamplesOutputTotal =
AssertedCast<int64_t>(mVideo.mNumSamplesOutputTotal);
aInfo.mVideoNumSamplesSkippedTotal =
AssertedCast<int64_t>(mVideo.mNumSamplesSkippedTotal);
aInfo.mVideoNumSamplesOutputTotal = mVideo.mNumSamplesOutputTotal;
aInfo.mVideoNumSamplesSkippedTotal = mVideo.mNumSamplesSkippedTotal;
// Looking at dropped frames
FrameStatisticsData stats = mFrameStats->GetFrameStatisticsData();
aInfo.mFrameStats.mDroppedDecodedFrames =
AssertedCast<int64_t>(stats.mDroppedDecodedFrames);
aInfo.mFrameStats.mDroppedSinkFrames =
AssertedCast<int64_t>(stats.mDroppedSinkFrames);
aInfo.mFrameStats.mDroppedCompositorFrames =
AssertedCast<int64_t>(stats.mDroppedCompositorFrames);
aInfo.mFrameStats.mDroppedDecodedFrames = stats.mDroppedDecodedFrames;
aInfo.mFrameStats.mDroppedSinkFrames = stats.mDroppedSinkFrames;
aInfo.mFrameStats.mDroppedCompositorFrames = stats.mDroppedCompositorFrames;
}
void MediaFormatReader::SetVideoNullDecode(bool aIsNullDecode) {
@ -3419,7 +3403,7 @@ void MediaFormatReader::SetNullDecode(TrackType aTrack, bool aIsNullDecode) {
void MediaFormatReader::OnFirstDemuxCompleted(
TrackInfo::TrackType aType,
const RefPtr<MediaTrackDemuxer::SamplesHolder>& aSamples) {
RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples) {
AUTO_PROFILER_LABEL("MediaFormatReader::OnFirstDemuxCompleted",
MEDIA_PLAYBACK);
MOZ_ASSERT(OnTaskQueue());

View file

@ -56,7 +56,7 @@ struct MetadataHolder {
UniquePtr<MetadataTags> mTags;
};
using MediaDecoderOwnerID = void*;
typedef void* MediaDecoderOwnerID;
struct MOZ_STACK_CLASS MediaFormatReaderInit {
MediaResource* mResource = nullptr;
@ -75,8 +75,8 @@ class MediaFormatReader final
: public SupportsThreadSafeWeakPtr<MediaFormatReader>,
public DecoderDoctorLifeLogger<MediaFormatReader> {
static const bool IsExclusive = true;
using TrackType = TrackInfo::TrackType;
using NotifyDataArrivedPromise = MozPromise<bool, MediaResult, IsExclusive>;
typedef TrackInfo::TrackType TrackType;
typedef MozPromise<bool, MediaResult, IsExclusive> NotifyDataArrivedPromise;
public:
MOZ_DECLARE_REFCOUNTED_TYPENAME(MediaFormatReader)
@ -161,7 +161,7 @@ class MediaFormatReader final
//
// aParam is a set of TrackInfo::TrackType enums specifying which
// queues need to be reset, defaulting to both audio and video tracks.
nsresult ResetDecode(const TrackSet& aTracks);
nsresult ResetDecode(TrackSet aTracks);
// Destroys the decoding state. The reader cannot be made usable again.
// This is different from ReleaseMediaResources() as it is irreversible,
@ -501,34 +501,31 @@ class MediaFormatReader final
// if we have too many, or if warnings should be treated as errors.
return mNumOfConsecutiveDecodingError > mMaxConsecutiveDecodingError ||
StaticPrefs::media_playback_warnings_as_errors();
}
if (mError.ref() == NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER) {
} else if (mError.ref() == NS_ERROR_DOM_MEDIA_NEED_NEW_DECODER) {
// If the caller asked for a new decoder we shouldn't treat
// it as fatal.
return false;
}
if (mError.ref() ==
NS_ERROR_DOM_MEDIA_REMOTE_DECODER_CRASHED_RDD_OR_GPU_ERR) {
} else if (mError.ref() ==
NS_ERROR_DOM_MEDIA_REMOTE_DECODER_CRASHED_RDD_OR_GPU_ERR) {
// Allow RDD crashes to be non-fatal, but give up
// if we have too many, or if warnings should be treated as errors.
return mNumOfConsecutiveRDDOrGPUCrashes >
mMaxConsecutiveRDDOrGPUCrashes ||
StaticPrefs::media_playback_warnings_as_errors();
}
if (mError.ref() ==
NS_ERROR_DOM_MEDIA_REMOTE_DECODER_CRASHED_UTILITY_ERR) {
} else if (mError.ref() ==
NS_ERROR_DOM_MEDIA_REMOTE_DECODER_CRASHED_UTILITY_ERR) {
bool tooManyConsecutiveCrashes =
mNumOfConsecutiveUtilityCrashes > mMaxConsecutiveUtilityCrashes;
// TODO: Telemetry?
return tooManyConsecutiveCrashes ||
StaticPrefs::media_playback_warnings_as_errors();
}
if (mError.ref() ==
NS_ERROR_DOM_MEDIA_REMOTE_DECODER_CRASHED_MF_CDM_ERR) {
} else if (mError.ref() ==
NS_ERROR_DOM_MEDIA_REMOTE_DECODER_CRASHED_MF_CDM_ERR) {
return false;
} else {
// All other error types are fatal
return true;
}
// All other error types are fatal
return true;
}
// If set, all decoded samples prior mTimeThreshold will be dropped.
@ -653,8 +650,7 @@ class MediaFormatReader final
if (aValue == media::TimeUnit::Zero()) {
return;
}
mMean += static_cast<float>((1.0f / aValue.ToSeconds() - mMean) /
static_cast<double>(++mCount));
mMean += (1.0f / aValue.ToSeconds() - mMean) / ++mCount;
}
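The expression being reverted above is the standard single-pass running-mean update; a tiny standalone sketch of the recurrence (illustrative, not part of the patch):

#include <cstdint>

// Sketch: after n calls, mMean == (x1 + ... + xn) / n, maintained
// incrementally via mean += (x - mean) / n, with no sample history kept.
struct RunningMean {
  double mMean = 0.0;
  uint64_t mCount = 0;
  void Update(double aValue) {
    mMean += (aValue - mMean) / static_cast<double>(++mCount);
  }
};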
void Reset() {
@ -738,15 +734,13 @@ class MediaFormatReader final
void OnDemuxFailed(TrackType aTrack, const MediaResult& aError);
void DoDemuxVideo();
void OnVideoDemuxCompleted(
const RefPtr<MediaTrackDemuxer::SamplesHolder>& aSamples);
void OnVideoDemuxCompleted(RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples);
void OnVideoDemuxFailed(const MediaResult& aError) {
OnDemuxFailed(TrackType::kVideoTrack, aError);
}
void DoDemuxAudio();
void OnAudioDemuxCompleted(
const RefPtr<MediaTrackDemuxer::SamplesHolder>& aSamples);
void OnAudioDemuxCompleted(RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples);
void OnAudioDemuxFailed(const MediaResult& aError) {
OnDemuxFailed(TrackType::kAudioTrack, aError);
}
@ -824,9 +818,8 @@ class MediaFormatReader final
MediaEventListener mOnTrackWaitingForKeyListener;
void OnFirstDemuxCompleted(
TrackInfo::TrackType aType,
const RefPtr<MediaTrackDemuxer::SamplesHolder>& aSamples);
void OnFirstDemuxCompleted(TrackInfo::TrackType aType,
RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples);
void OnFirstDemuxFailed(TrackInfo::TrackType aType,
const MediaResult& aError);

View file

@ -37,7 +37,7 @@ class MetadataTag {
}
};
using MetadataTags = nsTHashMap<nsCStringHashKey, nsCString>;
typedef nsTHashMap<nsCStringHashKey, nsCString> MetadataTags;
// Start codec specific data structs. If modifying these remember to also
// modify the MediaIPCUtils so that any new members are sent across IPC.
@ -128,10 +128,11 @@ struct Mp3CodecSpecificData {
struct OpusCodecSpecificData {
bool operator==(const OpusCodecSpecificData& rhs) const {
return mContainerCodecDelayFrames == rhs.mContainerCodecDelayFrames &&
return mContainerCodecDelayMicroSeconds ==
rhs.mContainerCodecDelayMicroSeconds &&
*mHeadersBinaryBlob == *rhs.mHeadersBinaryBlob;
}
// The codec delay (aka pre-skip) in audio frames.
// The codec delay (aka pre-skip) in microseconds.
// See https://tools.ietf.org/html/rfc7845#section-4.2 for more info.
// This member should store the codec delay parsed from the container file.
// In some cases (such as the ogg container), this information is derived
@ -139,7 +140,7 @@ struct OpusCodecSpecificData {
// separately redundant. However, other containers store the delay in
// addition to the header blob, in which case we can check this container
// delay against the header delay to ensure they're consistent.
int64_t mContainerCodecDelayFrames{-1};
int64_t mContainerCodecDelayMicroSeconds{-1};
// A binary blob of opus header data, specifically the Identification Header.
// See https://datatracker.ietf.org/doc/html/rfc7845.html#section-5.1
@ -400,10 +401,10 @@ class VideoInfo : public TrackInfo {
return imageRect;
}
imageRect.x = AssertedCast<int>((imageRect.x * aWidth) / mImage.width);
imageRect.y = AssertedCast<int>((imageRect.y * aHeight) / mImage.height);
imageRect.SetWidth(AssertedCast<int>(w));
imageRect.SetHeight(AssertedCast<int>(h));
imageRect.x = (imageRect.x * aWidth) / mImage.width;
imageRect.y = (imageRect.y * aHeight) / mImage.height;
imageRect.SetWidth(w);
imageRect.SetHeight(h);
return imageRect;
}
@ -540,7 +541,7 @@ class EncryptionInfo {
// Encryption data.
CopyableTArray<uint8_t> mInitData;
};
using InitDatas = CopyableTArray<InitData>;
typedef CopyableTArray<InitData> InitDatas;
// True if the stream has encryption metadata
bool IsEncrypted() const { return mEncrypted; }

View file

@ -12,6 +12,7 @@
#include "MediaContainerType.h"
#include "MediaResource.h"
#include "TimeUnits.h"
#include "VorbisUtils.h"
#include "mozilla/Base64.h"
#include "mozilla/dom/ContentChild.h"
#include "mozilla/SchedulerGroup.h"
@ -51,8 +52,8 @@ CheckedInt64 SaferMultDiv(int64_t aValue, uint64_t aMul, uint64_t aDiv) {
if (aMul > INT64_MAX || aDiv > INT64_MAX) {
return CheckedInt64(INT64_MAX) + 1; // Return an invalid checked int.
}
int64_t mul = AssertedCast<int64_t>(aMul);
int64_t div = AssertedCast<int64_t>(aDiv);
int64_t mul = aMul;
int64_t div = aDiv;
int64_t major = aValue / div;
int64_t remainder = aValue % div;
return CheckedInt64(remainder) * mul / div + CheckedInt64(major) * mul;
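The split into major and remainder is what keeps the intermediates small; a sketch of the identity SaferMultDiv relies on (same math, minus the overflow checks):

#include <cstdint>

// Sketch: with value = q*div + r (q = value/div, r = value%div),
//   value * mul / div == q * mul + r * mul / div
// exactly in integer arithmetic for non-negative inputs, and since r < div,
// no intermediate needs value*mul to fit in 64 bits.
int64_t MulDivSketch(int64_t value, int64_t mul, int64_t div) {
  int64_t q = value / div;
  int64_t r = value % div;
  return q * mul + r * mul / div;
}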
@ -95,12 +96,10 @@ static int32_t ConditionDimension(float aValue) {
void ScaleDisplayByAspectRatio(gfx::IntSize& aDisplay, float aAspectRatio) {
if (aAspectRatio > 1.0) {
// Increase the intrinsic width
aDisplay.width =
ConditionDimension(aAspectRatio * AssertedCast<float>(aDisplay.width));
aDisplay.width = ConditionDimension(aAspectRatio * aDisplay.width);
} else {
// Increase the intrinsic height
aDisplay.height =
ConditionDimension(AssertedCast<float>(aDisplay.height) / aAspectRatio);
aDisplay.height = ConditionDimension(aDisplay.height / aAspectRatio);
}
}
@ -1051,7 +1050,11 @@ bool ParseCodecsString(const nsAString& aCodecs,
expectMoreTokens = tokenizer.separatorAfterCurrentToken();
aOutCodecs.AppendElement(token);
}
return !expectMoreTokens;
if (expectMoreTokens) {
// Last codec name was empty
return false;
}
return true;
}
bool ParseMIMETypeString(const nsAString& aMIMEType,

View file

@ -437,8 +437,8 @@ enum class StringListRangeEmptyItems {
template <typename String,
StringListRangeEmptyItems empties = StringListRangeEmptyItems::Skip>
class StringListRange {
using CharType = typename String::char_type;
using Pointer = const CharType*;
typedef typename String::char_type CharType;
typedef const CharType* Pointer;
public:
// Iterator into range, trims items and optionally skips empty items.
@ -453,7 +453,7 @@ class StringListRange {
}
// DereferencedType should be 'const nsDependent[C]String' pointing into
// mList (which is 'const ns[C]String&').
using DereferencedType = decltype(Substring(Pointer(), Pointer()));
typedef decltype(Substring(Pointer(), Pointer())) DereferencedType;
DereferencedType operator*() { return Substring(mStart, mEnd); }
private:

dom/media/VorbisUtils.h (new file, 27 lines)
View file

@ -0,0 +1,27 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef VORBISUTILS_H_
#define VORBISUTILS_H_
#ifdef MOZ_SAMPLE_TYPE_S16
# include <ogg/os_types.h>
typedef ogg_int32_t VorbisPCMValue;
# define MOZ_CLIP_TO_15(x) ((x) < -32768 ? -32768 : (x) <= 32767 ? (x) : 32767)
// Convert the output of vorbis_synthesis_pcmout to an AudioDataValue
# define MOZ_CONVERT_VORBIS_SAMPLE(x) \
(static_cast<AudioDataValue>(MOZ_CLIP_TO_15((x) >> 9)))
#else /* MOZ_SAMPLE_TYPE_FLOAT32 */
typedef float VorbisPCMValue;
# define MOZ_CONVERT_VORBIS_SAMPLE(x) (x)
#endif
#endif /* VORBISUTILS_H_ */
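On fixed-point (Tremor) builds the decoder emits 32-bit samples with extra headroom; a sketch of what MOZ_CONVERT_VORBIS_SAMPLE does there (standalone restatement of the macro above):

#include <cstdint>

// Sketch: the fixed-point samples carry 9 bits of headroom relative to
// int16, so >>9 rescales to the 16-bit range and the clamp catches any
// samples that overshoot full scale, exactly as MOZ_CLIP_TO_15 does.
int16_t ConvertFixedPointSample(int32_t aFixed) {
  int32_t v = aFixed >> 9;
  if (v < -32768) return -32768;
  if (v > 32767) return 32767;
  return static_cast<int16_t>(v);
}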

View file

@ -338,10 +338,17 @@ nsresult OpusTrackEncoder::Encode(AudioSegment* aSegment) {
// really predict the output frame count at each call.
resamplingDest.SetLength(outframes * mChannels);
#if MOZ_SAMPLE_TYPE_S16
short* in = reinterpret_cast<short*>(pcm.Elements());
short* out = reinterpret_cast<short*>(resamplingDest.Elements());
speex_resampler_process_interleaved_int(mResampler, in, &inframes, out,
&outframes);
#else
float* in = reinterpret_cast<float*>(pcm.Elements());
float* out = reinterpret_cast<float*>(resamplingDest.Elements());
speex_resampler_process_interleaved_float(mResampler, in, &inframes, out,
&outframes);
#endif
MOZ_ASSERT(pcm.Length() >= mResampledLeftover.Length());
PodCopy(pcm.Elements(), mResampledLeftover.Elements(),
@ -399,9 +406,15 @@ nsresult OpusTrackEncoder::Encode(AudioSegment* aSegment) {
frameData->SetLength(MAX_DATA_BYTES);
// result is returned as opus error code if it is negative.
result = 0;
#ifdef MOZ_SAMPLE_TYPE_S16
const opus_int16* pcmBuf = static_cast<opus_int16*>(pcm.Elements());
result = opus_encode(mEncoder, pcmBuf, NumOutputFramesPerPacket(),
frameData->Elements(), MAX_DATA_BYTES);
#else
const float* pcmBuf = static_cast<float*>(pcm.Elements());
result = opus_encode_float(mEncoder, pcmBuf, NumOutputFramesPerPacket(),
frameData->Elements(), MAX_DATA_BYTES);
#endif
frameData->SetLength(result >= 0 ? result : 0);
if (result < 0) {
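For reference, a minimal standalone use of the libopus entry point the float branch above calls (sketch against the public Opus C API; rate, channel count, and frame size are illustrative):

#include <opus/opus.h>

// Sketch: encode one 20 ms stereo frame of interleaved float PCM at 48 kHz.
// opus_encode_float() returns the packet size in bytes, or a negative
// error code; the int16 path uses opus_encode() with the same shape.
int EncodeOneFrame(const float* aPcm, unsigned char* aOut, int aOutBytes) {
  int err = 0;
  OpusEncoder* enc =
      opus_encoder_create(48000, 2, OPUS_APPLICATION_AUDIO, &err);
  if (err != OPUS_OK) {
    return err;
  }
  const int frameSize = 960;  // 20 ms at 48 kHz
  int packetBytes = opus_encode_float(enc, aPcm, frameSize, aOut, aOutBytes);
  opus_encoder_destroy(enc);
  return packetBytes;
}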

View file

@ -146,11 +146,11 @@ struct ParamTraits<mozilla::OpusCodecSpecificData> {
using paramType = mozilla::OpusCodecSpecificData;
static void Write(MessageWriter* aWriter, const paramType& aParam) {
WriteParam(aWriter, aParam.mContainerCodecDelayFrames);
WriteParam(aWriter, aParam.mContainerCodecDelayMicroSeconds);
WriteParam(aWriter, *aParam.mHeadersBinaryBlob);
}
static bool Read(MessageReader* aReader, paramType* aResult) {
return ReadParam(aReader, &aResult->mContainerCodecDelayFrames) &&
return ReadParam(aReader, &aResult->mContainerCodecDelayMicroSeconds) &&
ReadParam(aReader, aResult->mHeadersBinaryBlob.get());
}
};

View file

@ -146,7 +146,8 @@ bool ArrayOfRemoteMediaRawData::Fill(
mSamples.AppendElement(RemoteMediaRawData{
MediaDataIPDL(entry->mOffset, entry->mTime, entry->mTimecode,
entry->mDuration, entry->mKeyframe),
entry->mEOS, height, entry->mOriginalPresentationWindow,
entry->mEOS, height, entry->mDiscardPadding,
entry->mOriginalPresentationWindow,
entry->mCrypto.IsEncrypted() && entry->mShouldCopyCryptoToRemoteRawData
? Some(CryptoInfo{
entry->mCrypto.mCryptoScheme,
@ -210,6 +211,7 @@ already_AddRefed<MediaRawData> ArrayOfRemoteMediaRawData::ElementAt(
rawData->mDuration = sample.mBase.duration();
rawData->mKeyframe = sample.mBase.keyframe();
rawData->mEOS = sample.mEOS;
rawData->mDiscardPadding = sample.mDiscardPadding;
rawData->mExtraData = mExtraDatas.MediaByteBufferAt(aIndex);
if (sample.mCryptoConfig) {
CryptoSample& cypto = rawData->GetWritableCrypto();
@ -254,6 +256,7 @@ ipc::IPDLParamTraits<ArrayOfRemoteMediaRawData::RemoteMediaRawData>::Write(
WriteIPDLParam(aWriter, aActor, aVar.mBase);
WriteIPDLParam(aWriter, aActor, aVar.mEOS);
WriteIPDLParam(aWriter, aActor, aVar.mHeight);
WriteIPDLParam(aWriter, aActor, aVar.mDiscardPadding);
WriteIPDLParam(aWriter, aActor, aVar.mOriginalPresentationWindow);
WriteIPDLParam(aWriter, aActor, aVar.mCryptoConfig);
}
@ -265,6 +268,7 @@ ipc::IPDLParamTraits<ArrayOfRemoteMediaRawData::RemoteMediaRawData>::Read(
return ReadIPDLParam(aReader, aActor, &aVar->mBase) &&
ReadIPDLParam(aReader, aActor, &aVar->mEOS) &&
ReadIPDLParam(aReader, aActor, &aVar->mHeight) &&
ReadIPDLParam(aReader, aActor, &aVar->mDiscardPadding) &&
ReadIPDLParam(aReader, aActor, &aVar->mOriginalPresentationWindow) &&
ReadIPDLParam(aReader, aActor, &aVar->mCryptoConfig);
};

View file

@ -234,6 +234,7 @@ class ArrayOfRemoteMediaRawData {
bool mEOS;
// This will be zero for audio.
int32_t mHeight;
uint32_t mDiscardPadding;
Maybe<media::TimeInterval> mOriginalPresentationWindow;
Maybe<CryptoInfo> mCryptoConfig;
};

View file

@ -42,7 +42,7 @@ extern mozilla::LogModule* GetMediaSourceAPILog();
namespace mozilla {
using media::TimeUnit;
using AppendState = SourceBufferAttributes::AppendState;
typedef SourceBufferAttributes::AppendState AppendState;
namespace dom {

View file

@ -49,8 +49,8 @@ using dom::SourceBufferAppendMode;
using media::TimeInterval;
using media::TimeIntervals;
using media::TimeUnit;
using AppendBufferResult = SourceBufferTask::AppendBufferResult;
using AppendState = SourceBufferAttributes::AppendState;
typedef SourceBufferTask::AppendBufferResult AppendBufferResult;
typedef SourceBufferAttributes::AppendState AppendState;
static const char* AppendStateToStr(AppendState aState) {
switch (aState) {
@ -557,7 +557,7 @@ void TrackBuffersManager::DoEvictData(const TimeUnit& aPlaybackTime,
if (frame->GetEndTime() >= lowerLimit) {
break;
}
partialEvict += AssertedCast<int64_t>(frame->ComputedSizeOfIncludingThis());
partialEvict += frame->ComputedSizeOfIncludingThis();
}
const int64_t finalSize = mSizeSourceBuffer - aSizeToEvict;
@ -596,7 +596,7 @@ void TrackBuffersManager::DoEvictData(const TimeUnit& aPlaybackTime,
// Don't evict before the end of the current segment
TimeUnit upperLimit = futureBuffered[0].mEnd;
uint32_t evictedFramesStartIndex = buffer.Length();
for (uint32_t i = buffer.Length() - 1; i-- > 0;) {
for (int32_t i = buffer.Length() - 1; i >= 0; i--) {
const auto& frame = buffer[i];
if (frame->mTime <= upperLimit || toEvict < 0) {
// We've reached a frame that shouldn't be evicted -> Evict after it ->
@ -605,7 +605,7 @@ void TrackBuffersManager::DoEvictData(const TimeUnit& aPlaybackTime,
evictedFramesStartIndex = i + 1;
break;
}
toEvict -= AssertedCast<int64_t>(frame->ComputedSizeOfIncludingThis());
toEvict -= frame->ComputedSizeOfIncludingThis();
}
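The loop above and its pre-backout counterpart are the signed and unsigned ways of walking a buffer backwards; a minimal illustration of the unsigned idiom (hypothetical standalone form):

#include <cstddef>

// Sketch: reverse iteration with an unsigned index. "i-- > 0" tests before
// decrementing, so the body sees i = n-1 ... 0 and terminates cleanly,
// avoiding the wrap-around that "i >= 0" causes with unsigned types.
void VisitBackwards(const int* aItems, size_t aLength) {
  for (size_t i = aLength; i-- > 0;) {
    (void)aItems[i];  // visit aItems[i]
  }
}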
if (evictedFramesStartIndex < buffer.Length()) {
MSE_DEBUG("Step2. Evicting %" PRId64 " bytes from trailing data",
@ -616,8 +616,7 @@ void TrackBuffersManager::DoEvictData(const TimeUnit& aPlaybackTime,
}
RefPtr<TrackBuffersManager::RangeRemovalPromise>
TrackBuffersManager::CodedFrameRemovalWithPromise(
const TimeInterval& aInterval) {
TrackBuffersManager::CodedFrameRemovalWithPromise(TimeInterval aInterval) {
mTaskQueueCapability->AssertOnCurrentThread();
RefPtr<RangeRemovalTask> task = new RangeRemovalTask(aInterval);
@ -627,7 +626,7 @@ TrackBuffersManager::CodedFrameRemovalWithPromise(
return p;
}
bool TrackBuffersManager::CodedFrameRemoval(const TimeInterval& aInterval) {
bool TrackBuffersManager::CodedFrameRemoval(TimeInterval aInterval) {
MOZ_ASSERT(OnTaskQueue());
AUTO_PROFILER_LABEL("TrackBuffersManager::CodedFrameRemoval", MEDIA_PLAYBACK);
MSE_DEBUG("From %.2fs to %.2f", aInterval.mStart.ToSeconds(),
@ -652,7 +651,7 @@ bool TrackBuffersManager::CodedFrameRemoval(const TimeInterval& aInterval) {
bool dataRemoved = false;
// 3. For each track buffer in this source buffer, run the following steps:
for (auto* track : GetTracksList()) {
for (auto track : GetTracksList()) {
MSE_DEBUGV("Processing %s track", track->mInfo->mMimeType.get());
// 1. Let remove end timestamp be the current value of duration
// See bug: https://www.w3.org/Bugs/Public/show_bug.cgi?id=28727
@ -1625,7 +1624,7 @@ void TrackBuffersManager::MaybeDispatchEncryptedEvent(
}
void TrackBuffersManager::OnVideoDemuxCompleted(
const RefPtr<MediaTrackDemuxer::SamplesHolder>& aSamples) {
RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples) {
mTaskQueueCapability->AssertOnCurrentThread();
MSE_DEBUG("%zu video samples demuxed", aSamples->GetSamples().Length());
mVideoTracks.mDemuxRequest.Complete();
@ -1649,7 +1648,7 @@ void TrackBuffersManager::DoDemuxAudio() {
}
void TrackBuffersManager::OnAudioDemuxCompleted(
const RefPtr<MediaTrackDemuxer::SamplesHolder>& aSamples) {
RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples) {
mTaskQueueCapability->AssertOnCurrentThread();
MSE_DEBUG("%zu audio samples demuxed", aSamples->GetSamples().Length());
// When using MSE, it's possible for each fragments to have their own
@ -1796,7 +1795,7 @@ TimeInterval TrackBuffersManager::PresentationInterval(
TimeInterval(aSamples[0]->mTime, aSamples[0]->GetEndTime());
for (uint32_t i = 1; i < aSamples.Length(); i++) {
const auto& sample = aSamples[i];
auto& sample = aSamples[i];
presentationInterval = presentationInterval.Span(
TimeInterval(sample->mTime, sample->GetEndTime()));
}
@ -2703,7 +2702,7 @@ uint32_t TrackBuffersManager::SkipToNextRandomAccessPoint(
TimeUnit nextSampleTimecode = trackData.mNextSampleTimecode;
TimeUnit nextSampleTime = trackData.mNextSampleTime;
uint32_t i = trackData.mNextGetSampleIndex.ref();
uint32_t originalPos = i;
int32_t originalPos = i;
for (; i < track.Length(); i++) {
const MediaRawData* sample =
@ -2730,7 +2729,7 @@ uint32_t TrackBuffersManager::SkipToNextRandomAccessPoint(
} else if (i > 0) {
// Go back to the previous keyframe or the original position so the next
// demux can succeed and be decoded.
for (uint32_t j = i - 1; j-- > originalPos;) {
for (int j = i - 1; j >= originalPos; j--) {
const RefPtr<MediaRawData>& sample = track[j];
if (sample->mKeyframe) {
trackData.mNextSampleTimecode = sample->mTimecode;
@ -2866,12 +2865,11 @@ already_AddRefed<MediaRawData> TrackBuffersManager::GetSample(
int32_t TrackBuffersManager::FindCurrentPosition(TrackInfo::TrackType aTrack,
const TimeUnit& aFuzz) const {
MOZ_ASSERT(OnTaskQueue());
const auto& trackData = GetTracksData(aTrack);
auto& trackData = GetTracksData(aTrack);
const TrackBuffer& track = GetTrackBuffer(aTrack);
int32_t trackLength = AssertedCast<int32_t>(track.Length());
// Perform an exact search first.
for (int32_t i = 0; i < trackLength; i++) {
for (uint32_t i = 0; i < track.Length(); i++) {
const RefPtr<MediaRawData>& sample = track[i];
TimeInterval sampleInterval{sample->mTimecode, sample->GetEndTimecode()};
@ -2885,7 +2883,7 @@ int32_t TrackBuffersManager::FindCurrentPosition(TrackInfo::TrackType aTrack,
}
}
for (int32_t i = 0; i < trackLength; i++) {
for (uint32_t i = 0; i < track.Length(); i++) {
const RefPtr<MediaRawData>& sample = track[i];
TimeInterval sampleInterval{sample->mTimecode, sample->GetEndTimecode(),
aFuzz};
@ -2902,7 +2900,7 @@ int32_t TrackBuffersManager::FindCurrentPosition(TrackInfo::TrackType aTrack,
// We couldn't find our sample by decode timestamp. Attempt to find it using
// presentation timestamp. There will likely be small jerkiness.
for (int32_t i = 0; i < trackLength; i++) {
for (uint32_t i = 0; i < track.Length(); i++) {
const RefPtr<MediaRawData>& sample = track[i];
TimeInterval sampleInterval{sample->mTime, sample->GetEndTime(), aFuzz};
@ -3027,14 +3025,11 @@ void TrackBuffersManager::GetDebugInfo(
if (HasAudio()) {
aInfo.mNextSampleTime = mAudioTracks.mNextSampleTime.ToSeconds();
aInfo.mNumSamples =
AssertedCast<int32_t>(mAudioTracks.mBuffers[0].Length());
aInfo.mBufferSize = AssertedCast<int32_t>(mAudioTracks.mSizeBuffer);
aInfo.mEvictable = AssertedCast<int32_t>(Evictable(TrackInfo::kAudioTrack));
aInfo.mNextGetSampleIndex =
AssertedCast<int32_t>(mAudioTracks.mNextGetSampleIndex.valueOr(-1));
aInfo.mNextInsertionIndex =
AssertedCast<int32_t>(mAudioTracks.mNextInsertionIndex.valueOr(-1));
aInfo.mNumSamples = mAudioTracks.mBuffers[0].Length();
aInfo.mBufferSize = mAudioTracks.mSizeBuffer;
aInfo.mEvictable = Evictable(TrackInfo::kAudioTrack);
aInfo.mNextGetSampleIndex = mAudioTracks.mNextGetSampleIndex.valueOr(-1);
aInfo.mNextInsertionIndex = mAudioTracks.mNextInsertionIndex.valueOr(-1);
media::TimeIntervals ranges = SafeBuffered(TrackInfo::kAudioTrack);
dom::Sequence<dom::BufferRange> items;
for (uint32_t i = 0; i < ranges.Length(); ++i) {
@ -3049,14 +3044,11 @@ void TrackBuffersManager::GetDebugInfo(
aInfo.mRanges = std::move(items);
} else if (HasVideo()) {
aInfo.mNextSampleTime = mVideoTracks.mNextSampleTime.ToSeconds();
aInfo.mNumSamples =
AssertedCast<int32_t>(mVideoTracks.mBuffers[0].Length());
aInfo.mBufferSize = AssertedCast<int32_t>(mVideoTracks.mSizeBuffer);
aInfo.mEvictable = AssertedCast<int32_t>(Evictable(TrackInfo::kVideoTrack));
aInfo.mNextGetSampleIndex =
AssertedCast<int32_t>(mVideoTracks.mNextGetSampleIndex.valueOr(-1));
aInfo.mNextInsertionIndex =
AssertedCast<int32_t>(mVideoTracks.mNextInsertionIndex.valueOr(-1));
aInfo.mNumSamples = mVideoTracks.mBuffers[0].Length();
aInfo.mBufferSize = mVideoTracks.mSizeBuffer;
aInfo.mEvictable = Evictable(TrackInfo::kVideoTrack);
aInfo.mNextGetSampleIndex = mVideoTracks.mNextGetSampleIndex.valueOr(-1);
aInfo.mNextInsertionIndex = mVideoTracks.mNextInsertionIndex.valueOr(-1);
media::TimeIntervals ranges = SafeBuffered(TrackInfo::kVideoTrack);
dom::Sequence<dom::BufferRange> items;
for (uint32_t i = 0; i < ranges.Length(); ++i) {

View file

@ -74,11 +74,11 @@ class TrackBuffersManager final
BUFFER_FULL,
};
using TrackType = TrackInfo::TrackType;
using MediaType = MediaData::Type;
using TrackBuffer = nsTArray<RefPtr<MediaRawData>>;
using AppendPromise = SourceBufferTask::AppendPromise;
using RangeRemovalPromise = SourceBufferTask::RangeRemovalPromise;
typedef TrackInfo::TrackType TrackType;
typedef MediaData::Type MediaType;
typedef nsTArray<RefPtr<MediaRawData>> TrackBuffer;
typedef SourceBufferTask::AppendPromise AppendPromise;
typedef SourceBufferTask::RangeRemovalPromise RangeRemovalPromise;
// Interface for SourceBuffer
TrackBuffersManager(MediaSourceDecoder* aParentDecoder,
@ -182,7 +182,8 @@ class TrackBuffersManager final
void AddSizeOfResources(MediaSourceDecoder::ResourceSizes* aSizes) const;
private:
using CodedFrameProcessingPromise = MozPromise<bool, MediaResult, true>;
typedef MozPromise<bool, MediaResult, /* IsExclusive = */ true>
CodedFrameProcessingPromise;
~TrackBuffersManager();
// All following functions run on the taskqueue.
@ -205,8 +206,8 @@ class TrackBuffersManager final
// Called by ResetParserState.
void CompleteResetParserState() MOZ_REQUIRES(mTaskQueueCapability);
RefPtr<RangeRemovalPromise> CodedFrameRemovalWithPromise(
const media::TimeInterval& aInterval) MOZ_REQUIRES(mTaskQueueCapability);
bool CodedFrameRemoval(const media::TimeInterval& aInterval)
media::TimeInterval aInterval) MOZ_REQUIRES(mTaskQueueCapability);
bool CodedFrameRemoval(media::TimeInterval aInterval)
MOZ_REQUIRES(mTaskQueueCapability);
// Removes all coded frames -- this is not to spec and should be used as a
// last resort to clear buffers only if other methods cannot.
@ -267,7 +268,7 @@ class TrackBuffersManager final
MOZ_GUARDED_BY(mTaskQueueCapability);
void OnDemuxerInitDone(const MediaResult& aResult);
void OnDemuxerInitFailed(const MediaResult& aError);
void OnDemuxerInitFailed(const MediaResult& aFailure);
void OnDemuxerResetDone(const MediaResult& aResult)
MOZ_REQUIRES(mTaskQueueCapability);
MozPromiseRequestHolder<MediaDataDemuxer::InitPromise> mDemuxerInitRequest;
@ -275,16 +276,14 @@ class TrackBuffersManager final
void OnDemuxFailed(TrackType aTrack, const MediaResult& aError)
MOZ_REQUIRES(mTaskQueueCapability);
void DoDemuxVideo() MOZ_REQUIRES(mTaskQueueCapability);
void OnVideoDemuxCompleted(
const RefPtr<MediaTrackDemuxer::SamplesHolder>& aSamples);
void OnVideoDemuxCompleted(RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples);
void OnVideoDemuxFailed(const MediaResult& aError) {
mVideoTracks.mDemuxRequest.Complete();
mTaskQueueCapability->AssertOnCurrentThread();
OnDemuxFailed(TrackType::kVideoTrack, aError);
}
void DoDemuxAudio() MOZ_REQUIRES(mTaskQueueCapability);
void OnAudioDemuxCompleted(
const RefPtr<MediaTrackDemuxer::SamplesHolder>& aSamples);
void OnAudioDemuxCompleted(RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples);
void OnAudioDemuxFailed(const MediaResult& aError) {
mAudioTracks.mDemuxRequest.Complete();
mTaskQueueCapability->AssertOnCurrentThread();
@ -388,8 +387,8 @@ class TrackBuffersManager final
mEvictable = 0;
mLastIndex = 0;
}
uint32_t mEvictable = 0;
uint32_t mLastIndex = 0;
uint32_t mEvictable;
uint32_t mLastIndex;
};
// Size of data that can be safely evicted during the next eviction
// cycle.

View file

@ -207,6 +207,7 @@ EXPORTS += [
"VideoLimits.h",
"VideoSegment.h",
"VideoUtils.h",
"VorbisUtils.h",
"WavDumper.h",
"XiphExtradata.h",
]

View file

@ -121,6 +121,13 @@ bool MP3TrackDemuxer::Init() {
mInfo->mBitDepth = 16;
mInfo->mMimeType = "audio/mpeg";
mInfo->mDuration = Duration().valueOr(TimeUnit::FromInfinity());
Mp3CodecSpecificData mp3CodecData{};
if (mEncoderDelay) {
mp3CodecData.mEncoderDelayFrames = mEncoderDelay;
mp3CodecData.mEncoderPaddingFrames = mEncoderPadding;
}
mInfo->mCodecSpecificConfig =
AudioCodecSpecificVariant{std::move(mp3CodecData)};
MP3LOG("Init mInfo={mRate=%d mChannels=%d mBitDepth=%d mDuration=%s (%lfs)}",
mInfo->mRate, mInfo->mChannels, mInfo->mBitDepth,
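The delay/padding pair recorded above is what gapless playback needs downstream; a sketch of the trimming arithmetic (illustrative, not part of the patch):

#include <cstdint>

// Sketch: a stream with D encoder-delay frames at the front and P padding
// frames at the end around N decoded frames contains N - D - P playable
// frames per channel; the decoder drops the two margins.
int64_t PlayableFrames(int64_t aDecoded, int64_t aDelay, int64_t aPadding) {
  return aDecoded - aDelay - aPadding;
}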

View file

@ -172,13 +172,14 @@ MediaResult MP4AudioInfo::Update(const Mp4parseTrackInfo* aTrack,
mp4ParseSampleCodecSpecific.length >= 12) {
uint16_t preskip = mozilla::LittleEndian::readUint16(
mp4ParseSampleCodecSpecific.data + 10);
opusCodecSpecificData.mContainerCodecDelayFrames = preskip;
opusCodecSpecificData.mContainerCodecDelayMicroSeconds =
mozilla::FramesToUsecs(preskip, 48000).value();
LOG("Opus stream in MP4 container, %" PRId64
" microseconds of encoder delay (%" PRIu16 ").",
opusCodecSpecificData.mContainerCodecDelayFrames, preskip);
opusCodecSpecificData.mContainerCodecDelayMicroSeconds, preskip);
} else {
// This file will error later as it will be rejected by the opus decoder.
opusCodecSpecificData.mContainerCodecDelayFrames = 0;
opusCodecSpecificData.mContainerCodecDelayMicroSeconds = 0;
}
opusCodecSpecificData.mHeadersBinaryBlob->AppendElements(
mp4ParseSampleCodecSpecific.data, mp4ParseSampleCodecSpecific.length);
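The pre-skip conversion above is plain frames-to-microseconds at Opus's fixed 48 kHz granule rate; a worked sketch (the 312-frame value is a commonly seen encoder lookahead, used here only as an illustration):

#include <cstdint>

// Sketch: pre-skip is stored as a frame count at 48 kHz, so
// usecs = frames * 1'000'000 / 48'000. E.g. 312 frames -> 6500 us.
int64_t PreSkipToUsecs(uint16_t aPreSkipFrames) {
  return int64_t(aPreSkipFrames) * 1'000'000 / 48'000;
}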

View file

@ -1044,12 +1044,7 @@ Result<Ok, nsresult> Edts::Parse(Box& aBox) {
if (media_time == -1 && i) {
LOG_WARN(Edts, "Multiple empty edit, not handled");
} else if (media_time == -1) {
if (segment_duration > std::numeric_limits<int64_t>::max()) {
NS_WARNING("Segment duration higher than int64_t max.");
mEmptyOffset = std::numeric_limits<int64_t>::max();
} else {
mEmptyOffset = static_cast<int64_t>(segment_duration);
}
mEmptyOffset = segment_duration;
emptyEntry = true;
} else if (i > 1 || (i > 0 && !emptyEntry)) {
LOG_WARN(Edts,

View file

@ -235,7 +235,7 @@ struct SampleDescriptionEntry {
// Used to indicate in variants if all tracks should be parsed.
struct ParseAllTracks {};
using TrackParseMode = Variant<ParseAllTracks, uint32_t>;
typedef Variant<ParseAllTracks, uint32_t> TrackParseMode;
class Moof final : public Atom {
public:

View file

@ -13,7 +13,6 @@
#include <stdint.h>
#include <algorithm>
#include <opus/opus.h>
#include <opus/opus_multistream.h>
#include "OggCodecState.h"
#include "OggRLBox.h"
@ -22,6 +21,7 @@
#include "VideoUtils.h"
#include "XiphExtradata.h"
#include "nsDebug.h"
#include "opus/opus_multistream.h"
namespace mozilla {
@ -269,15 +269,15 @@ already_AddRefed<MediaRawData> OggCodecState::PacketOutAsMediaRawData() {
return nullptr;
}
TimeUnit endTimestamp = Time(packet->granulepos);
NS_ASSERTION(endTimestamp.IsPositiveOrZero(), "timestamp invalid");
int64_t end_tstamp = Time(packet->granulepos);
NS_ASSERTION(end_tstamp >= 0, "timestamp invalid");
TimeUnit duration = PacketDuration(packet.get());
NS_ASSERTION(duration.IsPositiveOrZero(), "duration invalid");
int64_t duration = PacketDuration(packet.get());
NS_ASSERTION(duration >= 0, "duration invalid");
sample->mTimecode = Time(packet->granulepos);
sample->mTime = endTimestamp - duration;
sample->mDuration = duration;
sample->mTimecode = TimeUnit::FromMicroseconds(packet->granulepos);
sample->mTime = TimeUnit::FromMicroseconds(end_tstamp - duration);
sample->mDuration = TimeUnit::FromMicroseconds(duration);
sample->mKeyframe = IsKeyframe(packet.get());
sample->mEOS = packet->e_o_s;
@ -441,19 +441,18 @@ bool TheoraState::DecodeHeader(OggPacketPtr aPacket) {
// header packets. Assume bad input.
// Our caller will deactivate the bitstream.
return false;
}
if (ret > 0 && isSetupHeader && mPacketCount == 3) {
} else if (ret > 0 && isSetupHeader && mPacketCount == 3) {
// Successfully read the three header packets.
mDoneReadingHeaders = true;
}
return true;
}
TimeUnit TheoraState::Time(int64_t aGranulepos) {
int64_t TheoraState::Time(int64_t granulepos) {
if (!mActive) {
return TimeUnit::Invalid();
return -1;
}
return TheoraState::Time(&mTheoraInfo, aGranulepos);
return TheoraState::Time(&mTheoraInfo, granulepos);
}
bool TheoraState::IsHeader(ogg_packet* aPacket) {
@ -465,9 +464,9 @@ bool TheoraState::IsHeader(ogg_packet* aPacket) {
(((_info)->version_minor > (_min) || (_info)->version_minor == (_min)) && \
(_info)->version_subminor >= (_sub)))
TimeUnit TheoraState::Time(th_info* aInfo, int64_t aGranulepos) {
int64_t TheoraState::Time(th_info* aInfo, int64_t aGranulepos) {
if (aGranulepos < 0 || aInfo->fps_numerator == 0) {
return TimeUnit::Invalid();
return -1;
}
// Implementation of th_granule_frame inlined here to operate
// on the th_info structure instead of the theora_state.
@ -478,38 +477,35 @@ TimeUnit TheoraState::Time(th_info* aInfo, int64_t aGranulepos) {
CheckedInt64 t =
((CheckedInt64(frameno) + 1) * USECS_PER_S) * aInfo->fps_denominator;
if (!t.isValid()) {
return TimeUnit::Invalid();
return -1;
}
t /= aInfo->fps_numerator;
// TODO -- use rationals here
return TimeUnit::FromMicroseconds(t.value());
return t.isValid() ? t.value() : -1;
}
TimeUnit TheoraState::StartTime(int64_t aGranulepos) {
if (aGranulepos < 0 || !mActive || mTheoraInfo.fps_numerator == 0) {
return TimeUnit::Invalid();
int64_t TheoraState::StartTime(int64_t granulepos) {
if (granulepos < 0 || !mActive || mTheoraInfo.fps_numerator == 0) {
return -1;
}
CheckedInt64 t =
(CheckedInt64(th_granule_frame(mCtx, aGranulepos)) * USECS_PER_S) *
(CheckedInt64(th_granule_frame(mCtx, granulepos)) * USECS_PER_S) *
mTheoraInfo.fps_denominator;
if (!t.isValid()) {
return TimeUnit::Invalid();
return -1;
}
// TODO -- use rationals here
return TimeUnit::FromMicroseconds(t.value() / mTheoraInfo.fps_numerator);
return t.value() / mTheoraInfo.fps_numerator;
}
TimeUnit TheoraState::PacketDuration(ogg_packet* aPacket) {
int64_t TheoraState::PacketDuration(ogg_packet* aPacket) {
if (!mActive || mTheoraInfo.fps_numerator == 0) {
return TimeUnit::Invalid();
return -1;
}
CheckedInt64 t = SaferMultDiv(mTheoraInfo.fps_denominator, USECS_PER_S,
mTheoraInfo.fps_numerator);
return t.isValid() ? TimeUnit::FromMicroseconds(t.value())
: TimeUnit::Invalid();
return t.isValid() ? t.value() : -1;
}
TimeUnit TheoraState::MaxKeyframeOffset() {
int64_t TheoraState::MaxKeyframeOffset() {
// Determine the maximum time in microseconds by which a key frame could
// offset for the theora bitstream. Theora granulepos encode time as:
// ((key_frame_number << granule_shift) + frame_offset).
@ -525,13 +521,13 @@ TimeUnit TheoraState::MaxKeyframeOffset() {
(mTheoraInfo.fps_denominator * USECS_PER_S) / mTheoraInfo.fps_numerator;
// Total time in usecs keyframe can be offset from any given frame.
return TimeUnit::FromMicroseconds(frameDuration * keyframeDiff);
return frameDuration * keyframeDiff;
}
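The comment above describes how a Theora granulepos packs two fields into one 64-bit value; a small sketch of decoding it (standalone form; the real shift comes from th_info, and the version-dependent off-by-one handled elsewhere in this file is ignored here):

#include <cstdint>

// Sketch: granulepos = (keyframe_number << shift) | frame_offset, so the
// absolute frame number is the keyframe number plus the offset, which is
// essentially what th_granule_frame() computes.
int64_t GranuleToFrame(int64_t aGranulepos, int aGranuleShift) {
  int64_t keyframe = aGranulepos >> aGranuleShift;
  int64_t offset = aGranulepos & ((int64_t(1) << aGranuleShift) - 1);
  return keyframe + offset;
}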
bool TheoraState::IsKeyframe(ogg_packet* aPacket) {
bool TheoraState::IsKeyframe(ogg_packet* pkt) {
// first bit of packet is 1 for header, 0 for data
// second bit of packet is 1 for inter frame, 0 for intra frame
return (aPacket->bytes >= 1 && (aPacket->packet[0] & 0x40) == 0x00);
return (pkt->bytes >= 1 && (pkt->packet[0] & 0x40) == 0x00);
}
nsresult TheoraState::PageIn(tainted_opaque_ogg<ogg_page*> aPage) {
@ -590,8 +586,7 @@ void TheoraState::ReconstructTheoraGranulepos() {
ogg_int64_t version_3_2_1 = TheoraVersion(&mTheoraInfo, 3, 2, 1);
ogg_int64_t lastFrame =
th_granule_frame(mCtx, lastGranulepos) + version_3_2_1;
ogg_int64_t firstFrame =
AssertedCast<ogg_int64_t>(lastFrame - mUnstamped.Length() + 1);
ogg_int64_t firstFrame = lastFrame - mUnstamped.Length() + 1;
// Until we encounter a keyframe, we'll assume that the "keyframe"
// segment of the granulepos is the first frame, or if that causes
@ -721,8 +716,7 @@ bool VorbisState::DecodeHeader(OggPacketPtr aPacket) {
// header packets. Assume bad input. Our caller will deactivate the
// bitstream.
return false;
}
if (!ret && isSetupHeader && mPacketCount == 3) {
} else if (!ret && isSetupHeader && mPacketCount == 3) {
// Successfully read the three header packets.
// The bitstream remains active.
mDoneReadingHeaders = true;
@ -773,32 +767,33 @@ bool VorbisState::Init() {
return true;
}
TimeUnit VorbisState::Time(int64_t aGranulepos) {
int64_t VorbisState::Time(int64_t granulepos) {
if (!mActive) {
return TimeUnit::Invalid();
return -1;
}
return VorbisState::Time(&mVorbisInfo, aGranulepos);
return VorbisState::Time(&mVorbisInfo, granulepos);
}
TimeUnit VorbisState::Time(vorbis_info* aInfo, int64_t aGranulepos) {
int64_t VorbisState::Time(vorbis_info* aInfo, int64_t aGranulepos) {
if (aGranulepos == -1 || aInfo->rate == 0) {
return TimeUnit::Invalid();
return -1;
}
return TimeUnit(aGranulepos, aInfo->rate);
CheckedInt64 t = SaferMultDiv(aGranulepos, USECS_PER_S, aInfo->rate);
return t.isValid() ? t.value() : 0;
}
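
SaferMultDiv is a Firefox helper that computes a * b / c with CheckedInt64 overflow detection. A rough standalone equivalent, assuming a compiler with __int128 (GCC/Clang on 64-bit targets):

#include <cstdint>
#include <optional>

std::optional<int64_t> SaferMultDivSketch(int64_t a, int64_t b, int64_t c) {
  if (c == 0) {
    return std::nullopt;
  }
  __int128 t = static_cast<__int128>(a) * b / c;  // widen before multiplying
  if (t > INT64_MAX || t < INT64_MIN) {
    return std::nullopt;  // result would not fit in int64_t
  }
  return static_cast<int64_t>(t);
}
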
TimeUnit VorbisState::PacketDuration(ogg_packet* aPacket) {
int64_t VorbisState::PacketDuration(ogg_packet* aPacket) {
if (!mActive) {
return TimeUnit::Invalid();
return -1;
}
if (aPacket->granulepos == -1) {
return TimeUnit::Invalid();
return -1;
}
// @FIXME store these in a more stable place
if (mVorbisPacketSamples.count(aPacket) == 0) {
// We haven't seen this packet, so we don't know its size.
return TimeUnit::Invalid();
return -1;
}
long samples = mVorbisPacketSamples[aPacket];
@ -905,8 +900,8 @@ void VorbisState::ReconstructVorbisGranulepos() {
}
bool unknownGranulepos = last->granulepos == -1;
int64_t totalSamples = 0;
for (int32_t i = AssertedCast<int32_t>(mUnstamped.Length() - 1); i > 0; i--) {
int totalSamples = 0;
for (int32_t i = mUnstamped.Length() - 1; i > 0; i--) {
auto& packet = mUnstamped[i];
auto& prev = mUnstamped[i - 1];
ogg_int64_t granulepos = packet->granulepos;
@ -1030,7 +1025,7 @@ bool OpusState::Init(void) {
mInfo.mBitDepth = 16;
// Save preskip & the first header packet for the Opus decoder
OpusCodecSpecificData opusData;
opusData.mContainerCodecDelayFrames = mParser->mPreSkip;
opusData.mContainerCodecDelayMicroSeconds = Time(0, mParser->mPreSkip);
if (!mHeaders.PeekFront()) {
return false;
@ -1086,22 +1081,22 @@ UniquePtr<MetadataTags> OpusState::GetTags() {
}
/* Return the timestamp (in microseconds) equivalent to a granulepos. */
TimeUnit OpusState::Time(int64_t aGranulepos) {
int64_t OpusState::Time(int64_t aGranulepos) {
if (!mActive) {
return TimeUnit::Invalid();
return -1;
}
return Time(mParser->mPreSkip, aGranulepos);
}
TimeUnit OpusState::Time(int aPreSkip, int64_t aGranulepos) {
int64_t OpusState::Time(int aPreSkip, int64_t aGranulepos) {
if (aGranulepos < 0) {
return TimeUnit::Invalid();
return -1;
}
int64_t offsetGranulePos = aGranulepos - aPreSkip;
// Ogg Opus always runs at a granule rate of 48 kHz.
return TimeUnit(offsetGranulePos, 48000);
CheckedInt64 t = SaferMultDiv(aGranulepos - aPreSkip, USECS_PER_S, 48000);
return t.isValid() ? t.value() : -1;
}
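
Ogg Opus always timestamps against a fixed 48 kHz granule clock regardless of the coded sample rate, and the first pre-skip samples are decoder priming to subtract. A plain sketch of the conversion above, with the overflow checks left to SaferMultDiv as in the real code:

#include <cstdint>

int64_t OpusGranuleToUsecs(int64_t gp, int preSkip) {
  if (gp < 0) {
    return -1;  // packet carries no timestamp
  }
  return (gp - preSkip) * 1000000 / 48000;
}
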
bool OpusState::IsHeader(ogg_packet* aPacket) {
@ -1147,8 +1142,7 @@ nsresult OpusState::PageIn(tainted_opaque_ogg<ogg_page*> aPage) {
// It even works before we've created the actual Opus decoder.
static int GetOpusDeltaGP(ogg_packet* packet) {
int nframes;
nframes = opus_packet_get_nb_frames(packet->packet,
AssertedCast<int32_t>(packet->bytes));
nframes = opus_packet_get_nb_frames(packet->packet, packet->bytes);
if (nframes > 0) {
return nframes * opus_packet_get_samples_per_frame(packet->packet, 48000);
}
@ -1156,8 +1150,9 @@ static int GetOpusDeltaGP(ogg_packet* packet) {
return nframes;
}
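
Both libopus calls above parse only the packet's TOC byte, which is why this works before any decoder exists. A hedged usage sketch:

#include <cstdint>
#include <opus/opus.h>

int64_t OpusPacketSamplesAt48k(const unsigned char* data, int32_t len) {
  int nframes = opus_packet_get_nb_frames(data, len);
  if (nframes <= 0) {
    return nframes;  // zero or a negative libopus error code
  }
  return int64_t(nframes) * opus_packet_get_samples_per_frame(data, 48000);
}
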
TimeUnit OpusState::PacketDuration(ogg_packet* aPacket) {
return TimeUnit(GetOpusDeltaGP(aPacket), 48000);
int64_t OpusState::PacketDuration(ogg_packet* aPacket) {
CheckedInt64 t = SaferMultDiv(GetOpusDeltaGP(aPacket), USECS_PER_S, 48000);
return t.isValid() ? t.value() : -1;
}
bool OpusState::ReconstructOpusGranulepos(void) {
@ -1173,9 +1168,8 @@ bool OpusState::ReconstructOpusGranulepos(void) {
if (mPrevPageGranulepos != -1) {
// If this file only has one page and the final granule position is
// smaller than the pre-skip amount, we MUST reject the stream.
if (!mDoneReadingHeaders && last->granulepos < mParser->mPreSkip) {
if (!mDoneReadingHeaders && last->granulepos < mParser->mPreSkip)
return false;
}
int64_t last_gp = last->granulepos;
gp = mPrevPageGranulepos;
// Loop through the packets forwards, adding the current packet's
@ -1201,11 +1195,12 @@ bool OpusState::ReconstructOpusGranulepos(void) {
}
mPrevPageGranulepos = last_gp;
return true;
} else {
NS_WARNING("No previous granule position to use for Opus end trimming.");
// If we don't have a previous granule position, fall through.
// We simply won't trim any samples from the end.
// TODO: Are we guaranteed to have seen a previous page if there is one?
}
NS_WARNING("No previous granule position to use for Opus end trimming.");
// If we don't have a previous granule position, fall through.
// We simply won't trim any samples from the end.
// TODO: Are we guaranteed to have seen a previous page if there is one?
}
auto& last = mUnstamped.LastElement();
@ -1268,16 +1263,7 @@ already_AddRefed<MediaRawData> OpusState::PacketOutAsMediaRawData() {
int64_t startFrame = mPrevPacketGranulepos;
frames -= std::max<int64_t>(
0, std::min(endFrame - startFrame, static_cast<int64_t>(frames)));
TimeUnit toTrim = TimeUnit(frames, 48000);
LOG(LogLevel::Debug,
("Trimming last opus packet: [%s, %s] to [%s, %s]",
data->mTime.ToString().get(), data->GetEndTime().ToString().get(),
data->mTime.ToString().get(),
(data->mTime + data->mDuration - toTrim).ToString().get()));
data->mOriginalPresentationWindow =
Some(media::TimeInterval{data->mTime, data->mTime + data->mDuration});
data->mDuration -= toTrim;
data->mDiscardPadding = frames;
}
// Save this packet's granule position in case we need to perform end
@ -1301,16 +1287,19 @@ bool FlacState::DecodeHeader(OggPacketPtr aPacket) {
return true;
}
TimeUnit FlacState::Time(int64_t aGranulepos) {
int64_t FlacState::Time(int64_t granulepos) {
if (!mParser.mInfo.IsValid()) {
return TimeUnit::Invalid();
return -1;
}
return TimeUnit(aGranulepos, mParser.mInfo.mRate);
CheckedInt64 t = SaferMultDiv(granulepos, USECS_PER_S, mParser.mInfo.mRate);
if (!t.isValid()) {
return -1;
}
return t.value();
}
TimeUnit FlacState::PacketDuration(ogg_packet* aPacket) {
return TimeUnit(mParser.BlockDuration(aPacket->packet, aPacket->bytes),
mParser.mInfo.mRate);
int64_t FlacState::PacketDuration(ogg_packet* aPacket) {
return mParser.BlockDuration(aPacket->packet, aPacket->bytes);
}
bool FlacState::IsHeader(ogg_packet* aPacket) {
@ -1368,7 +1357,7 @@ bool FlacState::ReconstructFlacGranulepos(void) {
// packet's duration from its granulepos to get the value
// for the current packet.
for (uint32_t i = mUnstamped.Length() - 1; i > 0; i--) {
int64_t offset =
int offset =
mParser.BlockDuration(mUnstamped[i]->packet, mUnstamped[i]->bytes);
// Check for error (negative offset) and overflow.
if (offset >= 0) {
@ -1496,8 +1485,7 @@ bool SkeletonState::DecodeIndex(ogg_packet* aPacket) {
int64_t numKeyPoints =
LittleEndian::readInt64(aPacket->packet + INDEX_NUM_KEYPOINTS_OFFSET);
TimeUnit endTime = TimeUnit::Zero();
TimeUnit startTime = TimeUnit::Zero();
int64_t endTime = 0, startTime = 0;
const unsigned char* p = aPacket->packet;
int64_t timeDenom =
@ -1511,10 +1499,21 @@ bool SkeletonState::DecodeIndex(ogg_packet* aPacket) {
// Extract the start time.
int64_t timeRawInt = LittleEndian::readInt64(p + INDEX_FIRST_NUMER_OFFSET);
startTime = TimeUnit(timeRawInt, timeDenom);
CheckedInt64 t = SaferMultDiv(timeRawInt, USECS_PER_S, timeDenom);
if (!t.isValid()) {
return (mActive = false);
} else {
startTime = t.value();
}
// Extract the end time.
timeRawInt = LittleEndian::readInt64(p + INDEX_LAST_NUMER_OFFSET);
endTime = TimeUnit(timeRawInt, timeDenom);
t = SaferMultDiv(timeRawInt, USECS_PER_S, timeDenom);
if (!t.isValid()) {
return (mActive = false);
} else {
endTime = t.value();
}
// Check the numKeyPoints value read, ensure we're not going to run out of
// memory while trying to decode the index packet.
@ -1524,10 +1523,8 @@ bool SkeletonState::DecodeIndex(ogg_packet* aPacket) {
return (mActive = false);
}
int64_t sizeofIndex =
AssertedCast<int64_t>(aPacket->bytes - INDEX_KEYPOINT_OFFSET);
int64_t maxNumKeyPoints =
AssertedCast<int64_t>(sizeofIndex / MIN_KEY_POINT_SIZE);
int64_t sizeofIndex = aPacket->bytes - INDEX_KEYPOINT_OFFSET;
int64_t maxNumKeyPoints = sizeofIndex / MIN_KEY_POINT_SIZE;
if (aPacket->bytes < minPacketSize.value() ||
numKeyPoints > maxNumKeyPoints || numKeyPoints < 0) {
// Packet size is less than the theoretical minimum size, or the packet is
@ -1548,7 +1545,7 @@ bool SkeletonState::DecodeIndex(ogg_packet* aPacket) {
const unsigned char* limit = aPacket->packet + aPacket->bytes;
int64_t numKeyPointsRead = 0;
CheckedInt64 offset = 0;
TimeUnit time = TimeUnit::Zero();
CheckedInt64 time = 0;
while (p < limit && numKeyPointsRead < numKeyPoints) {
int64_t delta = 0;
p = ReadVariableLengthInt(p, limit, delta);
@ -1558,15 +1555,19 @@ bool SkeletonState::DecodeIndex(ogg_packet* aPacket) {
return (mActive = false);
}
p = ReadVariableLengthInt(p, limit, delta);
time += TimeUnit(delta, timeDenom);
if (!time.IsValid() || time > endTime || time < startTime) {
time += delta;
if (!time.isValid() || time.value() > endTime || time.value() < startTime) {
return (mActive = false);
}
keyPoints->Add(offset.value(), time);
CheckedInt64 timeUsecs = SaferMultDiv(time.value(), USECS_PER_S, timeDenom);
if (!timeUsecs.isValid()) {
return (mActive = false);
}
keyPoints->Add(offset.value(), timeUsecs.value());
numKeyPointsRead++;
}
uint32_t keyPointsRead = keyPoints->Length();
int32_t keyPointsRead = keyPoints->Length();
if (keyPointsRead > 0) {
mIndex.InsertOrUpdate(serialno, std::move(keyPoints));
}
@ -1577,7 +1578,7 @@ bool SkeletonState::DecodeIndex(ogg_packet* aPacket) {
}
nsresult SkeletonState::IndexedSeekTargetForTrack(uint32_t aSerialno,
const TimeUnit& aTarget,
int64_t aTarget,
nsKeyPoint& aResult) {
nsKeyFrameIndex* index = nullptr;
mIndex.Get(aSerialno, &index);
@ -1588,15 +1589,14 @@ nsresult SkeletonState::IndexedSeekTargetForTrack(uint32_t aSerialno,
}
// Binary search to find the last key point with time less than target.
uint32_t start = 0;
uint32_t end = index->Length() - 1;
int start = 0;
int end = index->Length() - 1;
while (end > start) {
uint32_t mid = start + ((end - start + 1) >> 1);
int mid = start + ((end - start + 1) >> 1);
if (index->Get(mid).mTime == aTarget) {
start = mid;
break;
}
if (index->Get(mid).mTime < aTarget) {
} else if (index->Get(mid).mTime < aTarget) {
start = mid;
} else {
end = mid - 1;
@ -1608,7 +1608,7 @@ nsresult SkeletonState::IndexedSeekTargetForTrack(uint32_t aSerialno,
return NS_OK;
}
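
The bisection above is a generic "last keypoint at or before the target" search; the +1 in the midpoint computation keeps the loop converging once start and end are adjacent. An illustrative standalone form:

#include <cstdint>
#include <vector>

size_t LastKeyPointAtOrBefore(const std::vector<int64_t>& times,
                              int64_t target) {
  size_t start = 0;
  size_t end = times.size() - 1;  // assumes a non-empty, sorted index
  while (end > start) {
    size_t mid = start + ((end - start + 1) >> 1);
    if (times[mid] <= target) {
      start = mid;    // mid is still a candidate
    } else {
      end = mid - 1;  // mid is past the target
    }
  }
  return start;
}
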
nsresult SkeletonState::IndexedSeekTarget(const TimeUnit& aTarget,
nsresult SkeletonState::IndexedSeekTarget(int64_t aTarget,
nsTArray<uint32_t>& aTracks,
nsSeekTarget& aResult) {
if (!mActive || mVersion < SKELETON_VERSION(4, 0)) {
@ -1630,20 +1630,21 @@ nsresult SkeletonState::IndexedSeekTarget(const TimeUnit& aTarget,
if (r.IsNull()) {
return NS_ERROR_FAILURE;
}
LOG(LogLevel::Debug, ("Indexed seek target for time %s is offset %" PRId64,
aTarget.ToString().get(), r.mKeyPoint.mOffset));
LOG(LogLevel::Debug,
("Indexed seek target for time %" PRId64 " is offset %" PRId64, aTarget,
r.mKeyPoint.mOffset));
aResult = r;
return NS_OK;
}
nsresult SkeletonState::GetDuration(const nsTArray<uint32_t>& aTracks,
TimeUnit& aDuration) {
int64_t& aDuration) {
if (!mActive || mVersion < SKELETON_VERSION(4, 0) || !HasIndex() ||
aTracks.Length() == 0) {
return NS_ERROR_FAILURE;
}
TimeUnit endTime = TimeUnit::FromNegativeInfinity();
TimeUnit startTime = TimeUnit::FromInfinity();
int64_t endTime = INT64_MIN;
int64_t startTime = INT64_MAX;
for (uint32_t i = 0; i < aTracks.Length(); i++) {
nsKeyFrameIndex* index = nullptr;
mIndex.Get(aTracks[i], &index);
@ -1659,8 +1660,9 @@ nsresult SkeletonState::GetDuration(const nsTArray<uint32_t>& aTracks,
}
}
NS_ASSERTION(endTime > startTime, "Duration must be positive");
aDuration = endTime - startTime;
return aDuration.IsValid() ? NS_OK : NS_ERROR_FAILURE;
CheckedInt64 duration = CheckedInt64(endTime) - startTime;
aDuration = duration.isValid() ? duration.value() : 0;
return duration.isValid() ? NS_OK : NS_ERROR_FAILURE;
}
bool SkeletonState::DecodeFisbone(ogg_packet* aPacket) {
@ -1760,10 +1762,9 @@ bool SkeletonState::DecodeHeader(OggPacketPtr aPacket) {
aPacket->packet + SKELETON_PRESENTATION_TIME_NUMERATOR_OFFSET);
int64_t d = LittleEndian::readInt64(
aPacket->packet + SKELETON_PRESENTATION_TIME_DENOMINATOR_OFFSET);
mPresentationTime = d == 0 ? 0
: AssertedCast<int64_t>(static_cast<float>(n) /
static_cast<float>(d)) *
USECS_PER_S;
mPresentationTime =
d == 0 ? 0
: (static_cast<float>(n) / static_cast<float>(d)) * USECS_PER_S;
mVersion = SKELETON_VERSION(verMajor, verMinor);
// We can only care to parse Skeleton version 4.0+.


@ -3,7 +3,6 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "Intervals.h"
#if !defined(OggCodecState_h_)
# define OggCodecState_h_
@ -17,7 +16,11 @@
# include <nsClassHashtable.h>
# include <theora/theoradec.h>
# include <vorbis/codec.h>
# ifdef MOZ_TREMOR
# include <tremor/ivorbiscodec.h>
# else
# include <vorbis/codec.h>
# endif
// Uncomment the following to validate that we're predicting the number
// of Vorbis samples in each packet correctly.
@ -100,7 +103,7 @@ class OggPacketQueue : private nsDeque<ogg_packet> {
// converting granulepos to timestamps.
class OggCodecState {
public:
using MetadataTags = mozilla::MetadataTags;
typedef mozilla::MetadataTags MetadataTags;
// Ogg types we know about
enum CodecType {
TYPE_VORBIS = 0,
@ -133,39 +136,28 @@ class OggCodecState {
// Build a hash table with tag metadata parsed from the stream.
virtual UniquePtr<MetadataTags> GetTags() { return nullptr; }
using TimeUnit = media::TimeUnit;
// Returns the end time that a granulepos represents.
virtual TimeUnit Time(int64_t aGranulepos) { return TimeUnit::Invalid(); }
virtual int64_t Time(int64_t granulepos) { return -1; }
// Returns the start time that a granulepos represents.
virtual TimeUnit StartTime(int64_t aGranulepos) {
return TimeUnit::Invalid();
}
virtual int64_t StartTime(int64_t granulepos) { return -1; }
// Returns the duration of the given packet, if it can be determined.
virtual TimeUnit PacketDuration(ogg_packet* aPacket) {
return TimeUnit::Invalid();
}
virtual int64_t PacketDuration(ogg_packet* aPacket) { return -1; }
// Returns the start time of the given packet, if it can be determined.
virtual TimeUnit PacketStartTime(ogg_packet* aPacket) {
virtual int64_t PacketStartTime(ogg_packet* aPacket) {
if (aPacket->granulepos < 0) {
return TimeUnit::Invalid();
}
TimeUnit endTime = Time(aPacket->granulepos);
TimeUnit duration = PacketDuration(aPacket);
// When looping, it's possible to find header packets there because the
// demuxing restarts from the beginning of the stream. Just skip and retry
// with the next packet.
if (!duration.IsValid()) {
return TimeUnit::Invalid();
return -1;
}
int64_t endTime = Time(aPacket->granulepos);
int64_t duration = PacketDuration(aPacket);
if (duration > endTime) {
// Audio preskip may eat a whole packet or more.
return TimeUnit::Zero();
return 0;
} else {
return endTime - duration;
}
return endTime - duration;
}
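
A sketch of the start-time rule above: a packet nominally begins at its end time minus its duration, clamped to zero because Opus pre-skip can consume a whole packet or more at the head of a stream. Illustrative only:

#include <algorithm>
#include <cstdint>

int64_t PacketStartUsecs(int64_t endUsecs, int64_t durationUsecs) {
  if (endUsecs < 0 || durationUsecs < 0) {
    return -1;  // either value is unknown
  }
  return std::max<int64_t>(0, endUsecs - durationUsecs);
}
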
// Initializes the codec state.
@ -231,7 +223,7 @@ class OggCodecState {
// Returns the maximum number of microseconds which a keyframe can be offset
// from any given interframe.
virtual TimeUnit MaxKeyframeOffset() { return TimeUnit::Zero(); }
virtual int64_t MaxKeyframeOffset() { return 0; }
// Public access for mTheoraInfo.keyframe_granule_shift
virtual int32_t KeyFrameGranuleJobs() { return 0; }
@ -313,8 +305,8 @@ class VorbisState : public OggCodecState {
CodecType GetType() override { return TYPE_VORBIS; }
bool DecodeHeader(OggPacketPtr aPacket) override;
TimeUnit Time(int64_t aGranulepos) override;
TimeUnit PacketDuration(ogg_packet* aPacket) override;
int64_t Time(int64_t granulepos) override;
int64_t PacketDuration(ogg_packet* aPacket) override;
bool Init() override;
nsresult Reset() override;
bool IsHeader(ogg_packet* aPacket) override;
@ -326,14 +318,14 @@ class VorbisState : public OggCodecState {
private:
AudioInfo mInfo;
vorbis_info mVorbisInfo = {};
vorbis_comment mComment = {};
vorbis_dsp_state mDsp = {};
vorbis_block mBlock = {};
vorbis_info mVorbisInfo;
vorbis_comment mComment;
vorbis_dsp_state mDsp;
vorbis_block mBlock;
OggPacketQueue mHeaders;
// Returns the end time that a granulepos represents.
static TimeUnit Time(vorbis_info* aInfo, int64_t aGranulePos);
static int64_t Time(vorbis_info* aInfo, int64_t aGranulePos);
// Reconstructs the granulepos of Vorbis packets stored in the mUnstamped
// array.
@ -388,26 +380,26 @@ class TheoraState : public OggCodecState {
CodecType GetType() override { return TYPE_THEORA; }
bool DecodeHeader(OggPacketPtr aPacket) override;
TimeUnit Time(int64_t aGranulepos) override;
TimeUnit StartTime(int64_t aGranulepos) override;
TimeUnit PacketDuration(ogg_packet* aPacket) override;
int64_t Time(int64_t granulepos) override;
int64_t StartTime(int64_t granulepos) override;
int64_t PacketDuration(ogg_packet* aPacket) override;
bool Init() override;
nsresult Reset() override;
bool IsHeader(ogg_packet* aPacket) override;
bool IsKeyframe(ogg_packet* aPacket) override;
nsresult PageIn(tainted_opaque_ogg<ogg_page*> aPage) override;
const TrackInfo* GetInfo() const override { return &mInfo; }
TimeUnit MaxKeyframeOffset() override;
int64_t MaxKeyframeOffset() override;
int32_t KeyFrameGranuleJobs() override {
return mTheoraInfo.keyframe_granule_shift;
}
private:
// Returns the end time that a granulepos represents.
static TimeUnit Time(th_info* aInfo, int64_t aGranulePos);
static int64_t Time(th_info* aInfo, int64_t aGranulePos);
th_info mTheoraInfo = {};
th_comment mComment = {};
th_info mTheoraInfo;
th_comment mComment;
th_setup_info* mSetup;
th_dec_ctx* mCtx;
@ -430,8 +422,8 @@ class OpusState : public OggCodecState {
CodecType GetType() override { return TYPE_OPUS; }
bool DecodeHeader(OggPacketPtr aPacket) override;
TimeUnit Time(int64_t aGranulepos) override;
TimeUnit PacketDuration(ogg_packet* aPacket) override;
int64_t Time(int64_t aGranulepos) override;
int64_t PacketDuration(ogg_packet* aPacket) override;
bool Init() override;
nsresult Reset() override;
nsresult Reset(bool aStart);
@ -441,7 +433,7 @@ class OpusState : public OggCodecState {
const TrackInfo* GetInfo() const override { return &mInfo; }
// Returns the end time that a granulepos represents.
static TimeUnit Time(int aPreSkip, int64_t aGranulepos);
static int64_t Time(int aPreSkip, int64_t aGranulepos);
// Construct and return a table of tags from the metadata header.
UniquePtr<MetadataTags> GetTags() override;
@ -506,7 +498,7 @@ class SkeletonState : public OggCodecState {
CodecType GetType() override { return TYPE_SKELETON; }
bool DecodeHeader(OggPacketPtr aPacket) override;
TimeUnit Time(int64_t aGranulepos) override { return TimeUnit::Invalid(); }
int64_t Time(int64_t granulepos) override { return -1; }
bool IsHeader(ogg_packet* aPacket) override { return true; }
// Return true if the given time (in milliseconds) is within
@ -517,18 +509,18 @@ class SkeletonState : public OggCodecState {
// and its presentation time.
class nsKeyPoint {
public:
nsKeyPoint() : mOffset(INT64_MAX), mTime(TimeUnit::Invalid()) {}
nsKeyPoint() : mOffset(INT64_MAX), mTime(INT64_MAX) {}
nsKeyPoint(int64_t aOffset, TimeUnit aTime)
nsKeyPoint(int64_t aOffset, int64_t aTime)
: mOffset(aOffset), mTime(aTime) {}
// Offset from start of segment/link-in-the-chain in bytes.
int64_t mOffset;
// Presentation time
TimeUnit mTime;
// Presentation time in usecs.
int64_t mTime;
bool IsNull() { return mOffset == INT64_MAX && !mTime.IsValid(); }
bool IsNull() { return mOffset == INT64_MAX && mTime == INT64_MAX; }
};
// Stores a keyframe's byte-offset, presentation time and the serialno
@ -544,8 +536,7 @@ class SkeletonState : public OggCodecState {
// Determines from the seek index the keyframe which you must seek back to
// in order to get all keyframes required to render all streams with
// serialnos in aTracks, at time aTarget.
nsresult IndexedSeekTarget(const TimeUnit& aTarget,
nsTArray<uint32_t>& aTracks,
nsresult IndexedSeekTarget(int64_t aTarget, nsTArray<uint32_t>& aTracks,
nsSeekTarget& aResult);
bool HasIndex() const { return mIndex.Count() > 0; }
@ -554,7 +545,7 @@ class SkeletonState : public OggCodecState {
// an index. aTracks must be filled with the serialnos of the active tracks.
// The duration is calculated as the greatest end time of all active tracks,
// minus the smallest start time of all the active tracks.
nsresult GetDuration(const nsTArray<uint32_t>& aTracks, TimeUnit& aDuration);
nsresult GetDuration(const nsTArray<uint32_t>& aTracks, int64_t& aDuration);
private:
// Decodes an index packet. Returns false on failure.
@ -564,8 +555,7 @@ class SkeletonState : public OggCodecState {
// Gets the keypoint you must seek to in order to get the keyframe required
// to render the stream at time aTarget on stream with serial aSerialno.
nsresult IndexedSeekTargetForTrack(uint32_t aSerialno,
const TimeUnit& aTarget,
nsresult IndexedSeekTargetForTrack(uint32_t aSerialno, int64_t aTarget,
nsKeyPoint& aResult);
// Version of the decoded skeleton track, as per the SKELETON_VERSION macro.
@ -581,15 +571,15 @@ class SkeletonState : public OggCodecState {
// stream.
class nsKeyFrameIndex {
public:
nsKeyFrameIndex(const TimeUnit& aStartTime, const TimeUnit& aEndTime)
nsKeyFrameIndex(int64_t aStartTime, int64_t aEndTime)
: mStartTime(aStartTime), mEndTime(aEndTime) {
MOZ_COUNT_CTOR(nsKeyFrameIndex);
}
MOZ_COUNTED_DTOR(nsKeyFrameIndex)
void Add(int64_t aOffset, const TimeUnit& aTime) {
mKeyPoints.AppendElement(nsKeyPoint(aOffset, aTime));
void Add(int64_t aOffset, int64_t aTimeMs) {
mKeyPoints.AppendElement(nsKeyPoint(aOffset, aTimeMs));
}
const nsKeyPoint& Get(uint32_t aIndex) const { return mKeyPoints[aIndex]; }
@ -597,10 +587,10 @@ class SkeletonState : public OggCodecState {
uint32_t Length() const { return mKeyPoints.Length(); }
// Presentation time of the first sample in this stream in usecs.
const TimeUnit mStartTime;
const int64_t mStartTime;
// End time of the last sample in this stream in usecs.
const TimeUnit mEndTime;
const int64_t mEndTime;
private:
nsTArray<nsKeyPoint> mKeyPoints;
@ -617,8 +607,8 @@ class FlacState : public OggCodecState {
CodecType GetType() override { return TYPE_FLAC; }
bool DecodeHeader(OggPacketPtr aPacket) override;
TimeUnit Time(int64_t aGranulepos) override;
TimeUnit PacketDuration(ogg_packet* aPacket) override;
int64_t Time(int64_t granulepos) override;
int64_t PacketDuration(ogg_packet* aPacket) override;
bool IsHeader(ogg_packet* aPacket) override;
nsresult PageIn(tainted_opaque_ogg<ogg_page*> aPage) override;


@ -8,7 +8,6 @@
#include "OggRLBox.h"
#include "MediaDataDemuxer.h"
#include "OggCodecState.h"
#include "TimeUnits.h"
#include "XiphExtradata.h"
#include "mozilla/AbstractThread.h"
#include "mozilla/Atomics.h"
@ -18,7 +17,6 @@
#include "mozilla/SharedThreadPool.h"
#include "mozilla/Telemetry.h"
#include "mozilla/TimeStamp.h"
#include "nsDebug.h"
#include "nsAutoRef.h"
#include "nsError.h"
@ -57,7 +55,7 @@ using media::TimeUnit;
// seek target. This is because it's usually quicker to just keep downloading
// from an existing connection than to do another bisection inside that
// small range, which would open a new HTTP connection.
static const TimeUnit OGG_SEEK_FUZZ_USECS = TimeUnit::FromMicroseconds(500000);
static const uint32_t OGG_SEEK_FUZZ_USECS = 500000;
// The number of microseconds of "pre-roll" we use for Opus streams.
// The specification recommends 80 ms.
@ -188,16 +186,14 @@ bool OggDemuxer::HasVideo() const { return mTheoraState; }
bool OggDemuxer::HaveStartTime() const { return mStartTime.isSome(); }
TimeUnit OggDemuxer::StartTime() const {
return mStartTime.refOr(TimeUnit::Zero());
}
int64_t OggDemuxer::StartTime() const { return mStartTime.refOr(0); }
bool OggDemuxer::HaveStartTime(TrackInfo::TrackType aType) {
return OggState(aType).mStartTime.isSome();
}
TimeUnit OggDemuxer::StartTime(TrackInfo::TrackType aType) {
return OggState(aType).mStartTime.refOr(TimeUnit::Zero());
int64_t OggDemuxer::StartTime(TrackInfo::TrackType aType) {
return OggState(aType).mStartTime.refOr(TimeUnit::Zero()).ToMicroseconds();
}
RefPtr<OggDemuxer::InitPromise> OggDemuxer::Init() {
@ -386,11 +382,10 @@ void OggDemuxer::SetupTargetSkeleton() {
// the end of resource to get it.
nsTArray<uint32_t> tracks;
BuildSerialList(tracks);
TimeUnit duration = TimeUnit::Zero();
int64_t duration = 0;
if (NS_SUCCEEDED(mSkeletonState->GetDuration(tracks, duration))) {
OGG_DEBUG("Got duration from Skeleton index %s",
duration.ToString().get());
mInfo.mMetadataDuration.emplace(duration);
OGG_DEBUG("Got duration from Skeleton index %" PRId64, duration);
mInfo.mMetadataDuration.emplace(TimeUnit::FromMicroseconds(duration));
}
}
}
@ -570,10 +565,10 @@ nsresult OggDemuxer::ReadMetadata() {
SetupMediaTracksInfo(serials);
if (HasAudio() || HasVideo()) {
TimeUnit startTime = TimeUnit::Invalid();
int64_t startTime = -1;
FindStartTime(startTime);
if (startTime.IsValid()) {
OGG_DEBUG("Detected stream start time %s", startTime.ToString().get());
if (startTime >= 0) {
OGG_DEBUG("Detected stream start time %" PRId64, startTime);
mStartTime.emplace(startTime);
}
@ -585,14 +580,14 @@ nsresult OggDemuxer::ReadMetadata() {
MOZ_ASSERT(length > 0, "Must have a content length to get end time");
TimeUnit endTime = RangeEndTime(TrackInfo::kAudioTrack, length);
int64_t endTime = RangeEndTime(TrackInfo::kAudioTrack, length);
if (endTime.IsValid()) {
mInfo.mUnadjustedMetadataEndTime.emplace(endTime);
mInfo.mMetadataDuration.emplace(endTime -
mStartTime.refOr(TimeUnit::Zero()));
OGG_DEBUG("Got Ogg duration from seeking to end %s",
endTime.ToString().get());
if (endTime != -1) {
mInfo.mUnadjustedMetadataEndTime.emplace(
TimeUnit::FromMicroseconds(endTime));
mInfo.mMetadataDuration.emplace(
TimeUnit::FromMicroseconds(endTime - mStartTime.refOr(0)));
OGG_DEBUG("Got Ogg duration from seeking to end %" PRId64, endTime);
}
}
if (mInfo.mMetadataDuration.isNothing()) {
@ -935,7 +930,7 @@ TimeIntervals OggDemuxer::GetBuffered(TrackInfo::TrackType aType) {
// we special-case (startOffset == 0) so that the first
// buffered range always appears to be buffered from the media start
// time, rather than from the end-time of the first page.
TimeUnit startTime = (startOffset == 0) ? StartTime() : TimeUnit::Invalid();
int64_t startTime = (startOffset == 0) ? StartTime() : -1;
// Find the start time of the range. Read pages until we find one with a
// granulepos which we can convert into a timestamp to use as the time of
@ -947,7 +942,7 @@ TimeIntervals OggDemuxer::GetBuffered(TrackInfo::TrackType aType) {
}
auto clean_page = MakeScopeExit([&] { mSandbox->free_in_sandbox(page); });
while (!startTime.IsValid()) {
while (startTime == -1) {
int32_t discard;
PageSyncResult pageSyncResult =
PageSync(mSandbox.get(), Resource(aType), sync.mState, true,
@ -986,22 +981,22 @@ TimeIntervals OggDemuxer::GetBuffered(TrackInfo::TrackType aType) {
(serial == mVorbisState->mSerial)
.unverified_safe_because(time_interval_reason)) {
startTime = mVorbisState->Time(granulepos);
MOZ_ASSERT(startTime.IsPositive(), "Must have positive start time");
MOZ_ASSERT(startTime > 0, "Must have positive start time");
} else if (aType == TrackInfo::kAudioTrack && mOpusState &&
(serial == mOpusState->mSerial)
.unverified_safe_because(time_interval_reason)) {
startTime = mOpusState->Time(granulepos);
MOZ_ASSERT(startTime.IsPositive(), "Must have positive start time");
MOZ_ASSERT(startTime > 0, "Must have positive start time");
} else if (aType == TrackInfo::kAudioTrack && mFlacState &&
(serial == mFlacState->mSerial)
.unverified_safe_because(time_interval_reason)) {
startTime = mFlacState->Time(granulepos);
MOZ_ASSERT(startTime.IsPositive(), "Must have positive start time");
MOZ_ASSERT(startTime > 0, "Must have positive start time");
} else if (aType == TrackInfo::kVideoTrack && mTheoraState &&
(serial == mTheoraState->mSerial)
.unverified_safe_because(time_interval_reason)) {
startTime = mTheoraState->Time(granulepos);
MOZ_ASSERT(startTime.IsPositive(), "Must have positive start time");
MOZ_ASSERT(startTime > 0, "Must have positive start time");
} else if (mCodecStore.Contains(
serial.unverified_safe_because(time_interval_reason))) {
// Stream is not the theora or vorbis stream we're playing,
@ -1029,13 +1024,14 @@ TimeIntervals OggDemuxer::GetBuffered(TrackInfo::TrackType aType) {
}
}
if (startTime.IsValid()) {
if (startTime != -1) {
// We were able to find a start time for that range, see if we can
// find an end time.
TimeUnit endTime = RangeEndTime(aType, startOffset, endOffset, true);
if (endTime.IsValid() && endTime > startTime) {
int64_t endTime = RangeEndTime(aType, startOffset, endOffset, true);
if (endTime > startTime) {
buffered +=
TimeInterval(startTime - StartTime(), endTime - StartTime());
TimeInterval(TimeUnit::FromMicroseconds(startTime - StartTime()),
TimeUnit::FromMicroseconds(endTime - StartTime()));
}
}
}
@ -1043,38 +1039,38 @@ TimeIntervals OggDemuxer::GetBuffered(TrackInfo::TrackType aType) {
return buffered;
}
void OggDemuxer::FindStartTime(TimeUnit& aOutStartTime) {
void OggDemuxer::FindStartTime(int64_t& aOutStartTime) {
// Extract the start times of the bitstreams in order to calculate
// the duration.
TimeUnit videoStartTime = TimeUnit::FromInfinity();
TimeUnit audioStartTime = TimeUnit::FromInfinity();
int64_t videoStartTime = INT64_MAX;
int64_t audioStartTime = INT64_MAX;
if (HasVideo()) {
FindStartTime(TrackInfo::kVideoTrack, videoStartTime);
if (!videoStartTime.IsPosInf()) {
OGG_DEBUG("OggDemuxer::FindStartTime() video=%s",
videoStartTime.ToString().get());
mVideoOggState.mStartTime = Some(videoStartTime);
if (videoStartTime != INT64_MAX) {
OGG_DEBUG("OggDemuxer::FindStartTime() video=%" PRId64, videoStartTime);
mVideoOggState.mStartTime =
Some(TimeUnit::FromMicroseconds(videoStartTime));
}
}
if (HasAudio()) {
FindStartTime(TrackInfo::kAudioTrack, audioStartTime);
if (!audioStartTime.IsPosInf()) {
OGG_DEBUG("OggDemuxer::FindStartTime() audio=%s",
audioStartTime.ToString().get());
mAudioOggState.mStartTime = Some(audioStartTime);
if (audioStartTime != INT64_MAX) {
OGG_DEBUG("OggDemuxer::FindStartTime() audio=%" PRId64, audioStartTime);
mAudioOggState.mStartTime =
Some(TimeUnit::FromMicroseconds(audioStartTime));
}
}
TimeUnit startTime = std::min(videoStartTime, audioStartTime);
if (!startTime.IsPosInf()) {
int64_t startTime = std::min(videoStartTime, audioStartTime);
if (startTime != INT64_MAX) {
aOutStartTime = startTime;
}
}
void OggDemuxer::FindStartTime(TrackInfo::TrackType aType,
TimeUnit& aOutStartTime) {
TimeUnit startTime = TimeUnit::FromInfinity();
int64_t& aOutStartTime) {
int64_t startTime = INT64_MAX;
OggCodecState* state = GetTrackCodecState(aType);
ogg_packet* pkt = GetNextPacket(aType);
@ -1082,21 +1078,22 @@ void OggDemuxer::FindStartTime(TrackInfo::TrackType aType,
startTime = state->PacketStartTime(pkt);
}
if (!startTime.IsInfinite()) {
if (startTime != INT64_MAX) {
aOutStartTime = startTime;
}
}
nsresult OggDemuxer::SeekInternal(TrackInfo::TrackType aType,
const TimeUnit& aTarget) {
OGG_DEBUG("About to seek to %s", aTarget.ToString().get());
int64_t target = aTarget.ToMicroseconds();
OGG_DEBUG("About to seek to %" PRId64, target);
nsresult res;
TimeUnit adjustedTarget = aTarget;
TimeUnit startTime = StartTime(aType);
TimeUnit endTime =
mInfo.mMetadataDuration.valueOr(TimeUnit::Zero()) + startTime;
int64_t adjustedTarget = target;
int64_t startTime = StartTime(aType);
int64_t endTime = mInfo.mMetadataDuration->ToMicroseconds() + startTime;
if (aType == TrackInfo::kAudioTrack && mOpusState) {
adjustedTarget = std::max(startTime, aTarget - OGG_SEEK_OPUS_PREROLL);
adjustedTarget =
std::max(startTime, target - OGG_SEEK_OPUS_PREROLL.ToMicroseconds());
}
if (!HaveStartTime(aType) || adjustedTarget == startTime) {
@ -1123,19 +1120,19 @@ nsresult OggDemuxer::SeekInternal(TrackInfo::TrackType aType,
// Figure out if the seek target lies in a buffered range.
SeekRange r =
SelectSeekRange(aType, ranges, aTarget, startTime, endTime, true);
SelectSeekRange(aType, ranges, target, startTime, endTime, true);
if (!r.IsNull()) {
// We know the buffered range in which the seek target lies, do a
// bisection search in that buffered range.
res = SeekInBufferedRange(aType, aTarget, adjustedTarget, startTime,
res = SeekInBufferedRange(aType, target, adjustedTarget, startTime,
endTime, ranges, r);
NS_ENSURE_SUCCESS(res, res);
} else {
// The target doesn't lie in a buffered range. Perform a bisection
// search over the whole media, using the known buffered ranges to
// reduce the search space.
res = SeekInUnbuffered(aType, aTarget, startTime, endTime, ranges);
res = SeekInUnbuffered(aType, target, startTime, endTime, ranges);
NS_ENSURE_SUCCESS(res, res);
}
}
@ -1158,19 +1155,12 @@ nsresult OggDemuxer::SeekInternal(TrackInfo::TrackType aType,
OGG_DEBUG("End of stream reached before keyframe found in indexed seek");
break;
}
// Skip any header packet; this can happen when looping without parsing
// the headers again.
if (state->IsHeader(packet)) {
OggPacketPtr drop(state->PacketOut());
continue;
}
TimeUnit startTstamp = state->PacketStartTime(packet);
int64_t startTstamp = state->PacketStartTime(packet);
if (foundKeyframe && startTstamp > adjustedTarget) {
break;
}
if (state->IsKeyframe(packet)) {
OGG_DEBUG("keyframe found after seeking at %s",
startTstamp.ToString().get());
OGG_DEBUG("keyframe found after seeking at %" PRId64, startTstamp);
tempPackets.Erase();
foundKeyframe = true;
}
@ -1201,7 +1191,7 @@ OggDemuxer::IndexedSeekResult OggDemuxer::RollbackIndexedSeek(
}
OggDemuxer::IndexedSeekResult OggDemuxer::SeekToKeyframeUsingIndex(
TrackInfo::TrackType aType, const TimeUnit& aTarget) {
TrackInfo::TrackType aType, int64_t aTarget) {
if (!HasSkeleton() || !mSkeletonState->HasIndex()) {
return SEEK_INDEX_FAIL;
}
@ -1407,14 +1397,12 @@ RefPtr<OggTrackDemuxer::SeekPromise> OggTrackDemuxer::Seek(
}
RefPtr<MediaRawData> OggTrackDemuxer::NextSample() {
OGG_DEBUG("OggTrackDemuxer::NextSample");
if (mQueuedSample) {
RefPtr<MediaRawData> nextSample = mQueuedSample;
mQueuedSample = nullptr;
if (mType == TrackInfo::kAudioTrack) {
nextSample->mTrackInfo = mParent->mSharedAudioTrackInfo;
}
OGG_DEBUG("OggTrackDemuxer::NextSample (queued)");
return nextSample;
}
ogg_packet* packet = mParent->GetNextPacket(mType);
@ -1447,41 +1435,6 @@ RefPtr<MediaRawData> OggTrackDemuxer::NextSample() {
if (!data->mTime.IsValid()) {
return nullptr;
}
TimeUnit mediaStartTime = mParent->mStartTime.valueOr(TimeUnit::Zero());
TimeUnit mediaEndTime =
mediaStartTime +
mParent->mInfo.mMetadataDuration.valueOr(TimeUnit::FromInfinity());
// Trim packets that end after the media duration.
if (mType == TrackInfo::kAudioTrack) {
OGG_DEBUG("Check trimming %s > %s", data->GetEndTime().ToString().get(),
mediaEndTime.ToString().get());
// Because of a quirk of this demuxer, this needs to be >=. It looks
// useless, because `toTrim` is going to be 0, but it allows setting
// `mOriginalPresentationWindow`, so that the trimming logic will later
// remove extraneous frames.
// This demuxer sets the end time of a packet to be the end time that
// should be played, not the end time that corresponds to the number of
// decoded frames, that we can only have after decoding.
// >= allows detecting the last packet, and trimming it appropriately,
// after decoding has happened, with the AudioTrimmer.
if (data->GetEndTime() >= mediaEndTime) {
TimeUnit toTrim = data->GetEndTime() - mediaEndTime;
TimeUnit originalDuration = data->mDuration;
OGG_DEBUG(
"Demuxed past media end time, trimming: packet [%s,%s] to [%s,%s]",
data->mTime.ToString().get(), data->GetEndTime().ToString().get(),
data->mTime.ToString().get(),
(data->mTime + originalDuration).ToString().get());
data->mOriginalPresentationWindow =
Some(TimeInterval{data->mTime, data->GetEndTime()});
data->mDuration -= toTrim;
}
}
OGG_DEBUG("OGG packet demuxed: [%s,%s] (duration: %s, type: %s)",
data->mTime.ToString().get(), data->GetEndTime().ToString().get(),
data->mDuration.ToString().get(),
mType == TrackInfo::kAudioTrack ? "audio" : "video");
return data;
}
@ -1580,15 +1533,15 @@ tainted_opaque_ogg<ogg_uint32_t> OggDemuxer::GetPageChecksum(
return ret.to_opaque();
}
TimeUnit OggDemuxer::RangeStartTime(TrackInfo::TrackType aType,
int64_t aOffset) {
int64_t OggDemuxer::RangeStartTime(TrackInfo::TrackType aType,
int64_t aOffset) {
int64_t position = Resource(aType)->Tell();
nsresult res = Resource(aType)->Seek(nsISeekableStream::NS_SEEK_SET, aOffset);
NS_ENSURE_SUCCESS(res, TimeUnit::Zero());
TimeUnit startTime = TimeUnit::Zero();
NS_ENSURE_SUCCESS(res, 0);
int64_t startTime = 0;
FindStartTime(aType, startTime);
res = Resource(aType)->Seek(nsISeekableStream::NS_SEEK_SET, position);
NS_ENSURE_SUCCESS(res, TimeUnit::Invalid());
NS_ENSURE_SUCCESS(res, -1);
return startTime;
}
@ -1607,19 +1560,19 @@ struct nsDemuxerAutoOggSyncState {
tainted_ogg<ogg_sync_state*> mState;
};
TimeUnit OggDemuxer::RangeEndTime(TrackInfo::TrackType aType,
int64_t aEndOffset) {
int64_t OggDemuxer::RangeEndTime(TrackInfo::TrackType aType,
int64_t aEndOffset) {
int64_t position = Resource(aType)->Tell();
TimeUnit endTime = RangeEndTime(aType, 0, aEndOffset, false);
int64_t endTime = RangeEndTime(aType, 0, aEndOffset, false);
nsresult res =
Resource(aType)->Seek(nsISeekableStream::NS_SEEK_SET, position);
NS_ENSURE_SUCCESS(res, TimeUnit::Invalid());
NS_ENSURE_SUCCESS(res, -1);
return endTime;
}
TimeUnit OggDemuxer::RangeEndTime(TrackInfo::TrackType aType,
int64_t aStartOffset, int64_t aEndOffset,
bool aCachedDataOnly) {
int64_t OggDemuxer::RangeEndTime(TrackInfo::TrackType aType,
int64_t aStartOffset, int64_t aEndOffset,
bool aCachedDataOnly) {
nsDemuxerAutoOggSyncState sync(*mSandbox);
// We need to find the last page which ends before aEndOffset that
@ -1633,13 +1586,13 @@ TimeUnit OggDemuxer::RangeEndTime(TrackInfo::TrackType aType,
int64_t readStartOffset = aEndOffset;
int64_t readLimitOffset = aEndOffset;
int64_t readHead = aEndOffset;
TimeUnit endTime = TimeUnit::Invalid();
int64_t endTime = -1;
uint32_t checksumAfterSeek = 0;
uint32_t prevChecksumAfterSeek = 0;
bool mustBackOff = false;
tainted_ogg<ogg_page*> page = mSandbox->malloc_in_sandbox<ogg_page>();
if (!page) {
return TimeUnit::Invalid();
return -1;
}
auto clean_page = MakeScopeExit([&] { mSandbox->free_in_sandbox(page); });
while (true) {
@ -1656,14 +1609,14 @@ TimeUnit OggDemuxer::RangeEndTime(TrackInfo::TrackType aType,
seek_ret, (static_cast<void>(checker = val), checker.isValid()),
&failedVerify);
if (failedVerify) {
return TimeUnit::Invalid();
return -1;
}
if (ret.unverified_safe_because(RLBOX_OGG_STATE_ASSERT_REASON) == 0) {
// We need more data if we've not encountered a page we've seen before,
// or we've read to the end of file.
if (mustBackOff || readHead == aEndOffset || readHead == aStartOffset) {
if (endTime.IsValid() || readStartOffset == 0) {
if (endTime != -1 || readStartOffset == 0) {
// We have encountered a page before, or we're at the end of file.
break;
}
@ -1698,15 +1651,15 @@ TimeUnit OggDemuxer::RangeEndTime(TrackInfo::TrackType aType,
if (aCachedDataOnly) {
res = Resource(aType)->GetResource()->ReadFromCache(buffer, readHead,
bytesToRead);
NS_ENSURE_SUCCESS(res, TimeUnit::Invalid());
NS_ENSURE_SUCCESS(res, -1);
bytesRead = bytesToRead;
} else {
MOZ_ASSERT(readHead < aEndOffset,
"resource pos must be before range end");
res = Resource(aType)->Seek(nsISeekableStream::NS_SEEK_SET, readHead);
NS_ENSURE_SUCCESS(res, TimeUnit::Invalid());
NS_ENSURE_SUCCESS(res, -1);
res = Resource(aType)->Read(buffer, bytesToRead, &bytesRead);
NS_ENSURE_SUCCESS(res, TimeUnit::Invalid());
NS_ENSURE_SUCCESS(res, -1);
}
readHead += bytesRead;
if (readHead > readLimitOffset) {
@ -1720,11 +1673,11 @@ TimeUnit OggDemuxer::RangeEndTime(TrackInfo::TrackType aType,
int wrote_success =
CopyAndVerifyOrFail(ret, val == 0 || val == -1, &failedWroteVerify);
if (failedWroteVerify) {
return TimeUnit::Invalid();
return -1;
}
if (wrote_success != 0) {
endTime = TimeUnit::Invalid();
endTime = -1;
break;
}
continue;
@ -1774,12 +1727,12 @@ TimeUnit OggDemuxer::RangeEndTime(TrackInfo::TrackType aType,
// It's probably from a new "link" in a "chained" ogg. Don't
// bother even trying to find a duration...
SetChained();
endTime = TimeUnit::Invalid();
endTime = -1;
break;
}
TimeUnit t = codecState->Time(granulepos);
if (t.IsValid()) {
int64_t t = codecState->Time(granulepos);
if (t != -1) {
endTime = t;
}
}
@ -1796,16 +1749,15 @@ nsresult OggDemuxer::GetSeekRanges(TrackInfo::TrackType aType,
for (uint32_t index = 0; index < cached.Length(); index++) {
auto& range = cached[index];
TimeUnit startTime = TimeUnit::Invalid();
TimeUnit endTime = TimeUnit::Invalid();
int64_t startTime = -1;
int64_t endTime = -1;
if (NS_FAILED(Reset(aType))) {
return NS_ERROR_FAILURE;
}
int64_t startOffset = range.mStart;
int64_t endOffset = range.mEnd;
startTime = RangeStartTime(aType, startOffset);
if (startTime.IsValid() &&
((endTime = RangeEndTime(aType, endOffset)).IsValid())) {
if (startTime != -1 && ((endTime = RangeEndTime(aType, endOffset)) != -1)) {
NS_WARNING_ASSERTION(startTime < endTime,
"Start time must be before end time");
aRanges.AppendElement(
@ -1820,12 +1772,11 @@ nsresult OggDemuxer::GetSeekRanges(TrackInfo::TrackType aType,
OggDemuxer::SeekRange OggDemuxer::SelectSeekRange(
TrackInfo::TrackType aType, const nsTArray<SeekRange>& ranges,
const TimeUnit& aTarget, const TimeUnit& aStartTime,
const TimeUnit& aEndTime, bool aExact) {
int64_t aTarget, int64_t aStartTime, int64_t aEndTime, bool aExact) {
int64_t so = 0;
int64_t eo = Resource(aType)->GetLength();
TimeUnit st = aStartTime;
TimeUnit et = aEndTime;
int64_t st = aStartTime;
int64_t et = aEndTime;
for (uint32_t i = 0; i < ranges.Length(); i++) {
const SeekRange& r = ranges[i];
if (r.mTimeStart < aTarget) {
@ -1849,18 +1800,17 @@ OggDemuxer::SeekRange OggDemuxer::SelectSeekRange(
}
nsresult OggDemuxer::SeekInBufferedRange(TrackInfo::TrackType aType,
const TimeUnit& aTarget,
TimeUnit& aAdjustedTarget,
const TimeUnit& aStartTime,
const TimeUnit& aEndTime,
int64_t aTarget,
int64_t aAdjustedTarget,
int64_t aStartTime, int64_t aEndTime,
const nsTArray<SeekRange>& aRanges,
const SeekRange& aRange) {
OGG_DEBUG("Seeking in buffered data to %s using bisection search",
aTarget.ToString().get());
OGG_DEBUG("Seeking in buffered data to %" PRId64 " using bisection search",
aTarget);
if (aType == TrackInfo::kVideoTrack || aAdjustedTarget >= aTarget) {
// We know the exact byte range in which the target must lie. It must
// be buffered in the media cache. Seek there.
nsresult res = SeekBisection(aType, aTarget, aRange, TimeUnit::Zero());
nsresult res = SeekBisection(aType, aTarget, aRange, 0);
if (NS_FAILED(res) || aType != TrackInfo::kVideoTrack) {
return res;
}
@ -1875,7 +1825,7 @@ nsresult OggDemuxer::SeekInBufferedRange(TrackInfo::TrackType aType,
MOZ_ASSERT(packet->granulepos != -1, "Must have a granulepos");
int shift = mTheoraState->KeyFrameGranuleJobs();
int64_t keyframeGranulepos = (packet->granulepos >> shift) << shift;
TimeUnit keyframeTime = mTheoraState->StartTime(keyframeGranulepos);
int64_t keyframeTime = mTheoraState->StartTime(keyframeGranulepos);
SEEK_LOG(LogLevel::Debug,
("Keyframe for %lld is at %lld, seeking back to it", frameTime,
keyframeTime));
@ -1893,12 +1843,11 @@ nsresult OggDemuxer::SeekInBufferedRange(TrackInfo::TrackType aType,
}
nsresult OggDemuxer::SeekInUnbuffered(TrackInfo::TrackType aType,
const TimeUnit& aTarget,
const TimeUnit& aStartTime,
const TimeUnit& aEndTime,
int64_t aTarget, int64_t aStartTime,
int64_t aEndTime,
const nsTArray<SeekRange>& aRanges) {
OGG_DEBUG("Seeking in unbuffered data to %s using bisection search",
aTarget.ToString().get());
OGG_DEBUG("Seeking in unbuffered data to %" PRId64 " using bisection search",
aTarget);
// If we've got an active Theora bitstream, determine the maximum possible
// time in usecs which a keyframe could be before a given interframe. We
@ -1912,15 +1861,16 @@ nsresult OggDemuxer::SeekInUnbuffered(TrackInfo::TrackType aType,
// as the extra decoding causes a noticeable speed hit when all the data
// is buffered (compared to just doing a bisection to exactly find the
// keyframe).
TimeUnit keyframeOffset = TimeUnit::Zero();
int64_t keyframeOffsetMs = 0;
if (aType == TrackInfo::kVideoTrack && mTheoraState) {
keyframeOffset = mTheoraState->MaxKeyframeOffset();
keyframeOffsetMs = mTheoraState->MaxKeyframeOffset();
}
// Add in the Opus pre-roll if necessary, as well.
if (aType == TrackInfo::kAudioTrack && mOpusState) {
keyframeOffset = std::max(keyframeOffset, OGG_SEEK_OPUS_PREROLL);
keyframeOffsetMs =
std::max(keyframeOffsetMs, OGG_SEEK_OPUS_PREROLL.ToMilliseconds());
}
TimeUnit seekTarget = std::max(aStartTime, aTarget - keyframeOffset);
int64_t seekTarget = std::max(aStartTime, aTarget - keyframeOffsetMs);
// Minimize the bisection search space using the known timestamps from the
// buffered ranges.
SeekRange k =
@ -1928,10 +1878,8 @@ nsresult OggDemuxer::SeekInUnbuffered(TrackInfo::TrackType aType,
return SeekBisection(aType, seekTarget, k, OGG_SEEK_FUZZ_USECS);
}
nsresult OggDemuxer::SeekBisection(TrackInfo::TrackType aType,
const TimeUnit& aTarget,
const SeekRange& aRange,
const TimeUnit& aFuzz) {
nsresult OggDemuxer::SeekBisection(TrackInfo::TrackType aType, int64_t aTarget,
const SeekRange& aRange, uint32_t aFuzz) {
nsresult res;
if (aTarget <= aRange.mTimeStart) {
@ -1946,15 +1894,13 @@ nsresult OggDemuxer::SeekBisection(TrackInfo::TrackType aType,
// Bisection search, find start offset of last page with end time less than
// the seek target.
ogg_int64_t startOffset = aRange.mOffsetStart;
ogg_int64_t startTime = aRange.mTimeStart.ToMicroseconds();
ogg_int64_t startTime = aRange.mTimeStart;
ogg_int64_t startLength = 0; // Length of the page at startOffset.
ogg_int64_t endOffset = aRange.mOffsetEnd;
ogg_int64_t endTime = aRange.mTimeEnd.ToMicroseconds();
ogg_int64_t endTime = aRange.mTimeEnd;
ogg_int64_t seekTarget = aTarget.ToMicroseconds();
int64_t seekLowerBound =
std::max(static_cast<int64_t>(0),
aTarget.ToMicroseconds() - aFuzz.ToMicroseconds());
ogg_int64_t seekTarget = aTarget;
int64_t seekLowerBound = std::max(static_cast<int64_t>(0), aTarget - aFuzz);
int hops = 0;
DebugOnly<ogg_int64_t> previousGuess = -1;
int backsteps = 0;
@ -2112,17 +2058,17 @@ nsresult OggDemuxer::SeekBisection(TrackInfo::TrackType aType,
if (aType == TrackInfo::kAudioTrack && granulepos > 0 &&
audioTime == -1) {
if (mVorbisState && serial == mVorbisState->mSerial) {
audioTime = mVorbisState->Time(granulepos).ToMicroseconds();
audioTime = mVorbisState->Time(granulepos);
} else if (mOpusState && serial == mOpusState->mSerial) {
audioTime = mOpusState->Time(granulepos).ToMicroseconds();
audioTime = mOpusState->Time(granulepos);
} else if (mFlacState && serial == mFlacState->mSerial) {
audioTime = mFlacState->Time(granulepos).ToMicroseconds();
audioTime = mFlacState->Time(granulepos);
}
}
if (aType == TrackInfo::kVideoTrack && granulepos > 0 &&
serial == mTheoraState->mSerial && videoTime == -1) {
videoTime = mTheoraState->Time(granulepos).ToMicroseconds();
videoTime = mTheoraState->Time(granulepos);
}
if (pageOffset + pageLength >= endOffset) {
@ -2169,7 +2115,7 @@ nsresult OggDemuxer::SeekBisection(TrackInfo::TrackType aType,
// last page before the target, and the first page after the target.
SEEK_LOG(LogLevel::Debug,
("Terminating seek at offset=%lld", startOffset));
MOZ_ASSERT(startTime < aTarget.ToMicroseconds(),
MOZ_ASSERT(startTime < aTarget,
"Start time must always be less than target");
res = Resource(aType)->Seek(nsISeekableStream::NS_SEEK_SET, startOffset);
NS_ENSURE_SUCCESS(res, res);


@ -54,8 +54,8 @@ class OggDemuxer : public MediaDataDemuxer,
tainted_opaque_ogg<ogg_sync_state*> mState;
};
media::TimeIntervals GetBuffered(TrackInfo::TrackType aType);
void FindStartTime(media::TimeUnit& aOutStartTime);
void FindStartTime(TrackInfo::TrackType, media::TimeUnit& aOutStartTime);
void FindStartTime(int64_t& aOutStartTime);
void FindStartTime(TrackInfo::TrackType, int64_t& aOutStartTime);
nsresult SeekInternal(TrackInfo::TrackType aType,
const media::TimeUnit& aTarget);
@ -68,7 +68,7 @@ class OggDemuxer : public MediaDataDemuxer,
SEEK_FATAL_ERROR // Error returned by a stream operation.
};
IndexedSeekResult SeekToKeyframeUsingIndex(TrackInfo::TrackType aType,
const media::TimeUnit& aTarget);
int64_t aTarget);
// Rolls back a seek-using-index attempt, returning a failure error code.
IndexedSeekResult RollbackIndexedSeek(TrackInfo::TrackType aType,
@ -80,36 +80,29 @@ class OggDemuxer : public MediaDataDemuxer,
// (because it's cached).
class SeekRange {
public:
SeekRange()
: mOffsetStart(0),
mOffsetEnd(0),
mTimeStart(media::TimeUnit::Zero()),
mTimeEnd(media::TimeUnit::Zero()) {}
SeekRange() : mOffsetStart(0), mOffsetEnd(0), mTimeStart(0), mTimeEnd(0) {}
SeekRange(int64_t aOffsetStart, int64_t aOffsetEnd,
const media::TimeUnit& aTimeStart,
const media::TimeUnit& aTimeEnd)
SeekRange(int64_t aOffsetStart, int64_t aOffsetEnd, int64_t aTimeStart,
int64_t aTimeEnd)
: mOffsetStart(aOffsetStart),
mOffsetEnd(aOffsetEnd),
mTimeStart(aTimeStart),
mTimeEnd(aTimeEnd) {}
bool IsNull() const {
return mOffsetStart == 0 && mOffsetEnd == 0 && mTimeStart.IsZero() &&
mTimeEnd.IsZero();
return mOffsetStart == 0 && mOffsetEnd == 0 && mTimeStart == 0 &&
mTimeEnd == 0;
}
int64_t mOffsetStart, mOffsetEnd; // in bytes.
media::TimeUnit mTimeStart, mTimeEnd;
int64_t mTimeStart, mTimeEnd; // in usecs.
};
nsresult GetSeekRanges(TrackInfo::TrackType aType,
nsTArray<SeekRange>& aRanges);
SeekRange SelectSeekRange(TrackInfo::TrackType aType,
const nsTArray<SeekRange>& ranges,
const media::TimeUnit& aTarget,
const media::TimeUnit& aStartTime,
const media::TimeUnit& aEndTime, bool aExact);
const nsTArray<SeekRange>& ranges, int64_t aTarget,
int64_t aStartTime, int64_t aEndTime, bool aExact);
// Seeks to aTarget usecs in the buffered range aRange using bisection search,
// or to the keyframe prior to aTarget if we have video. aAdjustedTarget is
@ -117,11 +110,9 @@ class OggDemuxer : public MediaDataDemuxer,
// necessary. aStartTime must be the presentation time at the start of media,
// and aEndTime the time at end of media. aRanges must be the time/byte ranges
// buffered in the media cache as per GetSeekRanges().
nsresult SeekInBufferedRange(TrackInfo::TrackType aType,
const media::TimeUnit& aTarget,
media::TimeUnit& aAdjustedTarget,
const media::TimeUnit& aStartTime,
const media::TimeUnit& aEndTime,
nsresult SeekInBufferedRange(TrackInfo::TrackType aType, int64_t aTarget,
int64_t aAdjustedTarget, int64_t aStartTime,
int64_t aEndTime,
const nsTArray<SeekRange>& aRanges,
const SeekRange& aRange);
@ -131,10 +122,8 @@ class OggDemuxer : public MediaDataDemuxer,
// search space. aStartTime must be the presentation time at the start of
// media, and aEndTime the time at end of media. aRanges must be the time/byte
// ranges buffered in the media cache as per GetSeekRanges().
nsresult SeekInUnbuffered(TrackInfo::TrackType aType,
const media::TimeUnit& aTarget,
const media::TimeUnit& aStartTime,
const media::TimeUnit& aEndTime,
nsresult SeekInUnbuffered(TrackInfo::TrackType aType, int64_t aTarget,
int64_t aStartTime, int64_t aEndTime,
const nsTArray<SeekRange>& aRanges);
// Performs a seek bisection to move the media stream's read cursor to the
@ -143,9 +132,8 @@ class OggDemuxer : public MediaDataDemuxer,
// i.e. it will only read inside of the aRange's start and end offsets.
// aFuzz is the number of usecs of leniency we'll allow; we'll terminate the
// seek when we land in the range (aTime - aFuzz, aTime) usecs.
nsresult SeekBisection(TrackInfo::TrackType aType,
const media::TimeUnit& aTarget,
const SeekRange& aRange, const media::TimeUnit& aFuzz);
nsresult SeekBisection(TrackInfo::TrackType aType, int64_t aTarget,
const SeekRange& aRange, uint32_t aFuzz);
// Chunk size to read when reading Ogg files. Average Ogg page length
// is about 4300 bytes, so we read the file in chunks larger than that.
@ -223,7 +211,7 @@ class OggDemuxer : public MediaDataDemuxer,
// Get the end time of aEndOffset. This is the playback position we'd reach
// after playback finished at aEndOffset.
media::TimeUnit RangeEndTime(TrackInfo::TrackType aType, int64_t aEndOffset);
int64_t RangeEndTime(TrackInfo::TrackType aType, int64_t aEndOffset);
// Get the end time of aEndOffset, without reading before aStartOffset.
// This is the playback position we'd reach after playback finished at
@ -232,13 +220,13 @@ class OggDemuxer : public MediaDataDemuxer,
// regular blocking reads from the media stream. If bool aCachedDataOnly
// is true, this can safely be called on the main thread, otherwise it
// must be called on the state machine thread.
media::TimeUnit RangeEndTime(TrackInfo::TrackType aType, int64_t aStartOffset,
int64_t aEndOffset, bool aCachedDataOnly);
int64_t RangeEndTime(TrackInfo::TrackType aType, int64_t aStartOffset,
int64_t aEndOffset, bool aCachedDataOnly);
// Get the start time of the range beginning at aOffset. This is the start
// time of the first aType sample we'd be able to play if we
// started playback at aOffset.
media::TimeUnit RangeStartTime(TrackInfo::TrackType aType, int64_t aOffset);
int64_t RangeStartTime(TrackInfo::TrackType aType, int64_t aOffset);
// All invocations of libogg functionality from the demuxer is sandboxed using
// wasm library sandboxes on supported platforms. These functions that create
@ -301,7 +289,7 @@ class OggDemuxer : public MediaDataDemuxer,
OggStateContext mAudioOggState;
OggStateContext mVideoOggState;
Maybe<media::TimeUnit> mStartTime;
Maybe<int64_t> mStartTime;
// Booleans to indicate if we have audio and/or video data
bool HasVideo() const;
@ -311,8 +299,8 @@ class OggDemuxer : public MediaDataDemuxer,
}
bool HaveStartTime() const;
bool HaveStartTime(TrackInfo::TrackType aType);
media::TimeUnit StartTime() const;
media::TimeUnit StartTime(TrackInfo::TrackType aType);
int64_t StartTime() const;
int64_t StartTime(TrackInfo::TrackType aType);
// The picture region inside Theora frame to be displayed, if we have
// a Theora video track.


@ -10,9 +10,9 @@
#include "OpusParser.h"
#include "VideoUtils.h"
#include <opus/opus.h>
#include "opus/opus.h"
extern "C" {
#include <opus/opus_multistream.h>
#include "opus/opus_multistream.h"
}
#include <cmath>
@ -39,7 +39,7 @@ OpusParser::OpusParser()
}
bool OpusParser::DecodeHeader(unsigned char* aData, size_t aLength) {
if (aLength < 19 || memcmp(aData, "OpusHead", 8) != 0) {
if (aLength < 19 || memcmp(aData, "OpusHead", 8)) {
OPUS_LOG(LogLevel::Debug, ("Invalid Opus file: unrecognized header"));
return false;
}
@ -143,9 +143,7 @@ bool OpusParser::DecodeHeader(unsigned char* aData, size_t aLength) {
}
bool OpusParser::DecodeTags(unsigned char* aData, size_t aLength) {
if (aLength < 16 || memcmp(aData, "OpusTags", 8) != 0) {
return false;
}
if (aLength < 16 || memcmp(aData, "OpusTags", 8)) return false;
// Copy out the raw comment lines, but only do basic validation
// checks against the string packing: too little data, too many

View file

@ -32,7 +32,7 @@ class OpusParser {
int mChannelMapping; // Channel mapping family.
int mStreams; // Number of packed streams in each packet.
int mCoupledStreams; // Number of packed coupled streams in each packet.
unsigned char mMappingTable[255] = {}; // Channel mapping table.
unsigned char mMappingTable[255]; // Channel mapping table.
// Granule position (end sample) of the last decoded Opus packet. This is
// used to calculate the amount we should trim from the last packet.


@ -12,12 +12,13 @@
#include "TimeUnits.h"
#include "VideoUtils.h"
#include "VorbisDecoder.h" // For VorbisLayout
#include "VorbisUtils.h"
#include "mozilla/EndianUtils.h"
#include "mozilla/PodOperations.h"
#include "mozilla/SyncRunnable.h"
#include <opus/opus.h>
#include "opus/opus.h"
extern "C" {
#include <opus/opus_multistream.h>
#include "opus/opus_multistream.h"
}
#define OPUS_DEBUG(arg, ...) \
@ -102,14 +103,14 @@ RefPtr<MediaDataDecoder::InitPromise> OpusDataDecoder::Init() {
mSkip = mOpusParser->mPreSkip;
mPaddingDiscarded = false;
if (opusCodecSpecificData.mContainerCodecDelayFrames !=
mOpusParser->mPreSkip) {
if (opusCodecSpecificData.mContainerCodecDelayMicroSeconds !=
FramesToUsecs(mOpusParser->mPreSkip, mOpusParser->mRate).value()) {
NS_WARNING(
"Invalid Opus header: container CodecDelay and Opus pre-skip do not "
"match!");
}
OPUS_DEBUG("Opus preskip in extradata: %" PRId64 " frames",
opusCodecSpecificData.mContainerCodecDelayFrames);
OPUS_DEBUG("Opus preskip in extradata: %" PRId64 "us",
opusCodecSpecificData.mContainerCodecDelayMicroSeconds);
if (mInfo.mRate != (uint32_t)mOpusParser->mRate) {
NS_WARNING("Invalid Opus header: container and codec rate do not match!");
@ -275,6 +276,25 @@ RefPtr<MediaDataDecoder::DecodePromise> OpusDataDecoder::Decode(
aSample->mTime.ToSeconds(), aSample->GetEndTime().ToSeconds());
}
if (aSample->mDiscardPadding > 0) {
OPUS_DEBUG("Opus decoder discarding %u of %d frames",
aSample->mDiscardPadding, frames);
// Padding discard is only supposed to happen on the final packet.
// Record the discard so we can return an error if another packet is
// decoded.
if (aSample->mDiscardPadding > uint32_t(frames)) {
// Discarding more than the entire packet is invalid.
OPUS_DEBUG("Opus error, discard padding larger than packet");
return DecodePromise::CreateAndReject(
MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
RESULT_DETAIL("Discard padding larger than packet")),
__func__);
}
mPaddingDiscarded = true;
frames = frames - aSample->mDiscardPadding;
}
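For reference, the discard-padding handling above boils down to a small piece of arithmetic; a minimal sketch, assuming `frames` is the non-negative frame count of the final packet (hypothetical helper, not part of the patch):
#include <cstdint>
// Frames to keep from the final Opus packet, or -1 if the container asks
// to discard more than the packet holds (the fatal error case above).
int64_t KeptFrames(int64_t frames, uint32_t discardPadding) {
  if (int64_t(discardPadding) > frames) {
    return -1;  // discard padding larger than packet
  }
  return frames - int64_t(discardPadding);
}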
// Apply the header gain if one was specified.
#ifdef MOZ_SAMPLE_TYPE_FLOAT32
if (mOpusParser->mGain != 1.0f) {

View file

@ -7,7 +7,6 @@
#include "TheoraDecoder.h"
#include <algorithm>
#include <ogg/ogg.h>
#include "ImageContainer.h"
#include "TimeUnits.h"

View file

@ -9,7 +9,8 @@
# include <stdint.h>
# include "PlatformDecoderModule.h"
# include <theora/theoradec.h>
# include "ogg/ogg.h"
# include "theora/theoradec.h"
namespace mozilla {

View file

@ -7,6 +7,7 @@
#include "VorbisDecoder.h"
#include "VideoUtils.h"
#include "VorbisUtils.h"
#include "XiphExtradata.h"
#include "mozilla/Logging.h"
#include "mozilla/PodOperations.h"
@ -178,7 +179,7 @@ RefPtr<MediaDataDecoder::DecodePromise> VorbisDataDecoder::Decode(
LOG(LogLevel::Warning, ("vorbis_synthesis_blockin returned an error"));
}
float** pcm = 0;
VorbisPCMValue** pcm = 0;
int32_t frames = vorbis_synthesis_pcmout(&mVorbisDsp, &pcm);
if (frames == 0) {
return DecodePromise::CreateAndResolve(DecodedData(), __func__);
@ -195,9 +196,9 @@ RefPtr<MediaDataDecoder::DecodePromise> VorbisDataDecoder::Decode(
MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__), __func__);
}
for (uint32_t j = 0; j < channels; ++j) {
float* channel = pcm[j];
VorbisPCMValue* channel = pcm[j];
for (uint32_t i = 0; i < uint32_t(frames); ++i) {
buffer[i * channels + j] = channel[i];
buffer[i * channels + j] = MOZ_CONVERT_VORBIS_SAMPLE(channel[i]);
}
}
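The copy loop above is a standard planar-to-interleaved conversion; a concrete illustration with assumed values:
// channels = 2, frames = 3:
//   pcm[0] = {L0, L1, L2}   (left channel, planar)
//   pcm[1] = {R0, R1, R2}   (right channel, planar)
// buffer  = {L0, R0, L1, R1, L2, R2}
// i.e. buffer[frame * channels + channel] = pcm[channel][frame].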

View file

@ -10,7 +10,11 @@
# include "PlatformDecoderModule.h"
# include "mozilla/Maybe.h"
# include <vorbis/codec.h>
# ifdef MOZ_TREMOR
# include "tremor/ivorbiscodec.h"
# else
# include "vorbis/codec.h"
# endif
namespace mozilla {

View file

@ -248,17 +248,15 @@ class RemoteVideoDecoder final : public RemoteDataDecoder {
nsCString GetCodecName() const override {
if (mMediaInfoFlag & MediaInfoFlag::VIDEO_H264) {
return "h264"_ns;
}
if (mMediaInfoFlag & MediaInfoFlag::VIDEO_VP8) {
} else if (mMediaInfoFlag & MediaInfoFlag::VIDEO_VP8) {
return "vp8"_ns;
}
if (mMediaInfoFlag & MediaInfoFlag::VIDEO_VP9) {
} else if (mMediaInfoFlag & MediaInfoFlag::VIDEO_VP9) {
return "vp9"_ns;
}
if (mMediaInfoFlag & MediaInfoFlag::VIDEO_AV1) {
} else if (mMediaInfoFlag & MediaInfoFlag::VIDEO_AV1) {
return "av1"_ns;
} else {
return "unknown"_ns;
}
return "unknown"_ns;
}
RefPtr<MediaDataDecoder::DecodePromise> Decode(
@ -728,11 +726,8 @@ class RemoteAudioDecoder final : public RemoteDataDecoder {
AssertOnThread();
LOG("ProcessOutput");
if (ShouldDiscardSample(aSample->Session()) || !aBuffer->IsValid()) {
aSample->Dispose();
LOG("Discarding sample");
return;
}
@ -757,44 +752,34 @@ class RemoteAudioDecoder final : public RemoteDataDecoder {
if (!ok ||
(IsSampleTimeSmallerThanFirstDemuxedSampleTime(presentationTimeUs) &&
!isEOS)) {
LOG("ProcessOutput: decoding error ok[%s], pts[%" PRId64 "], eos[%s]",
ok ? "true" : "false", presentationTimeUs, isEOS ? "true" : "false");
Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__));
return;
}
if (size > 0) {
const int32_t sampleSize = sizeof(int16_t);
const int32_t numSamples = size / sampleSize;
#ifdef MOZ_SAMPLE_TYPE_S16
const int32_t numSamples = size / 2;
#else
# error We only support 16-bit integer PCM
#endif
InflatableShortBuffer audio(numSamples);
AlignedAudioBuffer audio(numSamples);
if (!audio) {
Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
LOG("OOM while allocating temporary output buffer");
return;
}
jni::ByteBuffer::LocalRef dest = jni::ByteBuffer::New(audio.get(), size);
aBuffer->WriteToByteBuffer(dest, offset, size);
AlignedFloatBuffer converted = audio.Inflate();
TimeUnit pts = TimeUnit::FromMicroseconds(presentationTimeUs);
LOG("Decoded: %u frames of %s audio, pts: %s, %d channels, %" PRId32
" Hz",
numSamples / mOutputChannels,
sampleSize == sizeof(int16_t) ? "int16" : "f32", pts.ToString().get(),
mOutputChannels, mOutputSampleRate);
RefPtr<AudioData> data = new AudioData(
0, pts, std::move(converted), mOutputChannels, mOutputSampleRate);
RefPtr<AudioData> data =
new AudioData(0, TimeUnit::FromMicroseconds(presentationTimeUs),
std::move(audio), mOutputChannels, mOutputSampleRate);
UpdateOutputStatus(std::move(data));
} else {
LOG("ProcessOutput but size 0");
}
if (isEOS) {
LOG("EOS: drain complete");
DrainComplete();
}
}
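As a worked example of the size arithmetic above, assuming a stereo 16-bit buffer from MediaCodec:
// size       = 4096 bytes
// numSamples = size / sizeof(int16_t)       = 2048 interleaved samples
// frames     = numSamples / mOutputChannels = 2048 / 2 = 1024 frames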
@ -830,8 +815,6 @@ already_AddRefed<MediaDataDecoder> RemoteDataDecoder::CreateAudioDecoder(
java::sdk::MediaFormat::CreateAudioFormat(config.mMimeType, config.mRate,
config.mChannels, &format),
nullptr);
// format->SetInteger(java::sdk::MediaFormat::KEY_PCM_ENCODING,
// java::sdk::AudioFormat::ENCODING_PCM_FLOAT);
RefPtr<MediaDataDecoder> decoder =
new RemoteAudioDecoder(config, format, aDrmStubId);
@ -910,7 +893,7 @@ RefPtr<MediaDataDecoder::DecodePromise> RemoteDataDecoder::Drain() {
}
RefPtr<ShutdownPromise> RemoteDataDecoder::Shutdown() {
LOG("Shutdown");
LOG("");
AssertOnThread();
SetState(State::SHUTDOWN);
if (mJavaDecoder) {
@ -1084,14 +1067,10 @@ void RemoteDataDecoder::UpdateInputStatus(int64_t aTimestamp, bool aProcessed) {
void RemoteDataDecoder::UpdateOutputStatus(RefPtr<MediaData>&& aSample) {
AssertOnThread();
if (GetState() == State::SHUTDOWN) {
LOG("Update output status, but decoder has been shut down, dropping the "
"decoded results");
return;
}
if (IsUsefulData(aSample)) {
mDecodedData.AppendElement(std::move(aSample));
} else {
LOG("Decoded data, but not considered useful");
}
ReturnDecodedData();
}

View file

@ -545,8 +545,15 @@ MediaResult AppleATDecoder::SetupDecoder(MediaRawData* aSample) {
mOutputFormat.mFormatID = kAudioFormatLinearPCM;
mOutputFormat.mSampleRate = inputFormat.mSampleRate;
mOutputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
mOutputFormat.mBitsPerChannel = 32;
mOutputFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat | 0;
#elif defined(MOZ_SAMPLE_TYPE_S16)
mOutputFormat.mBitsPerChannel = 16;
mOutputFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | 0;
#else
# error Unknown audio sample type
#endif
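The remaining fields follow from the flags chosen above; as an illustration (assuming the usual bytes-per-frame = channels × bits / 8 relation):
// float32, stereo: mBitsPerChannel = 32 → mBytesPerFrame = 2 * 4 = 8
// int16,   stereo: mBitsPerChannel = 16 → mBytesPerFrame = 2 * 2 = 4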
// Set up the decoder so it gives us one sample per frame
mOutputFormat.mFramesPerPacket = 1;
mOutputFormat.mBytesPerPacket = mOutputFormat.mBytesPerFrame =

View file

@ -5,16 +5,10 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "FFmpegAudioDecoder.h"
#include "AudioSampleFormat.h"
#include "FFmpegLog.h"
#include "TimeUnits.h"
#include "VideoUtils.h"
#include "BufferReader.h"
#include "libavutil/dict.h"
#include "libavutil/samplefmt.h"
#if defined(FFVPX_VERSION)
# include "libavutil/channel_layout.h"
#endif
#include "mozilla/StaticPrefs_media.h"
#include "mozilla/Telemetry.h"
@ -22,39 +16,56 @@ namespace mozilla {
using TimeUnit = media::TimeUnit;
FFmpegAudioDecoder<LIBAV_VER>::FFmpegAudioDecoder(
FFmpegLibWrapper* aLib, const CreateDecoderParams& aDecoderParams)
: FFmpegDataDecoder(aLib, GetCodecId(aDecoderParams.AudioConfig().mMimeType,
aDecoderParams.AudioConfig())),
mAudioInfo(aDecoderParams.AudioConfig()) {
FFmpegAudioDecoder<LIBAV_VER>::FFmpegAudioDecoder(FFmpegLibWrapper* aLib,
const AudioInfo& aConfig)
: FFmpegDataDecoder(aLib, GetCodecId(aConfig.mMimeType)) {
MOZ_COUNT_CTOR(FFmpegAudioDecoder);
if (mCodecID == AV_CODEC_ID_AAC &&
mAudioInfo.mCodecSpecificConfig.is<AacCodecSpecificData>()) {
aConfig.mCodecSpecificConfig.is<AacCodecSpecificData>()) {
const AacCodecSpecificData& aacCodecSpecificData =
mAudioInfo.mCodecSpecificConfig.as<AacCodecSpecificData>();
aConfig.mCodecSpecificConfig.as<AacCodecSpecificData>();
mExtraData = new MediaByteBuffer;
// Ffmpeg expects the DecoderConfigDescriptor blob.
mExtraData->AppendElements(
*aacCodecSpecificData.mDecoderConfigDescriptorBinaryBlob);
FFMPEG_LOG("FFmpegAudioDecoder ctor (aac)");
mEncoderDelay = aacCodecSpecificData.mEncoderDelayFrames;
mEncoderPaddingOrTotalFrames = aacCodecSpecificData.mMediaFrameCount;
FFMPEG_LOG("FFmpegAudioDecoder (aac), found encoder delay (%" PRIu32
") and total frame count (%" PRIu64
") in codec-specific side data",
mEncoderDelay, TotalFrames());
return;
}
if (mCodecID == AV_CODEC_ID_MP3) {
// Nothing to do
return;
// Downgraded from diagnostic assert due to BMO 1776524 on Android.
MOZ_ASSERT(aConfig.mCodecSpecificConfig.is<Mp3CodecSpecificData>());
// Gracefully handle bad data. If we don't hit the preceding assert once
// this has been shipped for a while, we can remove it and make the
// following code unconditional.
if (aConfig.mCodecSpecificConfig.is<Mp3CodecSpecificData>()) {
const Mp3CodecSpecificData& mp3CodecSpecificData =
aConfig.mCodecSpecificConfig.as<Mp3CodecSpecificData>();
mEncoderDelay = mp3CodecSpecificData.mEncoderDelayFrames;
mEncoderPaddingOrTotalFrames = mp3CodecSpecificData.mEncoderPaddingFrames;
FFMPEG_LOG("FFmpegAudioDecoder (mp3), found encoder delay (%" PRIu32
") "
"and padding values (%" PRIu64 ") in codec-specific side-data",
mEncoderDelay, Padding());
return;
}
}
if (mCodecID == AV_CODEC_ID_FLAC) {
MOZ_DIAGNOSTIC_ASSERT(
mAudioInfo.mCodecSpecificConfig.is<FlacCodecSpecificData>());
aConfig.mCodecSpecificConfig.is<FlacCodecSpecificData>());
// Gracefully handle bad data. If we don't hit the preceding assert once
// this has been shipped for a while, we can remove it and make the
// following code unconditional.
if (mAudioInfo.mCodecSpecificConfig.is<FlacCodecSpecificData>()) {
if (aConfig.mCodecSpecificConfig.is<FlacCodecSpecificData>()) {
const FlacCodecSpecificData& flacCodecSpecificData =
mAudioInfo.mCodecSpecificConfig.as<FlacCodecSpecificData>();
aConfig.mCodecSpecificConfig.as<FlacCodecSpecificData>();
if (flacCodecSpecificData.mStreamInfoBinaryBlob->IsEmpty()) {
// Flac files without headers will be missing stream info. In this case
// we don't want to feed ffmpeg empty extra data as it will fail, just
@ -69,39 +80,21 @@ FFmpegAudioDecoder<LIBAV_VER>::FFmpegAudioDecoder(
}
}
// Vorbis and Opus are handled by this case.
// Gracefully handle failure to cover all codec specific cases above. Once
// we're confident there is no fall through from these cases above, we should
// remove this code.
RefPtr<MediaByteBuffer> audioCodecSpecificBinaryBlob =
GetAudioCodecSpecificBlob(mAudioInfo.mCodecSpecificConfig);
GetAudioCodecSpecificBlob(aConfig.mCodecSpecificConfig);
if (audioCodecSpecificBinaryBlob && audioCodecSpecificBinaryBlob->Length()) {
// Use a new MediaByteBuffer as the object will be modified during
// initialization.
mExtraData = new MediaByteBuffer;
mExtraData->AppendElements(*audioCodecSpecificBinaryBlob);
}
if (mCodecID == AV_CODEC_ID_OPUS) {
mDefaultPlaybackDeviceMono = aDecoderParams.mOptions.contains(
CreateDecoderParams::Option::DefaultPlaybackDeviceMono);
}
}
RefPtr<MediaDataDecoder::InitPromise> FFmpegAudioDecoder<LIBAV_VER>::Init() {
AVDictionary* options = nullptr;
if (mCodecID == AV_CODEC_ID_OPUS) {
// Opus has a special feature for stereo coding where it represents wide
// stereo by coding the channels 180 degrees out of phase. This improves
// quality, but needs to be disabled when the output is downmixed to mono.
// The number of playback channels is set in AudioSink, using the same
// method `DecideAudioPlaybackChannels()`, which triggers a downmix if
// needed.
if (mDefaultPlaybackDeviceMono ||
DecideAudioPlaybackChannels(mAudioInfo) == 1) {
mLib->av_dict_set(&options, "apply_phase_inv", "false", 0);
}
}
MediaResult rv = InitDecoder(&options);
mLib->av_dict_free(&options);
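The dictionary handling above follows the standard libavutil/libavcodec pattern; a standalone sketch with direct calls instead of the FFmpegLibWrapper indirection (assumes `ctx` and `codec` are already configured):
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>
}
bool OpenWithPhaseInvDisabled(AVCodecContext* ctx, const AVCodec* codec) {
  AVDictionary* opts = nullptr;
  av_dict_set(&opts, "apply_phase_inv", "false", 0);
  // avcodec_open2 consumes the entries it recognizes...
  int rv = avcodec_open2(ctx, codec, &opts);
  av_dict_free(&opts);  // ...and we free whatever remains.
  return rv == 0;
}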
MediaResult rv = InitDecoder();
return NS_SUCCEEDED(rv)
? InitPromise::CreateAndResolve(TrackInfo::kAudioTrack, __func__)
@ -117,22 +110,11 @@ void FFmpegAudioDecoder<LIBAV_VER>::InitCodecContext() {
// FFmpeg takes this as a suggestion for what format to use for audio samples.
// LibAV 0.8 produces rubbish interleaved float samples, so request 16-bit
// audio.
#ifdef MOZ_SAMPLE_TYPE_S16
mCodecContext->request_sample_fmt = AV_SAMPLE_FMT_S16;
#else
mCodecContext->request_sample_fmt =
(mLib->mVersion == 53) ? AV_SAMPLE_FMT_S16 : AV_SAMPLE_FMT_FLT;
#ifdef FFVPX_VERSION
// The first 32 bits of AudioInfo's channel layout are bit-for-bit
// compatible with WAVEFORMATEXTENSIBLE and FFmpeg's AVChannel enum, so we
// can cast here.
mCodecContext->ch_layout.nb_channels =
AssertedCast<int>(mAudioInfo.mChannels);
if (mAudioInfo.mChannelMap != AudioConfig::ChannelLayout::UNKNOWN_MAP) {
mLib->av_channel_layout_from_mask(
&mCodecContext->ch_layout,
AssertedCast<uint64_t>(mAudioInfo.mChannelMap));
} else {
mLib->av_channel_layout_default(&mCodecContext->ch_layout,
AssertedCast<int>(mAudioInfo.mChannels));
}
mCodecContext->sample_rate = AssertedCast<int>(mAudioInfo.mRate);
#endif
}
@ -144,6 +126,61 @@ static AlignedAudioBuffer CopyAndPackAudio(AVFrame* aFrame,
return audio;
}
#ifdef MOZ_SAMPLE_TYPE_S16
if (aFrame->format == AV_SAMPLE_FMT_FLT) {
// Audio data already packed. Need to convert from 32-bit float to S16
AudioDataValue* tmp = audio.get();
float* data = reinterpret_cast<float**>(aFrame->data)[0];
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
*tmp++ = FloatToAudioSample<int16_t>(*data++);
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_FLTP) {
// Planar audio data. Convert it from 32-bit float to S16
// and pack it into something we can understand.
AudioDataValue* tmp = audio.get();
float** data = reinterpret_cast<float**>(aFrame->data);
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
*tmp++ = FloatToAudioSample<int16_t>(data[channel][frame]);
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_S16) {
// Audio data already packed. No need to do anything other than copy it
// into a buffer we own.
memcpy(audio.get(), aFrame->data[0],
aNumChannels * aNumAFrames * sizeof(AudioDataValue));
} else if (aFrame->format == AV_SAMPLE_FMT_S16P) {
// Planar audio data. Pack it into something we can understand.
AudioDataValue* tmp = audio.get();
AudioDataValue** data = reinterpret_cast<AudioDataValue**>(aFrame->data);
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
*tmp++ = data[channel][frame];
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_S32) {
// Audio data already packed. Need to convert from S32 to S16
AudioDataValue* tmp = audio.get();
int32_t* data = reinterpret_cast<int32_t**>(aFrame->data)[0];
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
*tmp++ = *data++ / (1U << 16);
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_S32P) {
// Planar audio data. Convert it from S32 to S16
// and pack it into something we can understand.
AudioDataValue* tmp = audio.get();
int32_t** data = reinterpret_cast<int32_t**>(aFrame->data);
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
*tmp++ = data[channel][frame] / (1U << 16);
}
}
}
#else
if (aFrame->format == AV_SAMPLE_FMT_FLT) {
// Audio data already packed. No need to do anything other than copy it
// into a buffer we own.
@ -196,193 +233,22 @@ static AlignedAudioBuffer CopyAndPackAudio(AVFrame* aFrame,
*tmp++ = AudioSampleToFloat(data[channel][frame]);
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_U8) {
// Interleaved audio data. Convert it from u8 to the expected sample-format
AudioDataValue* tmp = audio.get();
uint8_t* data = reinterpret_cast<uint8_t**>(aFrame->data)[0];
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
*tmp++ = UInt8bitToAudioSample<AudioDataValue>(*data++);
}
}
} else if (aFrame->format == AV_SAMPLE_FMT_U8P) {
// Planar audio data. Convert it from u8 to the expected sample-format
// and pack it into something we can understand.
AudioDataValue* tmp = audio.get();
uint8_t** data = reinterpret_cast<uint8_t**>(aFrame->data);
for (uint32_t frame = 0; frame < aNumAFrames; frame++) {
for (uint32_t channel = 0; channel < aNumChannels; channel++) {
*tmp++ = UInt8bitToAudioSample<AudioDataValue>(data[channel][frame]);
}
}
}
#endif
return audio;
}
using ChannelLayout = AudioConfig::ChannelLayout;
MediaResult FFmpegAudioDecoder<LIBAV_VER>::PostProcessOutput(
bool aDecoded, MediaRawData* aSample, DecodedData& aResults,
bool* aGotFrame, int32_t aSubmitted) {
media::TimeUnit pts = aSample->mTime;
if (mFrame->format != AV_SAMPLE_FMT_FLT &&
mFrame->format != AV_SAMPLE_FMT_FLTP &&
mFrame->format != AV_SAMPLE_FMT_S16 &&
mFrame->format != AV_SAMPLE_FMT_S16P &&
mFrame->format != AV_SAMPLE_FMT_S32 &&
mFrame->format != AV_SAMPLE_FMT_S32P &&
mFrame->format != AV_SAMPLE_FMT_U8 &&
mFrame->format != AV_SAMPLE_FMT_U8P) {
return MediaResult(
NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("FFmpeg audio decoder outputs unsupported audio format"));
}
if (aSubmitted < 0) {
FFMPEG_LOG("Got %d more frames from packet", mFrame->nb_samples);
}
FFMPEG_LOG("FFmpegAudioDecoder decoded: [%s,%s] (Duration: %s) [%s]",
aSample->mTime.ToString().get(),
aSample->GetEndTime().ToString().get(),
aSample->mDuration.ToString().get(),
mLib->av_get_sample_fmt_name(mFrame->format));
uint32_t numChannels = mCodecContext->channels;
uint32_t samplingRate = mCodecContext->sample_rate;
AlignedAudioBuffer audio =
CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples);
if (!audio) {
FFMPEG_LOG("CopyAndPackAudio error (OOM)");
return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
}
media::TimeUnit duration = TimeUnit(mFrame->nb_samples, samplingRate);
if (!duration.IsValid()) {
FFMPEG_LOG("Duration isn't valid (%d + %d)", mFrame->nb_samples,
samplingRate);
return MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Invalid sample duration"));
}
media::TimeUnit newpts = pts + duration;
if (!newpts.IsValid()) {
FFMPEG_LOG("New pts isn't valid (%lf + %lf)", pts.ToSeconds(),
duration.ToSeconds());
return MediaResult(
NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Invalid count of accumulated audio samples"));
}
RefPtr<AudioData> data =
new AudioData(aSample->mOffset, pts, std::move(audio), numChannels,
samplingRate, mCodecContext->channel_layout);
MOZ_ASSERT(duration == data->mDuration, "must be equal");
aResults.AppendElement(std::move(data));
pts = newpts;
if (aGotFrame) {
*aGotFrame = true;
}
return NS_OK;
uint64_t FFmpegAudioDecoder<LIBAV_VER>::Padding() const {
MOZ_ASSERT(mCodecID == AV_CODEC_ID_MP3);
return mEncoderPaddingOrTotalFrames;
}
#if LIBAVCODEC_VERSION_MAJOR < 59
MediaResult FFmpegAudioDecoder<LIBAV_VER>::DecodeUsingFFmpeg(
AVPacket* aPacket, bool& aDecoded, MediaRawData* aSample,
DecodedData& aResults, bool* aGotFrame) {
int decoded = 0;
int rv =
mLib->avcodec_decode_audio4(mCodecContext, mFrame, &decoded, aPacket);
aDecoded = decoded == 1;
if (rv < 0) {
NS_WARNING("FFmpeg audio decoder error.");
return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("FFmpeg audio error"));
}
PostProcessOutput(decoded, aSample, aResults, aGotFrame, 0);
return NS_OK;
uint64_t FFmpegAudioDecoder<LIBAV_VER>::TotalFrames() const {
MOZ_ASSERT(mCodecID == AV_CODEC_ID_AAC);
return mEncoderPaddingOrTotalFrames;
}
#else
# define AVRESULT_OK 0
MediaResult FFmpegAudioDecoder<LIBAV_VER>::DecodeUsingFFmpeg(
AVPacket* aPacket, bool& aDecoded, MediaRawData* aSample,
DecodedData& aResults, bool* aGotFrame) {
// This is incremented whenever avcodec_send_packet succeeds, and
// decremented whenever avcodec_receive_frame succeeds. Because it is
// possible to have multiple AVFrames from a single AVPacket, this number
// can be negative. This is used to ensure that pts and duration are
// correctly set on the resulting audio buffers.
int32_t submitted = 0;
int ret = mLib->avcodec_send_packet(mCodecContext, aPacket);
switch (ret) {
case AVRESULT_OK:
submitted++;
break;
case AVERROR(EAGAIN):
FFMPEG_LOG(" avcodec_send_packet: EAGAIN.");
MOZ_ASSERT(false, "EAGAIN");
break;
case AVERROR_EOF:
FFMPEG_LOG(" End of stream.");
return MediaResult(NS_ERROR_DOM_MEDIA_END_OF_STREAM,
RESULT_DETAIL("End of stream"));
default:
NS_WARNING("FFmpeg audio decoder error (avcodec_send_packet).");
return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("FFmpeg audio error"));
}
MediaResult rv;
while (ret == 0) {
aDecoded = false;
ret = mLib->avcodec_receive_frame(mCodecContext, mFrame);
switch (ret) {
case AVRESULT_OK:
aDecoded = true;
submitted--;
if (submitted < 0) {
FFMPEG_LOG("Multiple AVFrame from a single AVPacket");
}
break;
case AVERROR(EAGAIN): {
// Quirk of the vorbis decoder -- the first packet doesn't return audio.
if (submitted == 1 && mCodecID == AV_CODEC_ID_VORBIS) {
AlignedAudioBuffer buf;
aResults.AppendElement(
new AudioData(0, TimeUnit::Zero(), std::move(buf),
mAudioInfo.mChannels, mAudioInfo.mRate));
}
FFMPEG_LOG(" EAGAIN (packets submitted: %" PRId32 ").", submitted);
rv = NS_OK;
break;
}
case AVERROR_EOF: {
FFMPEG_LOG(" End of stream.");
rv = MediaResult(NS_ERROR_DOM_MEDIA_END_OF_STREAM,
RESULT_DETAIL("End of stream"));
break;
}
default:
FFMPEG_LOG(" avcodec_receive_frame error.");
NS_WARNING("FFmpeg audio decoder error (avcodec_receive_frame).");
rv = MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("FFmpeg audio error"));
}
if (aDecoded) {
PostProcessOutput(aDecoded, aSample, aResults, aGotFrame, submitted);
}
}
return NS_OK;
}
#endif
MediaResult FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
uint8_t* aData, int aSize,
@ -393,11 +259,6 @@ MediaResult FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
AVPacket packet;
mLib->av_init_packet(&packet);
FFMPEG_LOG("FFmpegAudioDecoder::DoDecode: %d bytes, [%s,%s] (Duration: %s)",
aSize, aSample->mTime.ToString().get(),
aSample->GetEndTime().ToString().get(),
aSample->mDuration.ToString().get());
packet.data = const_cast<uint8_t*>(aData);
packet.size = aSize;
@ -412,14 +273,121 @@ MediaResult FFmpegAudioDecoder<LIBAV_VER>::DoDecode(MediaRawData* aSample,
RESULT_DETAIL("FFmpeg audio decoder failed to allocate frame"));
}
bool decoded = false;
auto rv = DecodeUsingFFmpeg(&packet, decoded, aSample, aResults, aGotFrame);
NS_ENSURE_SUCCESS(rv, rv);
int64_t samplePosition = aSample->mOffset;
while (packet.size > 0) {
int decoded = false;
int bytesConsumed = -1;
#if LIBAVCODEC_VERSION_MAJOR < 59
bytesConsumed =
mLib->avcodec_decode_audio4(mCodecContext, mFrame, &decoded, &packet);
if (bytesConsumed < 0) {
NS_WARNING("FFmpeg audio decoder error.");
return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("FFmpeg audio error:%d", bytesConsumed));
}
#else
# define AVRESULT_OK 0
int ret = mLib->avcodec_send_packet(mCodecContext, &packet);
switch (ret) {
case AVRESULT_OK:
bytesConsumed = packet.size;
break;
case AVERROR(EAGAIN):
break;
case AVERROR_EOF:
FFMPEG_LOG(" End of stream.");
return MediaResult(NS_ERROR_DOM_MEDIA_END_OF_STREAM,
RESULT_DETAIL("End of stream"));
default:
NS_WARNING("FFmpeg audio decoder error.");
return MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("FFmpeg audio error"));
}
ret = mLib->avcodec_receive_frame(mCodecContext, mFrame);
switch (ret) {
case AVRESULT_OK:
decoded = true;
break;
case AVERROR(EAGAIN):
break;
case AVERROR_EOF: {
FFMPEG_LOG(" End of stream.");
return MediaResult(NS_ERROR_DOM_MEDIA_END_OF_STREAM,
RESULT_DETAIL("End of stream"));
}
}
#endif
if (decoded) {
if (mFrame->format != AV_SAMPLE_FMT_FLT &&
mFrame->format != AV_SAMPLE_FMT_FLTP &&
mFrame->format != AV_SAMPLE_FMT_S16 &&
mFrame->format != AV_SAMPLE_FMT_S16P &&
mFrame->format != AV_SAMPLE_FMT_S32 &&
mFrame->format != AV_SAMPLE_FMT_S32P) {
return MediaResult(
NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL(
"FFmpeg audio decoder outputs unsupported audio format"));
}
uint32_t numChannels = mCodecContext->channels;
uint32_t samplingRate = mCodecContext->sample_rate;
AlignedAudioBuffer audio =
CopyAndPackAudio(mFrame, numChannels, mFrame->nb_samples);
if (!audio) {
FFMPEG_LOG("FFmpegAudioDecoder: OOM");
return MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__);
}
FFMPEG_LOG("Packet decoded: [%s, %s] (%" PRId64 "us, %d frames)",
aSample->mTime.ToString().get(),
aSample->GetEndTime().ToString().get(),
aSample->mDuration.ToMicroseconds(), mFrame->nb_samples);
media::TimeUnit duration = TimeUnit(mFrame->nb_samples, samplingRate);
if (!duration.IsValid()) {
FFMPEG_LOG("FFmpegAudioDecoder: invalid duration");
return MediaResult(NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Invalid sample duration"));
}
media::TimeUnit pts = aSample->mTime;
media::TimeUnit newpts = pts + duration;
if (!newpts.IsValid()) {
FFMPEG_LOG("FFmpegAudioDecoder: invalid PTS.");
return MediaResult(
NS_ERROR_DOM_MEDIA_OVERFLOW_ERR,
RESULT_DETAIL("Invalid count of accumulated audio samples"));
}
RefPtr<AudioData> data =
new AudioData(samplePosition, pts, std::move(audio), numChannels,
samplingRate, mCodecContext->channel_layout);
MOZ_ASSERT(duration == data->mDuration, "must be equal");
aResults.AppendElement(std::move(data));
pts = newpts;
if (aGotFrame) {
*aGotFrame = true;
}
}
// The packet wasn't sent to ffmpeg; another attempt will happen on the
// next iteration.
if (bytesConsumed != -1) {
packet.data += bytesConsumed;
packet.size -= bytesConsumed;
samplePosition += bytesConsumed;
}
}
return NS_OK;
}
AVCodecID FFmpegAudioDecoder<LIBAV_VER>::GetCodecId(const nsACString& aMimeType,
const AudioInfo& aInfo) {
AVCodecID FFmpegAudioDecoder<LIBAV_VER>::GetCodecId(
const nsACString& aMimeType) {
if (aMimeType.EqualsLiteral("audio/mpeg")) {
#ifdef FFVPX_VERSION
if (!StaticPrefs::media_ffvpx_mp3_enabled()) {
@ -434,65 +402,6 @@ AVCodecID FFmpegAudioDecoder<LIBAV_VER>::GetCodecId(const nsACString& aMimeType,
if (aMimeType.EqualsLiteral("audio/mp4a-latm")) {
return AV_CODEC_ID_AAC;
}
if (aMimeType.EqualsLiteral("audio/vorbis")) {
#ifdef FFVPX_VERSION
if (!StaticPrefs::media_ffvpx_vorbis_enabled()) {
return AV_CODEC_ID_NONE;
}
#endif
return AV_CODEC_ID_VORBIS;
}
#ifdef FFVPX_VERSION
if (aMimeType.EqualsLiteral("audio/opus")) {
if (!StaticPrefs::media_ffvpx_opus_enabled()) {
return AV_CODEC_ID_NONE;
}
return AV_CODEC_ID_OPUS;
}
#endif
#ifdef FFVPX_VERSION
if (aMimeType.Find("wav") != kNotFound) {
if (!StaticPrefs::media_ffvpx_wav_enabled()) {
return AV_CODEC_ID_NONE;
}
if (aMimeType.EqualsLiteral("audio/x-wav") ||
aMimeType.EqualsLiteral("audio/wave; codecs=1") ||
aMimeType.EqualsLiteral("audio/wave; codecs=65534")) {
// find the pcm format
switch (aInfo.mBitDepth) {
case 8:
return AV_CODEC_ID_PCM_U8;
case 16:
return AV_CODEC_ID_PCM_S16LE;
case 24:
return AV_CODEC_ID_PCM_S24LE;
case 32:
return AV_CODEC_ID_PCM_S32LE;
case 0:
// ::Init will find and use the right type here; this just returns
// something indicating that this media type can be decoded. This
// happens when attempting to find which decoder to use for a media
// type, without having looked at the actual bytestream. This decoder
// can decode all the usual PCM bytestreams anyway.
return AV_CODEC_ID_PCM_S16LE;
default:
return AV_CODEC_ID_NONE;
};
}
if (aMimeType.EqualsLiteral("audio/wave; codecs=3")) {
return AV_CODEC_ID_PCM_F32LE;
}
// A-law
if (aMimeType.EqualsLiteral("audio/wave; codecs=6")) {
return AV_CODEC_ID_PCM_ALAW;
}
// Mu-law
if (aMimeType.EqualsLiteral("audio/wave; codecs=7")) {
return AV_CODEC_ID_PCM_MULAW;
}
}
#endif
return AV_CODEC_ID_NONE;
}

View file

@ -25,14 +25,12 @@ class FFmpegAudioDecoder<LIBAV_VER>
: public FFmpegDataDecoder<LIBAV_VER>,
public DecoderDoctorLifeLogger<FFmpegAudioDecoder<LIBAV_VER>> {
public:
FFmpegAudioDecoder(FFmpegLibWrapper* aLib,
const CreateDecoderParams& aDecoderParams);
FFmpegAudioDecoder(FFmpegLibWrapper* aLib, const AudioInfo& aConfig);
virtual ~FFmpegAudioDecoder();
RefPtr<InitPromise> Init() override;
void InitCodecContext() MOZ_REQUIRES(sMutex) override;
static AVCodecID GetCodecId(const nsACString& aMimeType,
const AudioInfo& aInfo);
static AVCodecID GetCodecId(const nsACString& aMimeType);
nsCString GetDescriptionName() const override {
#ifdef USING_MOZFFVPX
return "ffvpx audio decoder"_ns;
@ -45,15 +43,21 @@ class FFmpegAudioDecoder<LIBAV_VER>
private:
MediaResult DoDecode(MediaRawData* aSample, uint8_t* aData, int aSize,
bool* aGotFrame, DecodedData& aResults) override;
MediaResult DecodeUsingFFmpeg(AVPacket* aPacket, bool& aDecoded,
MediaRawData* aSample, DecodedData& aResults,
bool* aGotFrame);
MediaResult PostProcessOutput(bool aDecoded, MediaRawData* aSample,
DecodedData& aResults, bool* aGotFrame,
int32_t aSubmitted);
const AudioInfo mAudioInfo;
// True if the audio will be downmixed and rendered in mono.
bool mDefaultPlaybackDeviceMono;
// This method is to be called only when decoding mp3, in order to correctly
// discard padding frames.
uint64_t Padding() const;
// This method is to be called only when decoding AAC, in order to correctly
// discard padding frames, based on the number of frames decoded and the total
// frame count of the media.
uint64_t TotalFrames() const;
// The number of frames of encoder delay, that need to be discarded at the
// beginning of the stream.
uint32_t mEncoderDelay = 0;
// This holds either the encoder padding (when this decoder decodes mp3), or
// the total frame count of the media (when this decoder decodes AAC).
// It is best accessed via the `Padding` and `TotalFrames` methods, for
// clarity.
uint64_t mEncoderPaddingOrTotalFrames = 0;
};
} // namespace mozilla
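To make the bookkeeping concrete, a hedged sketch of how these fields drive trimming, with illustrative numbers (the trimming itself happens downstream, e.g. in AudioTrimmer):
// mp3: mEncoderDelay = 529, Padding() = 1152. If the decoder emits N
// frames in total, the first 529 and the last 1152 are dropped, leaving
// N - 529 - 1152 valid frames.
// AAC: mEncoderDelay = 2112, TotalFrames() = 44100. Everything outside
// [2112, 2112 + 44100) in decoded frame numbering is trimmed.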

View file

@ -5,7 +5,6 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <string.h>
#include "libavutil/dict.h"
#ifdef __GNUC__
# include <unistd.h>
#endif
@ -73,7 +72,7 @@ MediaResult FFmpegDataDecoder<LIBAV_VER>::AllocateExtraData() {
// Note: This doesn't run on the ffmpeg TaskQueue; it runs on some other
// media task queue
MediaResult FFmpegDataDecoder<LIBAV_VER>::InitDecoder(AVDictionary** aOptions) {
MediaResult FFmpegDataDecoder<LIBAV_VER>::InitDecoder() {
FFMPEG_LOG("Initialising FFmpeg decoder");
AVCodec* codec = FindAVCodec(mLib, mCodecID);
@ -125,10 +124,7 @@ MediaResult FFmpegDataDecoder<LIBAV_VER>::InitDecoder(AVDictionary** aOptions) {
}
#endif
if (mLib->avcodec_open2(mCodecContext, codec, aOptions) < 0) {
if (mCodecContext->extradata) {
mLib->av_freep(&mCodecContext->extradata);
}
if (mLib->avcodec_open2(mCodecContext, codec, nullptr) < 0) {
mLib->av_freep(&mCodecContext);
FFMPEG_LOG(" Couldn't open avcodec");
return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
@ -221,7 +217,6 @@ RefPtr<MediaDataDecoder::DecodePromise> FFmpegDataDecoder<LIBAV_VER>::Drain() {
RefPtr<MediaDataDecoder::DecodePromise>
FFmpegDataDecoder<LIBAV_VER>::ProcessDrain() {
MOZ_ASSERT(mTaskQueue->IsOnCurrentThread());
FFMPEG_LOG("FFmpegDataDecoder: draining buffers");
RefPtr<MediaRawData> empty(new MediaRawData());
empty->mTimecode = mLastInputDts;
bool gotFrame = false;

View file

@ -51,7 +51,7 @@ class FFmpegDataDecoder<LIBAV_VER>
virtual void ProcessShutdown();
virtual void InitCodecContext() MOZ_REQUIRES(sMutex) {}
AVFrame* PrepareFrame();
MediaResult InitDecoder(AVDictionary** aOptions);
MediaResult InitDecoder();
MediaResult AllocateExtraData();
MediaResult DoDecode(MediaRawData* aSample, bool* aGotFrame,
DecodedData& aResults);

View file

@ -52,7 +52,8 @@ class FFmpegDecoderModule : public PlatformDecoderModule {
media::DecodeSupport::Unsupported) {
return nullptr;
}
RefPtr<MediaDataDecoder> decoder = new FFmpegAudioDecoder<V>(mLib, aParams);
RefPtr<MediaDataDecoder> decoder =
new FFmpegAudioDecoder<V>(mLib, aParams.AudioConfig());
return decoder.forget();
}
@ -89,9 +90,7 @@ class FFmpegDecoderModule : public PlatformDecoderModule {
}
AVCodecID videoCodec = FFmpegVideoDecoder<V>::GetCodecId(mimeType);
AVCodecID audioCodec = FFmpegAudioDecoder<V>::GetCodecId(
mimeType,
trackInfo.GetAsAudioInfo() ? *trackInfo.GetAsAudioInfo() : AudioInfo());
AVCodecID audioCodec = FFmpegAudioDecoder<V>::GetCodecId(mimeType);
if (audioCodec == AV_CODEC_ID_NONE && videoCodec == AV_CODEC_ID_NONE) {
MOZ_LOG(sPDMLog, LogLevel::Debug,
("FFmpeg decoder rejects requested type '%s'",

View file

@ -117,12 +117,12 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
#define AV_FUNC_OPTION_SILENT(func, ver) \
if ((ver)&version) { \
if (!((func) = (decltype(func))PR_FindSymbol( \
if (!(func = (decltype(func))PR_FindSymbol( \
((ver)&AV_FUNC_AVUTIL_MASK) ? mAVUtilLib : mAVCodecLib, \
#func))) { \
} \
} else { \
(func) = (decltype(func))nullptr; \
func = (decltype(func))nullptr; \
}
#define AV_FUNC_OPTION(func, ver) \
@ -133,7 +133,7 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
#define AV_FUNC(func, ver) \
AV_FUNC_OPTION(func, ver) \
if ((ver)&version && !(func)) { \
if ((ver)&version && !func) { \
Unlink(); \
return isFFMpeg ? LinkResult::MissingFFMpegFunction \
: LinkResult::MissingLibAVFunction; \
@ -182,8 +182,6 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
AV_FUNC_AVUTIL_58 | AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60))
AV_FUNC(av_image_check_size, AV_FUNC_AVUTIL_ALL)
AV_FUNC(av_image_get_buffer_size, AV_FUNC_AVUTIL_ALL)
AV_FUNC_OPTION(av_channel_layout_default, AV_FUNC_AVUTIL_60)
AV_FUNC_OPTION(av_channel_layout_from_mask, AV_FUNC_AVUTIL_60)
AV_FUNC_OPTION(av_buffer_get_opaque,
(AV_FUNC_AVUTIL_56 | AV_FUNC_AVUTIL_57 | AV_FUNC_AVUTIL_58 |
AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60))
@ -201,11 +199,6 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
AV_FUNC(avcodec_descriptor_get, AV_FUNC_53 | AV_FUNC_55 | AV_FUNC_56 |
AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 |
AV_FUNC_60)
AV_FUNC(av_get_sample_fmt_name, AV_FUNC_AVUTIL_ALL);
AV_FUNC(av_dict_set, AV_FUNC_AVUTIL_57 | AV_FUNC_AVUTIL_58 |
AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60)
AV_FUNC(av_dict_free, AV_FUNC_AVUTIL_57 | AV_FUNC_AVUTIL_58 |
AV_FUNC_AVUTIL_59 | AV_FUNC_AVUTIL_60)
#ifdef MOZ_WIDGET_GTK
AV_FUNC_OPTION_SILENT(avcodec_get_hw_config,
@ -233,8 +226,9 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
AV_FUNC_OPTION_SILENT(av_hwframe_ctx_alloc,
AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
AV_FUNC_OPTION_SILENT(avcodec_get_name,
AV_FUNC_57 | AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
AV_FUNC_OPTION_SILENT(av_dict_set, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
AV_FUNC_OPTION_SILENT(av_dict_free, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
AV_FUNC_OPTION_SILENT(avcodec_get_name, AV_FUNC_58 | AV_FUNC_59 | AV_FUNC_60)
AV_FUNC_OPTION_SILENT(av_get_pix_fmt_string, AV_FUNC_AVUTIL_58 |
AV_FUNC_AVUTIL_59 |
AV_FUNC_AVUTIL_60)
@ -243,9 +237,9 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
#undef AV_FUNC_OPTION
#ifdef MOZ_WIDGET_GTK
# define VA_FUNC_OPTION_SILENT(func) \
if (!((func) = (decltype(func))PR_FindSymbol(mVALib, #func))) { \
(func) = (decltype(func))nullptr; \
# define VA_FUNC_OPTION_SILENT(func) \
if (!(func = (decltype(func))PR_FindSymbol(mVALib, #func))) { \
func = (decltype(func))nullptr; \
}
// mVALib is optional and may not be present.
@ -257,9 +251,9 @@ FFmpegLibWrapper::LinkResult FFmpegLibWrapper::Link() {
}
# undef VA_FUNC_OPTION_SILENT
# define VAD_FUNC_OPTION_SILENT(func) \
if (!((func) = (decltype(func))PR_FindSymbol(mVALibDrm, #func))) { \
FFMPEG_LOG("Couldn't load function " #func); \
# define VAD_FUNC_OPTION_SILENT(func) \
if (!(func = (decltype(func))PR_FindSymbol(mVALibDrm, #func))) { \
FFMPEG_LOG("Couldn't load function " #func); \
}
// mVALibDrm is optional and may not be present.
@ -343,7 +337,7 @@ void FFmpegLibWrapper::LinkVAAPILibs() {
#ifdef MOZ_WIDGET_GTK
bool FFmpegLibWrapper::IsVAAPIAvailable() {
# define VA_FUNC_LOADED(func) ((func) != nullptr)
# define VA_FUNC_LOADED(func) (func != nullptr)
return VA_FUNC_LOADED(avcodec_get_hw_config) &&
VA_FUNC_LOADED(av_hwdevice_ctx_alloc) &&
VA_FUNC_LOADED(av_hwdevice_ctx_init) &&

View file

@ -17,7 +17,6 @@ struct AVPacket;
struct AVDictionary;
struct AVCodecParserContext;
struct PRLibrary;
struct AVChannelLayout;
#ifdef MOZ_WIDGET_GTK
struct AVCodecHWConfig;
struct AVVAAPIHWConfig;
@ -123,14 +122,6 @@ struct MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS FFmpegLibWrapper {
void* log_ctx);
int (*av_image_get_buffer_size)(int pix_fmt, int width, int height,
int align);
const char* (*av_get_sample_fmt_name)(int sample_fmt);
void (*av_channel_layout_default)(AVChannelLayout* ch_layout,
int nb_channels);
void (*av_channel_layout_from_mask)(AVChannelLayout* ch_layout,
uint64_t mask);
int (*av_dict_set)(AVDictionary** pm, const char* key, const char* value,
int flags);
void (*av_dict_free)(AVDictionary** m);
// libavutil v55 and later only
AVFrame* (*av_frame_alloc)();
@ -164,6 +155,9 @@ struct MOZ_ONLY_USED_TO_AVOID_STATIC_CONSTRUCTORS FFmpegLibWrapper {
int (*av_hwdevice_ctx_create_derived)(AVBufferRef** dst_ctx, int type,
AVBufferRef* src_ctx, int flags);
AVBufferRef* (*av_hwframe_ctx_alloc)(AVBufferRef* device_ctx);
int (*av_dict_set)(AVDictionary** pm, const char* key, const char* value,
int flags);
void (*av_dict_free)(AVDictionary** m);
const char* (*avcodec_get_name)(int id);
char* (*av_get_pix_fmt_string)(char* buf, int buf_size, int pix_fmt);

View file

@ -28,13 +28,9 @@ extern "C" {
#endif // LIBAVCODEC_VERSION_MAJOR >= 58
#if LIBAVCODEC_VERSION_MAJOR < 55
// This value is not defined in older versions of libavcodec
# define CODEC_ID_OPUS 86076
# define AV_CODEC_ID_VP6F CODEC_ID_VP6F
# define AV_CODEC_ID_H264 CODEC_ID_H264
# define AV_CODEC_ID_AAC CODEC_ID_AAC
# define AV_CODEC_ID_VORBIS CODEC_ID_VORBIS
# define AV_CODEC_ID_OPUS CODEC_ID_OPUS
# define AV_CODEC_ID_MP3 CODEC_ID_MP3
# define AV_CODEC_ID_VP8 CODEC_ID_VP8
# define AV_CODEC_ID_NONE CODEC_ID_NONE

View file

@ -588,7 +588,7 @@ RefPtr<MediaDataDecoder::InitPromise> FFmpegVideoDecoder<LIBAV_VER>::Init() {
}
#endif // MOZ_WAYLAND_USE_HWDECODE
rv = InitDecoder(nullptr);
rv = InitDecoder();
if (NS_SUCCEEDED(rv)) {
return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
}

View file

@ -28,13 +28,13 @@ RefPtr<MediaDataDecoder::DecodePromise> AudioTrimmer::Decode(
MediaRawData* aSample) {
MOZ_ASSERT(mThread->IsOnCurrentThread(),
"We're not on the thread we were first initialized on");
LOG("AudioTrimmer::Decode");
PrepareTrimmers(aSample);
RefPtr<MediaRawData> sample = aSample;
PrepareTrimmers(sample);
RefPtr<AudioTrimmer> self = this;
RefPtr<DecodePromise> p = mDecoder->Decode(aSample)->Then(
RefPtr<DecodePromise> p = mDecoder->Decode(sample)->Then(
GetCurrentSerialEventTarget(), __func__,
[self](DecodePromise::ResolveOrRejectValue&& aValue) {
return self->HandleDecodedResult(std::move(aValue));
[self, sample](DecodePromise::ResolveOrRejectValue&& aValue) {
return self->HandleDecodedResult(std::move(aValue), sample);
});
return p;
}
@ -42,7 +42,6 @@ RefPtr<MediaDataDecoder::DecodePromise> AudioTrimmer::Decode(
RefPtr<MediaDataDecoder::FlushPromise> AudioTrimmer::Flush() {
MOZ_ASSERT(mThread->IsOnCurrentThread(),
"We're not on the thread we were first initialized on");
LOG("Flushing");
RefPtr<FlushPromise> p = mDecoder->Flush();
mTrimmers.Clear();
return p;
@ -55,7 +54,7 @@ RefPtr<MediaDataDecoder::DecodePromise> AudioTrimmer::Drain() {
RefPtr<DecodePromise> p = mDecoder->Drain()->Then(
GetCurrentSerialEventTarget(), __func__,
[self = RefPtr{this}](DecodePromise::ResolveOrRejectValue&& aValue) {
return self->HandleDecodedResult(std::move(aValue));
return self->HandleDecodedResult(std::move(aValue), nullptr);
});
return p;
}
@ -95,37 +94,32 @@ MediaDataDecoder::ConversionRequired AudioTrimmer::NeedsConversion() const {
}
RefPtr<MediaDataDecoder::DecodePromise> AudioTrimmer::HandleDecodedResult(
DecodePromise::ResolveOrRejectValue&& aValue) {
DecodePromise::ResolveOrRejectValue&& aValue, MediaRawData* aRaw) {
MOZ_ASSERT(mThread->IsOnCurrentThread(),
"We're not on the thread we were first initialized on");
if (aValue.IsReject()) {
return DecodePromise::CreateAndReject(std::move(aValue.RejectValue()),
__func__);
}
TimeUnit rawStart = aRaw ? aRaw->mTime : TimeUnit::Zero();
TimeUnit rawEnd = aRaw ? aRaw->GetEndTime() : TimeUnit::Zero();
MediaDataDecoder::DecodedData results = std::move(aValue.ResolveValue());
LOG("HandleDecodedResults: %zu decoded data, %zu trimmers", results.Length(),
mTrimmers.Length());
if (results.IsEmpty()) {
// No samples returned; we assume this is due to decoder latency and
// that the related decoded sample will be returned during the next
// call to Decode().
LOGV("No sample returned -- decoder has latency");
return DecodePromise::CreateAndResolve(std::move(results), __func__);
LOGV("No sample returned for sample[%s, %s]", rawStart.ToString().get(),
rawEnd.ToString().get());
}
for (uint32_t i = 0; i < results.Length();) {
const RefPtr<MediaData>& data = results[i];
MOZ_ASSERT(data->mType == MediaData::Type::AUDIO_DATA);
if (!data->mDuration.IsValid()) {
return DecodePromise::CreateAndReject(std::move(aValue.RejectValue()),
__func__);
}
TimeInterval sampleInterval(data->mTime, data->GetEndTime());
if (mTrimmers.IsEmpty()) {
// mTrimmers being empty can only occur if the decoder returned more
// frames than we pushed in. We can't handle this case; abort trimming.
LOG("decoded buffer [%s, %s] has no trimming information)",
LOG("sample[%s, %s] (decoded[%s, %s] no trimming information)",
rawStart.ToString().get(), rawEnd.ToString().get(),
sampleInterval.mStart.ToString().get(),
sampleInterval.mEnd.ToString().get());
i++;
@ -136,23 +130,28 @@ RefPtr<MediaDataDecoder::DecodePromise> AudioTrimmer::HandleDecodedResult(
mTrimmers.RemoveElementAt(0);
if (!trimmer) {
// Those frames didn't need trimming.
LOGV("decoded buffer [%s, %s] doesn't need trimming",
LOGV("sample[%s, %s] (decoded[%s, %s] no trimming needed",
rawStart.ToString().get(), rawEnd.ToString().get(),
sampleInterval.mStart.ToString().get(),
sampleInterval.mEnd.ToString().get());
i++;
continue;
}
if (!trimmer->Intersects(sampleInterval)) {
LOGV("decoded buffer [%s, %s] would be empty after trimming, dropping it",
sampleInterval.mStart.ToString().get(),
sampleInterval.mEnd.ToString().get());
LOGV(
"sample[%s, %s] (decoded[%s, %s] would be empty after trimming, "
"dropping it",
rawStart.ToString().get(), rawEnd.ToString().get(),
sampleInterval.mStart.ToString().get(),
sampleInterval.mEnd.ToString().get());
results.RemoveElementAt(i);
continue;
}
LOGV("Trimming sample[%s,%s] to [%s,%s]",
LOGV("Trimming sample[%s,%s] to [%s,%s] (raw was:[%s, %s])",
sampleInterval.mStart.ToString().get(),
sampleInterval.mEnd.ToString().get(), trimmer->mStart.ToString().get(),
trimmer->mEnd.ToString().get());
trimmer->mEnd.ToString().get(), rawStart.ToString().get(),
rawEnd.ToString().get());
TimeInterval trim({std::max(trimmer->mStart, sampleInterval.mStart),
std::min(trimmer->mEnd, sampleInterval.mEnd)});
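Concretely, the clamping above computes an interval intersection; for example, with made-up times:
// trimmer (range to keep) = [ 5 ms, 20 ms ]
// decoded sample          = [ 0 ms, 25 ms ]
// trim = [ max(5, 0), min(20, 25) ] = [ 5 ms, 20 ms ]
// so 5 ms are removed from each end of the decoded buffer.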
@ -162,8 +161,7 @@ RefPtr<MediaDataDecoder::DecodePromise> AudioTrimmer::HandleDecodedResult(
Unused << ok;
if (sample->Frames() == 0) {
LOGV("sample[%s, %s] is empty after trimming, dropping it",
sampleInterval.mStart.ToString().get(),
sampleInterval.mEnd.ToString().get());
rawStart.ToString().get(), rawEnd.ToString().get());
results.RemoveElementAt(i);
continue;
}
@ -186,7 +184,12 @@ RefPtr<MediaDataDecoder::DecodePromise> AudioTrimmer::DecodeBatch(
->Then(GetCurrentSerialEventTarget(), __func__,
[self = RefPtr{this}](
DecodePromise::ResolveOrRejectValue&& aValue) {
return self->HandleDecodedResult(std::move(aValue));
// If the decoder returned fewer samples than we fed it, we
// can assume that this is due to decoder latency, that all
// decoded frames have been shifted by n =
// compressedSamples.Length() - decodedSamples.Length(), and
// that the first n compressed samples returned nothing.
return self->HandleDecodedResult(std::move(aValue), nullptr);
});
return p;
}

View file

@ -41,9 +41,10 @@ class AudioTrimmer final : public MediaDataDecoder {
private:
~AudioTrimmer() = default;
// Apply trimming information to decoded data.
// Apply trimming information to decoded data. aRaw can be null as it's only
// used for logging purposes.
RefPtr<DecodePromise> HandleDecodedResult(
DecodePromise::ResolveOrRejectValue&& aValue);
DecodePromise::ResolveOrRejectValue&& aValue, MediaRawData* aRaw);
void PrepareTrimmers(MediaRawData* aRaw);
const RefPtr<MediaDataDecoder> mDecoder;
nsCOMPtr<nsISerialEventTarget> mThread;

Binary file not shown.

View file

@ -0,0 +1 @@
Cache-Control: no-store

View file

@ -929,15 +929,19 @@ var gErrorTests = [
{ name: "448636.ogv", type: "video/ogg" },
{ name: "bug504843.ogv", type: "video/ogg" },
{ name: "bug501279.ogg", type: "audio/ogg" },
{ name: "bug603918.webm", type: "video/webm" },
{ name: "bug604067.webm", type: "video/webm" },
{ name: "bug1535980.webm", type: "video/webm" },
{ name: "bug1799787.webm", type: "video/webm" },
{ name: "bogus.duh", type: "bogus/duh" },
];
// Playing this file errors out after receiving "loadedmetadata"; we still
// want to check the duration in "onerror" and make sure it is still
// available.
var gDurationTests = [{ name: "bug604067.webm", duration: 6.076 }];
// These files error out after receiving "loadedmetadata"; we would like
// to check the duration in "onerror" and make sure it is still available.
var gDurationTests = [
{ name: "bug603918.webm", duration: 6.076 },
{ name: "bug604067.webm", duration: 6.076 },
];
// These are files that have nontrivial duration and are useful for seeking within.
var gSeekTests = [

View file

@ -462,6 +462,8 @@ support-files =
bug556821.ogv^headers^
bug557094.ogv
bug557094.ogv^headers^
bug603918.webm
bug603918.webm^headers^
bug604067.webm
bug604067.webm^headers^
bug1066943.webm

View file

@ -460,6 +460,8 @@ support-files =
bug556821.ogv^headers^
bug557094.ogv
bug557094.ogv^headers^
bug603918.webm
bug603918.webm^headers^
bug604067.webm
bug604067.webm^headers^
bug1066943.webm

View file

@ -459,6 +459,8 @@ support-files =
bug556821.ogv^headers^
bug557094.ogv
bug557094.ogv^headers^
bug603918.webm
bug603918.webm^headers^
bug604067.webm
bug604067.webm^headers^
bug1066943.webm

View file

@ -468,6 +468,8 @@ support-files =
bug556821.ogv^headers^
bug557094.ogv
bug557094.ogv^headers^
bug603918.webm
bug603918.webm^headers^
bug604067.webm
bug604067.webm^headers^
bug1066943.webm

View file

@ -461,6 +461,8 @@ support-files =
bug556821.ogv^headers^
bug557094.ogv
bug557094.ogv^headers^
bug603918.webm
bug603918.webm^headers^
bug604067.webm
bug604067.webm^headers^
bug1066943.webm

View file

@ -460,6 +460,8 @@ support-files =
bug556821.ogv^headers^
bug557094.ogv
bug557094.ogv^headers^
bug603918.webm
bug603918.webm^headers^
bug604067.webm
bug604067.webm^headers^
bug1066943.webm

View file

@ -460,6 +460,8 @@ support-files =
bug556821.ogv^headers^
bug557094.ogv
bug557094.ogv^headers^
bug603918.webm
bug603918.webm^headers^
bug604067.webm
bug604067.webm^headers^
bug1066943.webm

View file

@ -461,6 +461,8 @@ support-files =
bug556821.ogv^headers^
bug557094.ogv
bug557094.ogv^headers^
bug603918.webm
bug603918.webm^headers^
bug604067.webm
bug604067.webm^headers^
bug1066943.webm

View file

@ -94,30 +94,30 @@ bool WAVTrackDemuxer::Init() {
return false;
}
uint32_t chunkName = mHeaderParser.GiveHeader().ChunkName();
uint32_t chunkSize = mHeaderParser.GiveHeader().ChunkSize();
uint32_t aChunkName = mHeaderParser.GiveHeader().ChunkName();
uint32_t aChunkSize = mHeaderParser.GiveHeader().ChunkSize();
if (chunkName == FRMT_CODE) {
if (aChunkName == FRMT_CODE) {
if (!FmtChunkParserInit()) {
return false;
}
} else if (chunkName == LIST_CODE) {
} else if (aChunkName == LIST_CODE) {
mHeaderParser.Reset();
uint64_t endOfListChunk = static_cast<uint64_t>(mOffset) + chunkSize;
uint64_t endOfListChunk = static_cast<uint64_t>(mOffset) + aChunkSize;
if (endOfListChunk > UINT32_MAX) {
return false;
}
if (!ListChunkParserInit(chunkSize)) {
if (!ListChunkParserInit(aChunkSize)) {
mOffset = endOfListChunk;
}
} else if (chunkName == DATA_CODE) {
mDataLength = chunkSize;
} else if (aChunkName == DATA_CODE) {
mDataLength = aChunkSize;
if (mFirstChunkOffset != mOffset) {
mFirstChunkOffset = mOffset;
}
break;
} else {
mOffset += chunkSize; // Skip other irrelevant chunks.
mOffset += aChunkSize; // Skip other irrelevant chunks.
}
if (mOffset & 1) {
// Wave files are 2-byte aligned so we need to round up
@ -138,25 +138,22 @@ bool WAVTrackDemuxer::Init() {
}
}
mSamplesPerSecond = mFmtChunk.SampleRate();
mChannels = mFmtChunk.Channels();
if (!mSamplesPerSecond || !mChannels || !mFmtChunk.ValidBitsPerSamples()) {
mSamplesPerSecond = mFmtParser.FmtChunk().SampleRate();
mChannels = mFmtParser.FmtChunk().Channels();
mSampleFormat = mFmtParser.FmtChunk().SampleFormat();
if (!mSamplesPerSecond || !mChannels || !mSampleFormat) {
return false;
}
mSamplesPerChunk =
DATA_CHUNK_SIZE * 8 / mChannels / mFmtChunk.ValidBitsPerSamples();
mSampleFormat = mFmtChunk.ValidBitsPerSamples();
mSamplesPerChunk = DATA_CHUNK_SIZE * 8 / mChannels / mSampleFormat;
mInfo->mRate = mSamplesPerSecond;
mInfo->mChannels = mChannels;
mInfo->mBitDepth = mFmtChunk.ValidBitsPerSamples();
mInfo->mProfile = AssertedCast<int8_t>(mFmtChunk.WaveFormat() & 0x00FF);
mInfo->mExtendedProfile =
AssertedCast<int8_t>((mFmtChunk.WaveFormat() & 0xFF00) >> 8);
mInfo->mBitDepth = mSampleFormat;
mInfo->mProfile = mFmtParser.FmtChunk().WaveFormat() & 0x00FF;
mInfo->mExtendedProfile = (mFmtParser.FmtChunk().WaveFormat() & 0xFF00) >> 8;
mInfo->mMimeType = "audio/wave; codecs=";
mInfo->mMimeType.AppendInt(mFmtChunk.WaveFormat());
mInfo->mMimeType.AppendInt(mFmtParser.FmtChunk().WaveFormat());
mInfo->mDuration = Duration();
mInfo->mChannelMap = mFmtChunk.ChannelMap();
return mInfo->mDuration.IsPositive();
}
@ -176,8 +173,8 @@ bool WAVTrackDemuxer::HeaderParserInit() {
if (!header) {
return false;
}
BufferReader headerReader(header->Data(), 8);
Unused << mHeaderParser.Parse(headerReader);
BufferReader HeaderReader(header->Data(), 8);
Unused << mHeaderParser.Parse(HeaderReader);
return true;
}
@ -186,8 +183,9 @@ bool WAVTrackDemuxer::FmtChunkParserInit() {
if (!fmtChunk) {
return false;
}
nsTArray<uint8_t> fmtChunkData(fmtChunk->Data(), fmtChunk->Size());
mFmtChunk.Init(std::move(fmtChunkData));
BufferReader fmtReader(fmtChunk->Data(),
mHeaderParser.GiveHeader().ChunkSize());
Unused << mFmtParser.Parse(fmtReader);
return true;
}
@ -347,6 +345,7 @@ void WAVTrackDemuxer::Reset() {
mParser.Reset();
mHeaderParser.Reset();
mRIFFParser.Reset();
mFmtParser.Reset();
}
RefPtr<WAVTrackDemuxer::SkipAccessPointPromise>
@ -355,9 +354,7 @@ WAVTrackDemuxer::SkipToNextRandomAccessPoint(const TimeUnit& aTimeThreshold) {
SkipFailureHolder(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, 0), __func__);
}
int64_t WAVTrackDemuxer::GetResourceOffset() const {
return AssertedCast<int64_t>(mOffset);
}
int64_t WAVTrackDemuxer::GetResourceOffset() const { return mOffset; }
TimeIntervals WAVTrackDemuxer::GetBuffered() {
TimeUnit duration = Duration();
@ -393,8 +390,9 @@ TimeUnit WAVTrackDemuxer::Duration(int64_t aNumDataChunks) const {
if (!mSamplesPerSecond || !mSamplesPerChunk) {
return TimeUnit();
}
const int64_t frames = mSamplesPerChunk * aNumDataChunks;
return TimeUnit(frames, mSamplesPerSecond);
const double usPerDataChunk =
USECS_PER_S * static_cast<double>(mSamplesPerChunk) / mSamplesPerSecond;
return TimeUnit::FromMicroseconds(aNumDataChunks * usPerDataChunk);
}
TimeUnit WAVTrackDemuxer::DurationFromBytes(uint32_t aNumBytes) const {
@ -404,14 +402,21 @@ TimeUnit WAVTrackDemuxer::DurationFromBytes(uint32_t aNumBytes) const {
uint64_t numSamples = aNumBytes * 8 / mChannels / mSampleFormat;
return TimeUnit(numSamples, mSamplesPerSecond);
uint64_t numUSeconds = USECS_PER_S * numSamples / mSamplesPerSecond;
if (USECS_PER_S * numSamples % mSamplesPerSecond > mSamplesPerSecond / 2) {
numUSeconds++;
}
return TimeUnit::FromMicroseconds(numUSeconds);
}
MediaByteRange WAVTrackDemuxer::FindNextChunk() {
if (mOffset + DATA_CHUNK_SIZE < mFirstChunkOffset + mDataLength) {
return {mOffset, mOffset + DATA_CHUNK_SIZE};
} else {
return {mOffset, mFirstChunkOffset + mDataLength};
}
return {mOffset, mFirstChunkOffset + mDataLength};
}
MediaByteRange WAVTrackDemuxer::FindChunkHeader() {
@ -451,8 +456,8 @@ already_AddRefed<MediaRawData> WAVTrackDemuxer::GetNextChunk(
return nullptr;
}
const uint32_t read = Read(chunkWriter->Data(), datachunk->mOffset,
AssertedCast<int64_t>(datachunk->Size()));
const uint32_t read =
Read(chunkWriter->Data(), datachunk->mOffset, datachunk->Size());
if (read != aRange.Length()) {
return nullptr;
@ -493,8 +498,8 @@ already_AddRefed<MediaRawData> WAVTrackDemuxer::GetFileHeader(
return nullptr;
}
const uint32_t read = Read(headerWriter->Data(), fileHeader->mOffset,
AssertedCast<int64_t>(fileHeader->Size()));
const uint32_t read =
Read(headerWriter->Data(), fileHeader->mOffset, fileHeader->Size());
if (read != aRange.Length()) {
return nullptr;
@ -514,9 +519,8 @@ uint64_t WAVTrackDemuxer::ChunkIndexFromTime(
if (!mSamplesPerChunk || !mSamplesPerSecond) {
return 0;
}
double chunkDurationS =
mSamplesPerChunk / static_cast<double>(mSamplesPerSecond);
int64_t chunkIndex = std::floor(aTime.ToSeconds() / chunkDurationS);
uint64_t chunkIndex =
(aTime.ToSeconds() * mSamplesPerSecond / mSamplesPerChunk) - 1;
return chunkIndex;
}
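A quick numeric check of the two computations above, assuming 44100 Hz and 4096 samples per chunk:
// aTime = 1.0 s; chunk duration = 4096 / 44100 ≈ 92.88 ms
// floor(1.0 / 0.09288)                       = 10  (backed-out version)
// (1.0 * 44100 / 4096) - 1 ≈ 9.77, truncated = 9   (restored version)
// i.e. the restored code lands one chunk earlier near chunk boundaries.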
@ -526,12 +530,12 @@ void WAVTrackDemuxer::UpdateState(const MediaByteRange& aRange) {
mTotalChunkLen += static_cast<uint64_t>(aRange.Length());
}
int64_t WAVTrackDemuxer::Read(uint8_t* aBuffer, int64_t aOffset,
int64_t aSize) {
uint32_t WAVTrackDemuxer::Read(uint8_t* aBuffer, int64_t aOffset,
int32_t aSize) {
const int64_t streamLen = StreamLength();
if (mInfo && streamLen > 0) {
int64_t max = streamLen > aOffset ? streamLen - aOffset : 0;
aSize = std::min(aSize, max);
aSize = std::min<int64_t>(aSize, max);
}
uint32_t read = 0;
const nsresult rv = mSource.ReadAt(aOffset, reinterpret_cast<char*>(aBuffer),
@ -583,11 +587,11 @@ bool RIFFParser::RIFFHeader::ParseNext(uint8_t c) {
bool RIFFParser::RIFFHeader::IsValid(int aPos) const {
if (aPos > -1 && aPos < 4) {
return RIFF[aPos] == mRaw[aPos];
}
if (aPos > 7 && aPos < 12) {
} else if (aPos > 7 && aPos < 12) {
return WAVE[aPos - 8] == mRaw[aPos];
} else {
return true;
}
return true;
}
bool RIFFParser::RIFFHeader::IsValid() const { return mPos >= RIFF_CHUNK_SIZE; }
@ -653,62 +657,71 @@ void HeaderParser::ChunkHeader::Update(uint8_t c) {
}
}
// FormatChunk
// FormatParser
void FormatChunk::Init(nsTArray<uint8_t>&& aData) { mRaw = std::move(aData); }
Result<uint32_t, nsresult> FormatParser::Parse(BufferReader& aReader) {
for (auto res = aReader.ReadU8();
res.isOk() && !mFmtChunk.ParseNext(res.unwrap());
res = aReader.ReadU8()) {
}
uint16_t FormatChunk::WaveFormat() const { return (mRaw[1] << 8) | (mRaw[0]); }
if (mFmtChunk.IsValid()) {
return FMT_CHUNK_MIN_SIZE;
}
uint16_t FormatChunk::Channels() const { return (mRaw[3] << 8) | (mRaw[2]); }
return 0;
}
uint32_t FormatChunk::SampleRate() const {
void FormatParser::Reset() { mFmtChunk.Reset(); }
const FormatParser::FormatChunk& FormatParser::FmtChunk() const {
return mFmtChunk;
}
// FormatParser::FormatChunk
FormatParser::FormatChunk::FormatChunk() { Reset(); }
void FormatParser::FormatChunk::Reset() {
memset(mRaw, 0, sizeof(mRaw));
mPos = 0;
}
uint16_t FormatParser::FormatChunk::WaveFormat() const {
return (mRaw[1] << 8) | (mRaw[0]);
}
uint16_t FormatParser::FormatChunk::Channels() const {
return (mRaw[3] << 8) | (mRaw[2]);
}
uint32_t FormatParser::FormatChunk::SampleRate() const {
return static_cast<uint32_t>((mRaw[7] << 24) | (mRaw[6] << 16) |
(mRaw[5] << 8) | (mRaw[4]));
}
uint32_t FormatChunk::AverageBytesPerSec() const {
return static_cast<uint32_t>((mRaw[11] << 24) | (mRaw[10] << 16) |
(mRaw[9] << 8) | (mRaw[8]));
uint16_t FormatParser::FormatChunk::FrameSize() const {
return (mRaw[13] << 8) | (mRaw[12]);
}
uint16_t FormatChunk::BlockAlign() const {
return static_cast<uint16_t>(mRaw[13] << 8) | (mRaw[12]);
}
uint16_t FormatChunk::ValidBitsPerSamples() const {
uint16_t FormatParser::FormatChunk::SampleFormat() const {
return (mRaw[15] << 8) | (mRaw[14]);
}
uint16_t FormatChunk::ExtraFormatInfoSize() const {
uint16_t value = static_cast<uint16_t>(mRaw[17] << 8) | (mRaw[16]);
if (WaveFormat() != 0xFFFE && value != 0) {
NS_WARNING(
"Found non-zero extra format info length and the wave format"
" isn't WAVEFORMATEXTENSIBLE.");
return 0;
}
if (WaveFormat() == 0xFFFE && value < 22) {
NS_WARNING(
"Wave format is WAVEFORMATEXTENSIBLE and extra data size isn't at"
" least 22 bytes");
return 0;
}
return value;
bool FormatParser::FormatChunk::ParseNext(uint8_t c) {
Update(c);
return IsValid();
}
AudioConfig::ChannelLayout::ChannelMap FormatChunk::ChannelMap() const {
// Integer or float files -- regular mapping
if (WaveFormat() == 1 || WaveFormat() == 2) {
return AudioConfig::ChannelLayout(Channels()).Map();
bool FormatParser::FormatChunk::IsValid() const {
return (FrameSize() == SampleRate() * Channels() / 8) &&
(mPos >= FMT_CHUNK_MIN_SIZE);
}
void FormatParser::FormatChunk::Update(uint8_t c) {
if (mPos < FMT_CHUNK_MIN_SIZE) {
mRaw[mPos++] = c;
}
if (ExtraFormatInfoSize() < 22) {
MOZ_ASSERT(Channels() <= 2);
return AudioConfig::ChannelLayout::UNKNOWN_MAP;
}
// ChannelLayout::ChannelMap is by design bit-per-bit compatible with
// WAVEFORMATEXTENSIBLE's dwChannelMask attribute, so we can just cast here.
return static_cast<AudioConfig::ChannelLayout::ChannelMap>(
mRaw[21] | mRaw[20] | mRaw[19] | mRaw[18]);
}
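
The accessors above all decode little-endian fields at fixed offsets in the raw fmt chunk. A self-contained sketch of that decoding against a hypothetical 16-byte PCM fmt payload (the byte values are made up for illustration):

// Decodes the first fields of a WAVEFORMATEX-style "fmt " payload the same
// way the accessors above do. The payload bytes describe a hypothetical
// 2-channel, 48000 Hz, 16-bit PCM stream.
#include <cstdint>
#include <cstdio>

int main() {
  const uint8_t raw[16] = {
      0x01, 0x00,              // wFormatTag = 1 (integer PCM)
      0x02, 0x00,              // nChannels = 2
      0x80, 0xBB, 0x00, 0x00,  // nSamplesPerSec = 48000
      0x00, 0xEE, 0x02, 0x00,  // nAvgBytesPerSec = 192000
      0x04, 0x00,              // nBlockAlign = 4
      0x10, 0x00,              // wBitsPerSample = 16
  };
  const uint16_t format = (raw[1] << 8) | raw[0];
  const uint16_t channels = (raw[3] << 8) | raw[2];
  const uint32_t rate = static_cast<uint32_t>((raw[7] << 24) | (raw[6] << 16) |
                                              (raw[5] << 8) | raw[4]);
  std::printf("format=%u channels=%u rate=%u\n", format, channels, rate);
  return 0;
}
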
// DataParser


@ -73,9 +73,9 @@ class RIFFParser {
private:
bool Update(uint8_t c);
uint8_t mRaw[RIFF_CHUNK_SIZE] = {};
uint8_t mRaw[RIFF_CHUNK_SIZE];
int mPos = 0;
int mPos;
};
RIFFHeader mRiffHeader;
@ -108,32 +108,49 @@ class HeaderParser {
private:
void Update(uint8_t c);
uint8_t mRaw[CHUNK_HEAD_SIZE] = {};
uint8_t mRaw[CHUNK_HEAD_SIZE];
int mPos = 0;
int mPos;
};
ChunkHeader mHeader;
};
class FormatChunk {
public:
FormatChunk() = default;
void Init(nsTArray<uint8_t>&& aData);
bool IsValid() const;
class FormatParser {
private:
class FormatChunk;
uint16_t WaveFormat() const;
uint16_t Channels() const;
uint32_t SampleRate() const;
uint16_t ExtraFormatInfoSize() const;
uint16_t SampleFormat() const;
uint16_t AverageBytesPerSec() const;
uint16_t BlockAlign() const;
uint16_t ValidBitsPerSamples() const;
AudioConfig::ChannelLayout::ChannelMap ChannelMap() const;
public:
const FormatChunk& FmtChunk() const;
Result<uint32_t, nsresult> Parse(BufferReader& aReader);
void Reset();
private:
nsTArray<uint8_t> mRaw;
class FormatChunk {
public:
FormatChunk();
void Reset();
uint16_t WaveFormat() const;
uint16_t Channels() const;
uint32_t SampleRate() const;
uint16_t FrameSize() const;
uint16_t SampleFormat() const;
bool IsValid() const;
bool ParseNext(uint8_t c);
private:
void Update(uint8_t c);
uint8_t mRaw[FMT_CHUNK_MIN_SIZE];
int mPos;
};
FormatChunk mFmtChunk;
};
class DataParser {
@ -153,7 +170,7 @@ class DataParser {
void Reset();
private:
int mPos = 0; // To Check Alignment
int mPos; // To Check Alignment
};
DataChunk mChunk;
@ -215,7 +232,7 @@ class WAVTrackDemuxer : public MediaTrackDemuxer,
uint64_t OffsetFromChunkIndex(uint32_t aChunkIndex) const;
uint64_t ChunkIndexFromTime(const media::TimeUnit& aTime) const;
int64_t Read(uint8_t* aBuffer, int64_t aOffset, int64_t aSize);
uint32_t Read(uint8_t* aBuffer, int64_t aOffset, int32_t aSize);
MediaResourceIndex mSource;
@ -223,7 +240,7 @@ class WAVTrackDemuxer : public MediaTrackDemuxer,
RIFFParser mRIFFParser;
HeaderParser mHeaderParser;
FormatChunk mFmtChunk;
FormatParser mFmtParser;
// ListChunkParser mListChunkParser;
uint64_t mOffset;


@ -38,8 +38,20 @@ int WebAudioUtils::SpeexResamplerProcess(SpeexResamplerState* aResampler,
uint32_t aChannel, const float* aIn,
uint32_t* aInLen, float* aOut,
uint32_t* aOutLen) {
#ifdef MOZ_SAMPLE_TYPE_S16
AutoTArray<AudioDataValue, WEBAUDIO_BLOCK_SIZE * 4> tmp1;
AutoTArray<AudioDataValue, WEBAUDIO_BLOCK_SIZE * 4> tmp2;
tmp1.SetLength(*aInLen);
tmp2.SetLength(*aOutLen);
ConvertAudioSamples(aIn, tmp1.Elements(), *aInLen);
int result = speex_resampler_process_int(
aResampler, aChannel, tmp1.Elements(), aInLen, tmp2.Elements(), aOutLen);
ConvertAudioSamples(tmp2.Elements(), aOut, *aOutLen);
return result;
#else
return speex_resampler_process_float(aResampler, aChannel, aIn, aInLen, aOut,
aOutLen);
#endif
}
int WebAudioUtils::SpeexResamplerProcess(SpeexResamplerState* aResampler,
@ -47,17 +59,29 @@ int WebAudioUtils::SpeexResamplerProcess(SpeexResamplerState* aResampler,
uint32_t* aInLen, float* aOut,
uint32_t* aOutLen) {
AutoTArray<AudioDataValue, WEBAUDIO_BLOCK_SIZE * 4> tmp;
#ifdef MOZ_SAMPLE_TYPE_S16
tmp.SetLength(*aOutLen);
int result = speex_resampler_process_int(aResampler, aChannel, aIn, aInLen,
tmp.Elements(), aOutLen);
ConvertAudioSamples(tmp.Elements(), aOut, *aOutLen);
return result;
#else
tmp.SetLength(*aInLen);
ConvertAudioSamples(aIn, tmp.Elements(), *aInLen);
int result = speex_resampler_process_float(
aResampler, aChannel, tmp.Elements(), aInLen, aOut, aOutLen);
return result;
#endif
}
int WebAudioUtils::SpeexResamplerProcess(SpeexResamplerState* aResampler,
uint32_t aChannel, const int16_t* aIn,
uint32_t* aInLen, int16_t* aOut,
uint32_t* aOutLen) {
#ifdef MOZ_SAMPLE_TYPE_S16
return speex_resampler_process_int(aResampler, aChannel, aIn, aInLen, aOut,
aOutLen);
#else
AutoTArray<AudioDataValue, WEBAUDIO_BLOCK_SIZE * 4> tmp1;
AutoTArray<AudioDataValue, WEBAUDIO_BLOCK_SIZE * 4> tmp2;
tmp1.SetLength(*aInLen);
@ -67,6 +91,7 @@ int WebAudioUtils::SpeexResamplerProcess(SpeexResamplerState* aResampler,
aResampler, aChannel, tmp1.Elements(), aInLen, tmp2.Elements(), aOutLen);
ConvertAudioSamples(tmp2.Elements(), aOut, *aOutLen);
return result;
#endif
}
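
All three overloads above follow the same pattern: convert the caller's samples to the resampler's native type, resample, and convert back. Below is a minimal sketch of the float-to-S16 round-trip for one channel; the +/-32768 scale factor is an assumption for illustration (the real conversion is whatever ConvertAudioSamples does), and the fixed-point resampler call is stubbed out:

// Sketch of the sample-type round-trip used on MOZ_SAMPLE_TYPE_S16 builds.
#include <cstddef>
#include <cstdint>
#include <vector>

static int16_t FloatToS16(float aValue) {
  float scaled = aValue * 32768.0f;
  if (scaled > 32767.0f) scaled = 32767.0f;    // clip to int16 range
  if (scaled < -32768.0f) scaled = -32768.0f;
  return static_cast<int16_t>(scaled);
}

static float S16ToFloat(int16_t aValue) { return aValue / 32768.0f; }

std::vector<float> ResampleViaS16(const std::vector<float>& aIn) {
  std::vector<int16_t> fixed(aIn.size());
  for (size_t i = 0; i < aIn.size(); ++i) {
    fixed[i] = FloatToS16(aIn[i]);
  }
  // ... speex_resampler_process_int() would run on |fixed| here ...
  std::vector<float> out(fixed.size());
  for (size_t i = 0; i < fixed.size(); ++i) {
    out[i] = S16ToFloat(fixed[i]);
  }
  return out;
}
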
void WebAudioUtils::LogToDeveloperConsole(uint64_t aWindowID,


@ -109,9 +109,20 @@ nsReturnRef<HRTFKernel> HRTFElevation::calculateKernelForAzimuthElevation(
const int16_t(&impulse_response_data)[ResponseFrameSize] =
irc_composite_c_r0195[elevationIndex].azimuths[azimuthIndex];
// When libspeex_resampler is compiled with FIXED_POINT, samples in
// speex_resampler_process_float are rounded directly to int16_t, which
// only works well if the floats are in the range +/-32767. On such
// platforms it's better to resample before converting to float anyway.
#ifdef MOZ_SAMPLE_TYPE_S16
# define RESAMPLER_PROCESS speex_resampler_process_int
const int16_t* response = impulse_response_data;
const int16_t* resampledResponse;
#else
# define RESAMPLER_PROCESS speex_resampler_process_float
float response[ResponseFrameSize];
ConvertAudioSamples(impulse_response_data, response, ResponseFrameSize);
float* resampledResponse;
#endif
// Note that depending on the fftSize returned by the panner, we may be
// truncating the impulse response.
@ -129,8 +140,8 @@ nsReturnRef<HRTFKernel> HRTFElevation::calculateKernelForAzimuthElevation(
// Feed the input buffer into the resampler.
spx_uint32_t in_len = ResponseFrameSize;
spx_uint32_t out_len = resampled.Length();
speex_resampler_process_float(resampler, 0, response, &in_len,
resampled.Elements(), &out_len);
RESAMPLER_PROCESS(resampler, 0, response, &in_len, resampled.Elements(),
&out_len);
if (out_len < resampled.Length()) {
// The input should have all been processed.
@ -139,8 +150,8 @@ nsReturnRef<HRTFKernel> HRTFElevation::calculateKernelForAzimuthElevation(
spx_uint32_t out_index = out_len;
in_len = speex_resampler_get_input_latency(resampler);
out_len = resampled.Length() - out_index;
speex_resampler_process_float(resampler, 0, nullptr, &in_len,
resampled.Elements() + out_index, &out_len);
RESAMPLER_PROCESS(resampler, 0, nullptr, &in_len,
resampled.Elements() + out_index, &out_len);
out_index += out_len;
// There may be some uninitialized samples remaining for very low
// sample rates.
@ -150,8 +161,18 @@ nsReturnRef<HRTFKernel> HRTFElevation::calculateKernelForAzimuthElevation(
speex_resampler_reset_mem(resampler);
}
return HRTFKernel::create(resampledResponse, resampledResponseLength,
sampleRate);
#ifdef MOZ_SAMPLE_TYPE_S16
AutoTArray<float, 2 * ResponseFrameSize> floatArray;
floatArray.SetLength(resampledResponseLength);
float* floatResponse = floatArray.Elements();
ConvertAudioSamples(resampledResponse, floatResponse,
resampledResponseLength);
#else
float* floatResponse = resampledResponse;
#endif
#undef RESAMPLER_PROCESS
return HRTFKernel::create(floatResponse, resampledResponseLength, sampleRate);
}
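
The FIXED_POINT caveat in the comment above is worth making concrete: on such builds speex_resampler_process_float rounds each float sample straight to int16_t, so input must already occupy the +/-32767 range rather than the normalized +/-1.0 range. A small worked example of the difference (the 0.73 sample value is arbitrary):

// Why normalized floats are unusable with a FIXED_POINT
// speex_resampler_process_float: rounding straight to int16_t keeps almost
// nothing of a +/-1.0-range signal.
#include <cstdint>
#include <cstdio>

int main() {
  const float normalized = 0.73f;                 // typical Web Audio sample
  const float fullRange = normalized * 32767.0f;  // range FIXED_POINT expects
  std::printf("normalized -> %d, full-range -> %d\n",
              static_cast<int16_t>(normalized + 0.5f),   // 1: nearly silence
              static_cast<int16_t>(fullRange + 0.5f));   // 23920: preserved
  return 0;
}
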
// The range of elevations for the IRCAM impulse responses varies depending on


@ -1,25 +1,24 @@
<!DOCTYPE html>
<!DOCTYPE HTML>
<html>
<head>
<title>Test the decodeAudioData API and Resampling</title>
<script src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<pre id="test">
<head>
<title>Test the decodeAudioData API and Resampling</title>
<script src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<pre id="test">
<script src="webaudio.js" type="text/javascript"></script>
<script type="text/javascript">
// These routines have been copied verbatim from WebKit, and are used in order
// to convert a memory buffer into a wave buffer.
function writeString(s, a, offset) {
// These routines have been copied verbatim from WebKit, and are used in order
// to convert a memory buffer into a wave buffer.
function writeString(s, a, offset) {
for (var i = 0; i < s.length; ++i) {
a[offset + i] = s.charCodeAt(i);
a[offset + i] = s.charCodeAt(i);
}
}
}
function writeInt16(n, a, offset) {
function writeInt16(n, a, offset) {
n = Math.floor(n);
var b1 = n & 255;
@ -27,9 +26,9 @@
a[offset + 0] = b1;
a[offset + 1] = b2;
}
}
function writeInt32(n, a, offset) {
function writeInt32(n, a, offset) {
n = Math.floor(n);
var b1 = n & 255;
var b2 = (n >> 8) & 255;
@ -40,37 +39,37 @@
a[offset + 1] = b2;
a[offset + 2] = b3;
a[offset + 3] = b4;
}
}
function writeAudioBuffer(audioBuffer, a, offset) {
function writeAudioBuffer(audioBuffer, a, offset) {
var n = audioBuffer.length;
var channels = audioBuffer.numberOfChannels;
for (var i = 0; i < n; ++i) {
for (var k = 0; k < channels; ++k) {
var buffer = audioBuffer.getChannelData(k);
var sample = buffer[i] * 32768.0;
for (var k = 0; k < channels; ++k) {
var buffer = audioBuffer.getChannelData(k);
var sample = buffer[i] * 32768.0;
// Clip samples to the limitations of 16-bit.
// If we don't do this then we'll get nasty wrap-around distortion.
if (sample < -32768)
sample = -32768;
if (sample > 32767)
sample = 32767;
// Clip samples to the limitations of 16-bit.
// If we don't do this then we'll get nasty wrap-around distortion.
if (sample < -32768)
sample = -32768;
if (sample > 32767)
sample = 32767;
writeInt16(sample, a, offset);
offset += 2;
}
writeInt16(sample, a, offset);
offset += 2;
}
}
}
}
function createWaveFileData(audioBuffer) {
function createWaveFileData(audioBuffer) {
var frameLength = audioBuffer.length;
var numberOfChannels = audioBuffer.numberOfChannels;
var sampleRate = audioBuffer.sampleRate;
var bitsPerSample = 16;
var byteRate = sampleRate * numberOfChannels * bitsPerSample / 8;
var blockAlign = numberOfChannels * bitsPerSample / 8;
var byteRate = sampleRate * numberOfChannels * bitsPerSample/8;
var blockAlign = numberOfChannels * bitsPerSample/8;
var wavDataByteLength = frameLength * numberOfChannels * 2; // 16-bit audio
var headerByteLength = 44;
var totalLength = headerByteLength + wavDataByteLength;
@ -101,289 +100,289 @@
writeAudioBuffer(audioBuffer, waveFileData, 44);
return waveFileData;
}
}
</script>
<script class="testbody" type="text/javascript">
SimpleTest.waitForExplicitFinish();
SimpleTest.waitForExplicitFinish();
// fuzzTolerance and fuzzToleranceMobile are used to determine fuzziness
// thresholds. They're needed to make sure that we can deal with negligible
// differences in the binary buffer caused by resampling the
// audio. fuzzToleranceMobile is typically larger on mobile platforms since
// we do fixed-point resampling as opposed to floating-point resampling on
// those platforms.
var files = [
// An ogg file, 44.1khz, mono
{
url: "ting-44.1k-1ch.ogg",
valid: true,
expectedUrl: "ting-44.1k-1ch.wav",
numberOfChannels: 1,
frames: 30592,
sampleRate: 44100,
duration: 0.693,
fuzzTolerance: 5,
fuzzToleranceMobile: 1284
},
// An ogg file, 44.1khz, stereo
{
url: "ting-44.1k-2ch.ogg",
valid: true,
expectedUrl: "ting-44.1k-2ch.wav",
numberOfChannels: 2,
frames: 30592,
sampleRate: 44100,
duration: 0.693,
fuzzTolerance: 6,
fuzzToleranceMobile: 2544
},
// An ogg file, 48khz, mono
{
url: "ting-48k-1ch.ogg",
valid: true,
expectedUrl: "ting-48k-1ch.wav",
numberOfChannels: 1,
frames: 33297,
sampleRate: 48000,
duration: 0.693,
fuzzTolerance: 5,
fuzzToleranceMobile: 1388
},
// An ogg file, 48khz, stereo
{
url: "ting-48k-2ch.ogg",
valid: true,
expectedUrl: "ting-48k-2ch.wav",
numberOfChannels: 2,
frames: 33297,
sampleRate: 48000,
duration: 0.693,
fuzzTolerance: 14,
fuzzToleranceMobile: 2752
},
// Make sure decoding a wave file results in the same buffer (for both the
// resampling and non-resampling cases)
{
url: "ting-44.1k-1ch.wav",
valid: true,
expectedUrl: "ting-44.1k-1ch.wav",
numberOfChannels: 1,
frames: 30592,
sampleRate: 44100,
duration: 0.693,
fuzzTolerance: 0,
fuzzToleranceMobile: 0
},
{
url: "ting-48k-1ch.wav",
valid: true,
expectedUrl: "ting-48k-1ch.wav",
numberOfChannels: 1,
frames: 33297,
sampleRate: 48000,
duration: 0.693,
fuzzTolerance: 0,
fuzzToleranceMobile: 0
},
// // A wave file
// //{ url: "24bit-44khz.wav", valid: true, expectedUrl: "24bit-44khz-expected.wav" },
// A non-audio file
{ url: "invalid.txt", valid: false, sampleRate: 44100 },
// A webm file with no audio
{ url: "noaudio.webm", valid: false, sampleRate: 48000 },
// A video ogg file with audio
{
url: "audio.ogv",
valid: true,
expectedUrl: "audio-expected.wav",
numberOfChannels: 2,
sampleRate: 44100,
frames: 47680,
duration: 1.0807,
fuzzTolerance: 106,
fuzzToleranceMobile: 3482
},
{
url: "nil-packet.ogg",
expectedUrl: null,
valid: true,
numberOfChannels: 2,
sampleRate: 48000,
frames: 18600,
duration: 0.3874,
}
];
// Returns true if the memory buffers differ by at most |fuzz| bytes
function fuzzyMemcmp(buf1, buf2, fuzz) {
var result = true;
var difference = 0;
is(buf1.length, buf2.length, "same length");
for (var i = 0; i < buf1.length; ++i) {
if (Math.abs(buf1[i] - buf2[i])) {
++difference;
}
}
if (difference > fuzz) {
ok(false, "Expected at most " + fuzz + " bytes difference, found " + difference + " bytes");
}
return difference <= fuzz;
// fuzzTolerance and fuzzToleranceMobile are used to determine fuzziness
// thresholds. They're needed to make sure that we can deal with negligible
// differences in the binary buffer caused by resampling the
// audio. fuzzToleranceMobile is typically larger on mobile platforms since
// we do fixed-point resampling as opposed to floating-point resampling on
// those platforms.
var files = [
// An ogg file, 44.1khz, mono
{
url: "ting-44.1k-1ch.ogg",
valid: true,
expectedUrl: "ting-44.1k-1ch.wav",
numberOfChannels: 1,
frames: 30592,
sampleRate: 44100,
duration: 0.693,
fuzzTolerance: 5,
fuzzToleranceMobile: 1284
},
// An ogg file, 44.1khz, stereo
{
url: "ting-44.1k-2ch.ogg",
valid: true,
expectedUrl: "ting-44.1k-2ch.wav",
numberOfChannels: 2,
frames: 30592,
sampleRate: 44100,
duration: 0.693,
fuzzTolerance: 6,
fuzzToleranceMobile: 2544
},
// An ogg file, 48khz, mono
{
url: "ting-48k-1ch.ogg",
valid: true,
expectedUrl: "ting-48k-1ch.wav",
numberOfChannels: 1,
frames: 33297,
sampleRate: 48000,
duration: 0.693,
fuzzTolerance: 5,
fuzzToleranceMobile: 1388
},
// An ogg file, 48khz, stereo
{
url: "ting-48k-2ch.ogg",
valid: true,
expectedUrl: "ting-48k-2ch.wav",
numberOfChannels: 2,
frames: 33297,
sampleRate: 48000,
duration: 0.693,
fuzzTolerance: 14,
fuzzToleranceMobile: 2752
},
// Make sure decoding a wave file results in the same buffer (for both the
// resampling and non-resampling cases)
{
url: "ting-44.1k-1ch.wav",
valid: true,
expectedUrl: "ting-44.1k-1ch.wav",
numberOfChannels: 1,
frames: 30592,
sampleRate: 44100,
duration: 0.693,
fuzzTolerance: 0,
fuzzToleranceMobile: 0
},
{
url: "ting-48k-1ch.wav",
valid: true,
expectedUrl: "ting-48k-1ch.wav",
numberOfChannels: 1,
frames: 33297,
sampleRate: 48000,
duration: 0.693,
fuzzTolerance: 0,
fuzzToleranceMobile: 0
},
// // A wave file
// //{ url: "24bit-44khz.wav", valid: true, expectedUrl: "24bit-44khz-expected.wav" },
// A non-audio file
{ url: "invalid.txt", valid: false, sampleRate: 44100 },
// A webm file with no audio
{ url: "noaudio.webm", valid: false, sampleRate: 48000 },
// A video ogg file with audio
{
url: "audio.ogv",
valid: true,
expectedUrl: "audio-expected.wav",
numberOfChannels: 2,
sampleRate: 44100,
frames: 47680,
duration: 1.0807,
fuzzTolerance: 106,
fuzzToleranceMobile: 3482
},
{
url: "nil-packet.ogg",
expectedUrl: null,
valid: true,
numberOfChannels: 2,
sampleRate: 48000,
frames: 18600,
duration: 0.3874,
}
];
function getFuzzTolerance(test) {
var kIsMobile =
navigator.userAgent.includes("Mobile") || // b2g
navigator.userAgent.includes("Android"); // android
return kIsMobile ? test.fuzzToleranceMobile : test.fuzzTolerance;
}
function bufferIsSilent(buffer) {
for (var i = 0; i < buffer.length; ++i) {
if (buffer.getChannelData(0)[i] != 0) {
return false;
}
// Returns true if the memory buffers differ by at most |fuzz| bytes
function fuzzyMemcmp(buf1, buf2, fuzz) {
var result = true;
var difference = 0;
is(buf1.length, buf2.length, "same length");
for (var i = 0; i < buf1.length; ++i) {
if (Math.abs(buf1[i] - buf2[i])) {
++difference;
}
return true;
}
if (difference > fuzz) {
ok(false, "Expected at most " + fuzz + " bytes difference, found " + difference + " bytes");
}
return difference <= fuzz;
}
function checkAudioBuffer(buffer, test) {
if (buffer.numberOfChannels != test.numberOfChannels) {
is(buffer.numberOfChannels, test.numberOfChannels, "Correct number of channels");
function getFuzzTolerance(test) {
var kIsMobile =
navigator.userAgent.includes("Mobile") || // b2g
navigator.userAgent.includes("Android"); // android
return kIsMobile ? test.fuzzToleranceMobile : test.fuzzTolerance;
}
function bufferIsSilent(buffer) {
for (var i = 0; i < buffer.length; ++i) {
if (buffer.getChannelData(0)[i] != 0) {
return false;
}
}
return true;
}
function checkAudioBuffer(buffer, test) {
if (buffer.numberOfChannels != test.numberOfChannels) {
is(buffer.numberOfChannels, test.numberOfChannels, "Correct number of channels");
return;
}
ok(Math.abs(buffer.duration - test.duration) < 1e-3, "Correct duration");
if (Math.abs(buffer.duration - test.duration) >= 1e-3) {
ok(false, "got: " + buffer.duration + ", expected: " + test.duration);
}
is(buffer.sampleRate, test.sampleRate, "Correct sample rate");
is(buffer.length, test.frames, "Correct length");
var wave = createWaveFileData(buffer);
if (test.expectedWaveData) {
ok(fuzzyMemcmp(wave, test.expectedWaveData, getFuzzTolerance(test)), "Received expected decoded data");
}
}
function checkResampledBuffer(buffer, test, callback) {
if (buffer.numberOfChannels != test.numberOfChannels) {
is(buffer.numberOfChannels, test.numberOfChannels, "Correct number of channels");
return;
}
ok(Math.abs(buffer.duration - test.duration) < 1e-3, "Correct duration");
if (Math.abs(buffer.duration - test.duration) >= 1e-3) {
ok(false, "got: " + buffer.duration + ", expected: " + test.duration);
}
// Take into account the resampling when checking the size
var expectedLength = test.frames * buffer.sampleRate / test.sampleRate;
SimpleTest.ok(
Math.abs(buffer.length - expectedLength) < 1.0,
"Correct length - got " + buffer.length +
", expected about " + expectedLength
);
// Play back the buffer in the original context, to resample back to the
// original rate and compare with the decoded buffer without resampling.
cx = test.nativeContext;
var expected = cx.createBufferSource();
expected.buffer = test.expectedBuffer;
expected.start();
var inverse = cx.createGain();
inverse.gain.value = -1;
expected.connect(inverse);
inverse.connect(cx.destination);
var resampled = cx.createBufferSource();
resampled.buffer = buffer;
resampled.start();
// This stop should do nothing, but it tests for bug 937475
resampled.stop(test.frames / cx.sampleRate);
resampled.connect(cx.destination);
cx.oncomplete = function(e) {
ok(!bufferIsSilent(e.renderedBuffer), "Expect buffer not silent");
// Resampling will lose the highest frequency components, so we should
// pass the difference through a low pass filter. However, either the
// input files don't have significant high frequency components or the
// tolerance in compareBuffers() is too high to detect them.
compareBuffers(e.renderedBuffer,
cx.createBuffer(test.numberOfChannels,
test.frames, test.sampleRate));
callback();
}
cx.startRendering();
}
function runResampling(test, response, callback) {
var sampleRate = test.sampleRate == 44100 ? 48000 : 44100;
var cx = new OfflineAudioContext(1, 1, sampleRate);
cx.decodeAudioData(response, function onSuccess(asyncResult) {
is(asyncResult.sampleRate, sampleRate, "Correct sample rate");
checkResampledBuffer(asyncResult, test, callback);
}, function onFailure() {
ok(false, "Expected successful decode with resample");
callback();
});
}
function runTest(test, response, callback) {
// We need to copy the array here, because decodeAudioData will detach the
// array's buffer.
var compressedAudio = response.slice(0);
var expectCallback = false;
var cx = new OfflineAudioContext(test.numberOfChannels || 1,
test.frames || 1, test.sampleRate);
cx.decodeAudioData(response, function onSuccess(asyncResult) {
ok(expectCallback, "Success callback should fire asynchronously");
ok(test.valid, "Did expect success for test " + test.url);
checkAudioBuffer(asyncResult, test);
test.expectedBuffer = asyncResult;
test.nativeContext = cx;
runResampling(test, compressedAudio, callback);
}, function onFailure(e) {
ok(e instanceof DOMException, "We want to see an exception here");
is(e.name, "EncodingError", "Exception name matches");
ok(expectCallback, "Failure callback should fire asynchronously");
ok(!test.valid, "Did expect failure for test " + test.url);
callback();
});
expectCallback = true;
}
function loadTest(test, callback) {
var xhr = new XMLHttpRequest();
xhr.open("GET", test.url, true);
xhr.responseType = "arraybuffer";
xhr.onload = function() {
if (!test.expectedUrl) {
runTest(test, xhr.response, callback);
return;
}
ok(Math.abs(buffer.duration - test.duration) < 1e-3, "Correct duration");
if (Math.abs(buffer.duration - test.duration) >= 1e-3) {
ok(false, "got: " + buffer.duration + ", expected: " + test.duration);
}
is(buffer.sampleRate, test.sampleRate, "Correct sample rate");
is(buffer.length, test.frames, "Correct length");
var wave = createWaveFileData(buffer);
if (test.expectedWaveData) {
ok(fuzzyMemcmp(wave, test.expectedWaveData, getFuzzTolerance(test)), "Received expected decoded data");
}
}
function checkResampledBuffer(buffer, test, callback) {
if (buffer.numberOfChannels != test.numberOfChannels) {
is(buffer.numberOfChannels, test.numberOfChannels, "Correct number of channels");
return;
}
ok(Math.abs(buffer.duration - test.duration) < 1e-3, "Correct duration");
if (Math.abs(buffer.duration - test.duration) >= 1e-3) {
ok(false, "got: " + buffer.duration + ", expected: " + test.duration);
}
// Take into account the resampling when checking the size
var expectedLength = test.frames * buffer.sampleRate / test.sampleRate;
SimpleTest.ok(
Math.abs(buffer.length - expectedLength) < 1.0,
"Correct length - got " + buffer.length +
", expected about " + expectedLength
);
// Play back the buffer in the original context, to resample back to the
// original rate and compare with the decoded buffer without resampling.
cx = test.nativeContext;
var expected = cx.createBufferSource();
expected.buffer = test.expectedBuffer;
expected.start();
var inverse = cx.createGain();
inverse.gain.value = -1;
expected.connect(inverse);
inverse.connect(cx.destination);
var resampled = cx.createBufferSource();
resampled.buffer = buffer;
resampled.start();
// This stop should do nothing, but it tests for bug 937475
resampled.stop(test.frames / cx.sampleRate);
resampled.connect(cx.destination);
cx.oncomplete = function (e) {
ok(!bufferIsSilent(e.renderedBuffer), "Expect buffer not silent");
// Resampling will lose the highest frequency components, so we should
// pass the difference through a low pass filter. However, either the
// input files don't have significant high frequency components or the
// tolerance in compareBuffers() is too high to detect them.
compareBuffers(e.renderedBuffer,
cx.createBuffer(test.numberOfChannels,
test.frames, test.sampleRate));
callback();
}
cx.startRendering();
}
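
The expectedLength check above is plain rate scaling, and the numbers in the file list are consistent with it: the 44.1 kHz "ting" files decode to 30592 frames, so when runResampling() re-decodes them in a 48 kHz context the expected length is 30592 * 48000 / 44100 ≈ 33297.4 frames, within the < 1.0 frame tolerance of the 33297 frames listed for the 48 kHz variants.
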
function runResampling(test, response, callback) {
var sampleRate = test.sampleRate == 44100 ? 48000 : 44100;
var cx = new OfflineAudioContext(1, 1, sampleRate);
cx.decodeAudioData(response, function onSuccess(asyncResult) {
is(asyncResult.sampleRate, sampleRate, "Correct sample rate");
checkResampledBuffer(asyncResult, test, callback);
}, function onFailure() {
ok(false, "Expected successful decode with resample");
callback();
});
}
function runTest(test, response, callback) {
// We need to copy the array here, because decodeAudioData will detach the
// array's buffer.
var compressedAudio = response.slice(0);
var expectCallback = false;
var cx = new OfflineAudioContext(test.numberOfChannels || 1,
test.frames || 1, test.sampleRate);
cx.decodeAudioData(response, function onSuccess(asyncResult) {
ok(expectCallback, "Success callback should fire asynchronously");
ok(test.valid, "Did expect success for test " + test.url);
checkAudioBuffer(asyncResult, test);
test.expectedBuffer = asyncResult;
test.nativeContext = cx;
runResampling(test, compressedAudio, callback);
}, function onFailure(e) {
ok(e instanceof DOMException, "We want to see an exception here");
is(e.name, "EncodingError", "Exception name matches");
ok(expectCallback, "Failure callback should fire asynchronously");
ok(!test.valid, "Did expect failure for test " + test.url);
callback();
});
expectCallback = true;
}
function loadTest(test, callback) {
var xhr = new XMLHttpRequest();
xhr.open("GET", test.url, true);
xhr.responseType = "arraybuffer";
xhr.onload = function () {
if (!test.expectedUrl) {
runTest(test, xhr.response, callback);
return;
}
var getExpected = new XMLHttpRequest();
getExpected.open("GET", test.expectedUrl, true);
getExpected.responseType = "arraybuffer";
getExpected.onload = function () {
test.expectedWaveData = new Uint8Array(getExpected.response);
runTest(test, xhr.response, callback);
};
getExpected.send();
var getExpected = new XMLHttpRequest();
getExpected.open("GET", test.expectedUrl, true);
getExpected.responseType = "arraybuffer";
getExpected.onload = function() {
test.expectedWaveData = new Uint8Array(getExpected.response);
runTest(test, xhr.response, callback);
};
xhr.send();
}
getExpected.send();
};
xhr.send();
}
function loadNextTest() {
if (files.length) {
loadTest(files.shift(), loadNextTest);
} else {
SimpleTest.finish();
}
function loadNextTest() {
if (files.length) {
loadTest(files.shift(), loadNextTest);
} else {
SimpleTest.finish();
}
}
loadNextTest();
loadNextTest();
</script>
</pre>
</body>
</body>
</html>
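
The fuzz-tolerance machinery in this test boils down to a byte-wise comparison with an allowed number of mismatches. An equivalent standalone sketch in C++ (the names here are ours, not the test's):

// Equivalent of the test's fuzzyMemcmp(): buffers compare equal when at
// most |aFuzz| bytes differ. Mirrors the JS logic above, nothing more.
#include <cstddef>
#include <cstdint>
#include <vector>

bool FuzzyMemcmp(const std::vector<uint8_t>& aBuf1,
                 const std::vector<uint8_t>& aBuf2, size_t aFuzz) {
  if (aBuf1.size() != aBuf2.size()) {
    return false;
  }
  size_t difference = 0;
  for (size_t i = 0; i < aBuf1.size(); ++i) {
    if (aBuf1[i] != aBuf2[i]) {
      ++difference;  // count differing bytes, not their magnitude
    }
  }
  return difference <= aFuzz;
}
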


@ -235,7 +235,6 @@ already_AddRefed<MediaTrackDemuxer> WebMDemuxer::GetTrackDemuxer(
}
void WebMDemuxer::Reset(TrackInfo::TrackType aType) {
mProcessedDiscardPadding = false;
if (aType == TrackInfo::kVideoTrack) {
mVideoPackets.Reset();
} else {
@ -417,12 +416,11 @@ nsresult WebMDemuxer::ReadMetadata() {
uint64_t codecDelayUs = params.codec_delay / 1000;
mInfo.mAudio.mMimeType = "audio/opus";
OpusCodecSpecificData opusCodecSpecificData;
opusCodecSpecificData.mContainerCodecDelayFrames =
AssertedCast<int64_t>(USECS_PER_S * codecDelayUs / 48000);
opusCodecSpecificData.mContainerCodecDelayMicroSeconds =
AssertedCast<int64_t>(codecDelayUs);
mInfo.mAudio.mCodecSpecificConfig =
AudioCodecSpecificVariant{std::move(opusCodecSpecificData)};
WEBM_DEBUG("Preroll for Opus: %" PRIu64 " frames",
opusCodecSpecificData.mContainerCodecDelayFrames);
WEBM_DEBUG("Preroll for Opus: %" PRIu64, codecDelayUs);
}
mSeekPreroll = params.seek_preroll;
mInfo.mAudio.mRate = AssertedCast<uint32_t>(params.rate);
@ -584,7 +582,6 @@ nsresult WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType,
unsigned int count = 0;
r = nestegg_packet_count(holder->Packet(), &count);
if (r == -1) {
WEBM_DEBUG("nestegg_packet_count: error");
return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
}
int64_t tstamp = holder->Timestamp();
@ -597,7 +594,6 @@ nsresult WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType,
RefPtr<NesteggPacketHolder> next_holder;
rv = NextPacket(aType, next_holder);
if (NS_FAILED(rv) && rv != NS_ERROR_DOM_MEDIA_END_OF_STREAM) {
WEBM_DEBUG("NextPacket: error");
return rv;
}
@ -639,7 +635,6 @@ nsresult WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType,
}
if (mIsMediaSource && next_tstamp == INT64_MIN) {
WEBM_DEBUG("WebM is a media source, and next timestamp computation filed.");
return NS_ERROR_DOM_MEDIA_END_OF_STREAM;
}
@ -726,13 +721,13 @@ nsresult WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType,
sample = new MediaRawData(data, length, alphaData, alphaLength);
if ((length && !sample->Data()) ||
(alphaLength && !sample->AlphaData())) {
WEBM_DEBUG("Couldn't allocate MediaRawData: OOM");
// OOM.
return NS_ERROR_OUT_OF_MEMORY;
}
} else {
sample = new MediaRawData(data, length);
if (length && !sample->Data()) {
WEBM_DEBUG("Couldn't allocate MediaRawData: OOM");
// OOM.
return NS_ERROR_OUT_OF_MEMORY;
}
}
@ -744,25 +739,20 @@ nsresult WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType,
sample->mOffset = holder->Offset();
sample->mKeyframe = isKeyframe;
if (discardPadding && i == count - 1) {
sample->mOriginalPresentationWindow =
Some(media::TimeInterval{sample->mTime, sample->GetEndTime()});
CheckedInt64 discardFrames;
if (discardPadding < 0) {
// This will ensure decoding will error out, and the file is rejected.
sample->mDuration = TimeUnit::Invalid();
// This is an invalid value as discard padding should never be negative.
// Set to maximum value so that the decoder will reject it as it's
// greater than the number of frames available.
discardFrames = INT32_MAX;
WEBM_DEBUG("Invalid negative discard padding");
} else {
TimeUnit padding = TimeUnit::FromNanoseconds(discardPadding);
if (padding > sample->mDuration || mProcessedDiscardPadding) {
WEBM_DEBUG(
"Padding frames larger than packet size, flagging the packet for "
"error (padding: %s, duration: %s, already processed: %s)",
padding.ToString().get(), sample->mDuration.ToString().get(),
mProcessedDiscardPadding ? "true" : "false");
sample->mDuration = TimeUnit::Invalid();
} else {
sample->mDuration -= padding;
}
discardFrames = TimeUnitToFrames(
TimeUnit::FromNanoseconds(discardPadding), mInfo.mAudio.mRate);
}
if (discardFrames.isValid()) {
sample->mDiscardPadding = discardFrames.value();
}
mProcessedDiscardPadding = true;
}
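
The discard-padding branch above converts a nanosecond quantity from the container into a frame count at the audio rate. A worked example of that conversion, with a hypothetical 6.5 ms of padding at 48 kHz:

// frames = nanoseconds * rate / 1e9, as TimeUnitToFrames computes above.
// 6,500,000 ns and 48000 Hz are illustrative values only.
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t discardPaddingNs = 6500000;  // 6.5 ms of trailing padding
  const uint32_t rate = 48000;
  const int64_t discardFrames = discardPaddingNs * rate / 1000000000;
  std::printf("%lld frames\n", static_cast<long long>(discardFrames));  // 312
  return 0;
}
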
if (packetEncryption == NESTEGG_PACKET_HAS_SIGNAL_BYTE_ENCRYPTED ||
@ -876,7 +866,6 @@ nsresult WebMDemuxer::NextPacket(TrackInfo::TrackType aType,
bool hasType = isVideo ? mHasVideo : mHasAudio;
if (!hasType) {
WEBM_DEBUG("No media type found");
return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
}
@ -898,7 +887,6 @@ nsresult WebMDemuxer::NextPacket(TrackInfo::TrackType aType,
return rv;
}
if (!holder) {
WEBM_DEBUG("Couldn't demux packet");
return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
}
@ -915,24 +903,20 @@ nsresult WebMDemuxer::DemuxPacket(TrackInfo::TrackType aType,
int r = nestegg_read_packet(Context(aType), &packet);
if (r == 0) {
nestegg_read_reset(Context(aType));
WEBM_DEBUG("EOS");
return NS_ERROR_DOM_MEDIA_END_OF_STREAM;
} else if (r < 0) {
WEBM_DEBUG("nestegg_read_packet: error");
return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
}
unsigned int track = 0;
r = nestegg_packet_track(packet, &track);
if (r == -1) {
WEBM_DEBUG("nestegg_packet_track: error");
return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
}
int64_t offset = Resource(aType).Tell();
RefPtr<NesteggPacketHolder> holder = new NesteggPacketHolder();
if (!holder->Init(packet, offset, track, false)) {
WEBM_DEBUG("NesteggPacketHolder::Init: error");
return NS_ERROR_DOM_MEDIA_DEMUXER_ERR;
}
@ -1172,7 +1156,6 @@ nsresult WebMTrackDemuxer::NextSample(RefPtr<MediaRawData>& aData) {
aData = mSamples.PopFront();
return NS_OK;
}
WEBM_DEBUG("WebMTrackDemuxer::NextSample: error");
return rv;
}


@ -238,11 +238,6 @@ class WebMDemuxer : public MediaDataDemuxer,
// as nestegg only performs 1-byte read at a time.
int64_t mLastWebMBlockOffset;
const bool mIsMediaSource;
// Discard padding in WebM cannot occur more than once. This is set to true if
// a discard padding element has been found and processed, and the decoding is
// expected to error out if another discard padding element is found
// subsequently in the byte stream.
bool mProcessedDiscardPadding = false;
EncryptionInfo mCrypto;
};


@ -169,7 +169,7 @@ dictionary MediaStateDebugInfo {
long demuxEOS = 0;
long drainState = 0;
boolean waitingForKey = false;
long long lastStreamSourceID = 0;
long lastStreamSourceID = 0;
};
dictionary MediaFrameStats {


@ -42,7 +42,7 @@ add_setup(async function setup() {
});
add_task(async function testKill() {
await runTest("small-shot.ogg", "Utility Generic", "ffvpx audio decoder");
await runTest("small-shot.ogg", "Utility Generic", "vorbis audio decoder");
await cleanUtilityProcessShutdown(
"audioDecoder_Generic",
@ -59,7 +59,7 @@ add_task(async function testKill() {
});
add_task(async function testShutdown() {
await runTest("small-shot.ogg", "Utility Generic", "ffvpx audio decoder");
await runTest("small-shot.ogg", "Utility Generic", "vorbis audio decoder");
const audioDecoderPid = await findGenericAudioDecoder();
ok(audioDecoderPid > 0, `Valid PID found: ${audioDecoderPid}`);


@ -110,19 +110,19 @@ function audioTestData() {
expectations: {
Android: {
process: "Utility Generic",
decoder: "ffvpx audio decoder",
decoder: "vorbis audio decoder",
},
Linux: {
process: "Utility Generic",
decoder: "ffvpx audio decoder",
decoder: "vorbis audio decoder",
},
WINNT: {
process: "Utility Generic",
decoder: "ffvpx audio decoder",
decoder: "vorbis audio decoder",
},
Darwin: {
process: "Utility Generic",
decoder: "ffvpx audio decoder",
decoder: "vorbis audio decoder",
},
},
},


@ -2,20 +2,16 @@
This directory contains files used in gecko builds from FFmpeg
(http://ffmpeg.org). The current files are from FFmpeg as of
revision
37cde570bc2dcd64a15c5d9a37b9fa0d78d84f9f
revision fed07efcde72824ac1ada80d4af4e91ac4fcfc14
git clone https://git.ffmpeg.org/ffmpeg.git ffmpeg
git checkout 37cde570bc2dcd64a15c5d9a37b9fa0d78d84f9f
git checkout fed07efcde72824ac1ada80d4af4e91ac4fcfc14
All source files match their path from the library's source archive.
Currently, we only use the vp8, vp9, av1 (via libdav1d), mp3, flac, vorbis (via
libvorbis), opus (via libopus) and PCM portion of the library. If this changes,
configuration files will most likely need to be updated.
Decoding AV1 via libdav1d and libvorbis is supported, although the decoder
libraries are vendored separately; `ffvpx` only contains the code to use
those libraries through the `ffmpeg` API.
Currently, we only use the vp8, vp9, av1 (libdav1d), mp3, and flac portion of
the library. If this changes, configuration files will most likely need to be
updated. Decoding AV1 via libdav1d is supported, although the decoder
(libdav1d) is vendored separately; `ffvpx` only contains the code to use
`libdav1d` through the `ffmpeg` API.
The ffmpeg project recommends using ffmpeg's tip, not a particular release.


@ -424,34 +424,34 @@
#define CONFIG_WS_SND1_DECODER 0
#define CONFIG_XMA1_DECODER 0
#define CONFIG_XMA2_DECODER 0
#define CONFIG_PCM_ALAW_DECODER 1
#define CONFIG_PCM_ALAW_DECODER 0
#define CONFIG_PCM_BLURAY_DECODER 0
#define CONFIG_PCM_DVD_DECODER 0
#define CONFIG_PCM_F16LE_DECODER 0
#define CONFIG_PCM_F24LE_DECODER 0
#define CONFIG_PCM_F32BE_DECODER 0
#define CONFIG_PCM_F32LE_DECODER 1
#define CONFIG_PCM_F32LE_DECODER 0
#define CONFIG_PCM_F64BE_DECODER 0
#define CONFIG_PCM_F64LE_DECODER 0
#define CONFIG_PCM_LXF_DECODER 0
#define CONFIG_PCM_MULAW_DECODER 1
#define CONFIG_PCM_MULAW_DECODER 0
#define CONFIG_PCM_S8_DECODER 0
#define CONFIG_PCM_S8_PLANAR_DECODER 0
#define CONFIG_PCM_S16BE_DECODER 0
#define CONFIG_PCM_S16BE_PLANAR_DECODER 0
#define CONFIG_PCM_S16LE_DECODER 1
#define CONFIG_PCM_S16LE_DECODER 0
#define CONFIG_PCM_S16LE_PLANAR_DECODER 0
#define CONFIG_PCM_S24BE_DECODER 0
#define CONFIG_PCM_S24DAUD_DECODER 0
#define CONFIG_PCM_S24LE_DECODER 1
#define CONFIG_PCM_S24LE_DECODER 0
#define CONFIG_PCM_S24LE_PLANAR_DECODER 0
#define CONFIG_PCM_S32BE_DECODER 0
#define CONFIG_PCM_S32LE_DECODER 1
#define CONFIG_PCM_S32LE_DECODER 0
#define CONFIG_PCM_S32LE_PLANAR_DECODER 0
#define CONFIG_PCM_S64BE_DECODER 0
#define CONFIG_PCM_S64LE_DECODER 0
#define CONFIG_PCM_SGA_DECODER 0
#define CONFIG_PCM_U8_DECODER 1
#define CONFIG_PCM_U8_DECODER 0
#define CONFIG_PCM_U16BE_DECODER 0
#define CONFIG_PCM_U16LE_DECODER 0
#define CONFIG_PCM_U24BE_DECODER 0
@ -567,11 +567,11 @@
#define CONFIG_LIBOPENCORE_AMRNB_DECODER 0
#define CONFIG_LIBOPENCORE_AMRWB_DECODER 0
#define CONFIG_LIBOPENJPEG_DECODER 0
#define CONFIG_LIBOPUS_DECODER 1
#define CONFIG_LIBOPUS_DECODER 0
#define CONFIG_LIBRSVG_DECODER 0
#define CONFIG_LIBSPEEX_DECODER 0
#define CONFIG_LIBUAVS3D_DECODER 0
#define CONFIG_LIBVORBIS_DECODER 1
#define CONFIG_LIBVORBIS_DECODER 0
#define CONFIG_LIBVPX_VP8_DECODER 0
#define CONFIG_LIBVPX_VP9_DECODER 0
#define CONFIG_LIBZVBI_TELETEXT_DECODER 0


@ -8,7 +8,6 @@
#define CONFIG_CHOMP_BSF 0
#define CONFIG_DUMP_EXTRADATA_BSF 0
#define CONFIG_DCA_CORE_BSF 0
#define CONFIG_DTS2PTS_BSF 0
#define CONFIG_DV_ERROR_MARKER_BSF 0
#define CONFIG_EAC3_CORE_BSF 0
#define CONFIG_EXTRACT_EXTRADATA_BSF 0
@ -20,7 +19,6 @@
#define CONFIG_HEVC_METADATA_BSF 0
#define CONFIG_HEVC_MP4TOANNEXB_BSF 0
#define CONFIG_IMX_DUMP_HEADER_BSF 0
#define CONFIG_MEDIA100_TO_MJPEGB_BSF 0
#define CONFIG_MJPEG2JPEG_BSF 0
#define CONFIG_MJPEGA_DUMP_HEADER_BSF 0
#define CONFIG_MP3_HEADER_DECOMPRESS_BSF 0
@ -164,7 +162,6 @@
#define CONFIG_M101_DECODER 0
#define CONFIG_MAGICYUV_DECODER 0
#define CONFIG_MDEC_DECODER 0
#define CONFIG_MEDIA100_DECODER 0
#define CONFIG_MIMIC_DECODER 0
#define CONFIG_MJPEG_DECODER 0
#define CONFIG_MJPEGB_DECODER 0
@ -214,7 +211,6 @@
#define CONFIG_PGM_DECODER 0
#define CONFIG_PGMYUV_DECODER 0
#define CONFIG_PGX_DECODER 0
#define CONFIG_PHM_DECODER 0
#define CONFIG_PHOTOCD_DECODER 0
#define CONFIG_PICTOR_DECODER 0
#define CONFIG_PIXLET_DECODER 0
@ -232,7 +228,6 @@
#define CONFIG_R210_DECODER 0
#define CONFIG_RASC_DECODER 0
#define CONFIG_RAWVIDEO_DECODER 0
#define CONFIG_RKA_DECODER 0
#define CONFIG_RL2_DECODER 0
#define CONFIG_ROQ_DECODER 0
#define CONFIG_RPZA_DECODER 0
@ -308,8 +303,6 @@
#define CONFIG_VP9_RKMPP_DECODER 0
#define CONFIG_VP9_V4L2M2M_DECODER 0
#define CONFIG_VQA_DECODER 0
#define CONFIG_VQC_DECODER 0
#define CONFIG_WBMP_DECODER 0
#define CONFIG_WEBP_DECODER 0
#define CONFIG_WCMV_DECODER 0
#define CONFIG_WRAPPED_AVFRAME_DECODER 0
@ -344,7 +337,6 @@
#define CONFIG_ALS_DECODER 0
#define CONFIG_AMRNB_DECODER 0
#define CONFIG_AMRWB_DECODER 0
#define CONFIG_APAC_DECODER 0
#define CONFIG_APE_DECODER 0
#define CONFIG_APTX_DECODER 0
#define CONFIG_APTX_HD_DECODER 0
@ -357,7 +349,6 @@
#define CONFIG_BINKAUDIO_DCT_DECODER 0
#define CONFIG_BINKAUDIO_RDFT_DECODER 0
#define CONFIG_BMV_AUDIO_DECODER 0
#define CONFIG_BONK_DECODER 0
#define CONFIG_COOK_DECODER 0
#define CONFIG_DCA_DECODER 0
#define CONFIG_DFPWM_DECODER 0
@ -374,14 +365,12 @@
#define CONFIG_FASTAUDIO_DECODER 0
#define CONFIG_FFWAVESYNTH_DECODER 0
#define CONFIG_FLAC_DECODER 1
#define CONFIG_FTR_DECODER 0
#define CONFIG_G723_1_DECODER 0
#define CONFIG_G729_DECODER 0
#define CONFIG_GSM_DECODER 0
#define CONFIG_GSM_MS_DECODER 0
#define CONFIG_HCA_DECODER 0
#define CONFIG_HCOM_DECODER 0
#define CONFIG_HDR_DECODER 0
#define CONFIG_IAC_DECODER 0
#define CONFIG_ILBC_DECODER 0
#define CONFIG_IMC_DECODER 0
@ -389,7 +378,6 @@
#define CONFIG_MACE3_DECODER 0
#define CONFIG_MACE6_DECODER 0
#define CONFIG_METASOUND_DECODER 0
#define CONFIG_MISC4_DECODER 0
#define CONFIG_MLP_DECODER 0
#define CONFIG_MP1_DECODER 0
#define CONFIG_MP1FLOAT_DECODER 0
@ -426,8 +414,7 @@
#define CONFIG_TTA_DECODER 0
#define CONFIG_TWINVQ_DECODER 0
#define CONFIG_VMDAUDIO_DECODER 0
#define CONFIG_VORBIS_DECODER 1
#define CONFIG_WAVARC_DECODER 0
#define CONFIG_VORBIS_DECODER 0
#define CONFIG_WAVPACK_DECODER 0
#define CONFIG_WMALOSSLESS_DECODER 0
#define CONFIG_WMAPRO_DECODER 0
@ -437,34 +424,34 @@
#define CONFIG_WS_SND1_DECODER 0
#define CONFIG_XMA1_DECODER 0
#define CONFIG_XMA2_DECODER 0
#define CONFIG_PCM_ALAW_DECODER 1
#define CONFIG_PCM_ALAW_DECODER 0
#define CONFIG_PCM_BLURAY_DECODER 0
#define CONFIG_PCM_DVD_DECODER 0
#define CONFIG_PCM_F16LE_DECODER 0
#define CONFIG_PCM_F24LE_DECODER 0
#define CONFIG_PCM_F32BE_DECODER 0
#define CONFIG_PCM_F32LE_DECODER 1
#define CONFIG_PCM_F32LE_DECODER 0
#define CONFIG_PCM_F64BE_DECODER 0
#define CONFIG_PCM_F64LE_DECODER 0
#define CONFIG_PCM_LXF_DECODER 0
#define CONFIG_PCM_MULAW_DECODER 1
#define CONFIG_PCM_MULAW_DECODER 0
#define CONFIG_PCM_S8_DECODER 0
#define CONFIG_PCM_S8_PLANAR_DECODER 0
#define CONFIG_PCM_S16BE_DECODER 0
#define CONFIG_PCM_S16BE_PLANAR_DECODER 0
#define CONFIG_PCM_S16LE_DECODER 1
#define CONFIG_PCM_S16LE_DECODER 0
#define CONFIG_PCM_S16LE_PLANAR_DECODER 0
#define CONFIG_PCM_S24BE_DECODER 0
#define CONFIG_PCM_S24DAUD_DECODER 0
#define CONFIG_PCM_S24LE_DECODER 1
#define CONFIG_PCM_S24LE_DECODER 0
#define CONFIG_PCM_S24LE_PLANAR_DECODER 0
#define CONFIG_PCM_S32BE_DECODER 0
#define CONFIG_PCM_S32LE_DECODER 1
#define CONFIG_PCM_S32LE_DECODER 0
#define CONFIG_PCM_S32LE_PLANAR_DECODER 0
#define CONFIG_PCM_S64BE_DECODER 0
#define CONFIG_PCM_S64LE_DECODER 0
#define CONFIG_PCM_SGA_DECODER 0
#define CONFIG_PCM_U8_DECODER 1
#define CONFIG_PCM_U8_DECODER 0
#define CONFIG_PCM_U16BE_DECODER 0
#define CONFIG_PCM_U16LE_DECODER 0
#define CONFIG_PCM_U24BE_DECODER 0
@ -472,7 +459,6 @@
#define CONFIG_PCM_U32BE_DECODER 0
#define CONFIG_PCM_U32LE_DECODER 0
#define CONFIG_PCM_VIDC_DECODER 0
#define CONFIG_CBD2_DPCM_DECODER 0
#define CONFIG_DERF_DPCM_DECODER 0
#define CONFIG_GREMLIN_DPCM_DECODER 0
#define CONFIG_INTERPLAY_DPCM_DECODER 0
@ -480,7 +466,6 @@
#define CONFIG_SDX2_DPCM_DECODER 0
#define CONFIG_SOL_DPCM_DECODER 0
#define CONFIG_XAN_DPCM_DECODER 0
#define CONFIG_WADY_DPCM_DECODER 0
#define CONFIG_ADPCM_4XM_DECODER 0
#define CONFIG_ADPCM_ADX_DECODER 0
#define CONFIG_ADPCM_AFC_DECODER 0
@ -530,7 +515,6 @@
#define CONFIG_ADPCM_THP_LE_DECODER 0
#define CONFIG_ADPCM_VIMA_DECODER 0
#define CONFIG_ADPCM_XA_DECODER 0
#define CONFIG_ADPCM_XMD_DECODER 0
#define CONFIG_ADPCM_YAMAHA_DECODER 0
#define CONFIG_ADPCM_ZORK_DECODER 0
#define CONFIG_SSA_DECODER 0
@ -583,11 +567,11 @@
#define CONFIG_LIBOPENCORE_AMRNB_DECODER 0
#define CONFIG_LIBOPENCORE_AMRWB_DECODER 0
#define CONFIG_LIBOPENJPEG_DECODER 0
#define CONFIG_LIBOPUS_DECODER 1
#define CONFIG_LIBOPUS_DECODER 0
#define CONFIG_LIBRSVG_DECODER 0
#define CONFIG_LIBSPEEX_DECODER 0
#define CONFIG_LIBUAVS3D_DECODER 0
#define CONFIG_LIBVORBIS_DECODER 1
#define CONFIG_LIBVORBIS_DECODER 0
#define CONFIG_LIBVPX_VP8_DECODER 0
#define CONFIG_LIBVPX_VP9_DECODER 0
#define CONFIG_LIBZVBI_TELETEXT_DECODER 0
@ -597,7 +581,6 @@
#define CONFIG_LIBAOM_AV1_DECODER 0
#define CONFIG_AV1_DECODER 1
#define CONFIG_AV1_CUVID_DECODER 0
#define CONFIG_AV1_MEDIACODEC_DECODER 0
#define CONFIG_AV1_QSV_DECODER 0
#define CONFIG_LIBOPENH264_DECODER 0
#define CONFIG_H264_CUVID_DECODER 0
@ -616,8 +599,6 @@
#define CONFIG_VP9_CUVID_DECODER 0
#define CONFIG_VP9_MEDIACODEC_DECODER 0
#define CONFIG_VP9_QSV_DECODER 0
#define CONFIG_VNULL_DECODER 0
#define CONFIG_ANULL_DECODER 0
#define CONFIG_A64MULTI_ENCODER 0
#define CONFIG_A64MULTI5_ENCODER 0
#define CONFIG_ALIAS_PIX_ENCODER 0
@ -648,7 +629,6 @@
#define CONFIG_H261_ENCODER 0
#define CONFIG_H263_ENCODER 0
#define CONFIG_H263P_ENCODER 0
#define CONFIG_H264_MEDIACODEC_ENCODER 0
#define CONFIG_HAP_ENCODER 0
#define CONFIG_HUFFYUV_ENCODER 0
#define CONFIG_JPEG2000_ENCODER 0
@ -668,7 +648,6 @@
#define CONFIG_PFM_ENCODER 0
#define CONFIG_PGM_ENCODER 0
#define CONFIG_PGMYUV_ENCODER 0
#define CONFIG_PHM_ENCODER 0
#define CONFIG_PNG_ENCODER 0
#define CONFIG_PPM_ENCODER 0
#define CONFIG_PRORES_ENCODER 0
@ -699,7 +678,6 @@
#define CONFIG_V410_ENCODER 0
#define CONFIG_VBN_ENCODER 0
#define CONFIG_VC2_ENCODER 0
#define CONFIG_WBMP_ENCODER 0
#define CONFIG_WRAPPED_AVFRAME_ENCODER 0
#define CONFIG_WMV1_ENCODER 0
#define CONFIG_WMV2_ENCODER 0
@ -721,7 +699,6 @@
#define CONFIG_EAC3_ENCODER 0
#define CONFIG_FLAC_ENCODER 0
#define CONFIG_G723_1_ENCODER 0
#define CONFIG_HDR_ENCODER 0
#define CONFIG_MLP_ENCODER 0
#define CONFIG_MP2_ENCODER 0
#define CONFIG_MP2FIXED_ENCODER 0
@ -833,9 +810,6 @@
#define CONFIG_AAC_MF_ENCODER 0
#define CONFIG_AC3_MF_ENCODER 0
#define CONFIG_H263_V4L2M2M_ENCODER 0
#define CONFIG_AV1_NVENC_ENCODER 0
#define CONFIG_AV1_QSV_ENCODER 0
#define CONFIG_AV1_AMF_ENCODER 0
#define CONFIG_LIBOPENH264_ENCODER 0
#define CONFIG_H264_AMF_ENCODER 0
#define CONFIG_H264_MF_ENCODER 0
@ -846,7 +820,6 @@
#define CONFIG_H264_VAAPI_ENCODER 0
#define CONFIG_H264_VIDEOTOOLBOX_ENCODER 0
#define CONFIG_HEVC_AMF_ENCODER 0
#define CONFIG_HEVC_MEDIACODEC_ENCODER 0
#define CONFIG_HEVC_MF_ENCODER 0
#define CONFIG_HEVC_NVENC_ENCODER 0
#define CONFIG_HEVC_QSV_ENCODER 0
@ -866,8 +839,6 @@
#define CONFIG_VP8_VAAPI_ENCODER 0
#define CONFIG_VP9_VAAPI_ENCODER 0
#define CONFIG_VP9_QSV_ENCODER 0
#define CONFIG_VNULL_ENCODER 0
#define CONFIG_ANULL_ENCODER 0
#define CONFIG_AV1_D3D11VA_HWACCEL 0
#define CONFIG_AV1_D3D11VA2_HWACCEL 0
#define CONFIG_AV1_DXVA2_HWACCEL 0
@ -950,7 +921,6 @@
#define CONFIG_DVDSUB_PARSER 0
#define CONFIG_DVD_NAV_PARSER 0
#define CONFIG_FLAC_PARSER 0
#define CONFIG_FTR_PARSER 0
#define CONFIG_G723_1_PARSER 0
#define CONFIG_G729_PARSER 0
#define CONFIG_GIF_PARSER 0
@ -959,10 +929,8 @@
#define CONFIG_H263_PARSER 0
#define CONFIG_H264_PARSER 0
#define CONFIG_HEVC_PARSER 0
#define CONFIG_HDR_PARSER 0
#define CONFIG_IPU_PARSER 0
#define CONFIG_JPEG2000_PARSER 0
#define CONFIG_MISC4_PARSER 0
#define CONFIG_MJPEG_PARSER 0
#define CONFIG_MLP_PARSER 0
#define CONFIG_MPEG4VIDEO_PARSER 0
@ -985,7 +953,6 @@
#define CONFIG_WEBP_PARSER 0
#define CONFIG_XBM_PARSER 0
#define CONFIG_XMA_PARSER 0
#define CONFIG_XWD_PARSER 0
#define CONFIG_ALSA_INDEV 0
#define CONFIG_ANDROID_CAMERA_INDEV 0
#define CONFIG_AVFOUNDATION_INDEV 0
@ -1033,7 +1000,6 @@
#define CONFIG_ADELAY_FILTER 0
#define CONFIG_ADENORM_FILTER 0
#define CONFIG_ADERIVATIVE_FILTER 0
#define CONFIG_ADRC_FILTER 0
#define CONFIG_ADYNAMICEQUALIZER_FILTER 0
#define CONFIG_ADYNAMICSMOOTH_FILTER 0
#define CONFIG_AECHO_FILTER 0
@ -1156,7 +1122,6 @@
#define CONFIG_VOLUME_FILTER 0
#define CONFIG_VOLUMEDETECT_FILTER 0
#define CONFIG_AEVALSRC_FILTER 0
#define CONFIG_AFDELAYSRC_FILTER 0
#define CONFIG_AFIRSRC_FILTER 0
#define CONFIG_ANOISESRC_FILTER 0
#define CONFIG_ANULLSRC_FILTER 0
@ -1174,11 +1139,9 @@
#define CONFIG_AVGBLUR_FILTER 0
#define CONFIG_AVGBLUR_OPENCL_FILTER 0
#define CONFIG_AVGBLUR_VULKAN_FILTER 0
#define CONFIG_BACKGROUNDKEY_FILTER 0
#define CONFIG_BBOX_FILTER 0
#define CONFIG_BENCH_FILTER 0
#define CONFIG_BILATERAL_FILTER 0
#define CONFIG_BILATERAL_CUDA_FILTER 0
#define CONFIG_BITPLANENOISE_FILTER 0
#define CONFIG_BLACKDETECT_FILTER 0
#define CONFIG_BLACKFRAME_FILTER 0
@ -1194,7 +1157,6 @@
#define CONFIG_CHROMABER_VULKAN_FILTER 0
#define CONFIG_CHROMAHOLD_FILTER 0
#define CONFIG_CHROMAKEY_FILTER 0
#define CONFIG_CHROMAKEY_CUDA_FILTER 0
#define CONFIG_CHROMANR_FILTER 0
#define CONFIG_CHROMASHIFT_FILTER 0
#define CONFIG_CIESCOPE_FILTER 0
@ -1211,14 +1173,12 @@
#define CONFIG_COLORMAP_FILTER 0
#define CONFIG_COLORMATRIX_FILTER 0
#define CONFIG_COLORSPACE_FILTER 0
#define CONFIG_COLORSPACE_CUDA_FILTER 0
#define CONFIG_COLORTEMPERATURE_FILTER 0
#define CONFIG_CONVOLUTION_FILTER 0
#define CONFIG_CONVOLUTION_OPENCL_FILTER 0
#define CONFIG_CONVOLVE_FILTER 0
#define CONFIG_COPY_FILTER 0
#define CONFIG_COREIMAGE_FILTER 0
#define CONFIG_CORR_FILTER 0
#define CONFIG_COVER_RECT_FILTER 0
#define CONFIG_CROP_FILTER 0
#define CONFIG_CROPDETECT_FILTER 0
@ -1401,7 +1361,6 @@
#define CONFIG_READVITC_FILTER 0
#define CONFIG_REALTIME_FILTER 0
#define CONFIG_REMAP_FILTER 0
#define CONFIG_REMAP_OPENCL_FILTER 0
#define CONFIG_REMOVEGRAIN_FILTER 0
#define CONFIG_REMOVELOGO_FILTER 0
#define CONFIG_REPEATFIELDS_FILTER 0
@ -1453,7 +1412,6 @@
#define CONFIG_SPP_FILTER 0
#define CONFIG_SR_FILTER 0
#define CONFIG_SSIM_FILTER 0
#define CONFIG_SSIM360_FILTER 0
#define CONFIG_STEREO3D_FILTER 0
#define CONFIG_STREAMSELECT_FILTER 0
#define CONFIG_SUBTITLES_FILTER 0
@ -1518,12 +1476,6 @@
#define CONFIG_ZMQ_FILTER 0
#define CONFIG_ZOOMPAN_FILTER 0
#define CONFIG_ZSCALE_FILTER 0
#define CONFIG_HSTACK_VAAPI_FILTER 0
#define CONFIG_VSTACK_VAAPI_FILTER 0
#define CONFIG_XSTACK_VAAPI_FILTER 0
#define CONFIG_HSTACK_QSV_FILTER 0
#define CONFIG_VSTACK_QSV_FILTER 0
#define CONFIG_XSTACK_QSV_FILTER 0
#define CONFIG_ALLRGB_FILTER 0
#define CONFIG_ALLYUV_FILTER 0
#define CONFIG_CELLAUTO_FILTER 0
@ -1531,7 +1483,6 @@
#define CONFIG_COLORCHART_FILTER 0
#define CONFIG_COLORSPECTRUM_FILTER 0
#define CONFIG_COREIMAGESRC_FILTER 0
#define CONFIG_DDAGRAB_FILTER 0
#define CONFIG_FREI0R_SRC_FILTER 0
#define CONFIG_GRADIENTS_FILTER 0
#define CONFIG_HALDCLUTSRC_FILTER 0
@ -1550,7 +1501,6 @@
#define CONFIG_TESTSRC2_FILTER 0
#define CONFIG_YUVTESTSRC_FILTER 0
#define CONFIG_NULLSINK_FILTER 0
#define CONFIG_A3DSCOPE_FILTER 0
#define CONFIG_ABITSCOPE_FILTER 0
#define CONFIG_ADRAWGRAPH_FILTER 0
#define CONFIG_AGRAPHMONITOR_FILTER 0
@ -1559,7 +1509,6 @@
#define CONFIG_AVECTORSCOPE_FILTER 0
#define CONFIG_CONCAT_FILTER 0
#define CONFIG_SHOWCQT_FILTER 0
#define CONFIG_SHOWCWT_FILTER 0
#define CONFIG_SHOWFREQS_FILTER 0
#define CONFIG_SHOWSPATIAL_FILTER 0
#define CONFIG_SHOWSPECTRUM_FILTER 0
@ -1593,7 +1542,6 @@
#define CONFIG_AMRNB_DEMUXER 0
#define CONFIG_AMRWB_DEMUXER 0
#define CONFIG_ANM_DEMUXER 0
#define CONFIG_APAC_DEMUXER 0
#define CONFIG_APC_DEMUXER 0
#define CONFIG_APE_DEMUXER 0
#define CONFIG_APM_DEMUXER 0
@ -1627,7 +1575,6 @@
#define CONFIG_BFSTM_DEMUXER 0
#define CONFIG_BRSTM_DEMUXER 0
#define CONFIG_BOA_DEMUXER 0
#define CONFIG_BONK_DEMUXER 0
#define CONFIG_C93_DEMUXER 0
#define CONFIG_CAF_DEMUXER 0
#define CONFIG_CAVSVIDEO_DEMUXER 0
@ -1712,7 +1659,6 @@
#define CONFIG_JV_DEMUXER 0
#define CONFIG_KUX_DEMUXER 0
#define CONFIG_KVAG_DEMUXER 0
#define CONFIG_LAF_DEMUXER 0
#define CONFIG_LMLM4_DEMUXER 0
#define CONFIG_LOAS_DEMUXER 0
#define CONFIG_LUODAT_DEMUXER 0
@ -1795,7 +1741,6 @@
#define CONFIG_RAWVIDEO_DEMUXER 0
#define CONFIG_REALTEXT_DEMUXER 0
#define CONFIG_REDSPARK_DEMUXER 0
#define CONFIG_RKA_DEMUXER 0
#define CONFIG_RL2_DEMUXER 0
#define CONFIG_RM_DEMUXER 0
#define CONFIG_ROQ_DEMUXER 0
@ -1811,7 +1756,6 @@
#define CONFIG_SBG_DEMUXER 0
#define CONFIG_SCC_DEMUXER 0
#define CONFIG_SCD_DEMUXER 0
#define CONFIG_SDNS_DEMUXER 0
#define CONFIG_SDP_DEMUXER 0
#define CONFIG_SDR2_DEMUXER 0
#define CONFIG_SDS_DEMUXER 0
@ -1863,8 +1807,6 @@
#define CONFIG_VPLAYER_DEMUXER 0
#define CONFIG_VQF_DEMUXER 0
#define CONFIG_W64_DEMUXER 0
#define CONFIG_WADY_DEMUXER 0
#define CONFIG_WAVARC_DEMUXER 0
#define CONFIG_WAV_DEMUXER 0
#define CONFIG_WC3_DEMUXER 0
#define CONFIG_WEBM_DASH_MANIFEST_DEMUXER 0
@ -1877,7 +1819,6 @@
#define CONFIG_WV_DEMUXER 0
#define CONFIG_XA_DEMUXER 0
#define CONFIG_XBIN_DEMUXER 0
#define CONFIG_XMD_DEMUXER 0
#define CONFIG_XMV_DEMUXER 0
#define CONFIG_XVAG_DEMUXER 0
#define CONFIG_XWMA_DEMUXER 0
@ -1890,7 +1831,6 @@
#define CONFIG_IMAGE_EXR_PIPE_DEMUXER 0
#define CONFIG_IMAGE_GEM_PIPE_DEMUXER 0
#define CONFIG_IMAGE_GIF_PIPE_DEMUXER 0
#define CONFIG_IMAGE_HDR_PIPE_DEMUXER 0
#define CONFIG_IMAGE_J2K_PIPE_DEMUXER 0
#define CONFIG_IMAGE_JPEG_PIPE_DEMUXER 0
#define CONFIG_IMAGE_JPEGLS_PIPE_DEMUXER 0
@ -1898,11 +1838,9 @@
#define CONFIG_IMAGE_PAM_PIPE_DEMUXER 0
#define CONFIG_IMAGE_PBM_PIPE_DEMUXER 0
#define CONFIG_IMAGE_PCX_PIPE_DEMUXER 0
#define CONFIG_IMAGE_PFM_PIPE_DEMUXER 0
#define CONFIG_IMAGE_PGMYUV_PIPE_DEMUXER 0
#define CONFIG_IMAGE_PGM_PIPE_DEMUXER 0
#define CONFIG_IMAGE_PGX_PIPE_DEMUXER 0
#define CONFIG_IMAGE_PHM_PIPE_DEMUXER 0
#define CONFIG_IMAGE_PHOTOCD_PIPE_DEMUXER 0
#define CONFIG_IMAGE_PICTOR_PIPE_DEMUXER 0
#define CONFIG_IMAGE_PNG_PIPE_DEMUXER 0
@ -2105,7 +2043,6 @@
#define CONFIG_CONCATF_PROTOCOL 0
#define CONFIG_CRYPTO_PROTOCOL 0
#define CONFIG_DATA_PROTOCOL 0
#define CONFIG_FD_PROTOCOL 0
#define CONFIG_FFRTMPCRYPT_PROTOCOL 0
#define CONFIG_FFRTMPHTTP_PROTOCOL 0
#define CONFIG_FILE_PROTOCOL 0
@ -2149,6 +2086,6 @@
#define CONFIG_LIBSSH_PROTOCOL 0
#define CONFIG_LIBSMBCLIENT_PROTOCOL 0
#define CONFIG_LIBZMQ_PROTOCOL 0
#define CONFIG_IPFS_GATEWAY_PROTOCOL 0
#define CONFIG_IPNS_GATEWAY_PROTOCOL 0
#define CONFIG_IPFS_PROTOCOL 0
#define CONFIG_IPNS_PROTOCOL 0
#endif /* FFMPEG_CONFIG_COMPONENTS_H */


@ -22,8 +22,6 @@ if CONFIG['FFVPX_ASFLAGS']:
LOCAL_INCLUDES += ['/media/ffvpx']
USE_LIBS += [ 'gkcodecs' ]
# We allow warnings for third-party code that can be updated from upstream.
AllowCompilerWarnings()


@ -1,8 +1,6 @@
/*
* This file is part of FFmpeg.
*
* Copyright (c) 2023 J. Dekker <jdek@itanimul.li>
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
@ -18,15 +16,6 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
.macro clip min, max, regs:vararg
.irp x, \regs
smax \x, \x, \min
.endr
.irp x, \regs
smin \x, \x, \max
.endr
.endm
.macro transpose_8x8B r0, r1, r2, r3, r4, r5, r6, r7, r8, r9
trn1 \r8\().8B, \r0\().8B, \r1\().8B
trn2 \r9\().8B, \r0\().8B, \r1\().8B


@ -798,7 +798,7 @@ extern const FFCodec ff_libvorbis_decoder;
extern const FFCodec ff_libvpx_vp8_encoder;
extern const FFCodec ff_libvpx_vp8_decoder;
extern FFCodec ff_libvpx_vp9_encoder;
extern const FFCodec ff_libvpx_vp9_decoder;
extern FFCodec ff_libvpx_vp9_decoder;
/* preferred over libwebp */
extern const FFCodec ff_libwebp_anim_encoder;
extern const FFCodec ff_libwebp_encoder;


@ -20,14 +20,11 @@
#include "config_components.h"
#include "libavutil/hdr_dynamic_metadata.h"
#include "libavutil/film_grain_params.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "av1dec.h"
#include "atsc_a53.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "decode.h"
@ -648,7 +645,6 @@ fail:
static av_cold int av1_decode_free(AVCodecContext *avctx)
{
AV1DecContext *s = avctx->priv_data;
AV1RawMetadataITUTT35 itut_t35;
for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
av1_frame_unref(avctx, &s->ref[i]);
@ -659,14 +655,8 @@ static av_cold int av1_decode_free(AVCodecContext *avctx)
av_buffer_unref(&s->seq_ref);
av_buffer_unref(&s->header_ref);
av_buffer_unref(&s->cll_ref);
av_buffer_unref(&s->mdcv_ref);
av_freep(&s->tile_group_info);
while (s->itut_t35_fifo && av_fifo_read(s->itut_t35_fifo, &itut_t35, 1) >= 0)
av_buffer_unref(&itut_t35.payload_ref);
av_fifo_freep2(&s->itut_t35_fifo);
ff_cbs_fragment_free(&s->current_obu);
ff_cbs_close(&s->cbc);
@ -752,16 +742,6 @@ static int update_context_with_frame_header(AVCodecContext *avctx,
return 0;
}
static const CodedBitstreamUnitType decompose_unit_types[] = {
AV1_OBU_FRAME,
AV1_OBU_FRAME_HEADER,
AV1_OBU_METADATA,
AV1_OBU_REDUNDANT_FRAME_HEADER,
AV1_OBU_SEQUENCE_HEADER,
AV1_OBU_TEMPORAL_DELIMITER,
AV1_OBU_TILE_GROUP,
};
static av_cold int av1_decode_init(AVCodecContext *avctx)
{
AV1DecContext *s = avctx->priv_data;
@ -791,14 +771,6 @@ static av_cold int av1_decode_init(AVCodecContext *avctx)
if (ret < 0)
return ret;
s->cbc->decompose_unit_types = decompose_unit_types;
s->cbc->nb_decompose_unit_types = FF_ARRAY_ELEMS(decompose_unit_types);
s->itut_t35_fifo = av_fifo_alloc2(1, sizeof(AV1RawMetadataITUTT35),
AV_FIFO_FLAG_AUTO_GROW);
if (!s->itut_t35_fifo)
return AVERROR(ENOMEM);
av_opt_set_int(s->cbc->priv_data, "operating_point", s->operating_point, 0);
if (avctx->extradata && avctx->extradata_size) {
@ -880,108 +852,6 @@ fail:
return ret;
}
static int export_itut_t35(AVCodecContext *avctx, AVFrame *frame,
const AV1RawMetadataITUTT35 *itut_t35)
{
GetByteContext gb;
int ret, provider_code;
bytestream2_init(&gb, itut_t35->payload, itut_t35->payload_size);
provider_code = bytestream2_get_be16(&gb);
switch (provider_code) {
case 0x31: { // atsc_provider_code
uint32_t user_identifier = bytestream2_get_be32(&gb);
switch (user_identifier) {
case MKBETAG('G', 'A', '9', '4'): { // closed captions
AVBufferRef *buf = NULL;
ret = ff_parse_a53_cc(&buf, gb.buffer, bytestream2_get_bytes_left(&gb));
if (ret < 0)
return ret;
if (!ret)
break;
if (!av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_A53_CC, buf))
av_buffer_unref(&buf);
avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
break;
}
default: // ignore unsupported identifiers
break;
}
break;
}
case 0x3C: { // smpte_provider_code
AVDynamicHDRPlus *hdrplus;
int provider_oriented_code = bytestream2_get_be16(&gb);
int application_identifier = bytestream2_get_byte(&gb);
if (itut_t35->itu_t_t35_country_code != 0xB5 ||
provider_oriented_code != 1 || application_identifier != 4)
break;
hdrplus = av_dynamic_hdr_plus_create_side_data(frame);
if (!hdrplus)
return AVERROR(ENOMEM);
ret = av_dynamic_hdr_plus_from_t35(hdrplus, gb.buffer,
bytestream2_get_bytes_left(&gb));
if (ret < 0)
return ret;
break;
}
default: // ignore unsupported provider codes
break;
}
return 0;
}
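/* Payload layout handled above: after the T.35 country code comes a 16-bit
 * provider code. 0x31 (ATSC) with user identifier 'GA94' carries A/53 closed
 * captions; 0x3C (SMPTE) with country code 0xB5, provider-oriented code 1 and
 * application identifier 4 carries SMPTE ST 2094-40 (HDR10+) dynamic metadata. */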
static int export_metadata(AVCodecContext *avctx, AVFrame *frame)
{
AV1DecContext *s = avctx->priv_data;
AV1RawMetadataITUTT35 itut_t35;
int ret = 0;
if (s->mdcv) {
AVMasteringDisplayMetadata *mastering = av_mastering_display_metadata_create_side_data(frame);
if (!mastering)
return AVERROR(ENOMEM);
for (int i = 0; i < 3; i++) {
mastering->display_primaries[i][0] = av_make_q(s->mdcv->primary_chromaticity_x[i], 1 << 16);
mastering->display_primaries[i][1] = av_make_q(s->mdcv->primary_chromaticity_y[i], 1 << 16);
}
mastering->white_point[0] = av_make_q(s->mdcv->white_point_chromaticity_x, 1 << 16);
mastering->white_point[1] = av_make_q(s->mdcv->white_point_chromaticity_y, 1 << 16);
mastering->max_luminance = av_make_q(s->mdcv->luminance_max, 1 << 8);
mastering->min_luminance = av_make_q(s->mdcv->luminance_min, 1 << 14);
mastering->has_primaries = 1;
mastering->has_luminance = 1;
}
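/* The AV1 metadata OBU stores the chromaticities as 0.16 fixed point and
 * luminance_max / luminance_min as 24.8 / 18.14 fixed point, hence the
 * 1 << 16, 1 << 8 and 1 << 14 denominators above. */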
if (s->cll) {
AVContentLightMetadata *light = av_content_light_metadata_create_side_data(frame);
if (!light)
return AVERROR(ENOMEM);
light->MaxCLL = s->cll->max_cll;
light->MaxFALL = s->cll->max_fall;
}
while (av_fifo_read(s->itut_t35_fifo, &itut_t35, 1) >= 0) {
if (ret >= 0)
ret = export_itut_t35(avctx, frame, &itut_t35);
av_buffer_unref(&itut_t35.payload_ref);
}
return ret;
}
static int export_film_grain(AVCodecContext *avctx, AVFrame *frame)
{
AV1DecContext *s = avctx->priv_data;
@ -1058,12 +928,6 @@ static int set_output_frame(AVCodecContext *avctx, AVFrame *frame,
if (ret < 0)
return ret;
ret = export_metadata(avctx, frame);
if (ret < 0) {
av_frame_unref(frame);
return ret;
}
if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) {
ret = export_film_grain(avctx, frame);
if (ret < 0) {
@ -1074,11 +938,7 @@ static int set_output_frame(AVCodecContext *avctx, AVFrame *frame,
frame->pts = pkt->pts;
frame->pkt_dts = pkt->dts;
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
frame->pkt_size = pkt->size;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
*got_frame = 1;
@ -1313,47 +1173,7 @@ static int av1_decode_frame(AVCodecContext *avctx, AVFrame *frame,
case AV1_OBU_TILE_LIST:
case AV1_OBU_TEMPORAL_DELIMITER:
case AV1_OBU_PADDING:
break;
case AV1_OBU_METADATA:
switch (obu->obu.metadata.metadata_type) {
case AV1_METADATA_TYPE_HDR_CLL:
av_buffer_unref(&s->cll_ref);
s->cll_ref = av_buffer_ref(unit->content_ref);
if (!s->cll_ref) {
s->cll = NULL;
ret = AVERROR(ENOMEM);
goto end;
}
s->cll = &obu->obu.metadata.metadata.hdr_cll;
break;
case AV1_METADATA_TYPE_HDR_MDCV:
av_buffer_unref(&s->mdcv_ref);
s->mdcv_ref = av_buffer_ref(unit->content_ref);
if (!s->mdcv_ref) {
s->mdcv = NULL;
ret = AVERROR(ENOMEM);
goto end;
}
s->mdcv = &obu->obu.metadata.metadata.hdr_mdcv;
break;
case AV1_METADATA_TYPE_ITUT_T35: {
AV1RawMetadataITUTT35 itut_t35;
memcpy(&itut_t35, &obu->obu.metadata.metadata.itut_t35, sizeof(itut_t35));
itut_t35.payload_ref = av_buffer_ref(obu->obu.metadata.metadata.itut_t35.payload_ref);
if (!itut_t35.payload_ref) {
ret = AVERROR(ENOMEM);
goto end;
}
ret = av_fifo_write(s->itut_t35_fifo, &itut_t35, 1);
if (ret < 0) {
av_buffer_unref(&itut_t35.payload_ref);
goto end;
}
break;
}
default:
break;
}
break;
default:
av_log(avctx, AV_LOG_DEBUG,
@ -1398,7 +1218,6 @@ end:
static void av1_decode_flush(AVCodecContext *avctx)
{
AV1DecContext *s = avctx->priv_data;
AV1RawMetadataITUTT35 itut_t35;
for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++)
av1_frame_unref(avctx, &s->ref[i]);
@ -1407,10 +1226,6 @@ static void av1_decode_flush(AVCodecContext *avctx)
s->operating_point_idc = 0;
s->raw_frame_header = NULL;
s->raw_seq = NULL;
s->cll = NULL;
s->mdcv = NULL;
while (av_fifo_read(s->itut_t35_fifo, &itut_t35, 1) >= 0)
av_buffer_unref(&itut_t35.payload_ref);
ff_cbs_flush(s->cbc);
}

View file

@ -23,7 +23,6 @@
#include <stdint.h>
#include "libavutil/fifo.h"
#include "libavutil/buffer.h"
#include "libavutil/frame.h"
#include "libavutil/pixfmt.h"
@ -74,13 +73,6 @@ typedef struct AV1DecContext {
AVBufferRef *header_ref;
AV1RawFrameHeader *raw_frame_header;
TileGroupInfo *tile_group_info;
AVBufferRef *cll_ref;
AV1RawMetadataHDRCLL *cll;
AVBufferRef *mdcv_ref;
AV1RawMetadataHDRMDCV *mdcv;
AVFifo *itut_t35_fifo;
uint16_t tile_num;
uint16_t tg_start;
uint16_t tg_end;

View file

@ -771,13 +771,11 @@ typedef struct AVCodecContext {
*/
float dark_masking;
#if FF_API_SLICE_OFFSET
/**
* slice count
* - encoding: Set by libavcodec.
* - decoding: Set by user (or 0).
*/
attribute_deprecated
int slice_count;
/**
@ -785,9 +783,7 @@ typedef struct AVCodecContext {
* - encoding: Set/allocated by libavcodec.
* - decoding: Set/allocated by user (or NULL).
*/
attribute_deprecated
int *slice_offset;
#endif
/**
* sample aspect ratio (0 if unknown)
@ -1588,19 +1584,12 @@ typedef struct AVCodecContext {
#define FF_PROFILE_DNXHR_HQX 4
#define FF_PROFILE_DNXHR_444 5
#define FF_PROFILE_DTS 20
#define FF_PROFILE_DTS_ES 30
#define FF_PROFILE_DTS_96_24 40
#define FF_PROFILE_DTS_HD_HRA 50
#define FF_PROFILE_DTS_HD_MA 60
#define FF_PROFILE_DTS_EXPRESS 70
#define FF_PROFILE_DTS_HD_MA_X 61
#define FF_PROFILE_DTS_HD_MA_X_IMAX 62
#define FF_PROFILE_EAC3_DDP_ATMOS 30
#define FF_PROFILE_TRUEHD_ATMOS 30
#define FF_PROFILE_DTS 20
#define FF_PROFILE_DTS_ES 30
#define FF_PROFILE_DTS_96_24 40
#define FF_PROFILE_DTS_HD_HRA 50
#define FF_PROFILE_DTS_HD_MA 60
#define FF_PROFILE_DTS_EXPRESS 70
#define FF_PROFILE_MPEG2_422 0
#define FF_PROFILE_MPEG2_HIGH 1
@ -2429,16 +2418,9 @@ int avcodec_parameters_to_context(AVCodecContext *codec,
* avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
* retrieving a codec.
*
* Depending on the codec, you might need to set options in the codec context
* also for decoding (e.g. width, height, or the pixel or audio sample format in
* the case the information is not available in the bitstream, as when decoding
* raw audio or video).
* @note Always call this function before using decoding routines (such as
* @ref avcodec_receive_frame()).
*
* Options in the codec context can be set either by setting them in the options
* AVDictionary, or by setting the values in the context itself, directly or by
* using the av_opt_set() API before calling this function.
*
* Example:
* @code
* av_dict_set(&opts, "b", "2.5M", 0);
* codec = avcodec_find_decoder(AV_CODEC_ID_H264);
@ -2451,36 +2433,17 @@ int avcodec_parameters_to_context(AVCodecContext *codec,
* exit(1);
* @endcode
*
* In the case AVCodecParameters are available (e.g. when demuxing a stream
* using libavformat, and accessing the AVStream contained in the demuxer), the
* codec parameters can be copied to the codec context using
* avcodec_parameters_to_context(), as in the following example:
*
* @code
* AVStream *stream = ...;
* context = avcodec_alloc_context3(codec);
* if (avcodec_parameters_to_context(context, stream->codecpar) < 0)
* exit(1);
* if (avcodec_open2(context, codec, NULL) < 0)
* exit(1);
* @endcode
*
* @note Always call this function before using decoding routines (such as
* @ref avcodec_receive_frame()).
*
* @param avctx The context to initialize.
* @param codec The codec to open this context for. If a non-NULL codec has been
* previously passed to avcodec_alloc_context3() for this context,
* then this parameter MUST be either NULL or
* equal to the previously passed codec.
* @param options A dictionary filled with AVCodecContext and codec-private
* options, which are set on top of the options already set in
* avctx, can be NULL. On return this object will be filled with
* options that were not found in the avctx codec context.
* @param options A dictionary filled with AVCodecContext and codec-private options.
* On return this object will be filled with options that were not found.
*
* @return zero on success, a negative value on error
* @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(),
* av_dict_set(), av_opt_set(), av_opt_find(), avcodec_parameters_to_context()
* av_dict_set(), av_opt_find().
*/
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);
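/* A minimal sketch of the open flow documented above, assuming a stream
 * demuxed with libavformat (error handling elided; the variable names are
 * illustrative, not part of this API):
 *
 *   const AVCodec *codec = avcodec_find_decoder(stream->codecpar->codec_id);
 *   AVCodecContext *ctx  = avcodec_alloc_context3(codec);
 *   AVDictionary *opts   = NULL;
 *   av_dict_set(&opts, "threads", "auto", 0);
 *   if (avcodec_parameters_to_context(ctx, stream->codecpar) < 0)
 *       exit(1);
 *   if (avcodec_open2(ctx, codec, &opts) < 0)
 *       exit(1);
 *   av_dict_free(&opts);
 */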

View file

@ -2931,7 +2931,6 @@ static const AVCodecDescriptor codec_descriptors[] = {
.name = "eac3",
.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),
.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
.profiles = NULL_IF_CONFIG_SMALL(ff_eac3_profiles),
},
{
.id = AV_CODEC_ID_SIPR,
@ -2960,7 +2959,6 @@ static const AVCodecDescriptor codec_descriptors[] = {
.name = "truehd",
.long_name = NULL_IF_CONFIG_SMALL("TrueHD"),
.props = AV_CODEC_PROP_LOSSLESS,
.profiles = NULL_IF_CONFIG_SMALL(ff_truehd_profiles),
},
{
.id = AV_CODEC_ID_MP4ALS,

View file

@ -16,32 +16,5 @@ static const FFCodec * const codec_list[] = {
#endif
#if CONFIG_AV1_DECODER
&ff_av1_decoder,
#endif
#if CONFIG_LIBVORBIS_DECODER
&ff_libvorbis_decoder,
#endif
#if CONFIG_PCM_ALAW_DECODER
&ff_pcm_alaw_decoder,
#endif
#if CONFIG_PCM_F32LE_DECODER
&ff_pcm_f32le_decoder,
#endif
#if CONFIG_PCM_MULAW_DECODER
&ff_pcm_mulaw_decoder,
#endif
#if CONFIG_PCM_S16LE_DECODER
&ff_pcm_s16le_decoder,
#endif
#if CONFIG_PCM_S24LE_DECODER
&ff_pcm_s24le_decoder,
#endif
#if CONFIG_PCM_S32LE_DECODER
&ff_pcm_s32le_decoder,
#endif
#if CONFIG_PCM_U8_DECODER
&ff_pcm_u8_decoder,
#endif
#if CONFIG_LIBOPUS_DECODER
&ff_libopus_decoder,
#endif
NULL };

View file

@ -139,10 +139,8 @@ static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
av_packet_unref(avci->last_pkt_props);
if (pkt) {
ret = av_packet_copy_props(avci->last_pkt_props, pkt);
#if FF_API_FRAME_PKT
if (!ret)
avci->last_pkt_props->stream_index = pkt->size; // Needed for ff_decode_frame_props().
#endif
avci->last_pkt_props->opaque = (void *)(intptr_t)pkt->size; // Needed for ff_decode_frame_props().
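/* pkt->size is stashed in last_pkt_props (opaque on the current path,
   stream_index on the deprecated FF_API_FRAME_PKT path above) so that
   ff_decode_frame_props() can later fill frame->pkt_size without keeping
   the whole packet around. */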
}
return ret;
}
@ -289,12 +287,8 @@ static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame,
if (!(codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
frame->pkt_dts = pkt->dts;
if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
if(!avctx->has_b_frames)
frame->pkt_pos = pkt->pos;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
//FIXME these should be under if(!avctx->has_b_frames)
/* get_buffer is supposed to set frame parameters */
if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
@ -466,10 +460,8 @@ FF_ENABLE_DEPRECATION_WARNINGS
pkt->pts = AV_NOPTS_VALUE;
pkt->dts = AV_NOPTS_VALUE;
if (!(codec->caps_internal & FF_CODEC_CAP_SETS_FRAME_PROPS)) {
#if FF_API_FRAME_PKT
// See extract_packet_props() comment.
avci->last_pkt_props->stream_index = avci->last_pkt_props->stream_index - consumed;
#endif
avci->last_pkt_props->opaque = (void *)((intptr_t)avci->last_pkt_props->opaque - consumed);
avci->last_pkt_props->pts = AV_NOPTS_VALUE;
avci->last_pkt_props->dts = AV_NOPTS_VALUE;
}
@ -555,7 +547,6 @@ static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
if (codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_FRAME) {
ret = codec->cb.receive_frame(avctx, frame);
emms_c();
} else
ret = decode_simple_receive_frame(avctx, frame);
@ -1321,13 +1312,9 @@ int ff_decode_frame_props_from_pkt(const AVCodecContext *avctx,
};
frame->pts = pkt->pts;
frame->duration = pkt->duration;
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
frame->pkt_pos = pkt->pos;
frame->duration = pkt->duration;
frame->pkt_size = pkt->size;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
for (int i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
size_t size;
@ -1368,11 +1355,7 @@ int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
int ret = ff_decode_frame_props_from_pkt(avctx, frame, pkt);
if (ret < 0)
return ret;
#if FF_API_FRAME_PKT
FF_DISABLE_DEPRECATION_WARNINGS
frame->pkt_size = pkt->stream_index;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
frame->pkt_size = (int)(intptr_t)pkt->opaque;
}
#if FF_API_REORDERED_OPAQUE
FF_DISABLE_DEPRECATION_WARNINGS

View file

@ -24,7 +24,6 @@
#include "libavutil/avassert.h"
#include "libavutil/cpu.h"
#include "libavutil/film_grain_params.h"
#include "libavutil/hdr_dynamic_metadata.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
@ -512,57 +511,29 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
if (p->itut_t35) {
GetByteContext gb;
int provider_code;
unsigned int user_identifier;
bytestream2_init(&gb, p->itut_t35->payload, p->itut_t35->payload_size);
bytestream2_skip(&gb, 1); // terminal provider code
bytestream2_skip(&gb, 1); // terminal provider oriented code
user_identifier = bytestream2_get_be32(&gb);
switch (user_identifier) {
case MKBETAG('G', 'A', '9', '4'): { // closed captions
AVBufferRef *buf = NULL;
provider_code = bytestream2_get_be16(&gb);
switch (provider_code) {
case 0x31: { // atsc_provider_code
uint32_t user_identifier = bytestream2_get_be32(&gb);
switch (user_identifier) {
case MKBETAG('G', 'A', '9', '4'): { // closed captions
AVBufferRef *buf = NULL;
res = ff_parse_a53_cc(&buf, gb.buffer, bytestream2_get_bytes_left(&gb));
if (res < 0)
goto fail;
if (!res)
break;
if (!av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_A53_CC, buf))
av_buffer_unref(&buf);
c->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
break;
}
default: // ignore unsupported identifiers
break;
}
break;
}
case 0x3C: { // smpte_provider_code
AVDynamicHDRPlus *hdrplus;
int provider_oriented_code = bytestream2_get_be16(&gb);
int application_identifier = bytestream2_get_byte(&gb);
if (p->itut_t35->country_code != 0xB5 ||
provider_oriented_code != 1 || application_identifier != 4)
break;
hdrplus = av_dynamic_hdr_plus_create_side_data(frame);
if (!hdrplus) {
res = AVERROR(ENOMEM);
goto fail;
}
res = av_dynamic_hdr_plus_from_t35(hdrplus, gb.buffer,
bytestream2_get_bytes_left(&gb));
res = ff_parse_a53_cc(&buf, gb.buffer, bytestream2_get_bytes_left(&gb));
if (res < 0)
goto fail;
if (!res)
break;
if (!av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_A53_CC, buf))
av_buffer_unref(&buf);
c->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
break;
}
default: // ignore unsupported provider codes
default: // ignore unsupported identifiers
break;
}
}

View file

@ -1,47 +0,0 @@
/*
* libopus encoder/decoder common code
* Copyright (c) 2012 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <opus_defines.h>
#include "libavutil/error.h"
#include "libopus.h"
int ff_opus_error_to_averror(int err)
{
switch (err) {
case OPUS_BAD_ARG:
return AVERROR(EINVAL);
case OPUS_BUFFER_TOO_SMALL:
return AVERROR_UNKNOWN;
case OPUS_INTERNAL_ERROR:
return AVERROR(EFAULT);
case OPUS_INVALID_PACKET:
return AVERROR_INVALIDDATA;
case OPUS_UNIMPLEMENTED:
return AVERROR(ENOSYS);
case OPUS_INVALID_STATE:
return AVERROR_UNKNOWN;
case OPUS_ALLOC_FAIL:
return AVERROR(ENOMEM);
default:
return AVERROR(EINVAL);
}
}

View file

@ -1,27 +0,0 @@
/*
* libopus encoder/decoder common code
* Copyright (c) 2012 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_LIBOPUS_H
#define AVCODEC_LIBOPUS_H
int ff_opus_error_to_averror(int err);
#endif /* AVCODEC_LIBOPUS_H */

View file

@ -1,252 +0,0 @@
/*
* Opus decoder using libopus
* Copyright (c) 2012 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <opus.h>
#include <opus_multistream.h>
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "codec_internal.h"
#include "decode.h"
#include "internal.h"
#include "mathops.h"
#include "libopus.h"
#include "vorbis_data.h"
struct libopus_context {
AVClass *class;
OpusMSDecoder *dec;
int pre_skip;
#ifndef OPUS_SET_GAIN
union { int i; double d; } gain;
#endif
#ifdef OPUS_SET_PHASE_INVERSION_DISABLED_REQUEST
int apply_phase_inv;
#endif
};
#define OPUS_HEAD_SIZE 19
static av_cold int libopus_decode_init(AVCodecContext *avc)
{
struct libopus_context *opus = avc->priv_data;
int ret, channel_map = 0, gain_db = 0, nb_streams, nb_coupled, channels;
uint8_t mapping_arr[8] = { 0, 1 }, *mapping;
channels = avc->extradata_size >= 10 ? avc->extradata[9] : (avc->ch_layout.nb_channels == 1) ? 1 : 2;
if (channels <= 0) {
av_log(avc, AV_LOG_WARNING,
"Invalid number of channels %d, defaulting to stereo\n", channels);
channels = 2;
}
avc->sample_rate = 48000;
avc->sample_fmt = avc->request_sample_fmt == AV_SAMPLE_FMT_FLT ?
AV_SAMPLE_FMT_FLT : AV_SAMPLE_FMT_S16;
av_channel_layout_uninit(&avc->ch_layout);
if (channels > 8) {
avc->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
avc->ch_layout.nb_channels = channels;
} else {
av_channel_layout_copy(&avc->ch_layout, &ff_vorbis_ch_layouts[channels - 1]);
}
if (avc->extradata_size >= OPUS_HEAD_SIZE) {
opus->pre_skip = AV_RL16(avc->extradata + 10);
gain_db = sign_extend(AV_RL16(avc->extradata + 16), 16);
channel_map = AV_RL8 (avc->extradata + 18);
}
if (avc->extradata_size >= OPUS_HEAD_SIZE + 2 + channels) {
nb_streams = avc->extradata[OPUS_HEAD_SIZE + 0];
nb_coupled = avc->extradata[OPUS_HEAD_SIZE + 1];
if (nb_streams + nb_coupled != channels)
av_log(avc, AV_LOG_WARNING, "Inconsistent channel mapping.\n");
mapping = avc->extradata + OPUS_HEAD_SIZE + 2;
} else {
if (channels > 2 || channel_map) {
av_log(avc, AV_LOG_ERROR,
"No channel mapping for %d channels.\n", channels);
return AVERROR(EINVAL);
}
nb_streams = 1;
nb_coupled = channels > 1;
mapping = mapping_arr;
}
if (channels > 2 && channels <= 8) {
const uint8_t *vorbis_offset = ff_vorbis_channel_layout_offsets[channels - 1];
int ch;
/* Remap channels from Vorbis order to ffmpeg order */
for (ch = 0; ch < channels; ch++)
mapping_arr[ch] = mapping[vorbis_offset[ch]];
mapping = mapping_arr;
}
opus->dec = opus_multistream_decoder_create(avc->sample_rate, channels,
nb_streams, nb_coupled,
mapping, &ret);
if (!opus->dec) {
av_log(avc, AV_LOG_ERROR, "Unable to create decoder: %s\n",
opus_strerror(ret));
return ff_opus_error_to_averror(ret);
}
#ifdef OPUS_SET_GAIN
ret = opus_multistream_decoder_ctl(opus->dec, OPUS_SET_GAIN(gain_db));
if (ret != OPUS_OK)
av_log(avc, AV_LOG_WARNING, "Failed to set gain: %s\n",
opus_strerror(ret));
#else
{
double gain_lin = ff_exp10(gain_db / (20.0 * 256));
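/* OpusHead stores the output gain as Q7.8 dB (1/256 dB steps), hence the
   256 in the divisor; e.g. gain_db = 256 means +1 dB, so
   gain_lin = 10^(1/20) ~= 1.122. */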
if (avc->sample_fmt == AV_SAMPLE_FMT_FLT)
opus->gain.d = gain_lin;
else
opus->gain.i = FFMIN(gain_lin * 65536, INT_MAX);
}
#endif
#ifdef OPUS_SET_PHASE_INVERSION_DISABLED_REQUEST
ret = opus_multistream_decoder_ctl(opus->dec,
OPUS_SET_PHASE_INVERSION_DISABLED(!opus->apply_phase_inv));
if (ret != OPUS_OK)
av_log(avc, AV_LOG_WARNING,
"Unable to set phase inversion: %s\n",
opus_strerror(ret));
#endif
/* Decoder delay (in samples) at 48kHz */
avc->delay = avc->internal->skip_samples = opus->pre_skip;
return 0;
}
static av_cold int libopus_decode_close(AVCodecContext *avc)
{
struct libopus_context *opus = avc->priv_data;
if (opus->dec) {
opus_multistream_decoder_destroy(opus->dec);
opus->dec = NULL;
}
return 0;
}
#define MAX_FRAME_SIZE (960 * 6)
static int libopus_decode(AVCodecContext *avc, AVFrame *frame,
int *got_frame_ptr, AVPacket *pkt)
{
struct libopus_context *opus = avc->priv_data;
int ret, nb_samples;
frame->nb_samples = MAX_FRAME_SIZE;
if ((ret = ff_get_buffer(avc, frame, 0)) < 0)
return ret;
if (avc->sample_fmt == AV_SAMPLE_FMT_S16)
nb_samples = opus_multistream_decode(opus->dec, pkt->data, pkt->size,
(opus_int16 *)frame->data[0],
frame->nb_samples, 0);
else
nb_samples = opus_multistream_decode_float(opus->dec, pkt->data, pkt->size,
(float *)frame->data[0],
frame->nb_samples, 0);
if (nb_samples < 0) {
av_log(avc, AV_LOG_ERROR, "Decoding error: %s\n",
opus_strerror(nb_samples));
return ff_opus_error_to_averror(nb_samples);
}
#ifndef OPUS_SET_GAIN
{
int i = avc->ch_layout.nb_channels * nb_samples;
if (avc->sample_fmt == AV_SAMPLE_FMT_FLT) {
float *pcm = (float *)frame->data[0];
for (; i > 0; i--, pcm++)
*pcm = av_clipf(*pcm * opus->gain.d, -1, 1);
} else {
int16_t *pcm = (int16_t *)frame->data[0];
for (; i > 0; i--, pcm++)
*pcm = av_clip_int16(((int64_t)opus->gain.i * *pcm) >> 16);
}
}
#endif
frame->nb_samples = nb_samples;
*got_frame_ptr = 1;
return pkt->size;
}
static void libopus_flush(AVCodecContext *avc)
{
struct libopus_context *opus = avc->priv_data;
opus_multistream_decoder_ctl(opus->dec, OPUS_RESET_STATE);
/* The stream may have been extracted by a tool that is not Opus-aware.
Therefore, any packet can become the first of the stream. */
avc->internal->skip_samples = opus->pre_skip;
}
#define OFFSET(x) offsetof(struct libopus_context, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption libopusdec_options[] = {
#ifdef OPUS_SET_PHASE_INVERSION_DISABLED_REQUEST
{ "apply_phase_inv", "Apply intensity stereo phase inversion", OFFSET(apply_phase_inv), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
#endif
{ NULL },
};
static const AVClass libopusdec_class = {
.class_name = "libopusdec",
.item_name = av_default_item_name,
.option = libopusdec_options,
.version = LIBAVUTIL_VERSION_INT,
};
const FFCodec ff_libopus_decoder = {
.p.name = "libopus",
CODEC_LONG_NAME("libopus Opus"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_OPUS,
.priv_data_size = sizeof(struct libopus_context),
.init = libopus_decode_init,
.close = libopus_decode_close,
FF_CODEC_DECODE_CB(libopus_decode),
.flush = libopus_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
.p.priv_class = &libopusdec_class,
.p.wrapper_name = "libopus",
};

View file

@ -1,224 +0,0 @@
/*
* Copyright (c) 2002 Mark Hills <mark@pogo.org.uk>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <vorbis/vorbisenc.h>
#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "decode.h"
typedef struct OggVorbisDecContext {
vorbis_info vi; /**< vorbis_info used during init */
vorbis_dsp_state vd; /**< DSP state used for analysis */
vorbis_block vb; /**< vorbis_block used for analysis */
vorbis_comment vc; /**< VorbisComment info */
ogg_packet op; /**< ogg packet */
} OggVorbisDecContext;
static int oggvorbis_decode_close(AVCodecContext *avccontext);
static int oggvorbis_decode_init(AVCodecContext *avccontext) {
OggVorbisDecContext *context = avccontext->priv_data ;
uint8_t *p= avccontext->extradata;
int i, hsizes[3], ret;
unsigned char *headers[3], *extradata = avccontext->extradata;
if(! avccontext->extradata_size || ! p) {
av_log(avccontext, AV_LOG_ERROR, "vorbis extradata absent\n");
return AVERROR(EINVAL);
}
vorbis_info_init(&context->vi) ;
vorbis_comment_init(&context->vc) ;
if(p[0] == 0 && p[1] == 30) {
int sizesum = 0;
for(i = 0; i < 3; i++){
hsizes[i] = bytestream_get_be16((const uint8_t **)&p);
sizesum += 2 + hsizes[i];
if (sizesum > avccontext->extradata_size) {
av_log(avccontext, AV_LOG_ERROR, "vorbis extradata too small\n");
ret = AVERROR_INVALIDDATA;
goto error;
}
headers[i] = p;
p += hsizes[i];
}
} else if(*p == 2) {
unsigned int offset = 1;
unsigned int sizesum = 1;
p++;
for(i=0; i<2; i++) {
hsizes[i] = 0;
while((*p == 0xFF) && (sizesum < avccontext->extradata_size)) {
hsizes[i] += 0xFF;
offset++;
sizesum += 1 + 0xFF;
p++;
}
hsizes[i] += *p;
offset++;
sizesum += 1 + *p;
if(sizesum > avccontext->extradata_size) {
av_log(avccontext, AV_LOG_ERROR,
"vorbis header sizes damaged\n");
ret = AVERROR_INVALIDDATA;
goto error;
}
p++;
}
hsizes[2] = avccontext->extradata_size - hsizes[0]-hsizes[1]-offset;
#if 0
av_log(avccontext, AV_LOG_DEBUG,
"vorbis header sizes: %d, %d, %d, / extradata_len is %d \n",
hsizes[0], hsizes[1], hsizes[2], avccontext->extradata_size);
#endif
headers[0] = extradata + offset;
headers[1] = extradata + offset + hsizes[0];
headers[2] = extradata + offset + hsizes[0] + hsizes[1];
} else {
av_log(avccontext, AV_LOG_ERROR,
"vorbis initial header len is wrong: %d\n", *p);
ret = AVERROR_INVALIDDATA;
goto error;
}
for(i=0; i<3; i++){
context->op.b_o_s= i==0;
context->op.bytes = hsizes[i];
context->op.packet = headers[i];
if(vorbis_synthesis_headerin(&context->vi, &context->vc, &context->op)<0){
av_log(avccontext, AV_LOG_ERROR, "%d. vorbis header damaged\n", i+1);
ret = AVERROR_INVALIDDATA;
goto error;
}
}
av_channel_layout_uninit(&avccontext->ch_layout);
avccontext->ch_layout.order = AV_CHANNEL_ORDER_UNSPEC;
avccontext->ch_layout.nb_channels = context->vi.channels;
avccontext->sample_rate = context->vi.rate;
avccontext->sample_fmt = AV_SAMPLE_FMT_S16;
avccontext->time_base= (AVRational){1, avccontext->sample_rate};
vorbis_synthesis_init(&context->vd, &context->vi);
vorbis_block_init(&context->vd, &context->vb);
return 0 ;
error:
oggvorbis_decode_close(avccontext);
return ret;
}
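/* Worked example of the Xiph lacing parsed above (hypothetical sizes):
 * extradata = { 0x02, 0xFF, 0x2E, 0x20, <headers...> } yields
 * hsizes[0] = 0xFF + 0x2E = 301, hsizes[1] = 0x20 = 32 and offset = 4, so
 * headers[0] = extradata + 4 and the third (setup) header gets the remaining
 * extradata_size - 301 - 32 - 4 bytes. */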
static inline int conv(int samples, float **pcm, char *buf, int channels) {
int i, j;
ogg_int16_t *ptr, *data = (ogg_int16_t*)buf ;
float *mono ;
for(i = 0 ; i < channels ; i++){
ptr = &data[i];
mono = pcm[i] ;
for(j = 0 ; j < samples ; j++) {
*ptr = av_clip_int16(mono[j] * 32767.f);
ptr += channels;
}
}
return 0 ;
}
static int oggvorbis_decode_frame(AVCodecContext *avccontext, AVFrame *frame,
int *got_frame_ptr, AVPacket *avpkt)
{
OggVorbisDecContext *context = avccontext->priv_data ;
float **pcm ;
ogg_packet *op= &context->op;
int samples, total_samples, total_bytes;
int ret;
int16_t *output;
if(!avpkt->size){
//FIXME flush
return 0;
}
frame->nb_samples = 8192*4;
if ((ret = ff_get_buffer(avccontext, frame, 0)) < 0)
return ret;
output = (int16_t *)frame->data[0];
op->packet = avpkt->data;
op->bytes = avpkt->size;
// av_log(avccontext, AV_LOG_DEBUG, "%d %d %d %"PRId64" %"PRId64" %d %d\n", op->bytes, op->b_o_s, op->e_o_s, op->granulepos, op->packetno, buf_size, context->vi.rate);
/* for(i=0; i<op->bytes; i++)
av_log(avccontext, AV_LOG_DEBUG, "%02X ", op->packet[i]);
av_log(avccontext, AV_LOG_DEBUG, "\n");*/
if(vorbis_synthesis(&context->vb, op) == 0)
vorbis_synthesis_blockin(&context->vd, &context->vb) ;
total_samples = 0 ;
total_bytes = 0 ;
while((samples = vorbis_synthesis_pcmout(&context->vd, &pcm)) > 0) {
conv(samples, pcm, (char*)output + total_bytes, context->vi.channels) ;
total_bytes += samples * 2 * context->vi.channels ;
total_samples += samples ;
vorbis_synthesis_read(&context->vd, samples) ;
}
frame->nb_samples = total_samples;
*got_frame_ptr = total_samples > 0;
return avpkt->size;
}
static int oggvorbis_decode_close(AVCodecContext *avccontext) {
OggVorbisDecContext *context = avccontext->priv_data ;
vorbis_block_clear(&context->vb);
vorbis_dsp_clear(&context->vd);
vorbis_info_clear(&context->vi) ;
vorbis_comment_clear(&context->vc) ;
return 0 ;
}
const FFCodec ff_libvorbis_decoder = {
.p.name = "libvorbis",
CODEC_LONG_NAME("libvorbis"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_VORBIS,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE,
.priv_data_size = sizeof(OggVorbisDecContext),
.init = oggvorbis_decode_init,
FF_CODEC_DECODE_CB(oggvorbis_decode_frame),
.close = oggvorbis_decode_close,
};

View file

@ -46,9 +46,6 @@ SOURCES += [
'jfdctfst.c',
'jfdctint.c',
'jrevdct.c',
'libopus.c',
'libopusdec.c',
'libvorbisdec.c',
'log2_tab.c',
'mpegaudio.c',
'mpegaudiodata.c',
@ -64,7 +61,6 @@ SOURCES += [
'options.c',
'parser.c',
'parsers.c',
'pcm.c',
'profiles.c',
'pthread.c',
'pthread_frame.c',
@ -75,7 +71,6 @@ SOURCES += [
'utils.c',
'version.c',
'vlc.c',
'vorbis_data.c',
'vorbis_parser.c',
'xiph.c'
]
@ -130,11 +125,6 @@ if not CONFIG['MOZ_FFVPX_AUDIOONLY']:
'mozva'
]
LOCAL_INCLUDES += [
'/media/libopus/include',
'/media/libvorbis',
]
if CONFIG['MOZ_LIBAV_FFT']:
SOURCES += [
'avfft.c',

View file

@ -177,9 +177,7 @@ static const AVOption avcodec_options[] = {
{"xvidmmx", "deprecated, for compatibility only", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_XVID }, INT_MIN, INT_MAX, V|E|D, "idct"},
{"faani", "floating point AAN IDCT", 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_FAAN }, INT_MIN, INT_MAX, V|D|E, "idct"},
{"simpleauto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_IDCT_SIMPLEAUTO }, INT_MIN, INT_MAX, V|E|D, "idct"},
#if FF_API_SLICE_OFFSET
{"slice_count", NULL, OFFSET(slice_count), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, INT_MIN, INT_MAX},
#endif
{"ec", "set error concealment strategy", OFFSET(error_concealment), AV_OPT_TYPE_FLAGS, {.i64 = 3 }, INT_MIN, INT_MAX, V|D, "ec"},
{"guess_mvs", "iterative motion vector (MV) search (slow)", 0, AV_OPT_TYPE_CONST, {.i64 = FF_EC_GUESS_MVS }, INT_MIN, INT_MAX, V|D, "ec"},
{"deblock", "use strong deblock filter for damaged MBs", 0, AV_OPT_TYPE_CONST, {.i64 = FF_EC_DEBLOCK }, INT_MIN, INT_MAX, V|D, "ec"},

View file

@ -1,630 +0,0 @@
/*
* PCM codecs
* Copyright (c) 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* PCM codecs
*/
#include "config.h"
#include "config_components.h"
#include "libavutil/attributes.h"
#include "libavutil/float_dsp.h"
#include "libavutil/reverse.h"
#include "libavutil/thread.h"
#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "decode.h"
#include "encode.h"
#include "pcm_tablegen.h"
static av_cold int pcm_encode_init(AVCodecContext *avctx)
{
avctx->frame_size = 0;
#if !CONFIG_HARDCODED_TABLES
switch (avctx->codec->id) {
#define INIT_ONCE(id, name) \
case AV_CODEC_ID_PCM_ ## id: \
if (CONFIG_PCM_ ## id ## _ENCODER) { \
static AVOnce init_static_once = AV_ONCE_INIT; \
ff_thread_once(&init_static_once, pcm_ ## name ## _tableinit); \
} \
break
INIT_ONCE(ALAW, alaw);
INIT_ONCE(MULAW, ulaw);
INIT_ONCE(VIDC, vidc);
default:
break;
}
#endif
avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec->id);
avctx->block_align = avctx->ch_layout.nb_channels * avctx->bits_per_coded_sample / 8;
avctx->bit_rate = avctx->block_align * 8LL * avctx->sample_rate;
return 0;
}
/**
* Write PCM samples macro
* @param type Datatype of native machine format
* @param endian bytestream_put_xxx() suffix
* @param src Source pointer (variable name)
* @param dst Destination pointer (variable name)
* @param n Total number of samples (variable name)
* @param shift Bitshift (bits)
* @param offset Sample value offset
*/
#define ENCODE(type, endian, src, dst, n, shift, offset) \
samples_ ## type = (const type *) src; \
for (; n > 0; n--) { \
register type v = (*samples_ ## type++ >> shift) + offset; \
bytestream_put_ ## endian(&dst, v); \
}
#define ENCODE_PLANAR(type, endian, dst, n, shift, offset) \
n /= avctx->ch_layout.nb_channels; \
for (c = 0; c < avctx->ch_layout.nb_channels; c++) { \
int i; \
samples_ ## type = (const type *) frame->extended_data[c]; \
for (i = n; i > 0; i--) { \
register type v = (*samples_ ## type++ >> shift) + offset; \
bytestream_put_ ## endian(&dst, v); \
} \
}
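/* For illustration, ENCODE(uint16_t, le16, samples, dst, n, 0, 0x8000) --
 * the AV_CODEC_ID_PCM_U16LE case below -- expands to:
 *
 *   samples_uint16_t = (const uint16_t *) samples;
 *   for (; n > 0; n--) {
 *       register uint16_t v = (*samples_uint16_t++ >> 0) + 0x8000;
 *       bytestream_put_le16(&dst, v);
 *   }
 *
 * i.e. the signed native samples are biased to unsigned and written
 * little-endian. */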
static int pcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
const AVFrame *frame, int *got_packet_ptr)
{
int n, c, sample_size, v, ret;
const short *samples;
unsigned char *dst;
const uint8_t *samples_uint8_t;
const int16_t *samples_int16_t;
const int32_t *samples_int32_t;
const int64_t *samples_int64_t;
const uint16_t *samples_uint16_t;
const uint32_t *samples_uint32_t;
sample_size = av_get_bits_per_sample(avctx->codec->id) / 8;
n = frame->nb_samples * avctx->ch_layout.nb_channels;
samples = (const short *)frame->data[0];
if ((ret = ff_get_encode_buffer(avctx, avpkt, n * sample_size, 0)) < 0)
return ret;
dst = avpkt->data;
switch (avctx->codec->id) {
case AV_CODEC_ID_PCM_U32LE:
ENCODE(uint32_t, le32, samples, dst, n, 0, 0x80000000)
break;
case AV_CODEC_ID_PCM_U32BE:
ENCODE(uint32_t, be32, samples, dst, n, 0, 0x80000000)
break;
case AV_CODEC_ID_PCM_S24LE:
ENCODE(int32_t, le24, samples, dst, n, 8, 0)
break;
case AV_CODEC_ID_PCM_S24LE_PLANAR:
ENCODE_PLANAR(int32_t, le24, dst, n, 8, 0)
break;
case AV_CODEC_ID_PCM_S24BE:
ENCODE(int32_t, be24, samples, dst, n, 8, 0)
break;
case AV_CODEC_ID_PCM_U24LE:
ENCODE(uint32_t, le24, samples, dst, n, 8, 0x800000)
break;
case AV_CODEC_ID_PCM_U24BE:
ENCODE(uint32_t, be24, samples, dst, n, 8, 0x800000)
break;
case AV_CODEC_ID_PCM_S24DAUD:
for (; n > 0; n--) {
uint32_t tmp = ff_reverse[(*samples >> 8) & 0xff] +
(ff_reverse[*samples & 0xff] << 8);
tmp <<= 4; // sync flags would go here
bytestream_put_be24(&dst, tmp);
samples++;
}
break;
case AV_CODEC_ID_PCM_U16LE:
ENCODE(uint16_t, le16, samples, dst, n, 0, 0x8000)
break;
case AV_CODEC_ID_PCM_U16BE:
ENCODE(uint16_t, be16, samples, dst, n, 0, 0x8000)
break;
case AV_CODEC_ID_PCM_S8:
ENCODE(uint8_t, byte, samples, dst, n, 0, -128)
break;
case AV_CODEC_ID_PCM_S8_PLANAR:
ENCODE_PLANAR(uint8_t, byte, dst, n, 0, -128)
break;
#if HAVE_BIGENDIAN
case AV_CODEC_ID_PCM_S64LE:
case AV_CODEC_ID_PCM_F64LE:
ENCODE(int64_t, le64, samples, dst, n, 0, 0)
break;
case AV_CODEC_ID_PCM_S32LE:
case AV_CODEC_ID_PCM_F32LE:
ENCODE(int32_t, le32, samples, dst, n, 0, 0)
break;
case AV_CODEC_ID_PCM_S32LE_PLANAR:
ENCODE_PLANAR(int32_t, le32, dst, n, 0, 0)
break;
case AV_CODEC_ID_PCM_S16LE:
ENCODE(int16_t, le16, samples, dst, n, 0, 0)
break;
case AV_CODEC_ID_PCM_S16LE_PLANAR:
ENCODE_PLANAR(int16_t, le16, dst, n, 0, 0)
break;
case AV_CODEC_ID_PCM_F64BE:
case AV_CODEC_ID_PCM_F32BE:
case AV_CODEC_ID_PCM_S64BE:
case AV_CODEC_ID_PCM_S32BE:
case AV_CODEC_ID_PCM_S16BE:
#else
case AV_CODEC_ID_PCM_S64BE:
case AV_CODEC_ID_PCM_F64BE:
ENCODE(int64_t, be64, samples, dst, n, 0, 0)
break;
case AV_CODEC_ID_PCM_F32BE:
case AV_CODEC_ID_PCM_S32BE:
ENCODE(int32_t, be32, samples, dst, n, 0, 0)
break;
case AV_CODEC_ID_PCM_S16BE:
ENCODE(int16_t, be16, samples, dst, n, 0, 0)
break;
case AV_CODEC_ID_PCM_S16BE_PLANAR:
ENCODE_PLANAR(int16_t, be16, dst, n, 0, 0)
break;
case AV_CODEC_ID_PCM_F64LE:
case AV_CODEC_ID_PCM_F32LE:
case AV_CODEC_ID_PCM_S64LE:
case AV_CODEC_ID_PCM_S32LE:
case AV_CODEC_ID_PCM_S16LE:
#endif /* HAVE_BIGENDIAN */
case AV_CODEC_ID_PCM_U8:
memcpy(dst, samples, n * sample_size);
break;
#if HAVE_BIGENDIAN
case AV_CODEC_ID_PCM_S16BE_PLANAR:
#else
case AV_CODEC_ID_PCM_S16LE_PLANAR:
case AV_CODEC_ID_PCM_S32LE_PLANAR:
#endif /* HAVE_BIGENDIAN */
n /= avctx->ch_layout.nb_channels;
for (c = 0; c < avctx->ch_layout.nb_channels; c++) {
const uint8_t *src = frame->extended_data[c];
bytestream_put_buffer(&dst, src, n * sample_size);
}
break;
case AV_CODEC_ID_PCM_ALAW:
for (; n > 0; n--) {
v = *samples++;
*dst++ = linear_to_alaw[(v + 32768) >> 2];
}
break;
case AV_CODEC_ID_PCM_MULAW:
for (; n > 0; n--) {
v = *samples++;
*dst++ = linear_to_ulaw[(v + 32768) >> 2];
}
break;
case AV_CODEC_ID_PCM_VIDC:
for (; n > 0; n--) {
v = *samples++;
*dst++ = linear_to_vidc[(v + 32768) >> 2];
}
break;
default:
return -1;
}
*got_packet_ptr = 1;
return 0;
}
typedef struct PCMDecode {
short table[256];
void (*vector_fmul_scalar)(float *dst, const float *src, float mul,
int len);
float scale;
} PCMDecode;
static av_cold int pcm_decode_init(AVCodecContext *avctx)
{
PCMDecode *s = avctx->priv_data;
AVFloatDSPContext *fdsp;
int i;
switch (avctx->codec_id) {
case AV_CODEC_ID_PCM_ALAW:
for (i = 0; i < 256; i++)
s->table[i] = alaw2linear(i);
break;
case AV_CODEC_ID_PCM_MULAW:
for (i = 0; i < 256; i++)
s->table[i] = ulaw2linear(i);
break;
case AV_CODEC_ID_PCM_VIDC:
for (i = 0; i < 256; i++)
s->table[i] = vidc2linear(i);
break;
case AV_CODEC_ID_PCM_F16LE:
case AV_CODEC_ID_PCM_F24LE:
if (avctx->bits_per_coded_sample < 1 || avctx->bits_per_coded_sample > 24)
return AVERROR_INVALIDDATA;
s->scale = 1. / (1 << (avctx->bits_per_coded_sample - 1));
fdsp = avpriv_float_dsp_alloc(0);
if (!fdsp)
return AVERROR(ENOMEM);
s->vector_fmul_scalar = fdsp->vector_fmul_scalar;
av_free(fdsp);
break;
default:
break;
}
avctx->sample_fmt = avctx->codec->sample_fmts[0];
if (avctx->sample_fmt == AV_SAMPLE_FMT_S32)
avctx->bits_per_raw_sample = av_get_bits_per_sample(avctx->codec_id);
return 0;
}
/**
* Read PCM samples macro
* @param size Data size of native machine format
* @param endian bytestream_get_xxx() endian suffix
* @param src Source pointer (variable name)
* @param dst Destination pointer (variable name)
* @param n Total number of samples (variable name)
* @param shift Bitshift (bits)
* @param offset Sample value offset
*/
#define DECODE(size, endian, src, dst, n, shift, offset) \
for (; n > 0; n--) { \
uint ## size ## _t v = bytestream_get_ ## endian(&src); \
AV_WN ## size ## A(dst, (uint ## size ## _t)(v - offset) << shift); \
dst += size / 8; \
}
#define DECODE_PLANAR(size, endian, src, dst, n, shift, offset) \
n /= channels; \
for (c = 0; c < avctx->ch_layout.nb_channels; c++) { \
int i; \
dst = frame->extended_data[c]; \
for (i = n; i > 0; i--) { \
uint ## size ## _t v = bytestream_get_ ## endian(&src); \
AV_WN ## size ## A(dst, (uint ## size ##_t)(v - offset) << shift); \
dst += size / 8; \
} \
}
static int pcm_decode_frame(AVCodecContext *avctx, AVFrame *frame,
int *got_frame_ptr, AVPacket *avpkt)
{
const uint8_t *src = avpkt->data;
int buf_size = avpkt->size;
PCMDecode *s = avctx->priv_data;
int channels = avctx->ch_layout.nb_channels;
int sample_size, c, n, ret, samples_per_block;
uint8_t *samples;
int32_t *dst_int32_t;
sample_size = av_get_bits_per_sample(avctx->codec_id) / 8;
/* av_get_bits_per_sample returns 0 for AV_CODEC_ID_PCM_DVD */
samples_per_block = 1;
if (avctx->codec_id == AV_CODEC_ID_PCM_LXF) {
/* we process 40-bit blocks per channel for LXF */
samples_per_block = 2;
sample_size = 5;
}
if (sample_size == 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid sample_size\n");
return AVERROR(EINVAL);
}
if (channels == 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
return AVERROR(EINVAL);
}
if (avctx->codec_id != avctx->codec->id) {
av_log(avctx, AV_LOG_ERROR, "codec ids mismatch\n");
return AVERROR(EINVAL);
}
n = channels * sample_size;
if (n && buf_size % n) {
if (buf_size < n) {
av_log(avctx, AV_LOG_ERROR,
"Invalid PCM packet, data has size %d but at least a size of %d was expected\n",
buf_size, n);
return AVERROR_INVALIDDATA;
} else
buf_size -= buf_size % n;
}
n = buf_size / sample_size;
/* get output buffer */
frame->nb_samples = n * samples_per_block / channels;
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
return ret;
samples = frame->data[0];
switch (avctx->codec_id) {
case AV_CODEC_ID_PCM_U32LE:
DECODE(32, le32, src, samples, n, 0, 0x80000000)
break;
case AV_CODEC_ID_PCM_U32BE:
DECODE(32, be32, src, samples, n, 0, 0x80000000)
break;
case AV_CODEC_ID_PCM_S24LE:
DECODE(32, le24, src, samples, n, 8, 0)
break;
case AV_CODEC_ID_PCM_S24LE_PLANAR:
DECODE_PLANAR(32, le24, src, samples, n, 8, 0);
break;
case AV_CODEC_ID_PCM_S24BE:
DECODE(32, be24, src, samples, n, 8, 0)
break;
case AV_CODEC_ID_PCM_U24LE:
DECODE(32, le24, src, samples, n, 8, 0x800000)
break;
case AV_CODEC_ID_PCM_U24BE:
DECODE(32, be24, src, samples, n, 8, 0x800000)
break;
case AV_CODEC_ID_PCM_S24DAUD:
for (; n > 0; n--) {
uint32_t v = bytestream_get_be24(&src);
v >>= 4; // sync flags are here
AV_WN16A(samples, ff_reverse[(v >> 8) & 0xff] +
(ff_reverse[v & 0xff] << 8));
samples += 2;
}
break;
case AV_CODEC_ID_PCM_U16LE:
DECODE(16, le16, src, samples, n, 0, 0x8000)
break;
case AV_CODEC_ID_PCM_U16BE:
DECODE(16, be16, src, samples, n, 0, 0x8000)
break;
case AV_CODEC_ID_PCM_S8:
for (; n > 0; n--)
*samples++ = *src++ + 128;
break;
case AV_CODEC_ID_PCM_SGA:
for (; n > 0; n--) {
int sign = *src >> 7;
int magn = *src & 0x7f;
*samples++ = sign ? 128 - magn : 128 + magn;
src++;
}
break;
case AV_CODEC_ID_PCM_S8_PLANAR:
n /= avctx->ch_layout.nb_channels;
for (c = 0; c < avctx->ch_layout.nb_channels; c++) {
int i;
samples = frame->extended_data[c];
for (i = n; i > 0; i--)
*samples++ = *src++ + 128;
}
break;
#if HAVE_BIGENDIAN
case AV_CODEC_ID_PCM_S64LE:
case AV_CODEC_ID_PCM_F64LE:
DECODE(64, le64, src, samples, n, 0, 0)
break;
case AV_CODEC_ID_PCM_S32LE:
case AV_CODEC_ID_PCM_F32LE:
case AV_CODEC_ID_PCM_F24LE:
case AV_CODEC_ID_PCM_F16LE:
DECODE(32, le32, src, samples, n, 0, 0)
break;
case AV_CODEC_ID_PCM_S32LE_PLANAR:
DECODE_PLANAR(32, le32, src, samples, n, 0, 0);
break;
case AV_CODEC_ID_PCM_S16LE:
DECODE(16, le16, src, samples, n, 0, 0)
break;
case AV_CODEC_ID_PCM_S16LE_PLANAR:
DECODE_PLANAR(16, le16, src, samples, n, 0, 0);
break;
case AV_CODEC_ID_PCM_F64BE:
case AV_CODEC_ID_PCM_F32BE:
case AV_CODEC_ID_PCM_S64BE:
case AV_CODEC_ID_PCM_S32BE:
case AV_CODEC_ID_PCM_S16BE:
#else
case AV_CODEC_ID_PCM_S64BE:
case AV_CODEC_ID_PCM_F64BE:
DECODE(64, be64, src, samples, n, 0, 0)
break;
case AV_CODEC_ID_PCM_F32BE:
case AV_CODEC_ID_PCM_S32BE:
DECODE(32, be32, src, samples, n, 0, 0)
break;
case AV_CODEC_ID_PCM_S16BE:
DECODE(16, be16, src, samples, n, 0, 0)
break;
case AV_CODEC_ID_PCM_S16BE_PLANAR:
DECODE_PLANAR(16, be16, src, samples, n, 0, 0);
break;
case AV_CODEC_ID_PCM_F64LE:
case AV_CODEC_ID_PCM_F32LE:
case AV_CODEC_ID_PCM_F24LE:
case AV_CODEC_ID_PCM_F16LE:
case AV_CODEC_ID_PCM_S64LE:
case AV_CODEC_ID_PCM_S32LE:
case AV_CODEC_ID_PCM_S16LE:
#endif /* HAVE_BIGENDIAN */
case AV_CODEC_ID_PCM_U8:
memcpy(samples, src, n * sample_size);
break;
#if HAVE_BIGENDIAN
case AV_CODEC_ID_PCM_S16BE_PLANAR:
#else
case AV_CODEC_ID_PCM_S16LE_PLANAR:
case AV_CODEC_ID_PCM_S32LE_PLANAR:
#endif /* HAVE_BIGENDIAN */
n /= avctx->ch_layout.nb_channels;
for (c = 0; c < avctx->ch_layout.nb_channels; c++) {
samples = frame->extended_data[c];
bytestream_get_buffer(&src, samples, n * sample_size);
}
break;
case AV_CODEC_ID_PCM_ALAW:
case AV_CODEC_ID_PCM_MULAW:
case AV_CODEC_ID_PCM_VIDC:
for (; n > 0; n--) {
AV_WN16A(samples, s->table[*src++]);
samples += 2;
}
break;
case AV_CODEC_ID_PCM_LXF:
{
int i;
n /= channels;
for (c = 0; c < channels; c++) {
dst_int32_t = (int32_t *)frame->extended_data[c];
for (i = 0; i < n; i++) {
// extract low 20 bits and expand to 32 bits
*dst_int32_t++ = ((uint32_t)src[2]<<28) |
(src[1] << 20) |
(src[0] << 12) |
((src[2] & 0x0F) << 8) |
src[1];
// extract high 20 bits and expand to 32 bits
*dst_int32_t++ = ((uint32_t)src[4]<<24) |
(src[3] << 16) |
((src[2] & 0xF0) << 8) |
(src[4] << 4) |
(src[3] >> 4);
src += 5;
}
}
break;
}
default:
return -1;
}
if (avctx->codec_id == AV_CODEC_ID_PCM_F16LE ||
avctx->codec_id == AV_CODEC_ID_PCM_F24LE) {
s->vector_fmul_scalar((float *)frame->extended_data[0],
(const float *)frame->extended_data[0],
s->scale, FFALIGN(frame->nb_samples * avctx->ch_layout.nb_channels, 4));
emms_c();
}
*got_frame_ptr = 1;
return buf_size;
}
#define PCM_ENCODER_0(id_, sample_fmt_, name_, long_name_)
#define PCM_ENCODER_1(id_, sample_fmt_, name_, long_name_) \
const FFCodec ff_ ## name_ ## _encoder = { \
.p.name = #name_, \
CODEC_LONG_NAME(long_name_), \
.p.type = AVMEDIA_TYPE_AUDIO, \
.p.id = AV_CODEC_ID_ ## id_, \
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_VARIABLE_FRAME_SIZE | \
AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, \
.init = pcm_encode_init, \
FF_CODEC_ENCODE_CB(pcm_encode_frame), \
.p.sample_fmts = (const enum AVSampleFormat[]){ sample_fmt_, \
AV_SAMPLE_FMT_NONE }, \
}
#define PCM_ENCODER_2(cf, id, sample_fmt, name, long_name) \
PCM_ENCODER_ ## cf(id, sample_fmt, name, long_name)
#define PCM_ENCODER_3(cf, id, sample_fmt, name, long_name) \
PCM_ENCODER_2(cf, id, sample_fmt, name, long_name)
#define PCM_ENCODER(id, sample_fmt, name, long_name) \
PCM_ENCODER_3(CONFIG_ ## id ## _ENCODER, id, sample_fmt, name, long_name)
#define PCM_DECODER_0(id, sample_fmt, name, long_name)
#define PCM_DECODER_1(id_, sample_fmt_, name_, long_name_) \
const FFCodec ff_ ## name_ ## _decoder = { \
.p.name = #name_, \
CODEC_LONG_NAME(long_name_), \
.p.type = AVMEDIA_TYPE_AUDIO, \
.p.id = AV_CODEC_ID_ ## id_, \
.priv_data_size = sizeof(PCMDecode), \
.init = pcm_decode_init, \
FF_CODEC_DECODE_CB(pcm_decode_frame), \
.p.capabilities = AV_CODEC_CAP_DR1, \
.p.sample_fmts = (const enum AVSampleFormat[]){ sample_fmt_, \
AV_SAMPLE_FMT_NONE }, \
}
#define PCM_DECODER_2(cf, id, sample_fmt, name, long_name) \
PCM_DECODER_ ## cf(id, sample_fmt, name, long_name)
#define PCM_DECODER_3(cf, id, sample_fmt, name, long_name) \
PCM_DECODER_2(cf, id, sample_fmt, name, long_name)
#define PCM_DECODER(id, sample_fmt, name, long_name) \
PCM_DECODER_3(CONFIG_ ## id ## _DECODER, id, sample_fmt, name, long_name)
#define PCM_CODEC(id, sample_fmt_, name, long_name_) \
PCM_ENCODER(id, sample_fmt_, name, long_name_); \
PCM_DECODER(id, sample_fmt_, name, long_name_)
/* Note: Do not forget to add new entries to the Makefile as well. */
PCM_CODEC (PCM_ALAW, AV_SAMPLE_FMT_S16, pcm_alaw, "PCM A-law / G.711 A-law");
PCM_DECODER(PCM_F16LE, AV_SAMPLE_FMT_FLT, pcm_f16le, "PCM 16.8 floating point little-endian");
PCM_DECODER(PCM_F24LE, AV_SAMPLE_FMT_FLT, pcm_f24le, "PCM 24.0 floating point little-endian");
PCM_CODEC (PCM_F32BE, AV_SAMPLE_FMT_FLT, pcm_f32be, "PCM 32-bit floating point big-endian");
PCM_CODEC (PCM_F32LE, AV_SAMPLE_FMT_FLT, pcm_f32le, "PCM 32-bit floating point little-endian");
PCM_CODEC (PCM_F64BE, AV_SAMPLE_FMT_DBL, pcm_f64be, "PCM 64-bit floating point big-endian");
PCM_CODEC (PCM_F64LE, AV_SAMPLE_FMT_DBL, pcm_f64le, "PCM 64-bit floating point little-endian");
PCM_DECODER(PCM_LXF, AV_SAMPLE_FMT_S32P,pcm_lxf, "PCM signed 20-bit little-endian planar");
PCM_CODEC (PCM_MULAW, AV_SAMPLE_FMT_S16, pcm_mulaw, "PCM mu-law / G.711 mu-law");
PCM_CODEC (PCM_S8, AV_SAMPLE_FMT_U8, pcm_s8, "PCM signed 8-bit");
PCM_CODEC (PCM_S8_PLANAR, AV_SAMPLE_FMT_U8P, pcm_s8_planar, "PCM signed 8-bit planar");
PCM_CODEC (PCM_S16BE, AV_SAMPLE_FMT_S16, pcm_s16be, "PCM signed 16-bit big-endian");
PCM_CODEC (PCM_S16BE_PLANAR, AV_SAMPLE_FMT_S16P,pcm_s16be_planar, "PCM signed 16-bit big-endian planar");
PCM_CODEC (PCM_S16LE, AV_SAMPLE_FMT_S16, pcm_s16le, "PCM signed 16-bit little-endian");
PCM_CODEC (PCM_S16LE_PLANAR, AV_SAMPLE_FMT_S16P,pcm_s16le_planar, "PCM signed 16-bit little-endian planar");
PCM_CODEC (PCM_S24BE, AV_SAMPLE_FMT_S32, pcm_s24be, "PCM signed 24-bit big-endian");
PCM_CODEC (PCM_S24DAUD, AV_SAMPLE_FMT_S16, pcm_s24daud, "PCM D-Cinema audio signed 24-bit");
PCM_CODEC (PCM_S24LE, AV_SAMPLE_FMT_S32, pcm_s24le, "PCM signed 24-bit little-endian");
PCM_CODEC (PCM_S24LE_PLANAR, AV_SAMPLE_FMT_S32P,pcm_s24le_planar, "PCM signed 24-bit little-endian planar");
PCM_CODEC (PCM_S32BE, AV_SAMPLE_FMT_S32, pcm_s32be, "PCM signed 32-bit big-endian");
PCM_CODEC (PCM_S32LE, AV_SAMPLE_FMT_S32, pcm_s32le, "PCM signed 32-bit little-endian");
PCM_CODEC (PCM_S32LE_PLANAR, AV_SAMPLE_FMT_S32P,pcm_s32le_planar, "PCM signed 32-bit little-endian planar");
PCM_CODEC (PCM_U8, AV_SAMPLE_FMT_U8, pcm_u8, "PCM unsigned 8-bit");
PCM_CODEC (PCM_U16BE, AV_SAMPLE_FMT_S16, pcm_u16be, "PCM unsigned 16-bit big-endian");
PCM_CODEC (PCM_U16LE, AV_SAMPLE_FMT_S16, pcm_u16le, "PCM unsigned 16-bit little-endian");
PCM_CODEC (PCM_U24BE, AV_SAMPLE_FMT_S32, pcm_u24be, "PCM unsigned 24-bit big-endian");
PCM_CODEC (PCM_U24LE, AV_SAMPLE_FMT_S32, pcm_u24le, "PCM unsigned 24-bit little-endian");
PCM_CODEC (PCM_U32BE, AV_SAMPLE_FMT_S32, pcm_u32be, "PCM unsigned 32-bit big-endian");
PCM_CODEC (PCM_U32LE, AV_SAMPLE_FMT_S32, pcm_u32le, "PCM unsigned 32-bit little-endian");
PCM_CODEC (PCM_S64BE, AV_SAMPLE_FMT_S64, pcm_s64be, "PCM signed 64-bit big-endian");
PCM_CODEC (PCM_S64LE, AV_SAMPLE_FMT_S64, pcm_s64le, "PCM signed 64-bit little-endian");
PCM_CODEC (PCM_VIDC, AV_SAMPLE_FMT_S16, pcm_vidc, "PCM Archimedes VIDC");
PCM_DECODER(PCM_SGA, AV_SAMPLE_FMT_U8, pcm_sga, "PCM SGA");

View file

@ -1,143 +0,0 @@
/*
* Header file for hardcoded PCM tables
*
* Copyright (c) 2010 Reimar Döffinger <Reimar.Doeffinger@gmx.de>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_PCM_TABLEGEN_H
#define AVCODEC_PCM_TABLEGEN_H
#include <stdint.h>
#include "libavutil/attributes.h"
/* from g711.c by SUN microsystems (unrestricted use) */
#define SIGN_BIT (0x80) /* Sign bit for a A-law byte. */
#define QUANT_MASK (0xf) /* Quantization field mask. */
#define NSEGS (8) /* Number of A-law segments. */
#define SEG_SHIFT (4) /* Left shift for segment number. */
#define SEG_MASK (0x70) /* Segment field mask. */
#define BIAS (0x84) /* Bias for linear code. */
#define VIDC_SIGN_BIT (1)
#define VIDC_QUANT_MASK (0x1E)
#define VIDC_QUANT_SHIFT (1)
#define VIDC_SEG_SHIFT (5)
#define VIDC_SEG_MASK (0xE0)
/* alaw2linear() - Convert an A-law value to 16-bit linear PCM */
static av_cold int alaw2linear(unsigned char a_val)
{
int t;
int seg;
a_val ^= 0x55;
t = a_val & QUANT_MASK;
seg = ((unsigned)a_val & SEG_MASK) >> SEG_SHIFT;
if(seg) t= (t + t + 1 + 32) << (seg + 2);
else t= (t + t + 1 ) << 3;
return (a_val & SIGN_BIT) ? t : -t;
}
static av_cold int ulaw2linear(unsigned char u_val)
{
int t;
/* Complement to obtain normal u-law value. */
u_val = ~u_val;
/*
* Extract and bias the quantization bits. Then
* shift up by the segment number and subtract out the bias.
*/
t = ((u_val & QUANT_MASK) << 3) + BIAS;
t <<= ((unsigned)u_val & SEG_MASK) >> SEG_SHIFT;
return (u_val & SIGN_BIT) ? (BIAS - t) : (t - BIAS);
}
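/* Worked example: input byte 0x00 complements to u_val = 0xFF, so
 * t = ((0xF << 3) + BIAS) << 7 = 32256; the sign bit is set, giving
 * BIAS - t = -32124, the most negative value of the mu-law table. */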
static av_cold int vidc2linear(unsigned char u_val)
{
int t;
/*
* Extract and bias the quantization bits. Then
* shift up by the segment number and subtract out the bias.
*/
t = (((u_val & VIDC_QUANT_MASK) >> VIDC_QUANT_SHIFT) << 3) + BIAS;
t <<= ((unsigned)u_val & VIDC_SEG_MASK) >> VIDC_SEG_SHIFT;
return (u_val & VIDC_SIGN_BIT) ? (BIAS - t) : (t - BIAS);
}
#if CONFIG_HARDCODED_TABLES
#define pcm_alaw_tableinit()
#define pcm_ulaw_tableinit()
#define pcm_vidc_tableinit()
#include "libavcodec/pcm_tables.h"
#else
/* 16384 entries per table */
static uint8_t linear_to_alaw[16384];
static uint8_t linear_to_ulaw[16384];
static uint8_t linear_to_vidc[16384];
static av_cold void build_xlaw_table(uint8_t *linear_to_xlaw,
int (*xlaw2linear)(unsigned char),
int mask)
{
int i, j, v, v1, v2;
j = 1;
linear_to_xlaw[8192] = mask;
for(i=0;i<127;i++) {
v1 = xlaw2linear(i ^ mask);
v2 = xlaw2linear((i + 1) ^ mask);
v = (v1 + v2 + 4) >> 3;
for(;j<v;j+=1) {
linear_to_xlaw[8192 - j] = (i ^ (mask ^ 0x80));
linear_to_xlaw[8192 + j] = (i ^ mask);
}
}
for(;j<8192;j++) {
linear_to_xlaw[8192 - j] = (127 ^ (mask ^ 0x80));
linear_to_xlaw[8192 + j] = (127 ^ mask);
}
linear_to_xlaw[0] = linear_to_xlaw[1];
}
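/* build_xlaw_table() inverts xlaw2linear(): for each pair of adjacent
 * codewords it takes the midpoint of their decoded 16-bit values, scales it
 * to the table's index space with the >> 3 (the encoder indexes the table
 * with (sample + 32768) >> 2), and fills every index up to that midpoint
 * with the lower codeword, mirrored around the center entry 8192 for the
 * two signs. */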
static void pcm_alaw_tableinit(void)
{
build_xlaw_table(linear_to_alaw, alaw2linear, 0xd5);
}
static void pcm_ulaw_tableinit(void)
{
build_xlaw_table(linear_to_ulaw, ulaw2linear, 0xff);
}
static void pcm_vidc_tableinit(void)
{
build_xlaw_table(linear_to_vidc, vidc2linear, 0xff);
}
#endif /* CONFIG_HARDCODED_TABLES */
#endif /* AVCODEC_PCM_TABLEGEN_H */

Some files were not shown because too many files have changed in this diff.