Bug 1872519 pass planar reverse stream data to AudioProcessingTrack::NotifyOutputData() r=chunmin
to remove unnecessary deinterleaving. This will facilitate passing the output
to a secondary output device without interleaving.

The AudioChunk is down-mixed directly into the AudioProcessing's input buffer,
rather than through an AudioPacketizer, to skip another one or two copies.

processedFrameCount accounting in TestAudioCallbackDriver.SlowStart is
adjusted to ignore frames processed while waiting for the fallback driver to
stop [1] and to continue counting frames while the driver shuts down.

[1] https://searchfox.org/mozilla-central/rev/6856d0cab9e37dd9eb305f174ff71f0a95b31f82/dom/media/GraphDriver.cpp#873-882

Depends on D198236

Differential Revision: https://phabricator.services.mozilla.com/D198237
parent 129ffddc2b
commit 24db8f79b3
7 changed files with 83 additions and 119 deletions
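The core pattern of the patch is worth seeing in isolation: the mixed output
now arrives as a planar AudioChunk, and each channel is down-mixed into its
own contiguous slot of a 10 ms packet buffer, so the reverse stream never
round-trips through an interleaved buffer. The following is a minimal
standalone sketch of that accumulation scheme, not the tree code: it uses
std::vector and a plain copy where the real code uses AlignedFloatBuffer and
AudioChunk::DownMixTo(), and AnalyzePacket() is a hypothetical stand-in for
AudioProcessing::AnalyzeReverseStream().

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Hypothetical stand-in for AnalyzeReverseStream(): consumes one complete
    // planar packet (one pointer per channel, `frames` samples each).
    static void AnalyzePacket(const std::vector<float*>& channels,
                              size_t frames) {
      std::printf("analyze packet: %zu channels x %zu frames\n",
                  channels.size(), frames);
    }

    // Accumulates planar audio into fixed-size packets without interleaving.
    // Mirrors the shape of AudioInputProcessing::ProcessOutputData() after
    // this patch; names and types are simplified for illustration.
    class ReversePacketBuffer {
     public:
      ReversePacketBuffer(size_t channels, size_t packetFrames)
          : mChannels(channels),
            mPacketFrames(packetFrames),
            mBuffer(channels * packetFrames) {}

      // aPlanar holds mChannels pointers, each to aFrames samples of one
      // channel.
      void Push(const std::vector<const float*>& aPlanar, size_t aFrames) {
        size_t offset = 0;
        do {
          size_t remainder = mPacketFrames - mBuffered;
          size_t take = std::min(remainder, aFrames - offset);
          for (size_t c = 0; c < mChannels; ++c) {
            // Channel c occupies a contiguous mPacketFrames-sample slot.
            std::copy_n(aPlanar[c] + offset, take,
                        mBuffer.data() + c * mPacketFrames + mBuffered);
          }
          mBuffered += take;
          offset += take;
          if (mBuffered == mPacketFrames) {  // complete packet: analyze it
            std::vector<float*> ptrs(mChannels);
            for (size_t c = 0; c < mChannels; ++c) {
              ptrs[c] = mBuffer.data() + c * mPacketFrames;
            }
            AnalyzePacket(ptrs, mPacketFrames);
            mBuffered = 0;
          }
        } while (offset < aFrames);
      }

     private:
      size_t mChannels;
      size_t mPacketFrames;
      size_t mBuffered = 0;        // frames accumulated toward current packet
      std::vector<float> mBuffer;  // planar: channel-major, mPacketFrames each
    };

    int main() {
      ReversePacketBuffer buffer(2, 480);  // stereo, 10 ms at 48 kHz
      std::vector<float> left(128, 0.25f), right(128, -0.25f);
      std::vector<const float*> planar{left.data(), right.data()};
      for (int i = 0; i < 8; ++i) {
        buffer.Push(planar, 128);  // prints on every completed packet
      }
      return 0;
    }

With 128-frame pushes and 480-frame packets, Push() completes a packet on
roughly every fourth call, which is exactly the cadence the do/while loop in
ProcessOutputData() below produces.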
@@ -334,10 +334,6 @@ class AudioCallbackDriver::FallbackWrapper : public GraphInterface {
   bool OnThread() { return mFallbackDriver->OnThread(); }
 
   /* GraphInterface methods */
-  void NotifyOutputData(AudioDataValue* aBuffer, size_t aFrames,
-                        TrackRate aRate, uint32_t aChannels) override {
-    MOZ_CRASH("Unexpected NotifyOutputData from fallback SystemClockDriver");
-  }
   void NotifyInputStopped() override {
     MOZ_CRASH("Unexpected NotifyInputStopped from fallback SystemClockDriver");
   }
@@ -961,14 +957,6 @@ long AudioCallbackDriver::DataCallback(const AudioDataValue* aInputBuffer,
   NaNToZeroInPlace(aOutputBuffer, aFrames * mOutputChannelCount);
 #endif
 
-  // Callback any observers for the AEC speaker data. Note that one
-  // (maybe) of these will be full-duplex, the others will get their input
-  // data off separate cubeb callbacks. Take care with how stuff is
-  // removed/added to this list and TSAN issues, but input and output will
-  // use separate callback methods.
-  Graph()->NotifyOutputData(aOutputBuffer, static_cast<size_t>(aFrames),
-                            mSampleRate, mOutputChannelCount);
-
 #ifdef XP_MACOSX
   // This only happens when the output is on a macbookpro's external speaker,
   // that are stereo, but let's just be safe.
@@ -177,10 +177,6 @@ struct GraphInterface : public nsISupports {
     }
   };
 
-  /* Called on the graph thread when there is new output data for listeners.
-   * This is the mixed audio output of this MediaTrackGraph. */
-  virtual void NotifyOutputData(AudioDataValue* aBuffer, size_t aFrames,
-                                TrackRate aRate, uint32_t aChannels) = 0;
   /* Called on the graph thread after an AudioCallbackDriver with an input
    * stream has stopped. */
   virtual void NotifyInputStopped() = 0;
@@ -897,9 +897,7 @@ void MediaTrackGraphImpl::CloseAudioInput(DeviceInputTrack* aTrack) {
 }
 
 // All AudioInput listeners get the same speaker data (at least for now).
-void MediaTrackGraphImpl::NotifyOutputData(AudioDataValue* aBuffer,
-                                           size_t aFrames, TrackRate aRate,
-                                           uint32_t aChannels) {
+void MediaTrackGraphImpl::NotifyOutputData(const AudioChunk& aChunk) {
   if (!mDeviceInputTrackManagerGraphThread.GetNativeInputTrack()) {
     return;
   }
@@ -907,7 +905,7 @@ void MediaTrackGraphImpl::NotifyOutputData(AudioDataValue* aBuffer,
 #if defined(MOZ_WEBRTC)
   for (const auto& track : mTracks) {
     if (const auto& t = track->AsAudioProcessingTrack()) {
-      t->NotifyOutputData(this, aBuffer, aFrames, aRate, aChannels);
+      t->NotifyOutputData(this, aChunk);
     }
   }
 #endif
@@ -1481,6 +1479,11 @@ void MediaTrackGraphImpl::Process(MixerCallbackReceiver* aMixerReceiver) {
   }
   AudioChunk* outputChunk = mMixer.MixedChunk();
   if (!outputDeviceEntry.mReceiver) {  // primary output
+    // Callback any observers for the AEC speaker data. Note that one
+    // (maybe) of these will be full-duplex, the others will get their input
+    // data off separate cubeb callbacks.
+    NotifyOutputData(*outputChunk);
+
     aMixerReceiver->MixerCallback(outputChunk, mSampleRate);
   } else {
     outputDeviceEntry.mReceiver->EnqueueAudio(*outputChunk);
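This hunk is where the notification moves off the audio callback and onto the
graph's Process() pass: the reverse-stream listeners now see the mixed planar
chunk once per iteration, and only for the primary output device. A toy model
of that routing, with hypothetical names standing in for AudioChunk and
CrossGraphReceiver:

    #include <cstdio>
    #include <functional>
    #include <vector>

    struct MixedChunk {
      std::vector<std::vector<float>> mChannels;  // planar, one per channel
    };

    struct OutputDevice {
      bool mIsPrimary = false;
      std::vector<MixedChunk> mQueue;  // stands in for CrossGraphReceiver
    };

    // Mirrors the branch above: the primary device's pass notifies the AEC
    // reverse-stream listeners; secondary devices just get the chunk queued.
    static void ProcessOutputs(
        std::vector<OutputDevice>& aDevices, const MixedChunk& aChunk,
        const std::function<void(const MixedChunk&)>& aNotifyReverseStream) {
      for (OutputDevice& device : aDevices) {
        if (device.mIsPrimary) {
          aNotifyReverseStream(aChunk);  // NotifyOutputData(*outputChunk)
        } else {
          device.mQueue.push_back(aChunk);  // EnqueueAudio(*outputChunk)
        }
      }
    }

    int main() {
      std::vector<OutputDevice> devices(2);
      devices[0].mIsPrimary = true;
      MixedChunk chunk{{std::vector<float>(128, 0.f)}};  // 1ch, 128 frames
      ProcessOutputs(devices, chunk, [](const MixedChunk&) {
        std::puts("reverse stream notified once per iteration");
      });
      return 0;
    }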
@@ -480,8 +480,7 @@ class MediaTrackGraphImpl : public MediaTrackGraph,
 
   /* Called on the graph thread when there is new output data for listeners.
    * This is the mixed audio output of this MediaTrackGraph. */
-  void NotifyOutputData(AudioDataValue* aBuffer, size_t aFrames,
-                        TrackRate aRate, uint32_t aChannels) override;
+  void NotifyOutputData(const AudioChunk& aChunk);
   /* Called on the graph thread after an AudioCallbackDriver with an input
    * stream has stopped. */
   void NotifyInputStopped() override;
@@ -26,8 +26,6 @@ class MockGraphInterface : public GraphInterface {
   NS_DECL_THREADSAFE_ISUPPORTS
   explicit MockGraphInterface(TrackRate aSampleRate)
       : mSampleRate(aSampleRate) {}
-  MOCK_METHOD4(NotifyOutputData,
-               void(AudioDataValue*, size_t, TrackRate, uint32_t));
   MOCK_METHOD0(NotifyInputStopped, void());
   MOCK_METHOD5(NotifyInputData, void(const AudioDataValue*, size_t, TrackRate,
                                      uint32_t, uint32_t));
@@ -107,8 +105,6 @@ MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION {
   RefPtr<AudioCallbackDriver> driver;
   auto graph = MakeRefPtr<NiceMock<MockGraphInterface>>(rate);
   EXPECT_CALL(*graph, NotifyInputStopped).Times(0);
-  ON_CALL(*graph, NotifyOutputData)
-      .WillByDefault([&](AudioDataValue*, size_t, TrackRate, uint32_t) {});
 
   driver = MakeRefPtr<AudioCallbackDriver>(graph, nullptr, rate, 2, 0, nullptr,
                                            nullptr, AudioInputType::Unknown);
@@ -144,7 +140,6 @@ void TestSlowStart(const TrackRate aRate) MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION {
   Maybe<int64_t> audioStart;
   Maybe<uint32_t> alreadyBuffered;
   int64_t inputFrameCount = 0;
-  int64_t outputFrameCount = 0;
   int64_t processedFrameCount = 0;
   ON_CALL(*graph, NotifyInputData)
       .WillByDefault([&](const AudioDataValue*, size_t aFrames, TrackRate,
@@ -152,6 +147,9 @@ void TestSlowStart(const TrackRate aRate) MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION {
         if (!audioStart) {
           audioStart = Some(graph->StateComputedTime());
           alreadyBuffered = Some(aAlreadyBuffered);
+          // Reset processedFrameCount to ignore frames processed while waiting
+          // for the fallback driver to stop.
+          processedFrameCount = 0;
         }
         EXPECT_NEAR(inputFrameCount,
                     static_cast<int64_t>(graph->StateComputedTime() -
@@ -163,9 +161,6 @@ void TestSlowStart(const TrackRate aRate) MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION {
             << ", alreadyBuffered=" << *alreadyBuffered;
         inputFrameCount += aFrames;
       });
-  ON_CALL(*graph, NotifyOutputData)
-      .WillByDefault([&](AudioDataValue*, size_t aFrames, TrackRate aRate,
-                         uint32_t) { outputFrameCount += aFrames; });
 
   driver = MakeRefPtr<AudioCallbackDriver>(graph, nullptr, aRate, 2, 2, nullptr,
                                            (void*)1, AudioInputType::Voice);
@@ -198,18 +193,22 @@ void TestSlowStart(const TrackRate aRate) MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION {
         << "Fallback driver iteration <1s (sanity)";
     return graph->IterationCount() >= fallbackIterations;
   });
 
+  MediaEventListener processedListener = stream->FramesProcessedEvent().Connect(
+      AbstractThread::GetCurrent(),
+      [&](uint32_t aFrames) { processedFrameCount += aFrames; });
   stream->Thaw();
 
-  // Wait for at least 100ms of audio data.
-  WaitUntil(stream->FramesProcessedEvent(), [&](uint32_t aFrames) {
-    processedFrameCount += aFrames;
-    return processedFrameCount >= aRate / 10;
-  });
+  SpinEventLoopUntil(
+      "processed at least 100ms of audio data from stream callback"_ns, [&] {
+        return inputFrameCount != 0 && processedFrameCount >= aRate / 10;
+      });
 
   // This will block untill all events have been executed.
   MOZ_KnownLive(driver)->Shutdown();
+  processedListener.Disconnect();
 
-  EXPECT_EQ(inputFrameCount, outputFrameCount);
+  EXPECT_EQ(inputFrameCount, processedFrameCount);
   EXPECT_NEAR(graph->StateComputedTime() - *audioStart,
               inputFrameCount + *alreadyBuffered, WEBAUDIO_BLOCK_SIZE)
       << "Graph progresses while audio driver runs. stateComputedTime="
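The listener wiring and the reset both exist because the audio callback can
already be processing packets while the fallback driver is still driving graph
iterations; those results are handed back to the fallback driver and discarded
([1] above), so the count only becomes meaningful at the first
NotifyInputData, and it must keep accumulating through Shutdown() since
packets are still produced then. A toy model of the accounting, with made-up
frame numbers:

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t inputFrameCount = 0;
      int64_t processedFrameCount = 0;

      // Packets can be processed (and FramesProcessedEvent fired) while the
      // fallback driver still runs; their results are discarded, so they
      // must not be counted.
      processedFrameCount += 384;  // hypothetical pre-handover callbacks

      // First NotifyInputData marks the handover to the AudioCallbackDriver:
      // restart the count, mirroring `processedFrameCount = 0` in the test.
      processedFrameCount = 0;

      // From here on every input callback has a matching processed packet,
      // and packets produced during Shutdown() still count (hence the
      // listener is disconnected only after Shutdown() returns).
      for (int i = 0; i < 10; ++i) {
        inputFrameCount += 480;      // NotifyInputData
        processedFrameCount += 480;  // FramesProcessedEvent
      }

      assert(inputFrameCount == processedFrameCount);  // the test's EXPECT_EQ
      return 0;
    }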
@@ -681,89 +681,65 @@ void AudioInputProcessing::Process(MediaTrackGraph* aGraph, GraphTime aFrom,
 }
 
 void AudioInputProcessing::ProcessOutputData(MediaTrackGraph* aGraph,
-                                             AudioDataValue* aBuffer,
-                                             size_t aFrames, TrackRate aRate,
-                                             uint32_t aChannels) {
+                                             const AudioChunk& aChunk) {
+  MOZ_ASSERT(aChunk.ChannelCount() > 0);
   aGraph->AssertOnGraphThread();
 
   if (!mEnabled || PassThrough(aGraph)) {
     return;
   }
 
-  if (!mPacketizerOutput ||
-      mPacketizerOutput->mPacketSize != GetPacketSize(aRate) ||
-      mPacketizerOutput->mChannels != aChannels) {
+  TrackRate sampleRate = aGraph->GraphRate();
+  uint32_t framesPerPacket = GetPacketSize(sampleRate);  // in frames
+  // Downmix from aChannels to MAX_CHANNELS if needed.
+  uint32_t channelCount =
+      std::min<uint32_t>(aChunk.ChannelCount(), MAX_CHANNELS);
+  if (channelCount != mOutputBufferChannelCount ||
+      channelCount * framesPerPacket != mOutputBuffer.Length()) {
+    mOutputBuffer.SetLength(channelCount * framesPerPacket);
+    mOutputBufferChannelCount = channelCount;
     // It's ok to drop the audio still in the packetizer here: if this changes,
     // we changed devices or something.
-    mPacketizerOutput = Nothing();
-    mPacketizerOutput.emplace(GetPacketSize(aRate), aChannels);
+    mOutputBufferFrameCount = 0;
   }
 
-  mPacketizerOutput->Input(aBuffer, aFrames);
-
-  while (mPacketizerOutput->PacketsAvailable()) {
-    uint32_t samplesPerPacket =
-        mPacketizerOutput->mPacketSize * mPacketizerOutput->mChannels;
-    if (mOutputBuffer.Length() < samplesPerPacket) {
-      mOutputBuffer.SetLength(samplesPerPacket);
-    }
-    if (mDeinterleavedBuffer.Length() < samplesPerPacket) {
-      mDeinterleavedBuffer.SetLength(samplesPerPacket);
-    }
-    float* packet = mOutputBuffer.Data();
-    mPacketizerOutput->Output(packet);
-
-    AutoTArray<float*, MAX_CHANNELS> deinterleavedPacketDataChannelPointers;
-    float* interleavedFarend = nullptr;
-    uint32_t channelCountFarend = 0;
-    uint32_t framesPerPacketFarend = 0;
-
-    // Downmix from aChannels to MAX_CHANNELS if needed. We always have
-    // floats here, the packetized performed the conversion.
-    if (aChannels > MAX_CHANNELS) {
-      AudioConverter converter(
-          AudioConfig(aChannels, 0, AudioConfig::FORMAT_FLT),
-          AudioConfig(MAX_CHANNELS, 0, AudioConfig::FORMAT_FLT));
-      framesPerPacketFarend = mPacketizerOutput->mPacketSize;
-      framesPerPacketFarend =
-          converter.Process(mInputDownmixBuffer, packet, framesPerPacketFarend);
-      interleavedFarend = mInputDownmixBuffer.Data();
-      channelCountFarend = MAX_CHANNELS;
-      deinterleavedPacketDataChannelPointers.SetLength(MAX_CHANNELS);
-    } else {
-      interleavedFarend = packet;
-      channelCountFarend = aChannels;
-      framesPerPacketFarend = mPacketizerOutput->mPacketSize;
-      deinterleavedPacketDataChannelPointers.SetLength(aChannels);
-    }
-
-    MOZ_ASSERT(interleavedFarend &&
-               (channelCountFarend == 1 || channelCountFarend == 2) &&
-               framesPerPacketFarend);
-
-    if (mInputBuffer.Length() < framesPerPacketFarend * channelCountFarend) {
-      mInputBuffer.SetLength(framesPerPacketFarend * channelCountFarend);
-    }
-
-    size_t offset = 0;
-    for (size_t i = 0; i < deinterleavedPacketDataChannelPointers.Length();
-         ++i) {
-      deinterleavedPacketDataChannelPointers[i] = mInputBuffer.Data() + offset;
-      offset += framesPerPacketFarend;
-    }
-
-    // Deinterleave, prepare a channel pointers array, with enough storage for
-    // the frames.
-    DeinterleaveAndConvertBuffer(
-        interleavedFarend, framesPerPacketFarend, channelCountFarend,
-        deinterleavedPacketDataChannelPointers.Elements());
-
-    StreamConfig reverseConfig(aRate, channelCountFarend);
-    DebugOnly<int> err = mAudioProcessing->AnalyzeReverseStream(
-        deinterleavedPacketDataChannelPointers.Elements(), reverseConfig);
-    MOZ_ASSERT(!err, "Could not process the reverse stream.");
-  }
+  TrackTime chunkOffset = 0;
+  AutoTArray<float*, MAX_CHANNELS> channelPtrs;
+  channelPtrs.SetLength(channelCount);
+  do {
+    MOZ_ASSERT(mOutputBufferFrameCount < framesPerPacket);
+    uint32_t packetRemainder = framesPerPacket - mOutputBufferFrameCount;
+    mSubChunk = aChunk;
+    mSubChunk.SliceTo(
+        chunkOffset, std::min(chunkOffset + packetRemainder, aChunk.mDuration));
+    MOZ_ASSERT(mSubChunk.mDuration <= packetRemainder);
+
+    for (uint32_t channel = 0; channel < channelCount; channel++) {
+      channelPtrs[channel] =
+          &mOutputBuffer[channel * framesPerPacket + mOutputBufferFrameCount];
+    }
+    mSubChunk.DownMixTo(channelPtrs);
+
+    chunkOffset += mSubChunk.mDuration;
+    MOZ_ASSERT(chunkOffset <= aChunk.mDuration);
+    mOutputBufferFrameCount += mSubChunk.mDuration;
+    MOZ_ASSERT(mOutputBufferFrameCount <= framesPerPacket);
+
+    if (mOutputBufferFrameCount == framesPerPacket) {
+      // Have a complete packet. Analyze it.
+      for (uint32_t channel = 0; channel < channelCount; channel++) {
+        channelPtrs[channel] = &mOutputBuffer[channel * framesPerPacket];
+      }
+      StreamConfig reverseConfig(sampleRate, channelCount);
+      DebugOnly<int> err = mAudioProcessing->AnalyzeReverseStream(
+          channelPtrs.Elements(), reverseConfig);
+      MOZ_ASSERT(!err, "Could not process the reverse stream.");
+
+      mOutputBufferFrameCount = 0;
+    }
+  } while (chunkOffset < aChunk.mDuration);
+
+  mSubChunk.SetNull(0);
 }
 
 // Only called if we're not in passthrough mode
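For concreteness: at a 48 kHz graph rate the packet is 480 frames (10 ms)
while chunks typically arrive in 128-frame blocks (WEBAUDIO_BLOCK_SIZE), so
most calls only accumulate; roughly every fourth call is split by SliceTo()
into a slice that completes a packet, triggering AnalyzeReverseStream, and a
slice that starts the next one. A small standalone check of that boundary
arithmetic, with the block size assumed:

    #include <algorithm>
    #include <cstdio>

    int main() {
      const int packetFrames = 480;  // 10 ms at 48 kHz
      const int blockFrames = 128;   // typical render quantum (assumed)
      int buffered = 0;  // plays the role of mOutputBufferFrameCount
      for (int block = 0; block < 8; ++block) {
        int offset = 0;
        do {  // same slicing as the do/while in ProcessOutputData()
          int take = std::min(packetFrames - buffered, blockFrames - offset);
          buffered += take;
          offset += take;
          if (buffered == packetFrames) {
            std::printf("block %d: packet complete (slice of %d frames)\n",
                        block, take);
            buffered = 0;
          }
        } while (offset < blockFrames);
      }
      return 0;
    }

Running this prints packet completions at blocks 3 (96-frame slice, 32 frames
carried over) and 7 (64-frame slice, 64 carried over), matching the cadence
described above.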
@@ -1162,14 +1138,11 @@ void AudioProcessingTrack::ProcessInput(GraphTime aFrom, GraphTime aTo,
 }
 
 void AudioProcessingTrack::NotifyOutputData(MediaTrackGraph* aGraph,
-                                            AudioDataValue* aBuffer,
-                                            size_t aFrames, TrackRate aRate,
-                                            uint32_t aChannels) {
+                                            const AudioChunk& aChunk) {
   MOZ_ASSERT(mGraph == aGraph, "Cannot feed audio output to another graph");
   AssertOnGraphThread();
   if (mInputProcessing) {
-    mInputProcessing->ProcessOutputData(aGraph, aBuffer, aFrames, aRate,
-                                        aChannels);
+    mInputProcessing->ProcessOutputData(aGraph, aChunk);
   }
 }
@@ -116,8 +116,7 @@ class AudioInputProcessing : public AudioDataListener {
   void Process(MediaTrackGraph* aGraph, GraphTime aFrom, GraphTime aTo,
                AudioSegment* aInput, AudioSegment* aOutput);
 
-  void ProcessOutputData(MediaTrackGraph* aGraph, AudioDataValue* aBuffer,
-                         size_t aFrames, TrackRate aRate, uint32_t aChannels);
+  void ProcessOutputData(MediaTrackGraph* aGraph, const AudioChunk& aChunk);
   bool IsVoiceInput(MediaTrackGraph* aGraph) const override {
     // If we're passing data directly without AEC or any other process, this
     // means that all voice-processing has been disabled intentionaly. In this
@@ -179,9 +178,6 @@ class AudioInputProcessing : public AudioDataListener {
   // Packetizer to be able to feed 10ms packets to the input side of
   // mAudioProcessing. Not used if the processing is bypassed.
   Maybe<AudioPacketizer<AudioDataValue, float>> mPacketizerInput;
-  // Packetizer to be able to feed 10ms packets to the output side of
-  // mAudioProcessing. Not used if the processing is bypassed.
-  Maybe<AudioPacketizer<AudioDataValue, float>> mPacketizerOutput;
   // The number of channels asked for by content, after clamping to the range of
   // legal channel count for this particular device.
   uint32_t mRequestedInputChannelCount;
@@ -189,9 +185,15 @@ class AudioInputProcessing : public AudioDataListener {
   // because of prefs or constraints. This allows simply copying the audio into
   // the MTG, skipping resampling and the whole webrtc.org code.
   bool mSkipProcessing;
-  // Stores the mixed audio output for the reverse-stream of the AEC (the
-  // speaker data).
+  // Buffer for up to one 10ms packet of planar mixed audio output for the
+  // reverse-stream (speaker data) of mAudioProcessing AEC.
+  // Length is packet size * channel count, regardless of how many frames are
+  // buffered. Not used if the processing is bypassed.
   AlignedFloatBuffer mOutputBuffer;
+  // Number of channels into which mOutputBuffer is divided.
+  uint32_t mOutputBufferChannelCount = 0;
+  // Number of frames buffered in mOutputBuffer for the reverse stream.
+  uint32_t mOutputBufferFrameCount = 0;
   // Stores the input audio, to be processed by the APM.
   AlignedFloatBuffer mInputBuffer;
   // Stores the deinterleaved microphone audio
@@ -209,6 +211,11 @@ class AudioInputProcessing : public AudioDataListener {
   // When processing is enabled, the number of packets received by this
   // instance, to implement periodic logging.
   uint64_t mPacketCount;
+  // Temporary descriptor for a slice of an AudioChunk parameter passed to
+  // ProcessOutputData(). This is a member rather than on the stack so that
+  // any memory allocated for its mChannelData pointer array is not
+  // reallocated on each iteration.
+  AudioChunk mSubChunk;
   // A storage holding the interleaved audio data converted the AudioSegment.
   // This will be used as an input parameter for PacketizeAndProcess. This
   // should be removed once bug 1729041 is done.
@@ -246,8 +253,7 @@ class AudioProcessingTrack : public DeviceInputConsumerTrack {
   }
   // Pass the graph's mixed audio output to mInputProcessing for processing as
   // the reverse stream.
-  void NotifyOutputData(MediaTrackGraph* aGraph, AudioDataValue* aBuffer,
-                        size_t aFrames, TrackRate aRate, uint32_t aChannels);
+  void NotifyOutputData(MediaTrackGraph* aGraph, const AudioChunk& aChunk);
 
   // Any thread
   AudioProcessingTrack* AsAudioProcessingTrack() override { return this; }