Backed out 3 changesets (bug 1421025, bug 1388219) for causing bug 1421706 on a CLOSED TREE.

Backed out changeset 1a69438ec05f (bug 1421025)
Backed out changeset 213c2c200c08 (bug 1388219)
Backed out changeset 341aaeb4ce69 (bug 1388219)
Ryan VanderMeulen 2017-11-29 17:00:27 -05:00
parent 8b1f82ef39
commit 5395ec18d2
11 changed files with 128 additions and 350 deletions

dom/media/systemservices/CamerasParent.cpp

@@ -52,7 +52,7 @@ ResolutionFeasibilityDistance(int32_t candidate, int32_t requested)
if (candidate >= requested) {
distance = (candidate - requested) * 1000 / std::max(candidate, requested);
} else {
distance = 10000 + (requested - candidate) *
distance = (UINT32_MAX / 2) + (requested - candidate) *
1000 / std::max(candidate, requested);
}
return distance;
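For context on this hunk: ResolutionFeasibilityDistance scores a candidate resolution against the requested one, and the disputed line is the penalty added when the candidate is too small, so that any mode requiring upscaling scores worse than every mode that can be downscaled. A minimal standalone sketch with the penalty made a parameter, so the two variants in the hunk (10000 and UINT32_MAX / 2) can be compared side by side; the parameter name and the main() driver are mine:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Same integer math as the hunk above; aHugePenalty selects the
    // UINT32_MAX / 2 variant instead of 10000.
    static uint32_t ResolutionFeasibilityDistance(int32_t candidate, int32_t requested,
                                                  bool aHugePenalty) {
      uint32_t distance;
      if (candidate >= requested) {
        // Large enough: small cost proportional to the overshoot.
        distance = (candidate - requested) * 1000 / std::max(candidate, requested);
      } else {
        // Too small: base penalty plus a proportional cost, so every
        // too-small mode scores worse than any large-enough mode.
        distance = (aHugePenalty ? UINT32_MAX / 2 : 10000) +
                   (requested - candidate) * 1000 / std::max(candidate, requested);
      }
      return distance;
    }

    int main() {
      std::printf("%u\n", ResolutionFeasibilityDistance(1920, 1280, false)); // 333
      std::printf("%u\n", ResolutionFeasibilityDistance(640, 1280, false));  // 10500
      return 0;
    }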
@@ -862,14 +862,14 @@ CamerasParent::RecvStartCapture(const CaptureEngine& aCapEngine,
capability.codecType = static_cast<webrtc::VideoCodecType>(ipcCaps.codecType());
capability.interlaced = ipcCaps.interlaced();
#ifdef DEBUG
auto deviceUniqueID = sDeviceUniqueIDs.find(capnum);
MOZ_ASSERT(deviceUniqueID == sDeviceUniqueIDs.end());
#endif
sDeviceUniqueIDs.emplace(capnum, cap.VideoCapture()->CurrentDeviceName());
sAllRequestedCapabilities.emplace(capnum, capability);
if (aCapEngine == CameraEngine) {
#ifdef DEBUG
auto deviceUniqueID = sDeviceUniqueIDs.find(capnum);
MOZ_ASSERT(deviceUniqueID == sDeviceUniqueIDs.end());
#endif
sDeviceUniqueIDs.emplace(capnum, cap.VideoCapture()->CurrentDeviceName());
sAllRequestedCapabilities.emplace(capnum, capability);
for (const auto &it : sDeviceUniqueIDs) {
if (strcmp(it.second, cap.VideoCapture()->CurrentDeviceName()) == 0) {
capability.width = std::max(
@@ -908,16 +908,6 @@ CamerasParent::RecvStartCapture(const CaptureEngine& aCapEngine,
}
MOZ_ASSERT(minIdx != -1);
capability = candidateCapabilities->second[minIdx];
} else if (aCapEngine == ScreenEngine ||
aCapEngine == BrowserEngine ||
aCapEngine == WinEngine ||
aCapEngine == AppEngine) {
for (const auto &it : sDeviceUniqueIDs) {
if (strcmp(it.second, cap.VideoCapture()->CurrentDeviceName()) == 0) {
capability.maxFPS = std::max(
capability.maxFPS, sAllRequestedCapabilities[it.first].maxFPS);
}
}
}
error = cap.VideoCapture()->StartCapture(capability);
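The sDeviceUniqueIDs / sAllRequestedCapabilities loops in these hunks implement capability merging for shared captures: when several clients capture the same physical device, a new request is widened to the maximum width, height, and frame rate that any client of that device asked for, since the device can only run one mode at a time. A simplified sketch of that merge, with hypothetical local types standing in for the WebRTC ones:

    #include <algorithm>
    #include <cstdint>
    #include <map>
    #include <string>

    // Hypothetical stand-in for webrtc::CaptureCapability.
    struct Capability { int32_t width = 0; int32_t height = 0; int32_t maxFPS = 0; };

    // Per-capture-id bookkeeping, mirroring sDeviceUniqueIDs and
    // sAllRequestedCapabilities in the hunks above.
    static std::map<uint32_t, std::string> sDeviceIds;
    static std::map<uint32_t, Capability> sRequested;

    // Widen aCap so it satisfies every client already capturing aDeviceId.
    static void MergeForSharedDevice(const std::string& aDeviceId, Capability& aCap) {
      for (const auto& it : sDeviceIds) {
        if (it.second == aDeviceId) {
          const Capability& other = sRequested[it.first];
          aCap.width = std::max(aCap.width, other.width);
          aCap.height = std::max(aCap.height, other.height);
          aCap.maxFPS = std::max(aCap.maxFPS, other.maxFPS);
        }
      }
    }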
@@ -959,14 +949,16 @@ CamerasParent::StopCapture(const CaptureEngine& aCapEngine,
mCallbacks[i - 1]->mStreamId == (uint32_t)capnum) {
CallbackHelper* cbh = mCallbacks[i-1];
engine->WithEntry(capnum,[cbh, &capnum](VideoEngine::CaptureEntry& cap){
engine->WithEntry(capnum,[cbh, &capnum, &aCapEngine](VideoEngine::CaptureEntry& cap){
if (cap.VideoCapture()) {
cap.VideoCapture()->DeRegisterCaptureDataCallback(
static_cast<rtc::VideoSinkInterface<webrtc::VideoFrame>*>(cbh));
cap.VideoCapture()->StopCaptureIfAllClientsClose();
sDeviceUniqueIDs.erase(capnum);
sAllRequestedCapabilities.erase(capnum);
if (aCapEngine == CameraEngine) {
sDeviceUniqueIDs.erase(capnum);
sAllRequestedCapabilities.erase(capnum);
}
}
});

dom/media/webrtc/MediaEngine.h

@@ -217,7 +217,6 @@ public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AllocationHandle);
protected:
~AllocationHandle() {}
static uint64_t sId;
public:
AllocationHandle(const dom::MediaTrackConstraints& aConstraints,
const mozilla::ipc::PrincipalInfo& aPrincipalInfo,
@@ -227,15 +226,11 @@ public:
: mConstraints(aConstraints),
mPrincipalInfo(aPrincipalInfo),
mPrefs(aPrefs),
#ifdef MOZ_WEBRTC
mId(sId++),
#endif
mDeviceId(aDeviceId) {}
public:
NormalizedConstraints mConstraints;
mozilla::ipc::PrincipalInfo mPrincipalInfo;
MediaEnginePrefs mPrefs;
uint64_t mId;
nsString mDeviceId;
};
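The sId / mId pair removed above gave every AllocationHandle a process-unique id from a static counter, so the source could later locate the handle's slot in its parallel arrays. A minimal sketch of that pattern (like the original, not thread-safe by itself):

    #include <cstdint>

    // Minimal sketch of the removed static-counter id scheme.
    class AllocationHandleSketch {
      static uint64_t sId;  // next id to hand out
    public:
      const uint64_t mId;
      AllocationHandleSketch() : mId(sId++) {}
    };

    uint64_t AllocationHandleSketch::sId = 0;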
@@ -371,7 +366,6 @@ protected:
virtual nsresult
UpdateSingleSource(const AllocationHandle* aHandle,
const NormalizedConstraints& aNetConstraints,
const NormalizedConstraints& aNewConstraint,
const MediaEnginePrefs& aPrefs,
const nsString& aDeviceId,
const char** aOutBadConstraint) {
@@ -400,7 +394,6 @@ protected:
// aHandle and/or aConstraintsUpdate may be nullptr (see below)
AutoTArray<const NormalizedConstraints*, 10> allConstraints;
AutoTArray<const NormalizedConstraints*, 1> updatedConstraint;
for (auto& registered : mRegisteredHandles) {
if (aConstraintsUpdate && registered.get() == aHandle) {
continue; // Don't count old constraints
@@ -409,13 +402,9 @@ protected:
}
if (aConstraintsUpdate) {
allConstraints.AppendElement(aConstraintsUpdate);
updatedConstraint.AppendElement(aConstraintsUpdate);
} else if (aHandle) {
// In the case of AddShareOfSingleSource, the handle isn't registered yet.
allConstraints.AppendElement(&aHandle->mConstraints);
updatedConstraint.AppendElement(&aHandle->mConstraints);
} else {
updatedConstraint.AppendElements(allConstraints);
}
NormalizedConstraints netConstraints(allConstraints);
@@ -424,8 +413,7 @@ protected:
return NS_ERROR_FAILURE;
}
NormalizedConstraints newConstraint(updatedConstraint);
nsresult rv = UpdateSingleSource(aHandle, netConstraints, newConstraint, aPrefs, aDeviceId,
nsresult rv = UpdateSingleSource(aHandle, netConstraints, aPrefs, aDeviceId,
aOutBadConstraint);
if (NS_FAILED(rv)) {
return rv;

dom/media/webrtc/MediaEngineCameraVideoSource.cpp

@@ -25,20 +25,10 @@ bool MediaEngineCameraVideoSource::AppendToTrack(SourceMediaStream* aSource,
const PrincipalHandle& aPrincipalHandle)
{
MOZ_ASSERT(aSource);
MOZ_ASSERT(aImage);
if (!aImage) {
return 0;
}
VideoSegment segment;
RefPtr<layers::Image> image = aImage;
IntSize size = image->GetSize();
if (!size.width || !size.height) {
return 0;
}
IntSize size(image ? mWidth : 0, image ? mHeight : 0);
segment.AppendFrame(image.forget(), delta, size, aPrincipalHandle);
// This is safe from any thread, and is safe if the track is Finished
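Note the difference in null handling above: one variant asserts and bails on a null image, while the other appends even a null image, with a 0x0 size, so the track still advances by delta before the first real frame arrives. A condensed sketch of the null-tolerant branch, with hypothetical stand-in types:

    #include <cstdint>

    struct IntSize { int32_t width = 0, height = 0; };
    struct Image;  // opaque frame payload

    struct SegmentSketch {
      void AppendFrame(Image* /*aImage*/, int64_t /*aDelta*/, IntSize /*aSize*/) {
        // The real code hands the frame to a VideoSegment; omitted here.
      }
    };

    bool AppendToTrackSketch(SegmentSketch& aSegment, Image* aImage, int64_t aDelta,
                             int32_t aWidth, int32_t aHeight) {
      IntSize size{aImage ? aWidth : 0, aImage ? aHeight : 0};
      aSegment.AppendFrame(aImage, aDelta, size);  // nullptr images are allowed
      return true;
    }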
@@ -64,19 +54,6 @@ MediaEngineCameraVideoSource::GetCapability(size_t aIndex,
aOut = mHardcodedCapabilities.SafeElementAt(aIndex, webrtc::CaptureCapability());
}
uint32_t
MediaEngineCameraVideoSource::GetDistance(
const webrtc::CaptureCapability& aCandidate,
const NormalizedConstraintSet &aConstraints,
const nsString& aDeviceId,
const DistanceCalculation aCalculate) const
{
if (aCalculate == kFeasibility) {
return GetFeasibilityDistance(aCandidate, aConstraints, aDeviceId);
}
return GetFitnessDistance(aCandidate, aConstraints, aDeviceId);
}
uint32_t
MediaEngineCameraVideoSource::GetFitnessDistance(
const webrtc::CaptureCapability& aCandidate,
@@ -98,27 +75,6 @@ MediaEngineCameraVideoSource::GetFitnessDistance(
return uint32_t(std::min(distance, uint64_t(UINT32_MAX)));
}
uint32_t
MediaEngineCameraVideoSource::GetFeasibilityDistance(
const webrtc::CaptureCapability& aCandidate,
const NormalizedConstraintSet &aConstraints,
const nsString& aDeviceId) const
{
// Treat width|height|frameRate == 0 on capability as "can do any".
// This allows for orthogonal capabilities that are not in discrete steps.
uint64_t distance =
uint64_t(FitnessDistance(aDeviceId, aConstraints.mDeviceId)) +
uint64_t(FitnessDistance(mFacingMode, aConstraints.mFacingMode)) +
uint64_t(aCandidate.width? FeasibilityDistance(int32_t(aCandidate.width),
aConstraints.mWidth) : 0) +
uint64_t(aCandidate.height? FeasibilityDistance(int32_t(aCandidate.height),
aConstraints.mHeight) : 0) +
uint64_t(aCandidate.maxFPS? FeasibilityDistance(double(aCandidate.maxFPS),
aConstraints.mFrameRate) : 0);
return uint32_t(std::min(distance, uint64_t(UINT32_MAX)));
}
// Find best capability by removing inferiors. May leave >1 of equal distance
/* static */ void
@@ -262,9 +218,7 @@ bool
MediaEngineCameraVideoSource::ChooseCapability(
const NormalizedConstraints &aConstraints,
const MediaEnginePrefs &aPrefs,
const nsString& aDeviceId,
webrtc::CaptureCapability& aCapability,
const DistanceCalculation aCalculate)
const nsString& aDeviceId)
{
if (MOZ_LOG_TEST(GetMediaManagerLog(), LogLevel::Debug)) {
LOG(("ChooseCapability: prefs: %dx%d @%dfps",
@@ -292,7 +246,7 @@ MediaEngineCameraVideoSource::ChooseCapability(
auto& candidate = candidateSet[i];
webrtc::CaptureCapability cap;
GetCapability(candidate.mIndex, cap);
candidate.mDistance = GetDistance(cap, aConstraints, aDeviceId, aCalculate);
candidate.mDistance = GetFitnessDistance(cap, aConstraints, aDeviceId);
LogCapability("Capability", cap, candidate.mDistance);
if (candidate.mDistance == UINT32_MAX) {
candidateSet.RemoveElementAt(i);
@@ -314,7 +268,7 @@ MediaEngineCameraVideoSource::ChooseCapability(
auto& candidate = candidateSet[i];
webrtc::CaptureCapability cap;
GetCapability(candidate.mIndex, cap);
if (GetDistance(cap, cs, aDeviceId, aCalculate) == UINT32_MAX) {
if (GetFitnessDistance(cap, cs, aDeviceId) == UINT32_MAX) {
rejects.AppendElement(candidate);
candidateSet.RemoveElementAt(i);
} else {
@@ -345,7 +299,7 @@ MediaEngineCameraVideoSource::ChooseCapability(
for (auto& candidate : candidateSet) {
webrtc::CaptureCapability cap;
GetCapability(candidate.mIndex, cap);
candidate.mDistance = GetDistance(cap, normPrefs, aDeviceId, aCalculate);
candidate.mDistance = GetFitnessDistance(cap, normPrefs, aDeviceId);
}
TrimLessFitCandidates(candidateSet);
}
@@ -361,13 +315,13 @@ MediaEngineCameraVideoSource::ChooseCapability(
if (cap.rawType == webrtc::RawVideoType::kVideoI420 ||
cap.rawType == webrtc::RawVideoType::kVideoYUY2 ||
cap.rawType == webrtc::RawVideoType::kVideoYV12) {
aCapability = cap;
mCapability = cap;
found = true;
break;
}
}
if (!found) {
GetCapability(candidateSet[0].mIndex, aCapability);
GetCapability(candidateSet[0].mIndex, mCapability);
}
LogCapability("Chosen capability", mCapability, sameDistance);

dom/media/webrtc/MediaEngineCameraVideoSource.h

@@ -24,19 +24,6 @@ namespace webrtc {
namespace mozilla {
// Fitness distance is defined in
// https://www.w3.org/TR/2017/CR-mediacapture-streams-20171003/#dfn-selectsettings
// The main difference of feasibility and fitness distance is that if the
// constraint is required ('max', or 'exact'), and the settings dictionary's value
// for the constraint does not satisfy the constraint, the fitness distance is
// positive infinity. Given a continuous space of settings dictionaries comprising
// all discrete combinations of dimension and frame-rate related properties,
// the feasibility distance is still in keeping with the constraints algorithm.
enum DistanceCalculation {
kFitness,
kFeasibility
};
class MediaEngineCameraVideoSource : public MediaEngineVideoSource
{
public:
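To make the removed comment's distinction concrete: under a required constraint such as width: {exact: 1280}, a 1920-wide capability has infinite fitness distance (it is culled), but a finite feasibility distance, because the frame can be captured at 1920 and downscaled. Illustrative numbers only, using the 1000-scaled integer math these classes use elsewhere:

    #include <cstdint>

    // width: {exact: 1280} scored against a 1920-wide capability.
    const uint32_t kFitnessDistance = UINT32_MAX;  // required constraint violated:
                                                   // positive infinity, candidate culled
    const uint32_t kFeasibilityDistance =          // finite: capture at 1920, downscale
        (1920 - 1280) * 1000u / 1920;              // = 333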
@@ -99,16 +86,9 @@ protected:
TrackID aID,
StreamTime delta,
const PrincipalHandle& aPrincipalHandle);
uint32_t GetDistance(const webrtc::CaptureCapability& aCandidate,
const NormalizedConstraintSet &aConstraints,
const nsString& aDeviceId,
const DistanceCalculation aCalculate) const;
uint32_t GetFitnessDistance(const webrtc::CaptureCapability& aCandidate,
const NormalizedConstraintSet &aConstraints,
const nsString& aDeviceId) const;
uint32_t GetFeasibilityDistance(const webrtc::CaptureCapability& aCandidate,
const NormalizedConstraintSet &aConstraints,
const nsString& aDeviceId) const;
static void TrimLessFitCandidates(CapabilitySet& set);
static void LogConstraints(const NormalizedConstraintSet& aConstraints);
static void LogCapability(const char* aHeader,
@@ -116,13 +96,9 @@ protected:
uint32_t aDistance);
virtual size_t NumCapabilities() const;
virtual void GetCapability(size_t aIndex, webrtc::CaptureCapability& aOut) const;
virtual bool ChooseCapability(
const NormalizedConstraints &aConstraints,
const MediaEnginePrefs &aPrefs,
const nsString& aDeviceId,
webrtc::CaptureCapability& aCapability,
const DistanceCalculation aCalculate
);
virtual bool ChooseCapability(const NormalizedConstraints &aConstraints,
const MediaEnginePrefs &aPrefs,
const nsString& aDeviceId);
void SetName(nsString aName);
void SetUUID(const char* aUUID);
const nsCString& GetUUID() const; // protected access
@@ -140,9 +116,6 @@ protected:
nsTArray<RefPtr<SourceMediaStream>> mSources; // When this goes empty, we shut down HW
nsTArray<PrincipalHandle> mPrincipalHandles; // Directly mapped to mSources.
RefPtr<layers::Image> mImage;
nsTArray<RefPtr<layers::Image>> mImages;
nsTArray<webrtc::CaptureCapability> mTargetCapabilities;
nsTArray<uint64_t> mHandleIds;
RefPtr<layers::ImageContainer> mImageContainer;
// end of data protected by mMonitor
@@ -152,8 +125,6 @@ protected:
TrackID mTrackID;
webrtc::CaptureCapability mCapability;
webrtc::CaptureCapability mTargetCapability;
uint64_t mHandleId;
mutable nsTArray<webrtc::CaptureCapability> mHardcodedCapabilities;
private:

dom/media/webrtc/MediaEngineRemoteVideoSource.cpp

@@ -10,9 +10,6 @@
#include "nsIPrefService.h"
#include "MediaTrackConstraints.h"
#include "CamerasChild.h"
#include "VideoFrameUtils.h"
#include "webrtc/api/video/i420_buffer.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
extern mozilla::LogModule* GetMediaManagerLog();
#define LOG(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Debug, msg)
@@ -20,8 +17,6 @@ extern mozilla::LogModule* GetMediaManagerLog();
namespace mozilla {
uint64_t MediaEngineCameraVideoSource::AllocationHandle::sId = 0;
// These need a definition somewhere because template
// code is allowed to take their address, and they aren't
// guaranteed to have one without this.
@@ -85,9 +80,6 @@ MediaEngineRemoteVideoSource::Shutdown()
empty = mSources.IsEmpty();
if (empty) {
MOZ_ASSERT(mPrincipalHandles.IsEmpty());
MOZ_ASSERT(mTargetCapabilities.IsEmpty());
MOZ_ASSERT(mHandleIds.IsEmpty());
MOZ_ASSERT(mImages.IsEmpty());
break;
}
source = mSources[0];
@@ -134,9 +126,6 @@ MediaEngineRemoteVideoSource::Allocate(
MonitorAutoLock lock(mMonitor);
if (mSources.IsEmpty()) {
MOZ_ASSERT(mPrincipalHandles.IsEmpty());
MOZ_ASSERT(mTargetCapabilities.IsEmpty());
MOZ_ASSERT(mHandleIds.IsEmpty());
MOZ_ASSERT(mImages.IsEmpty());
LOG(("Video device %d reallocated", mCaptureIndex));
} else {
LOG(("Video device %d allocated shared", mCaptureIndex));
@@ -179,21 +168,11 @@ MediaEngineRemoteVideoSource::Start(SourceMediaStream* aStream, TrackID aID,
return NS_ERROR_FAILURE;
}
mImageContainer =
layers::LayerManager::CreateImageContainer(layers::ImageContainer::ASYNCHRONOUS);
{
MonitorAutoLock lock(mMonitor);
mSources.AppendElement(aStream);
mPrincipalHandles.AppendElement(aPrincipalHandle);
mTargetCapabilities.AppendElement(mTargetCapability);
mHandleIds.AppendElement(mHandleId);
mImages.AppendElement(mImageContainer->CreatePlanarYCbCrImage());
MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
MOZ_ASSERT(mSources.Length() == mTargetCapabilities.Length());
MOZ_ASSERT(mSources.Length() == mHandleIds.Length());
MOZ_ASSERT(mSources.Length() == mImages.Length());
}
aStream->AddTrack(aID, 0, new VideoSegment(), SourceMediaStream::ADDTRACK_QUEUED);
@@ -201,6 +180,8 @@ MediaEngineRemoteVideoSource::Start(SourceMediaStream* aStream, TrackID aID,
if (mState == kStarted) {
return NS_OK;
}
mImageContainer =
layers::LayerManager::CreateImageContainer(layers::ImageContainer::ASYNCHRONOUS);
mState = kStarted;
mTrackID = aID;
@@ -237,14 +218,8 @@ MediaEngineRemoteVideoSource::Stop(mozilla::SourceMediaStream* aSource,
}
MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
MOZ_ASSERT(mSources.Length() == mTargetCapabilities.Length());
MOZ_ASSERT(mSources.Length() == mHandleIds.Length());
MOZ_ASSERT(mSources.Length() == mImages.Length());
mSources.RemoveElementAt(i);
mPrincipalHandles.RemoveElementAt(i);
mTargetCapabilities.RemoveElementAt(i);
mHandleIds.RemoveElementAt(i);
mImages.RemoveElementAt(i);
aSource->EndTrack(aID);
@@ -287,21 +262,18 @@ nsresult
MediaEngineRemoteVideoSource::UpdateSingleSource(
const AllocationHandle* aHandle,
const NormalizedConstraints& aNetConstraints,
const NormalizedConstraints& aNewConstraint,
const MediaEnginePrefs& aPrefs,
const nsString& aDeviceId,
const char** aOutBadConstraint)
{
if (!ChooseCapability(aNetConstraints, aPrefs, aDeviceId)) {
*aOutBadConstraint = FindBadConstraint(aNetConstraints, *this, aDeviceId);
return NS_ERROR_FAILURE;
}
switch (mState) {
case kReleased:
MOZ_ASSERT(aHandle);
mHandleId = aHandle->mId;
if (!ChooseCapability(aNetConstraints, aPrefs, aDeviceId, mCapability, kFitness)) {
*aOutBadConstraint = FindBadConstraint(aNetConstraints, *this, aDeviceId);
return NS_ERROR_FAILURE;
}
mTargetCapability = mCapability;
if (camera::GetChildAndCall(&camera::CamerasChild::AllocateCaptureDevice,
mCapEngine, GetUUID().get(),
kMaxUniqueIdLength, mCaptureIndex,
@@ -314,47 +286,18 @@ MediaEngineRemoteVideoSource::UpdateSingleSource(
break;
case kStarted:
{
size_t index = mHandleIds.NoIndex;
if (aHandle) {
mHandleId = aHandle->mId;
index = mHandleIds.IndexOf(mHandleId);
}
if (!ChooseCapability(aNewConstraint, aPrefs, aDeviceId, mTargetCapability,
kFitness)) {
*aOutBadConstraint = FindBadConstraint(aNewConstraint, *this, aDeviceId);
if (mCapability != mLastCapability) {
camera::GetChildAndCall(&camera::CamerasChild::StopCapture,
mCapEngine, mCaptureIndex);
if (camera::GetChildAndCall(&camera::CamerasChild::StartCapture,
mCapEngine, mCaptureIndex, mCapability,
this)) {
LOG(("StartCapture failed"));
return NS_ERROR_FAILURE;
}
if (index != mHandleIds.NoIndex) {
MonitorAutoLock lock(mMonitor);
mTargetCapabilities[index] = mTargetCapability;
MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
MOZ_ASSERT(mSources.Length() == mTargetCapabilities.Length());
MOZ_ASSERT(mSources.Length() == mHandleIds.Length());
MOZ_ASSERT(mSources.Length() == mImages.Length());
}
if (!ChooseCapability(aNetConstraints, aPrefs, aDeviceId, mCapability,
kFeasibility)) {
*aOutBadConstraint = FindBadConstraint(aNetConstraints, *this, aDeviceId);
return NS_ERROR_FAILURE;
}
if (mCapability != mLastCapability) {
camera::GetChildAndCall(&camera::CamerasChild::StopCapture,
mCapEngine, mCaptureIndex);
if (camera::GetChildAndCall(&camera::CamerasChild::StartCapture,
mCapEngine, mCaptureIndex, mCapability,
this)) {
LOG(("StartCapture failed"));
return NS_ERROR_FAILURE;
}
SetLastCapability(mCapability);
}
break;
SetLastCapability(mCapability);
}
break;
default:
LOG(("Video device %d in ignored state %d", mCaptureIndex, mState));
@@ -400,22 +343,18 @@ MediaEngineRemoteVideoSource::NotifyPull(MediaStreamGraph* aGraph,
TrackID aID, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle)
{
StreamTime delta = 0;
size_t i;
VideoSegment segment;
MonitorAutoLock lock(mMonitor);
if (mState != kStarted) {
return;
}
i = mSources.IndexOf(aSource);
if (i == mSources.NoIndex) {
return;
}
delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);
StreamTime delta = aDesiredTime - aSource->GetEndOfAppendedData(aID);
if (delta > 0) {
AppendToTrack(aSource, mImages[i], aID, delta, aPrincipalHandle);
// nullptr images are allowed
AppendToTrack(aSource, mImage, aID, delta, aPrincipalHandle);
}
}
@@ -438,12 +377,11 @@ MediaEngineRemoteVideoSource::FrameSizeChange(unsigned int w, unsigned int h)
}
int
MediaEngineRemoteVideoSource::DeliverFrame(uint8_t* aBuffer,
MediaEngineRemoteVideoSource::DeliverFrame(uint8_t* aBuffer ,
const camera::VideoFrameProperties& aProps)
{
MonitorAutoLock lock(mMonitor);
// Check for proper state.
if (mState != kStarted || !mImageContainer) {
if (mState != kStarted) {
LOG(("DeliverFrame: video not started"));
return 0;
}
@@ -451,114 +389,51 @@ MediaEngineRemoteVideoSource::DeliverFrame(uint8_t* aBuffer,
// Update the dimensions
FrameSizeChange(aProps.width(), aProps.height());
MOZ_ASSERT(mSources.Length() == mPrincipalHandles.Length());
MOZ_ASSERT(mSources.Length() == mTargetCapabilities.Length());
MOZ_ASSERT(mSources.Length() == mHandleIds.Length());
MOZ_ASSERT(mSources.Length() == mImages.Length());
for (uint32_t i = 0; i < mTargetCapabilities.Length(); i++ ) {
int32_t req_max_width = mTargetCapabilities[i].width & 0xffff;
int32_t req_max_height = mTargetCapabilities[i].height & 0xffff;
int32_t req_ideal_width = (mTargetCapabilities[i].width >> 16) & 0xffff;
int32_t req_ideal_height = (mTargetCapabilities[i].height >> 16) & 0xffff;
int32_t dest_max_width = std::min(req_max_width, mWidth);
int32_t dest_max_height = std::min(req_max_height, mHeight);
// This logic works for both camera and screen sharing case.
// for camera case, req_ideal_width and req_ideal_height is 0.
// The following snippet will set dst_width to dest_max_width and dst_height to dest_max_height
int32_t dst_width = std::min(req_ideal_width > 0 ? req_ideal_width : mWidth, dest_max_width);
int32_t dst_height = std::min(req_ideal_height > 0 ? req_ideal_height : mHeight, dest_max_height);
int dst_stride_y = dst_width;
int dst_stride_uv = (dst_width + 1) / 2;
camera::VideoFrameProperties properties;
uint8_t* frame;
bool needReScale = !((dst_width == mWidth && dst_height == mHeight) ||
(dst_width > mWidth || dst_height > mHeight));
if (!needReScale) {
dst_width = mWidth;
dst_height = mHeight;
frame = aBuffer;
} else {
rtc::scoped_refptr<webrtc::I420Buffer> i420Buffer;
i420Buffer = webrtc::I420Buffer::Create(mWidth, mHeight, mWidth,
(mWidth + 1) / 2, (mWidth + 1) / 2);
const int conversionResult = webrtc::ConvertToI420(webrtc::kI420,
aBuffer,
0, 0, // No cropping
mWidth, mHeight,
mWidth * mHeight * 3 / 2,
webrtc::kVideoRotation_0,
i420Buffer.get());
webrtc::VideoFrame captureFrame(i420Buffer, 0, 0, webrtc::kVideoRotation_0);
if (conversionResult < 0) {
return 0;
}
rtc::scoped_refptr<webrtc::I420Buffer> scaledBuffer;
scaledBuffer = webrtc::I420Buffer::Create(dst_width, dst_height, dst_stride_y,
dst_stride_uv, dst_stride_uv);
scaledBuffer->CropAndScaleFrom(*captureFrame.video_frame_buffer().get());
webrtc::VideoFrame scaledFrame(scaledBuffer, 0, 0, webrtc::kVideoRotation_0);
VideoFrameUtils::InitFrameBufferProperties(scaledFrame, properties);
frame = new unsigned char[properties.bufferSize()];
if (!frame) {
return 0;
}
VideoFrameUtils::CopyVideoFrameBuffers(frame,
properties.bufferSize(), scaledFrame);
layers::PlanarYCbCrData data;
RefPtr<layers::PlanarYCbCrImage> image;
{
// We grab the lock twice, but don't hold it across the (long) CopyData
MonitorAutoLock lock(mMonitor);
if (!mImageContainer) {
LOG(("DeliverFrame() called after Stop()!"));
return 0;
}
// Create a video frame and append it to the track.
RefPtr<layers::PlanarYCbCrImage> image = mImageContainer->CreatePlanarYCbCrImage();
image = mImageContainer->CreatePlanarYCbCrImage();
uint8_t* frame = static_cast<uint8_t*> (aBuffer);
const uint8_t lumaBpp = 8;
const uint8_t chromaBpp = 4;
layers::PlanarYCbCrData data;
// Take lots of care to round up!
data.mYChannel = frame;
data.mYSize = IntSize(dst_width, dst_height);
data.mYStride = (dst_width * lumaBpp + 7) / 8;
data.mCbCrStride = (dst_width * chromaBpp + 7) / 8;
data.mCbChannel = frame + dst_height * data.mYStride;
data.mCrChannel = data.mCbChannel + ((dst_height + 1) / 2) * data.mCbCrStride;
data.mCbCrSize = IntSize((dst_width + 1) / 2, (dst_height + 1) / 2);
data.mYSize = IntSize(mWidth, mHeight);
data.mYStride = (mWidth * lumaBpp + 7)/ 8;
data.mCbCrStride = (mWidth * chromaBpp + 7) / 8;
data.mCbChannel = frame + mHeight * data.mYStride;
data.mCrChannel = data.mCbChannel + ((mHeight+1)/2) * data.mCbCrStride;
data.mCbCrSize = IntSize((mWidth+1)/ 2, (mHeight+1)/ 2);
data.mPicX = 0;
data.mPicY = 0;
data.mPicSize = IntSize(dst_width, dst_height);
data.mPicSize = IntSize(mWidth, mHeight);
data.mStereoMode = StereoMode::MONO;
}
if (!image->CopyData(data)) {
MOZ_ASSERT(false);
return 0;
}
if (needReScale && frame) {
delete frame;
frame = nullptr;
}
if (!image->CopyData(data)) {
MOZ_ASSERT(false);
return 0;
}
MonitorAutoLock lock(mMonitor);
#ifdef DEBUG
static uint32_t frame_num = 0;
LOGFRAME(("frame %d (%dx%d); timeStamp %u, ntpTimeMs %" PRIu64 ", renderTimeMs %" PRIu64,
frame_num++, mWidth, mHeight,
aProps.timeStamp(), aProps.ntpTimeMs(), aProps.renderTimeMs()));
static uint32_t frame_num = 0;
LOGFRAME(("frame %d (%dx%d); timeStamp %u, ntpTimeMs %" PRIu64 ", renderTimeMs %" PRIu64,
frame_num++, mWidth, mHeight,
aProps.timeStamp(), aProps.ntpTimeMs(), aProps.renderTimeMs()));
#endif
// implicitly releases last image
mImages[i] = image.forget();
}
// implicitly releases last image
mImage = image.forget();
// We'll push the frame into the MSG on the next NotifyPull. This will avoid
// swamping the MSG with frames should it be taking longer than normal to run
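The comment above describes the pull model both versions share: DeliverFrame only stores the newest image, and NotifyPull later appends it for however much time the graph requests, so a slow MediaStreamGraph drops intermediate frames instead of queuing them. A rough sketch of that pattern with hypothetical names (std::mutex standing in for the Monitor):

    #include <cstdint>
    #include <mutex>

    struct Image;  // opaque frame payload

    struct PullSourceSketch {
      std::mutex mMonitor;
      Image* mLatest = nullptr;  // newest delivered frame; older ones are dropped

      void DeliverFrame(Image* aFrame) {
        std::lock_guard<std::mutex> lock(mMonitor);
        mLatest = aFrame;  // implicitly releases the previous image
      }

      void NotifyPull(int64_t aDesiredTime, int64_t aAppendedTime) {
        std::lock_guard<std::mutex> lock(mMonitor);
        int64_t delta = aDesiredTime - aAppendedTime;
        if (delta > 0) {
          AppendToTrack(mLatest, delta);  // stretch the newest frame over the gap
        }
      }

      void AppendToTrack(Image*, int64_t) {
        // The real code builds a VideoSegment and appends it; omitted here.
      }
    };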
@@ -589,9 +464,7 @@ bool
MediaEngineRemoteVideoSource::ChooseCapability(
const NormalizedConstraints &aConstraints,
const MediaEnginePrefs &aPrefs,
const nsString& aDeviceId,
webrtc::CaptureCapability& aCapability,
const DistanceCalculation aCalculate)
const nsString& aDeviceId)
{
AssertIsOnOwningThread();
@@ -604,16 +477,15 @@ MediaEngineRemoteVideoSource::ChooseCapability(
// time (and may in fact change over time), so as a hack, we push ideal
// and max constraints down to desktop_capture_impl.cc and finish the
// algorithm there.
aCapability.width =
(c.mWidth.mIdeal.valueOr(0) & 0xffff) << 16 | (c.mWidth.mMax & 0xffff);
aCapability.height =
(c.mHeight.mIdeal.valueOr(0) & 0xffff) << 16 | (c.mHeight.mMax & 0xffff);
aCapability.maxFPS =
c.mFrameRate.Clamp(c.mFrameRate.mIdeal.valueOr(aPrefs.mFPS));
mCapability.width = (c.mWidth.mIdeal.valueOr(0) & 0xffff) << 16 |
(c.mWidth.mMax & 0xffff);
mCapability.height = (c.mHeight.mIdeal.valueOr(0) & 0xffff) << 16 |
(c.mHeight.mMax & 0xffff);
mCapability.maxFPS = c.mFrameRate.Clamp(c.mFrameRate.mIdeal.valueOr(aPrefs.mFPS));
return true;
}
default:
return MediaEngineCameraVideoSource::ChooseCapability(aConstraints, aPrefs, aDeviceId, aCapability, aCalculate);
return MediaEngineCameraVideoSource::ChooseCapability(aConstraints, aPrefs, aDeviceId);
}
}
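The screen-share branch above smuggles two constraints through each 32-bit capability field: the ideal dimension in the high 16 bits and the max in the low 16 bits, to be unpacked later in desktop_capture_impl.cc. A small sketch of that packing, assuming both values fit in 16 bits (true for realistic screen sizes); the function names are mine:

    #include <cassert>
    #include <cstdint>

    static int32_t PackConstraint(uint32_t aIdeal, uint32_t aMax) {
      // Ideal in the high 16 bits, max in the low 16 bits.
      return int32_t((aIdeal & 0xffff) << 16 | (aMax & 0xffff));
    }

    static void UnpackConstraint(int32_t aPacked, int32_t& aIdeal, int32_t& aMax) {
      aIdeal = (aPacked >> 16) & 0xffff;
      aMax = aPacked & 0xffff;
    }

    int main() {
      int32_t ideal = 0, max = 0;
      UnpackConstraint(PackConstraint(1280, 1920), ideal, max);
      assert(ideal == 1280 && max == 1920);
      return 0;
    }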

dom/media/webrtc/MediaEngineRemoteVideoSource.h

@@ -84,12 +84,9 @@ public:
return mMediaSource;
}
bool ChooseCapability(
const NormalizedConstraints &aConstraints,
const MediaEnginePrefs &aPrefs,
const nsString& aDeviceId,
webrtc::CaptureCapability& aCapability,
const DistanceCalculation aCalculate) override;
bool ChooseCapability(const NormalizedConstraints &aConstraints,
const MediaEnginePrefs &aPrefs,
const nsString& aDeviceId) override;
void Refresh(int aIndex);
@@ -110,7 +107,6 @@ private:
nsresult
UpdateSingleSource(const AllocationHandle* aHandle,
const NormalizedConstraints& aNetConstraints,
const NormalizedConstraints& aNewConstraint,
const MediaEnginePrefs& aPrefs,
const nsString& aDeviceId,
const char** aOutBadConstraint) override;

dom/media/webrtc/MediaEngineWebRTC.h

@@ -566,7 +566,6 @@ private:
nsresult
UpdateSingleSource(const AllocationHandle* aHandle,
const NormalizedConstraints& aNetConstraints,
const NormalizedConstraints& aNewConstraint,
const MediaEnginePrefs& aPrefs,
const nsString& aDeviceId,
const char** aOutBadConstraint) override;

dom/media/webrtc/MediaEngineWebRTCAudio.cpp

@@ -279,7 +279,6 @@ nsresult
MediaEngineWebRTCMicrophoneSource::UpdateSingleSource(
const AllocationHandle* aHandle,
const NormalizedConstraints& aNetConstraints,
const NormalizedConstraints& aNewConstraint, /* Ignored */
const MediaEnginePrefs& aPrefs,
const nsString& aDeviceId,
const char** aOutBadConstraint)

dom/media/webrtc/MediaTrackConstraints.cpp

@@ -417,28 +417,6 @@ MediaConstraintsHelper::FitnessDistance(ValueType aN,
std::max(std::abs(aN), std::abs(aRange.mIdeal.value()))));
}
template<class ValueType, class NormalizedRange>
/* static */ uint32_t
MediaConstraintsHelper::FeasibilityDistance(ValueType aN,
const NormalizedRange& aRange)
{
if (aRange.mMin > aN) {
return UINT32_MAX;
}
// We prefer larger resolution because now we support downscaling
if (aN == aRange.mIdeal.valueOr(aN)) {
return 0;
}
if (aN > aRange.mIdeal.value()) {
return uint32_t(ValueType((std::abs(aN - aRange.mIdeal.value()) * 1000) /
std::max(std::abs(aN), std::abs(aRange.mIdeal.value()))));
}
return 10000 + uint32_t(ValueType((std::abs(aN - aRange.mIdeal.value()) * 1000) /
std::max(std::abs(aN), std::abs(aRange.mIdeal.value()))));
}
// Fitness distance returned as integer math * 1000. Infinity = UINT32_MAX
/* static */ uint32_t
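A quick walk through the branches of the removed FeasibilityDistance above, rewritten as a plain function with an explicit min/ideal pair instead of a NormalizedRange (a hypothetical simplification; the original returns 0 when no ideal is given):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    static uint32_t FeasibilityDistanceSketch(int32_t aN, int32_t aMin, int32_t aIdeal) {
      if (aMin > aN) {
        return UINT32_MAX;  // below the required minimum: reject outright
      }
      if (aN == aIdeal) {
        return 0;           // exact match
      }
      uint32_t scaled = uint32_t((std::abs(aN - aIdeal) * 1000) /
                                 std::max(std::abs(aN), std::abs(aIdeal)));
      // Larger than ideal is cheap (downscaling is possible); smaller than
      // ideal takes the 10000 penalty on top.
      return aN > aIdeal ? scaled : 10000 + scaled;
    }

    int main() {
      assert(FeasibilityDistanceSketch(1280, 0, 1280) == 0);
      assert(FeasibilityDistanceSketch(1920, 0, 1280) == 333);          // downscalable
      assert(FeasibilityDistanceSketch(640, 0, 1280) == 10500);         // too small
      assert(FeasibilityDistanceSketch(320, 640, 1280) == UINT32_MAX);  // under min
      return 0;
    }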

dom/media/webrtc/MediaTrackConstraints.h

@@ -85,19 +85,12 @@ public:
return mMax >= aOther.mMin && mMin <= aOther.mMax;
}
void Intersect(const Range& aOther) {
MOZ_ASSERT(Intersects(aOther));
mMin = std::max(mMin, aOther.mMin);
if (Intersects(aOther)) {
mMax = std::min(mMax, aOther.mMax);
} else {
// If there is no intersection, we will down-scale or drop frame
mMax = std::max(mMax, aOther.mMax);
}
mMax = std::min(mMax, aOther.mMax);
}
bool Merge(const Range& aOther) {
if (strcmp(mName, "width") != 0 &&
strcmp(mName, "height") != 0 &&
strcmp(mName, "frameRate") != 0 &&
!Intersects(aOther)) {
if (!Intersects(aOther)) {
return false;
}
Intersect(aOther);
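The two Intersect bodies above differ on non-overlapping ranges: the variant being restored asserts the ranges intersect and clamps both ends, while the backed-out variant kept the larger max so an oversized capture could be downscaled or dropped later. A minimal sketch of both behaviors on a simplified integer range type:

    #include <algorithm>
    #include <cassert>

    struct Range { int min, max; };  // hypothetical stand-in for the template

    static bool Intersects(Range a, Range b) {
      return a.max >= b.min && a.min <= b.max;
    }

    // Restored behavior: caller must guarantee overlap; result is the overlap.
    static Range IntersectStrict(Range a, Range b) {
      assert(Intersects(a, b));
      return {std::max(a.min, b.min), std::min(a.max, b.max)};
    }

    // Backed-out behavior: on disjoint ranges, keep the larger max so the
    // frame can be captured big and downscaled (or dropped) later.
    static Range IntersectOrWiden(Range a, Range b) {
      int max = Intersects(a, b) ? std::min(a.max, b.max) : std::max(a.max, b.max);
      return {std::max(a.min, b.min), max};
    }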
@@ -304,8 +297,6 @@ class MediaConstraintsHelper
protected:
template<class ValueType, class NormalizedRange>
static uint32_t FitnessDistance(ValueType aN, const NormalizedRange& aRange);
template<class ValueType, class NormalizedRange>
static uint32_t FeasibilityDistance(ValueType aN, const NormalizedRange& aRange);
static uint32_t FitnessDistance(nsString aN,
const NormalizedConstraintSet::StringRange& aConstraint);

media/webrtc/trunk/webrtc/video_engine/desktop_capture_impl.cc

@@ -582,7 +582,45 @@ int32_t DesktopCaptureImpl::IncomingFrame(uint8_t* videoFrame,
return -1;
}
DeliverCapturedFrame(captureFrame, captureTime);
int32_t req_max_width = _requestedCapability.width & 0xffff;
int32_t req_max_height = _requestedCapability.height & 0xffff;
int32_t req_ideal_width = (_requestedCapability.width >> 16) & 0xffff;
int32_t req_ideal_height = (_requestedCapability.height >> 16) & 0xffff;
int32_t dest_max_width = std::min(req_max_width, target_width);
int32_t dest_max_height = std::min(req_max_height, target_height);
int32_t dst_width = std::min(req_ideal_width > 0 ? req_ideal_width : target_width, dest_max_width);
int32_t dst_height = std::min(req_ideal_height > 0 ? req_ideal_height : target_height, dest_max_height);
// scale to average of portrait and landscape
float scale_width = (float)dst_width / (float)target_width;
float scale_height = (float)dst_height / (float)target_height;
float scale = (scale_width + scale_height) / 2;
dst_width = (int)(scale * target_width);
dst_height = (int)(scale * target_height);
// if scaled rectangle exceeds max rectangle, scale to minimum of portrait and landscape
if (dst_width > dest_max_width || dst_height > dest_max_height) {
scale_width = (float)dest_max_width / (float)dst_width;
scale_height = (float)dest_max_height / (float)dst_height;
scale = std::min(scale_width, scale_height);
dst_width = (int)(scale * dst_width);
dst_height = (int)(scale * dst_height);
}
int dst_stride_y = dst_width;
int dst_stride_uv = (dst_width + 1) / 2;
if (dst_width == target_width && dst_height == target_height) {
DeliverCapturedFrame(captureFrame, captureTime);
} else {
rtc::scoped_refptr<webrtc::I420Buffer> buffer;
buffer = I420Buffer::Create(dst_width, dst_height, dst_stride_y,
dst_stride_uv, dst_stride_uv);
buffer->ScaleFrom(*captureFrame.video_frame_buffer().get());
webrtc::VideoFrame scaledFrame(buffer, 0, 0, kVideoRotation_0);
DeliverCapturedFrame(scaledFrame, captureTime);
}
} else {
assert(false);
return -1;
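A worked example of the sizing math in this hunk, with made-up numbers (a 1920x1080 capture, ideal 1280x1024, max 1440x1440); comments show approximate values since the intermediate math is in float:

    #include <algorithm>
    #include <cstdio>

    int main() {
      int target_w = 1920, target_h = 1080;  // captured frame
      int ideal_w = 1280, ideal_h = 1024;    // unpacked ideal constraint
      int max_w = 1440, max_h = 1440;        // unpacked max constraint

      int dest_max_w = std::min(max_w, target_w);  // 1440
      int dest_max_h = std::min(max_h, target_h);  // 1080
      int dst_w = std::min(ideal_w > 0 ? ideal_w : target_w, dest_max_w);  // 1280
      int dst_h = std::min(ideal_h > 0 ? ideal_h : target_h, dest_max_h);  // 1024

      // Average the two axis scales to keep a compromise aspect ratio.
      float scale = ((float)dst_w / target_w + (float)dst_h / target_h) / 2;  // ~0.807
      dst_w = (int)(scale * target_w);  // ~1550
      dst_h = (int)(scale * target_h);  // ~872

      // If the averaged size overshoots a max, shrink by the tighter axis.
      if (dst_w > dest_max_w || dst_h > dest_max_h) {
        scale = std::min((float)dest_max_w / dst_w, (float)dest_max_h / dst_h);
        dst_w = (int)(scale * dst_w);  // ~1440
        dst_h = (int)(scale * dst_h);  // ~810
      }
      std::printf("%dx%d\n", dst_w, dst_h);
      return 0;
    }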