Merge mozilla-central to mozilla-inbound on a CLOSED TREE

Andreea Pavel, 2019-03-22 18:51:53 +02:00, commit a7cd412f0e
256 changed files with 11563 additions and 4188 deletions


@ -24,5 +24,3 @@ replace-with = "vendored-sources"
[source.vendored-sources]
directory = '@top_srcdir@/third_party/rust'
@WIN64_CARGO_LINKER_CONFIG@

Cargo.lock (generated)

@ -277,6 +277,40 @@ name = "bitreader"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "bits"
version = "0.1.0"
dependencies = [
"comedy 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"filetime_win 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"guid_win 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.80 (git+https://github.com/servo/serde?branch=deserialize_from_enums9)",
"winapi 0.3.6 (git+https://github.com/froydnj/winapi-rs?branch=aarch64)",
]
[[package]]
name = "bits_client"
version = "0.1.0"
dependencies = [
"bits 0.1.0",
"comedy 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"failure_derive 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"guid_win 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bitsdownload"
version = "0.1.0"
dependencies = [
"bits_client 0.1.0",
]
[[package]]
name = "blake2-rfc"
version = "0.2.18"
@ -446,6 +480,16 @@ dependencies = [
"cc 1.0.23 (git+https://github.com/glandium/cc-rs?branch=1.0.23-clang-cl-aarch64)",
]
[[package]]
name = "comedy"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"failure_derive 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.6 (git+https://github.com/froydnj/winapi-rs?branch=aarch64)",
]
[[package]]
name = "constant_time_eq"
version = "0.1.3"
@ -974,6 +1018,15 @@ dependencies = [
"smallvec 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "filetime_win"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"comedy 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.6 (git+https://github.com/froydnj/winapi-rs?branch=aarch64)",
]
[[package]]
name = "fixedbitset"
version = "0.1.8"
@ -1141,6 +1194,7 @@ dependencies = [
"arrayvec 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"audioipc-client 0.4.0",
"audioipc-server 0.2.3",
"bitsdownload 0.1.0",
"cert_storage 0.0.1",
"cose-c 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"cubeb-pulse 0.2.0",
@ -1208,6 +1262,15 @@ dependencies = [
"scroll 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "guid_win"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"comedy 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.6 (git+https://github.com/froydnj/winapi-rs?branch=aarch64)",
]
[[package]]
name = "h2"
version = "0.1.12"
@ -3384,6 +3447,7 @@ dependencies = [
"checksum clang-sys 0.26.1 (registry+https://github.com/rust-lang/crates.io-index)" = "481e42017c1416b1c0856ece45658ecbb7c93d8a93455f7e5fa77f3b35455557"
"checksum clap 2.31.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f0f16b89cbb9ee36d87483dc939fe9f1e13c05898d56d7b230a0d4dff033a536"
"checksum cmake 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "56d741ea7a69e577f6d06b36b7dff4738f680593dc27a701ffa8506b73ce28bb"
"checksum comedy 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d4f03fbb05a4df3523a44cda10340e6ae6bea03ee9d01240a1a2c1ef6c73e95"
"checksum constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8ff012e225ce166d4422e0e78419d901719760f62ae2b7969ca6b564d1b54a9e"
"checksum cookie 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1465f8134efa296b4c19db34d909637cb2bf0f7aaf21299e23e18fa29ac557cf"
"checksum core-foundation 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4e2640d6d0bf22e82bed1b73c6aef8d5dd31e5abe6666c57e6d45e2649f4f887"
@ -3438,6 +3502,7 @@ dependencies = [
"checksum failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6dd377bcc1b1b7ce911967e3ec24fa19c3224394ec05b54aa7b083d498341ac7"
"checksum failure_derive 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "64c2d913fe8ed3b6c6518eedf4538255b989945c14c2a7d5cbff62a5e2120596"
"checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed"
"checksum filetime_win 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b8c37abd4a58e0cb794bcae4a7dc4f02fff376949d8d1066d4c729e97bfb38ec"
"checksum fixedbitset 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "85cb8fec437468d86dc7c83ca7cfc933341d561873275f22dd5eedefa63a6478"
"checksum flate2 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9fac2277e84e5e858483756647a9d0aa8d9a2b7cba517fd84325a0aaa69a0909"
"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3"
@ -3456,6 +3521,7 @@ dependencies = [
"checksum gleam 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)" = "17bca843dd3cf25db1bf415d55de9c0f0ae09dd7fa952ec3cef9930f90de1339"
"checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb"
"checksum goblin 0.0.17 (registry+https://github.com/rust-lang/crates.io-index)" = "5911d7df7b8f65ab676c5327b50acea29d3c6a1a4ad05e444cf5dce321b26db2"
"checksum guid_win 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "87261686cc5e35b6584f4c2a430c2b153d8a92ab1ef820c16be34c1df8f5f58b"
"checksum h2 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "a27e7ed946e8335bdf9a191bc1b9b14a03ba822d013d2f58437f4fabcbd7fc2c"
"checksum http 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "dca621d0fa606a5ff2850b6e337b57ad6137ee4d67e940449643ff45af6874c6"
"checksum httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "af2f2dd97457e8fb1ae7c5a420db346af389926e36f43768b96f101546b04a07"


@ -687,8 +687,8 @@ class PrincipalsCollector {
async getAllPrincipalsInternal(progress) {
progress.step = "principals-quota-manager";
let principals = await new Promise(resolve => {
quotaManagerService.getUsage(request => {
progress.step = "principals-quota-manager-getUsage";
quotaManagerService.listInitializedOrigins(request => {
progress.step = "principals-quota-manager-listInitializedOrigins";
if (request.resultCode != Cr.NS_OK) {
// We are probably shutting down. We don't want to propagate the
// error by rejecting the promise.


@ -312,7 +312,8 @@ def build_one_stage(cc, cxx, asm, ld, ar, ranlib, libtool,
"-DRUNTIMES_%s_CMAKE_SHARED_LINKER_FLAGS=%s" % (target, android_link_flags),
"-DRUNTIMES_%s_CMAKE_SYSROOT=%s" % (target, sysroot_dir),
"-DRUNTIMES_%s_COMPILER_RT_BUILD_PROFILE=ON" % target,
"-DRUNTIMES_%s_COMPILER_RT_BUILD_SANITIZERS=OFF" % target,
"-DRUNTIMES_%s_COMPILER_RT_BUILD_SANITIZERS=ON" % target,
"-DRUNTIMES_%s_SANITIZER_ALLOW_CXXABI=OFF" % target,
"-DRUNTIMES_%s_COMPILER_RT_BUILD_LIBFUZZER=OFF" % target,
"-DRUNTIMES_%s_COMPILER_RT_INCLUDE_TESTS=OFF" % target,
"-DRUNTIMES_%s_LLVM_ENABLE_PER_TARGET_RUNTIME_DIR=OFF" % target,

build/cargo-host-linker (new executable file)

@ -0,0 +1,3 @@
#!/bin/sh
# See comment in cargo-linker.
eval ${MOZ_CARGO_WRAP_HOST_LD} ${MOZ_CARGO_WRAP_HOST_LDFLAGS} '"$@"'


@ -0,0 +1,3 @@
@echo off
REM See comment in cargo-linker (without extension)
%MOZ_CARGO_WRAP_HOST_LD% %MOZ_CARGO_WRAP_HOST_LDFLAGS% %*


@ -14,7 +14,7 @@
# frequently has additional arguments in addition to the compiler
# itself.
# * MOZ_CARGO_WRAP_LDFLAGS contains space-separated arguments to pass,
# and not quoting it ensures that either of those arguments is passed
# and not quoting it ensures that each of those arguments is passed
# as a separate argument to the actual LD.
#
# $@ is doubly quoted for the eval. See bug 1418598.

build/cargo-linker.bat (new file)

@ -0,0 +1,3 @@
@echo off
REM See comment in cargo-linker (without extension)
%MOZ_CARGO_WRAP_LD% %MOZ_CARGO_WRAP_LDFLAGS% %*


@ -210,10 +210,14 @@ MOZ_CAN_RUN_SCRIPT void test_ref_9() {
test_ref(*(RefCountedBase*)x); // expected-error {{arguments must all be strong refs or caller's parameters when calling a function marked as MOZ_CAN_RUN_SCRIPT (including the implicit object argument). '*(RefCountedBase*)x' is neither.}}
}
// Ignore warning not related to static analysis here
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wvoid-ptr-dereference"
MOZ_CAN_RUN_SCRIPT void test_ref_10() {
void* x = new RefCountedBase();
test_ref((RefCountedBase&)*x); // expected-error {{arguments must all be strong refs or caller's parameters when calling a function marked as MOZ_CAN_RUN_SCRIPT (including the implicit object argument). '*x' is neither.}} expected-error {{ISO C++ does not allow indirection on operand of type 'void *'}}
test_ref((RefCountedBase&)*x); // expected-error {{arguments must all be strong refs or caller's parameters when calling a function marked as MOZ_CAN_RUN_SCRIPT (including the implicit object argument). '*x' is neither.}}
}
#pragma GCC diagnostic pop
MOZ_CAN_RUN_SCRIPT void test_maybe() {
mozilla::Maybe<RefCountedBase*> unsafe;


@ -15,8 +15,6 @@ with Files('docs/**'):
if CONFIG['OS_ARCH'] == 'WINNT':
DIRS += ['win32']
if CONFIG['WIN64_CARGO_LINKER']:
CONFIGURE_SUBST_FILES += ['win64/cargo-linker.bat']
else:
DIRS += ['unix']


@ -272,16 +272,6 @@ set_config('RUST_TARGET', rust_target_triple)
set_config('RUST_HOST_TARGET', rust_host_triple)
@depends(rust_target_triple)
def rust_target_env_name(triple):
return triple.upper().replace('-', '_')
# We need this to form various Cargo environment variables, as there is no
# uppercase function in make, and we don't want to shell out just for
# converting a string to uppercase.
set_config('RUST_TARGET_ENV_NAME', rust_target_env_name)
# This is used for putting source info into symbol files.
set_config('RUSTC_COMMIT', depends(rustc_info)(lambda i: i.commit))
@ -307,37 +297,6 @@ def rust_tests(enable_rust_tests, rustdoc):
set_config('MOZ_RUST_TESTS', rust_tests)
@depends(target, rustc_info, c_compiler)
def win64_cargo_linker(target, rustc_info, compiler_info):
# When we're building a 32-bit Windows build with a 64-bit rustc, we
# need to configure the linker it will use for host binaries (build scripts)
# specially because the compiler configuration we use for the build is for
# MSVC targeting 32-bit binaries.
if target.kernel == 'WINNT' and \
target.cpu in ('x86', 'aarch64') and \
compiler_info.type in ('msvc', 'clang-cl') and \
rustc_info.host == 'x86_64-pc-windows-msvc':
return True
set_config('WIN64_CARGO_LINKER', win64_cargo_linker)
@depends(win64_cargo_linker, check_build_environment)
@imports(_from='textwrap', _import='dedent')
def win64_cargo_linker_config(linker, env):
if linker:
return dedent('''\
[target.x86_64-pc-windows-msvc]
linker = "{objdir}/build/win64/cargo-linker.bat"
'''.format(objdir=env.topobjdir))
# We want an empty string here so we don't leave the @ variable in the config file.
return ''
set_config('WIN64_CARGO_LINKER_CONFIG', win64_cargo_linker_config)
@depends(target, c_compiler, rustc)
@imports('os')
def rustc_natvis_ldflags(target, compiler_info, rustc):


@ -1 +0,0 @@
@HOST_LINKER@ @HOST_LINKER_LIBPATHS_BAT@ %*


@ -137,6 +137,8 @@ HOST_RECIPES := \
$(HOST_RECIPES): RUSTFLAGS:=$(rustflags_override)
cargo_env = $(subst -,_,$(subst a,A,$(subst b,B,$(subst c,C,$(subst d,D,$(subst e,E,$(subst f,F,$(subst g,G,$(subst h,H,$(subst i,I,$(subst j,J,$(subst k,K,$(subst l,L,$(subst m,M,$(subst n,N,$(subst o,O,$(subst p,P,$(subst q,Q,$(subst r,R,$(subst s,S,$(subst t,T,$(subst u,U,$(subst v,V,$(subst w,W,$(subst x,X,$(subst y,Y,$(subst z,Z,$1)))))))))))))))))))))))))))
# We use the + prefix to pass down the jobserver fds to cargo, but we
# don't use the prefix when make -n is used, so that cargo doesn't run
# in that case.
@ -159,14 +161,8 @@ define CARGO_CHECK
$(call RUN_CARGO,check)
endef
cargo_linker_env_var := CARGO_TARGET_$(RUST_TARGET_ENV_NAME)_LINKER
# Don't define a custom linker on Windows, as it's difficult to have a
# non-binary file that will get executed correctly by Cargo. We don't
# have to worry about a cross-compiling setup on Windows (besides
# x86-64 -> x86, which already works with the current setup), and we don't
# have to pass in any special linker options on Windows.
ifneq (WINNT,$(OS_ARCH))
cargo_host_linker_env_var := CARGO_TARGET_$(call cargo_env,$(RUST_HOST_TARGET))_LINKER
cargo_linker_env_var := CARGO_TARGET_$(call cargo_env,$(RUST_TARGET))_LINKER
# Defining all of this for ASan/TSan builds results in crashes while running
# some crates's build scripts (!), so disable it for now.
@ -180,24 +176,50 @@ ifndef FUZZING_INTERFACES
# Also, we don't want to pass PGO flags until cargo supports them.
export MOZ_CARGO_WRAP_LDFLAGS
export MOZ_CARGO_WRAP_LD
export MOZ_CARGO_WRAP_HOST_LDFLAGS
export MOZ_CARGO_WRAP_HOST_LD
# Exporting from make always exports a value. Setting a value per-recipe
# would export an empty value for the host recipes. When not doing a
# cross-compile, the --target for those is the same, and cargo will use
# $(cargo_linker_env_var) for its linker, so we always pass the
# cargo-linker wrapper, and fill MOZ_CARGO_WRAP_LD* more or less
# CARGO_TARGET_*_LINKER for its linker, so we always pass the
# cargo-linker wrapper, and fill MOZ_CARGO_WRAP_{HOST_,}LD* more or less
# appropriately for all recipes.
ifeq (WINNT,$(HOST_OS_ARCH))
# Use .bat wrapping on Windows hosts, and shell wrapping on other hosts.
# Like for CC/C*FLAGS, we want the target values to trump the host values when
# both variables are the same.
export $(cargo_host_linker_env_var):=$(topsrcdir)/build/cargo-host-linker.bat
export $(cargo_linker_env_var):=$(topsrcdir)/build/cargo-linker.bat
WRAP_HOST_LINKER_LIBPATHS:=$(HOST_LINKER_LIBPATHS_BAT)
else
export $(cargo_host_linker_env_var):=$(topsrcdir)/build/cargo-host-linker
export $(cargo_linker_env_var):=$(topsrcdir)/build/cargo-linker
WRAP_HOST_LINKER_LIBPATHS:=$(HOST_LINKER_LIBPATHS)
endif
$(TARGET_RECIPES): MOZ_CARGO_WRAP_LDFLAGS:=$(filter-out -fsanitize=cfi% -framework Cocoa -lobjc AudioToolbox ExceptionHandling -fprofile-%,$(LDFLAGS))
$(HOST_RECIPES): MOZ_CARGO_WRAP_LDFLAGS:=$(HOST_LDFLAGS) $(WRAP_HOST_LINKER_LIBPATHS)
$(TARGET_RECIPES) $(HOST_RECIPES): MOZ_CARGO_WRAP_HOST_LDFLAGS:=$(HOST_LDFLAGS) $(WRAP_HOST_LINKER_LIBPATHS)
ifeq (,$(filter clang-cl,$(CC_TYPE)))
$(TARGET_RECIPES): MOZ_CARGO_WRAP_LD:=$(CC)
$(HOST_RECIPES): MOZ_CARGO_WRAP_LDFLAGS:=$(HOST_LDFLAGS)
else
$(TARGET_RECIPES): MOZ_CARGO_WRAP_LD:=$(LINKER)
endif
ifeq (,$(filter clang-cl,$(HOST_CC_TYPE)))
$(HOST_RECIPES): MOZ_CARGO_WRAP_LD:=$(HOST_CC)
$(TARGET_RECIPES) $(HOST_RECIPES): MOZ_CARGO_WRAP_HOST_LD:=$(HOST_CC)
else
$(HOST_RECIPES): MOZ_CARGO_WRAP_LD:=$(HOST_LINKER)
$(TARGET_RECIPES) $(HOST_RECIPES): MOZ_CARGO_WRAP_HOST_LD:=$(HOST_LINKER)
endif
endif # FUZZING_INTERFACES
endif # MOZ_UBSAN
endif # MOZ_TSAN
endif # MOZ_ASAN
endif # ifneq WINNT
ifdef RUST_LIBRARY_FILE
ifdef RUST_LIBRARY_FEATURES


@ -64,7 +64,8 @@ async function testRemoteClientPersistConnection(mocks,
info("Emit 'closed' on the client and wait for the sidebar item to disappear");
client._eventEmitter.emit("closed");
await waitUntil(() => !findSidebarItemByText(sidebarName, document));
await waitUntil(() => !findSidebarItemByText(sidebarName, document) &&
!findSidebarItemByText(runtimeName, document));
info("Remove the tab");
await removeTab(tab);


@ -77,8 +77,10 @@ add_task(async function() {
mocks.removeRuntime(USB_RUNTIME_1.id);
mocks.removeRuntime(USB_RUNTIME_2.id);
mocks.emitUSBUpdate();
await waitUntil(() => !findSidebarItemByText(USB_RUNTIME_1.shortName, document));
await waitUntil(() => !findSidebarItemByText(USB_RUNTIME_2.shortName, document));
await waitUntil(() => !findSidebarItemByText(USB_RUNTIME_1.name, document) &&
!findSidebarItemByText(USB_RUNTIME_1.shortName, document));
await waitUntil(() => !findSidebarItemByText(USB_RUNTIME_2.name, document) &&
!findSidebarItemByText(USB_RUNTIME_2.shortName, document));
checkTelemetryEvents([
{ method: "runtime_removed", extras: RUNTIME_1_EXTRAS },


@ -3637,7 +3637,6 @@ html .breakpoints-list .breakpoint.paused {
padding: 0 0.5em;
line-height: 25px;
position: relative;
transition: all 0.25s ease;
cursor: pointer;
display: flex;
}


@ -12,7 +12,6 @@
padding: 0 0.5em;
line-height: 25px;
position: relative;
transition: all 0.25s ease;
cursor: pointer;
display: flex;
}


@ -450,15 +450,10 @@ CssRuleView.prototype = {
},
/**
* Retrieve the RuleEditor instance that should be stored on
* the offset parent of the node
* Retrieve the RuleEditor instance.
*/
_getRuleEditorForNode: function(node) {
if (!node.offsetParent) {
// some nodes don't have an offsetParent, but their parentNode does
node = node.parentNode;
}
return node.offsetParent._ruleEditor;
return node.closest(".ruleview-rule")._ruleEditor;
},
/**


@ -99,6 +99,7 @@ skip-if = (verify && !debug && os == 'win')
[browser_rules_completion-new-property_multiline.js]
[browser_rules_computed-lists_01.js]
[browser_rules_computed-lists_02.js]
[browser_rules_computed-lists_03.js]
[browser_rules_completion-popup-hidden-after-navigation.js]
[browser_rules_content_01.js]
[browser_rules_content_02.js]


@ -0,0 +1,35 @@
/* vim: set ft=javascript ts=2 et sw=2 tw=80: */
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
"use strict";
// Tests that the rule view does not show expanders for property values that contain
// variables.
// This should, in theory, be able to work, but the complexity outlined in
// https://bugzilla.mozilla.org/show_bug.cgi?id=1535315#c2 made us hide the expander
// instead. This is also what Chrome does.
var TEST_URI = `
<style type="text/css">
#testid {
--primary-color: black;
background: var(--primary-color, red);
}
</style>
<h1 id="testid">Styled Node</h1>
`;
add_task(async function() {
await addTab("data:text/html;charset=utf-8," + encodeURIComponent(TEST_URI));
const { inspector, view } = await openRuleView();
await selectNode("#testid", inspector);
const rule = getRuleViewRuleEditor(view, 1).rule;
is(rule.textProps[0].name, "--primary-color", "The first property is the variable");
is(rule.textProps[1].name, "background", "The second property is background");
info("Check that the expander is hidden for the background property");
is(rule.textProps[1].editor.expander.style.display, "none", "Expander is hidden");
});


@ -584,6 +584,17 @@ TextPropertyEditor.prototype = {
this.expander.style.display = "none";
},
get shouldShowComputedExpander() {
// Only show the expander to reveal computed properties if:
// - the computed properties are actually different from the current property (i.e.
// these are longhands while the current property is the shorthand)
// - all of the computed properties have defined values. If the current property
// value contains CSS variables, the computed properties will be missing and we
// want to avoid showing them.
return this.prop.computed.some(c => c.name !== this.prop.name) &&
!this.prop.computed.every(c => !c.value);
},
/**
* Update the visibility of the enable checkbox, the warning indicator and
* the filter property, as well as the overridden state of the property.
@ -607,8 +618,8 @@ TextPropertyEditor.prototype = {
!this.prop.overridden ||
this.ruleEditor.rule.isUnmatched;
const showExpander = this.prop.computed.some(c => c.name !== this.prop.name);
this.expander.style.display = showExpander ? "inline-block" : "none";
this.expander.style.display =
this.shouldShowComputedExpander ? "inline-block" : "none";
if (!this.editing &&
(this.prop.overridden || !this.prop.enabled ||
@ -626,8 +637,8 @@ TextPropertyEditor.prototype = {
_updateComputed: function() {
this.computed.innerHTML = "";
const showExpander = this.prop.computed.some(c => c.name !== this.prop.name);
this.expander.style.display = !this.editing && showExpander ? "inline-block" : "none";
this.expander.style.display =
!this.editing && this.shouldShowComputedExpander ? "inline-block" : "none";
this._populatedComputed = false;
if (this.expander.hasAttribute("open")) {


@ -8,7 +8,6 @@
#include "DOMMediaStream.h"
#include "ImageContainer.h"
#include "MediaStreamGraph.h"
#include "MediaStreamListener.h"
#include "gfxPlatform.h"
#include "mozilla/Atomics.h"
#include "mozilla/dom/CanvasCaptureMediaStreamBinding.h"
@ -22,100 +21,17 @@ using namespace mozilla::gfx;
namespace mozilla {
namespace dom {
class OutputStreamDriver::TrackListener : public MediaStreamTrackListener {
public:
TrackListener(TrackID aTrackId, const PrincipalHandle& aPrincipalHandle,
SourceMediaStream* aSourceStream)
: mEnded(false),
mSourceStream(aSourceStream),
mTrackId(aTrackId),
mPrincipalHandle(aPrincipalHandle),
mMutex("CanvasCaptureMediaStream OutputStreamDriver::StreamListener") {
MOZ_ASSERT(mSourceStream);
}
void Forget() {
EndTrack();
mSourceStream->EndTrack(mTrackId);
MutexAutoLock lock(mMutex);
mImage = nullptr;
}
void EndTrack() { mEnded = true; }
void SetImage(const RefPtr<layers::Image>& aImage, const TimeStamp& aTime) {
MutexAutoLock lock(mMutex);
mImage = aImage;
mImageTime = aTime;
}
void NotifyPull(MediaStreamGraph* aGraph, StreamTime aEndOfAppendedData,
StreamTime aDesiredTime) override {
// Called on the MediaStreamGraph thread.
TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i",
mSourceStream.get(), mTrackId);
MOZ_ASSERT(mSourceStream);
StreamTime delta = aDesiredTime - aEndOfAppendedData;
MOZ_ASSERT(delta > 0);
MutexAutoLock lock(mMutex);
RefPtr<Image> image = mImage;
IntSize size = image ? image->GetSize() : IntSize(0, 0);
VideoSegment segment;
segment.AppendFrame(image.forget(), delta, size, mPrincipalHandle, false,
mImageTime);
mSourceStream->AppendToTrack(mTrackId, &segment);
if (mEnded) {
mSourceStream->EndTrack(mTrackId);
}
}
void NotifyEnded() override {
Forget();
mSourceStream->Graph()->DispatchToMainThreadStableState(
NS_NewRunnableFunction(
"OutputStreamDriver::TrackListener::RemoveTrackListener",
[self = RefPtr<TrackListener>(this), this]() {
if (!mSourceStream->IsDestroyed()) {
mSourceStream->RemoveTrackListener(this, mTrackId);
}
}));
}
void NotifyRemoved() override { Forget(); }
protected:
~TrackListener() = default;
private:
Atomic<bool> mEnded;
const RefPtr<SourceMediaStream> mSourceStream;
const TrackID mTrackId;
const PrincipalHandle mPrincipalHandle;
Mutex mMutex;
// The below members are protected by mMutex.
RefPtr<layers::Image> mImage;
TimeStamp mImageTime;
};
OutputStreamDriver::OutputStreamDriver(SourceMediaStream* aSourceStream,
const TrackID& aTrackId,
const PrincipalHandle& aPrincipalHandle)
: FrameCaptureListener(),
mTrackId(aTrackId),
mSourceStream(aSourceStream),
mTrackListener(
new TrackListener(aTrackId, aPrincipalHandle, aSourceStream)) {
mPrincipalHandle(aPrincipalHandle) {
MOZ_ASSERT(NS_IsMainThread());
MOZ_ASSERT(mSourceStream);
MOZ_ASSERT(IsTrackIDExplicit(mTrackId));
mSourceStream->AddTrack(aTrackId, new VideoSegment());
mSourceStream->AddTrackListener(mTrackListener, aTrackId);
mSourceStream->SetPullingEnabled(aTrackId, true);
// All CanvasCaptureMediaStreams shall get at least one frame.
mFrameCaptureRequested = true;
@ -123,16 +39,24 @@ OutputStreamDriver::OutputStreamDriver(SourceMediaStream* aSourceStream,
OutputStreamDriver::~OutputStreamDriver() {
MOZ_ASSERT(NS_IsMainThread());
// MediaStreamGraph will keep the listener alive until it can end the track in
// the graph on the next NotifyPull().
mTrackListener->EndTrack();
EndTrack();
}
void OutputStreamDriver::EndTrack() { mTrackListener->EndTrack(); }
void OutputStreamDriver::EndTrack() {
MOZ_ASSERT(NS_IsMainThread());
mSourceStream->EndTrack(mTrackId);
}
void OutputStreamDriver::SetImage(const RefPtr<layers::Image>& aImage,
const TimeStamp& aTime) {
mTrackListener->SetImage(aImage, aTime);
MOZ_ASSERT(NS_IsMainThread());
TRACE_COMMENT("SourceMediaStream %p track %i", mSourceStream.get(), mTrackId);
VideoSegment segment;
segment.AppendFrame(do_AddRef(aImage), aImage->GetSize(), mPrincipalHandle,
false, aTime);
mSourceStream->AppendToTrack(mTrackId, &segment);
}
// ----------------------------------------------------------------------


@ -14,7 +14,6 @@ class nsIPrincipal;
namespace mozilla {
class DOMMediaStream;
class MediaStreamListener;
class SourceMediaStream;
namespace layers {
@ -44,16 +43,10 @@ class OutputStreamFrameListener;
* | Canvas | SetFrameCapture() | (FrameCaptureListener) |
* |________| ------------------------> |________________________|
* |
* | SetImage()
* | SetImage() -
* | AppendToTrack()
* |
* v
* ____________________
* |Stream/TrackListener|
* ---------------------------------------| (All image access |---------------
* === MediaStreamGraph Thread === | Mutex Guarded) |
* |____________________|
* ^ |
* NotifyPull() | | AppendToTrack()
* | v
* ___________________________
* | |
* | MSG / SourceMediaStream |
@ -93,12 +86,11 @@ class OutputStreamDriver : public FrameCaptureListener {
protected:
virtual ~OutputStreamDriver();
class StreamListener;
class TrackListener;
private:
const TrackID mTrackId;
const RefPtr<SourceMediaStream> mSourceStream;
const RefPtr<TrackListener> mTrackListener;
const PrincipalHandle mPrincipalHandle;
};
class CanvasCaptureMediaStream : public DOMMediaStream {


@ -0,0 +1,136 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#ifndef DriftCompensation_h_
#define DriftCompensation_h_
#include "MediaSegment.h"
#include "VideoUtils.h"
#include "mozilla/Atomics.h"
#include "mozilla/Unused.h"
namespace mozilla {
static LazyLogModule gDriftCompensatorLog("DriftCompensator");
#define LOG(type, ...) MOZ_LOG(gDriftCompensatorLog, type, (__VA_ARGS__))
/**
* DriftCompensator can be used to handle drift between audio and video tracks
* from the MediaStreamGraph.
*
* Drift can occur because audio is driven by a MediaStreamGraph running off an
audio callback, thus it's progressed by the clock of one of the audio output
* devices on the user's machine. Video on the other hand is always expressed in
* wall-clock TimeStamps, i.e., it's progressed by the system clock. These
* clocks will, over time, drift apart.
*
* Do not use the DriftCompensator across multiple audio tracks: it records
* the start time of the first audio samples it sees, and to retain accuracy
* it must process all samples from that same audio track, driven by the same
* audio clock.
*
* DriftCompensator is designed to be used from two threads:
* - The audio thread for notifications of audio samples.
* - The video thread for compensating drift of video frames to match the audio
* clock.
*/
class DriftCompensator {
const RefPtr<nsIEventTarget> mVideoThread;
const TrackRate mAudioRate;
// Number of audio samples produced. Any thread.
Atomic<StreamTime> mAudioSamples{0};
// Time the first audio samples were added. mVideoThread only.
TimeStamp mAudioStartTime;
void SetAudioStartTime(TimeStamp aTime) {
MOZ_ASSERT(mVideoThread->IsOnCurrentThread());
MOZ_ASSERT(mAudioStartTime.IsNull());
mAudioStartTime = aTime;
}
protected:
virtual ~DriftCompensator() = default;
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DriftCompensator)
DriftCompensator(RefPtr<nsIEventTarget> aVideoThread, TrackRate aAudioRate)
: mVideoThread(std::move(aVideoThread)), mAudioRate(aAudioRate) {
MOZ_ASSERT(mAudioRate > 0);
}
void NotifyAudioStart(TimeStamp aStart) {
MOZ_ASSERT(mAudioSamples == 0);
LOG(LogLevel::Info, "DriftCompensator %p at rate %d started", this,
mAudioRate);
nsresult rv = mVideoThread->Dispatch(NewRunnableMethod<TimeStamp>(
"DriftCompensator::SetAudioStartTime", this,
&DriftCompensator::SetAudioStartTime, aStart));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
}
/**
* aSamples is the number of samples fed by an AudioStream.
*/
void NotifyAudio(StreamTime aSamples) {
MOZ_ASSERT(aSamples > 0);
mAudioSamples += aSamples;
LOG(LogLevel::Verbose,
"DriftCompensator %p Processed another %" PRId64
" samples; now %.3fs audio",
this, aSamples, static_cast<double>(mAudioSamples) / mAudioRate);
}
/**
* Drift compensates a video TimeStamp based on historical audio data.
*/
virtual TimeStamp GetVideoTime(TimeStamp aNow, TimeStamp aTime) {
MOZ_ASSERT(mVideoThread->IsOnCurrentThread());
StreamTime samples = mAudioSamples;
if (samples / mAudioRate < 10) {
// We don't apply compensation for the first 10 seconds because of the
// higher inaccuracy during this time.
LOG(LogLevel::Debug, "DriftCompensator %p %" PRId64 "ms so far; ignoring",
this, samples * 1000 / mAudioRate);
return aTime;
}
int64_t videoScaleUs = (aNow - mAudioStartTime).ToMicroseconds();
int64_t audioScaleUs = FramesToUsecs(samples, mAudioRate).value();
int64_t videoDurationUs = (aTime - mAudioStartTime).ToMicroseconds();
if (videoScaleUs == 0) {
videoScaleUs = audioScaleUs;
}
TimeStamp reclocked =
mAudioStartTime +
TimeDuration::FromMicroseconds(
SaferMultDiv(videoDurationUs, audioScaleUs, videoScaleUs).value());
LOG(LogLevel::Debug,
"DriftCompensator %p GetVideoTime, v-now: %.3fs, a-now: %.3fs; %.3fs "
"-> %.3fs (d %.3fms)",
this, (aNow - mAudioStartTime).ToSeconds(),
static_cast<double>(audioScaleUs) / 1000000.0,
(aTime - mAudioStartTime).ToSeconds(),
(reclocked - mAudioStartTime).ToSeconds(),
(reclocked - aTime).ToMilliseconds());
return reclocked;
}
};
#undef LOG
} // namespace mozilla
#endif /* DriftCompensation_h_ */
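The re-clocking in GetVideoTime above is a linear rescale: past the 10-second warm-up, a frame's offset from the audio start time is multiplied by the ratio of elapsed audio-clock time to elapsed system-clock time. A minimal usage sketch under assumptions (a videoThread event target and a frameTime TimeStamp in scope; the 48 kHz rate and the drift numbers are illustrative, not from this patch):

// Hypothetical call pattern for DriftCompensator; names and numbers are
// illustrative assumptions, not part of this patch.
RefPtr<DriftCompensator> comp =
    new DriftCompensator(videoThread, /* aAudioRate = */ 48000);

// Audio callback thread: announce the start, then feed sample counts.
comp->NotifyAudioStart(TimeStamp::Now());
comp->NotifyAudio(480);  // 10 ms of audio at 48 kHz

// Video thread: re-clock a frame timestamp against the audio clock.
// If the system clock saw 3600 s while the audio clock produced 3599 s of
// samples, a frame captured 1800 s after audio start re-clocks to
// 1800 * 3599 / 3600 = 1799.5 s on the audio clock.
TimeStamp onAudioClock = comp->GetVideoTime(TimeStamp::Now(), frameTime);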


@ -34,7 +34,6 @@ GraphDriver::GraphDriver(MediaStreamGraphImpl* aGraphImpl)
: mIterationStart(0),
mIterationEnd(0),
mGraphImpl(aGraphImpl),
mCurrentTimeStamp(TimeStamp::Now()),
mPreviousDriver(nullptr),
mNextDriver(nullptr) {}
@ -263,6 +262,7 @@ void ThreadedDriver::Shutdown() {
SystemClockDriver::SystemClockDriver(MediaStreamGraphImpl* aGraphImpl)
: ThreadedDriver(aGraphImpl),
mInitialTimeStamp(TimeStamp::Now()),
mCurrentTimeStamp(TimeStamp::Now()),
mLastTimeStamp(TimeStamp::Now()),
mIsFallback(false) {}
@ -345,11 +345,6 @@ MediaTime SystemClockDriver::GetIntervalForIteration() {
return interval;
}
TimeStamp OfflineClockDriver::GetCurrentTimeStamp() {
MOZ_CRASH("This driver does not support getting the current timestamp.");
return TimeStamp();
}
void ThreadedDriver::WaitForNextIteration() {
GraphImpl()->GetMonitor().AssertCurrentThreadOwns();
@ -872,8 +867,6 @@ long AudioCallbackDriver::DataCallback(const AudioDataValue* aInputBuffer,
(uint32_t)durationMS,
(long)(nextStateComputedTime - stateComputedTime)));
mCurrentTimeStamp = TimeStamp::Now();
if (stateComputedTime < mIterationEnd) {
LOG(LogLevel::Error,
("%p: Media graph global underrun detected", GraphImpl()));


@ -151,12 +151,6 @@ class GraphDriver {
GraphDriver* PreviousDriver();
void SetPreviousDriver(GraphDriver* aPreviousDriver);
/**
* If we are running a real time graph, get the current time stamp to schedule
* video frames. This has to be reimplemented by real time drivers.
*/
virtual TimeStamp GetCurrentTimeStamp() { return mCurrentTimeStamp; }
GraphTime IterationEnd() { return mIterationEnd; }
virtual AudioCallbackDriver* AsAudioCallbackDriver() { return nullptr; }
@ -210,10 +204,6 @@ class GraphDriver {
// The MediaStreamGraphImpl associated with this driver.
const RefPtr<MediaStreamGraphImpl> mGraphImpl;
// This is used on the main thread (during initialization), and the graph
// thread. No monitor needed because we know the graph thread does not run
// during the initialization.
TimeStamp mCurrentTimeStamp;
// This is non-null only when this driver has recently switched from an other
// driver, and has not cleaned it up yet (for example because the audio stream
// is currently calling the callback during initialization).
@ -297,6 +287,7 @@ class SystemClockDriver : public ThreadedDriver {
// Those are only modified (after initialization) on the graph thread. The
// graph thread does not run during the initialization.
TimeStamp mInitialTimeStamp;
TimeStamp mCurrentTimeStamp;
TimeStamp mLastTimeStamp;
// This is true if this SystemClockDriver runs the graph because we could not
@ -314,7 +305,6 @@ class OfflineClockDriver : public ThreadedDriver {
virtual ~OfflineClockDriver();
TimeDuration WaitInterval() override;
MediaTime GetIntervalForIteration() override;
TimeStamp GetCurrentTimeStamp() override;
OfflineClockDriver* AsOfflineClockDriver() override { return this; }
private:


@ -4291,54 +4291,49 @@ SourceListener::InitializeAsync() {
LOG("started all sources");
aHolder.Resolve(true, __func__);
})
->Then(
GetMainThreadSerialEventTarget(), __func__,
[self = RefPtr<SourceListener>(this), this]() {
if (mStopped) {
// We were shut down during the async init
return SourceListenerPromise::CreateAndResolve(true, __func__);
}
->Then(GetMainThreadSerialEventTarget(), __func__,
[self = RefPtr<SourceListener>(this), this]() {
if (mStopped) {
// We were shut down during the async init
return SourceListenerPromise::CreateAndResolve(true, __func__);
}
for (DeviceState* state :
{mAudioDeviceState.get(), mVideoDeviceState.get()}) {
if (!state) {
continue;
}
MOZ_DIAGNOSTIC_ASSERT(!state->mTrackEnabled);
MOZ_DIAGNOSTIC_ASSERT(!state->mDeviceEnabled);
MOZ_DIAGNOSTIC_ASSERT(!state->mStopped);
for (DeviceState* state :
{mAudioDeviceState.get(), mVideoDeviceState.get()}) {
if (!state) {
continue;
}
MOZ_DIAGNOSTIC_ASSERT(!state->mTrackEnabled);
MOZ_DIAGNOSTIC_ASSERT(!state->mDeviceEnabled);
MOZ_DIAGNOSTIC_ASSERT(!state->mStopped);
state->mDeviceEnabled = true;
state->mTrackEnabled = true;
state->mTrackEnabledTime = TimeStamp::Now();
state->mDeviceEnabled = true;
state->mTrackEnabled = true;
state->mTrackEnabledTime = TimeStamp::Now();
}
return SourceListenerPromise::CreateAndResolve(true, __func__);
},
[self = RefPtr<SourceListener>(this),
this](RefPtr<MediaMgrError>&& aResult) {
if (mStopped) {
return SourceListenerPromise::CreateAndReject(
std::move(aResult), __func__);
}
if (state == mVideoDeviceState.get() && !mStream->IsDestroyed()) {
mStream->SetPullingEnabled(kVideoTrack, true);
}
}
return SourceListenerPromise::CreateAndResolve(true, __func__);
},
[self = RefPtr<SourceListener>(this),
this](RefPtr<MediaMgrError>&& aResult) {
if (mStopped) {
return SourceListenerPromise::CreateAndReject(std::move(aResult),
__func__);
}
for (DeviceState* state :
{mAudioDeviceState.get(), mVideoDeviceState.get()}) {
if (!state) {
continue;
}
MOZ_DIAGNOSTIC_ASSERT(!state->mTrackEnabled);
MOZ_DIAGNOSTIC_ASSERT(!state->mDeviceEnabled);
MOZ_DIAGNOSTIC_ASSERT(!state->mStopped);
for (DeviceState* state :
{mAudioDeviceState.get(), mVideoDeviceState.get()}) {
if (!state) {
continue;
}
MOZ_DIAGNOSTIC_ASSERT(!state->mTrackEnabled);
MOZ_DIAGNOSTIC_ASSERT(!state->mDeviceEnabled);
MOZ_DIAGNOSTIC_ASSERT(!state->mStopped);
state->mStopped = true;
}
return SourceListenerPromise::CreateAndReject(std::move(aResult),
__func__);
});
state->mStopped = true;
}
return SourceListenerPromise::CreateAndReject(std::move(aResult),
__func__);
});
}
void SourceListener::Stop() {
@ -4396,7 +4391,6 @@ void SourceListener::Remove() {
mStream->RemoveTrackListener(mAudioDeviceState->mListener, kAudioTrack);
}
if (mVideoDeviceState) {
mStream->SetPullingEnabled(kVideoTrack, false);
mStream->RemoveTrackListener(mVideoDeviceState->mListener, kVideoTrack);
}
}


@ -592,7 +592,7 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
return NS_ERROR_FAILURE;
}
mEncoder->Suspend(TimeStamp::Now());
mEncoder->Suspend();
NS_DispatchToMainThread(
new DispatchEventRunnable(this, NS_LITERAL_STRING("pause")));
return NS_OK;
@ -606,7 +606,7 @@ class MediaRecorder::Session : public PrincipalChangeObserver<MediaStreamTrack>,
return NS_ERROR_FAILURE;
}
mEncoder->Resume(TimeStamp::Now());
mEncoder->Resume();
NS_DispatchToMainThread(
new DispatchEventRunnable(this, NS_LITERAL_STRING("resume")));
return NS_OK;


@ -367,25 +367,6 @@ class MediaSegmentBase : public MediaSegment {
uint32_t mIndex;
};
Chunk* FindChunkContaining(StreamTime aOffset, StreamTime* aStart = nullptr) {
if (aOffset < 0) {
return nullptr;
}
StreamTime offset = 0;
for (uint32_t i = 0; i < mChunks.Length(); ++i) {
Chunk& c = mChunks[i];
StreamTime nextOffset = offset + c.GetDuration();
if (aOffset < nextOffset) {
if (aStart) {
*aStart = offset;
}
return &c;
}
offset = nextOffset;
}
return nullptr;
}
void RemoveLeading(StreamTime aDuration) { RemoveLeading(aDuration, 0); }
size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override {


@ -22,7 +22,6 @@
#include "AudioNodeStream.h"
#include "AudioNodeExternalInputStream.h"
#include "MediaStreamListener.h"
#include "MediaStreamVideoSink.h"
#include "mozilla/dom/BaseAudioContextBinding.h"
#include "mozilla/media/MediaUtils.h"
#include <algorithm>
@ -81,15 +80,6 @@ void MediaStreamGraphImpl::AddStreamGraphThread(MediaStream* aStream) {
MOZ_ASSERT(OnGraphThreadOrNotRunning());
aStream->mTracksStartTime = mProcessedTime;
if (aStream->AsSourceStream()) {
SourceMediaStream* source = aStream->AsSourceStream();
TimeStamp currentTimeStamp = CurrentDriver()->GetCurrentTimeStamp();
TimeStamp processedTimeStamp =
currentTimeStamp + TimeDuration::FromSeconds(MediaTimeToSeconds(
mProcessedTime - IterationEnd()));
source->SetStreamTracksStartTimeStamp(processedTimeStamp);
}
if (aStream->IsSuspended()) {
mSuspendedStreams.AppendElement(aStream);
LOG(LogLevel::Debug,
@ -1193,7 +1183,7 @@ void MediaStreamGraphImpl::UpdateGraph(GraphTime aEndBlockingDecisions) {
for (MediaStream* stream : mStreams) {
if (SourceMediaStream* is = stream->AsSourceStream()) {
ensureNextIteration |= is->PullNewData(aEndBlockingDecisions);
is->ExtractPendingInput(mStateComputedTime);
is->ExtractPendingInput(mStateComputedTime, aEndBlockingDecisions);
}
if (stream->mFinished) {
// The stream's not suspended, and since it's finished, underruns won't
@ -1890,14 +1880,12 @@ size_t MediaStream::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
// - mGraph - Not reported here
// - mConsumers - elements
// Future:
// - mVideoOutputs - elements
// - mLastPlayedVideoFrame
// - mTrackListeners - elements
// - mAudioOutputStream - elements
amount += mTracks.SizeOfExcludingThis(aMallocSizeOf);
amount += mAudioOutputs.ShallowSizeOfExcludingThis(aMallocSizeOf);
amount += mVideoOutputs.ShallowSizeOfExcludingThis(aMallocSizeOf);
amount += mTrackListeners.ShallowSizeOfExcludingThis(aMallocSizeOf);
amount += mMainThreadListeners.ShallowSizeOfExcludingThis(aMallocSizeOf);
amount += mDisabledTracks.ShallowSizeOfExcludingThis(aMallocSizeOf);
@ -2015,12 +2003,6 @@ void MediaStream::RemoveAllListenersImpl() {
mTrackListeners.Clear();
RemoveAllDirectListenersImpl();
auto videoOutputs(mVideoOutputs);
for (auto& l : videoOutputs) {
l.mListener->NotifyRemoved();
}
mVideoOutputs.Clear();
}
void MediaStream::DestroyImpl() {
@ -2134,71 +2116,6 @@ void MediaStream::RemoveAudioOutput(void* aKey) {
GraphImpl()->AppendMessage(MakeUnique<Message>(this, aKey));
}
void MediaStream::AddVideoOutputImpl(
already_AddRefed<MediaStreamVideoSink> aSink, TrackID aID) {
RefPtr<MediaStreamVideoSink> sink = aSink;
LOG(LogLevel::Info,
("MediaStream %p Adding MediaStreamVideoSink %p as output", this,
sink.get()));
MOZ_ASSERT(aID != TRACK_NONE);
for (auto entry : mVideoOutputs) {
if (entry.mListener == sink &&
(entry.mTrackID == TRACK_ANY || entry.mTrackID == aID)) {
return;
}
}
TrackBound<MediaStreamVideoSink>* l = mVideoOutputs.AppendElement();
l->mListener = sink;
l->mTrackID = aID;
AddDirectTrackListenerImpl(sink.forget(), aID);
}
void MediaStream::RemoveVideoOutputImpl(MediaStreamVideoSink* aSink,
TrackID aID) {
LOG(LogLevel::Info,
("MediaStream %p Removing MediaStreamVideoSink %p as output", this,
aSink));
MOZ_ASSERT(aID != TRACK_NONE);
// Ensure that any frames currently queued for playback by the compositor
// are removed.
aSink->ClearFrames();
for (size_t i = 0; i < mVideoOutputs.Length(); ++i) {
if (mVideoOutputs[i].mListener == aSink &&
(mVideoOutputs[i].mTrackID == TRACK_ANY ||
mVideoOutputs[i].mTrackID == aID)) {
mVideoOutputs.RemoveElementAt(i);
}
}
RemoveDirectTrackListenerImpl(aSink, aID);
}
void MediaStream::AddVideoOutput(MediaStreamVideoSink* aSink, TrackID aID) {
class Message : public ControlMessage {
public:
Message(MediaStream* aStream, MediaStreamVideoSink* aSink, TrackID aID)
: ControlMessage(aStream), mSink(aSink), mID(aID) {}
void Run() override { mStream->AddVideoOutputImpl(mSink.forget(), mID); }
RefPtr<MediaStreamVideoSink> mSink;
TrackID mID;
};
GraphImpl()->AppendMessage(MakeUnique<Message>(this, aSink, aID));
}
void MediaStream::RemoveVideoOutput(MediaStreamVideoSink* aSink, TrackID aID) {
class Message : public ControlMessage {
public:
Message(MediaStream* aStream, MediaStreamVideoSink* aSink, TrackID aID)
: ControlMessage(aStream), mSink(aSink), mID(aID) {}
void Run() override { mStream->RemoveVideoOutputImpl(mSink, mID); }
RefPtr<MediaStreamVideoSink> mSink;
TrackID mID;
};
GraphImpl()->AppendMessage(MakeUnique<Message>(this, aSink, aID));
}
void MediaStream::Suspend() {
class Message : public ControlMessage {
public:
@ -2251,6 +2168,9 @@ void MediaStream::AddTrackListenerImpl(
GraphTimeToStreamTime(GraphImpl()->mStateComputedTime)) {
l->mListener->NotifyEnded();
}
if (GetDisabledTrackMode(aTrackID) == DisabledTrackMode::SILENCE_BLACK) {
l->mListener->NotifyEnabledStateChanged(false);
}
}
void MediaStream::AddTrackListener(MediaStreamTrackListener* aListener,
@ -2388,6 +2308,11 @@ void MediaStream::SetTrackEnabledImpl(TrackID aTrackID,
for (int32_t i = mDisabledTracks.Length() - 1; i >= 0; --i) {
if (aTrackID == mDisabledTracks[i].mTrackID) {
mDisabledTracks.RemoveElementAt(i);
for (TrackBound<MediaStreamTrackListener>& l : mTrackListeners) {
if (l.mTrackID == aTrackID) {
l.mListener->NotifyEnabledStateChanged(true);
}
}
return;
}
}
@ -2399,6 +2324,13 @@ void MediaStream::SetTrackEnabledImpl(TrackID aTrackID,
}
}
mDisabledTracks.AppendElement(DisabledTrack(aTrackID, aMode));
if (aMode == DisabledTrackMode::SILENCE_BLACK) {
for (TrackBound<MediaStreamTrackListener>& l : mTrackListeners) {
if (l.mTrackID == aTrackID) {
l.mListener->NotifyEnabledStateChanged(false);
}
}
}
}
}
@ -2537,6 +2469,8 @@ void SourceMediaStream::SetPullingEnabled(TrackID aTrackID, bool aEnabled) {
mStream->mTracks.FindTrack(mTrackID)->IsEnded());
return;
}
MOZ_ASSERT(data->mData->GetType() == MediaSegment::AUDIO,
"Pulling is not allowed for video");
data->mPullingEnabled = mEnabled;
}
SourceMediaStream* mStream;
@ -2605,11 +2539,13 @@ bool SourceMediaStream::PullNewData(GraphTime aDesiredUpToTime) {
return true;
}
void SourceMediaStream::ExtractPendingInput(GraphTime aCurrentTime) {
void SourceMediaStream::ExtractPendingInput(GraphTime aCurrentTime,
GraphTime aDesiredUpToTime) {
MutexAutoLock lock(mMutex);
bool finished = mFinishPending;
StreamTime streamCurrentTime = GraphTimeToStreamTime(aCurrentTime);
StreamTime streamDesiredUpToTime = GraphTimeToStreamTime(aDesiredUpToTime);
for (int32_t i = mUpdateTracks.Length() - 1; i >= 0; --i) {
SourceMediaStream::TrackData* data = &mUpdateTracks[i];
@ -2644,7 +2580,7 @@ void SourceMediaStream::ExtractPendingInput(GraphTime aCurrentTime) {
// data->mData with an empty clone.
data->mData = segment->CreateEmptyClone();
data->mCommands &= ~SourceMediaStream::TRACK_CREATE;
} else if (data->mData->GetDuration() > 0) {
} else {
MediaSegment* dest = mTracks.FindTrack(data->mID)->GetSegment();
LOG(LogLevel::Verbose,
("%p: SourceMediaStream %p track %d, advancing end from %" PRId64
@ -2657,6 +2593,16 @@ void SourceMediaStream::ExtractPendingInput(GraphTime aCurrentTime) {
if (data->mCommands & SourceMediaStream::TRACK_END) {
mTracks.FindTrack(data->mID)->SetEnded();
mUpdateTracks.RemoveElementAt(i);
} else if (!data->mPullingEnabled &&
data->mData->GetType() == MediaSegment::VIDEO) {
// This video track is pushed. Since video uses timestamps rather than
// durations, we keep the track from blocking the stream by extending the
// last frame's duration when there's not enough video data, so a video
// track always has valid data.
VideoSegment* segment = static_cast<VideoSegment*>(
mTracks.FindTrack(data->mID)->GetSegment());
StreamTime missingTime = streamDesiredUpToTime - segment->GetDuration();
segment->ExtendLastFrameBy(missingTime);
}
}
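A self-contained sketch of the padding trick above, using a toy segment type (not Mozilla's VideoSegment) to show the bookkeeping:

#include <cstdint>
#include <vector>

// Toy stand-in for a video segment: frames carry explicit durations.
struct ToyChunk { int64_t frameId; int64_t duration; };

struct ToyVideoSegment {
  std::vector<ToyChunk> chunks;
  int64_t Duration() const {
    int64_t d = 0;
    for (const ToyChunk& c : chunks) d += c.duration;
    return d;
  }
  // Grow the last frame instead of appending data, mirroring
  // VideoSegment::ExtendLastFrameBy in the hunk above.
  void ExtendLastFrameBy(int64_t aTicks) {
    if (!chunks.empty() && aTicks > 0) {
      chunks.back().duration += aTicks;
    }
  }
};

// Pad a pushed track up to the graph's desired time so it never blocks.
void TopUpPushedTrack(ToyVideoSegment& aSegment, int64_t aDesiredUpToTime) {
  aSegment.ExtendLastFrameBy(aDesiredUpToTime - aSegment.Duration());
}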
@ -2737,8 +2683,6 @@ void SourceMediaStream::AdvanceTimeVaryingValuesToCurrentTime(
GraphTime aCurrentTime, GraphTime aBlockedTime) {
MutexAutoLock lock(mMutex);
mTracksStartTime += aBlockedTime;
mStreamTracksStartTimeStamp +=
TimeDuration::FromSeconds(GraphImpl()->MediaTimeToSeconds(aBlockedTime));
mTracks.ForgetUpTo(aCurrentTime - mTracksStartTime);
}
@ -2815,16 +2759,7 @@ void SourceMediaStream::AddDirectTrackListenerImpl(
return;
}
bool isAudio = track->GetType() == MediaSegment::AUDIO;
bool isVideo = track->GetType() == MediaSegment::VIDEO;
if (!isAudio && !isVideo) {
LOG(LogLevel::Warning,
("%p: Source track for direct track listener %p is unknown",
GraphImpl(), listener.get()));
MOZ_ASSERT(false);
return;
}
MOZ_ASSERT(track->GetType() == MediaSegment::VIDEO);
for (auto entry : mDirectTrackListeners) {
if (entry.mListener == listener &&
(entry.mTrackID == TRACK_ANY || entry.mTrackID == aTrackID)) {
@ -2845,24 +2780,44 @@ void SourceMediaStream::AddDirectTrackListenerImpl(
DirectMediaStreamTrackListener::InstallationResult::SUCCESS);
// Pass buffered data to the listener
AudioSegment bufferedAudio;
VideoSegment bufferedVideo;
MediaSegment& bufferedData = isAudio
? static_cast<MediaSegment&>(bufferedAudio)
: static_cast<MediaSegment&>(bufferedVideo);
MediaSegment& trackSegment = *track->GetSegment();
if (mTracks.GetForgottenDuration() < trackSegment.GetDuration()) {
bufferedData.AppendSlice(trackSegment, mTracks.GetForgottenDuration(),
trackSegment.GetDuration());
VideoSegment bufferedData;
size_t videoFrames = 0;
// For video we append all non-null chunks, as we're only interested in
// real frames and their timestamps.
VideoSegment& trackSegment = static_cast<VideoSegment&>(*track->GetSegment());
for (VideoSegment::ConstChunkIterator iter(trackSegment); !iter.IsEnded();
iter.Next()) {
if (iter->IsNull()) {
continue;
}
++videoFrames;
MOZ_ASSERT(!iter->mTimeStamp.IsNull());
bufferedData.AppendFrame(do_AddRef(iter->mFrame.GetImage()),
iter->mFrame.GetIntrinsicSize(),
iter->mFrame.GetPrincipalHandle(),
iter->mFrame.GetForceBlack(), iter->mTimeStamp);
}
if (TrackData* updateData = FindDataForTrack(aTrackID)) {
bufferedData.AppendSlice(*updateData->mData, 0,
updateData->mData->GetDuration());
VideoSegment& video = static_cast<VideoSegment&>(*updateData->mData);
for (VideoSegment::ConstChunkIterator iter(video); !iter.IsEnded();
iter.Next()) {
if (iter->IsNull()) {
continue;
}
++videoFrames;
bufferedData.AppendFrame(do_AddRef(iter->mFrame.GetImage()),
iter->mFrame.GetIntrinsicSize(),
iter->mFrame.GetPrincipalHandle(),
iter->mFrame.GetForceBlack(), iter->mTimeStamp);
}
}
if (bufferedData.GetDuration() != 0) {
LOG(LogLevel::Info,
("%p: Notifying direct listener %p of %zu video frames and duration "
"%" PRId64,
GraphImpl(), listener.get(), videoFrames, bufferedData.GetDuration()));
if (!bufferedData.IsNull()) {
listener->NotifyRealtimeTrackData(Graph(), 0, bufferedData);
}
}
@ -2880,16 +2835,6 @@ void SourceMediaStream::RemoveDirectTrackListenerImpl(
}
}
StreamTime SourceMediaStream::GetEndOfAppendedData(TrackID aID) {
MutexAutoLock lock(mMutex);
TrackData* track = FindDataForTrack(aID);
if (track) {
return track->mEndOfFlushedData + track->mData->GetDuration();
}
MOZ_CRASH("Track not found");
return 0;
}
void SourceMediaStream::EndTrack(TrackID aID) {
MutexAutoLock lock(mMutex);
TrackData* track = FindDataForTrack(aID);


@ -176,7 +176,6 @@ class DirectMediaStreamTrackListener;
class MediaInputPort;
class MediaStreamGraphImpl;
class MediaStreamTrackListener;
class MediaStreamVideoSink;
class ProcessedMediaStream;
class SourceMediaStream;
class TrackUnionStream;
@ -217,9 +216,8 @@ struct TrackBound {
*
* Any stream can have its audio and video playing when requested. The media
* stream graph plays audio by constructing audio output streams as necessary.
* Video is played by setting video frames into an MediaStreamVideoSink at the
* right time. To ensure video plays in sync with audio, make sure that the same
* stream is playing both the audio and video.
* Video is played through a DirectMediaStreamTrackListener managed by
* VideoStreamTrack.
*
* The data in a stream is managed by StreamTracks. It consists of a set of
* tracks of various types that can start and end over time.
@ -298,13 +296,6 @@ class MediaStream : public mozilla::LinkedListElement<MediaStream> {
virtual void AddAudioOutput(void* aKey);
virtual void SetAudioOutputVolume(void* aKey, float aVolume);
virtual void RemoveAudioOutput(void* aKey);
// Since a stream can be played multiple ways, we need to be able to
// play to multiple MediaStreamVideoSinks.
// Only the first enabled video track is played.
virtual void AddVideoOutput(MediaStreamVideoSink* aSink,
TrackID aID = TRACK_ANY);
virtual void RemoveVideoOutput(MediaStreamVideoSink* aSink,
TrackID aID = TRACK_ANY);
// Explicitly suspend. Useful for example if a media element is pausing
// and we need to stop its stream emitting its buffered data. As soon as the
// Suspend message reaches the graph, the stream stops processing. It
@ -421,9 +412,6 @@ class MediaStream : public mozilla::LinkedListElement<MediaStream> {
void SetAudioOutputVolumeImpl(void* aKey, float aVolume);
void AddAudioOutputImpl(void* aKey);
void RemoveAudioOutputImpl(void* aKey);
void AddVideoOutputImpl(already_AddRefed<MediaStreamVideoSink> aSink,
TrackID aID);
void RemoveVideoOutputImpl(MediaStreamVideoSink* aSink, TrackID aID);
/**
* Removes all direct listeners and signals to them that they have been
@ -566,7 +554,6 @@ class MediaStream : public mozilla::LinkedListElement<MediaStream> {
float mVolume;
};
nsTArray<AudioOutput> mAudioOutputs;
nsTArray<TrackBound<MediaStreamVideoSink>> mVideoOutputs;
// We record the last played video frame to avoid playing the frame again
// with a different frame id.
VideoFrame mLastPlayedVideoFrame;
@ -687,7 +674,7 @@ class SourceMediaStream : public MediaStream {
/**
* Extract any state updates pending in the stream, and apply them.
*/
void ExtractPendingInput(GraphTime aCurrentTime);
void ExtractPendingInput(GraphTime aCurrentTime, GraphTime aDesiredUpToTime);
enum {
ADDTRACK_QUEUED = 0x01 // Queue track add until FinishAddTracks()
@ -720,13 +707,6 @@ class SourceMediaStream : public MediaStream {
*/
virtual StreamTime AppendToTrack(TrackID aID, MediaSegment* aSegment,
MediaSegment* aRawSegment = nullptr);
/**
* Get the stream time of the end of the data that has been appended so far.
* Can be called from any thread but won't be useful if it can race with
* an AppendToTrack call, so should probably just be called from the thread
* that also calls AppendToTrack.
*/
StreamTime GetEndOfAppendedData(TrackID aID);
/**
* Indicate that a track has ended. Do not do any more API calls
* affecting this track.
@ -771,11 +751,6 @@ class SourceMediaStream : public MediaStream {
*/
bool HasPendingAudioTrack();
TimeStamp GetStreamTracksStrartTimeStamp() {
MutexAutoLock lock(mMutex);
return mStreamTracksStartTimeStamp;
}
// XXX need a Reset API
friend class MediaStreamGraphImpl;
@ -842,10 +817,6 @@ class SourceMediaStream : public MediaStream {
virtual void AdvanceTimeVaryingValuesToCurrentTime(
GraphTime aCurrentTime, GraphTime aBlockedTime) override;
void SetStreamTracksStartTimeStamp(const TimeStamp& aTimeStamp) {
MutexAutoLock lock(mMutex);
mStreamTracksStartTimeStamp = aTimeStamp;
}
// Only accessed on the MSG thread. Used so to ask the MSGImpl to usecount
// users of a specific input.
@ -857,10 +828,6 @@ class SourceMediaStream : public MediaStream {
// held together.
Mutex mMutex;
// protected by mMutex
// This time stamp will be updated in adding and blocked SourceMediaStream,
// |AddStreamGraphThread| and |AdvanceTimeVaryingValuesToCurrentTime| in
// particularly.
TimeStamp mStreamTracksStartTimeStamp;
nsTArray<TrackData> mUpdateTracks;
nsTArray<TrackData> mPendingTracks;
nsTArray<TrackBound<DirectMediaStreamTrackListener>> mDirectTrackListeners;


@ -19,18 +19,17 @@ namespace mozilla {
void DirectMediaStreamTrackListener::MirrorAndDisableSegment(
AudioSegment& aFrom, AudioSegment& aTo) {
aTo.Clear();
aTo.AppendNullData(aFrom.GetDuration());
}
void DirectMediaStreamTrackListener::MirrorAndDisableSegment(
VideoSegment& aFrom, VideoSegment& aTo, DisabledTrackMode aMode) {
aTo.Clear();
if (aMode == DisabledTrackMode::SILENCE_BLACK) {
for (VideoSegment::ChunkIterator it(aFrom); !it.IsEnded(); it.Next()) {
aTo.AppendFrame(do_AddRef(it->mFrame.GetImage()), it->GetDuration(),
aTo.AppendFrame(do_AddRef(it->mFrame.GetImage()),
it->mFrame.GetIntrinsicSize(), it->GetPrincipalHandle(),
true);
aTo.ExtendLastFrameBy(it->GetDuration());
}
} else if (aMode == DisabledTrackMode::SILENCE_FREEZE) {
aTo.AppendNullData(aFrom.GetDuration());
@ -49,19 +48,17 @@ void DirectMediaStreamTrackListener::
DisabledTrackMode mode = mDisabledBlackCount > 0
? DisabledTrackMode::SILENCE_BLACK
: DisabledTrackMode::SILENCE_FREEZE;
if (!mMedia) {
mMedia = aMedia.CreateEmptyClone();
}
UniquePtr<MediaSegment> media(aMedia.CreateEmptyClone());
if (aMedia.GetType() == MediaSegment::AUDIO) {
MirrorAndDisableSegment(static_cast<AudioSegment&>(aMedia),
static_cast<AudioSegment&>(*mMedia));
static_cast<AudioSegment&>(*media));
} else if (aMedia.GetType() == MediaSegment::VIDEO) {
MirrorAndDisableSegment(static_cast<VideoSegment&>(aMedia),
static_cast<VideoSegment&>(*mMedia), mode);
static_cast<VideoSegment&>(*media), mode);
} else {
MOZ_CRASH("Unsupported media type");
}
NotifyRealtimeTrackData(aGraph, aTrackOffset, *mMedia);
NotifyRealtimeTrackData(aGraph, aTrackOffset, *media);
}
void DirectMediaStreamTrackListener::IncreaseDisabled(DisabledTrackMode aMode) {


@ -71,6 +71,16 @@ class MediaStreamTrackListener {
virtual void NotifyPrincipalHandleChanged(
MediaStreamGraph* aGraph, const PrincipalHandle& aNewPrincipalHandle) {}
/**
* Notify that the enabled state for the track this listener is attached to
* has changed.
*
* The enabled state refers to whether audio should be audible (enabled) or
* silent (not enabled), and whether video should be displayed as-is (enabled)
* or as black frames (not enabled).
*/
virtual void NotifyEnabledStateChanged(bool aEnabled) {}
/**
* Notify that the stream output is advancing. aCurrentTrackTime is the number
* of samples that has been played out for this track in stream time.
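For context, a sketch of a consumer overriding the new callback; the class and its rendering helpers are hypothetical, not part of this patch:

// Hypothetical sink reacting to the new enabled-state notifications.
class ToyVideoSink : public MediaStreamTrackListener {
 public:
  void NotifyEnabledStateChanged(bool aEnabled) override {
    if (aEnabled) {
      ResumeNormalRendering();  // track re-enabled: show real frames
    } else {
      RenderBlackFrame();  // SILENCE_BLACK: show black instead
    }
  }

 private:
  void RenderBlackFrame() {}
  void ResumeNormalRendering() {}
};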
@ -156,8 +166,6 @@ class DirectMediaStreamTrackListener : public MediaStreamTrackListener {
virtual void NotifyDirectListenerInstalled(InstallationResult aResult) {}
virtual void NotifyDirectListenerUninstalled() {}
virtual MediaStreamVideoSink* AsMediaStreamVideoSink() { return nullptr; }
protected:
virtual ~DirectMediaStreamTrackListener() {}
@ -172,12 +180,11 @@ class DirectMediaStreamTrackListener : public MediaStreamTrackListener {
void DecreaseDisabled(DisabledTrackMode aMode);
// Matches the number of disabled streams to which this listener is attached.
// The number of streams are those between the stream the listener was added
// and the SourceMediaStream that is the input of the data.
// The number of streams are those between the stream where the listener was
// added and the SourceMediaStream that is the source of the data reaching
// this listener.
Atomic<int32_t> mDisabledFreezeCount;
Atomic<int32_t> mDisabledBlackCount;
nsAutoPtr<MediaSegment> mMedia;
};
} // namespace mozilla


@ -566,7 +566,7 @@ class MediaStreamTrack : public DOMEventTargetHelper,
*/
void SetMuted(bool aMuted) { mMuted = aMuted; }
void Destroy();
virtual void Destroy();
// Returns the owning DOMMediaStream's underlying owned stream.
ProcessedMediaStream* GetOwnedStream();

View file

@ -1,19 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "MediaStreamVideoSink.h"
#include "VideoSegment.h"
namespace mozilla {
void MediaStreamVideoSink::NotifyRealtimeTrackData(MediaStreamGraph* aGraph,
StreamTime aTrackOffset,
const MediaSegment& aMedia) {
if (aMedia.GetType() == MediaSegment::VIDEO) {
SetCurrentFrames(static_cast<const VideoSegment&>(aMedia));
}
}
} // namespace mozilla

View file

@ -1,46 +0,0 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MEDIASTREAMVIDEOSINK_H_
#define MEDIASTREAMVIDEOSINK_H_
#include "mozilla/Pair.h"
#include "gfxPoint.h"
#include "MediaStreamListener.h"
namespace mozilla {
class VideoFrameContainer;
/**
* Base class of MediaStreamVideoSink family. This is the output of MediaStream.
*/
class MediaStreamVideoSink : public DirectMediaStreamTrackListener {
public:
// Method of DirectMediaStreamTrackListener.
void NotifyRealtimeTrackData(MediaStreamGraph* aGraph,
StreamTime aTrackOffset,
const MediaSegment& aMedia) override;
// Call on any thread
virtual void SetCurrentFrames(const VideoSegment& aSegment) = 0;
virtual void ClearFrames() = 0;
virtual VideoFrameContainer* AsVideoFrameContainer() { return nullptr; }
virtual void Invalidate() {}
virtual MediaStreamVideoSink* AsMediaStreamVideoSink() override {
return this;
}
protected:
virtual ~MediaStreamVideoSink(){};
};
} // namespace mozilla
#endif /* MEDIASTREAMVIDEOSINK_H_ */

View file

@ -7,7 +7,6 @@
#include "VideoFrameContainer.h"
#include "mozilla/Telemetry.h"
#include "MediaDecoderOwner.h"
#include "Tracing.h"
using namespace mozilla::layers;
@ -43,7 +42,6 @@ VideoFrameContainer::VideoFrameContainer(
: mOwner(aOwner),
mImageContainer(aContainer),
mMutex("nsVideoFrameContainer"),
mBlackImage(nullptr),
mFrameID(0),
mPendingPrincipalHandle(PRINCIPAL_HANDLE_NONE),
mFrameIDForPendingPrincipalHandle(0),
@ -80,127 +78,6 @@ void VideoFrameContainer::UpdatePrincipalHandleForFrameIDLocked(
mFrameIDForPendingPrincipalHandle = aFrameID;
}
static bool SetImageToBlackPixel(PlanarYCbCrImage* aImage) {
uint8_t blackPixel[] = {0x10, 0x80, 0x80};
PlanarYCbCrData data;
data.mYChannel = blackPixel;
data.mCbChannel = blackPixel + 1;
data.mCrChannel = blackPixel + 2;
data.mYStride = data.mCbCrStride = 1;
data.mPicSize = data.mYSize = data.mCbCrSize = gfx::IntSize(1, 1);
return aImage->CopyData(data);
}
class VideoFrameContainerInvalidateRunnable : public Runnable {
public:
explicit VideoFrameContainerInvalidateRunnable(
VideoFrameContainer* aVideoFrameContainer)
: Runnable("VideoFrameContainerInvalidateRunnable"),
mVideoFrameContainer(aVideoFrameContainer) {}
NS_IMETHOD Run() override {
MOZ_ASSERT(NS_IsMainThread());
mVideoFrameContainer->Invalidate();
return NS_OK;
}
private:
RefPtr<VideoFrameContainer> mVideoFrameContainer;
};
void VideoFrameContainer::SetCurrentFrames(const VideoSegment& aSegment) {
TRACE();
if (aSegment.IsEmpty()) {
return;
}
MutexAutoLock lock(mMutex);
AutoTimer<Telemetry::VFC_SETVIDEOSEGMENT_LOCK_HOLD_MS> lockHold;
// Collect any new frames produced in this iteration.
AutoTArray<ImageContainer::NonOwningImage, 4> newImages;
PrincipalHandle lastPrincipalHandle = PRINCIPAL_HANDLE_NONE;
VideoSegment::ConstChunkIterator iter(aSegment);
while (!iter.IsEnded()) {
VideoChunk chunk = *iter;
const VideoFrame* frame = &chunk.mFrame;
if (*frame == mLastPlayedVideoFrame) {
iter.Next();
continue;
}
Image* image = frame->GetImage();
CONTAINER_LOG(
LogLevel::Verbose,
("VideoFrameContainer %p writing video frame %p (%d x %d)", this, image,
frame->GetIntrinsicSize().width, frame->GetIntrinsicSize().height));
if (frame->GetForceBlack()) {
if (!mBlackImage) {
RefPtr<Image> blackImage =
GetImageContainer()->CreatePlanarYCbCrImage();
if (blackImage) {
// Sets the image to a single black pixel, which will be scaled to
// fill the rendered size.
if (SetImageToBlackPixel(blackImage->AsPlanarYCbCrImage())) {
mBlackImage = blackImage;
}
}
}
if (mBlackImage) {
image = mBlackImage;
}
}
// Don't append null image to the newImages.
if (!image) {
iter.Next();
continue;
}
newImages.AppendElement(
ImageContainer::NonOwningImage(image, chunk.mTimeStamp));
lastPrincipalHandle = chunk.GetPrincipalHandle();
mLastPlayedVideoFrame = *frame;
iter.Next();
}
// Don't update if there are no changes.
if (newImages.IsEmpty()) {
return;
}
AutoTArray<ImageContainer::NonOwningImage, 4> images;
bool principalHandleChanged =
lastPrincipalHandle != PRINCIPAL_HANDLE_NONE &&
lastPrincipalHandle != GetLastPrincipalHandleLocked();
// Add the frames from this iteration.
for (auto& image : newImages) {
image.mFrameID = NewFrameID();
images.AppendElement(image);
}
if (principalHandleChanged) {
UpdatePrincipalHandleForFrameIDLocked(lastPrincipalHandle,
newImages.LastElement().mFrameID);
}
SetCurrentFramesLocked(mLastPlayedVideoFrame.GetIntrinsicSize(), images);
nsCOMPtr<nsIRunnable> event = new VideoFrameContainerInvalidateRunnable(this);
mMainThread->Dispatch(event.forget());
images.ClearAndRetainStorage();
}
void VideoFrameContainer::ClearFrames() { ClearFutureFrames(); }
void VideoFrameContainer::SetCurrentFrame(const gfx::IntSize& aIntrinsicSize,
Image* aImage,
const TimeStamp& aTargetTime) {

View file

@ -13,7 +13,6 @@
#include "nsCOMPtr.h"
#include "ImageContainer.h"
#include "MediaSegment.h"
#include "MediaStreamVideoSink.h"
#include "VideoSegment.h"
namespace mozilla {
@ -29,9 +28,11 @@ class MediaDecoderOwner;
* element itself ... well, maybe we could, but it could be risky and/or
* confusing.
*/
class VideoFrameContainer : public MediaStreamVideoSink {
class VideoFrameContainer {
virtual ~VideoFrameContainer();
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoFrameContainer)
public:
typedef layers::ImageContainer ImageContainer;
typedef layers::Image Image;
@ -39,9 +40,6 @@ class VideoFrameContainer : public MediaStreamVideoSink {
VideoFrameContainer(MediaDecoderOwner* aOwner,
already_AddRefed<ImageContainer> aContainer);
// Call on any thread
virtual void SetCurrentFrames(const VideoSegment& aSegment) override;
virtual void ClearFrames() override;
void SetCurrentFrame(const gfx::IntSize& aIntrinsicSize, Image* aImage,
const TimeStamp& aTargetTime);
// Returns the last principalHandle we notified mElement about.
@ -63,7 +61,6 @@ class VideoFrameContainer : public MediaStreamVideoSink {
SetCurrentFrames(aIntrinsicSize,
nsTArray<ImageContainer::NonOwningImage>());
}
VideoFrameContainer* AsVideoFrameContainer() override { return this; }
void ClearCurrentFrame();
// Make the current frame the only frame in the container, i.e. discard
@ -84,7 +81,7 @@ class VideoFrameContainer : public MediaStreamVideoSink {
// Call on main thread
enum { INVALIDATE_DEFAULT, INVALIDATE_FORCE };
void Invalidate() override { InvalidateWithFlags(INVALIDATE_DEFAULT); }
void Invalidate() { InvalidateWithFlags(INVALIDATE_DEFAULT); }
void InvalidateWithFlags(uint32_t aFlags);
ImageContainer* GetImageContainer();
void ForgetElement() { mOwner = nullptr; }
@ -121,9 +118,6 @@ class VideoFrameContainer : public MediaStreamVideoSink {
// mMutex protects all the fields below.
Mutex mMutex;
// Once the frame is forced to black, we initialize mBlackImage for following
// frames.
RefPtr<Image> mBlackImage;
// The intrinsic size is the ideal size which we should render the
// ImageContainer's current Image at.
// This can differ from the Image's actual size when the media resource
@ -133,9 +127,6 @@ class VideoFrameContainer : public MediaStreamVideoSink {
// We maintain our own mFrameID which is auto-incremented at every
// SetCurrentFrame() or NewFrameID() call.
ImageContainer::FrameID mFrameID;
// We record the last played video frame to avoid playing the frame again
// with a different frame id.
VideoFrame mLastPlayedVideoFrame;
// The last PrincipalHandle we notified mElement about.
PrincipalHandle mLastPrincipalHandle;
// The PrincipalHandle the client has notified us is changing with FrameID

View file

@ -0,0 +1,356 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef VideoFrameConverter_h
#define VideoFrameConverter_h
#include "ImageContainer.h"
#include "ImageToI420.h"
#include "MediaTimer.h"
#include "VideoSegment.h"
#include "VideoUtils.h"
#include "nsISupportsImpl.h"
#include "nsThreadUtils.h"
#include "mozilla/TaskQueue.h"
#include "mozilla/dom/ImageBitmapBinding.h"
#include "mozilla/dom/ImageUtils.h"
#include "webrtc/api/video/video_frame.h"
#include "webrtc/common_video/include/i420_buffer_pool.h"
#include "webrtc/common_video/include/video_frame_buffer.h"
#include "webrtc/rtc_base/keep_ref_until_done.h"
#include "webrtc/system_wrappers/include/clock.h"
// The number of frame buffers VideoFrameConverter may create before returning
// errors.
// Sometimes these are released synchronously but they can be forwarded all the
// way to the encoder for asynchronous encoding. With a pool size of 5,
// we allow 1 buffer for the current conversion, and 4 buffers to be queued at
// the encoder.
#define CONVERTER_BUFFER_POOL_SIZE 5
namespace mozilla {
static mozilla::LazyLogModule gVideoFrameConverterLog("VideoFrameConverter");
class VideoConverterListener {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoConverterListener)
virtual void OnVideoFrameConverted(const webrtc::VideoFrame& aVideoFrame) = 0;
protected:
virtual ~VideoConverterListener() {}
};
// An async video frame format converter.
//
// Input is typically a MediaStreamTrackListener driven by MediaStreamGraph.
//
// Output is passed through to all added VideoConverterListeners on a TaskQueue
// thread whenever a frame is converted.
class VideoFrameConverter {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoFrameConverter)
VideoFrameConverter()
: mTaskQueue(
new TaskQueue(GetMediaThreadPool(MediaThreadType::WEBRTC_DECODER),
"VideoFrameConverter")),
mPacingTimer(new MediaTimer()),
mLastImage(
-2), // -2 or -1 are not guaranteed invalid serials (Bug 1262134).
mBufferPool(false, CONVERTER_BUFFER_POOL_SIZE),
mLastFrameQueuedForProcessing(TimeStamp::Now()),
mEnabled(true) {
MOZ_COUNT_CTOR(VideoFrameConverter);
}
void QueueVideoChunk(const VideoChunk& aChunk, bool aForceBlack) {
gfx::IntSize size = aChunk.mFrame.GetIntrinsicSize();
if (size.width == 0 || size.height == 0) {
return;
}
TimeStamp t = aChunk.mTimeStamp;
MOZ_ASSERT(!t.IsNull());
if (!mLastFrameQueuedForPacing.IsNull() && t < mLastFrameQueuedForPacing) {
// With a direct listener we can have buffered up future frames in
// mPacingTimer. The source could start sending us frames that start
// before some previously buffered frames (e.g., a MediaDecoder does that
// when it seeks). We don't want to just append these to the pacing timer,
// as frames at different times on the MediaDecoder timeline would get
// passed to the encoder in a mixed order. We don't have an explicit way
// of signaling this, so we must detect here if time goes backwards.
MOZ_LOG(gVideoFrameConverterLog, LogLevel::Debug,
("Clearing pacer because of source reset (%.3f)",
(mLastFrameQueuedForPacing - t).ToSeconds()));
mPacingTimer->Cancel();
}
mLastFrameQueuedForPacing = t;
mPacingTimer->WaitUntil(t, __func__)
->Then(mTaskQueue, __func__,
[self = RefPtr<VideoFrameConverter>(this), this,
image = RefPtr<layers::Image>(aChunk.mFrame.GetImage()),
t = std::move(t), size = std::move(size), aForceBlack] {
QueueForProcessing(std::move(image), t, size, aForceBlack);
},
[] {});
}
void SetTrackEnabled(bool aEnabled) {
nsresult rv = mTaskQueue->Dispatch(NS_NewRunnableFunction(
__func__, [self = RefPtr<VideoFrameConverter>(this), this, aEnabled] {
MOZ_LOG(gVideoFrameConverterLog, LogLevel::Debug,
("VideoFrameConverter Track is now %s",
aEnabled ? "enabled" : "disabled"));
mEnabled = aEnabled;
if (!aEnabled && mLastFrameConverted) {
// After disabling, we re-send the last frame as black in case the
// source had already stopped and no frame is coming soon.
ProcessVideoFrame(nullptr, TimeStamp::Now(),
gfx::IntSize(mLastFrameConverted->width(),
mLastFrameConverted->height()),
true);
}
}));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
}
void AddListener(const RefPtr<VideoConverterListener>& aListener) {
nsresult rv = mTaskQueue->Dispatch(NS_NewRunnableFunction(
"VideoFrameConverter::AddListener",
[self = RefPtr<VideoFrameConverter>(this), this, aListener] {
MOZ_ASSERT(!mListeners.Contains(aListener));
mListeners.AppendElement(aListener);
}));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
}
void RemoveListener(const RefPtr<VideoConverterListener>& aListener) {
nsresult rv = mTaskQueue->Dispatch(NS_NewRunnableFunction(
"VideoFrameConverter::RemoveListener",
[self = RefPtr<VideoFrameConverter>(this), this, aListener] {
mListeners.RemoveElement(aListener);
}));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
}
void Shutdown() {
mPacingTimer->Cancel();
nsresult rv = mTaskQueue->Dispatch(NS_NewRunnableFunction(
"VideoFrameConverter::Shutdown",
[self = RefPtr<VideoFrameConverter>(this), this] {
if (mSameFrameTimer) {
mSameFrameTimer->Cancel();
}
mListeners.Clear();
}));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
}
protected:
virtual ~VideoFrameConverter() { MOZ_COUNT_DTOR(VideoFrameConverter); }
static void SameFrameTick(nsITimer* aTimer, void* aClosure) {
MOZ_ASSERT(aClosure);
VideoFrameConverter* self = static_cast<VideoFrameConverter*>(aClosure);
MOZ_ASSERT(self->mTaskQueue->IsCurrentThreadIn());
if (!self->mLastFrameConverted) {
return;
}
for (RefPtr<VideoConverterListener>& listener : self->mListeners) {
listener->OnVideoFrameConverted(*self->mLastFrameConverted);
}
}
void VideoFrameConverted(const webrtc::VideoFrame& aVideoFrame) {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
if (mSameFrameTimer) {
mSameFrameTimer->Cancel();
}
const int sameFrameIntervalInMs = 1000;
NS_NewTimerWithFuncCallback(
getter_AddRefs(mSameFrameTimer), &SameFrameTick, this,
sameFrameIntervalInMs, nsITimer::TYPE_REPEATING_SLACK,
"VideoFrameConverter::mSameFrameTimer", mTaskQueue);
mLastFrameConverted = MakeUnique<webrtc::VideoFrame>(aVideoFrame);
for (RefPtr<VideoConverterListener>& listener : mListeners) {
listener->OnVideoFrameConverted(aVideoFrame);
}
}
void QueueForProcessing(RefPtr<layers::Image> aImage, TimeStamp aTime,
gfx::IntSize aSize, bool aForceBlack) {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
int32_t serial;
if (aForceBlack || !mEnabled) {
// Set the last-img check to indicate black.
// -1 is not a guaranteed invalid serial. See bug 1262134.
serial = -1;
} else if (!aImage) {
// Set the last-img check to indicate reset.
// -2 is not a guaranteed invalid serial. See bug 1262134.
serial = -2;
} else {
serial = aImage->GetSerial();
}
if (serial == mLastImage) {
// With a non-direct listener we get passed duplicate frames every ~10ms
// even with no frame change.
return;
}
if (aTime <= mLastFrameQueuedForProcessing) {
MOZ_LOG(gVideoFrameConverterLog, LogLevel::Debug,
("Dropping a frame because time did not progress (%.3f)",
(mLastFrameQueuedForProcessing - aTime).ToSeconds()));
return;
}
mLastImage = serial;
mLastFrameQueuedForProcessing = aTime;
nsresult rv = mTaskQueue->Dispatch(
NewRunnableMethod<StoreCopyPassByRRef<RefPtr<layers::Image>>, TimeStamp,
gfx::IntSize, bool>(
"VideoFrameConverter::ProcessVideoFrame", this,
&VideoFrameConverter::ProcessVideoFrame, std::move(aImage), aTime,
aSize, aForceBlack || !mEnabled));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
}
void ProcessVideoFrame(RefPtr<layers::Image> aImage, TimeStamp aTime,
gfx::IntSize aSize, bool aForceBlack) {
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
if (aTime < mLastFrameQueuedForProcessing) {
MOZ_LOG(gVideoFrameConverterLog, LogLevel::Debug,
("Dropping a frame that is %.3f seconds behind latest",
(mLastFrameQueuedForProcessing - aTime).ToSeconds()));
return;
}
// See Bug 1529581 - Ideally we'd use the mTimestamp from the chunk
// passed into QueueVideoChunk rather than the webrtc.org clock here.
int64_t now = webrtc::Clock::GetRealTimeClock()->TimeInMilliseconds();
if (aForceBlack) {
// Send a black image.
rtc::scoped_refptr<webrtc::I420Buffer> buffer =
mBufferPool.CreateBuffer(aSize.width, aSize.height);
if (!buffer) {
MOZ_DIAGNOSTIC_ASSERT(false,
"Buffers not leaving scope except for "
"reconfig, should never leak");
MOZ_LOG(gVideoFrameConverterLog, LogLevel::Warning,
("Creating a buffer for a black video frame failed"));
return;
}
MOZ_LOG(gVideoFrameConverterLog, LogLevel::Verbose,
("Sending a black video frame"));
webrtc::I420Buffer::SetBlack(buffer);
webrtc::VideoFrame frame(buffer, 0, // not setting rtp timestamp
now, webrtc::kVideoRotation_0);
VideoFrameConverted(frame);
return;
}
if (!aImage) {
// Don't send anything for null images.
return;
}
MOZ_ASSERT(aImage->GetSize() == aSize);
if (layers::PlanarYCbCrImage* image = aImage->AsPlanarYCbCrImage()) {
dom::ImageUtils utils(image);
if (utils.GetFormat() == dom::ImageBitmapFormat::YUV420P &&
image->GetData()) {
const layers::PlanarYCbCrData* data = image->GetData();
rtc::scoped_refptr<webrtc::WrappedI420Buffer> video_frame_buffer(
new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
aImage->GetSize().width, aImage->GetSize().height,
data->mYChannel, data->mYStride, data->mCbChannel,
data->mCbCrStride, data->mCrChannel, data->mCbCrStride,
rtc::KeepRefUntilDone(image)));
webrtc::VideoFrame i420_frame(video_frame_buffer,
0, // not setting rtp timestamp
now, webrtc::kVideoRotation_0);
MOZ_LOG(gVideoFrameConverterLog, LogLevel::Verbose,
("Sending an I420 video frame"));
VideoFrameConverted(i420_frame);
return;
}
}
rtc::scoped_refptr<webrtc::I420Buffer> buffer =
mBufferPool.CreateBuffer(aSize.width, aSize.height);
if (!buffer) {
MOZ_DIAGNOSTIC_ASSERT(false,
"Buffers not leaving scope except for "
"reconfig, should never leak");
MOZ_LOG(gVideoFrameConverterLog, LogLevel::Warning,
("Creating a buffer failed"));
return;
}
nsresult rv =
ConvertToI420(aImage, buffer->MutableDataY(), buffer->StrideY(),
buffer->MutableDataU(), buffer->StrideU(),
buffer->MutableDataV(), buffer->StrideV());
if (NS_FAILED(rv)) {
MOZ_LOG(gVideoFrameConverterLog, LogLevel::Warning,
("Image conversion failed"));
return;
}
webrtc::VideoFrame frame(buffer, 0, // not setting rtp timestamp
now, webrtc::kVideoRotation_0);
VideoFrameConverted(frame);
}
const RefPtr<TaskQueue> mTaskQueue;
// Used to pace future frames close to their rendering-time. Thread-safe.
const RefPtr<MediaTimer> mPacingTimer;
// Written and read from the queueing thread (normally MSG).
// Last time we queued a frame in the pacer
TimeStamp mLastFrameQueuedForPacing;
// Accessed only from mTaskQueue.
int32_t mLastImage; // Serial number of last processed Image
webrtc::I420BufferPool mBufferPool;
nsCOMPtr<nsITimer> mSameFrameTimer;
TimeStamp mLastFrameQueuedForProcessing;
UniquePtr<webrtc::VideoFrame> mLastFrameConverted;
bool mEnabled;
nsTArray<RefPtr<VideoConverterListener>> mListeners;
};
} // namespace mozilla
#endif // VideoFrameConverter_h
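For orientation, a hedged usage sketch of the converter above, assuming the surrounding Gecko types (RefPtr, MakeRefPtr, VideoChunk); the listener and function names are made up:
// Illustration only: typical VideoFrameConverter wiring.
class SinkListener final : public VideoConverterListener {
 public:
  void OnVideoFrameConverted(const webrtc::VideoFrame& aFrame) override {
    // Runs on the converter's TaskQueue; forward the I420 frame from here.
  }
};

void ExampleWiring(const VideoChunk& aChunk) {
  RefPtr<VideoFrameConverter> converter = new VideoFrameConverter();
  converter->AddListener(MakeRefPtr<SinkListener>());
  converter->QueueVideoChunk(aChunk, /* aForceBlack = */ false);
  converter->SetTrackEnabled(false);  // subsequent output is converted black
  converter->Shutdown();              // cancels pacing, drops listeners
}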

View file

@ -14,7 +14,7 @@ namespace mozilla {
using namespace layers;
VideoFrame::VideoFrame(already_AddRefed<Image>& aImage,
VideoFrame::VideoFrame(already_AddRefed<Image> aImage,
const gfx::IntSize& aIntrinsicSize)
: mImage(aImage),
mIntrinsicSize(aIntrinsicSize),
@ -86,13 +86,13 @@ already_AddRefed<Image> VideoFrame::CreateBlackImage(
}
void VideoSegment::AppendFrame(already_AddRefed<Image>&& aImage,
StreamTime aDuration,
const IntSize& aIntrinsicSize,
const PrincipalHandle& aPrincipalHandle,
bool aForceBlack, TimeStamp aTimeStamp) {
VideoChunk* chunk = AppendChunk(aDuration);
VideoChunk* chunk = AppendChunk(0);
chunk->mTimeStamp = aTimeStamp;
VideoFrame frame(aImage, aIntrinsicSize);
VideoFrame frame(std::move(aImage), aIntrinsicSize);
MOZ_ASSERT_IF(!IsNull(), !aTimeStamp.IsNull());
frame.SetForceBlack(aForceBlack);
frame.SetPrincipalHandle(aPrincipalHandle);
chunk->mFrame.TakeFrom(&frame);

View file

@ -21,7 +21,7 @@ class VideoFrame {
public:
typedef mozilla::layers::Image Image;
VideoFrame(already_AddRefed<Image>& aImage,
VideoFrame(already_AddRefed<Image> aImage,
const gfx::IntSize& aIntrinsicSize);
VideoFrame();
~VideoFrame();
@ -108,11 +108,22 @@ class VideoSegment : public MediaSegmentBase<VideoSegment, VideoChunk> {
~VideoSegment();
void AppendFrame(already_AddRefed<Image>&& aImage, StreamTime aDuration,
void AppendFrame(already_AddRefed<Image>&& aImage,
const IntSize& aIntrinsicSize,
const PrincipalHandle& aPrincipalHandle,
bool aForceBlack = false,
TimeStamp aTimeStamp = TimeStamp::Now());
void ExtendLastFrameBy(StreamTime aDuration) {
if (aDuration <= 0) {
return;
}
if (mChunks.IsEmpty()) {
mChunks.AppendElement()->SetNull(aDuration);
} else {
mChunks[mChunks.Length() - 1].mDuration += aDuration;
}
mDuration += aDuration;
}
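This zero-duration append followed by ExtendLastFrameBy is the pattern the MirrorAndDisableSegment change earlier in this commit relies on. A hedged sketch with placeholder variables (`image`, `duration`):
// Illustration: append a frame with no duration, then grow it explicitly.
VideoSegment segment;
segment.AppendFrame(do_AddRef(image),        // placeholder layers::Image*
                    gfx::IntSize(640, 480),  // intrinsic size
                    PRINCIPAL_HANDLE_NONE,
                    /* aForceBlack = */ false, TimeStamp::Now());
segment.ExtendLastFrameBy(duration);  // placeholder StreamTime > 0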
const VideoFrame* GetLastFrame(StreamTime* aStart = nullptr) {
VideoChunk* c = GetLastChunk();
if (!c) {
@ -123,6 +134,38 @@ class VideoSegment : public MediaSegmentBase<VideoSegment, VideoChunk> {
}
return &c->mFrame;
}
VideoChunk* FindChunkContaining(const TimeStamp& aTime) {
VideoChunk* previousChunk = nullptr;
for (VideoChunk& c : mChunks) {
if (c.mTimeStamp.IsNull()) {
continue;
}
if (c.mTimeStamp > aTime) {
return previousChunk;
}
previousChunk = &c;
}
return previousChunk;
}
void ForgetUpToTime(const TimeStamp& aTime) {
VideoChunk* chunk = FindChunkContaining(aTime);
if (!chunk) {
return;
}
StreamTime duration = 0;
size_t chunksToRemove = 0;
for (const VideoChunk& c : mChunks) {
if (c.mTimeStamp >= chunk->mTimeStamp) {
break;
}
duration += c.GetDuration();
++chunksToRemove;
}
mChunks.RemoveElementsAt(0, chunksToRemove);
mDuration -= duration;
MOZ_ASSERT(mChunks.Capacity() >= DEFAULT_SEGMENT_CAPACITY,
"Capacity must be retained after removing chunks");
}
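A hedged usage note on the two helpers above (`segment` is a placeholder VideoSegment): ForgetUpToTime keeps the chunk covering the requested time, since that frame is still being displayed.
// Illustration: prune frames whose display interval has fully passed.
TimeStamp now = TimeStamp::Now();
if (VideoChunk* live = segment.FindChunkContaining(now)) {
  // `live` started at or before `now`; chunks before it can be dropped.
  segment.ForgetUpToTime(now);
}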
// Override default impl
void ReplaceWithDisabled() override {
for (ChunkIterator i(*this); !i.IsEnded(); i.Next()) {

View file

@ -5,19 +5,218 @@
#include "VideoStreamTrack.h"
#include "MediaStreamVideoSink.h"
#include "MediaStreamGraph.h"
#include "MediaStreamListener.h"
#include "nsContentUtils.h"
#include "nsGlobalWindowInner.h"
#include "VideoFrameContainer.h"
namespace mozilla {
namespace dom {
void VideoStreamTrack::AddVideoOutput(MediaStreamVideoSink* aSink) {
GetOwnedStream()->AddVideoOutput(aSink, mTrackID);
using layers::Image;
using layers::ImageContainer;
using layers::PlanarYCbCrData;
using layers::PlanarYCbCrImage;
static bool SetImageToBlackPixel(PlanarYCbCrImage* aImage) {
uint8_t blackPixel[] = {0x10, 0x80, 0x80};
PlanarYCbCrData data;
data.mYChannel = blackPixel;
data.mCbChannel = blackPixel + 1;
data.mCrChannel = blackPixel + 2;
data.mYStride = data.mCbCrStride = 1;
data.mPicSize = data.mYSize = data.mCbCrSize = gfx::IntSize(1, 1);
return aImage->CopyData(data);
}
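As a sanity check on these constants, (Y, Cb, Cr) = (0x10, 0x80, 0x80) is limited-range black; a standalone BT.601 conversion (plain C++, no Gecko types) maps it to RGB (0, 0, 0):
#include <cstdio>

int main() {
  // BT.601 limited-range YCbCr -> RGB for the single "black" pixel.
  const double Y = 0x10, Cb = 0x80, Cr = 0x80;
  double r = 1.164 * (Y - 16) + 1.596 * (Cr - 128);
  double g = 1.164 * (Y - 16) - 0.392 * (Cb - 128) - 0.813 * (Cr - 128);
  double b = 1.164 * (Y - 16) + 2.017 * (Cb - 128);
  std::printf("rgb = (%.1f, %.1f, %.1f)\n", r, g, b);  // (0.0, 0.0, 0.0)
  return 0;
}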
void VideoStreamTrack::RemoveVideoOutput(MediaStreamVideoSink* aSink) {
GetOwnedStream()->RemoveVideoOutput(aSink, mTrackID);
class VideoOutput : public DirectMediaStreamTrackListener {
protected:
virtual ~VideoOutput() = default;
void DropPastFrames() {
mMutex.AssertCurrentThreadOwns();
TimeStamp now = TimeStamp::Now();
size_t nrChunksInPast = 0;
for (const auto& idChunkPair : mFrames) {
const VideoChunk& chunk = idChunkPair.second();
if (chunk.mTimeStamp > now) {
break;
}
++nrChunksInPast;
}
if (nrChunksInPast > 1) {
// We need to keep one frame that starts in the past, because it only ends
// when the next frame starts (which also needs to be in the past for it
// to drop).
mFrames.RemoveElementsAt(0, nrChunksInPast - 1);
}
}
void SendFrames() {
mMutex.AssertCurrentThreadOwns();
DropPastFrames();
if (mFrames.IsEmpty()) {
return;
}
// Collect any new frames produced in this iteration.
AutoTArray<ImageContainer::NonOwningImage, 16> images;
PrincipalHandle lastPrincipalHandle = PRINCIPAL_HANDLE_NONE;
for (const auto& idChunkPair : mFrames) {
ImageContainer::FrameID frameId = idChunkPair.first();
const VideoChunk& chunk = idChunkPair.second();
const VideoFrame& frame = chunk.mFrame;
Image* image = frame.GetImage();
if (frame.GetForceBlack() || !mEnabled) {
if (!mBlackImage) {
RefPtr<Image> blackImage = mVideoFrameContainer->GetImageContainer()
->CreatePlanarYCbCrImage();
if (blackImage) {
// Sets the image to a single black pixel, which will be scaled to
// fill the rendered size.
if (SetImageToBlackPixel(blackImage->AsPlanarYCbCrImage())) {
mBlackImage = blackImage;
}
}
}
if (mBlackImage) {
image = mBlackImage;
}
}
if (!image) {
// We ignore null images.
continue;
}
images.AppendElement(
ImageContainer::NonOwningImage(image, chunk.mTimeStamp, frameId));
lastPrincipalHandle = chunk.GetPrincipalHandle();
}
if (images.IsEmpty()) {
// This could happen if the only images in mFrames are null. We leave the
// container at the current frame in this case.
mVideoFrameContainer->ClearFutureFrames();
return;
}
bool principalHandleChanged =
lastPrincipalHandle != PRINCIPAL_HANDLE_NONE &&
lastPrincipalHandle != mVideoFrameContainer->GetLastPrincipalHandle();
if (principalHandleChanged) {
mVideoFrameContainer->UpdatePrincipalHandleForFrameID(
lastPrincipalHandle, images.LastElement().mFrameID);
}
mVideoFrameContainer->SetCurrentFrames(
mFrames[0].second().mFrame.GetIntrinsicSize(), images);
mMainThread->Dispatch(NewRunnableMethod("VideoFrameContainer::Invalidate",
mVideoFrameContainer,
&VideoFrameContainer::Invalidate));
}
public:
VideoOutput(VideoFrameContainer* aContainer, AbstractThread* aMainThread)
: mMutex("VideoOutput::mMutex"),
mVideoFrameContainer(aContainer),
mMainThread(aMainThread) {}
void NotifyRealtimeTrackData(MediaStreamGraph* aGraph,
StreamTime aTrackOffset,
const MediaSegment& aMedia) override {
MOZ_ASSERT(aMedia.GetType() == MediaSegment::VIDEO);
const VideoSegment& video = static_cast<const VideoSegment&>(aMedia);
MutexAutoLock lock(mMutex);
for (VideoSegment::ConstChunkIterator i(video); !i.IsEnded(); i.Next()) {
if (!mLastFrameTime.IsNull() && i->mTimeStamp < mLastFrameTime) {
// Time can go backwards if the source is a captured MediaDecoder and
// it seeks, as the previously buffered frames would stretch into the
// future. If this happens, we clear the buffered frames and start over.
mFrames.ClearAndRetainStorage();
}
mFrames.AppendElement(MakePair(mVideoFrameContainer->NewFrameID(), *i));
mLastFrameTime = i->mTimeStamp;
}
SendFrames();
}
void NotifyRemoved() override {
// Doesn't need locking by mMutex, since the direct listener is removed from
// the track before we get notified.
mFrames.ClearAndRetainStorage();
mVideoFrameContainer->ClearFutureFrames();
}
void NotifyEnded() override {
// Doesn't need locking by mMutex, since for the track to end, it must have
// been ended by the source, meaning that the source won't append more data.
mFrames.ClearAndRetainStorage();
}
void NotifyEnabledStateChanged(bool aEnabled) override {
MutexAutoLock lock(mMutex);
mEnabled = aEnabled;
// Since mEnabled affects whether frames are real or black, we assign
// new FrameIDs whenever this changes.
for (auto& idChunkPair : mFrames) {
idChunkPair.first() = mVideoFrameContainer->NewFrameID();
}
SendFrames();
}
Mutex mMutex;
TimeStamp mLastFrameTime;
// Once the frame is forced to black, we initialize mBlackImage for use in any
// following forced-black frames.
RefPtr<Image> mBlackImage;
bool mEnabled = true;
// This array is accessed from both the direct video thread, and the graph
// thread. Protected by mMutex.
nsTArray<Pair<ImageContainer::FrameID, VideoChunk>> mFrames;
const RefPtr<VideoFrameContainer> mVideoFrameContainer;
const RefPtr<AbstractThread> mMainThread;
};
namespace dom {
VideoStreamTrack::VideoStreamTrack(DOMMediaStream* aStream, TrackID aTrackID,
TrackID aInputTrackID,
MediaStreamTrackSource* aSource,
const MediaTrackConstraints& aConstraints)
: MediaStreamTrack(aStream, aTrackID, aInputTrackID, aSource,
aConstraints) {}
void VideoStreamTrack::Destroy() {
mVideoOutputs.Clear();
MediaStreamTrack::Destroy();
}
void VideoStreamTrack::AddVideoOutput(VideoFrameContainer* aSink) {
for (const auto& output : mVideoOutputs) {
if (output->mVideoFrameContainer == aSink) {
MOZ_ASSERT_UNREACHABLE("A VideoFrameContainer was already added");
return;
}
}
RefPtr<VideoOutput>& output =
*mVideoOutputs.AppendElement(MakeRefPtr<VideoOutput>(
aSink, nsGlobalWindowInner::Cast(GetParentObject())
->AbstractMainThreadFor(TaskCategory::Other)));
AddDirectListener(output);
AddListener(output);
}
void VideoStreamTrack::RemoveVideoOutput(VideoFrameContainer* aSink) {
for (const auto& output : nsTArray<RefPtr<VideoOutput>>(mVideoOutputs)) {
if (output->mVideoFrameContainer == aSink) {
mVideoOutputs.RemoveElement(output);
RemoveDirectListener(output);
RemoveListener(output);
}
}
}
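A hedged sketch of the intended call pattern (names illustrative; the container would come from the owning media element):
// Illustration: hook an element's frame container up to a video track.
void ExampleHookup(dom::VideoStreamTrack* aTrack,
                   VideoFrameContainer* aContainer) {
  aTrack->AddVideoOutput(aContainer);     // installs a VideoOutput listener
  // ... frames now flow into aContainer via SendFrames() ...
  aTrack->RemoveVideoOutput(aContainer);  // uninstalls it and stops the flow
}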
void VideoStreamTrack::GetLabel(nsAString& aLabel, CallerType aCallerType) {

View file

@ -11,7 +11,8 @@
namespace mozilla {
class MediaStreamVideoSink;
class VideoFrameContainer;
class VideoOutput;
namespace dom {
@ -20,15 +21,15 @@ class VideoStreamTrack : public MediaStreamTrack {
VideoStreamTrack(
DOMMediaStream* aStream, TrackID aTrackID, TrackID aInputTrackID,
MediaStreamTrackSource* aSource,
const MediaTrackConstraints& aConstraints = MediaTrackConstraints())
: MediaStreamTrack(aStream, aTrackID, aInputTrackID, aSource,
aConstraints) {}
const MediaTrackConstraints& aConstraints = MediaTrackConstraints());
void Destroy() override;
VideoStreamTrack* AsVideoStreamTrack() override { return this; }
const VideoStreamTrack* AsVideoStreamTrack() const override { return this; }
void AddVideoOutput(MediaStreamVideoSink* aSink);
void RemoveVideoOutput(MediaStreamVideoSink* aSink);
void AddVideoOutput(VideoFrameContainer* aSink);
void RemoveVideoOutput(VideoFrameContainer* aSink);
// WebIDL
void GetKind(nsAString& aKind) override { aKind.AssignLiteral("video"); }
@ -41,6 +42,9 @@ class VideoStreamTrack : public MediaStreamTrack {
return do_AddRef(new VideoStreamTrack(
aOwningStream, aTrackID, mInputTrackID, mSource, mConstraints));
}
private:
nsTArray<RefPtr<VideoOutput>> mVideoOutputs;
};
} // namespace dom

View file

@ -8,9 +8,11 @@
#include <algorithm>
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "DriftCompensation.h"
#include "GeckoProfiler.h"
#include "MediaDecoder.h"
#include "MediaStreamVideoSink.h"
#include "MediaStreamGraphImpl.h"
#include "MediaStreamListener.h"
#include "mozilla/dom/AudioNode.h"
#include "mozilla/dom/AudioStreamTrack.h"
#include "mozilla/dom/MediaStreamTrack.h"
@ -25,6 +27,7 @@
#include "mozilla/Unused.h"
#include "nsIPrincipal.h"
#include "nsMimeTypes.h"
#include "nsThreadUtils.h"
#include "OggWriter.h"
#include "OpusTrackEncoder.h"
#include "TimeUnits.h"
@ -49,10 +52,12 @@ using namespace media;
class MediaEncoder::AudioTrackListener : public DirectMediaStreamTrackListener {
public:
AudioTrackListener(AudioTrackEncoder* aEncoder, TaskQueue* aEncoderThread)
AudioTrackListener(DriftCompensator* aDriftCompensator,
AudioTrackEncoder* aEncoder, TaskQueue* aEncoderThread)
: mDirectConnected(false),
mInitialized(false),
mRemoved(false),
mDriftCompensator(aDriftCompensator),
mEncoder(aEncoder),
mEncoderThread(aEncoderThread) {
MOZ_ASSERT(mEncoder);
@ -91,62 +96,13 @@ class MediaEncoder::AudioTrackListener : public DirectMediaStreamTrackListener {
}
if (!mInitialized) {
nsresult rv = mEncoderThread->Dispatch(NewRunnableMethod<StreamTime>(
"mozilla::AudioTrackEncoder::SetStartOffset", mEncoder,
&AudioTrackEncoder::SetStartOffset, aTrackOffset));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
mDriftCompensator->NotifyAudioStart(TimeStamp::Now());
mInitialized = true;
}
if (!mDirectConnected) {
NotifyRealtimeTrackData(aGraph, aTrackOffset, aQueuedMedia);
}
mDriftCompensator->NotifyAudio(aQueuedMedia.GetDuration());
AutoTArray<Pair<bool, StreamTime>, 2> nulledSequence;
for (AudioSegment::ConstChunkIterator iter(
static_cast<const AudioSegment&>(aQueuedMedia));
!iter.IsEnded(); iter.Next()) {
if (!nulledSequence.IsEmpty()) {
Pair<bool, StreamTime>& last = nulledSequence.LastElement();
if (last.first() == iter->IsNull()) {
last.second() += iter->GetDuration();
continue;
}
}
nulledSequence.AppendElement(
MakePair(iter->IsNull(), iter->GetDuration()));
}
for (const Pair<bool, StreamTime>& nulledRange : nulledSequence) {
if (nulledRange.first()) {
nsresult rv = mEncoderThread->Dispatch(NewRunnableMethod<StreamTime>(
"mozilla::AudioTrackEncoder::AdvanceBlockedInput", mEncoder,
&AudioTrackEncoder::AdvanceBlockedInput, nulledRange.second()));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
} else {
nsresult rv = mEncoderThread->Dispatch(NewRunnableMethod<StreamTime>(
"mozilla::AudioTrackEncoder::AdvanceCurrentTime", mEncoder,
&AudioTrackEncoder::AdvanceCurrentTime, nulledRange.second()));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
}
}
}
void NotifyRealtimeTrackData(MediaStreamGraph* aGraph,
StreamTime aTrackOffset,
const MediaSegment& aMedia) override {
TRACE_COMMENT("Encoder %p", mEncoder.get());
MOZ_ASSERT(mEncoder);
MOZ_ASSERT(mEncoderThread);
if (mShutdown) {
return;
}
const AudioSegment& audio = static_cast<const AudioSegment&>(aMedia);
const AudioSegment& audio = static_cast<const AudioSegment&>(aQueuedMedia);
AudioSegment copy;
copy.AppendSlice(audio, 0, audio.GetDuration());
@ -197,11 +153,12 @@ class MediaEncoder::AudioTrackListener : public DirectMediaStreamTrackListener {
bool mDirectConnected;
bool mInitialized;
bool mRemoved;
const RefPtr<DriftCompensator> mDriftCompensator;
RefPtr<AudioTrackEncoder> mEncoder;
RefPtr<TaskQueue> mEncoderThread;
};
class MediaEncoder::VideoTrackListener : public MediaStreamVideoSink {
class MediaEncoder::VideoTrackListener : public DirectMediaStreamTrackListener {
public:
VideoTrackListener(VideoTrackEncoder* aEncoder, TaskQueue* aEncoderThread)
: mDirectConnected(false),
@ -245,58 +202,44 @@ class MediaEncoder::VideoTrackListener : public MediaStreamVideoSink {
return;
}
const TimeStamp now = TimeStamp::Now();
if (!mInitialized) {
nsresult rv = mEncoderThread->Dispatch(NewRunnableMethod<StreamTime>(
nsresult rv = mEncoderThread->Dispatch(NewRunnableMethod<TimeStamp>(
"mozilla::VideoTrackEncoder::SetStartOffset", mEncoder,
&VideoTrackEncoder::SetStartOffset, aTrackOffset));
&VideoTrackEncoder::SetStartOffset, now));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
mInitialized = true;
}
AutoTArray<Pair<bool, StreamTime>, 2> nulledSequence;
for (VideoSegment::ConstChunkIterator iter(
static_cast<const VideoSegment&>(aQueuedMedia));
!iter.IsEnded(); iter.Next()) {
if (!nulledSequence.IsEmpty()) {
Pair<bool, StreamTime>& last = nulledSequence.LastElement();
if (last.first() == iter->IsNull()) {
last.second() += iter->GetDuration();
continue;
}
}
nulledSequence.AppendElement(
MakePair(iter->IsNull(), iter->GetDuration()));
}
for (const Pair<bool, StreamTime>& nulledRange : nulledSequence) {
if (nulledRange.first()) {
nsresult rv = mEncoderThread->Dispatch(NewRunnableMethod<StreamTime>(
"mozilla::VideoTrackEncoder::AdvanceBlockedInput", mEncoder,
&VideoTrackEncoder::AdvanceBlockedInput, nulledRange.second()));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
} else {
nsresult rv = mEncoderThread->Dispatch(NewRunnableMethod<StreamTime>(
"mozilla::VideoTrackEncoder::AdvanceCurrentTime", mEncoder,
&VideoTrackEncoder::AdvanceCurrentTime, nulledRange.second()));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
}
}
nsresult rv = mEncoderThread->Dispatch(NewRunnableMethod<TimeStamp>(
"mozilla::VideoTrackEncoder::AdvanceCurrentTime", mEncoder,
&VideoTrackEncoder::AdvanceCurrentTime, now));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
}
void SetCurrentFrames(const VideoSegment& aMedia) override {
void NotifyRealtimeTrackData(MediaStreamGraph* aGraph,
StreamTime aTrackOffset,
const MediaSegment& aMedia) override {
TRACE_COMMENT("Encoder %p", mEncoder.get());
MOZ_ASSERT(mEncoder);
MOZ_ASSERT(mEncoderThread);
MOZ_ASSERT(aMedia.GetType() == MediaSegment::VIDEO);
if (mShutdown) {
return;
}
const VideoSegment& video = static_cast<const VideoSegment&>(aMedia);
VideoSegment copy;
copy.AppendSlice(aMedia, 0, aMedia.GetDuration());
for (VideoSegment::ConstChunkIterator iter(video); !iter.IsEnded();
iter.Next()) {
copy.AppendFrame(do_AddRef(iter->mFrame.GetImage()),
iter->mFrame.GetIntrinsicSize(),
iter->mFrame.GetPrincipalHandle(),
iter->mFrame.GetForceBlack(), iter->mTimeStamp);
}
nsresult rv = mEncoderThread->Dispatch(
NewRunnableMethod<StoreCopyPassByRRef<VideoSegment>>(
@ -306,7 +249,27 @@ class MediaEncoder::VideoTrackListener : public MediaStreamVideoSink {
Unused << rv;
}
void ClearFrames() override {}
void NotifyEnabledStateChanged(bool aEnabled) override {
MOZ_ASSERT(mEncoder);
MOZ_ASSERT(mEncoderThread);
if (mShutdown) {
return;
}
nsresult rv;
if (aEnabled) {
rv = mEncoderThread->Dispatch(NewRunnableMethod<TimeStamp>(
"mozilla::VideoTrackEncoder::Enable", mEncoder,
&VideoTrackEncoder::Enable, TimeStamp::Now()));
} else {
rv = mEncoderThread->Dispatch(NewRunnableMethod<TimeStamp>(
"mozilla::VideoTrackEncoder::Disable", mEncoder,
&VideoTrackEncoder::Disable, TimeStamp::Now()));
}
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
}
void NotifyEnded() override {
MOZ_ASSERT(mEncoder);
@ -429,10 +392,11 @@ class MediaEncoder::EncoderListener : public TrackEncoderListener {
};
MediaEncoder::MediaEncoder(TaskQueue* aEncoderThread,
RefPtr<DriftCompensator> aDriftCompensator,
UniquePtr<ContainerWriter> aWriter,
AudioTrackEncoder* aAudioEncoder,
VideoTrackEncoder* aVideoEncoder,
const nsAString& aMIMEType)
TrackRate aTrackRate, const nsAString& aMIMEType)
: mEncoderThread(aEncoderThread),
mWriter(std::move(aWriter)),
mAudioEncoder(aAudioEncoder),
@ -447,8 +411,8 @@ MediaEncoder::MediaEncoder(TaskQueue* aEncoderThread,
mCanceled(false),
mShutdown(false) {
if (mAudioEncoder) {
mAudioListener =
MakeAndAddRef<AudioTrackListener>(mAudioEncoder, mEncoderThread);
mAudioListener = MakeAndAddRef<AudioTrackListener>(
aDriftCompensator, mAudioEncoder, mEncoderThread);
nsresult rv =
mEncoderThread->Dispatch(NewRunnableMethod<RefPtr<EncoderListener>>(
"mozilla::AudioTrackEncoder::RegisterListener", mAudioEncoder,
@ -470,36 +434,66 @@ MediaEncoder::MediaEncoder(TaskQueue* aEncoderThread,
MediaEncoder::~MediaEncoder() { MOZ_ASSERT(mListeners.IsEmpty()); }
void MediaEncoder::Suspend(TimeStamp aTime) {
auto& ae = mAudioEncoder;
auto& ve = mVideoEncoder;
nsresult rv = mEncoderThread->Dispatch(NewRunnableFrom([ae, ve, aTime]() {
if (ae) {
ae->Suspend(aTime);
}
if (ve) {
ve->Suspend(aTime);
}
return NS_OK;
}));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
void MediaEncoder::RunOnGraph(already_AddRefed<Runnable> aRunnable) {
MediaStreamGraphImpl* graph;
if (mAudioTrack) {
graph = mAudioTrack->GraphImpl();
} else if (mVideoTrack) {
graph = mVideoTrack->GraphImpl();
} else if (mPipeStream) {
graph = mPipeStream->GraphImpl();
} else {
MOZ_CRASH("No graph");
}
class Message : public ControlMessage {
public:
explicit Message(already_AddRefed<Runnable> aRunnable)
: ControlMessage(nullptr), mRunnable(aRunnable) {}
void Run() override { mRunnable->Run(); }
const RefPtr<Runnable> mRunnable;
};
graph->AppendMessage(MakeUnique<Message>(std::move(aRunnable)));
}
void MediaEncoder::Resume(TimeStamp aTime) {
auto& ae = mAudioEncoder;
auto& ve = mVideoEncoder;
nsresult rv = mEncoderThread->Dispatch(NewRunnableFrom([ae, ve, aTime]() {
if (ae) {
ae->Resume(aTime);
}
if (ve) {
ve->Resume(aTime);
}
return NS_OK;
}));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
void MediaEncoder::Suspend() {
RunOnGraph(NS_NewRunnableFunction(
"MediaEncoder::Suspend",
[thread = mEncoderThread, audio = mAudioEncoder, video = mVideoEncoder] {
if (audio) {
nsresult rv = thread->Dispatch(
NewRunnableMethod("AudioTrackEncoder::Suspend", audio,
&AudioTrackEncoder::Suspend));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
}
if (video) {
nsresult rv = thread->Dispatch(NewRunnableMethod<TimeStamp>(
"VideoTrackEncoder::Suspend", video, &VideoTrackEncoder::Suspend,
TimeStamp::Now()));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
}
}));
}
void MediaEncoder::Resume() {
RunOnGraph(NS_NewRunnableFunction(
"MediaEncoder::Resume",
[thread = mEncoderThread, audio = mAudioEncoder, video = mVideoEncoder] {
if (audio) {
nsresult rv = thread->Dispatch(NewRunnableMethod(
"AudioTrackEncoder::Resume", audio, &AudioTrackEncoder::Resume));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
}
if (video) {
nsresult rv = thread->Dispatch(NewRunnableMethod<TimeStamp>(
"VideoTrackEncoder::Resume", video, &VideoTrackEncoder::Resume,
TimeStamp::Now()));
MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
Unused << rv;
}
}));
}
void MediaEncoder::ConnectAudioNode(AudioNode* aNode, uint32_t aOutput) {
@ -583,7 +577,7 @@ void MediaEncoder::ConnectMediaStreamTrack(MediaStreamTrack* aTrack) {
}
mVideoTrack = video;
video->AddVideoOutput(mVideoListener);
video->AddDirectListener(mVideoListener);
video->AddListener(mVideoListener);
} else {
MOZ_ASSERT(false, "Unknown track type");
@ -614,7 +608,7 @@ void MediaEncoder::RemoveMediaStreamTrack(MediaStreamTrack* aTrack) {
}
if (mVideoListener) {
video->RemoveVideoOutput(mVideoListener);
video->RemoveDirectListener(mVideoListener);
video->RemoveListener(mVideoListener);
}
mVideoTrack = nullptr;
@ -631,6 +625,8 @@ already_AddRefed<MediaEncoder> MediaEncoder::CreateEncoder(
UniquePtr<ContainerWriter> writer;
RefPtr<AudioTrackEncoder> audioEncoder;
RefPtr<VideoTrackEncoder> videoEncoder;
auto driftCompensator =
MakeRefPtr<DriftCompensator>(aEncoderThread, aTrackRate);
nsString mimeType;
if (!aTrackTypes) {
@ -648,11 +644,11 @@ already_AddRefed<MediaEncoder> MediaEncoder::CreateEncoder(
NS_ENSURE_TRUE(audioEncoder, nullptr);
}
if (Preferences::GetBool("media.recorder.video.frame_drops", true)) {
videoEncoder =
MakeAndAddRef<VP8TrackEncoder>(aTrackRate, FrameDroppingMode::ALLOW);
videoEncoder = MakeAndAddRef<VP8TrackEncoder>(
driftCompensator, aTrackRate, FrameDroppingMode::ALLOW);
} else {
videoEncoder = MakeAndAddRef<VP8TrackEncoder>(
aTrackRate, FrameDroppingMode::DISALLOW);
driftCompensator, aTrackRate, FrameDroppingMode::DISALLOW);
}
writer = MakeUnique<WebMWriter>(aTrackTypes);
NS_ENSURE_TRUE(writer, nullptr);
@ -691,8 +687,9 @@ already_AddRefed<MediaEncoder> MediaEncoder::CreateEncoder(
videoEncoder->SetBitrate(aVideoBitrate);
}
}
return MakeAndAddRef<MediaEncoder>(aEncoderThread, std::move(writer),
audioEncoder, videoEncoder, mimeType);
return MakeAndAddRef<MediaEncoder>(
aEncoderThread, std::move(driftCompensator), std::move(writer),
audioEncoder, videoEncoder, aTrackRate, mimeType);
}
nsresult MediaEncoder::GetEncodedMetadata(

View file

@ -18,6 +18,8 @@
namespace mozilla {
class DriftCompensator;
class Runnable;
class TaskQueue;
namespace dom {
@ -27,6 +29,7 @@ class MediaStreamTrack;
class VideoStreamTrack;
} // namespace dom
class DriftCompensator;
class MediaEncoder;
class MediaEncoderListener {
@ -107,17 +110,22 @@ class MediaEncoder {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaEncoder)
MediaEncoder(TaskQueue* aEncoderThread, UniquePtr<ContainerWriter> aWriter,
MediaEncoder(TaskQueue* aEncoderThread,
RefPtr<DriftCompensator> aDriftCompensator,
UniquePtr<ContainerWriter> aWriter,
AudioTrackEncoder* aAudioEncoder,
VideoTrackEncoder* aVideoEncoder, const nsAString& aMIMEType);
/* Note - called from control code, not on MSG threads. */
void Suspend(TimeStamp aTime);
VideoTrackEncoder* aVideoEncoder, TrackRate aTrackRate,
const nsAString& aMIMEType);
/**
* Note - called from control code, not on MSG threads.
* Calculates time spent paused in order to offset frames. */
void Resume(TimeStamp aTime);
* Called on main thread from MediaRecorder::Pause.
*/
void Suspend();
/**
* Called on main thread from MediaRecorder::Resume.
*/
void Resume();
/**
* Stops the current encoding, and disconnects the input tracks.
@ -227,6 +235,12 @@ class MediaEncoder {
~MediaEncoder();
private:
/**
* Takes a regular runnable and dispatches it to the graph wrapped in a
* ControlMessage.
*/
void RunOnGraph(already_AddRefed<Runnable> aRunnable);
/**
* Shuts down the MediaEncoder and cleans up track encoders.
* Listeners will be notified of the shutdown unless we were Cancel()ed first.
@ -245,6 +259,7 @@ class MediaEncoder {
nsresult CopyMetadataToMuxer(TrackEncoder* aTrackEncoder);
const RefPtr<TaskQueue> mEncoderThread;
const RefPtr<DriftCompensator> mDriftCompensator;
UniquePtr<ContainerWriter> mWriter;
RefPtr<AudioTrackEncoder> mAudioEncoder;

View file

@ -37,9 +37,7 @@ TrackEncoder::TrackEncoder(TrackRate aTrackRate)
mInitialized(false),
mEndOfStream(false),
mCanceled(false),
mCurrentTime(0),
mInitCounter(0),
mNotInitDuration(0),
mSuspended(false),
mTrackRate(aTrackRate) {}
@ -103,7 +101,7 @@ void TrackEncoder::SetWorkerThread(AbstractThread* aWorkerThread) {
mWorkerThread = aWorkerThread;
}
void AudioTrackEncoder::Suspend(TimeStamp) {
void AudioTrackEncoder::Suspend() {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
TRACK_LOG(LogLevel::Info, ("[AudioTrackEncoder %p]: Suspend(), was %s", this,
mSuspended ? "suspended" : "live"));
@ -115,7 +113,7 @@ void AudioTrackEncoder::Suspend(TimeStamp) {
mSuspended = true;
}
void AudioTrackEncoder::Resume(TimeStamp) {
void AudioTrackEncoder::Resume() {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
TRACK_LOG(LogLevel::Info, ("[AudioTrackEncoder %p]: Resume(), was %s", this,
mSuspended ? "suspended" : "live"));
@ -129,6 +127,7 @@ void AudioTrackEncoder::Resume(TimeStamp) {
void AudioTrackEncoder::AppendAudioSegment(AudioSegment&& aSegment) {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
AUTO_PROFILER_LABEL("AudioTrackEncoder::AppendAudioSegment", OTHER);
TRACK_LOG(LogLevel::Verbose,
("[AudioTrackEncoder %p]: AppendAudioSegment() duration=%" PRIu64,
this, aSegment.GetDuration()));
@ -141,7 +140,15 @@ void AudioTrackEncoder::AppendAudioSegment(AudioSegment&& aSegment) {
return;
}
mIncomingBuffer.AppendFrom(&aSegment);
TryInit(mOutgoingBuffer, aSegment.GetDuration());
if (!mSuspended) {
mOutgoingBuffer.AppendFrom(&aSegment);
}
if (mInitialized && mOutgoingBuffer.GetDuration() >= GetPacketDuration()) {
OnDataAvailable();
}
}
void AudioTrackEncoder::TakeTrackData(AudioSegment& aSegment) {
@ -216,20 +223,15 @@ void AudioTrackEncoder::TryInit(const AudioSegment& aSegment,
void AudioTrackEncoder::Cancel() {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
TRACK_LOG(LogLevel::Info,
("[AudioTrackEncoder %p]: Cancel(), currentTime=%" PRIu64, this,
mCurrentTime));
TRACK_LOG(LogLevel::Info, ("[AudioTrackEncoder %p]: Cancel()", this));
mCanceled = true;
mIncomingBuffer.Clear();
mOutgoingBuffer.Clear();
}
void AudioTrackEncoder::NotifyEndOfStream() {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
TRACK_LOG(
LogLevel::Info,
("[AudioTrackEncoder %p]: NotifyEndOfStream(), currentTime=%" PRIu64,
this, mCurrentTime));
TRACK_LOG(LogLevel::Info,
("[AudioTrackEncoder %p]: NotifyEndOfStream()", this));
if (!mCanceled && !mInitialized) {
// If source audio track is completely silent till the end of encoding,
@ -239,84 +241,11 @@ void AudioTrackEncoder::NotifyEndOfStream() {
mEndOfStream = true;
mIncomingBuffer.Clear();
if (mInitialized && !mCanceled) {
OnDataAvailable();
}
}
void AudioTrackEncoder::SetStartOffset(StreamTime aStartOffset) {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
MOZ_ASSERT(mCurrentTime == 0);
TRACK_LOG(LogLevel::Info,
("[AudioTrackEncoder %p]: SetStartOffset(), aStartOffset=%" PRIu64,
this, aStartOffset));
mIncomingBuffer.InsertNullDataAtStart(aStartOffset);
mCurrentTime = aStartOffset;
}
void AudioTrackEncoder::AdvanceBlockedInput(StreamTime aDuration) {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
TRACK_LOG(
LogLevel::Verbose,
("[AudioTrackEncoder %p]: AdvanceBlockedInput(), aDuration=%" PRIu64,
this, aDuration));
// We call Init here so it can account for aDuration towards the Init timeout
TryInit(mOutgoingBuffer, aDuration);
mIncomingBuffer.InsertNullDataAtStart(aDuration);
mCurrentTime += aDuration;
}
void AudioTrackEncoder::AdvanceCurrentTime(StreamTime aDuration) {
AUTO_PROFILER_LABEL("AudioTrackEncoder::AdvanceCurrentTime", OTHER);
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
if (mCanceled) {
return;
}
if (mEndOfStream) {
return;
}
TRACK_LOG(LogLevel::Verbose,
("[AudioTrackEncoder %p]: AdvanceCurrentTime() %" PRIu64, this,
aDuration));
StreamTime currentTime = mCurrentTime + aDuration;
if (mSuspended) {
mCurrentTime = currentTime;
mIncomingBuffer.ForgetUpTo(mCurrentTime);
return;
}
if (currentTime <= mIncomingBuffer.GetDuration()) {
mOutgoingBuffer.AppendSlice(mIncomingBuffer, mCurrentTime, currentTime);
TryInit(mOutgoingBuffer, aDuration);
if (mInitialized && mOutgoingBuffer.GetDuration() >= GetPacketDuration()) {
OnDataAvailable();
}
} else {
NS_ASSERTION(false,
"AudioTrackEncoder::AdvanceCurrentTime Not enough data");
TRACK_LOG(
LogLevel::Error,
("[AudioTrackEncoder %p]: AdvanceCurrentTime() Not enough data. "
"In incoming=%" PRIu64 ", aDuration=%" PRIu64 ", currentTime=%" PRIu64,
this, mIncomingBuffer.GetDuration(), aDuration, currentTime));
OnError();
}
mCurrentTime = currentTime;
mIncomingBuffer.ForgetUpTo(mCurrentTime);
}
/*static*/
void AudioTrackEncoder::InterleaveTrackData(AudioChunk& aChunk,
int32_t aDuration,
@ -366,13 +295,14 @@ void AudioTrackEncoder::DeInterleaveTrackData(AudioDataValue* aInput,
size_t AudioTrackEncoder::SizeOfExcludingThis(
mozilla::MallocSizeOf aMallocSizeOf) {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
return mIncomingBuffer.SizeOfExcludingThis(aMallocSizeOf) +
mOutgoingBuffer.SizeOfExcludingThis(aMallocSizeOf);
return mOutgoingBuffer.SizeOfExcludingThis(aMallocSizeOf);
}
VideoTrackEncoder::VideoTrackEncoder(TrackRate aTrackRate,
VideoTrackEncoder::VideoTrackEncoder(RefPtr<DriftCompensator> aDriftCompensator,
TrackRate aTrackRate,
FrameDroppingMode aFrameDroppingMode)
: TrackEncoder(aTrackRate),
mDriftCompensator(std::move(aDriftCompensator)),
mFrameWidth(0),
mFrameHeight(0),
mDisplayWidth(0),
@ -380,14 +310,17 @@ VideoTrackEncoder::VideoTrackEncoder(TrackRate aTrackRate,
mEncodedTicks(0),
mVideoBitrate(0),
mFrameDroppingMode(aFrameDroppingMode),
mKeyFrameInterval(DEFAULT_KEYFRAME_INTERVAL_MS) {
mKeyFrameInterval(DEFAULT_KEYFRAME_INTERVAL_MS),
mEnabled(true) {
mLastChunk.mDuration = 0;
}
void VideoTrackEncoder::Suspend(TimeStamp aTime) {
void VideoTrackEncoder::Suspend(const TimeStamp& aTime) {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
TRACK_LOG(LogLevel::Info, ("[VideoTrackEncoder %p]: Suspend(), was %s", this,
mSuspended ? "suspended" : "live"));
TRACK_LOG(LogLevel::Info,
("[VideoTrackEncoder %p]: Suspend() at %.3fs, was %s", this,
mStartTime.IsNull() ? 0.0 : (aTime - mStartTime).ToSeconds(),
mSuspended ? "suspended" : "live"));
if (mSuspended) {
return;
@ -397,21 +330,25 @@ void VideoTrackEncoder::Suspend(TimeStamp aTime) {
mSuspendTime = aTime;
}
void VideoTrackEncoder::Resume(TimeStamp aTime) {
void VideoTrackEncoder::Resume(const TimeStamp& aTime) {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
TRACK_LOG(LogLevel::Info, ("[VideoTrackEncoder %p]: Resume(), was %s", this,
mSuspended ? "suspended" : "live"));
if (!mSuspended) {
return;
}
TRACK_LOG(
LogLevel::Info,
("[VideoTrackEncoder %p]: Resume() after %.3fs, was %s", this,
(aTime - mSuspendTime).ToSeconds(), mSuspended ? "suspended" : "live"));
mSuspended = false;
TimeDuration suspendDuration = aTime - mSuspendTime;
if (!mLastChunk.mTimeStamp.IsNull()) {
VideoChunk* nextChunk = mIncomingBuffer.FindChunkContaining(mCurrentTime);
if (nextChunk && nextChunk->mTimeStamp < aTime) {
VideoChunk* nextChunk = mIncomingBuffer.FindChunkContaining(aTime);
MOZ_ASSERT_IF(nextChunk, nextChunk->mTimeStamp <= aTime);
if (nextChunk) {
nextChunk->mTimeStamp = aTime;
}
mLastChunk.mTimeStamp += suspendDuration;
@ -423,11 +360,64 @@ void VideoTrackEncoder::Resume(TimeStamp aTime) {
mSuspendTime = TimeStamp();
}
void VideoTrackEncoder::Disable(const TimeStamp& aTime) {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
TRACK_LOG(LogLevel::Debug, ("[VideoTrackEncoder %p]: Disable()", this));
if (mStartTime.IsNull()) {
// We haven't started yet. No need to touch future frames.
mEnabled = false;
return;
}
// Advance currentTime to process any frames in mIncomingBuffer between
// mCurrentTime and aTime.
AdvanceCurrentTime(aTime);
if (!mLastChunk.mTimeStamp.IsNull()) {
// Insert a black frame at t=aTime into mIncomingBuffer, to trigger the
// shift to black at the right moment.
VideoSegment tempSegment;
tempSegment.AppendFrom(&mIncomingBuffer);
mIncomingBuffer.AppendFrame(do_AddRef(mLastChunk.mFrame.GetImage()),
mLastChunk.mFrame.GetIntrinsicSize(),
mLastChunk.mFrame.GetPrincipalHandle(), true,
aTime);
mIncomingBuffer.AppendFrom(&tempSegment);
}
mEnabled = false;
}
void VideoTrackEncoder::Enable(const TimeStamp& aTime) {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
TRACK_LOG(LogLevel::Debug, ("[VideoTrackEncoder %p]: Enable()", this));
if (mStartTime.IsNull()) {
// We haven't started yet. No need to touch future frames.
mEnabled = true;
return;
}
// Advance currentTime to process any frames in mIncomingBuffer between
// mCurrentTime and aTime.
AdvanceCurrentTime(aTime);
if (!mLastChunk.mTimeStamp.IsNull()) {
// Insert a real frame at t=aTime into mIncomingBuffer, to trigger the
// shift from black at the right moment.
VideoSegment tempSegment;
tempSegment.AppendFrom(&mIncomingBuffer);
mIncomingBuffer.AppendFrame(do_AddRef(mLastChunk.mFrame.GetImage()),
mLastChunk.mFrame.GetIntrinsicSize(),
mLastChunk.mFrame.GetPrincipalHandle(),
mLastChunk.mFrame.GetForceBlack(), aTime);
mIncomingBuffer.AppendFrom(&tempSegment);
}
mEnabled = true;
}
void VideoTrackEncoder::AppendVideoSegment(VideoSegment&& aSegment) {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
TRACK_LOG(LogLevel::Verbose,
("[VideoTrackEncoder %p]: AppendVideoSegment() duration=%" PRIu64,
this, aSegment.GetDuration()));
("[VideoTrackEncoder %p]: AppendVideoSegment()", this));
if (mCanceled) {
return;
@ -452,7 +442,7 @@ void VideoTrackEncoder::TakeTrackData(VideoSegment& aSegment) {
}
void VideoTrackEncoder::Init(const VideoSegment& aSegment,
StreamTime aDuration) {
const TimeStamp& aTime) {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
if (mInitialized) {
@ -488,8 +478,7 @@ void VideoTrackEncoder::Init(const VideoSegment& aSegment,
break;
}
mNotInitDuration += aDuration;
if ((mNotInitDuration / mTrackRate > VIDEO_INIT_FAILED_DURATION) &&
if (((aTime - mStartTime).ToSeconds() > VIDEO_INIT_FAILED_DURATION) &&
mInitCounter > 1) {
TRACK_LOG(LogLevel::Warning,
("[VideoTrackEncoder %p]: No successful init for %ds.", this,
@ -503,9 +492,7 @@ void VideoTrackEncoder::Init(const VideoSegment& aSegment,
void VideoTrackEncoder::Cancel() {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
TRACK_LOG(LogLevel::Info,
("[VideoTrackEncoder %p]: Cancel(), currentTime=%" PRIu64, this,
mCurrentTime));
TRACK_LOG(LogLevel::Info, ("[VideoTrackEncoder %p]: Cancel()", this));
mCanceled = true;
mIncomingBuffer.Clear();
mOutgoingBuffer.Clear();
@ -528,22 +515,33 @@ void VideoTrackEncoder::NotifyEndOfStream() {
}
mEndOfStream = true;
TRACK_LOG(
LogLevel::Info,
("[VideoTrackEncoder %p]: NotifyEndOfStream(), currentTime=%" PRIu64,
this, mCurrentTime));
TRACK_LOG(LogLevel::Info,
("[VideoTrackEncoder %p]: NotifyEndOfStream()", this));
if (!mLastChunk.IsNull() && mLastChunk.mDuration > 0) {
if (!mLastChunk.IsNull()) {
RefPtr<layers::Image> lastImage = mLastChunk.mFrame.GetImage();
TRACK_LOG(LogLevel::Debug,
("[VideoTrackEncoder]: Appending last video frame %p, "
"duration=%.5f",
lastImage.get(),
FramesToTimeUnit(mLastChunk.mDuration, mTrackRate).ToSeconds()));
mOutgoingBuffer.AppendFrame(
lastImage.forget(), mLastChunk.mDuration,
mLastChunk.mFrame.GetIntrinsicSize(), PRINCIPAL_HANDLE_NONE,
mLastChunk.mFrame.GetForceBlack(), mLastChunk.mTimeStamp);
const TimeStamp now = TimeStamp::Now();
TimeStamp currentTime = mSuspended ? mSuspendTime : mCurrentTime;
currentTime = mDriftCompensator->GetVideoTime(now, currentTime);
TimeDuration absoluteEndTime = currentTime - mStartTime;
CheckedInt64 duration =
UsecsToFrames(absoluteEndTime.ToMicroseconds(), mTrackRate) -
mEncodedTicks;
if (duration.isValid() && duration.value() > 0) {
mEncodedTicks += duration.value();
TRACK_LOG(LogLevel::Debug,
("[VideoTrackEncoder %p]: Appending last video frame %p at pos "
"%.3fs, "
"track-end=%.3fs",
this, lastImage.get(),
(mLastChunk.mTimeStamp - mStartTime).ToSeconds(),
absoluteEndTime.ToSeconds()));
mOutgoingBuffer.AppendFrame(
lastImage.forget(), mLastChunk.mFrame.GetIntrinsicSize(),
PRINCIPAL_HANDLE_NONE, mLastChunk.mFrame.GetForceBlack() || !mEnabled,
mLastChunk.mTimeStamp);
mOutgoingBuffer.ExtendLastFrameBy(duration.value());
}
}
mIncomingBuffer.Clear();
@@ -554,34 +552,20 @@ void VideoTrackEncoder::NotifyEndOfStream() {
}
}
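The closing-frame arithmetic above reduces to one conversion: stretch the last frame so the total encoded ticks match the drift-compensated track end. A hedged sketch with made-up numbers (UsecsToTicks stands in for UsecsToFrames; the real code guards the subtraction with CheckedInt64):

#include <cstdint>
#include <cstdio>

int64_t UsecsToTicks(int64_t aUsecs, int32_t aRate) {
  return aUsecs * aRate / 1000000;  // no overflow check in this sketch
}

int main() {
  const int32_t rate = 90000;       // video track rate, ticks per second
  int64_t encodedTicks = 450000;    // 5 s already handed to the encoder
  int64_t trackEndUsecs = 7500000;  // drift-compensated end at 7.5 s
  int64_t lastFrameTicks = UsecsToTicks(trackEndUsecs, rate) - encodedTicks;
  if (lastFrameTicks > 0) {
    std::printf("extend last frame by %lld ticks (%.2f s)\n",
                (long long)lastFrameTicks, (double)lastFrameTicks / rate);
  }
}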
void VideoTrackEncoder::SetStartOffset(StreamTime aStartOffset) {
void VideoTrackEncoder::SetStartOffset(const TimeStamp& aStartOffset) {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
MOZ_ASSERT(mCurrentTime == 0);
TRACK_LOG(LogLevel::Info,
("[VideoTrackEncoder %p]: SetStartOffset(), aStartOffset=%" PRIu64,
this, aStartOffset));
mIncomingBuffer.InsertNullDataAtStart(aStartOffset);
MOZ_ASSERT(mCurrentTime.IsNull());
TRACK_LOG(LogLevel::Info, ("[VideoTrackEncoder %p]: SetStartOffset()", this));
mStartTime = aStartOffset;
mCurrentTime = aStartOffset;
}
void VideoTrackEncoder::AdvanceBlockedInput(StreamTime aDuration) {
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
TRACK_LOG(
LogLevel::Verbose,
("[VideoTrackEncoder %p]: AdvanceBlockedInput(), aDuration=%" PRIu64,
this, aDuration));
// We call Init here so it can account for aDuration towards the Init timeout
Init(mOutgoingBuffer, aDuration);
mIncomingBuffer.InsertNullDataAtStart(aDuration);
mCurrentTime += aDuration;
}
void VideoTrackEncoder::AdvanceCurrentTime(StreamTime aDuration) {
void VideoTrackEncoder::AdvanceCurrentTime(const TimeStamp& aTime) {
AUTO_PROFILER_LABEL("VideoTrackEncoder::AdvanceCurrentTime", OTHER);
MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn());
MOZ_ASSERT(!mStartTime.IsNull());
MOZ_ASSERT(!mCurrentTime.IsNull());
if (mCanceled) {
return;
@@ -591,117 +575,116 @@ void VideoTrackEncoder::AdvanceCurrentTime(StreamTime aDuration) {
return;
}
TRACK_LOG(LogLevel::Verbose,
("[VideoTrackEncoder %p]: AdvanceCurrentTime() %" PRIu64, this,
aDuration));
StreamTime currentTime = mCurrentTime + aDuration;
MOZ_ASSERT(!mStartTime.IsNull());
if (mSuspended) {
mCurrentTime = currentTime;
mIncomingBuffer.ForgetUpTo(mCurrentTime);
TRACK_LOG(
LogLevel::Verbose,
("[VideoTrackEncoder %p]: AdvanceCurrentTime() suspended at %.3fs",
this, (mCurrentTime - mStartTime).ToSeconds()));
mCurrentTime = aTime;
mIncomingBuffer.ForgetUpToTime(mCurrentTime);
return;
}
TRACK_LOG(LogLevel::Verbose,
("[VideoTrackEncoder %p]: AdvanceCurrentTime() to %.3fs", this,
(aTime - mStartTime).ToSeconds()));
// Grab frames within the currentTime range from the incoming buffer.
VideoSegment tempSegment;
if (currentTime <= mIncomingBuffer.GetDuration()) {
tempSegment.AppendSlice(mIncomingBuffer, mCurrentTime, currentTime);
} else {
NS_ASSERTION(false,
"VideoTrackEncoder::AdvanceCurrentTime Not enough data");
TRACK_LOG(
LogLevel::Error,
("[VideoTrackEncoder %p]: AdvanceCurrentTime() Not enough data. "
"In incoming=%" PRIu64 ", aDuration=%" PRIu64 ", currentTime=%" PRIu64,
this, mIncomingBuffer.GetDuration(), aDuration, currentTime));
OnError();
{
VideoChunk* previousChunk = &mLastChunk;
auto appendDupes = [&](const TimeStamp& aUpTo) {
while ((aUpTo - previousChunk->mTimeStamp).ToSeconds() > 1.0) {
// We encode at least one frame per second, even if there are none
// flowing.
previousChunk->mTimeStamp += TimeDuration::FromSeconds(1.0);
tempSegment.AppendFrame(
do_AddRef(previousChunk->mFrame.GetImage()),
previousChunk->mFrame.GetIntrinsicSize(),
previousChunk->mFrame.GetPrincipalHandle(),
previousChunk->mFrame.GetForceBlack() || !mEnabled,
previousChunk->mTimeStamp);
TRACK_LOG(
LogLevel::Verbose,
("[VideoTrackEncoder %p]: Duplicating video frame (%p) at pos %.3f",
this, previousChunk->mFrame.GetImage(),
(previousChunk->mTimeStamp - mStartTime).ToSeconds()));
}
};
for (VideoSegment::ChunkIterator iter(mIncomingBuffer); !iter.IsEnded();
iter.Next()) {
MOZ_ASSERT(!iter->IsNull());
if (!previousChunk->IsNull() &&
iter->mTimeStamp <= previousChunk->mTimeStamp) {
// This frame starts earlier than previousChunk. Skip.
continue;
}
if (iter->mTimeStamp >= aTime) {
// This frame starts in the future. Stop.
break;
}
if (!previousChunk->IsNull()) {
appendDupes(iter->mTimeStamp);
}
tempSegment.AppendFrame(
do_AddRef(iter->mFrame.GetImage()), iter->mFrame.GetIntrinsicSize(),
iter->mFrame.GetPrincipalHandle(),
iter->mFrame.GetForceBlack() || !mEnabled, iter->mTimeStamp);
TRACK_LOG(LogLevel::Verbose,
("[VideoTrackEncoder %p]: Taking video frame (%p) at pos %.3f",
this, iter->mFrame.GetImage(),
(iter->mTimeStamp - mStartTime).ToSeconds()));
previousChunk = &*iter;
}
if (!previousChunk->IsNull()) {
appendDupes(aTime);
}
}
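The appendDupes rule above, re-emitting the previous frame once per second whenever no new frame arrives, can be pinned down in isolation. A sketch over plain doubles (AppendWithDupes is an illustrative name; the real code also ORs in the !mEnabled black flag):

#include <cstdio>
#include <vector>

struct Frame { double time; };

// Copy time-sorted frames to `out`, re-emitting the previous frame at 1 s
// intervals whenever the gap to the next frame (or to `upTo`) exceeds 1 s.
void AppendWithDupes(std::vector<Frame>& out, const std::vector<Frame>& in,
                     double upTo) {
  if (in.empty()) return;
  Frame prev = in.front();
  for (const Frame& f : in) {
    while (f.time - prev.time > 1.0) {
      prev.time += 1.0;
      out.push_back(prev);  // duplicate of the previous frame
    }
    out.push_back(f);
    prev = f;
  }
  while (upTo - prev.time > 1.0) {
    prev.time += 1.0;
    out.push_back(prev);  // keep at least one frame per second flowing
  }
}

int main() {
  std::vector<Frame> out;
  AppendWithDupes(out, {{0.0}, {2.5}}, 4.0);
  for (const Frame& f : out) std::printf("%.1f\n", f.time);
  // prints 0.0 1.0 2.0 2.5 3.5 -- duplicates fill both gaps
}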
mCurrentTime = aTime;
mIncomingBuffer.ForgetUpToTime(mCurrentTime);
mCurrentTime = currentTime;
mIncomingBuffer.ForgetUpTo(mCurrentTime);
// Convert tempSegment timestamps to durations and add chunks with known
// duration to mOutgoingBuffer.
const TimeStamp now = TimeStamp::Now();
bool chunkAppended = false;
// Convert tempSegment timestamps to durations and add it to mOutgoingBuffer.
VideoSegment::ConstChunkIterator iter(tempSegment);
for (; !iter.IsEnded(); iter.Next()) {
for (VideoSegment::ConstChunkIterator iter(tempSegment); !iter.IsEnded();
iter.Next()) {
VideoChunk chunk = *iter;
if (mLastChunk.mTimeStamp.IsNull()) {
if (chunk.IsNull()) {
// The start of this track is frameless. We need to track the time
// it takes to get the first frame.
mLastChunk.mDuration += chunk.mDuration;
continue;
}
// This is the first real chunk in the track. Make it start at the
// beginning of the track.
MOZ_ASSERT(!iter->mTimeStamp.IsNull());
// This is the first real chunk in the track. Use its timestamp as the
// starting point for this track.
MOZ_ASSERT(!chunk.mTimeStamp.IsNull());
const StreamTime nullDuration = mLastChunk.mDuration;
mLastChunk = chunk;
chunk.mDuration = 0;
TRACK_LOG(
LogLevel::Verbose,
("[VideoTrackEncoder %p]: Got the first video frame (%p) at pos %.3f "
"(moving it to beginning)",
this, iter->mFrame.GetImage(),
(iter->mTimeStamp - mStartTime).ToSeconds()));
TRACK_LOG(LogLevel::Verbose,
("[VideoTrackEncoder]: Got first video chunk after %" PRId64
" ticks.",
nullDuration));
// Adapt to the time before the first frame. This extends the first frame
// from [start, end] to [0, end], but it'll do for now.
auto diff = FramesToTimeUnit(nullDuration, mTrackRate);
if (!diff.IsValid()) {
NS_ERROR("null duration overflow");
return;
}
mLastChunk.mTimeStamp -= diff.ToTimeDuration();
mLastChunk.mDuration += nullDuration;
mLastChunk = *iter;
mLastChunk.mTimeStamp = mStartTime;
continue;
}
MOZ_ASSERT(!mLastChunk.IsNull());
if (mLastChunk.CanCombineWithFollowing(chunk) || chunk.IsNull()) {
TRACK_LOG(LogLevel::Verbose,
("[VideoTrackEncoder]: Got dupe or null chunk."));
// This is the same frame as before (or null). We extend the last chunk
// with its duration.
mLastChunk.mDuration += chunk.mDuration;
MOZ_ASSERT(!chunk.IsNull());
if (mLastChunk.mDuration < mTrackRate) {
TRACK_LOG(LogLevel::Verbose, ("[VideoTrackEncoder]: Ignoring dupe/null "
"chunk of duration %" PRId64,
chunk.mDuration));
continue;
}
TRACK_LOG(LogLevel::Verbose,
("[VideoTrackEncoder]: Chunk >1 second. duration=%" PRId64 ", "
"trackRate=%" PRId32,
mLastChunk.mDuration, mTrackRate));
// If we have gotten dupes for over a second, we force send one
// to the encoder to make sure there is some output.
chunk.mTimeStamp = mLastChunk.mTimeStamp + TimeDuration::FromSeconds(1);
chunk.mDuration = mLastChunk.mDuration - mTrackRate;
mLastChunk.mDuration = mTrackRate;
if (chunk.IsNull()) {
// Ensure that we don't pass null to the encoder by making mLastChunk
// null later on.
chunk.mFrame = mLastChunk.mFrame;
}
}
if (mStartTime.IsNull()) {
mStartTime = mLastChunk.mTimeStamp;
}
TimeDuration relativeTime = chunk.mTimeStamp - mStartTime;
RefPtr<layers::Image> lastImage = mLastChunk.mFrame.GetImage();
TimeDuration absoluteEndTime =
mDriftCompensator->GetVideoTime(now, chunk.mTimeStamp) - mStartTime;
TRACK_LOG(LogLevel::Verbose,
("[VideoTrackEncoder]: Appending video frame %p, at pos %.5fs",
lastImage.get(), relativeTime.ToSeconds()));
("[VideoTrackEncoder %p]: Appending video frame %p, at pos %.3fs "
"until %.3fs",
this, mLastChunk.mFrame.GetImage(),
(mDriftCompensator->GetVideoTime(now, mLastChunk.mTimeStamp) -
mStartTime)
.ToSeconds(),
absoluteEndTime.ToSeconds()));
CheckedInt64 duration =
UsecsToFrames(relativeTime.ToMicroseconds(), mTrackRate) -
UsecsToFrames(absoluteEndTime.ToMicroseconds(), mTrackRate) -
mEncodedTicks;
if (!duration.isValid()) {
NS_ERROR("Duration overflow");
@@ -709,29 +692,35 @@ void VideoTrackEncoder::AdvanceCurrentTime(StreamTime aDuration) {
}
if (duration.value() <= 0) {
// The timestamp for mLastChunk is newer than for chunk.
// This means the durations reported from MediaStreamGraph for
// mLastChunk were larger than the timestamp diff - and durations were
// used to trigger the 1-second frame above. This could happen due to
// drift or underruns in the graph.
TRACK_LOG(LogLevel::Warning,
("[VideoTrackEncoder]: Underrun detected. Diff=%" PRId64,
duration.value()));
chunk.mTimeStamp = mLastChunk.mTimeStamp;
} else {
mEncodedTicks += duration.value();
mOutgoingBuffer.AppendFrame(
lastImage.forget(), duration.value(),
mLastChunk.mFrame.GetIntrinsicSize(), PRINCIPAL_HANDLE_NONE,
mLastChunk.mFrame.GetForceBlack(), mLastChunk.mTimeStamp);
chunkAppended = true;
// A frame either started before the last frame (can happen when
// multiple frames are added before SetStartOffset), or
// two frames were so close together that they ended up at the same
// position. We handle both cases by ignoring the previous frame.
TRACK_LOG(LogLevel::Verbose,
("[VideoTrackEncoder %p]: Duration from frame %p to frame %p "
"is %" PRId64 ". Ignoring %p",
this, mLastChunk.mFrame.GetImage(), iter->mFrame.GetImage(),
duration.value(), mLastChunk.mFrame.GetImage()));
TimeStamp t = mLastChunk.mTimeStamp;
mLastChunk = *iter;
mLastChunk.mTimeStamp = t;
continue;
}
mEncodedTicks += duration.value();
mOutgoingBuffer.AppendFrame(
do_AddRef(mLastChunk.mFrame.GetImage()),
mLastChunk.mFrame.GetIntrinsicSize(), PRINCIPAL_HANDLE_NONE,
mLastChunk.mFrame.GetForceBlack() || !mEnabled, mLastChunk.mTimeStamp);
mOutgoingBuffer.ExtendLastFrameBy(duration.value());
chunkAppended = true;
mLastChunk = chunk;
}
if (chunkAppended) {
Init(mOutgoingBuffer, aDuration);
Init(mOutgoingBuffer, mCurrentTime);
if (mInitialized) {
OnDataAvailable();
}
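The per-frame duration bookkeeping in the loop above keeps a running mEncodedTicks total so rounding never accumulates into drift, and drops frames whose tick position does not advance. A self-contained sketch of that accounting with invented timestamps:

#include <cstdint>
#include <cstdio>

int main() {
  const int32_t rate = 90000;  // ticks per second
  // microseconds from track start; the middle two frames coincide
  const int64_t stampsUs[] = {0, 33000, 33000, 67000};
  int64_t encodedTicks = 0;
  for (int i = 1; i < 4; i++) {
    // ticks from track start to this frame, minus what is already encoded
    int64_t duration = stampsUs[i] * rate / 1000000 - encodedTicks;
    if (duration <= 0) {
      std::printf("frame at %lld us lands on the previous one: ignored\n",
                  (long long)stampsUs[i]);
      continue;
    }
    encodedTicks += duration;
    std::printf("previous frame extended by %lld ticks\n",
                (long long)duration);
  }
}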

View file

@@ -16,6 +16,7 @@
namespace mozilla {
class AbstractThread;
class DriftCompensator;
class TrackEncoder;
class TrackEncoderListener {
@@ -60,10 +61,6 @@ class TrackEncoder {
public:
explicit TrackEncoder(TrackRate aTrackRate);
virtual void Suspend(TimeStamp aTime) = 0;
virtual void Resume(TimeStamp aTime) = 0;
/**
* Called by MediaEncoder to cancel the encoding.
*/
@@ -75,25 +72,6 @@ class TrackEncoder {
*/
virtual void NotifyEndOfStream() = 0;
/**
* MediaStreamGraph notifies us about the time of the track's start.
* This gets called on the MediaEncoder thread after a dispatch.
*/
virtual void SetStartOffset(StreamTime aStartOffset) = 0;
/**
* Dispatched from MediaStreamGraph when it has run an iteration where the
* input track of the track this TrackEncoder is associated with didn't have
* any data.
*/
virtual void AdvanceBlockedInput(StreamTime aDuration) = 0;
/**
* MediaStreamGraph notifies us about the duration of data that has just been
* processed. This gets called on the MediaEncoder thread after a dispatch.
*/
virtual void AdvanceCurrentTime(StreamTime aDuration) = 0;
/**
* Creates and sets up meta data for a specific codec, called on the worker
* thread.
@@ -187,15 +165,12 @@ class TrackEncoder {
*/
bool mCanceled;
/**
* The latest current time reported to us from the MSG.
*/
StreamTime mCurrentTime;
// How many times we have tried to initialize the encoder.
uint32_t mInitCounter;
StreamTime mNotInitDuration;
/**
* True if this TrackEncoder is currently suspended.
*/
bool mSuspended;
/**
@@ -217,19 +192,20 @@ class AudioTrackEncoder : public TrackEncoder {
: TrackEncoder(aTrackRate),
mChannels(0),
mSamplingRate(0),
mAudioBitrate(0),
mDirectConnected(false) {}
mNotInitDuration(0),
mAudioBitrate(0) {}
/**
* Suspends encoding from mCurrentTime, i.e., all audio data until the next
* Resume() will be dropped.
* Suspends encoding from now, i.e., all future audio data received through
* AppendAudioSegment() until the next Resume() will be dropped.
*/
void Suspend(TimeStamp aTime) override;
void Suspend();
/**
* Resumes encoding starting at mCurrentTime.
* Resumes encoding starting now, i.e., data from the next
* AppendAudioSegment() will get encoded.
*/
void Resume(TimeStamp aTime) override;
void Resume();
/**
* Appends and consumes track data from aSegment.
@@ -237,8 +213,8 @@ class AudioTrackEncoder : public TrackEncoder {
void AppendAudioSegment(AudioSegment&& aSegment);
/**
* Takes track data from the last time TakeTrackData ran until mCurrentTime
* and moves it to aSegment.
* Takes all track data that has been played out from the last time
* TakeTrackData ran and moves it to aSegment.
*/
void TakeTrackData(AudioSegment& aSegment);
@@ -310,25 +286,6 @@ class AudioTrackEncoder : public TrackEncoder {
*/
void NotifyEndOfStream() override;
void SetStartOffset(StreamTime aStartOffset) override;
/**
* Dispatched from MediaStreamGraph when it has run an iteration where the
* input track of the track this TrackEncoder is associated with didn't have
* any data.
*
* Since we sometimes use a direct listener for AudioSegments we miss periods
* of time for which the source didn't have any data. This ensures that the
* latest frame gets displayed while we wait for more data to be pushed.
*/
void AdvanceBlockedInput(StreamTime aDuration) override;
/**
* Dispatched from MediaStreamGraph when it has run an iteration so we can
* hand more data to the encoder.
*/
void AdvanceCurrentTime(StreamTime aDuration) override;
protected:
/**
* Number of samples per channel in a pcm buffer. This is also the value of
@@ -356,27 +313,16 @@ class AudioTrackEncoder : public TrackEncoder {
*/
int mSamplingRate;
/**
* A segment queue of incoming audio track data, from listeners.
* The duration of mIncomingBuffer is strictly increasing as it gets fed more
* data. Consumed data is replaced by null data.
*/
AudioSegment mIncomingBuffer;
/**
* A segment queue of outgoing audio track data to the encoder.
* The contents of mOutgoingBuffer will always be what has been consumed from
* mIncomingBuffer (up to mCurrentTime) but not yet consumed by the encoder
* sub class.
* The contents of mOutgoingBuffer will always be what has been appended on
* the encoder thread but not yet consumed by the encoder sub class.
*/
AudioSegment mOutgoingBuffer;
uint32_t mAudioBitrate;
StreamTime mNotInitDuration;
// This may only be accessed on the MSG thread.
// I.e., in the regular NotifyQueuedChanges for audio to avoid adding data
// from that callback when the direct one is active.
bool mDirectConnected;
uint32_t mAudioBitrate;
};
enum class FrameDroppingMode {
@@ -386,19 +332,32 @@ enum class FrameDroppingMode {
class VideoTrackEncoder : public TrackEncoder {
public:
explicit VideoTrackEncoder(TrackRate aTrackRate,
explicit VideoTrackEncoder(RefPtr<DriftCompensator> aDriftCompensator,
TrackRate aTrackRate,
FrameDroppingMode aFrameDroppingMode);
/**
* Suspends encoding from aTime, i.e., all video frame with a timestamp
* between aTime and the timestamp of the next Resume() will be dropped.
*/
void Suspend(TimeStamp aTime) override;
void Suspend(const TimeStamp& aTime);
/**
* Resumes encoding starting at aTime.
*/
void Resume(TimeStamp aTime) override;
void Resume(const TimeStamp& aTime);
/**
* Makes the video black from aTime.
*/
void Disable(const TimeStamp& aTime);
/**
* Makes the video non-black from aTime.
*
* NB that it could still be forced black for other reasons, like principals.
*/
void Enable(const TimeStamp& aTime);
/**
* Appends source video frames to mIncomingBuffer. We only append the source
@@ -431,7 +390,7 @@ class VideoTrackEncoder : public TrackEncoder {
* Failing to initiate the encoder for an accumulated aDuration of 30 seconds
* is seen as an error and will cancel the current encoding.
*/
void Init(const VideoSegment& aSegment, StreamTime aDuration);
void Init(const VideoSegment& aSegment, const TimeStamp& aTime);
StreamTime SecondsToMediaTime(double aS) const {
NS_ASSERTION(0 <= aS && aS <= TRACK_TICKS_MAX / TRACK_RATE_MAX,
@@ -439,6 +398,12 @@ class VideoTrackEncoder : public TrackEncoder {
return mTrackRate * aS;
}
/**
* MediaStreamGraph notifies us about the time of the track's start.
* This gets called on the MediaEncoder thread after a dispatch.
*/
void SetStartOffset(const TimeStamp& aStartOffset);
void Cancel() override;
/**
@@ -447,24 +412,11 @@ class VideoTrackEncoder : public TrackEncoder {
*/
void NotifyEndOfStream() override;
void SetStartOffset(StreamTime aStartOffset) override;
/**
* Dispatched from MediaStreamGraph when it has run an iteration where the
* input track of the track this TrackEncoder is associated with didn't have
* any data.
*
* Since we use a direct listener for VideoSegments we miss periods of time
* for which the source didn't have any data. This ensures that the latest
* frame gets displayed while we wait for more data to be pushed.
*/
void AdvanceBlockedInput(StreamTime aDuration) override;
/**
* Dispatched from MediaStreamGraph when it has run an iteration so we can
* hand more data to the encoder.
*/
void AdvanceCurrentTime(StreamTime aDuration) override;
void AdvanceCurrentTime(const TimeStamp& aTime);
/**
* Set desired keyframe interval defined in milliseconds.
@@ -481,6 +433,12 @@ class VideoTrackEncoder : public TrackEncoder {
virtual nsresult Init(int aWidth, int aHeight, int aDisplayWidth,
int aDisplayHeight) = 0;
/**
* Drift compensator for re-clocking incoming video frame wall-clock
* timestamps to audio time.
*/
const RefPtr<DriftCompensator> mDriftCompensator;
/**
* The width of source video frame, ceiled if the source width is odd.
*/
@@ -510,8 +468,8 @@
/**
* A segment queue of incoming video track data, from listeners.
* The duration of mIncomingBuffer is strictly increasing as it gets fed more
* data. Consumed data is replaced by null data.
* The duration of mIncomingBuffer is irrelevant as we only look at TimeStamps
* of frames. Consumed data is replaced by null data.
*/
VideoSegment mIncomingBuffer;
@@ -530,7 +488,14 @@
StreamTime mEncodedTicks;
/**
* The time of the first real video frame passed to mOutgoingBuffer (at t=0).
* The time up to which we have forwarded data from mIncomingBuffer to
* mOutgoingBuffer.
*/
TimeStamp mCurrentTime;
/**
* The time the video track started, so the start of the video track can be
* synced to the start of the audio track.
*
* Note that this time will progress during suspension, to make sure the
* incoming frames stay in sync with the output.
@@ -555,6 +520,12 @@
* The desired keyframe interval defined in milliseconds.
*/
int32_t mKeyFrameInterval;
/**
* True if the video MediaStreamTrack this VideoTrackEncoder is attached to is
* currently enabled. While false, we encode all frames as black.
*/
bool mEnabled;
};
} // namespace mozilla

View file

@@ -33,9 +33,11 @@ using namespace mozilla::layers;
using namespace mozilla::media;
using namespace mozilla::dom;
VP8TrackEncoder::VP8TrackEncoder(TrackRate aTrackRate,
VP8TrackEncoder::VP8TrackEncoder(RefPtr<DriftCompensator> aDriftCompensator,
TrackRate aTrackRate,
FrameDroppingMode aFrameDroppingMode)
: VideoTrackEncoder(aTrackRate, aFrameDroppingMode),
: VideoTrackEncoder(std::move(aDriftCompensator), aTrackRate,
aFrameDroppingMode),
mVPXContext(new vpx_codec_ctx_t()),
mVPXImageWrapper(new vpx_image_t()) {
MOZ_COUNT_CTOR(VP8TrackEncoder);

View file

@@ -28,7 +28,8 @@ class VP8TrackEncoder : public VideoTrackEncoder {
};
public:
VP8TrackEncoder(TrackRate aTrackRate, FrameDroppingMode aFrameDroppingMode);
VP8TrackEncoder(RefPtr<DriftCompensator> aDriftCompensator,
TrackRate aTrackRate, FrameDroppingMode aFrameDroppingMode);
virtual ~VP8TrackEncoder();
already_AddRefed<TrackMetadataBase> GetMetadata() final;

View file

@@ -33,8 +33,10 @@ DEFINES['TRACING'] = True
FINAL_LIBRARY = 'xul'
# These includes are from Android JB, for use of MediaCodec.
LOCAL_INCLUDES += ['/ipc/chromium/src']
LOCAL_INCLUDES += [
'/dom/media',
'/ipc/chromium/src',
]
include('/ipc/chromium/chromium-config.mozbuild')

View file

@@ -216,9 +216,7 @@ TEST(OpusAudioTrackEncoder, FrameEncode) {
const int32_t samples = sampleRate * 5;
generator.Generate(segment, samples);
encoder.SetStartOffset(0);
encoder.AppendAudioSegment(std::move(segment));
encoder.AdvanceCurrentTime(samples);
EncodedFrameContainer container;
EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container)));

View file

@@ -0,0 +1,84 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#include "gtest/gtest.h"
#include "DriftCompensation.h"
using namespace mozilla;
class DriftCompensatorTest : public ::testing::Test {
public:
const TrackRate mRate = 44100;
const TimeStamp mStart;
const RefPtr<DriftCompensator> mComp;
DriftCompensatorTest()
: mStart(TimeStamp::Now()),
mComp(MakeRefPtr<DriftCompensator>(GetCurrentThreadEventTarget(),
mRate)) {
mComp->NotifyAudioStart(mStart);
// NotifyAudioStart dispatched a runnable to update the audio mStart time on
// the video thread. Because this is a test, the video thread is the current
// thread. We spin the event loop until we know the mStart time is updated.
{
bool updated = false;
NS_DispatchToCurrentThread(
NS_NewRunnableFunction(__func__, [&] { updated = true; }));
SpinEventLoopUntil([&] { return updated; });
}
}
// Past() is half as far from `mStart` as `aNow`.
TimeStamp Past(TimeStamp aNow) {
return mStart + (aNow - mStart) / (int64_t)2;
}
// Future() is twice as far from `mStart` as `aNow`.
TimeStamp Future(TimeStamp aNow) { return mStart + (aNow - mStart) * 2; }
};
TEST_F(DriftCompensatorTest, Initialized) {
EXPECT_EQ(mComp->GetVideoTime(mStart, mStart), mStart);
}
TEST_F(DriftCompensatorTest, SlowerAudio) {
// 10s of audio took 20 seconds of wall clock to play out
mComp->NotifyAudio(mRate * 10);
TimeStamp now = mStart + TimeDuration::FromSeconds(20);
EXPECT_EQ((mComp->GetVideoTime(now, mStart) - mStart).ToSeconds(), 0.0);
EXPECT_EQ((mComp->GetVideoTime(now, Past(now)) - mStart).ToSeconds(), 5.0);
EXPECT_EQ((mComp->GetVideoTime(now, now) - mStart).ToSeconds(), 10.0);
EXPECT_EQ((mComp->GetVideoTime(now, Future(now)) - mStart).ToSeconds(), 20.0);
}
TEST_F(DriftCompensatorTest, NoDrift) {
// 10s of audio took 10 seconds of wall clock to play out
mComp->NotifyAudio(mRate * 10);
TimeStamp now = mStart + TimeDuration::FromSeconds(10);
EXPECT_EQ((mComp->GetVideoTime(now, mStart) - mStart).ToSeconds(), 0.0);
EXPECT_EQ((mComp->GetVideoTime(now, Past(now)) - mStart).ToSeconds(), 5.0);
EXPECT_EQ((mComp->GetVideoTime(now, now) - mStart).ToSeconds(), 10.0);
EXPECT_EQ((mComp->GetVideoTime(now, Future(now)) - mStart).ToSeconds(), 20.0);
}
TEST_F(DriftCompensatorTest, NoProgress) {
// 10s of audio took 0 seconds of wall clock to play out
mComp->NotifyAudio(mRate * 10);
TimeStamp now = mStart;
TimeStamp future = mStart + TimeDuration::FromSeconds(5);
EXPECT_EQ((mComp->GetVideoTime(now, mStart) - mStart).ToSeconds(), 0.0);
EXPECT_EQ((mComp->GetVideoTime(now, future) - mStart).ToSeconds(), 5.0);
}
TEST_F(DriftCompensatorTest, FasterAudio) {
// 20s of audio took 10 seconds of wall clock to play out
mComp->NotifyAudio(mRate * 20);
TimeStamp now = mStart + TimeDuration::FromSeconds(10);
EXPECT_EQ((mComp->GetVideoTime(now, mStart) - mStart).ToSeconds(), 0.0);
EXPECT_EQ((mComp->GetVideoTime(now, Past(now)) - mStart).ToSeconds(), 10.0);
EXPECT_EQ((mComp->GetVideoTime(now, now) - mStart).ToSeconds(), 20.0);
EXPECT_EQ((mComp->GetVideoTime(now, Future(now)) - mStart).ToSeconds(), 40.0);
}
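These expectations all follow one linear model: video timestamps are scaled by the ratio of played-out audio time to elapsed wall-clock time. A sketch of that relation (GetVideoTime here is a plain function over seconds, not the threaded DriftCompensator API):

#include <cstdio>

// All times are seconds relative to mStart.
double GetVideoTime(double audioSeconds, double wallSeconds, double t) {
  if (wallSeconds <= 0.0) return t;  // no progress yet: identity (NoProgress)
  return t * (audioSeconds / wallSeconds);
}

int main() {
  // SlowerAudio: 10 s of audio over 20 s of wall clock halves video time.
  std::printf("%.1f\n", GetVideoTime(10, 20, 20));  // 10.0
  // FasterAudio: 20 s of audio over 10 s of wall clock doubles it.
  std::printf("%.1f\n", GetVideoTime(20, 10, 10));  // 20.0
}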

View file

@@ -0,0 +1,211 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "gtest/gtest.h"
#include "VideoFrameConverter.h"
#include "YUVBufferGenerator.h"
using namespace mozilla;
class VideoFrameConverterTest;
class FrameListener : public VideoConverterListener {
public:
explicit FrameListener(VideoFrameConverterTest* aTest);
void OnVideoFrameConverted(const webrtc::VideoFrame& aVideoFrame) override;
private:
VideoFrameConverterTest* mTest;
};
class VideoFrameConverterTest : public ::testing::Test {
protected:
using FrameType = Pair<webrtc::VideoFrame, TimeStamp>;
Monitor mMonitor;
RefPtr<VideoFrameConverter> mConverter;
RefPtr<FrameListener> mListener;
std::vector<FrameType> mConvertedFrames;
VideoFrameConverterTest()
: mMonitor("PacingFixture::mMonitor"),
mConverter(MakeAndAddRef<VideoFrameConverter>()),
mListener(MakeAndAddRef<FrameListener>(this)) {
mConverter->AddListener(mListener);
}
void TearDown() override { mConverter->Shutdown(); }
size_t NumConvertedFrames() {
MonitorAutoLock lock(mMonitor);
return mConvertedFrames.size();
}
std::vector<FrameType> WaitForNConverted(size_t aN) {
MonitorAutoLock l(mMonitor);
while (mConvertedFrames.size() < aN) {
l.Wait();
}
std::vector<FrameType> v(mConvertedFrames.begin(),
mConvertedFrames.begin() + aN);
return v;
}
public:
void OnVideoFrameConverted(const webrtc::VideoFrame& aVideoFrame) {
MonitorAutoLock lock(mMonitor);
mConvertedFrames.push_back(MakePair(aVideoFrame, TimeStamp::Now()));
mMonitor.Notify();
}
};
FrameListener::FrameListener(VideoFrameConverterTest* aTest) : mTest(aTest) {}
void FrameListener::OnVideoFrameConverted(
const webrtc::VideoFrame& aVideoFrame) {
mTest->OnVideoFrameConverted(aVideoFrame);
}
VideoChunk GenerateChunk(int32_t aWidth, int32_t aHeight, TimeStamp aTime) {
YUVBufferGenerator generator;
generator.Init(gfx::IntSize(aWidth, aHeight));
VideoFrame f(generator.GenerateI420Image(), gfx::IntSize(aWidth, aHeight));
VideoChunk c;
c.mFrame.TakeFrom(&f);
c.mTimeStamp = aTime;
c.mDuration = 0;
return c;
}
TEST_F(VideoFrameConverterTest, BasicConversion) {
TimeStamp now = TimeStamp::Now();
VideoChunk chunk = GenerateChunk(640, 480, now);
mConverter->QueueVideoChunk(chunk, false);
auto frames = WaitForNConverted(1);
ASSERT_EQ(frames.size(), 1U);
EXPECT_EQ(frames[0].first().width(), 640);
EXPECT_EQ(frames[0].first().height(), 480);
EXPECT_GT(frames[0].second(), now);
}
TEST_F(VideoFrameConverterTest, BasicPacing) {
TimeStamp now = TimeStamp::Now();
TimeStamp future = now + TimeDuration::FromMilliseconds(100);
VideoChunk chunk = GenerateChunk(640, 480, future);
mConverter->QueueVideoChunk(chunk, false);
auto frames = WaitForNConverted(1);
EXPECT_GT(TimeStamp::Now(), future);
ASSERT_EQ(frames.size(), 1U);
EXPECT_EQ(frames[0].first().width(), 640);
EXPECT_EQ(frames[0].first().height(), 480);
EXPECT_GT(frames[0].second(), future);
}
TEST_F(VideoFrameConverterTest, MultiPacing) {
TimeStamp now = TimeStamp::Now();
TimeStamp future1 = now + TimeDuration::FromMilliseconds(100);
TimeStamp future2 = now + TimeDuration::FromMilliseconds(200);
VideoChunk chunk = GenerateChunk(640, 480, future1);
mConverter->QueueVideoChunk(chunk, false);
chunk = GenerateChunk(640, 480, future2);
mConverter->QueueVideoChunk(chunk, false);
auto frames = WaitForNConverted(2);
EXPECT_GT(TimeStamp::Now(), future2);
ASSERT_EQ(frames.size(), 2U);
EXPECT_EQ(frames[0].first().width(), 640);
EXPECT_EQ(frames[0].first().height(), 480);
EXPECT_GT(frames[0].second(), future1);
EXPECT_EQ(frames[1].first().width(), 640);
EXPECT_EQ(frames[1].first().height(), 480);
EXPECT_GT(frames[1].second(), future2);
EXPECT_GT(frames[1].second(), frames[0].second());
}
TEST_F(VideoFrameConverterTest, Duplication) {
TimeStamp now = TimeStamp::Now();
TimeStamp future1 = now + TimeDuration::FromMilliseconds(100);
VideoChunk chunk = GenerateChunk(640, 480, future1);
mConverter->QueueVideoChunk(chunk, false);
auto frames = WaitForNConverted(2);
EXPECT_GT(TimeStamp::Now(), now + TimeDuration::FromMilliseconds(1100));
ASSERT_EQ(frames.size(), 2U);
EXPECT_EQ(frames[0].first().width(), 640);
EXPECT_EQ(frames[0].first().height(), 480);
EXPECT_GT(frames[0].second(), future1);
EXPECT_EQ(frames[1].first().width(), 640);
EXPECT_EQ(frames[1].first().height(), 480);
EXPECT_GT(frames[1].second(), now + TimeDuration::FromMilliseconds(1100));
}
TEST_F(VideoFrameConverterTest, DropsOld) {
TimeStamp now = TimeStamp::Now();
TimeStamp future1 = now + TimeDuration::FromMilliseconds(1000);
TimeStamp future2 = now + TimeDuration::FromMilliseconds(100);
mConverter->QueueVideoChunk(GenerateChunk(800, 600, future1), false);
mConverter->QueueVideoChunk(GenerateChunk(640, 480, future2), false);
auto frames = WaitForNConverted(1);
EXPECT_GT(TimeStamp::Now(), future2);
ASSERT_EQ(frames.size(), 1U);
EXPECT_EQ(frames[0].first().width(), 640);
EXPECT_EQ(frames[0].first().height(), 480);
EXPECT_GT(frames[0].second(), future2);
}
// We check that the disabling code is triggered by sending multiple,
// different frames to the converter within one second. While disabled, it
// shall treat all frames identically and issue one black frame per second.
TEST_F(VideoFrameConverterTest, BlackOnDisable) {
TimeStamp now = TimeStamp::Now();
TimeStamp future1 = now + TimeDuration::FromMilliseconds(100);
TimeStamp future2 = now + TimeDuration::FromMilliseconds(200);
TimeStamp future3 = now + TimeDuration::FromMilliseconds(400);
mConverter->SetTrackEnabled(false);
mConverter->QueueVideoChunk(GenerateChunk(640, 480, future1), false);
mConverter->QueueVideoChunk(GenerateChunk(640, 480, future2), false);
mConverter->QueueVideoChunk(GenerateChunk(640, 480, future3), false);
auto frames = WaitForNConverted(2);
EXPECT_GT(TimeStamp::Now(), now + TimeDuration::FromMilliseconds(1100));
ASSERT_EQ(frames.size(), 2U);
EXPECT_EQ(frames[0].first().width(), 640);
EXPECT_EQ(frames[0].first().height(), 480);
EXPECT_GT(frames[0].second(), future1);
EXPECT_EQ(frames[1].first().width(), 640);
EXPECT_EQ(frames[1].first().height(), 480);
EXPECT_GT(frames[1].second(), now + TimeDuration::FromMilliseconds(1100));
}
TEST_F(VideoFrameConverterTest, ClearFutureFramesOnJumpingBack) {
TimeStamp now = TimeStamp::Now();
TimeStamp future1 = now + TimeDuration::FromMilliseconds(100);
TimeStamp future2 = now + TimeDuration::FromMilliseconds(200);
TimeStamp future3 = now + TimeDuration::FromMilliseconds(150);
mConverter->QueueVideoChunk(GenerateChunk(640, 480, future1), false);
WaitForNConverted(1);
// We are now at t=100ms+. Queue a future frame and jump back in time to
// signal a reset.
mConverter->QueueVideoChunk(GenerateChunk(800, 600, future2), false);
VideoChunk nullChunk;
nullChunk.mFrame = VideoFrame(nullptr, gfx::IntSize(800, 600));
nullChunk.mTimeStamp = TimeStamp::Now();
ASSERT_GT(nullChunk.mTimeStamp, future1);
mConverter->QueueVideoChunk(nullChunk, false);
// We queue one more chunk after the reset so we don't have to wait a full
// second for the same-frame timer. It has a different time and resolution
// so we can differentiate them.
mConverter->QueueVideoChunk(GenerateChunk(320, 240, future3), false);
auto frames = WaitForNConverted(2);
EXPECT_GT(TimeStamp::Now(), future3);
EXPECT_LT(TimeStamp::Now(), future2);
ASSERT_EQ(frames.size(), 2U);
EXPECT_EQ(frames[0].first().width(), 640);
EXPECT_EQ(frames[0].first().height(), 480);
EXPECT_GT(frames[0].second(), future1);
EXPECT_EQ(frames[1].first().width(), 320);
EXPECT_EQ(frames[1].first().height(), 240);
EXPECT_GT(frames[1].second(), future3);
}
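Taken together, these tests pin down a simple pacing contract: a frame stamped in the future is delivered once wall-clock time reaches its stamp, and an idle (or disabled) converter re-delivers the last frame after one second. A speculative sketch of just the schedule, ignoring threads and timers:

#include <cstdio>
#include <vector>

struct Delivery { double time; bool black; };

// Expected delivery times for `stamps`, padded with 1 Hz duplicates until
// `until`. While the track is disabled, every delivery is black.
std::vector<Delivery> Pace(const std::vector<double>& stamps, bool enabled,
                           double until) {
  std::vector<Delivery> out;
  for (double t : stamps) out.push_back({t, !enabled});
  double last = out.empty() ? 0.0 : out.back().time;
  while (until - last >= 1.0) {  // the same-frame timer fires at 1 Hz
    last += 1.0;
    out.push_back({last, !enabled});
  }
  return out;
}

int main() {
  for (const Delivery& d : Pace({0.1}, true, 1.2))
    std::printf("deliver at %.1f black=%d\n", d.time, (int)d.black);
  // prints 0.1 and the 1.1 duplicate, matching the Duplication test
}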

View file

@@ -17,9 +17,8 @@ TEST(VideoSegment, TestAppendFrameForceBlack) {
RefPtr<layers::Image> testImage = nullptr;
VideoSegment segment;
segment.AppendFrame(testImage.forget(), mozilla::StreamTime(90000),
mozilla::gfx::IntSize(640, 480), PRINCIPAL_HANDLE_NONE,
true);
segment.AppendFrame(testImage.forget(), mozilla::gfx::IntSize(640, 480),
PRINCIPAL_HANDLE_NONE, true);
VideoSegment::ChunkIterator iter(segment);
while (!iter.IsEnded()) {
@@ -33,8 +32,8 @@ TEST(VideoSegment, TestAppendFrameNotForceBlack) {
RefPtr<layers::Image> testImage = nullptr;
VideoSegment segment;
segment.AppendFrame(testImage.forget(), mozilla::StreamTime(90000),
mozilla::gfx::IntSize(640, 480), PRINCIPAL_HANDLE_NONE);
segment.AppendFrame(testImage.forget(), mozilla::gfx::IntSize(640, 480),
PRINCIPAL_HANDLE_NONE);
VideoSegment::ChunkIterator iter(segment);
while (!iter.IsEnded()) {

File diff suppressed because it is too large

View file

@@ -7,6 +7,7 @@
#include "mozilla/CheckedInt.h"
#include "mozilla/MathAlgorithms.h"
#include "nestegg/nestegg.h"
#include "DriftCompensation.h"
#include "OpusTrackEncoder.h"
#include "VP8TrackEncoder.h"
#include "WebMWriter.h"
@@ -28,7 +29,7 @@ class WebMOpusTrackEncoder : public OpusTrackEncoder {
class WebMVP8TrackEncoder : public VP8TrackEncoder {
public:
explicit WebMVP8TrackEncoder(TrackRate aTrackRate = 90000)
: VP8TrackEncoder(aTrackRate, FrameDroppingMode::DISALLOW) {}
: VP8TrackEncoder(nullptr, aTrackRate, FrameDroppingMode::DISALLOW) {}
bool TestVP8Creation(int32_t aWidth, int32_t aHeight, int32_t aDisplayWidth,
int32_t aDisplayHeight) {

View file

@@ -0,0 +1,160 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "YUVBufferGenerator.h"
using namespace mozilla::layers;
using namespace mozilla;
void YUVBufferGenerator::Init(const mozilla::gfx::IntSize& aSize) {
mImageSize = aSize;
int yPlaneLen = aSize.width * aSize.height;
int cbcrPlaneLen = (yPlaneLen + 1) / 2;
int frameLen = yPlaneLen + cbcrPlaneLen;
// Generate source buffer.
mSourceBuffer.SetLength(frameLen);
// Fill Y plane.
memset(mSourceBuffer.Elements(), 0x10, yPlaneLen);
// Fill Cb/Cr planes.
memset(mSourceBuffer.Elements() + yPlaneLen, 0x80, cbcrPlaneLen);
}
mozilla::gfx::IntSize YUVBufferGenerator::GetSize() const {
return mImageSize;
}
already_AddRefed<Image> YUVBufferGenerator::GenerateI420Image() {
return do_AddRef(CreateI420Image());
}
already_AddRefed<Image> YUVBufferGenerator::GenerateNV12Image() {
return do_AddRef(CreateNV12Image());
}
already_AddRefed<Image> YUVBufferGenerator::GenerateNV21Image() {
return do_AddRef(CreateNV21Image());
}
Image* YUVBufferGenerator::CreateI420Image() {
  PlanarYCbCrImage* image =
      new RecyclingPlanarYCbCrImage(new BufferRecycleBin());
PlanarYCbCrData data;
data.mPicSize = mImageSize;
const uint32_t yPlaneSize = mImageSize.width * mImageSize.height;
const uint32_t halfWidth = (mImageSize.width + 1) / 2;
const uint32_t halfHeight = (mImageSize.height + 1) / 2;
const uint32_t uvPlaneSize = halfWidth * halfHeight;
// Y plane.
uint8_t* y = mSourceBuffer.Elements();
data.mYChannel = y;
data.mYSize.width = mImageSize.width;
data.mYSize.height = mImageSize.height;
data.mYStride = mImageSize.width;
data.mYSkip = 0;
// Cr plane.
uint8_t* cr = y + yPlaneSize + uvPlaneSize;
data.mCrChannel = cr;
data.mCrSkip = 0;
// Cb plane
uint8_t* cb = y + yPlaneSize;
data.mCbChannel = cb;
data.mCbSkip = 0;
// CrCb plane vectors.
data.mCbCrStride = halfWidth;
data.mCbCrSize.width = halfWidth;
data.mCbCrSize.height = halfHeight;
image->CopyData(data);
return image;
}
Image* YUVBufferGenerator::CreateNV12Image() {
NVImage* image = new NVImage();
PlanarYCbCrData data;
data.mPicSize = mImageSize;
const uint32_t yPlaneSize = mImageSize.width * mImageSize.height;
const uint32_t halfWidth = (mImageSize.width + 1) / 2;
const uint32_t halfHeight = (mImageSize.height + 1) / 2;
// Y plane.
uint8_t* y = mSourceBuffer.Elements();
data.mYChannel = y;
data.mYSize.width = mImageSize.width;
data.mYSize.height = mImageSize.height;
data.mYStride = mImageSize.width;
data.mYSkip = 0;
// Cr plane.
uint8_t* cr = y + yPlaneSize;
data.mCrChannel = cr;
data.mCrSkip = 1;
// Cb plane
uint8_t* cb = y + yPlaneSize + 1;
data.mCbChannel = cb;
data.mCbSkip = 1;
// 4:2:0.
data.mCbCrStride = mImageSize.width;
data.mCbCrSize.width = halfWidth;
data.mCbCrSize.height = halfHeight;
image->SetData(data);
return image;
}
Image* YUVBufferGenerator::CreateNV21Image() {
NVImage* image = new NVImage();
PlanarYCbCrData data;
data.mPicSize = mImageSize;
const uint32_t yPlaneSize = mImageSize.width * mImageSize.height;
const uint32_t halfWidth = (mImageSize.width + 1) / 2;
const uint32_t halfHeight = (mImageSize.height + 1) / 2;
// Y plane.
uint8_t* y = mSourceBuffer.Elements();
data.mYChannel = y;
data.mYSize.width = mImageSize.width;
data.mYSize.height = mImageSize.height;
data.mYStride = mImageSize.width;
data.mYSkip = 0;
// Cr plane.
uint8_t* cr = y + yPlaneSize + 1;
data.mCrChannel = cr;
data.mCrSkip = 1;
// Cb plane
uint8_t* cb = y + yPlaneSize;
data.mCbChannel = cb;
data.mCbSkip = 1;
// 4:2:0.
data.mCbCrStride = mImageSize.width;
data.mCbCrSize.width = halfWidth;
data.mCbCrSize.height = halfHeight;
image->SetData(data);
return image;
}
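All three generators share the same 4:2:0 arithmetic: a WxH image carries a WxH Y plane plus quarter-size chroma, and the formats differ only in how Cb/Cr are laid out (planar for I420, interleaved for NV12/NV21, hence the skips of 1 above). A small sketch of the offsets:

#include <cstdio>

int main() {
  const int w = 640, h = 480;
  const int yPlane = w * h;  // luma bytes
  const int halfW = (w + 1) / 2, halfH = (h + 1) / 2;
  const int uvPlane = halfW * halfH;  // one chroma plane
  std::printf("Y=%d  Cb=Cr=%d  total=%d bytes\n", yPlane, uvPlane,
              yPlane + 2 * uvPlane);
  // I420: [Y][Cb][Cr]     -> Cb at yPlane, Cr at yPlane + uvPlane, skip 0
  // NV12: [Y][CbCrCbCr..] -> Cb at yPlane, Cr at yPlane + 1, skip 1
  // NV21: [Y][CrCbCrCb..] -> Cr at yPlane, Cb at yPlane + 1, skip 1
}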

View file

@@ -0,0 +1,32 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef YUVBufferGenerator_h
#define YUVBufferGenerator_h
#include "ImageContainer.h"
#include "mozilla/AlreadyAddRefed.h"
#include "nsTArray.h"
#include "Point.h" // mozilla::gfx::IntSize
// A helper object to generate images of different YUV formats.
class YUVBufferGenerator {
public:
void Init(const mozilla::gfx::IntSize& aSize);
mozilla::gfx::IntSize GetSize() const;
already_AddRefed<mozilla::layers::Image> GenerateI420Image();
already_AddRefed<mozilla::layers::Image> GenerateNV12Image();
already_AddRefed<mozilla::layers::Image> GenerateNV21Image();
private:
mozilla::layers::Image* CreateI420Image();
mozilla::layers::Image* CreateNV12Image();
mozilla::layers::Image* CreateNV21Image();
mozilla::gfx::IntSize mImageSize;
nsTArray<uint8_t> mSourceBuffer;
};
#endif // YUVBufferGenerator_h

View file

@@ -25,6 +25,7 @@ UNIFIED_SOURCES += [
'TestBlankVideoDataCreator.cpp',
'TestCDMStorage.cpp',
'TestDataMutex.cpp',
'TestDriftCompensation.cpp',
'TestGMPCrossOrigin.cpp',
'TestGMPRemoveAndDelete.cpp',
'TestGMPUtils.cpp',
@@ -48,11 +49,16 @@ if CONFIG['MOZ_WEBM_ENCODER']:
UNIFIED_SOURCES += [
'TestVideoTrackEncoder.cpp',
'TestWebMWriter.cpp',
'YUVBufferGenerator.cpp',
]
LOCAL_INCLUDES += [
'/gfx/2d/',
]
if CONFIG['MOZ_WEBRTC']:
UNIFIED_SOURCES += [
'TestAudioDeviceEnumerator.cpp',
'TestVideoFrameConverter.cpp',
]
TEST_HARNESS_FILES.gtest += [

View file

@@ -21,11 +21,7 @@ class CaptureTask::MediaStreamEventListener : public MediaStreamTrackListener {
: mCaptureTask(aCaptureTask){};
// MediaStreamTrackListener methods.
void NotifyEnded() override {
if (!mCaptureTask->mImageGrabbedOrTrackEnd) {
mCaptureTask->PostTrackEndEvent();
}
}
void NotifyEnded() override { mCaptureTask->PostTrackEndEvent(); }
private:
CaptureTask* mCaptureTask;
@@ -94,10 +90,11 @@ void CaptureTask::PrincipalChanged(dom::MediaStreamTrack* aMediaStreamTrack) {
mPrincipalChanged = true;
}
void CaptureTask::SetCurrentFrames(const VideoSegment& aSegment) {
if (mImageGrabbedOrTrackEnd) {
return;
}
void CaptureTask::NotifyRealtimeTrackData(MediaStreamGraph* aGraph,
StreamTime aTrackOffset,
const MediaSegment& aMedia) {
MOZ_ASSERT(aMedia.GetType() == MediaSegment::VIDEO);
const VideoSegment& video = static_cast<const VideoSegment&>(aMedia);
// Callback for encoding complete, it calls on main thread.
class EncodeComplete : public dom::EncodeCompleteCallback {
@@ -115,42 +112,50 @@ void CaptureTask::SetCurrentFrames(const VideoSegment& aSegment) {
RefPtr<CaptureTask> mTask;
};
for (VideoSegment::ConstChunkIterator iter(aSegment); !iter.IsEnded();
for (VideoSegment::ConstChunkIterator iter(video); !iter.IsEnded();
iter.Next()) {
VideoChunk chunk = *iter;
// Extract the first valid video frame.
VideoFrame frame;
if (!chunk.IsNull()) {
RefPtr<layers::Image> image;
if (chunk.mFrame.GetForceBlack()) {
// Create a black image.
image = VideoFrame::CreateBlackImage(chunk.mFrame.GetIntrinsicSize());
} else {
image = chunk.mFrame.GetImage();
}
if (!image) {
MOZ_ASSERT(image);
continue;
}
mImageGrabbedOrTrackEnd = true;
if (chunk.IsNull()) {
continue;
}
// Encode image.
nsresult rv;
nsAutoString type(NS_LITERAL_STRING("image/jpeg"));
nsAutoString options;
rv = dom::ImageEncoder::ExtractDataFromLayersImageAsync(
type, options, false, image, false, new EncodeComplete(this));
if (NS_FAILED(rv)) {
PostTrackEndEvent();
}
RefPtr<layers::Image> image;
if (chunk.mFrame.GetForceBlack()) {
// Create a black image.
image = VideoFrame::CreateBlackImage(chunk.mFrame.GetIntrinsicSize());
} else {
image = chunk.mFrame.GetImage();
}
if (!image) {
MOZ_ASSERT(image);
continue;
}
bool wasGrabbed = mImageGrabbedOrTrackEnd.exchange(true);
if (wasGrabbed) {
return;
}
// Encode image.
nsresult rv;
nsAutoString type(NS_LITERAL_STRING("image/jpeg"));
nsAutoString options;
rv = dom::ImageEncoder::ExtractDataFromLayersImageAsync(
type, options, false, image, false, new EncodeComplete(this));
if (NS_FAILED(rv)) {
PostTrackEndEvent();
}
}
}
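The exchange() idiom above is what makes the one-shot grab safe now that two threads can race for it. A minimal sketch of the pattern, using std::atomic in place of mozilla::Atomic:

#include <atomic>
#include <cstdio>

std::atomic<bool> gGrabbed{false};

void TryGrab(const char* aWho) {
  // exchange() atomically sets the flag and returns the previous value, so
  // exactly one caller observes false and proceeds.
  bool wasGrabbed = gGrabbed.exchange(true);
  if (wasGrabbed) return;
  std::printf("%s grabbed the frame\n", aWho);
}

int main() {
  TryGrab("video thread");  // wins
  TryGrab("main thread");   // no-op: the flag was already set
}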
void CaptureTask::PostTrackEndEvent() {
mImageGrabbedOrTrackEnd = true;
bool wasGrabbed = mImageGrabbedOrTrackEnd.exchange(true);
if (wasGrabbed) {
return;
}
// Got track end or finish event, stop the task.
class TrackEndRunnable : public Runnable {

View file

@@ -10,7 +10,6 @@
#include "MediaStreamGraph.h"
#include "MediaStreamListener.h"
#include "PrincipalChangeObserver.h"
#include "MediaStreamVideoSink.h"
namespace mozilla {
@@ -30,16 +29,17 @@ class MediaStreamTrack;
* CaptureTask holds a reference of ImageCapture to ensure ImageCapture won't be
* released during the period of the capturing process described above.
*/
class CaptureTask : public MediaStreamVideoSink,
class CaptureTask : public DirectMediaStreamTrackListener,
public dom::PrincipalChangeObserver<dom::MediaStreamTrack> {
public:
class MediaStreamEventListener;
// MediaStreamVideoSink methods.
void SetCurrentFrames(const VideoSegment& aSegment) override;
void ClearFrames() override {}
// DirectMediaStreamTrackListener methods
void NotifyRealtimeTrackData(MediaStreamGraph* aGraph,
StreamTime aTrackOffset,
const MediaSegment& aMedia) override;
// PrincipalChangeObserver<MediaStreamTrack> method.
// PrincipalChangeObserver<MediaStreamTrack> methods
void PrincipalChanged(dom::MediaStreamTrack* aMediaStreamTrack) override;
// CaptureTask methods.
@@ -76,9 +76,9 @@ class CaptureTask : public MediaStreamVideoSink,
RefPtr<MediaStreamEventListener> mEventListener;
// True when an image is retrieved from MediaStreamGraph or MediaStreamGraph
// sends a track finish, end, or removed event.
bool mImageGrabbedOrTrackEnd;
// True when an image is retrieved from the video track, or MediaStreamGraph
// sends a track finish, end, or removed event. Any thread.
Atomic<bool> mImageGrabbedOrTrackEnd;
// True after MediaStreamTrack principal changes while waiting for a photo
// to finish and we should raise a security error.

View file

@@ -213,9 +213,6 @@ class DecodedStreamData {
// The decoder is responsible for calling Destroy() on this stream.
const RefPtr<SourceMediaStream> mStream;
const RefPtr<DecodedStreamGraphListener> mListener;
// True if we need to send a compensation video frame to ensure the
// StreamTime going forward.
bool mEOSVideoCompensation;
const RefPtr<OutputStreamManager> mOutputStreamManager;
const RefPtr<AbstractThread> mAbstractMainThread;
@@ -238,7 +235,6 @@ DecodedStreamData::DecodedStreamData(
mListener(MakeRefPtr<DecodedStreamGraphListener>(
mStream, aInit.mAudioTrackID, std::move(aAudioEndedPromise),
aInit.mVideoTrackID, std::move(aVideoEndedPromise), aMainThread)),
mEOSVideoCompensation(false),
mOutputStreamManager(aOutputStreamManager),
mAbstractMainThread(aMainThread) {
MOZ_ASSERT(NS_IsMainThread());
@@ -425,9 +421,10 @@ void DecodedStream::Stop() {
AssertOwnerThread();
MOZ_ASSERT(mStartTime.isSome(), "playback not started.");
DisconnectListener();
ResetVideo(mPrincipalHandle);
mStreamTimeOffset += SentDuration();
mStartTime.reset();
DisconnectListener();
mAudioEndedPromise = nullptr;
mVideoEndedPromise = nullptr;
@@ -586,8 +583,8 @@ void DecodedStream::SendAudio(double aVolume, bool aIsSameOrigin,
}
static void WriteVideoToMediaStream(MediaStream* aStream, layers::Image* aImage,
const TimeUnit& aEnd,
const TimeUnit& aStart,
const TimeUnit& aEnd,
const mozilla::gfx::IntSize& aIntrinsicSize,
const TimeStamp& aTimeStamp,
VideoSegment* aOutput,
@@ -596,9 +593,12 @@ static void WriteVideoToMediaStream(MediaStream* aStream, layers::Image* aImage,
auto end = aStream->MicrosecondsToStreamTimeRoundDown(aEnd.ToMicroseconds());
auto start =
aStream->MicrosecondsToStreamTimeRoundDown(aStart.ToMicroseconds());
StreamTime duration = end - start;
aOutput->AppendFrame(image.forget(), duration, aIntrinsicSize,
aPrincipalHandle, false, aTimeStamp);
aOutput->AppendFrame(image.forget(), aIntrinsicSize, aPrincipalHandle, false,
aTimeStamp);
// Extend this so we get accurate durations for all frames.
// Because this track is pushed, we need durations so the graph can track
// when playout of the track has finished.
aOutput->ExtendLastFrameBy(end - start);
}
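The conversion above rounds both endpoints down to graph ticks and extends the appended frame by the difference, which is what keeps pushed-track durations exact. A sketch of the math with an assumed 48 kHz graph rate (ToStreamTime mimics MicrosecondsToStreamTimeRoundDown):

#include <cstdint>
#include <cstdio>

int64_t ToStreamTime(int64_t aUsecs, int32_t aRate) {
  return aUsecs * aRate / 1000000;  // round down, as the stream API does
}

int main() {
  const int32_t rate = 48000;                  // graph rate, ticks/second
  int64_t start = ToStreamTime(500000, rate);  // frame starts at 0.5 s
  int64_t end = ToStreamTime(533333, rate);    // ends at ~0.533 s
  std::printf("extend last frame by %lld ticks\n", (long long)(end - start));
}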
static bool ZeroDurationAtLastChunk(VideoSegment& aInput) {
@@ -610,6 +610,43 @@ static bool ZeroDurationAtLastChunk(VideoSegment& aInput) {
return lastVideoStratTime == aInput.GetDuration();
}
void DecodedStream::ResetVideo(const PrincipalHandle& aPrincipalHandle) {
AssertOwnerThread();
if (!mData) {
return;
}
if (!mInfo.HasVideo()) {
return;
}
VideoSegment resetter;
TimeStamp currentTime;
TimeUnit currentPosition = GetPosition(&currentTime);
// Giving direct consumers a frame (really *any* frame, so in this case:
// nullptr) at an earlier time than the previous, will signal to that consumer
// to discard any frames ahead in time of the new frame. To be honest, this is
// an ugly hack because the direct listeners of the MediaStreamGraph do not
// have an API that supports clearing the future frames. ImageContainer and
// VideoFrameContainer do though, and we will need to move to a similar API
// for video tracks as part of bug 1493618.
resetter.AppendFrame(nullptr, mData->mLastVideoImageDisplaySize,
aPrincipalHandle, false, currentTime);
mData->mStream->AppendToTrack(mInfo.mVideo.mTrackId, &resetter);
// Consumer buffers have been reset. We now set mNextVideoTime to the start
// time of the current frame, so that it can be displayed again on resuming.
if (RefPtr<VideoData> v = mVideoQueue.PeekFront()) {
mData->mNextVideoTime = v->mTime;
} else {
// There was no current frame in the queue. We set the next time to push to
// the current time, so we at least don't resume starting in the future.
mData->mNextVideoTime = currentPosition;
}
}
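The hack above leans on one consumer-side rule: a frame that jumps back in time invalidates everything queued after it. A hypothetical consumer illustrating that rule (QueuedFrame and OnFrame are invented for the sketch):

#include <cstdio>
#include <vector>

struct QueuedFrame { double time; bool null; };

void OnFrame(std::vector<QueuedFrame>& pending, QueuedFrame f) {
  if (!pending.empty() && f.time < pending.back().time) {
    pending.clear();  // jumped back in time: drop queued future frames
  }
  if (!f.null) pending.push_back(f);
}

int main() {
  std::vector<QueuedFrame> pending;
  OnFrame(pending, {1.0, false});
  OnFrame(pending, {2.0, false});  // scheduled for the future
  OnFrame(pending, {1.5, true});   // the null reset frame from ResetVideo()
  std::printf("%zu frames left\n", pending.size());  // 0
}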
void DecodedStream::SendVideo(bool aIsSameOrigin,
const PrincipalHandle& aPrincipalHandle) {
AssertOwnerThread();
@@ -631,13 +668,8 @@ void DecodedStream::SendVideo(bool aIsSameOrigin,
// is ref-counted.
mVideoQueue.GetElementsAfter(mData->mNextVideoTime, &video);
// tracksStartTimeStamp might be null when the SourceMediaStream has not yet
// been added to the MediaStreamGraph.
TimeStamp tracksStartTimeStamp =
sourceStream->GetStreamTracksStrartTimeStamp();
if (tracksStartTimeStamp.IsNull()) {
tracksStartTimeStamp = TimeStamp::Now();
}
TimeStamp currentTime;
TimeUnit currentPosition = GetPosition(&currentTime);
for (uint32_t i = 0; i < video.Length(); ++i) {
VideoData* v = video[i];
@@ -652,18 +684,21 @@ void DecodedStream::SendVideo(bool aIsSameOrigin,
// video frame). E.g. if we have a video frame that is 30 sec long
// and capture happens at 15 sec, we'll have to append a black frame
// that is 15 sec long.
WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage, v->mTime,
mData->mNextVideoTime,
mData->mLastVideoImageDisplaySize,
tracksStartTimeStamp + v->mTime.ToTimeDuration(),
&output, aPrincipalHandle);
WriteVideoToMediaStream(
sourceStream, mData->mLastVideoImage, mData->mNextVideoTime, v->mTime,
mData->mLastVideoImageDisplaySize,
currentTime +
(mData->mNextVideoTime - currentPosition).ToTimeDuration(),
&output, aPrincipalHandle);
mData->mNextVideoTime = v->mTime;
}
if (mData->mNextVideoTime < v->GetEndTime()) {
WriteVideoToMediaStream(
sourceStream, v->mImage, v->GetEndTime(), mData->mNextVideoTime,
v->mDisplay, tracksStartTimeStamp + v->GetEndTime().ToTimeDuration(),
sourceStream, v->mImage, mData->mNextVideoTime, v->GetEndTime(),
v->mDisplay,
currentTime +
(mData->mNextVideoTime - currentPosition).ToTimeDuration(),
&output, aPrincipalHandle);
mData->mNextVideoTime = v->GetEndTime();
mData->mLastVideoImage = v->mImage;
@@ -672,8 +707,9 @@ void DecodedStream::SendVideo(bool aIsSameOrigin,
}
// Check the output is not empty.
bool compensateEOS = false;
if (output.GetLastFrame()) {
mData->mEOSVideoCompensation = ZeroDurationAtLastChunk(output);
compensateEOS = ZeroDurationAtLastChunk(output);
}
if (!aIsSameOrigin) {
@@ -686,17 +722,16 @@ void DecodedStream::SendVideo(bool aIsSameOrigin,
}
if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
if (mData->mEOSVideoCompensation) {
if (compensateEOS) {
VideoSegment endSegment;
// Calculate the deviation clock time from DecodedStream.
auto deviation =
FromMicroseconds(sourceStream->StreamTimeToMicroseconds(1));
WriteVideoToMediaStream(
sourceStream, mData->mLastVideoImage,
mData->mNextVideoTime + deviation, mData->mNextVideoTime,
mData->mLastVideoImageDisplaySize,
tracksStartTimeStamp +
(mData->mNextVideoTime + deviation).ToTimeDuration(),
sourceStream, mData->mLastVideoImage, mData->mNextVideoTime,
mData->mNextVideoTime + deviation, mData->mLastVideoImageDisplaySize,
currentTime + (mData->mNextVideoTime + deviation - currentPosition)
.ToTimeDuration(),
&endSegment, aPrincipalHandle);
mData->mNextVideoTime += deviation;
MOZ_ASSERT(endSegment.GetDuration() > 0);
@@ -730,6 +765,10 @@ void DecodedStream::SendData() {
return;
}
if (!mPlaying) {
return;
}
SendAudio(mParams.mVolume, mSameOrigin, mPrincipalHandle);
SendVideo(mSameOrigin, mPrincipalHandle);
}
@@ -775,6 +814,11 @@ void DecodedStream::NotifyOutput(int64_t aTime) {
void DecodedStream::PlayingChanged() {
AssertOwnerThread();
if (!mPlaying) {
// On seek or pause we discard future frames.
ResetVideo(mPrincipalHandle);
}
mAbstractMainThread->Dispatch(NewRunnableMethod<bool>(
"OutputStreamManager::SetPlaying", mOutputStreamManager,
&OutputStreamManager::SetPlaying, mPlaying));

View file

@@ -80,6 +80,7 @@ class DecodedStream : public MediaSink {
void SendAudio(double aVolume, bool aIsSameOrigin,
const PrincipalHandle& aPrincipalHandle);
void SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHandle);
void ResetVideo(const PrincipalHandle& aPrincipalHandle);
StreamTime SentDuration();
void SendData();
void NotifyOutput(int64_t aTime);

View file

@@ -115,6 +115,7 @@ EXPORTS += [
'CubebUtils.h',
'DecoderTraits.h',
'DOMMediaStream.h',
'DriftCompensation.h',
'FileBlockCache.h',
'FrameStatistics.h',
'ImageToI420.h',
@@ -143,7 +144,6 @@ EXPORTS += [
'MediaStreamGraph.h',
'MediaStreamListener.h',
'MediaStreamTypes.h',
'MediaStreamVideoSink.h',
'MediaTimer.h',
'MediaTrack.h',
'MediaTrackList.h',
@@ -260,7 +260,6 @@ UNIFIED_SOURCES += [
'MediaStreamGraph.cpp',
'MediaStreamListener.cpp',
'MediaStreamTrack.cpp',
'MediaStreamVideoSink.cpp',
'MediaTimer.cpp',
'MediaTrack.cpp',
'MediaTrackList.cpp',

View file

@@ -1356,5 +1356,5 @@ tags = cloneelementvisually
skip-if = toolkit == 'android' # Visually cloning is only supported on Desktop for now.
tags = cloneelementvisually
[test_cloneElementVisually_ended_video.html]
skip-if = toolkit == 'android' # Visually cloning is only supported on Desktop for now.
skip-if = toolkit == 'android' || (os == "win" && bits == 64) # Visually cloning is only supported on Desktop for now. Bug 1536156
tags = cloneelementvisually

View file

@@ -10,7 +10,7 @@
<pre id="test">
<div id="content">
<canvas id="video-src-canvas"></canvas>
<video id="recorded-video"></video>>
<video id="recorded-video"></video>
</div>
<script class="testbody" type="text/javascript">

View file

@@ -66,6 +66,10 @@ runTestWhenReady(async () => {
await haveEvent(mediaRecorder, "start", wait(5000, new Error("Timeout")));
info("onstart fired");
// The recording can be too short to cause any checks with
// waitForAnalysisSuccess(). Waiting a bit here solves this.
await wait(500);
is(mediaRecorder.state, "recording",
"Media recorder is recording before being stopped");
mediaRecorder.stop();

View file

@@ -144,11 +144,7 @@ function startTest() {
previous_time = timestamp;
if (countFrames == resolution_change.length) {
// There's a race between this stop() and feeding the last frame to the
// recorder, see bug 1407650. We wait a bit with the stop() as a stop-gap
// measure.
SimpleTest.requestFlakyTimeout("Fixes intermittent bug 1407650");
new Promise(r => setTimeout(r, 1000)).then(() => mediaRecorder.stop());
mediaRecorder.stop();
return;
}

View file

@@ -90,10 +90,6 @@ class AnalyserNodeEngine final : public AudioNodeEngine {
already_AddRefed<AnalyserNode> AnalyserNode::Create(
AudioContext& aAudioContext, const AnalyserOptions& aOptions,
ErrorResult& aRv) {
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
RefPtr<AnalyserNode> analyserNode = new AnalyserNode(&aAudioContext);
analyserNode->Initialize(aOptions, aRv);

View file

@@ -608,10 +608,6 @@ AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* aContext)
already_AddRefed<AudioBufferSourceNode> AudioBufferSourceNode::Create(
JSContext* aCx, AudioContext& aAudioContext,
const AudioBufferSourceOptions& aOptions, ErrorResult& aRv) {
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
RefPtr<AudioBufferSourceNode> audioNode =
new AudioBufferSourceNode(&aAudioContext);

View file

@@ -328,15 +328,6 @@ already_AddRefed<AudioContext> AudioContext::Constructor(
return object.forget();
}
bool AudioContext::CheckClosed(ErrorResult& aRv) {
if (mAudioContextState == AudioContextState::Closed || mIsShutDown ||
mIsDisconnecting) {
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
return true;
}
return false;
}
already_AddRefed<AudioBufferSourceNode> AudioContext::CreateBufferSource(
ErrorResult& aRv) {
return AudioBufferSourceNode::Create(nullptr, *this,
@@ -345,10 +336,6 @@ already_AddRefed<ConstantSourceNode> AudioContext::CreateBufferSource(
already_AddRefed<ConstantSourceNode> AudioContext::CreateConstantSource(
ErrorResult& aRv) {
if (CheckClosed(aRv)) {
return nullptr;
}
RefPtr<ConstantSourceNode> constantSourceNode = new ConstantSourceNode(this);
return constantSourceNode.forget();
}
@@ -402,10 +389,6 @@ already_AddRefed<ScriptProcessorNode> AudioContext::CreateScriptProcessor(
return nullptr;
}
if (CheckClosed(aRv)) {
return nullptr;
}
RefPtr<ScriptProcessorNode> scriptProcessor = new ScriptProcessorNode(
this, aBufferSize, aNumberOfInputChannels, aNumberOfOutputChannels);
return scriptProcessor.forget();

View file

@@ -310,8 +310,6 @@ class AudioContext final : public DOMEventTargetHelper,
BasicWaveFormCache* GetBasicWaveFormCache();
bool CheckClosed(ErrorResult& aRv);
// Steals from |aParamMap|
void SetParamMapForWorkletName(const nsAString& aName,
AudioParamDescriptorMap* aParamMap);

View file

@@ -26,10 +26,6 @@ already_AddRefed<AudioWorkletNode> AudioWorkletNode::Constructor(
const GlobalObject& aGlobal, AudioContext& aAudioContext,
const nsAString& aName, const AudioWorkletNodeOptions& aOptions,
ErrorResult& aRv) {
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
if (aOptions.mNumberOfInputs == 0 && aOptions.mNumberOfOutputs == 0) {
aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
return nullptr;

View file

@@ -243,10 +243,6 @@ BiquadFilterNode::BiquadFilterNode(AudioContext* aContext)
already_AddRefed<BiquadFilterNode> BiquadFilterNode::Create(
AudioContext& aAudioContext, const BiquadFilterOptions& aOptions,
ErrorResult& aRv) {
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
RefPtr<BiquadFilterNode> audioNode = new BiquadFilterNode(&aAudioContext);
audioNode->Initialize(aOptions, aRv);

View file

@@ -68,10 +68,6 @@ ChannelMergerNode::ChannelMergerNode(AudioContext* aContext,
already_AddRefed<ChannelMergerNode> ChannelMergerNode::Create(
AudioContext& aAudioContext, const ChannelMergerOptions& aOptions,
ErrorResult& aRv) {
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
if (aOptions.mNumberOfInputs == 0 ||
aOptions.mNumberOfInputs > WebAudioUtils::MaxChannelCount) {
aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);

View file

@@ -58,10 +58,6 @@ ChannelSplitterNode::ChannelSplitterNode(AudioContext* aContext,
already_AddRefed<ChannelSplitterNode> ChannelSplitterNode::Create(
AudioContext& aAudioContext, const ChannelSplitterOptions& aOptions,
ErrorResult& aRv) {
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
if (aOptions.mNumberOfOutputs == 0 ||
aOptions.mNumberOfOutputs > WebAudioUtils::MaxChannelCount) {
aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);

View file

@@ -396,10 +396,6 @@ ConvolverNode::ConvolverNode(AudioContext* aContext)
already_AddRefed<ConvolverNode> ConvolverNode::Create(
JSContext* aCx, AudioContext& aAudioContext,
const ConvolverOptions& aOptions, ErrorResult& aRv) {
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
RefPtr<ConvolverNode> audioNode = new ConvolverNode(&aAudioContext);
audioNode->Initialize(aOptions, aRv);

View file

@@ -185,10 +185,6 @@ DelayNode::DelayNode(AudioContext* aContext, double aMaxDelay)
already_AddRefed<DelayNode> DelayNode::Create(AudioContext& aAudioContext,
const DelayOptions& aOptions,
ErrorResult& aRv) {
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
if (aOptions.mMaxDelayTime <= 0. || aOptions.mMaxDelayTime >= 180.) {
aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
return nullptr;

View file

@@ -184,10 +184,6 @@ DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* aContext)
already_AddRefed<DynamicsCompressorNode> DynamicsCompressorNode::Create(
AudioContext& aAudioContext, const DynamicsCompressorOptions& aOptions,
ErrorResult& aRv) {
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
RefPtr<DynamicsCompressorNode> audioNode =
new DynamicsCompressorNode(&aAudioContext);

View file

@@ -119,10 +119,6 @@ GainNode::GainNode(AudioContext* aContext)
already_AddRefed<GainNode> GainNode::Create(AudioContext& aAudioContext,
const GainOptions& aOptions,
ErrorResult& aRv) {
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
RefPtr<GainNode> audioNode = new GainNode(&aAudioContext);
audioNode->Initialize(aOptions, aRv);

View file

@@ -159,10 +159,6 @@ IIRFilterNode::IIRFilterNode(AudioContext* aContext,
already_AddRefed<IIRFilterNode> IIRFilterNode::Create(
AudioContext& aAudioContext, const IIRFilterOptions& aOptions,
ErrorResult& aRv) {
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
if (aOptions.mFeedforward.Length() == 0 ||
aOptions.mFeedforward.Length() > 20) {
aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);

View file

@@ -26,10 +26,6 @@ MediaElementAudioSourceNode::Create(
return nullptr;
}
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
RefPtr<MediaElementAudioSourceNode> node =
new MediaElementAudioSourceNode(&aAudioContext);

View file

@@ -100,10 +100,6 @@ MediaStreamAudioDestinationNode::Create(AudioContext& aAudioContext,
return nullptr;
}
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
RefPtr<MediaStreamAudioDestinationNode> audioNode =
new MediaStreamAudioDestinationNode(&aAudioContext);

View file

@@ -50,10 +50,6 @@ already_AddRefed<MediaStreamAudioSourceNode> MediaStreamAudioSourceNode::Create(
return nullptr;
}
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
if (aAudioContext.Graph() !=
aOptions.mMediaStream->GetPlaybackStream()->Graph()) {
nsCOMPtr<nsPIDOMWindowInner> pWindow = aAudioContext.GetParentObject();

View file

@@ -390,10 +390,6 @@ OscillatorNode::OscillatorNode(AudioContext* aContext)
already_AddRefed<OscillatorNode> OscillatorNode::Create(
AudioContext& aAudioContext, const OscillatorOptions& aOptions,
ErrorResult& aRv) {
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
RefPtr<OscillatorNode> audioNode = new OscillatorNode(&aAudioContext);
audioNode->Initialize(aOptions, aRv);

View file

@@ -325,10 +325,6 @@ PannerNode::PannerNode(AudioContext* aContext)
already_AddRefed<PannerNode> PannerNode::Create(AudioContext& aAudioContext,
const PannerOptions& aOptions,
ErrorResult& aRv) {
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
RefPtr<PannerNode> audioNode = new PannerNode(&aAudioContext);
audioNode->Initialize(aOptions, aRv);

View file

@@ -168,10 +168,6 @@ StereoPannerNode::StereoPannerNode(AudioContext* aContext)
already_AddRefed<StereoPannerNode> StereoPannerNode::Create(
AudioContext& aAudioContext, const StereoPannerOptions& aOptions,
ErrorResult& aRv) {
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
RefPtr<StereoPannerNode> audioNode = new StereoPannerNode(&aAudioContext);
audioNode->Initialize(aOptions, aRv);

View file

@@ -296,10 +296,6 @@ WaveShaperNode::WaveShaperNode(AudioContext* aContext)
already_AddRefed<WaveShaperNode> WaveShaperNode::Create(
AudioContext& aAudioContext, const WaveShaperOptions& aOptions,
ErrorResult& aRv) {
if (aAudioContext.CheckClosed(aRv)) {
return nullptr;
}
RefPtr<WaveShaperNode> audioNode = new WaveShaperNode(&aAudioContext);
audioNode->Initialize(aOptions, aRv);

View file

@@ -41,7 +41,7 @@ function tryToCreateNodeOnClosedContext(ctx) {
return;
}
expectException(function() {
expectNoException(function() {
ctx[e.name].apply(ctx, e.args);
}, DOMException.INVALID_STATE_ERR);
});

View file

@@ -62,7 +62,6 @@ static nsString DefaultVideoName() {
MediaEngineDefaultVideoSource::MediaEngineDefaultVideoSource()
: mTimer(nullptr),
mMutex("MediaEngineDefaultVideoSource::mMutex"),
mName(DefaultVideoName()) {}
MediaEngineDefaultVideoSource::~MediaEngineDefaultVideoSource() {}
@@ -133,7 +132,6 @@ nsresult MediaEngineDefaultVideoSource::Allocate(
mOpts.mHeight = std::max(90, std::min(mOpts.mHeight, 2160)) & ~1;
*aOutHandle = nullptr;
MutexAutoLock lock(mMutex);
mState = kAllocated;
return NS_OK;
}
@@ -146,11 +144,11 @@ nsresult MediaEngineDefaultVideoSource::Deallocate(
MOZ_ASSERT(!mImage);
MOZ_ASSERT(mState == kStopped || mState == kAllocated);
MutexAutoLock lock(mMutex);
if (mStream && IsTrackIDExplicit(mTrackID)) {
mStream->EndTrack(mTrackID);
mStream = nullptr;
mTrackID = TRACK_NONE;
mPrincipalHandle = PRINCIPAL_HANDLE_NONE;
}
mState = kReleased;
mImageContainer = nullptr;
@@ -198,11 +196,9 @@ void MediaEngineDefaultVideoSource::SetTrack(
MOZ_ASSERT(!mStream);
MOZ_ASSERT(mTrackID == TRACK_NONE);
{
MutexAutoLock lock(mMutex);
mStream = aStream;
mTrackID = aTrackID;
}
mStream = aStream;
mTrackID = aTrackID;
mPrincipalHandle = aPrincipal;
aStream->AddTrack(aTrackID, new VideoSegment(),
SourceMediaStream::ADDTRACK_QUEUED);
}
@@ -244,7 +240,6 @@ nsresult MediaEngineDefaultVideoSource::Start(
this, interval, nsITimer::TYPE_REPEATING_SLACK,
"MediaEngineDefaultVideoSource::GenerateFrame");
MutexAutoLock lock(mMutex);
mState = kStarted;
return NS_OK;
}
@@ -265,9 +260,6 @@ nsresult MediaEngineDefaultVideoSource::Stop(
mTimer->Cancel();
mTimer = nullptr;
MutexAutoLock lock(mMutex);
mImage = nullptr;
mState = kStopped;
return NS_OK;
@@ -330,48 +322,19 @@ void MediaEngineDefaultVideoSource::GenerateFrame() {
return;
}
MutexAutoLock lock(mMutex);
mImage = std::move(ycbcr_image);
VideoSegment segment;
segment.AppendFrame(ycbcr_image.forget(),
gfx::IntSize(mOpts.mWidth, mOpts.mHeight),
mPrincipalHandle);
mStream->AppendToTrack(mTrackID, &segment);
}
void MediaEngineDefaultVideoSource::Pull(
const RefPtr<const AllocationHandle>& aHandle,
const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) {
TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i", aStream.get(),
aTrackID);
// AppendFrame takes ownership of `segment`
VideoSegment segment;
RefPtr<layers::Image> image;
{
MutexAutoLock lock(mMutex);
// Started - append real image
// Stopped - append null
// Released - Track is ended, safe to ignore
// Can happen because NotifyPull comes from a stream listener
if (mState == kReleased) {
return;
}
MOZ_ASSERT(mState != kAllocated);
if (mState == kStarted) {
MOZ_ASSERT(mStream == aStream);
MOZ_ASSERT(mTrackID == aTrackID);
image = mImage;
}
}
StreamTime delta = aDesiredTime - aEndOfAppendedData;
MOZ_ASSERT(delta > 0);
// nullptr images are allowed
IntSize size(mOpts.mWidth, mOpts.mHeight);
segment.AppendFrame(image.forget(), delta, size, aPrincipalHandle);
// This can fail if either a) we haven't added the track yet, or b)
// we've removed or finished the track.
aStream->AppendToTrack(aTrackID, &segment);
}
const PrincipalHandle& aPrincipalHandle) {}
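With Pull() reduced to an empty body, frames reach the MediaStreamGraph only through the direct appends above. A condensed sketch of the push path as GenerateFrame() now performs it, using only calls visible in this hunk:

  // Push model: wrap the freshly generated frame in a one-frame VideoSegment
  // and append it to the track immediately, under the source's mutex.
  MutexAutoLock lock(mMutex);
  VideoSegment segment;
  segment.AppendFrame(ycbcr_image.forget(),
                      gfx::IntSize(mOpts.mWidth, mOpts.mHeight),
                      mPrincipalHandle);
  // AppendToTrack can fail if the track was never added or has already ended.
  mStream->AppendToTrack(mTrackID, &segment);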
/**
* Default audio source.

View file

@@ -86,15 +86,12 @@ class MediaEngineDefaultVideoSource : public MediaEngineSource {
RefPtr<layers::ImageContainer> mImageContainer;
// mMutex protects mState, mImage, mStream, mTrackID
Mutex mMutex;
// Current state of this source.
// Set under mMutex on the owning thread. Read on the owning thread or under mMutex.
MediaEngineSourceState mState = kReleased;
RefPtr<layers::Image> mImage;
RefPtr<SourceMediaStream> mStream;
TrackID mTrackID = TRACK_NONE;
PrincipalHandle mPrincipalHandle = PRINCIPAL_HANDLE_NONE;
MediaEnginePrefs mOpts;
int mCb = 16;

View file

@@ -386,11 +386,6 @@ nsresult MediaEngineRemoteVideoSource::Stop(
{
MutexAutoLock lock(mMutex);
mState = kStopped;
// Drop any cached image so we don't start with a stale image on next
// usage. Also, gfx gets very upset if these are held until this object
// is gc'd in final-cc during shutdown (bug 1374164)
mImage = nullptr;
}
return NS_OK;
@@ -492,37 +487,7 @@ void MediaEngineRemoteVideoSource::Pull(
const RefPtr<const AllocationHandle>& aHandle,
const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) {
TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i", aStream.get(),
aTrackID);
MutexAutoLock lock(mMutex);
if (mState == kReleased) {
// We end the track before deallocating, so this is safe.
return;
}
MOZ_ASSERT(mState == kStarted || mState == kStopped);
StreamTime delta = aDesiredTime - aEndOfAppendedData;
MOZ_ASSERT(delta > 0);
VideoSegment segment;
RefPtr<layers::Image> image = mImage;
if (mState == kStarted) {
MOZ_ASSERT(!image || mImageSize == image->GetSize());
segment.AppendFrame(image.forget(), delta, mImageSize, aPrincipalHandle);
} else {
// nullptr images are allowed, but we force it to black and retain the size.
segment.AppendFrame(image.forget(), delta, mImageSize, aPrincipalHandle,
true);
}
// This is safe from any thread, and is safe if the track is Finished
// or Destroyed.
// This can fail if either a) we haven't added the track yet, or b)
// we've removed or finished the track.
aStream->AppendToTrack(aTrackID, &segment);
}
const PrincipalHandle& aPrincipalHandle) {}
int MediaEngineRemoteVideoSource::DeliverFrame(
uint8_t* aBuffer, const camera::VideoFrameProperties& aProps) {
@@ -670,15 +635,13 @@ int MediaEngineRemoteVideoSource::DeliverFrame(
{
MutexAutoLock lock(mMutex);
// implicitly releases last image
mImage = image.forget();
mImageSize = mImage->GetSize();
MOZ_ASSERT(mState == kStarted);
VideoSegment segment;
mImageSize = image->GetSize();
segment.AppendFrame(image.forget(), mImageSize, mPrincipal);
mStream->AppendToTrack(mTrackID, &segment);
}
// We'll push the frame into the MSG on the next Pull. This will avoid
// swamping the MSG with frames should it be taking longer than normal to run
// an iteration.
return 0;
}

View file

@@ -209,10 +209,6 @@ class MediaEngineRemoteVideoSource : public MediaEngineSource,
// after Start() and before the end of Stop().
RefPtr<layers::ImageContainer> mImageContainer;
// The latest frame delivered from the video capture backend.
// Protected by mMutex.
RefPtr<layers::Image> mImage;
// A buffer pool used to manage the temporary buffer used when rescaling
// incoming images. Cameras IPC thread only.
webrtc::I420BufferPool mRescalingBufferPool;

View file

@@ -33,10 +33,13 @@ namespace mozilla {
using namespace mozilla::gfx;
MediaEngineTabVideoSource::MediaEngineTabVideoSource()
: mMutex("MediaEngineTabVideoSource::mMutex") {}
MediaEngineTabVideoSource::MediaEngineTabVideoSource() {}
nsresult MediaEngineTabVideoSource::StartRunnable::Run() {
MOZ_ASSERT(NS_IsMainThread());
mVideoSource->mStreamMain = mStream;
mVideoSource->mTrackIDMain = mTrackID;
mVideoSource->mPrincipalHandleMain = mPrincipal;
mVideoSource->Draw();
mVideoSource->mTimer->InitWithNamedFuncCallback(
[](nsITimer* aTimer, void* aClosure) mutable {
@@ -52,6 +55,7 @@ nsresult MediaEngineTabVideoSource::StartRunnable::Run() {
}
nsresult MediaEngineTabVideoSource::StopRunnable::Run() {
MOZ_ASSERT(NS_IsMainThread());
if (mVideoSource->mTimer) {
mVideoSource->mTimer->Cancel();
mVideoSource->mTimer = nullptr;
@@ -59,10 +63,14 @@ nsresult MediaEngineTabVideoSource::StopRunnable::Run() {
if (mVideoSource->mTabSource) {
mVideoSource->mTabSource->NotifyStreamStop(mVideoSource->mWindow);
}
mVideoSource->mPrincipalHandle = PRINCIPAL_HANDLE_NONE;
mVideoSource->mTrackIDMain = TRACK_NONE;
mVideoSource->mStream = nullptr;
return NS_OK;
}
nsresult MediaEngineTabVideoSource::InitRunnable::Run() {
MOZ_ASSERT(NS_IsMainThread());
if (mVideoSource->mWindowId != -1) {
nsGlobalWindowOuter* globalWindow =
nsGlobalWindowOuter::GetOuterWindowWithId(mVideoSource->mWindowId);
@@ -93,7 +101,8 @@ nsresult MediaEngineTabVideoSource::InitRunnable::Run() {
MOZ_ASSERT(mVideoSource->mWindow);
}
mVideoSource->mTimer = NS_NewTimer();
nsCOMPtr<nsIRunnable> start(new StartRunnable(mVideoSource));
nsCOMPtr<nsIRunnable> start(
new StartRunnable(mVideoSource, mStream, mTrackID, mPrincipal));
start->Run();
return NS_OK;
}
@@ -137,11 +146,7 @@ nsresult MediaEngineTabVideoSource::Allocate(
? aConstraints.mBrowserWindow.Value()
: -1;
*aOutHandle = nullptr;
{
MutexAutoLock lock(mMutex);
mState = kAllocated;
}
mState = kAllocated;
return Reconfigure(nullptr, aConstraints, aPrefs, aDeviceId,
aOutBadConstraint);
@@ -159,23 +164,51 @@ nsresult MediaEngineTabVideoSource::Reconfigure(
// scrollWithPage is not a proper constraint, so just read it.
// It has no well-defined behavior in advanced, so ignore it there.
mScrollWithPage = aConstraints.mScrollWithPage.WasPassed()
? aConstraints.mScrollWithPage.Value()
: false;
const bool scrollWithPage = aConstraints.mScrollWithPage.WasPassed()
? aConstraints.mScrollWithPage.Value()
: false;
FlattenedConstraints c(aConstraints);
mBufWidthMax = c.mWidth.Get(DEFAULT_TABSHARE_VIDEO_MAX_WIDTH);
mBufHeightMax = c.mHeight.Get(DEFAULT_TABSHARE_VIDEO_MAX_HEIGHT);
double frameRate = c.mFrameRate.Get(DEFAULT_TABSHARE_VIDEO_FRAMERATE);
mTimePerFrame = std::max(10, int(1000.0 / (frameRate > 0 ? frameRate : 1)));
const int32_t bufWidthMax = c.mWidth.Get(DEFAULT_TABSHARE_VIDEO_MAX_WIDTH);
const int32_t bufHeightMax = c.mHeight.Get(DEFAULT_TABSHARE_VIDEO_MAX_HEIGHT);
const double frameRate = c.mFrameRate.Get(DEFAULT_TABSHARE_VIDEO_FRAMERATE);
const int32_t timePerFrame =
std::max(10, int(1000.0 / (frameRate > 0 ? frameRate : 1)));
if (!mScrollWithPage) {
mViewportOffsetX = c.mViewportOffsetX.Get(0);
mViewportOffsetY = c.mViewportOffsetY.Get(0);
mViewportWidth = c.mViewportWidth.Get(INT32_MAX);
mViewportHeight = c.mViewportHeight.Get(INT32_MAX);
Maybe<int32_t> viewportOffsetX;
Maybe<int32_t> viewportOffsetY;
Maybe<int32_t> viewportWidth;
Maybe<int32_t> viewportHeight;
if (!scrollWithPage) {
viewportOffsetX = Some(c.mViewportOffsetX.Get(0));
viewportOffsetY = Some(c.mViewportOffsetY.Get(0));
viewportWidth = Some(c.mViewportWidth.Get(INT32_MAX));
viewportHeight = Some(c.mViewportHeight.Get(INT32_MAX));
}
NS_DispatchToMainThread(NS_NewRunnableFunction(
"MediaEngineTabVideoSource::Reconfigure main thread setter",
[self = RefPtr<MediaEngineTabVideoSource>(this), this, scrollWithPage,
bufWidthMax, bufHeightMax, timePerFrame, viewportOffsetX,
viewportOffsetY, viewportWidth, viewportHeight]() {
mScrollWithPage = scrollWithPage;
mBufWidthMax = bufWidthMax;
mBufHeightMax = bufHeightMax;
mTimePerFrame = timePerFrame;
if (viewportOffsetX.isSome()) {
mViewportOffsetX = *viewportOffsetX;
}
if (viewportOffsetY.isSome()) {
mViewportOffsetY = *viewportOffsetY;
}
if (viewportWidth.isSome()) {
mViewportWidth = *viewportWidth;
}
if (viewportHeight.isSome()) {
mViewportHeight = *viewportHeight;
}
}));
return NS_OK;
}
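The rewritten Reconfigure() is an instance of a general pattern: snapshot the new settings into const locals on the calling thread, then dispatch a lambda that applies them to main-thread-only members. A minimal sketch of that pattern (MyEngine, ApplySettings, and mWidth are illustrative names, not from this patch):

  NS_DispatchToMainThread(NS_NewRunnableFunction(
      "MyEngine::ApplySettings",
      // The RefPtr capture keeps |this| alive until the lambda runs;
      // capturing |this| as well lets the body name members directly.
      [self = RefPtr<MyEngine>(this), this, width]() {
        mWidth = width;  // main-thread-only member, now set safely
      }));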
@@ -190,11 +223,7 @@ nsresult MediaEngineTabVideoSource::Deallocate(
}
NS_DispatchToMainThread(do_AddRef(new DestroyRunnable(this)));
{
MutexAutoLock lock(mMutex);
mState = kReleased;
}
mState = kReleased;
return NS_OK;
}
@@ -212,6 +241,7 @@ void MediaEngineTabVideoSource::SetTrack(
MOZ_ASSERT(IsTrackIDExplicit(aTrackID));
mStream = aStream;
mTrackID = aTrackID;
mPrincipalHandle = aPrincipal;
mStream->AddTrack(mTrackID, new VideoSegment());
}
@@ -222,16 +252,12 @@ nsresult MediaEngineTabVideoSource::Start(
nsCOMPtr<nsIRunnable> runnable;
if (!mWindow) {
runnable = new InitRunnable(this);
runnable = new InitRunnable(this, mStream, mTrackID, mPrincipalHandle);
} else {
runnable = new StartRunnable(this);
runnable = new StartRunnable(this, mStream, mTrackID, mPrincipalHandle);
}
NS_DispatchToMainThread(runnable);
{
MutexAutoLock lock(mMutex);
mState = kStarted;
}
mState = kStarted;
return NS_OK;
}
@@ -240,36 +266,11 @@ void MediaEngineTabVideoSource::Pull(
const RefPtr<const AllocationHandle>& aHandle,
const RefPtr<SourceMediaStream>& aStream, TrackID aTrackID,
StreamTime aEndOfAppendedData, StreamTime aDesiredTime,
const PrincipalHandle& aPrincipalHandle) {
TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i", aStream.get(),
aTrackID);
VideoSegment segment;
RefPtr<layers::Image> image;
gfx::IntSize imageSize;
{
MutexAutoLock lock(mMutex);
if (mState == kReleased) {
// We end the track before setting the state to released.
return;
}
if (mState == kStarted) {
image = mImage;
imageSize = mImage ? mImage->GetSize() : IntSize();
}
}
StreamTime delta = aDesiredTime - aEndOfAppendedData;
MOZ_ASSERT(delta > 0);
// nullptr images are allowed
segment.AppendFrame(image.forget(), delta, imageSize, aPrincipalHandle);
// This can fail if either a) we haven't added the track yet, or b)
// we've removed or ended the track.
aStream->AppendToTrack(aTrackID, &(segment));
}
const PrincipalHandle& aPrincipalHandle) {}
void MediaEngineTabVideoSource::Draw() {
MOZ_ASSERT(NS_IsMainThread());
if (!mWindow && !mBlackedoutWindow) {
return;
}
@@ -382,8 +383,11 @@ void MediaEngineTabVideoSource::Draw() {
}
}
MutexAutoLock lock(mMutex);
mImage = rgbImage;
VideoSegment segment;
segment.AppendFrame(do_AddRef(rgbImage), size, mPrincipalHandle);
// This can fail if either a) we haven't added the track yet, or b)
// we've removed or ended the track.
mStreamMain->AppendToTrack(mTrackIDMain, &segment);
}
nsresult MediaEngineTabVideoSource::FocusOnSelectedSource(
@@ -408,11 +412,7 @@ nsresult MediaEngineTabVideoSource::Stop(
}
NS_DispatchToMainThread(new StopRunnable(this));
{
MutexAutoLock lock(mMutex);
mState = kStopped;
}
mState = kStopped;
return NS_OK;
}

View file

@@ -63,11 +63,19 @@ class MediaEngineTabVideoSource : public MediaEngineSource {
class StartRunnable : public Runnable {
public:
explicit StartRunnable(MediaEngineTabVideoSource* videoSource)
StartRunnable(MediaEngineTabVideoSource* videoSource,
SourceMediaStream* aStream, TrackID aTrackID,
const PrincipalHandle& aPrincipal)
: Runnable("MediaEngineTabVideoSource::StartRunnable"),
mVideoSource(videoSource) {}
mVideoSource(videoSource),
mStream(aStream),
mTrackID(aTrackID),
mPrincipal(aPrincipal) {}
NS_IMETHOD Run() override;
RefPtr<MediaEngineTabVideoSource> mVideoSource;
const RefPtr<MediaEngineTabVideoSource> mVideoSource;
const RefPtr<SourceMediaStream> mStream;
const TrackID mTrackID;
const PrincipalHandle mPrincipal;
};
class StopRunnable : public Runnable {
@@ -76,16 +84,24 @@ class MediaEngineTabVideoSource : public MediaEngineSource {
: Runnable("MediaEngineTabVideoSource::StopRunnable"),
mVideoSource(videoSource) {}
NS_IMETHOD Run() override;
RefPtr<MediaEngineTabVideoSource> mVideoSource;
const RefPtr<MediaEngineTabVideoSource> mVideoSource;
};
class InitRunnable : public Runnable {
public:
explicit InitRunnable(MediaEngineTabVideoSource* videoSource)
InitRunnable(MediaEngineTabVideoSource* videoSource,
SourceMediaStream* aStream, TrackID aTrackID,
const PrincipalHandle& aPrincipal)
: Runnable("MediaEngineTabVideoSource::InitRunnable"),
mVideoSource(videoSource) {}
mVideoSource(videoSource),
mStream(aStream),
mTrackID(aTrackID),
mPrincipal(aPrincipal) {}
NS_IMETHOD Run() override;
RefPtr<MediaEngineTabVideoSource> mVideoSource;
const RefPtr<MediaEngineTabVideoSource> mVideoSource;
const RefPtr<SourceMediaStream> mStream;
const TrackID mTrackID;
const PrincipalHandle mPrincipal;
};
class DestroyRunnable : public Runnable {
@@ -94,13 +110,14 @@ class MediaEngineTabVideoSource : public MediaEngineSource {
: Runnable("MediaEngineTabVideoSource::DestroyRunnable"),
mVideoSource(videoSource) {}
NS_IMETHOD Run() override;
RefPtr<MediaEngineTabVideoSource> mVideoSource;
const RefPtr<MediaEngineTabVideoSource> mVideoSource;
};
protected:
~MediaEngineTabVideoSource() {}
private:
// These are accessed only on main thread.
int32_t mBufWidthMax = 0;
int32_t mBufHeightMax = 0;
int64_t mWindowId = 0;
@@ -113,22 +130,21 @@ class MediaEngineTabVideoSource : public MediaEngineSource {
RefPtr<layers::ImageContainer> mImageContainer;
nsCOMPtr<nsPIDOMWindowOuter> mWindow;
nsCOMPtr<nsITimer> mTimer;
nsCOMPtr<nsITabSource> mTabSource;
RefPtr<SourceMediaStream> mStreamMain;
TrackID mTrackIDMain = TRACK_NONE;
PrincipalHandle mPrincipalHandleMain = PRINCIPAL_HANDLE_NONE;
// If this is set, we will run despite mWindow == nullptr.
bool mBlackedoutWindow = false;
// Current state of this source.
// Written on owning thread *and* under mMutex.
// Can be read on owning thread *or* under mMutex.
// Current state of this source. Accessed on owning thread only.
MediaEngineSourceState mState = kReleased;
// mStream and mTrackID are set in SetTrack() to keep track of what to end
// in Deallocate().
// Owning thread only.
// in Deallocate(). Owning thread only.
RefPtr<SourceMediaStream> mStream;
TrackID mTrackID = TRACK_NONE;
// mImage is protected by mMutex.
RefPtr<layers::Image> mImage;
nsCOMPtr<nsITimer> mTimer;
Mutex mMutex;
nsCOMPtr<nsITabSource> mTabSource;
PrincipalHandle mPrincipalHandle = PRINCIPAL_HANDLE_NONE;
};
} // namespace mozilla

View file

@@ -263,6 +263,35 @@ void QuotaRequestChild::HandleResponse(bool aResponse) {
mRequest->SetResult(variant);
}
void QuotaRequestChild::HandleResponse(const nsTArray<nsCString>& aResponse) {
AssertIsOnOwningThread();
MOZ_ASSERT(mRequest);
RefPtr<nsVariant> variant = new nsVariant();
if (aResponse.IsEmpty()) {
variant->SetAsEmptyArray();
} else {
nsTArray<RefPtr<InitializedOriginsResult>> initializedOriginsResults(
aResponse.Length());
for (auto& origin : aResponse) {
RefPtr<InitializedOriginsResult> initializedOriginsResult =
new InitializedOriginsResult(origin);
initializedOriginsResults.AppendElement(
initializedOriginsResult.forget());
}
variant->SetAsArray(
nsIDataType::VTYPE_INTERFACE_IS,
&NS_GET_IID(nsIQuotaInitializedOriginsResult),
initializedOriginsResults.Length(),
static_cast<void*>(initializedOriginsResults.Elements()));
}
mRequest->SetResult(variant);
}
void QuotaRequestChild::ActorDestroy(ActorDestroyReason aWhy) {
AssertIsOnOwningThread();
}
@@ -296,6 +325,10 @@ mozilla::ipc::IPCResult QuotaRequestChild::Recv__delete__(
HandleResponse(aResponse.get_PersistedResponse().persisted());
break;
case RequestResponse::TListInitializedOriginsResponse:
HandleResponse(aResponse.get_ListInitializedOriginsResponse().origins());
break;
default:
MOZ_CRASH("Unknown response type!");
}

View file

@@ -129,6 +129,8 @@ class QuotaRequestChild final : public PQuotaRequestChild {
void HandleResponse(bool aResponse);
void HandleResponse(const nsTArray<nsCString>& aResponse);
// IPDL methods are only called by IPDL.
virtual void ActorDestroy(ActorDestroyReason aWhy) override;

View file

@@ -941,7 +941,32 @@ class QuotaUsageRequestBase : public NormalOriginOperationBase,
mozilla::ipc::IPCResult RecvCancel() final;
};
class GetUsageOp final : public QuotaUsageRequestBase {
// A mix-in class to simplify operations that need to process every origin in
// one or more repositories. Sub-classes should call TraverseRepository in their
// DoDirectoryWork and implement a ProcessOrigin method for their per-origin
// logic.
class TraverseRepositoryHelper {
public:
TraverseRepositoryHelper() = default;
protected:
virtual ~TraverseRepositoryHelper() = default;
// If ProcessOrigin returns an error, TraverseRepository will immediately
// terminate and return the received error code to its caller.
nsresult TraverseRepository(QuotaManager* aQuotaManager,
PersistenceType aPersistenceType);
private:
virtual bool IsCanceled() = 0;
virtual nsresult ProcessOrigin(QuotaManager* aQuotaManager,
nsIFile* aOriginDir, const bool aPersistent,
const PersistenceType aPersistenceType) = 0;
};
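As the comment above says, a consumer derives from TraverseRepositoryHelper, drives iteration from its DoDirectoryWork, and supplies per-origin logic in ProcessOrigin; returning an error from ProcessOrigin aborts the traversal. A hypothetical minimal subclass, omitting the IPC plumbing (Init/GetResponse) that a real op also overrides (CountOriginsOp is illustrative only):

  class CountOriginsOp final : public QuotaRequestBase,
                               public TraverseRepositoryHelper {
    uint32_t mCount = 0;

   public:
    CountOriginsOp() : QuotaRequestBase(/* aExclusive */ false) {}

   private:
    nsresult DoDirectoryWork(QuotaManager* aQuotaManager) override {
      // The helper walks every origin directory in the repository and
      // calls ProcessOrigin for each, stopping on the first failure.
      return TraverseRepository(aQuotaManager, PERSISTENCE_TYPE_DEFAULT);
    }

    bool IsCanceled() override { return mCanceled; }

    nsresult ProcessOrigin(QuotaManager* aQuotaManager, nsIFile* aOriginDir,
                           const bool aPersistent,
                           const PersistenceType aPersistenceType) override {
      mCount++;  // per-origin logic goes here
      return NS_OK;
    }
  };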
class GetUsageOp final : public QuotaUsageRequestBase,
public TraverseRepositoryHelper {
nsTArray<OriginUsage> mOriginUsages;
nsDataHashtable<nsCStringHashKey, uint32_t> mOriginUsagesIndex;
@@ -953,11 +978,14 @@ class GetUsageOp final : public QuotaUsageRequestBase {
private:
~GetUsageOp() {}
nsresult TraverseRepository(QuotaManager* aQuotaManager,
PersistenceType aPersistenceType);
nsresult DoDirectoryWork(QuotaManager* aQuotaManager) override;
bool IsCanceled() override;
nsresult ProcessOrigin(QuotaManager* aQuotaManager, nsIFile* aOriginDir,
const bool aPersistent,
const PersistenceType aPersistenceType) override;
void GetResponse(UsageRequestResponse& aResponse) override;
};
@@ -1157,6 +1185,30 @@ class PersistOp final : public PersistRequestBase {
void GetResponse(RequestResponse& aResponse) override;
};
class ListInitializedOriginsOp final : public QuotaRequestBase,
public TraverseRepositoryHelper {
// XXX Bug 1521541 will make each origin have its own state.
nsTArray<nsCString> mOrigins;
public:
ListInitializedOriginsOp();
bool Init(Quota* aQuota) override;
private:
~ListInitializedOriginsOp() = default;
nsresult DoDirectoryWork(QuotaManager* aQuotaManager) override;
bool IsCanceled() override;
nsresult ProcessOrigin(QuotaManager* aQuotaManager, nsIFile* aOriginDir,
const bool aPersistent,
const PersistenceType aPersistenceType) override;
void GetResponse(RequestResponse& aResponse) override;
};
/*******************************************************************************
* Other class declarations
******************************************************************************/
@@ -3664,11 +3716,11 @@ nsresult QuotaManager::GetDirectoryMetadata2(
return rv;
}
if (!origin.EqualsLiteral(kChromeOrigin)) {
OriginAttributes originAttributes;
nsCString originNoSuffix;
if (NS_WARN_IF(
!originAttributes.PopulateFromOrigin(origin, originNoSuffix))) {
return NS_ERROR_FAILURE;
}
@@ -3689,19 +3741,19 @@ nsresult QuotaManager::GetDirectoryMetadata2(
if (group != upToDateGroup) {
group = upToDateGroup;
rv =
CreateDirectoryMetadata(aDirectory, timestamp, suffix, group, origin);
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}
rv = CreateDirectoryMetadata2(aDirectory, timestamp, persisted, suffix,
group, origin);
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}
#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
ContentPrincipalInfo contentPrincipalInfo;
contentPrincipalInfo.attrs() = originAttributes;
contentPrincipalInfo.originNoSuffix() = originNoSuffix;
@@ -3715,7 +3767,7 @@ nsresult QuotaManager::GetDirectoryMetadata2(
RefPtr<PrincipalVerifier> principalVerifier =
PrincipalVerifier::CreateAndDispatch(std::move(principalInfos));
#endif
}
}
@@ -6598,6 +6650,7 @@ bool Quota::VerifyRequestParams(const RequestParams& aParams) const {
case RequestParams::TClearAllParams:
case RequestParams::TResetAllParams:
case RequestParams::TListInitializedOriginsParams:
break;
case RequestParams::TPersistedParams: {
@@ -6760,6 +6813,10 @@ PQuotaRequestParent* Quota::AllocPQuotaRequestParent(
actor = new PersistOp(aParams);
break;
case RequestParams::TListInitializedOriginsParams:
actor = new ListInitializedOriginsOp();
break;
default:
MOZ_CRASH("Should never get here!");
}
@@ -7033,14 +7090,8 @@ mozilla::ipc::IPCResult QuotaUsageRequestBase::RecvCancel() {
return IPC_OK();
}
GetUsageOp::GetUsageOp(const UsageRequestParams& aParams)
: mGetAll(aParams.get_AllUsageParams().getAll()) {
AssertIsOnOwningThread();
MOZ_ASSERT(aParams.type() == UsageRequestParams::TAllUsageParams);
}
nsresult GetUsageOp::TraverseRepository(QuotaManager* aQuotaManager,
PersistenceType aPersistenceType) {
nsresult TraverseRepositoryHelper::TraverseRepository(
QuotaManager* aQuotaManager, PersistenceType aPersistenceType) {
AssertIsOnIOThread();
MOZ_ASSERT(aQuotaManager);
@@ -7071,7 +7122,7 @@ nsresult GetUsageOp::TraverseRepository(QuotaManager* aQuotaManager,
nsCOMPtr<nsIFile> originDir;
while (NS_SUCCEEDED((rv = entries->GetNextFile(getter_AddRefs(originDir)))) &&
originDir && !mCanceled) {
originDir && !IsCanceled()) {
bool isDirectory;
rv = originDir->IsDirectory(&isDirectory);
if (NS_WARN_IF(NS_FAILED(rv))) {
@@ -7093,55 +7144,10 @@ nsresult GetUsageOp::TraverseRepository(QuotaManager* aQuotaManager,
continue;
}
int64_t timestamp;
bool persisted;
nsCString suffix;
nsCString group;
nsCString origin;
rv = aQuotaManager->GetDirectoryMetadata2WithRestore(
originDir, persistent, &timestamp, &persisted, suffix, group, origin);
rv = ProcessOrigin(aQuotaManager, originDir, persistent, aPersistenceType);
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}
if (!mGetAll && aQuotaManager->IsOriginInternal(origin)) {
continue;
}
OriginUsage* originUsage;
// We can't store pointers to OriginUsage objects in the hashtable
// since AppendElement() reallocates its internal array buffer as number
// of elements grows.
uint32_t index;
if (mOriginUsagesIndex.Get(origin, &index)) {
originUsage = &mOriginUsages[index];
} else {
index = mOriginUsages.Length();
originUsage = mOriginUsages.AppendElement();
originUsage->origin() = origin;
originUsage->persisted() = false;
originUsage->usage() = 0;
mOriginUsagesIndex.Put(origin, index);
}
if (aPersistenceType == PERSISTENCE_TYPE_DEFAULT) {
originUsage->persisted() = persisted;
}
originUsage->lastAccessed() = timestamp;
UsageInfo usageInfo;
rv = GetUsageForOrigin(aQuotaManager, aPersistenceType, group, origin,
&usageInfo);
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}
originUsage->usage() = originUsage->usage() + usageInfo.TotalUsage();
}
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
@@ -7150,6 +7156,78 @@ nsresult GetUsageOp::TraverseRepository(QuotaManager* aQuotaManager,
return NS_OK;
}
GetUsageOp::GetUsageOp(const UsageRequestParams& aParams)
: mGetAll(aParams.get_AllUsageParams().getAll()) {
AssertIsOnOwningThread();
MOZ_ASSERT(aParams.type() == UsageRequestParams::TAllUsageParams);
}
bool GetUsageOp::IsCanceled() {
AssertIsOnIOThread();
return mCanceled;
}
nsresult GetUsageOp::ProcessOrigin(QuotaManager* aQuotaManager,
nsIFile* aOriginDir, const bool aPersistent,
const PersistenceType aPersistenceType) {
AssertIsOnIOThread();
MOZ_ASSERT(aQuotaManager);
MOZ_ASSERT(aOriginDir);
int64_t timestamp;
bool persisted;
nsCString suffix;
nsCString group;
nsCString origin;
nsresult rv = aQuotaManager->GetDirectoryMetadata2WithRestore(
aOriginDir, aPersistent, &timestamp, &persisted, suffix, group, origin);
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}
if (!mGetAll && aQuotaManager->IsOriginInternal(origin)) {
return NS_OK;
}
OriginUsage* originUsage;
// We can't store pointers to OriginUsage objects in the hashtable
// since AppendElement() reallocates its internal array buffer as number
// of elements grows.
uint32_t index;
if (mOriginUsagesIndex.Get(origin, &index)) {
originUsage = &mOriginUsages[index];
} else {
index = mOriginUsages.Length();
originUsage = mOriginUsages.AppendElement();
originUsage->origin() = origin;
originUsage->persisted() = false;
originUsage->usage() = 0;
mOriginUsagesIndex.Put(origin, index);
}
if (aPersistenceType == PERSISTENCE_TYPE_DEFAULT) {
originUsage->persisted() = persisted;
}
originUsage->lastAccessed() = timestamp;
UsageInfo usageInfo;
rv = GetUsageForOrigin(aQuotaManager, aPersistenceType, group, origin,
&usageInfo);
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}
originUsage->usage() = originUsage->usage() + usageInfo.TotalUsage();
return NS_OK;
}
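The comment carried over above is worth unpacking: nsTArray::AppendElement() may reallocate the backing buffer as the array grows, so a pointer taken into the array earlier can dangle, while an index remains valid across reallocation. A tiny illustration (Foo is a placeholder type):

  nsTArray<Foo> arr;
  Foo* p = arr.AppendElement();  // p points into arr's current buffer
  arr.AppendElement();           // growth may reallocate; p can now dangle
  uint32_t i = 0;                // an index survives reallocation,
  Foo& stillValid = arr[i];      // which is why the hashtable stores indices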
nsresult GetUsageOp::DoDirectoryWork(QuotaManager* aQuotaManager) {
AssertIsOnIOThread();
@@ -7927,6 +8005,88 @@ void PersistOp::GetResponse(RequestResponse& aResponse) {
aResponse = PersistResponse();
}
ListInitializedOriginsOp::ListInitializedOriginsOp()
: QuotaRequestBase(/* aExclusive */ false), TraverseRepositoryHelper() {
AssertIsOnOwningThread();
}
bool ListInitializedOriginsOp::Init(Quota* aQuota) {
AssertIsOnOwningThread();
MOZ_ASSERT(aQuota);
mNeedsQuotaManagerInit = true;
return true;
}
nsresult ListInitializedOriginsOp::DoDirectoryWork(
QuotaManager* aQuotaManager) {
AssertIsOnIOThread();
MOZ_ASSERT(aQuotaManager);
AUTO_PROFILER_LABEL("ListInitializedOriginsOp::DoDirectoryWork", OTHER);
nsresult rv;
if (!aQuotaManager->IsTemporaryStorageInitialized()) {
return NS_OK;
}
for (const PersistenceType type : kAllPersistenceTypes) {
rv = TraverseRepository(aQuotaManager, type);
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}
}
return NS_OK;
}
bool ListInitializedOriginsOp::IsCanceled() {
AssertIsOnIOThread();
return mCanceled;
}
nsresult ListInitializedOriginsOp::ProcessOrigin(
QuotaManager* aQuotaManager, nsIFile* aOriginDir, const bool aPersistent,
const PersistenceType aPersistenceType) {
AssertIsOnIOThread();
MOZ_ASSERT(aQuotaManager);
MOZ_ASSERT(aOriginDir);
int64_t timestamp;
bool persisted;
nsCString suffix;
nsCString group;
nsCString origin;
nsresult rv = aQuotaManager->GetDirectoryMetadata2WithRestore(
aOriginDir, aPersistent, &timestamp, &persisted, suffix, group, origin);
if (NS_WARN_IF(NS_FAILED(rv))) {
return rv;
}
if (aQuotaManager->IsOriginInternal(origin)) {
return NS_OK;
}
mOrigins.AppendElement(origin);
return NS_OK;
}
void ListInitializedOriginsOp::GetResponse(RequestResponse& aResponse) {
AssertIsOnOwningThread();
aResponse = ListInitializedOriginsResponse();
if (mOrigins.IsEmpty()) {
return;
}
nsTArray<nsCString>& origins =
aResponse.get_ListInitializedOriginsResponse().origins();
mOrigins.SwapElements(origins);
}
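GetResponse() hands the collected origins to the IPC response via SwapElements(), which exchanges the two arrays' buffers in O(1) rather than copying every string. A small usage sketch:

  nsTArray<nsCString> src;
  src.AppendElement(NS_LITERAL_CSTRING("https://example.com"));
  nsTArray<nsCString> dst;
  dst.SwapElements(src);  // dst now owns the elements; src is left empty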
#ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
// static

View file

@@ -100,6 +100,10 @@ struct PersistParams
PrincipalInfo principalInfo;
};
struct ListInitializedOriginsParams
{
};
union RequestParams
{
InitParams;
@@ -112,6 +116,7 @@ union RequestParams
ResetAllParams;
PersistedParams;
PersistParams;
ListInitializedOriginsParams;
};
protocol PQuota

View file

@@ -50,6 +50,11 @@ struct PersistResponse
{
};
struct ListInitializedOriginsResponse
{
nsCString[] origins;
};
union RequestResponse
{
nsresult;
@@ -63,6 +68,7 @@ union RequestResponse
ResetAllResponse;
PersistedResponse;
PersistResponse;
ListInitializedOriginsResponse;
};
protocol PQuotaRequest
