diff --git a/accessible/base/nsAccessibilityService.cpp b/accessible/base/nsAccessibilityService.cpp
index 31eeb4f47ae1..fa7d89002097 100644
--- a/accessible/base/nsAccessibilityService.cpp
+++ b/accessible/base/nsAccessibilityService.cpp
@@ -595,6 +595,7 @@ nsAccessibilityService::ContentRemoved(nsIPresShell* aPresShell,
}
if (child) {
+ MOZ_DIAGNOSTIC_ASSERT(child->Parent(), "Unattached accessible from tree");
document->ContentRemoved(child->Parent(), aChildNode);
#ifdef A11Y_LOG
if (logging::IsEnabled(logging::eTree))
diff --git a/build/macosx/local-mozconfig.common b/build/macosx/local-mozconfig.common
index c49a0680f2c2..30d911615fee 100644
--- a/build/macosx/local-mozconfig.common
+++ b/build/macosx/local-mozconfig.common
@@ -32,6 +32,11 @@ fi
ldflags="$ldflags -Wl,-no_data_in_code_info"
export LDFLAGS="$ldflags"
+# Until bug 1342503 is fixed, we can't build some of the webrender dependencies
+# on buildbot OS X builders, because rustc will use some random system toolchain
+# instead of the one we package with tooltool.
+ac_add_options --disable-webrender
+
# If not set use the system default clang
if [ -z "$CC" ]; then
export CC=clang
diff --git a/devtools/client/inspector/boxmodel/components/BoxModelEditable.js b/devtools/client/inspector/boxmodel/components/BoxModelEditable.js
index f90954d5ffe5..9b20cd560338 100644
--- a/devtools/client/inspector/boxmodel/components/BoxModelEditable.js
+++ b/devtools/client/inspector/boxmodel/components/BoxModelEditable.js
@@ -16,7 +16,7 @@ module.exports = createClass({
propTypes: {
box: PropTypes.string.isRequired,
- direction: PropTypes.string.isRequired,
+ direction: PropTypes.string,
property: PropTypes.string.isRequired,
textContent: PropTypes.oneOfType([PropTypes.string, PropTypes.number]).isRequired,
onShowBoxModelEditor: PropTypes.func.isRequired,
@@ -42,13 +42,15 @@ module.exports = createClass({
textContent,
} = this.props;
- let rotate = (direction == "left" || direction == "right") &&
+ let rotate = direction &&
+ (direction == "left" || direction == "right") &&
textContent.toString().length > LONG_TEXT_ROTATE_LIMIT;
return dom.p(
{
- className: `boxmodel-${box} boxmodel-${direction}
- ${rotate ? "boxmodel-rotate" : ""}`,
+ className: `boxmodel-${box}
+ ${direction ? " boxmodel-" + direction : "boxmodel-" + property}
+ ${rotate ? " boxmodel-rotate" : ""}`,
},
dom.span(
{
diff --git a/devtools/client/inspector/boxmodel/components/BoxModelMain.js b/devtools/client/inspector/boxmodel/components/BoxModelMain.js
index 37581a630cae..07a4c84ab4b5 100644
--- a/devtools/client/inspector/boxmodel/components/BoxModelMain.js
+++ b/devtools/client/inspector/boxmodel/components/BoxModelMain.js
@@ -134,6 +134,41 @@ module.exports = createClass({
height = this.getHeightValue(height);
width = this.getWidthValue(width);
+ let contentBox = layout["box-sizing"] == "content-box" ?
+ dom.p(
+ {
+ className: "boxmodel-size",
+ },
+ BoxModelEditable({
+ box: "content",
+ property: "width",
+ textContent: width,
+ onShowBoxModelEditor
+ }),
+ dom.span(
+ {},
+ "\u00D7"
+ ),
+ BoxModelEditable({
+ box: "content",
+ property: "height",
+ textContent: height,
+ onShowBoxModelEditor
+ })
+ )
+ :
+ dom.p(
+ {
+ className: "boxmodel-size",
+ },
+ dom.span(
+ {
+ title: BOXMODEL_L10N.getStr("boxmodel.content"),
+ },
+ SHARED_L10N.getFormatStr("dimensions", width, height)
+ )
+ );
+
return dom.div(
{
className: "boxmodel-main",
@@ -198,7 +233,7 @@ module.exports = createClass({
title: BOXMODEL_L10N.getStr("boxmodel.padding"),
},
dom.div({
- className: "boxmodel-content",
+ className: "boxmodel-contents",
"data-box": "content",
title: BOXMODEL_L10N.getStr("boxmodel.content"),
})
@@ -330,18 +365,7 @@ module.exports = createClass({
textContent: paddingLeft,
onShowBoxModelEditor,
}),
- dom.p(
- {
- className: "boxmodel-size",
- },
- dom.span(
- {
- "data-box": "content",
- title: BOXMODEL_L10N.getStr("boxmodel.content"),
- },
- SHARED_L10N.getFormatStr("dimensions", width, height)
- )
- )
+ contentBox
);
},
diff --git a/devtools/client/inspector/boxmodel/components/ComputedProperty.js b/devtools/client/inspector/boxmodel/components/ComputedProperty.js
index 87fef1a3d4c2..262258ba6184 100644
--- a/devtools/client/inspector/boxmodel/components/ComputedProperty.js
+++ b/devtools/client/inspector/boxmodel/components/ComputedProperty.js
@@ -28,6 +28,7 @@ module.exports = createClass({
return dom.div(
{
className: "property-view",
+ "data-property-name": name,
tabIndex: "0",
ref: container => {
this.container = container;
diff --git a/devtools/client/inspector/boxmodel/test/browser.ini b/devtools/client/inspector/boxmodel/test/browser.ini
index c9881c903937..ba05da95490b 100644
--- a/devtools/client/inspector/boxmodel/test/browser.ini
+++ b/devtools/client/inspector/boxmodel/test/browser.ini
@@ -22,6 +22,7 @@ support-files =
[browser_boxmodel_guides.js]
[browser_boxmodel_navigation.js]
skip-if = true # Bug 1336198
+[browser_boxmodel_properties.js]
[browser_boxmodel_rotate-labels-on-sides.js]
[browser_boxmodel_sync.js]
[browser_boxmodel_tooltips.js]
diff --git a/devtools/client/inspector/boxmodel/test/browser_boxmodel.js b/devtools/client/inspector/boxmodel/test/browser_boxmodel.js
index 9e085cafe232..3ad67b4da72d 100644
--- a/devtools/client/inspector/boxmodel/test/browser_boxmodel.js
+++ b/devtools/client/inspector/boxmodel/test/browser_boxmodel.js
@@ -14,8 +14,12 @@ var res1 = [
value: "160" + "\u00D7" + "160.117"
},
{
- selector: ".boxmodel-size > span",
- value: "100" + "\u00D7" + "100.117"
+ selector: ".boxmodel-size > .boxmodel-width",
+ value: "100"
+ },
+ {
+ selector: ".boxmodel-size > .boxmodel-height",
+ value: "100.117"
},
{
selector: ".boxmodel-margin.boxmodel-top > span",
@@ -73,8 +77,12 @@ var res2 = [
value: "190" + "\u00D7" + "210"
},
{
- selector: ".boxmodel-size > span",
- value: "100" + "\u00D7" + "150"
+ selector: ".boxmodel-size > .boxmodel-width",
+ value: "100"
+ },
+ {
+ selector: ".boxmodel-size > .boxmodel-height",
+ value: "150"
},
{
selector: ".boxmodel-margin.boxmodel-top > span",
diff --git a/devtools/client/inspector/boxmodel/test/browser_boxmodel_properties.js b/devtools/client/inspector/boxmodel/test/browser_boxmodel_properties.js
new file mode 100644
index 000000000000..95479f756538
--- /dev/null
+++ b/devtools/client/inspector/boxmodel/test/browser_boxmodel_properties.js
@@ -0,0 +1,120 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+// Test that the box model properties list displays the right values
+// and that it updates when the node's style is changed.
+
+const TEST_URI = `
+  <style>
+    div {
+      box-sizing: border-box;
+      float: left;
+      line-height: 20px;
+      position: relative;
+      z-index: 2;
+    }
+  </style>
+  <div>Test Node</div>
+`;
+
+const res1 = [
+ {
+ property: "box-sizing",
+ value: "border-box"
+ },
+ {
+ property: "display",
+ value: "block"
+ },
+ {
+ property: "float",
+ value: "left"
+ },
+ {
+ property: "line-height",
+ value: "20px"
+ },
+ {
+ property: "position",
+ value: "relative"
+ },
+ {
+ property: "z-index",
+ value: 2
+ },
+];
+
+const res2 = [
+ {
+ property: "box-sizing",
+ value: "content-box"
+ },
+ {
+ property: "display",
+ value: "block"
+ },
+ {
+ property: "float",
+ value: "right"
+ },
+ {
+ property: "line-height",
+ value: "10px"
+ },
+ {
+ property: "position",
+ value: "static"
+ },
+ {
+ property: "z-index",
+ value: 5
+ },
+];
+
+add_task(function* () {
+ yield addTab("data:text/html;charset=utf-8," + encodeURIComponent(TEST_URI));
+ let { inspector, boxmodel, testActor } = yield openLayoutView();
+ yield selectNode("div", inspector);
+
+ yield testInitialValues(inspector, boxmodel);
+ yield testChangingValues(inspector, boxmodel, testActor);
+});
+
+function* testInitialValues(inspector, boxmodel) {
+ info("Test that the initial values of the box model are correct");
+ let doc = boxmodel.document;
+
+ for (let { property, value } of res1) {
+ let elt = doc.querySelector(getPropertySelector(property));
+ is(elt.textContent, value, property + " has the right value.");
+ }
+}
+
+function* testChangingValues(inspector, boxmodel, testActor) {
+ info("Test that changing the document updates the box model");
+ let doc = boxmodel.document;
+
+ let onUpdated = waitForUpdate(inspector);
+ yield testActor.setAttribute("div", "style",
+ "box-sizing:content-box;float:right;" +
+ "line-height:10px;position:static;z-index:5;");
+ yield onUpdated;
+
+ for (let { property, value } of res2) {
+ let elt = doc.querySelector(getPropertySelector(property));
+ is(elt.textContent, value, property + " has the right value after style update.");
+ }
+}
+
+function getPropertySelector(propertyName) {
+ return `.boxmodel-properties-wrapper .property-view` +
+ `[data-property-name=${propertyName}] .property-value`;
+}
diff --git a/devtools/client/inspector/boxmodel/test/doc_boxmodel_iframe2.html b/devtools/client/inspector/boxmodel/test/doc_boxmodel_iframe2.html
index 1f1b0463c3d4..0fa6dc02e97d 100644
--- a/devtools/client/inspector/boxmodel/test/doc_boxmodel_iframe2.html
+++ b/devtools/client/inspector/boxmodel/test/doc_boxmodel_iframe2.html
@@ -1,3 +1,3 @@
-iframe 1
-
+iframe 1
+
diff --git a/devtools/client/inspector/boxmodel/test/head.js b/devtools/client/inspector/boxmodel/test/head.js
index b842968a5595..4df6e686a28e 100644
--- a/devtools/client/inspector/boxmodel/test/head.js
+++ b/devtools/client/inspector/boxmodel/test/head.js
@@ -11,8 +11,10 @@ Services.scriptloader.loadSubScript(
"chrome://mochitests/content/browser/devtools/client/inspector/test/head.js",
this);
+Services.prefs.setBoolPref("devtools.layoutview.enabled", true);
Services.prefs.setIntPref("devtools.toolbox.footer.height", 350);
registerCleanupFunction(() => {
+ Services.prefs.clearUserPref("devtools.layoutview.enabled");
Services.prefs.clearUserPref("devtools.toolbox.footer.height");
});
@@ -66,6 +68,36 @@ function openBoxModelView() {
});
}
+/**
+ * Open the toolbox, with the inspector tool visible, and the layout view
+ * sidebar tab selected to display the box model view with properties.
+ *
+ * @return {Promise} a promise that resolves when the inspector is ready and the box model
+ * view is visible and ready.
+ */
+function openLayoutView() {
+ return openInspectorSidebarTab("layoutview").then(data => {
+ // The actual highlighter show/hide methods are mocked in box model tests.
+ // The highlighter is tested in devtools/inspector/test.
+ function mockHighlighter({highlighter}) {
+ highlighter.showBoxModel = function () {
+ return promise.resolve();
+ };
+ highlighter.hideBoxModel = function () {
+ return promise.resolve();
+ };
+ }
+ mockHighlighter(data.toolbox);
+
+ return {
+ toolbox: data.toolbox,
+ inspector: data.inspector,
+ boxmodel: data.inspector.boxmodel,
+ testActor: data.testActor
+ };
+ });
+}
+
/**
* Wait for the boxmodel-view-updated event.
*
diff --git a/devtools/client/inspector/inspector.js b/devtools/client/inspector/inspector.js
index b58dce429004..349945a49cae 100644
--- a/devtools/client/inspector/inspector.js
+++ b/devtools/client/inspector/inspector.js
@@ -926,10 +926,18 @@ Inspector.prototype = {
this.ruleview.destroy();
}
+ if (this.boxmodel) {
+ this.boxmodel.destroy();
+ }
+
if (this.computedview) {
this.computedview.destroy();
}
+ if (this.gridInspector) {
+ this.gridInspector.destroy();
+ }
+
if (this.layoutview) {
this.layoutview.destroy();
}
diff --git a/devtools/client/themes/boxmodel.css b/devtools/client/themes/boxmodel.css
index d8e2b4d31efd..4cef659de5aa 100644
--- a/devtools/client/themes/boxmodel.css
+++ b/devtools/client/themes/boxmodel.css
@@ -52,7 +52,7 @@
/* Regions are 3 nested elements with wide borders and outlines */
-.boxmodel-content {
+.boxmodel-contents {
height: 18px;
}
@@ -84,7 +84,7 @@
border-color: #6a5acd;
}
-.boxmodel-content {
+.boxmodel-contents {
background-color: #87ceeb;
}
@@ -104,7 +104,8 @@
/* Editable region sizes are contained in absolutely positioned */
-.boxmodel-main > p {
+.boxmodel-main > p,
+.boxmodel-size {
position: absolute;
pointer-events: none;
margin: 0;
@@ -112,7 +113,8 @@
}
.boxmodel-main > p > span,
-.boxmodel-main > p > input {
+.boxmodel-main > p > input,
+.boxmodel-content {
vertical-align: middle;
pointer-events: auto;
}
@@ -172,8 +174,8 @@
.boxmodel-position.boxmodel-right,
.boxmodel-margin.boxmodel-right,
.boxmodel-margin.boxmodel-left,
-.boxmodel-border.boxmodel-left,
.boxmodel-border.boxmodel-right,
+.boxmodel-border.boxmodel-left,
.boxmodel-padding.boxmodel-right,
.boxmodel-padding.boxmodel-left {
width: 21px;
@@ -218,6 +220,12 @@
height: 30px;
}
+.boxmodel-size > p {
+ display: inline-block;
+ margin: auto;
+ line-height: 0;
+}
+
.boxmodel-rotate.boxmodel-right.boxmodel-position:not(.boxmodel-editing) {
border-top: none;
border-left: 1px solid var(--theme-highlight-purple);
@@ -290,8 +298,6 @@
border-bottom-color: hsl(0, 0%, 50%);
}
-/* Make sure the content size doesn't appear as editable like the other sizes */
-
.boxmodel-size > span {
cursor: default;
}
diff --git a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
index f0e97c75fdae..b78b6478b54f 100644
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -384,10 +384,9 @@ FindD3D9BlacklistedDLL()
class CreateDXVAManagerEvent : public Runnable
{
public:
- CreateDXVAManagerEvent(LayersBackend aBackend,
- layers::KnowsCompositor* aKnowsCompositor,
+ CreateDXVAManagerEvent(layers::KnowsCompositor* aKnowsCompositor,
nsCString& aFailureReason)
- : mBackend(aBackend)
+ : mBackend(LayersBackend::LAYERS_D3D11)
, mKnowsCompositor(aKnowsCompositor)
, mFailureReason(aFailureReason)
{
@@ -435,7 +434,7 @@ public:
};
bool
-WMFVideoMFTManager::InitializeDXVA(bool aForceD3D9)
+WMFVideoMFTManager::InitializeDXVA()
{
// If we use DXVA but aren't running with a D3D layer manager then the
// readback of decoded video frames from GPU to CPU memory grinds painting
@@ -447,17 +446,14 @@ WMFVideoMFTManager::InitializeDXVA(bool aForceD3D9)
}
MOZ_ASSERT(!mDXVA2Manager);
LayersBackend backend = GetCompositorBackendType(mKnowsCompositor);
- if (backend != LayersBackend::LAYERS_D3D9
- && backend != LayersBackend::LAYERS_D3D11) {
+ if (backend != LayersBackend::LAYERS_D3D11) {
mDXVAFailureReason.AssignLiteral("Unsupported layers backend");
return false;
}
// The DXVA manager must be created on the main thread.
RefPtr<CreateDXVAManagerEvent> event =
- new CreateDXVAManagerEvent(aForceD3D9 ? LayersBackend::LAYERS_D3D9
- : backend,
- mKnowsCompositor,
+ new CreateDXVAManagerEvent(mKnowsCompositor,
mDXVAFailureReason);
if (NS_IsMainThread()) {
@@ -499,7 +495,7 @@ WMFVideoMFTManager::Init()
return false;
}
- bool success = InitInternal(/* aForceD3D9 = */ false);
+ bool success = InitInternal();
if (success && mDXVA2Manager) {
// If we had some failures but eventually made it work,
@@ -515,10 +511,10 @@ WMFVideoMFTManager::Init()
}
bool
-WMFVideoMFTManager::InitInternal(bool aForceD3D9)
+WMFVideoMFTManager::InitInternal()
{
mUseHwAccel = false; // default value; changed if D3D setup succeeds.
- bool useDxva = InitializeDXVA(aForceD3D9);
+ bool useDxva = InitializeDXVA();
RefPtr<MFTDecoder> decoder(new MFTDecoder());
@@ -836,8 +832,7 @@ WMFVideoMFTManager::CreateBasicVideoFrame(IMFSample* aSample,
nsIntRect pictureRegion = mVideoInfo.ScaledImageRect(videoWidth, videoHeight);
LayersBackend backend = GetCompositorBackendType(mKnowsCompositor);
- if (backend != LayersBackend::LAYERS_D3D9 &&
- backend != LayersBackend::LAYERS_D3D11) {
+ if (backend != LayersBackend::LAYERS_D3D11) {
RefPtr<VideoData> v =
VideoData::CreateAndCopyData(mVideoInfo,
mImageContainer,
diff --git a/dom/media/platforms/wmf/WMFVideoMFTManager.h b/dom/media/platforms/wmf/WMFVideoMFTManager.h
index 7d9851790135..56dbfaac1314 100644
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.h
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.h
@@ -69,9 +69,9 @@ public:
private:
bool ValidateVideoInfo();
- bool InitializeDXVA(bool aForceD3D9);
+ bool InitializeDXVA();
- bool InitInternal(bool aForceD3D9);
+ bool InitInternal();
HRESULT CreateBasicVideoFrame(IMFSample* aSample,
int64_t aStreamOffset,
diff --git a/dom/xslt/xslt/txExecutionState.cpp b/dom/xslt/xslt/txExecutionState.cpp
index ed57c79351dd..a32fdb0b8065 100644
--- a/dom/xslt/xslt/txExecutionState.cpp
+++ b/dom/xslt/xslt/txExecutionState.cpp
@@ -87,7 +87,10 @@ txExecutionState::~txExecutionState()
txStackIterator handlerIter(&mResultHandlerStack);
while (handlerIter.hasNext()) {
- delete (txAXMLEventHandler*)handlerIter.next();
+ txAXMLEventHandler* handler = (txAXMLEventHandler*)handlerIter.next();
+ if (handler != mObsoleteHandler) {
+ delete handler;
+ }
}
txStackIterator paramIter(&mParamStack);
@@ -159,6 +162,17 @@ txExecutionState::end(nsresult aResult)
return mOutputHandler->endDocument(aResult);
}
+void
+txExecutionState::popAndDeleteEvalContext()
+{
+ if (!mEvalContextStack.isEmpty()) {
+ auto ctx = popEvalContext();
+ if (ctx != mInitialEvalContext) {
+ delete ctx;
+ }
+ }
+}
+
void
txExecutionState::popAndDeleteEvalContextUntil(txIEvalContext* aContext)
{
diff --git a/dom/xslt/xslt/txExecutionState.h b/dom/xslt/xslt/txExecutionState.h
index 44f1918c86b0..3aa6d5a660bf 100644
--- a/dom/xslt/xslt/txExecutionState.h
+++ b/dom/xslt/xslt/txExecutionState.h
@@ -95,6 +95,8 @@ public:
nsresult pushEvalContext(txIEvalContext* aContext);
txIEvalContext* popEvalContext();
+ void popAndDeleteEvalContext();
+
/**
* Helper that deletes all entries before |aContext| and then
* pops it off the stack. The caller must delete |aContext| if
diff --git a/dom/xslt/xslt/txInstructions.cpp b/dom/xslt/xslt/txInstructions.cpp
index 38253e0cb332..f769680474f3 100644
--- a/dom/xslt/xslt/txInstructions.cpp
+++ b/dom/xslt/xslt/txInstructions.cpp
@@ -37,16 +37,7 @@ txApplyDefaultElementTemplate::execute(txExecutionState& aEs)
}
nsresult
-txApplyImportsEnd::execute(txExecutionState& aEs)
-{
- aEs.popTemplateRule();
- aEs.popParamMap();
-
- return NS_OK;
-}
-
-nsresult
-txApplyImportsStart::execute(txExecutionState& aEs)
+txApplyImports::execute(txExecutionState& aEs)
{
txExecutionState::TemplateRule* rule = aEs.getCurrentTemplateRule();
// The frame is set to null when there is no current template rule, or
@@ -68,7 +59,12 @@ txApplyImportsStart::execute(txExecutionState& aEs)
aEs.pushTemplateRule(frame, mode, rule->mParams);
- return aEs.runTemplate(templ);
+ rv = aEs.runTemplate(templ);
+
+ aEs.popTemplateRule();
+ aEs.popParamMap();
+
+ return rv;
}
txApplyTemplates::txApplyTemplates(const txExpandedName& aMode)
@@ -474,7 +470,7 @@ txLoopNodeSet::execute(txExecutionState& aEs)
txNodeSetContext* context =
static_cast<txNodeSetContext*>(aEs.getEvalContext());
if (!context->hasNext()) {
- delete aEs.popEvalContext();
+ aEs.popAndDeleteEvalContext();
return NS_OK;
}
diff --git a/dom/xslt/xslt/txInstructions.h b/dom/xslt/xslt/txInstructions.h
index 55138a600a54..d363400e868c 100644
--- a/dom/xslt/xslt/txInstructions.h
+++ b/dom/xslt/xslt/txInstructions.h
@@ -47,13 +47,7 @@ public:
TX_DECL_TXINSTRUCTION
};
-class txApplyImportsEnd : public txInstruction
-{
-public:
- TX_DECL_TXINSTRUCTION
-};
-
-class txApplyImportsStart : public txInstruction
+class txApplyImports : public txInstruction
{
public:
TX_DECL_TXINSTRUCTION
diff --git a/dom/xslt/xslt/txStylesheetCompileHandlers.cpp b/dom/xslt/xslt/txStylesheetCompileHandlers.cpp
index f447df7cfc46..4d451e3c303f 100644
--- a/dom/xslt/xslt/txStylesheetCompileHandlers.cpp
+++ b/dom/xslt/xslt/txStylesheetCompileHandlers.cpp
@@ -1312,8 +1312,7 @@ txFnText(const nsAString& aStr, txStylesheetCompilerState& aState)
/*
xsl:apply-imports
- txApplyImportsStart
- txApplyImportsEnd
+ txApplyImports
*/
static nsresult
txFnStartApplyImports(int32_t aNamespaceID,
@@ -1325,11 +1324,7 @@ txFnStartApplyImports(int32_t aNamespaceID,
{
nsresult rv = NS_OK;
- nsAutoPtr<txInstruction> instr(new txApplyImportsStart);
- rv = aState.addInstruction(Move(instr));
- NS_ENSURE_SUCCESS(rv, rv);
-
- instr = new txApplyImportsEnd;
+ nsAutoPtr<txInstruction> instr(new txApplyImports);
rv = aState.addInstruction(Move(instr));
NS_ENSURE_SUCCESS(rv, rv);
diff --git a/gfx/gl/GLLibraryEGL.cpp b/gfx/gl/GLLibraryEGL.cpp
index 6cfcf9797ff7..c502f9f0827e 100644
--- a/gfx/gl/GLLibraryEGL.cpp
+++ b/gfx/gl/GLLibraryEGL.cpp
@@ -154,7 +154,7 @@ IsAccelAngleSupported(const nsCOMPtr<nsIGfxInfo>& gfxInfo,
if (CompositorThreadHolder::IsInCompositorThread()) {
// We can only enter here with WebRender, so assert that this is a
// WebRender-enabled build.
-#ifndef MOZ_ENABLE_WEBRENDER
+#ifndef MOZ_BUILD_WEBRENDER
MOZ_ASSERT(false);
#endif
return true;
diff --git a/gfx/layers/LayersTypes.h b/gfx/layers/LayersTypes.h
index f9ac335c1c9b..adc70fefb9f3 100644
--- a/gfx/layers/LayersTypes.h
+++ b/gfx/layers/LayersTypes.h
@@ -46,7 +46,6 @@ enum class LayersBackend : int8_t {
LAYERS_NONE = 0,
LAYERS_BASIC,
LAYERS_OPENGL,
- LAYERS_D3D9,
LAYERS_D3D11,
LAYERS_CLIENT,
LAYERS_WR,
diff --git a/gfx/layers/client/CanvasClient.cpp b/gfx/layers/client/CanvasClient.cpp
index 19683a6c82b3..03d406b5b924 100644
--- a/gfx/layers/client/CanvasClient.cpp
+++ b/gfx/layers/client/CanvasClient.cpp
@@ -336,9 +336,7 @@ TexClientFromReadback(SharedSurface* src, CompositableForwarder* allocator,
// RB_SWAPPED doesn't work with D3D11. (bug 1051010)
// RB_SWAPPED doesn't work with Basic. (bug ???????)
- // RB_SWAPPED doesn't work with D3D9. (bug ???????)
bool layersNeedsManualSwap = layersBackend == LayersBackend::LAYERS_BASIC ||
- layersBackend == LayersBackend::LAYERS_D3D9 ||
layersBackend == LayersBackend::LAYERS_D3D11;
if (texClient->HasFlags(TextureFlags::RB_SWAPPED) &&
layersNeedsManualSwap)
diff --git a/gfx/layers/client/ClientLayerManager.cpp b/gfx/layers/client/ClientLayerManager.cpp
index 9a57682b0b5f..08fe6c4d2b13 100644
--- a/gfx/layers/client/ClientLayerManager.cpp
+++ b/gfx/layers/client/ClientLayerManager.cpp
@@ -826,7 +826,6 @@ ClientLayerManager::GetBackendName(nsAString& aName)
case LayersBackend::LAYERS_NONE: aName.AssignLiteral("None"); return;
case LayersBackend::LAYERS_BASIC: aName.AssignLiteral("Basic"); return;
case LayersBackend::LAYERS_OPENGL: aName.AssignLiteral("OpenGL"); return;
- case LayersBackend::LAYERS_D3D9: aName.AssignLiteral("Direct3D 9"); return;
case LayersBackend::LAYERS_D3D11: {
#ifdef XP_WIN
if (DeviceManagerDx::Get()->IsWARP()) {
diff --git a/gfx/layers/client/ContentClient.cpp b/gfx/layers/client/ContentClient.cpp
index 69384ae7ebde..1d2840920d84 100644
--- a/gfx/layers/client/ContentClient.cpp
+++ b/gfx/layers/client/ContentClient.cpp
@@ -61,7 +61,6 @@ ContentClient::CreateContentClient(CompositableForwarder* aForwarder)
{
LayersBackend backend = aForwarder->GetCompositorBackendType();
if (backend != LayersBackend::LAYERS_OPENGL &&
- backend != LayersBackend::LAYERS_D3D9 &&
backend != LayersBackend::LAYERS_D3D11 &&
backend != LayersBackend::LAYERS_WR &&
backend != LayersBackend::LAYERS_BASIC) {
diff --git a/gfx/layers/ipc/CompositorBridgeParent.cpp b/gfx/layers/ipc/CompositorBridgeParent.cpp
index b30b9405e04f..812f126319a0 100644
--- a/gfx/layers/ipc/CompositorBridgeParent.cpp
+++ b/gfx/layers/ipc/CompositorBridgeParent.cpp
@@ -1579,7 +1579,7 @@ CompositorBridgeParent::AllocPWebRenderBridgeParent(const wr::PipelineId& aPipel
TextureFactoryIdentifier* aTextureFactoryIdentifier,
uint32_t* aIdNamespace)
{
-#ifndef MOZ_ENABLE_WEBRENDER
+#ifndef MOZ_BUILD_WEBRENDER
// Extra guard since this in the parent process and we don't want a malicious
// child process invoking this codepath before it's ready
MOZ_RELEASE_ASSERT(false);
@@ -1614,7 +1614,7 @@ CompositorBridgeParent::AllocPWebRenderBridgeParent(const wr::PipelineId& aPipel
bool
CompositorBridgeParent::DeallocPWebRenderBridgeParent(PWebRenderBridgeParent* aActor)
{
-#ifndef MOZ_ENABLE_WEBRENDER
+#ifndef MOZ_BUILD_WEBRENDER
// Extra guard since this in the parent process and we don't want a malicious
// child process invoking this codepath before it's ready
MOZ_RELEASE_ASSERT(false);
diff --git a/gfx/layers/ipc/CrossProcessCompositorBridgeParent.cpp b/gfx/layers/ipc/CrossProcessCompositorBridgeParent.cpp
index 3f5d968589aa..9369d7317acf 100644
--- a/gfx/layers/ipc/CrossProcessCompositorBridgeParent.cpp
+++ b/gfx/layers/ipc/CrossProcessCompositorBridgeParent.cpp
@@ -203,7 +203,7 @@ CrossProcessCompositorBridgeParent::AllocPWebRenderBridgeParent(const wr::Pipeli
TextureFactoryIdentifier* aTextureFactoryIdentifier,
uint32_t *aIdNamespace)
{
-#ifndef MOZ_ENABLE_WEBRENDER
+#ifndef MOZ_BUILD_WEBRENDER
// Extra guard since this in the parent process and we don't want a malicious
// child process invoking this codepath before it's ready
MOZ_RELEASE_ASSERT(false);
@@ -238,7 +238,7 @@ CrossProcessCompositorBridgeParent::AllocPWebRenderBridgeParent(const wr::Pipeli
bool
CrossProcessCompositorBridgeParent::DeallocPWebRenderBridgeParent(PWebRenderBridgeParent* aActor)
{
-#ifndef MOZ_ENABLE_WEBRENDER
+#ifndef MOZ_BUILD_WEBRENDER
// Extra guard since this in the parent process and we don't want a malicious
// child process invoking this codepath before it's ready
MOZ_RELEASE_ASSERT(false);
diff --git a/gfx/tests/gtest/TestCompositor.cpp b/gfx/tests/gtest/TestCompositor.cpp
index 816c0d011aee..0a8498bb6988 100644
--- a/gfx/tests/gtest/TestCompositor.cpp
+++ b/gfx/tests/gtest/TestCompositor.cpp
@@ -63,9 +63,6 @@ static already_AddRefed<Compositor> CreateTestCompositor(LayersBackend backend,
} else if (backend == LayersBackend::LAYERS_D3D11) {
//compositor = new CompositorD3D11();
MOZ_CRASH(); // No support yet
- } else if (backend == LayersBackend::LAYERS_D3D9) {
- //compositor = new CompositorD3D9(this, mWidget);
- MOZ_CRASH(); // No support yet
#endif
}
nsCString failureReason;
diff --git a/gfx/thebes/gfxPlatform.cpp b/gfx/thebes/gfxPlatform.cpp
index 9a4bd2ae07cd..f61a82a02b57 100644
--- a/gfx/thebes/gfxPlatform.cpp
+++ b/gfx/thebes/gfxPlatform.cpp
@@ -2289,12 +2289,13 @@ gfxPlatform::InitWebRenderConfig()
{
FeatureState& featureWebRender = gfxConfig::GetFeature(Feature::WEBRENDER);
- featureWebRender.EnableByDefault();
+ featureWebRender.DisableByDefault(
+ FeatureStatus::OptIn,
+ "WebRender is an opt-in feature",
+ NS_LITERAL_CSTRING("FEATURE_FAILURE_DEFAULT_OFF"));
- if (!Preferences::GetBool("gfx.webrender.enabled", false)) {
- featureWebRender.UserDisable(
- "User disabled WebRender",
- NS_LITERAL_CSTRING("FEATURE_FAILURE_WEBRENDER_DISABLED"));
+ if (Preferences::GetBool("gfx.webrender.enabled", false)) {
+ featureWebRender.UserEnable("Enabled by pref");
}
// WebRender relies on the GPU process when on Windows
@@ -2314,7 +2315,7 @@ gfxPlatform::InitWebRenderConfig()
NS_LITERAL_CSTRING("FEATURE_FAILURE_SAFE_MODE"));
}
-#ifndef MOZ_ENABLE_WEBRENDER
+#ifndef MOZ_BUILD_WEBRENDER
featureWebRender.ForceDisable(
FeatureStatus::Unavailable,
"Build doesn't include WebRender",
diff --git a/gfx/thebes/gfxWindowsPlatform.cpp b/gfx/thebes/gfxWindowsPlatform.cpp
index 7ad26449ae19..1410fb040298 100755
--- a/gfx/thebes/gfxWindowsPlatform.cpp
+++ b/gfx/thebes/gfxWindowsPlatform.cpp
@@ -1217,6 +1217,14 @@ gfxWindowsPlatform::SetupClearTypeParams()
}
}
+ if (GetDefaultContentBackend() == BackendType::SKIA) {
+ // Skia doesn't support a contrast value outside of 0-1, so default to 1.0
+ if (contrast < 0.0 || contrast > 1.0) {
+ NS_WARNING("Custom dwrite contrast not supported in Skia. Defaulting to 1.0.");
+ contrast = 1.0;
+ }
+ }
+
// For parameters that have not been explicitly set,
// we copy values from default params (or our overridden value for contrast)
if (gamma < 1.0 || gamma > 2.2) {
diff --git a/gfx/webrender_bindings/webrender_ffi.h b/gfx/webrender_bindings/webrender_ffi.h
index b4f5a3c009d4..b662785c0d58 100644
--- a/gfx/webrender_bindings/webrender_ffi.h
+++ b/gfx/webrender_bindings/webrender_ffi.h
@@ -452,7 +452,7 @@ struct WrVecU8 {
// an error and causes the build to fail. So for wr_* functions called by
// destructors in C++ classes, use WR_DESTRUCTOR_SAFE_FUNC instead, which omits
// the unreachable annotation.
-#ifdef MOZ_ENABLE_WEBRENDER
+#ifdef MOZ_BUILD_WEBRENDER
# define WR_INLINE
# define WR_FUNC
# define WR_DESTRUCTOR_SAFE_FUNC
diff --git a/image/FrameAnimator.cpp b/image/FrameAnimator.cpp
index a3ed0a4a69e2..736d722d7a04 100644
--- a/image/FrameAnimator.cpp
+++ b/image/FrameAnimator.cpp
@@ -141,28 +141,33 @@ AnimationState::LoopLength() const
// FrameAnimator implementation.
///////////////////////////////////////////////////////////////////////////////
-TimeStamp
+Maybe<TimeStamp>
FrameAnimator::GetCurrentImgFrameEndTime(AnimationState& aState) const
{
TimeStamp currentFrameTime = aState.mCurrentAnimationFrameTime;
- FrameTimeout timeout = GetTimeoutForFrame(aState.mCurrentAnimationFrameIndex);
+ Maybe<FrameTimeout> timeout = GetTimeoutForFrame(aState, aState.mCurrentAnimationFrameIndex);
- if (timeout == FrameTimeout::Forever()) {
+ if (timeout.isNothing()) {
+ MOZ_ASSERT(aState.GetHasBeenDecoded() && !aState.GetIsCurrentlyDecoded());
+ return Nothing();
+ }
+
+ if (*timeout == FrameTimeout::Forever()) {
// We need to return a sentinel value in this case, because our logic
// doesn't work correctly if we have an infinitely long timeout. We use one
// year in the future as the sentinel because it works with the loop in
// RequestRefresh() below.
// XXX(seth): It'd be preferable to make our logic work correctly with
// infinitely long timeouts.
- return TimeStamp::NowLoRes() +
- TimeDuration::FromMilliseconds(31536000.0);
+ return Some(TimeStamp::NowLoRes() +
+ TimeDuration::FromMilliseconds(31536000.0));
}
TimeDuration durationOfTimeout =
- TimeDuration::FromMilliseconds(double(timeout.AsMilliseconds()));
+ TimeDuration::FromMilliseconds(double(timeout->AsMilliseconds()));
TimeStamp currentFrameEndTime = currentFrameTime + durationOfTimeout;
- return currentFrameEndTime;
+ return Some(currentFrameEndTime);
}
RefreshResult
@@ -238,7 +243,11 @@ FrameAnimator::AdvanceFrame(AnimationState& aState, TimeStamp aTime)
return ret;
}
- if (GetTimeoutForFrame(nextFrameIndex) == FrameTimeout::Forever()) {
+ Maybe<FrameTimeout> nextFrameTimeout = GetTimeoutForFrame(aState, nextFrameIndex);
+ // GetTimeoutForFrame can only return none if frame doesn't exist,
+ // but we just got it above.
+ MOZ_ASSERT(nextFrameTimeout.isSome());
+ if (*nextFrameTimeout == FrameTimeout::Forever()) {
ret.mAnimationFinished = true;
}
@@ -252,7 +261,9 @@ FrameAnimator::AdvanceFrame(AnimationState& aState, TimeStamp aTime)
// something went wrong, move on to next
NS_WARNING("FrameAnimator::AdvanceFrame(): Compositing of frame failed");
nextFrame->SetCompositingFailed(true);
- aState.mCurrentAnimationFrameTime = GetCurrentImgFrameEndTime(aState);
+ Maybe<TimeStamp> currentFrameEndTime = GetCurrentImgFrameEndTime(aState);
+ MOZ_ASSERT(currentFrameEndTime.isSome());
+ aState.mCurrentAnimationFrameTime = *currentFrameEndTime;
aState.mCurrentAnimationFrameIndex = nextFrameIndex;
return ret;
@@ -261,7 +272,9 @@ FrameAnimator::AdvanceFrame(AnimationState& aState, TimeStamp aTime)
nextFrame->SetCompositingFailed(false);
}
- aState.mCurrentAnimationFrameTime = GetCurrentImgFrameEndTime(aState);
+ Maybe<TimeStamp> currentFrameEndTime = GetCurrentImgFrameEndTime(aState);
+ MOZ_ASSERT(currentFrameEndTime.isSome());
+ aState.mCurrentAnimationFrameTime = *currentFrameEndTime;
// If we can get closer to the current time by a multiple of the image's loop
// time, we should. We can only do this if we're done decoding; otherwise, we
@@ -301,10 +314,18 @@ FrameAnimator::RequestRefresh(AnimationState& aState, const TimeStamp& aTime)
// only advance the frame if the current time is greater than or
// equal to the current frame's end time.
- TimeStamp currentFrameEndTime = GetCurrentImgFrameEndTime(aState);
+ Maybe<TimeStamp> currentFrameEndTime = GetCurrentImgFrameEndTime(aState);
+ if (currentFrameEndTime.isNothing()) {
+ MOZ_ASSERT(gfxPrefs::ImageMemAnimatedDiscardable());
+ MOZ_ASSERT(aState.GetHasBeenDecoded() && !aState.GetIsCurrentlyDecoded());
+ MOZ_ASSERT(aState.mCompositedFrameInvalid);
+ // Nothing we can do but wait for our previous current frame to be decoded
+ // again so we can determine what to do next.
+ return ret;
+ }
- while (currentFrameEndTime <= aTime) {
- TimeStamp oldFrameEndTime = currentFrameEndTime;
+ while (*currentFrameEndTime <= aTime) {
+ TimeStamp oldFrameEndTime = *currentFrameEndTime;
RefreshResult frameRes = AdvanceFrame(aState, aTime);
@@ -312,17 +333,19 @@ FrameAnimator::RequestRefresh(AnimationState& aState, const TimeStamp& aTime)
ret.Accumulate(frameRes);
currentFrameEndTime = GetCurrentImgFrameEndTime(aState);
+ // AdvanceFrame can't advance to a frame that doesn't exist yet.
+ MOZ_ASSERT(currentFrameEndTime.isSome());
// If we didn't advance a frame, and our frame end time didn't change,
// then we need to break out of this loop & wait for the frame(s)
// to finish downloading.
- if (!frameRes.mFrameAdvanced && (currentFrameEndTime == oldFrameEndTime)) {
+ if (!frameRes.mFrameAdvanced && (*currentFrameEndTime == oldFrameEndTime)) {
break;
}
}
// Advanced to the correct frame, the composited frame is now valid to be drawn.
- if (currentFrameEndTime > aTime) {
+ if (*currentFrameEndTime > aTime) {
aState.mCompositedFrameInvalid = false;
}
@@ -371,17 +394,18 @@ FrameAnimator::GetCompositedFrame(AnimationState& aState)
return result;
}
-FrameTimeout
-FrameAnimator::GetTimeoutForFrame(uint32_t aFrameNum) const
+Maybe
+FrameAnimator::GetTimeoutForFrame(AnimationState& aState,
+ uint32_t aFrameNum) const
{
RawAccessFrameRef frame = GetRawFrame(aFrameNum);
if (frame) {
AnimationData data = frame->GetAnimationData();
- return data.mTimeout;
+ return Some(data.mTimeout);
}
- NS_WARNING("No frame; called GetTimeoutForFrame too early?");
- return FrameTimeout::FromRawMilliseconds(100);
+ MOZ_ASSERT(aState.mHasBeenDecoded && !aState.mIsCurrentlyDecoded);
+ return Nothing();
}
static void
diff --git a/image/FrameAnimator.h b/image/FrameAnimator.h
index 998a79f66db3..2f1cbe9eacca 100644
--- a/image/FrameAnimator.h
+++ b/image/FrameAnimator.h
@@ -312,15 +312,17 @@ private: // methods
*/
RawAccessFrameRef GetRawFrame(uint32_t aFrameNum) const;
- /// @return the given frame's timeout.
- FrameTimeout GetTimeoutForFrame(uint32_t aFrameNum) const;
+ /// @return the given frame's timeout if it is available
+ Maybe GetTimeoutForFrame(AnimationState& aState,
+ uint32_t aFrameNum) const;
/**
* Get the time the frame we're currently displaying is supposed to end.
*
- * In the error case, returns an "infinity" timestamp.
+ * In the error case (like if the requested frame is not currently
+ * decoded), returns Nothing().
*/
- TimeStamp GetCurrentImgFrameEndTime(AnimationState& aState) const;
+ Maybe GetCurrentImgFrameEndTime(AnimationState& aState) const;
bool DoBlend(gfx::IntRect* aDirtyRect,
uint32_t aPrevFrameIndex,
diff --git a/image/RasterImage.cpp b/image/RasterImage.cpp
index e2c28f773f6f..3796041881b1 100644
--- a/image/RasterImage.cpp
+++ b/image/RasterImage.cpp
@@ -107,6 +107,9 @@ RasterImage::~RasterImage()
// Record Telemetry.
Telemetry::Accumulate(Telemetry::IMAGE_DECODE_COUNT, mDecodeCount);
+ if (mAnimationState) {
+ Telemetry::Accumulate(Telemetry::IMAGE_ANIMATED_DECODE_COUNT, mDecodeCount);
+ }
}
nsresult
@@ -1428,6 +1431,11 @@ RasterImage::Draw(gfxContext* aContext,
TimeDuration drawLatency = TimeStamp::Now() - mDrawStartTime;
Telemetry::Accumulate(Telemetry::IMAGE_DECODE_ON_DRAW_LATENCY,
int32_t(drawLatency.ToMicroseconds()));
+ if (mAnimationState) {
+ Telemetry::Accumulate(Telemetry::IMAGE_ANIMATED_DECODE_ON_DRAW_LATENCY,
+ int32_t(drawLatency.ToMicroseconds()));
+
+ }
mDrawStartTime = TimeStamp();
}
@@ -1677,6 +1685,11 @@ RasterImage::NotifyDecodeComplete(const DecoderFinalStatus& aStatus,
Telemetry::Accumulate(Telemetry::IMAGE_DECODE_TIME,
int32_t(aTelemetry.mDecodeTime.ToMicroseconds()));
+ if (mAnimationState) {
+ Telemetry::Accumulate(Telemetry::IMAGE_ANIMATED_DECODE_TIME,
+ int32_t(aTelemetry.mDecodeTime.ToMicroseconds()));
+ }
+
if (aTelemetry.mSpeedHistogram) {
Telemetry::Accumulate(*aTelemetry.mSpeedHistogram, aTelemetry.Speed());
}
diff --git a/ipc/ipdl/ipdl.py b/ipc/ipdl/ipdl.py
index d94e8536b8a6..de0132168996 100755
--- a/ipc/ipdl/ipdl.py
+++ b/ipc/ipdl/ipdl.py
@@ -151,7 +151,9 @@ for f in files:
log(3, ' pretty printed code:')
ipdl.genipdl(ast, codedir)
-ipdl.checkFixedSyncMessages(parser)
+if not ipdl.checkFixedSyncMessages(parser):
+ # Errors have already been printed to stderr, just exit
+ sys.exit(1)
# Second pass: generate code
for f in files:
diff --git a/ipc/ipdl/ipdl/checker.py b/ipc/ipdl/ipdl/checker.py
index df836e9bcc00..114380035625 100644
--- a/ipc/ipdl/ipdl/checker.py
+++ b/ipc/ipdl/ipdl/checker.py
@@ -55,6 +55,7 @@ def checkSyncMessage(tu, syncMsgList, errout=sys.stderr):
def checkFixedSyncMessages(config, errout=sys.stderr):
fixed = SyncMessageChecker.getFixedSyncMessages()
+ error_free = True
for item in fixed:
protocol = item.split('::')[0]
# Ignore things like sync messages in test protocols we didn't compile.
@@ -63,3 +64,5 @@ def checkFixedSyncMessages(config, errout=sys.stderr):
'platform' not in config.options(item):
print >>errout, 'Error: Sync IPC message %s not found, it appears to be fixed.\n' \
'Please remove it from sync-messages.ini.' % item
+ error_free = False
+ return error_free
diff --git a/ipc/ipdl/sync-messages.ini b/ipc/ipdl/sync-messages.ini
index 46ef0984c0d8..ac446f695ca9 100644
--- a/ipc/ipdl/sync-messages.ini
+++ b/ipc/ipdl/sync-messages.ini
@@ -233,244 +233,364 @@ description =
# A11y code
[PDocAccessible::State]
description =
+platform = notwin
[PDocAccessible::NativeState]
description =
+platform = notwin
[PDocAccessible::Name]
description =
+platform = notwin
[PDocAccessible::Value]
description =
+platform = notwin
[PDocAccessible::Help]
description =
+platform = notwin
[PDocAccessible::Description]
description =
+platform = notwin
[PDocAccessible::Attributes]
description =
+platform = notwin
[PDocAccessible::RelationByType]
description =
+platform = notwin
[PDocAccessible::Relations]
description =
+platform = notwin
[PDocAccessible::IsSearchbox]
description =
+platform = notwin
[PDocAccessible::LandmarkRole]
description =
+platform = notwin
[PDocAccessible::ARIARoleAtom]
description =
+platform = notwin
[PDocAccessible::GetLevelInternal]
description =
+platform = notwin
[PDocAccessible::CaretLineNumber]
description =
+platform = notwin
[PDocAccessible::CaretOffset]
description =
+platform = notwin
[PDocAccessible::CharacterCount]
description =
+platform = notwin
[PDocAccessible::SelectionCount]
description =
+platform = notwin
[PDocAccessible::TextSubstring]
description =
+platform = notwin
[PDocAccessible::GetTextAfterOffset]
description =
+platform = notwin
[PDocAccessible::GetTextAtOffset]
description =
+platform = notwin
[PDocAccessible::GetTextBeforeOffset]
description =
+platform = notwin
[PDocAccessible::CharAt]
description =
+platform = notwin
[PDocAccessible::TextAttributes]
description =
+platform = notwin
[PDocAccessible::DefaultTextAttributes]
description =
+platform = notwin
[PDocAccessible::TextBounds]
description =
+platform = notwin
[PDocAccessible::CharBounds]
description =
+platform = notwin
[PDocAccessible::OffsetAtPoint]
description =
+platform = notwin
[PDocAccessible::SelectionBoundsAt]
description =
+platform = notwin
[PDocAccessible::SetSelectionBoundsAt]
description =
+platform = notwin
[PDocAccessible::AddToSelection]
description =
+platform = notwin
[PDocAccessible::RemoveFromSelection]
description =
+platform = notwin
[PDocAccessible::Text]
description =
+platform = notwin
[PDocAccessible::ReplaceText]
description =
+platform = notwin
[PDocAccessible::InsertText]
description =
+platform = notwin
[PDocAccessible::CopyText]
description =
+platform = notwin
[PDocAccessible::CutText]
description =
+platform = notwin
[PDocAccessible::DeleteText]
description =
+platform = notwin
[PDocAccessible::PasteText]
description =
+platform = notwin
[PDocAccessible::ImagePosition]
description =
+platform = notwin
[PDocAccessible::ImageSize]
description =
+platform = notwin
[PDocAccessible::StartOffset]
description =
+platform = notwin
[PDocAccessible::EndOffset]
description =
+platform = notwin
[PDocAccessible::IsLinkValid]
description =
+platform = notwin
[PDocAccessible::AnchorCount]
description =
+platform = notwin
[PDocAccessible::AnchorURIAt]
description =
+platform = notwin
[PDocAccessible::AnchorAt]
description =
+platform = notwin
[PDocAccessible::LinkCount]
description =
+platform = notwin
[PDocAccessible::LinkAt]
description =
+platform = notwin
[PDocAccessible::LinkIndexOf]
description =
+platform = notwin
[PDocAccessible::LinkIndexAtOffset]
description =
+platform = notwin
[PDocAccessible::TableOfACell]
description =
+platform = notwin
[PDocAccessible::ColIdx]
description =
+platform = notwin
[PDocAccessible::RowIdx]
description =
+platform = notwin
[PDocAccessible::GetPosition]
description =
+platform = notwin
[PDocAccessible::ColExtent]
description =
+platform = notwin
[PDocAccessible::RowExtent]
description =
+platform = notwin
[PDocAccessible::GetColRowExtents]
description =
+platform = notwin
[PDocAccessible::ColHeaderCells]
description =
+platform = notwin
[PDocAccessible::RowHeaderCells]
description =
+platform = notwin
[PDocAccessible::IsCellSelected]
description =
+platform = notwin
[PDocAccessible::TableCaption]
description =
+platform = notwin
[PDocAccessible::TableSummary]
description =
+platform = notwin
[PDocAccessible::TableColumnCount]
description =
+platform = notwin
[PDocAccessible::TableRowCount]
description =
+platform = notwin
[PDocAccessible::TableCellAt]
description =
+platform = notwin
[PDocAccessible::TableCellIndexAt]
description =
+platform = notwin
[PDocAccessible::TableColumnIndexAt]
description =
+platform = notwin
[PDocAccessible::TableRowIndexAt]
description =
+platform = notwin
[PDocAccessible::TableRowAndColumnIndicesAt]
description =
+platform = notwin
[PDocAccessible::TableColumnExtentAt]
description =
+platform = notwin
[PDocAccessible::TableRowExtentAt]
description =
+platform = notwin
[PDocAccessible::TableColumnDescription]
description =
+platform = notwin
[PDocAccessible::TableRowDescription]
description =
+platform = notwin
[PDocAccessible::TableColumnSelected]
description =
+platform = notwin
[PDocAccessible::TableRowSelected]
description =
+platform = notwin
[PDocAccessible::TableCellSelected]
description =
+platform = notwin
[PDocAccessible::TableSelectedCellCount]
description =
+platform = notwin
[PDocAccessible::TableSelectedColumnCount]
description =
+platform = notwin
[PDocAccessible::TableSelectedRowCount]
description =
+platform = notwin
[PDocAccessible::TableSelectedCells]
description =
+platform = notwin
[PDocAccessible::TableSelectedCellIndices]
description =
+platform = notwin
[PDocAccessible::TableSelectedColumnIndices]
description =
+platform = notwin
[PDocAccessible::TableSelectedRowIndices]
description =
+platform = notwin
[PDocAccessible::TableSelectColumn]
description =
+platform = notwin
[PDocAccessible::TableSelectRow]
description =
+platform = notwin
[PDocAccessible::TableUnselectColumn]
description =
+platform = notwin
[PDocAccessible::TableUnselectRow]
description =
+platform = notwin
[PDocAccessible::TableIsProbablyForLayout]
description =
+platform = notwin
[PDocAccessible::AtkTableColumnHeader]
description =
+platform = notwin
[PDocAccessible::AtkTableRowHeader]
description =
+platform = notwin
[PDocAccessible::SelectedItems]
description =
+platform = notwin
[PDocAccessible::SelectedItemCount]
description =
+platform = notwin
[PDocAccessible::GetSelectedItem]
description =
+platform = notwin
[PDocAccessible::IsItemSelected]
description =
+platform = notwin
[PDocAccessible::AddItemToSelection]
description =
+platform = notwin
[PDocAccessible::RemoveItemFromSelection]
description =
+platform = notwin
[PDocAccessible::SelectAll]
description =
+platform = notwin
[PDocAccessible::UnselectAll]
description =
+platform = notwin
[PDocAccessible::DoAction]
description =
+platform = notwin
[PDocAccessible::ActionCount]
description =
+platform = notwin
[PDocAccessible::ActionDescriptionAt]
description =
+platform = notwin
[PDocAccessible::ActionNameAt]
description =
+platform = notwin
[PDocAccessible::AccessKey]
description =
+platform = notwin
[PDocAccessible::KeyboardShortcut]
description =
+platform = notwin
[PDocAccessible::AtkKeyBinding]
description =
+platform = notwin
[PDocAccessible::CurValue]
description =
+platform = notwin
[PDocAccessible::SetCurValue]
description =
+platform = notwin
[PDocAccessible::MinValue]
description =
+platform = notwin
[PDocAccessible::MaxValue]
description =
+platform = notwin
[PDocAccessible::Step]
description =
+platform = notwin
[PDocAccessible::FocusedChild]
description =
+platform = notwin
[PDocAccessible::Language]
description =
+platform = notwin
[PDocAccessible::DocType]
description =
+platform = notwin
[PDocAccessible::Title]
description =
+platform = notwin
[PDocAccessible::URL]
description =
+platform = notwin
[PDocAccessible::MimeType]
description =
+platform = notwin
[PDocAccessible::URLDocTypeMimeType]
description =
+platform = notwin
[PDocAccessible::AccessibleAtPoint]
description =
+platform = notwin
[PDocAccessible::Extents]
description =
+platform = notwin
[PDocAccessible::DOMNodeID]
description =
+platform = notwin
[PDocAccessible::GetWindowedPluginIAccessible]
description =
platform = win
diff --git a/js/public/ProfilingFrameIterator.h b/js/public/ProfilingFrameIterator.h
index e769c269a339..550ac518a193 100644
--- a/js/public/ProfilingFrameIterator.h
+++ b/js/public/ProfilingFrameIterator.h
@@ -93,9 +93,10 @@ class MOZ_NON_PARAM JS_PUBLIC_API(ProfilingFrameIterator)
public:
struct RegisterState
{
- RegisterState() : pc(nullptr), sp(nullptr), lr(nullptr) {}
+ RegisterState() : pc(nullptr), sp(nullptr), fp(nullptr), lr(nullptr) {}
void* pc;
void* sp;
+ void* fp;
void* lr;
};
diff --git a/js/src/gc/RootMarking.cpp b/js/src/gc/RootMarking.cpp
index d7905a97f92a..c41825be8d55 100644
--- a/js/src/gc/RootMarking.cpp
+++ b/js/src/gc/RootMarking.cpp
@@ -330,6 +330,7 @@ js::gc::GCRuntime::traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrM
// Trace active interpreter and JIT stack roots.
TraceInterpreterActivations(cx, target, trc);
jit::TraceJitActivations(cx, target, trc);
+ wasm::TraceActivations(cx, target, trc);
// Trace legacy C stack roots.
AutoGCRooter::traceAll(target, trc);
diff --git a/js/src/jit-test/tests/asm.js/testAtomics.js b/js/src/jit-test/tests/asm.js/testAtomics.js
index 5b30fb622034..89582fe686f7 100644
--- a/js/src/jit-test/tests/asm.js/testAtomics.js
+++ b/js/src/jit-test/tests/asm.js/testAtomics.js
@@ -1872,15 +1872,51 @@ setARMHwCapFlags('vfp');
asmCompile('stdlib', 'ffi', 'heap',
USE_ASM + `
+ var atomic_cmpxchg = stdlib.Atomics.compareExchange;
var atomic_exchange = stdlib.Atomics.exchange;
+ var atomic_add = stdlib.Atomics.add;
+ var atomic_sub = stdlib.Atomics.sub;
+ var atomic_and = stdlib.Atomics.and;
+ var atomic_or = stdlib.Atomics.or;
+ var atomic_xor = stdlib.Atomics.xor;
var i8a = new stdlib.Int8Array(heap);
+ function do_cas() {
+ var v = 0;
+ v = atomic_cmpxchg(i8a, 100, 0, -1);
+ return v|0;
+ }
function do_xchg() {
var v = 0;
v = atomic_exchange(i8a, 200, 37);
return v|0;
}
+ function do_add() {
+ var v = 0;
+ v = atomic_add(i8a, 10, 37);
+ return v|0;
+ }
+ function do_sub() {
+ var v = 0;
+ v = atomic_sub(i8a, 10, 37);
+ return v|0;
+ }
+ function do_and() {
+ var v = 0;
+ v = atomic_and(i8a, 10, 37);
+ return v|0;
+ }
+ function do_or() {
+ var v = 0;
+ v = atomic_or(i8a, 10, 37);
+ return v|0;
+ }
+ function do_xor() {
+ var v = 0;
+ v = atomic_xor(i8a, 10, 37);
+ return v|0;
+ }
- return { xchg: do_xchg }
+ return { cas:do_cas, xchg: do_xchg, add: do_add, sub: do_sub, and: do_and, or: do_or, xor: do_xor }
`);
diff --git a/js/src/jit-test/tests/asm.js/testProfiling.js b/js/src/jit-test/tests/asm.js/testProfiling.js
index 7c3f348e8b20..1c47da213bf2 100644
--- a/js/src/jit-test/tests/asm.js/testProfiling.js
+++ b/js/src/jit-test/tests/asm.js/testProfiling.js
@@ -69,7 +69,7 @@ var f = asmLink(asmCompile('global','ffis',USE_ASM + "var ffi=ffis.ffi; function
f(0);
assertStackContainsSeq(stacks, "");
f(+1);
-assertStackContainsSeq(stacks, "");
+assertStackContainsSeq(stacks, "<,g,f,>");
f(0);
assertStackContainsSeq(stacks, "<,g,f,>");
f(-1);
@@ -112,7 +112,7 @@ function testBuiltinD2D(name) {
enableSingleStepProfiling();
assertEq(f(.1), eval("Math." + name + "(.1)"));
var stacks = disableSingleStepProfiling();
- assertStackContainsSeq(stacks, ">,f,>,native call,>,f,>,>");
+ assertStackContainsSeq(stacks, ">,f,>,f,>,>");
}
}
for (name of ['sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'ceil', 'floor', 'exp', 'log'])
@@ -125,7 +125,7 @@ function testBuiltinF2F(name) {
enableSingleStepProfiling();
assertEq(f(.1), eval("Math.fround(Math." + name + "(Math.fround(.1)))"));
var stacks = disableSingleStepProfiling();
- assertStackContainsSeq(stacks, ">,f,>,native call,>,f,>,>");
+ assertStackContainsSeq(stacks, ">,f,>,f,>,>");
}
}
for (name of ['ceil', 'floor'])
@@ -138,7 +138,7 @@ function testBuiltinDD2D(name) {
enableSingleStepProfiling();
assertEq(f(.1, .2), eval("Math." + name + "(.1, .2)"));
var stacks = disableSingleStepProfiling();
- assertStackContainsSeq(stacks, ">,f,>,native call,>,f,>,>");
+ assertStackContainsSeq(stacks, ">,f,>,f,>,>");
}
}
for (name of ['atan2', 'pow'])
diff --git a/js/src/jit-test/tests/debug/wasm-onExceptionUnwind-gc.js b/js/src/jit-test/tests/debug/wasm-onExceptionUnwind-gc.js
new file mode 100644
index 000000000000..ca191ea0f772
--- /dev/null
+++ b/js/src/jit-test/tests/debug/wasm-onExceptionUnwind-gc.js
@@ -0,0 +1,50 @@
+
+if (!wasmIsSupported())
+ quit();
+
+var sandbox = newGlobal();
+var dbg = new Debugger(sandbox);
+var counter = 0;
+dbg.onExceptionUnwind = (frame, value) => {
+ if (frame.type !== "wasmcall")
+ return;
+ if (++counter != 2)
+ return;
+ gc();
+};
+
+sandbox.innerCode = wasmTextToBinary(`(module
+ (import "imports" "tbl" (table 1 anyfunc))
+ (import $setNull "imports" "setNull" (func))
+ (func $trap
+ call $setNull
+ unreachable
+ )
+ (elem (i32.const 0) $trap)
+)`);
+sandbox.outerCode = wasmTextToBinary(`(module
+ (import "imports" "tbl" (table 1 anyfunc))
+ (type $v2v (func))
+ (func (export "run")
+ i32.const 0
+ call_indirect $v2v
+ )
+)`);
+
+sandbox.eval(`
+(function() {
+
+var tbl = new WebAssembly.Table({initial:1, element:"anyfunc"});
+function setNull() { tbl.set(0, null) }
+new WebAssembly.Instance(new WebAssembly.Module(innerCode), {imports:{tbl,setNull}});
+var outer = new WebAssembly.Instance(new WebAssembly.Module(outerCode), {imports:{tbl}});
+var caught;
+try {
+ outer.exports.run();
+} catch (e) {
+ caught = e;
+}
+assertEq(caught instanceof WebAssembly.RuntimeError, true);
+
+})();
+`);
diff --git a/js/src/jit-test/tests/wasm/profiling.js b/js/src/jit-test/tests/wasm/profiling.js
index 108c3c5d978e..45b169e70b40 100644
--- a/js/src/jit-test/tests/wasm/profiling.js
+++ b/js/src/jit-test/tests/wasm/profiling.js
@@ -125,7 +125,7 @@ testError(
(func (export "") (call $foo))
)`,
WebAssembly.RuntimeError,
-["", ">", "1,>", "0,1,>", "trap handling,0,1,>", "inline stub,0,1,>", "trap handling,0,1,>", ""]);
+["", ">", "1,>", "0,1,>", "interstitial,0,1,>", "trap handling,0,1,>", ""]);
testError(
`(module
@@ -140,7 +140,7 @@ WebAssembly.RuntimeError,
// Technically we have this one *one-instruction* interval where
// the caller is lost (the stack with "1,>"). It's annoying to fix and shouldn't
// mess up profiles in practice so we ignore it.
-["", ">", "0,>", "1,0,>", "1,>", "trap handling,0,>", "inline stub,0,>", "trap handling,0,>", ""]);
+["", ">", "0,>", "1,0,>", "1,>", "trap handling,0,>", ""]);
(function() {
var e = wasmEvalText(`
diff --git a/js/src/jit-test/tests/wasm/timeout/1.js b/js/src/jit-test/tests/wasm/timeout/1.js
new file mode 100644
index 000000000000..3bcbf8034963
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/timeout/1.js
@@ -0,0 +1,17 @@
+// |jit-test| exitstatus: 6;
+
+// Don't include wasm.js in timeout tests: when wasm isn't supported, it will
+// quit(0) which will cause the test to fail.
+if (!wasmIsSupported())
+ quit(6);
+
+var code = wasmTextToBinary(`(module
+ (func (export "iloop")
+ (loop $top br $top)
+ )
+)`);
+
+var i = new WebAssembly.Instance(new WebAssembly.Module(code));
+timeout(1);
+i.exports.iloop();
+assertEq(true, false);
diff --git a/js/src/jit-test/tests/wasm/timeout/2.js b/js/src/jit-test/tests/wasm/timeout/2.js
new file mode 100644
index 000000000000..ef84b00470d7
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/timeout/2.js
@@ -0,0 +1,31 @@
+// |jit-test| exitstatus: 6;
+
+// Don't include wasm.js in timeout tests: when wasm isn't supported, it will
+// quit(0) which will cause the test to fail.
+if (!wasmIsSupported())
+ quit(6);
+
+var tbl = new WebAssembly.Table({initial:1, element:"anyfunc"});
+
+new WebAssembly.Instance(new WebAssembly.Module(wasmTextToBinary(`(module
+ (func $iloop
+ loop $top
+ br $top
+ end
+ )
+ (import "imports" "tbl" (table 1 anyfunc))
+ (elem (i32.const 0) $iloop)
+)`)), {imports:{tbl}});
+
+var outer = new WebAssembly.Instance(new WebAssembly.Module(wasmTextToBinary(`(module
+ (import "imports" "tbl" (table 1 anyfunc))
+ (type $v2v (func))
+ (func (export "run")
+ i32.const 0
+ call_indirect $v2v
+ )
+)`)), {imports:{tbl}});
+
+timeout(1, () => { tbl.set(0, null); gc() });
+outer.exports.run();
+assertEq(true, false);
diff --git a/js/src/jit-test/tests/wasm/timeout/directives.txt b/js/src/jit-test/tests/wasm/timeout/directives.txt
new file mode 100644
index 000000000000..8262f0bbae1f
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/timeout/directives.txt
@@ -0,0 +1,2 @@
+|jit-test| test-also-wasm-baseline
+
diff --git a/js/src/jit/CompileInfo.h b/js/src/jit/CompileInfo.h
index a73677c14d54..92210679e092 100644
--- a/js/src/jit/CompileInfo.h
+++ b/js/src/jit/CompileInfo.h
@@ -447,14 +447,18 @@ class CompileInfo
// the frame is active on the stack. This implies that these definitions
// would have to be executed and that they cannot be removed even if they
// are unused.
- bool isObservableSlot(uint32_t slot) const {
- if (isObservableFrameSlot(slot))
- return true;
+ inline bool isObservableSlot(uint32_t slot) const {
+ if (slot >= firstLocalSlot()) {
+ // The |this| slot for a derived class constructor is a local slot.
+ if (thisSlotForDerivedClassConstructor_)
+ return *thisSlotForDerivedClassConstructor_ == slot;
+ return false;
+ }
- if (isObservableArgumentSlot(slot))
- return true;
+ if (slot < firstArgSlot())
+ return isObservableFrameSlot(slot);
- return false;
+ return isObservableArgumentSlot(slot);
}
bool isObservableFrameSlot(uint32_t slot) const {
diff --git a/js/src/jit/Ion.cpp b/js/src/jit/Ion.cpp
index d7a51b51083b..df992177e6a3 100644
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -1861,6 +1861,17 @@ OptimizeMIR(MIRGenerator* mir)
return false;
}
+ // BCE marks bounds checks as dead, so do BCE before DCE.
+ if (mir->compilingWasm() && !JitOptions.wasmAlwaysCheckBounds) {
+ if (!EliminateBoundsChecks(mir, graph))
+ return false;
+ gs.spewPass("Redundant Bounds Check Elimination");
+ AssertGraphCoherency(graph);
+
+ if (mir->shouldCancel("BCE"))
+ return false;
+ }
+
{
AutoTraceLog log(logger, TraceLogger_EliminateDeadCode);
if (!EliminateDeadCode(mir, graph))
@@ -1933,13 +1944,6 @@ OptimizeMIR(MIRGenerator* mir)
AssertGraphCoherency(graph);
}
- if (mir->compilingWasm()) {
- if (!EliminateBoundsChecks(mir, graph))
- return false;
- gs.spewPass("Redundant Bounds Check Elimination");
- AssertGraphCoherency(graph);
- }
-
AssertGraphCoherency(graph, /* force = */ true);
DumpMIRExpressions(graph);
diff --git a/js/src/jit/IonAnalysis.cpp b/js/src/jit/IonAnalysis.cpp
index 7de6715b4398..28c5460c8a05 100644
--- a/js/src/jit/IonAnalysis.cpp
+++ b/js/src/jit/IonAnalysis.cpp
@@ -196,6 +196,8 @@ FlagPhiInputsAsHavingRemovedUses(MIRGenerator* mir, MBasicBlock* block, MBasicBl
static bool
FlagAllOperandsAsHavingRemovedUses(MIRGenerator* mir, MBasicBlock* block)
{
+ const CompileInfo& info = block->info();
+
// Flag all instructions operands as having removed uses.
MInstructionIterator end = block->end();
for (MInstructionIterator it = block->begin(); it != end; it++) {
@@ -210,8 +212,10 @@ FlagAllOperandsAsHavingRemovedUses(MIRGenerator* mir, MBasicBlock* block)
if (MResumePoint* rp = ins->resumePoint()) {
// Note: no need to iterate over the caller's of the resume point as
// this is the same as the entry resume point.
- for (size_t i = 0, e = rp->numOperands(); i < e; i++)
- rp->getOperand(i)->setUseRemovedUnchecked();
+ for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
+ if (info.isObservableSlot(i))
+ rp->getOperand(i)->setUseRemovedUnchecked();
+ }
}
}
@@ -221,8 +225,10 @@ FlagAllOperandsAsHavingRemovedUses(MIRGenerator* mir, MBasicBlock* block)
if (mir->shouldCancel("FlagAllOperandsAsHavingRemovedUses loop 2"))
return false;
- for (size_t i = 0, e = rp->numOperands(); i < e; i++)
- rp->getOperand(i)->setUseRemovedUnchecked();
+ for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
+ if (info.isObservableSlot(i))
+ rp->getOperand(i)->setUseRemovedUnchecked();
+ }
rp = rp->caller();
}
diff --git a/js/src/jit/Lowering.cpp b/js/src/jit/Lowering.cpp
index f93603c6ab84..7c090b6f9cd5 100644
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -4278,10 +4278,7 @@ LIRGenerator::visitWasmBoundsCheck(MWasmBoundsCheck* ins)
#ifdef WASM_HUGE_MEMORY
MOZ_CRASH("No bounds checking on huge memory");
#else
- if (ins->isRedundant()) {
- if (MOZ_LIKELY(!JitOptions.wasmAlwaysCheckBounds))
- return;
- }
+ MOZ_ASSERT(!ins->isRedundant());
MDefinition* index = ins->index();
MOZ_ASSERT(index->type() == MIRType::Int32);
@@ -4369,13 +4366,7 @@ LIRGenerator::visitWasmReturn(MWasmReturn* ins)
MDefinition* rval = ins->getOperand(0);
if (rval->type() == MIRType::Int64) {
- LWasmReturnI64* lir = new(alloc()) LWasmReturnI64(useInt64Fixed(rval, ReturnReg64));
-
- // Preserve the TLS pointer we were passed in `WasmTlsReg`.
- MDefinition* tlsPtr = ins->getOperand(1);
- lir->setOperand(INT64_PIECES, useFixed(tlsPtr, WasmTlsReg));
-
- add(lir);
+ add(new(alloc()) LWasmReturnI64(useInt64Fixed(rval, ReturnReg64)));
return;
}
@@ -4391,23 +4382,13 @@ LIRGenerator::visitWasmReturn(MWasmReturn* ins)
else
MOZ_CRASH("Unexpected wasm return type");
- // Preserve the TLS pointer we were passed in `WasmTlsReg`.
- MDefinition* tlsPtr = ins->getOperand(1);
- lir->setOperand(1, useFixed(tlsPtr, WasmTlsReg));
-
add(lir);
}
void
LIRGenerator::visitWasmReturnVoid(MWasmReturnVoid* ins)
{
- auto* lir = new(alloc()) LWasmReturnVoid;
-
- // Preserve the TLS pointer we were passed in `WasmTlsReg`.
- MDefinition* tlsPtr = ins->getOperand(0);
- lir->setOperand(0, useFixed(tlsPtr, WasmTlsReg));
-
- add(lir);
+ add(new(alloc()) LWasmReturnVoid);
}
void
diff --git a/js/src/jit/MIR.cpp b/js/src/jit/MIR.cpp
index 44e8c026fe47..02ee711a89e6 100644
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -3171,7 +3171,8 @@ MBinaryArithInstruction::foldsTo(TempAllocator& alloc)
if (isTruncated()) {
if (!folded->block())
block()->insertBefore(this, folded);
- return MTruncateToInt32::New(alloc, folded);
+ if (folded->type() != MIRType::Int32)
+ return MTruncateToInt32::New(alloc, folded);
}
return folded;
}
@@ -5700,10 +5701,9 @@ MWasmUnsignedToFloat32::foldsTo(TempAllocator& alloc)
MWasmCall*
MWasmCall::New(TempAllocator& alloc, const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee,
- const Args& args, MIRType resultType, uint32_t spIncrement, uint32_t tlsStackOffset,
- MDefinition* tableIndex)
+ const Args& args, MIRType resultType, uint32_t spIncrement, MDefinition* tableIndex)
{
- MWasmCall* call = new(alloc) MWasmCall(desc, callee, spIncrement, tlsStackOffset);
+ MWasmCall* call = new(alloc) MWasmCall(desc, callee, spIncrement);
call->setResultType(resultType);
if (!call->argRegs_.init(alloc, args.length()))
@@ -5729,12 +5729,10 @@ MWasmCall::NewBuiltinInstanceMethodCall(TempAllocator& alloc,
const ABIArg& instanceArg,
const Args& args,
MIRType resultType,
- uint32_t spIncrement,
- uint32_t tlsStackOffset)
+ uint32_t spIncrement)
{
auto callee = wasm::CalleeDesc::builtinInstanceMethod(builtin);
- MWasmCall* call = MWasmCall::New(alloc, desc, callee, args, resultType, spIncrement,
- tlsStackOffset, nullptr);
+ MWasmCall* call = MWasmCall::New(alloc, desc, callee, args, resultType, spIncrement, nullptr);
if (!call)
return nullptr;
diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h
index 596ff609460d..1b67d16d56df 100644
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -13718,15 +13718,14 @@ class MWasmBoundsCheck
: public MBinaryInstruction,
public NoTypePolicy::Data
{
- bool redundant_;
wasm::TrapOffset trapOffset_;
explicit MWasmBoundsCheck(MDefinition* index, MDefinition* boundsCheckLimit, wasm::TrapOffset trapOffset)
: MBinaryInstruction(index, boundsCheckLimit),
- redundant_(false),
trapOffset_(trapOffset)
{
- setGuard(); // Effectful: throws for OOB.
+ // Bounds check is effectful: it throws for OOB.
+ setGuard();
}
public:
@@ -13739,11 +13738,11 @@ class MWasmBoundsCheck
}
bool isRedundant() const {
- return redundant_;
+ return !isGuard();
}
- void setRedundant(bool val) {
- redundant_ = val;
+ void setRedundant() {
+ setNotGuard();
}
wasm::TrapOffset trapOffset() const {
@@ -14247,12 +14246,11 @@ class MWasmParameter : public MNullaryInstruction
};
class MWasmReturn
- : public MAryControlInstruction<2, 0>,
+ : public MAryControlInstruction<1, 0>,
public NoTypePolicy::Data
{
- explicit MWasmReturn(MDefinition* ins, MDefinition* tlsPtr) {
+ explicit MWasmReturn(MDefinition* ins) {
initOperand(0, ins);
- initOperand(1, tlsPtr);
}
public:
@@ -14261,13 +14259,9 @@ class MWasmReturn
};
class MWasmReturnVoid
- : public MAryControlInstruction<1, 0>,
+ : public MAryControlInstruction<0, 0>,
public NoTypePolicy::Data
{
- explicit MWasmReturnVoid(MDefinition* tlsPtr) {
- initOperand(0, tlsPtr);
- }
-
public:
INSTRUCTION_HEADER(WasmReturnVoid)
TRIVIAL_NEW_WRAPPERS
@@ -14305,15 +14299,12 @@ class MWasmCall final
wasm::CalleeDesc callee_;
FixedList argRegs_;
uint32_t spIncrement_;
- uint32_t tlsStackOffset_;
ABIArg instanceArg_;
- MWasmCall(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee, uint32_t spIncrement,
- uint32_t tlsStackOffset)
+ MWasmCall(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee, uint32_t spIncrement)
: desc_(desc),
callee_(callee),
- spIncrement_(spIncrement),
- tlsStackOffset_(tlsStackOffset)
+ spIncrement_(spIncrement)
{ }
public:
@@ -14326,15 +14317,12 @@ class MWasmCall final
};
typedef Vector Args;
- static const uint32_t DontSaveTls = UINT32_MAX;
-
static MWasmCall* New(TempAllocator& alloc,
const wasm::CallSiteDesc& desc,
const wasm::CalleeDesc& callee,
const Args& args,
MIRType resultType,
uint32_t spIncrement,
- uint32_t tlsStackOffset,
MDefinition* tableIndex = nullptr);
static MWasmCall* NewBuiltinInstanceMethodCall(TempAllocator& alloc,
@@ -14343,8 +14331,7 @@ class MWasmCall final
const ABIArg& instanceArg,
const Args& args,
MIRType resultType,
- uint32_t spIncrement,
- uint32_t tlsStackOffset);
+ uint32_t spIncrement);
size_t numArgs() const {
return argRegs_.length();
@@ -14362,13 +14349,6 @@ class MWasmCall final
uint32_t spIncrement() const {
return spIncrement_;
}
- bool saveTls() const {
- return tlsStackOffset_ != DontSaveTls;
- }
- uint32_t tlsStackOffset() const {
- MOZ_ASSERT(saveTls());
- return tlsStackOffset_;
- }
bool possiblyCalls() const override {
return true;
diff --git a/js/src/jit/MacroAssembler-inl.h b/js/src/jit/MacroAssembler-inl.h
index edca338fab0e..0ec15f179384 100644
--- a/js/src/jit/MacroAssembler-inl.h
+++ b/js/src/jit/MacroAssembler-inl.h
@@ -84,21 +84,21 @@ void
MacroAssembler::call(const wasm::CallSiteDesc& desc, const Register reg)
{
CodeOffset l = call(reg);
- append(desc, l, framePushed());
+ append(desc, l);
}
void
MacroAssembler::call(const wasm::CallSiteDesc& desc, uint32_t funcDefIndex)
{
CodeOffset l = callWithPatch();
- append(desc, l, framePushed(), funcDefIndex);
+ append(desc, l, funcDefIndex);
}
void
MacroAssembler::call(const wasm::CallSiteDesc& desc, wasm::Trap trap)
{
CodeOffset l = callWithPatch();
- append(desc, l, framePushed(), trap);
+ append(desc, l, trap);
}
// ===============================================================
diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
index 813fc13de2fa..72723f337bdb 100644
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -2931,6 +2931,14 @@ MacroAssembler::wasmEmitTrapOutOfLineCode()
if (size_t dec = StackDecrementForCall(ABIStackAlignment, alreadyPushed, toPush))
reserveStack(dec);
+ // To call the trap handler function, we must have the WasmTlsReg
+ // filled since this is the normal calling ABI. To avoid requiring
+ // every trapping operation to have the TLS register filled for the
+ // rare case that it takes a trap, we restore it from the frame on
+ // the out-of-line path. However, there are millions of out-of-line
+ // paths (viz. for loads/stores), so the load is factored out into
+ // the shared FarJumpIsland generated by patchCallSites.
+
// Call the trap's exit, using the bytecode offset of the trap site.
// Note that this code is inside the same CodeRange::Function as the
// trap site so it's as if the trapping instruction called the
@@ -2955,8 +2963,34 @@ MacroAssembler::wasmEmitTrapOutOfLineCode()
clearTrapSites();
}
+void
+MacroAssembler::wasmAssertNonExitInvariants(Register activation)
+{
+#ifdef DEBUG
+ // WasmActivation.exitFP should be null when outside any exit frame.
+ Label ok;
+ Address exitFP(activation, WasmActivation::offsetOfExitFP());
+ branchPtr(Assembler::Equal, exitFP, ImmWord(0), &ok);
+ breakpoint();
+ bind(&ok);
+#endif
+}
+
//}}} check_macroassembler_style
+void
+MacroAssembler::loadWasmActivationFromTls(Register dest)
+{
+ loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, cx)), dest);
+ loadPtr(Address(dest, JSContext::offsetOfWasmActivation()), dest);
+}
+
+void
+MacroAssembler::loadWasmTlsRegFromFrame(Register dest)
+{
+ loadPtr(Address(getStackPointer(), framePushed() + offsetof(wasm::Frame, tls)), dest);
+}
+
void
MacroAssembler::BranchType::emit(MacroAssembler& masm)
{
diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
index 2a0d8faefdf8..55c7ab071108 100644
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -447,6 +447,7 @@ class MacroAssembler : public MacroAssemblerSpecific
void Push(const ImmPtr imm) PER_SHARED_ARCH;
void Push(const ImmGCPtr ptr) PER_SHARED_ARCH;
void Push(FloatRegister reg) PER_SHARED_ARCH;
+ void PushFlags() DEFINED_ON(x86_shared);
void Push(jsid id, Register scratchReg);
void Push(TypedOrValueRegister v);
void Push(const ConstantOrRegister& v);
@@ -462,6 +463,7 @@ class MacroAssembler : public MacroAssemblerSpecific
void Pop(Register reg) PER_SHARED_ARCH;
void Pop(FloatRegister t) PER_SHARED_ARCH;
void Pop(const ValueOperand& val) PER_SHARED_ARCH;
+ void PopFlags() DEFINED_ON(x86_shared);
void popRooted(VMFunction::RootType rootType, Register cellReg, const ValueOperand& valueReg);
// Move the stack pointer based on the requested amount.
@@ -1462,6 +1464,9 @@ class MacroAssembler : public MacroAssemblerSpecific
// including "normal" OutOfLineCode.
void wasmEmitTrapOutOfLineCode();
+ // Assert invariants that should be true within any non-exit-stub wasm code.
+ void wasmAssertNonExitInvariants(Register activation);
+
public:
// ========================================================================
// Clamping functions.
@@ -1517,15 +1522,9 @@ class MacroAssembler : public MacroAssemblerSpecific
loadJSContext(dest);
loadPtr(Address(dest, offsetof(JSContext, activation_)), dest);
}
- void loadWasmActivationFromTls(Register dest) {
- loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, cx)), dest);
- loadPtr(Address(dest, JSContext::offsetOfWasmActivation()), dest);
- }
- void loadWasmActivationFromSymbolicAddress(Register dest) {
- movePtr(wasm::SymbolicAddress::ContextPtr, dest);
- loadPtr(Address(dest, 0), dest);
- loadPtr(Address(dest, JSContext::offsetOfWasmActivation()), dest);
- }
+
+ void loadWasmActivationFromTls(Register dest);
+ void loadWasmTlsRegFromFrame(Register dest = WasmTlsReg);
template <typename T>
void loadTypedOrValue(const T& src, TypedOrValueRegister dest) {
diff --git a/js/src/jit/RegisterAllocator.h b/js/src/jit/RegisterAllocator.h
index d5b4110d5bd6..bcc3915d9983 100644
--- a/js/src/jit/RegisterAllocator.h
+++ b/js/src/jit/RegisterAllocator.h
@@ -288,9 +288,12 @@ class RegisterAllocator
allRegisters_.take(AnyRegister(HeapReg));
allRegisters_.take(AnyRegister(HeapLenReg));
#endif
+ allRegisters_.take(FramePointer);
} else {
- if (FramePointer != InvalidReg && mir->instrumentedProfiling())
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64)
+ if (mir->instrumentedProfiling())
allRegisters_.take(AnyRegister(FramePointer));
+#endif
}
}
diff --git a/js/src/jit/WasmBCE.cpp b/js/src/jit/WasmBCE.cpp
index aac362738572..20739a5de059 100644
--- a/js/src/jit/WasmBCE.cpp
+++ b/js/src/jit/WasmBCE.cpp
@@ -6,6 +6,7 @@
#include "jit/WasmBCE.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
+#include "wasm/WasmTypes.h"
using namespace js;
using namespace js::jit;
@@ -42,15 +43,34 @@ jit::EliminateBoundsChecks(MIRGenerator* mir, MIRGraph& graph)
switch (def->op()) {
case MDefinition::Op_WasmBoundsCheck: {
MWasmBoundsCheck* bc = def->toWasmBoundsCheck();
- MDefinition* addr = def->getOperand(0);
+ MDefinition* addr = bc->index();
- LastSeenMap::AddPtr ptr = lastSeen.lookupForAdd(addr->id());
- if (ptr) {
- if (ptr->value()->block()->dominates(block))
- bc->setRedundant(true);
- } else {
- if (!lastSeen.add(ptr, addr->id(), def))
- return false;
+ // Eliminate constant-address bounds checks to addresses below
+ // the heap minimum.
+ //
+ // The payload of the MConstant will be Double if the constant
+ // result is above 2^31-1, but we don't care about that for BCE.
+
+#ifndef WASM_HUGE_MEMORY
+ MOZ_ASSERT(wasm::MaxMemoryAccessSize < wasm::GuardSize,
+ "Guard page handles partial out-of-bounds");
+#endif
+
+ if (addr->isConstant() && addr->toConstant()->type() == MIRType::Int32 &&
+ uint32_t(addr->toConstant()->toInt32()) < mir->minWasmHeapLength())
+ {
+ bc->setRedundant();
+ }
+ else
+ {
+ LastSeenMap::AddPtr ptr = lastSeen.lookupForAdd(addr->id());
+ if (ptr) {
+ if (ptr->value()->block()->dominates(block))
+ bc->setRedundant();
+ } else {
+ if (!lastSeen.add(ptr, addr->id(), def))
+ return false;
+ }
}
break;
}
diff --git a/js/src/jit/arm/Assembler-arm.h b/js/src/jit/arm/Assembler-arm.h
index 71043b2e1ae2..0bdc2768c511 100644
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -73,7 +73,7 @@ static constexpr Register IntArgReg0 = r0;
static constexpr Register IntArgReg1 = r1;
static constexpr Register IntArgReg2 = r2;
static constexpr Register IntArgReg3 = r3;
-static constexpr Register HeapReg = r11;
+static constexpr Register HeapReg = r10;
static constexpr Register CallTempNonArgRegs[] = { r5, r6, r7, r8 };
static const uint32_t NumCallTempNonArgRegs =
mozilla::ArrayLength(CallTempNonArgRegs);
@@ -134,7 +134,7 @@ static constexpr FloatRegister InvalidFloatReg;
static constexpr Register JSReturnReg_Type = r3;
static constexpr Register JSReturnReg_Data = r2;
static constexpr Register StackPointer = sp;
-static constexpr Register FramePointer = InvalidReg;
+static constexpr Register FramePointer = r11;
static constexpr Register ReturnReg = r0;
static constexpr Register64 ReturnReg64(r1, r0);
static constexpr FloatRegister ReturnFloat32Reg = { FloatRegisters::d0, VFPRegister::Single };
@@ -168,6 +168,7 @@ static constexpr Register WasmIonExitRegE1 = r1;
// None of these may be the second scratch register (lr).
static constexpr Register WasmIonExitRegReturnData = r2;
static constexpr Register WasmIonExitRegReturnType = r3;
+static constexpr Register WasmIonExitTlsReg = r9;
static constexpr Register WasmIonExitRegD0 = r0;
static constexpr Register WasmIonExitRegD1 = r1;
static constexpr Register WasmIonExitRegD2 = r4;
diff --git a/js/src/jit/arm/Lowering-arm.cpp b/js/src/jit/arm/Lowering-arm.cpp
index 9b9e09cf5523..c488394da0bd 100644
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -893,11 +893,12 @@ LIRGeneratorARM::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
if (byteSize(ins->access().type()) != 4 && !HasLDSTREXBHD()) {
LAsmJSCompareExchangeCallout* lir =
- new(alloc()) LAsmJSCompareExchangeCallout(useRegisterAtStart(base),
- useRegisterAtStart(ins->oldValue()),
- useRegisterAtStart(ins->newValue()),
- useFixed(ins->tls(), WasmTlsReg),
- temp(), temp());
+ new(alloc()) LAsmJSCompareExchangeCallout(useFixedAtStart(base, IntArgReg2),
+ useFixedAtStart(ins->oldValue(), IntArgReg3),
+ useFixedAtStart(ins->newValue(), CallTempReg0),
+ useFixedAtStart(ins->tls(), WasmTlsReg),
+ tempFixed(IntArgReg0),
+ tempFixed(IntArgReg1));
defineReturn(lir, ins);
return;
}
@@ -917,17 +918,18 @@ LIRGeneratorARM::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
MOZ_ASSERT(ins->access().type() < Scalar::Float32);
MOZ_ASSERT(ins->access().offset() == 0);
- const LAllocation base = useRegisterAtStart(ins->base());
- const LAllocation value = useRegisterAtStart(ins->value());
-
if (byteSize(ins->access().type()) < 4 && !HasLDSTREXBHD()) {
// Call out on ARMv6.
- defineReturn(new(alloc()) LAsmJSAtomicExchangeCallout(base, value,
- useFixed(ins->tls(), WasmTlsReg),
- temp(), temp()), ins);
+ defineReturn(new(alloc()) LAsmJSAtomicExchangeCallout(useFixedAtStart(ins->base(), IntArgReg2),
+ useFixedAtStart(ins->value(), IntArgReg3),
+ useFixedAtStart(ins->tls(), WasmTlsReg),
+ tempFixed(IntArgReg0),
+ tempFixed(IntArgReg1)), ins);
return;
}
+ const LAllocation base = useRegisterAtStart(ins->base());
+ const LAllocation value = useRegisterAtStart(ins->value());
define(new(alloc()) LAsmJSAtomicExchangeHeap(base, value), ins);
}
@@ -942,10 +944,11 @@ LIRGeneratorARM::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
if (byteSize(ins->access().type()) != 4 && !HasLDSTREXBHD()) {
LAsmJSAtomicBinopCallout* lir =
- new(alloc()) LAsmJSAtomicBinopCallout(useRegisterAtStart(base),
- useRegisterAtStart(ins->value()),
- useFixed(ins->tls(), WasmTlsReg),
- temp(), temp());
+ new(alloc()) LAsmJSAtomicBinopCallout(useFixedAtStart(base, IntArgReg2),
+ useFixedAtStart(ins->value(), IntArgReg3),
+ useFixedAtStart(ins->tls(), WasmTlsReg),
+ tempFixed(IntArgReg0),
+ tempFixed(IntArgReg1));
defineReturn(lir, ins);
return;
}
diff --git a/js/src/jit/arm/MacroAssembler-arm.cpp b/js/src/jit/arm/MacroAssembler-arm.cpp
index 8fa1d70cf279..99dc9a1dfec8 100644
--- a/js/src/jit/arm/MacroAssembler-arm.cpp
+++ b/js/src/jit/arm/MacroAssembler-arm.cpp
@@ -5138,7 +5138,7 @@ MacroAssembler::nopPatchableToCall(const wasm::CallSiteDesc& desc)
{
CodeOffset offset(currentOffset());
ma_nop();
- append(desc, CodeOffset(currentOffset()), framePushed());
+ append(desc, CodeOffset(currentOffset()));
return offset;
}
diff --git a/js/src/jit/arm/Simulator-arm.h b/js/src/jit/arm/Simulator-arm.h
index 0259e776ce9e..606261c9c603 100644
--- a/js/src/jit/arm/Simulator-arm.h
+++ b/js/src/jit/arm/Simulator-arm.h
@@ -81,6 +81,8 @@ class Simulator
r0 = 0, r1, r2, r3, r4, r5, r6, r7,
r8, r9, r10, r11, r12, r13, r14, r15,
num_registers,
+ fp = 11,
+ ip = 12,
sp = 13,
lr = 14,
pc = 15,
diff --git a/js/src/jit/arm64/Assembler-arm64.h b/js/src/jit/arm64/Assembler-arm64.h
index 276c6b9eea58..ba6367421682 100644
--- a/js/src/jit/arm64/Assembler-arm64.h
+++ b/js/src/jit/arm64/Assembler-arm64.h
@@ -126,6 +126,7 @@ static constexpr Register WasmIonExitRegE1 = r1;
// None of these may be the second scratch register.
static constexpr Register WasmIonExitRegReturnData = r2;
static constexpr Register WasmIonExitRegReturnType = r3;
+static constexpr Register WasmIonExitTlsReg = r17;
static constexpr Register WasmIonExitRegD0 = r0;
static constexpr Register WasmIonExitRegD1 = r1;
static constexpr Register WasmIonExitRegD2 = r4;
diff --git a/js/src/jit/mips32/Assembler-mips32.h b/js/src/jit/mips32/Assembler-mips32.h
index 3c8a6ffafbe0..dd3fa38f44db 100644
--- a/js/src/jit/mips32/Assembler-mips32.h
+++ b/js/src/jit/mips32/Assembler-mips32.h
@@ -77,6 +77,7 @@ static constexpr FloatRegister SecondScratchDoubleReg = { FloatRegisters::f16, F
// None of these may be the second scratch register (t8).
static constexpr Register WasmIonExitRegReturnData = JSReturnReg_Data;
static constexpr Register WasmIonExitRegReturnType = JSReturnReg_Type;
+static constexpr Register WasmIonExitTlsReg = s5;
static constexpr FloatRegister f0 = { FloatRegisters::f0, FloatRegister::Double };
static constexpr FloatRegister f2 = { FloatRegisters::f2, FloatRegister::Double };
diff --git a/js/src/jit/mips64/Assembler-mips64.h b/js/src/jit/mips64/Assembler-mips64.h
index eed15b133a09..f9884b9e5063 100644
--- a/js/src/jit/mips64/Assembler-mips64.h
+++ b/js/src/jit/mips64/Assembler-mips64.h
@@ -71,6 +71,7 @@ static constexpr FloatRegister SecondScratchDoubleReg = { FloatRegisters::f21, F
// None of these may be the second scratch register (t8).
static constexpr Register WasmIonExitRegReturnData = JSReturnReg_Data;
static constexpr Register WasmIonExitRegReturnType = JSReturnReg_Type;
+static constexpr Register WasmIonExitTlsReg = s5;
static constexpr FloatRegister f0 = { FloatRegisters::f0, FloatRegisters::Double };
static constexpr FloatRegister f1 = { FloatRegisters::f1, FloatRegisters::Double };
diff --git a/js/src/jit/none/MacroAssembler-none.h b/js/src/jit/none/MacroAssembler-none.h
index 1795fd869cf1..cb13d834e9f1 100644
--- a/js/src/jit/none/MacroAssembler-none.h
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -48,6 +48,7 @@ static constexpr Register WasmIonExitRegE1 { Registers::invalid_reg };
static constexpr Register WasmIonExitRegReturnData { Registers::invalid_reg };
static constexpr Register WasmIonExitRegReturnType { Registers::invalid_reg };
+static constexpr Register WasmIonExitTlsReg = { Registers::invalid_reg };
static constexpr Register WasmIonExitRegD0 { Registers::invalid_reg };
static constexpr Register WasmIonExitRegD1 { Registers::invalid_reg };
static constexpr Register WasmIonExitRegD2 { Registers::invalid_reg };
@@ -415,7 +416,6 @@ class MacroAssemblerNone : public Assembler
bool buildOOLFakeExitFrame(void*) { MOZ_CRASH(); }
void loadWasmGlobalPtr(uint32_t, Register) { MOZ_CRASH(); }
void loadWasmActivationFromTls(Register) { MOZ_CRASH(); }
- void loadWasmActivationFromSymbolicAddress(Register) { MOZ_CRASH(); }
void loadWasmPinnedRegsFromTls() { MOZ_CRASH(); }
void setPrinter(Sprinter*) { MOZ_CRASH(); }
diff --git a/js/src/jit/shared/Assembler-shared.h b/js/src/jit/shared/Assembler-shared.h
index 19f80e609b05..0c8a1cf3d39a 100644
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -749,6 +749,25 @@ struct GlobalAccess
typedef Vector<GlobalAccess, 0, SystemAllocPolicy> GlobalAccessVector;
+// A CallFarJump records the offset of a jump that needs to be patched to a
+// call at the end of the module when all calls have been emitted.
+
+struct CallFarJump
+{
+ uint32_t funcIndex;
+ jit::CodeOffset jump;
+
+ CallFarJump(uint32_t funcIndex, jit::CodeOffset jump)
+ : funcIndex(funcIndex), jump(jump)
+ {}
+
+ void offsetBy(size_t delta) {
+ jump.offsetBy(delta);
+ }
+};
+
+typedef Vector<CallFarJump, 0, SystemAllocPolicy> CallFarJumpVector;
+
// The TrapDesc struct describes a wasm trap that is about to be emitted. This
// includes the logical wasm bytecode offset to report, the kind of instruction
// causing the trap, and the stack depth right before control is transferred to
@@ -808,6 +827,7 @@ namespace jit {
class AssemblerShared
{
wasm::CallSiteAndTargetVector callSites_;
+ wasm::CallFarJumpVector callFarJumps_;
wasm::TrapSiteVector trapSites_;
wasm::TrapFarJumpVector trapFarJumps_;
wasm::MemoryAccessVector memoryAccesses_;
@@ -842,16 +862,18 @@ class AssemblerShared
}
template <typename... Args>
- void append(const wasm::CallSiteDesc& desc, CodeOffset retAddr, size_t framePushed,
- Args&&... args)
+ void append(const wasm::CallSiteDesc& desc, CodeOffset retAddr, Args&&... args)
{
- // framePushed does not include sizeof(wasm:Frame), so add it in explicitly when
- // setting the CallSite::stackDepth.
- wasm::CallSite cs(desc, retAddr.offset(), framePushed + sizeof(wasm::Frame));
+ wasm::CallSite cs(desc, retAddr.offset());
enoughMemory_ &= callSites_.emplaceBack(cs, mozilla::Forward(args)...);
}
wasm::CallSiteAndTargetVector& callSites() { return callSites_; }
+ void append(wasm::CallFarJump jmp) {
+ enoughMemory_ &= callFarJumps_.append(jmp);
+ }
+ const wasm::CallFarJumpVector& callFarJumps() const { return callFarJumps_; }
+
void append(wasm::TrapSite trapSite) {
enoughMemory_ &= trapSites_.append(trapSite);
}
@@ -911,6 +933,11 @@ class AssemblerShared
MOZ_ASSERT(other.trapSites_.empty(), "should have been cleared by wasmEmitTrapOutOfLineCode");
+ i = callFarJumps_.length();
+ enoughMemory_ &= callFarJumps_.appendAll(other.callFarJumps_);
+ for (; i < callFarJumps_.length(); i++)
+ callFarJumps_[i].offsetBy(delta);
+
i = trapFarJumps_.length();
enoughMemory_ &= trapFarJumps_.appendAll(other.trapFarJumps_);
for (; i < trapFarJumps_.length(); i++)
diff --git a/js/src/jit/shared/CodeGenerator-shared.cpp b/js/src/jit/shared/CodeGenerator-shared.cpp
index b32e1c3fc6c3..7922d16490f2 100644
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -1499,35 +1499,37 @@ CodeGeneratorShared::emitWasmCallBase(LWasmCallBase* ins)
masm.bind(&ok);
#endif
- // Save the caller's TLS register in a reserved stack slot (below the
- // call's stack arguments) for retrieval after the call.
- if (mir->saveTls())
- masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), mir->tlsStackOffset()));
+ // LWasmCallBase::isCallPreserved() assumes that all MWasmCalls preserve the
+ // TLS and pinned regs. The only case where we don't have to reload
+ // the TLS and pinned regs is when the callee preserves them.
+ bool reloadRegs = true;
const wasm::CallSiteDesc& desc = mir->desc();
const wasm::CalleeDesc& callee = mir->callee();
switch (callee.which()) {
case wasm::CalleeDesc::Func:
masm.call(desc, callee.funcIndex());
+ reloadRegs = false;
break;
case wasm::CalleeDesc::Import:
masm.wasmCallImport(desc, callee);
break;
- case wasm::CalleeDesc::WasmTable:
case wasm::CalleeDesc::AsmJSTable:
+ case wasm::CalleeDesc::WasmTable:
masm.wasmCallIndirect(desc, callee, ins->needsBoundsCheck());
+ reloadRegs = callee.which() == wasm::CalleeDesc::WasmTable && callee.wasmTableIsExternal();
break;
case wasm::CalleeDesc::Builtin:
masm.call(callee.builtin());
+ reloadRegs = false;
break;
case wasm::CalleeDesc::BuiltinInstanceMethod:
masm.wasmCallBuiltinInstanceMethod(mir->instanceArg(), callee.builtin());
break;
}
- // After return, restore the caller's TLS and pinned registers.
- if (mir->saveTls()) {
- masm.loadPtr(Address(masm.getStackPointer(), mir->tlsStackOffset()), WasmTlsReg);
+ if (reloadRegs) {
+ masm.loadWasmTlsRegFromFrame();
masm.loadWasmPinnedRegsFromTls();
}
diff --git a/js/src/jit/shared/LIR-shared.h b/js/src/jit/shared/LIR-shared.h
index 0c632201ec06..c67a8e814cc8 100644
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -8605,13 +8605,13 @@ class LWasmParameterI64 : public LInstructionHelper
LIR_HEADER(WasmParameterI64);
};
-class LWasmReturn : public LInstructionHelper<0, 2, 0>
+class LWasmReturn : public LInstructionHelper<0, 1, 0>
{
public:
LIR_HEADER(WasmReturn);
};
-class LWasmReturnI64 : public LInstructionHelper<0, INT64_PIECES + 1, 0>
+class LWasmReturnI64 : public LInstructionHelper<0, INT64_PIECES, 0>
{
public:
LIR_HEADER(WasmReturnI64)
@@ -8621,7 +8621,7 @@ class LWasmReturnI64 : public LInstructionHelper<0, INT64_PIECES + 1, 0>
}
};
-class LWasmReturnVoid : public LInstructionHelper<0, 1, 0>
+class LWasmReturnVoid : public LInstructionHelper<0, 0, 0>
{
public:
LIR_HEADER(WasmReturnVoid);
@@ -8683,6 +8683,7 @@ class LWasmCallBase : public LInstruction
// - internal/indirect calls do by the internal wasm ABI
// - import calls do by explicitly saving/restoring at the callsite
// - builtin calls do because the TLS reg is non-volatile
+ // See also CodeGeneratorShared::emitWasmCallBase.
return !reg.isFloat() && reg.gpr() == WasmTlsReg;
}
diff --git a/js/src/jit/shared/Lowering-shared-inl.h b/js/src/jit/shared/Lowering-shared-inl.h
index e2900770f02c..1b13a7f68417 100644
--- a/js/src/jit/shared/Lowering-shared-inl.h
+++ b/js/src/jit/shared/Lowering-shared-inl.h
@@ -817,6 +817,12 @@ LIRGeneratorShared::useInt64Fixed(MDefinition* mir, Register64 regs, bool useAtS
#endif
}
+LInt64Allocation
+LIRGeneratorShared::useInt64FixedAtStart(MDefinition* mir, Register64 regs)
+{
+ return useInt64Fixed(mir, regs, true);
+}
+
LInt64Allocation
LIRGeneratorShared::useInt64(MDefinition* mir, bool useAtStart)
{
diff --git a/js/src/jit/shared/Lowering-shared.h b/js/src/jit/shared/Lowering-shared.h
index 4247e8ba069d..b3ecd6e9be0f 100644
--- a/js/src/jit/shared/Lowering-shared.h
+++ b/js/src/jit/shared/Lowering-shared.h
@@ -208,6 +208,7 @@ class LIRGeneratorShared : public MDefinitionVisitor
inline LInt64Allocation useInt64Register(MDefinition* mir, bool useAtStart = false);
inline LInt64Allocation useInt64RegisterOrConstant(MDefinition* mir, bool useAtStart = false);
inline LInt64Allocation useInt64Fixed(MDefinition* mir, Register64 regs, bool useAtStart = false);
+ inline LInt64Allocation useInt64FixedAtStart(MDefinition* mir, Register64 regs);
LInt64Allocation useInt64RegisterAtStart(MDefinition* mir) {
return useInt64Register(mir, /* useAtStart = */ true);
diff --git a/js/src/jit/x64/Assembler-x64.h b/js/src/jit/x64/Assembler-x64.h
index 2fb167dbc061..cf6c5afbc460 100644
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -157,6 +157,7 @@ static constexpr Register WasmIonExitRegE1 = rdi;
// Registers used in the GenerateFFIIonExit Disable Activation block.
static constexpr Register WasmIonExitRegReturnData = ecx;
static constexpr Register WasmIonExitRegReturnType = ecx;
+static constexpr Register WasmIonExitTlsReg = r14;
static constexpr Register WasmIonExitRegD0 = rax;
static constexpr Register WasmIonExitRegD1 = rdi;
static constexpr Register WasmIonExitRegD2 = rbx;
diff --git a/js/src/jit/x64/MacroAssembler-x64.cpp b/js/src/jit/x64/MacroAssembler-x64.cpp
index 5eedd508fba9..352ba8043d4c 100644
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -474,9 +474,8 @@ MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
ABIStackAlignment);
} else {
- static_assert(sizeof(wasm::Frame) % ABIStackAlignment == 0,
- "wasm::Frame should be part of the stack alignment.");
- stackForCall += ComputeByteAlignment(stackForCall + framePushed(),
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
ABIStackAlignment);
}
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
index 1359bb1aaa4b..8ecc66428649 100644
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.cpp
@@ -600,6 +600,13 @@ MacroAssembler::Push(FloatRegister t)
adjustFrame(sizeof(double));
}
+void
+MacroAssembler::PushFlags()
+{
+ pushFlags();
+ adjustFrame(sizeof(intptr_t));
+}
+
void
MacroAssembler::Pop(const Operand op)
{
@@ -628,6 +635,13 @@ MacroAssembler::Pop(const ValueOperand& val)
implicitPop(sizeof(Value));
}
+void
+MacroAssembler::PopFlags()
+{
+ popFlags();
+ implicitPop(sizeof(intptr_t));
+}
+
// ===============================================================
// Simple call functions.
@@ -741,7 +755,7 @@ MacroAssembler::nopPatchableToCall(const wasm::CallSiteDesc& desc)
{
CodeOffset offset(currentOffset());
masm.nop_five();
- append(desc, CodeOffset(currentOffset()), framePushed());
+ append(desc, CodeOffset(currentOffset()));
MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
return offset;
}
diff --git a/js/src/jit/x86/Assembler-x86.h b/js/src/jit/x86/Assembler-x86.h
index 2fe4ae90c882..c1a9e5a1fc38 100644
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -112,9 +112,10 @@ static constexpr Register WasmIonExitRegE1 = eax;
// Registers used in the GenerateFFIIonExit Disable Activation block.
static constexpr Register WasmIonExitRegReturnData = edx;
static constexpr Register WasmIonExitRegReturnType = ecx;
+static constexpr Register WasmIonExitTlsReg = esi;
static constexpr Register WasmIonExitRegD0 = edi;
static constexpr Register WasmIonExitRegD1 = eax;
-static constexpr Register WasmIonExitRegD2 = esi;
+static constexpr Register WasmIonExitRegD2 = ebx;
// Registerd used in RegExpMatcher instruction (do not use JSReturnOperand).
static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
diff --git a/js/src/jit/x86/CodeGenerator-x86.cpp b/js/src/jit/x86/CodeGenerator-x86.cpp
index dea148125724..ce1880d51fe6 100644
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -949,20 +949,11 @@ CodeGeneratorX86::visitDivOrModI64(LDivOrModI64* lir)
{
Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+ Register temp = ToRegister(lir->temp());
Register64 output = ToOutRegister64(lir);
MOZ_ASSERT(output == ReturnReg64);
- // We are free to clobber all registers, since this is a call instruction.
- AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
- regs.take(lhs.low);
- regs.take(lhs.high);
- if (lhs != rhs) {
- regs.take(rhs.low);
- regs.take(rhs.high);
- }
- Register temp = regs.takeAny();
-
Label done;
// Handle divide by zero.
@@ -1006,20 +997,11 @@ CodeGeneratorX86::visitUDivOrModI64(LUDivOrModI64* lir)
{
Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+ Register temp = ToRegister(lir->temp());
Register64 output = ToOutRegister64(lir);
MOZ_ASSERT(output == ReturnReg64);
- // We are free to clobber all registers, since this is a call instruction.
- AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
- regs.take(lhs.low);
- regs.take(lhs.high);
- if (lhs != rhs) {
- regs.take(rhs.low);
- regs.take(rhs.high);
- }
- Register temp = regs.takeAny();
-
// Prevent divide by zero.
if (lir->canBeDivideByZero())
masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));
diff --git a/js/src/jit/x86/LIR-x86.h b/js/src/jit/x86/LIR-x86.h
index f49ec7b87667..0c36494eac41 100644
--- a/js/src/jit/x86/LIR-x86.h
+++ b/js/src/jit/x86/LIR-x86.h
@@ -109,7 +109,7 @@ class LWasmUint32ToFloat32: public LInstructionHelper<1, 1, 1>
}
};
-class LDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 0>
+class LDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 1>
{
public:
LIR_HEADER(DivOrModI64)
@@ -117,10 +117,11 @@ class LDivOrModI64 : public LCallInstructionHelpertoMod()->trapOffset();
return mir_->toDiv()->trapOffset();
}
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
};
-class LUDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 0>
+class LUDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 1>
{
public:
LIR_HEADER(UDivOrModI64)
@@ -153,10 +157,11 @@ class LUDivOrModI64 : public LCallInstructionHelpertoMod()->trapOffset();
return mir_->toDiv()->trapOffset();
}
+ const LDefinition* temp() {
+ return getTemp(0);
+ }
};
class LWasmTruncateToInt64 : public LInstructionHelper
diff --git a/js/src/jit/x86/Lowering-x86.cpp b/js/src/jit/x86/Lowering-x86.cpp
index 5c11bac16555..337b165a56ed 100644
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -618,8 +618,9 @@ LIRGeneratorX86::lowerDivI64(MDiv* div)
return;
}
- LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64RegisterAtStart(div->lhs()),
- useInt64RegisterAtStart(div->rhs()));
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64FixedAtStart(div->lhs(), Register64(eax, ebx)),
+ useInt64FixedAtStart(div->rhs(), Register64(ecx, edx)),
+ tempFixed(esi));
defineReturn(lir, div);
}
@@ -631,24 +632,27 @@ LIRGeneratorX86::lowerModI64(MMod* mod)
return;
}
- LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
- useInt64RegisterAtStart(mod->rhs()));
+ LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64FixedAtStart(mod->lhs(), Register64(eax, ebx)),
+ useInt64FixedAtStart(mod->rhs(), Register64(ecx, edx)),
+ tempFixed(esi));
defineReturn(lir, mod);
}
void
LIRGeneratorX86::lowerUDivI64(MDiv* div)
{
- LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64RegisterAtStart(div->lhs()),
- useInt64RegisterAtStart(div->rhs()));
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64FixedAtStart(div->lhs(), Register64(eax, ebx)),
+ useInt64FixedAtStart(div->rhs(), Register64(ecx, edx)),
+ tempFixed(esi));
defineReturn(lir, div);
}
void
LIRGeneratorX86::lowerUModI64(MMod* mod)
{
- LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
- useInt64RegisterAtStart(mod->rhs()));
+ LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64FixedAtStart(mod->lhs(), Register64(eax, ebx)),
+ useInt64FixedAtStart(mod->rhs(), Register64(ecx, edx)),
+ tempFixed(esi));
defineReturn(lir, mod);
}
diff --git a/js/src/moz.build b/js/src/moz.build
index 8b11432f99bc..32459181050e 100644
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -381,7 +381,6 @@ UNIFIED_SOURCES += [
'wasm/WasmCode.cpp',
'wasm/WasmCompartment.cpp',
'wasm/WasmCompile.cpp',
- 'wasm/WasmDebugFrame.cpp',
'wasm/WasmFrameIterator.cpp',
'wasm/WasmGenerator.cpp',
'wasm/WasmInstance.cpp',
diff --git a/js/src/shell/js.cpp b/js/src/shell/js.cpp
index 93966b5d68a6..5e0a70e4aeec 100644
--- a/js/src/shell/js.cpp
+++ b/js/src/shell/js.cpp
@@ -5274,9 +5274,11 @@ SingleStepCallback(void* arg, jit::Simulator* sim, void* pc)
#if defined(JS_SIMULATOR_ARM)
state.sp = (void*)sim->get_register(jit::Simulator::sp);
state.lr = (void*)sim->get_register(jit::Simulator::lr);
+ state.fp = (void*)sim->get_register(jit::Simulator::fp);
#elif defined(JS_SIMULATOR_MIPS64)
state.sp = (void*)sim->getRegister(jit::Simulator::sp);
state.lr = (void*)sim->getRegister(jit::Simulator::ra);
+ state.fp = (void*)sim->getRegister(jit::Simulator::fp);
#else
# error "NYI: Single-step profiling support"
#endif
diff --git a/js/src/vm/Debugger.cpp b/js/src/vm/Debugger.cpp
index 8bb44714a4c2..be3db0cc80bb 100644
--- a/js/src/vm/Debugger.cpp
+++ b/js/src/vm/Debugger.cpp
@@ -2549,7 +2549,7 @@ Debugger::updateExecutionObservabilityOfFrames(JSContext* cx, const ExecutionObs
oldestEnabledFrame.setIsDebuggee();
}
if (iter.abstractFramePtr().isWasmDebugFrame())
- iter.abstractFramePtr().asWasmDebugFrame()->observeFrame(cx);
+ iter.abstractFramePtr().asWasmDebugFrame()->observe(cx);
} else {
#ifdef DEBUG
// Debugger.Frame lifetimes are managed by the debug epilogue,
diff --git a/js/src/vm/GeckoProfiler.cpp b/js/src/vm/GeckoProfiler.cpp
index 86fdcc003617..fdd1cf07fc88 100644
--- a/js/src/vm/GeckoProfiler.cpp
+++ b/js/src/vm/GeckoProfiler.cpp
@@ -141,6 +141,12 @@ GeckoProfiler::enable(bool enabled)
}
}
+ // WebAssembly code does not need to be released, but profiling string
+ // labels have to be generated so that they are available during async
+ // profiling stack iteration.
+ for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
+ c->wasm.ensureProfilingLabels(enabled);
+
return true;
}
diff --git a/js/src/vm/Stack-inl.h b/js/src/vm/Stack-inl.h
index 8679d6fb7af9..85fc08249574 100644
--- a/js/src/vm/Stack-inl.h
+++ b/js/src/vm/Stack-inl.h
@@ -19,7 +19,6 @@
#include "js/Debug.h"
#include "vm/EnvironmentObject.h"
#include "vm/GeneratorObject.h"
-#include "wasm/WasmDebugFrame.h"
#include "wasm/WasmInstance.h"
#include "jsobjinlines.h"
@@ -455,7 +454,7 @@ AbstractFramePtr::environmentChain() const
if (isBaselineFrame())
return asBaselineFrame()->environmentChain();
if (isWasmDebugFrame())
- return asWasmDebugFrame()->environmentChain();
+ return &global()->lexicalEnvironment();
return asRematerializedFrame()->environmentChain();
}
diff --git a/js/src/vm/Stack.cpp b/js/src/vm/Stack.cpp
index 8e5fe3060610..70150b556b52 100644
--- a/js/src/vm/Stack.cpp
+++ b/js/src/vm/Stack.cpp
@@ -17,7 +17,6 @@
#include "js/GCAPI.h"
#include "vm/Debugger.h"
#include "vm/Opcodes.h"
-#include "wasm/WasmDebugFrame.h"
#include "jit/JitFrameIterator-inl.h"
#include "vm/EnvironmentObject-inl.h"
@@ -1647,7 +1646,7 @@ WasmActivation::WasmActivation(JSContext* cx)
: Activation(cx, Wasm),
entrySP_(nullptr),
resumePC_(nullptr),
- fp_(nullptr),
+ exitFP_(nullptr),
exitReason_(wasm::ExitReason::None)
{
(void) entrySP_; // silence "unused private member" warning
@@ -1655,8 +1654,6 @@ WasmActivation::WasmActivation(JSContext* cx)
prevWasm_ = cx->wasmActivationStack_;
cx->wasmActivationStack_ = this;
- cx->compartment()->wasm.activationCount_++;
-
// Now that the WasmActivation is fully initialized, make it visible to
// asynchronous profiling.
registerProfiling();
@@ -1667,13 +1664,11 @@ WasmActivation::~WasmActivation()
// Hide this activation from the profiler before is is destroyed.
unregisterProfiling();
- MOZ_ASSERT(fp_ == nullptr);
+ MOZ_ASSERT(exitFP_ == nullptr);
+ MOZ_ASSERT(exitReason_ == wasm::ExitReason::None);
MOZ_ASSERT(cx_->wasmActivationStack_ == this);
cx_->wasmActivationStack_ = prevWasm_;
-
- MOZ_ASSERT(cx_->compartment()->wasm.activationCount_ > 0);
- cx_->compartment()->wasm.activationCount_--;
}
InterpreterFrameIterator&
diff --git a/js/src/vm/Stack.h b/js/src/vm/Stack.h
index 582a4da128db..5655b5f76291 100644
--- a/js/src/vm/Stack.h
+++ b/js/src/vm/Stack.h
@@ -25,6 +25,7 @@
#include "vm/ArgumentsObject.h"
#include "vm/SavedFrame.h"
#include "wasm/WasmFrameIterator.h"
+#include "wasm/WasmTypes.h"
struct JSCompartment;
@@ -166,6 +167,7 @@ class AbstractFramePtr
MOZ_IMPLICIT AbstractFramePtr(wasm::DebugFrame* fp)
: ptr_(fp ? uintptr_t(fp) | Tag_WasmDebugFrame : 0)
{
+ static_assert(wasm::DebugFrame::Alignment >= TagMask, "aligned");
MOZ_ASSERT_IF(fp, asWasmDebugFrame() == fp);
}
@@ -1733,7 +1735,7 @@ class WasmActivation : public Activation
WasmActivation* prevWasm_;
void* entrySP_;
void* resumePC_;
- uint8_t* fp_;
+ uint8_t* exitFP_;
wasm::ExitReason exitReason_;
public:
@@ -1746,20 +1748,16 @@ class WasmActivation : public Activation
return true;
}
- // Returns a pointer to the base of the innermost stack frame of wasm code
- // in this activation.
- uint8_t* fp() const { return fp_; }
+ // Returns null or the final wasm::Frame* when wasm exited this
+ // WasmActivation.
+ uint8_t* exitFP() const { return exitFP_; }
// Returns the reason why wasm code called out of wasm code.
wasm::ExitReason exitReason() const { return exitReason_; }
- // Read by JIT code:
- static unsigned offsetOfContext() { return offsetof(WasmActivation, cx_); }
- static unsigned offsetOfResumePC() { return offsetof(WasmActivation, resumePC_); }
-
// Written by JIT code:
static unsigned offsetOfEntrySP() { return offsetof(WasmActivation, entrySP_); }
- static unsigned offsetOfFP() { return offsetof(WasmActivation, fp_); }
+ static unsigned offsetOfExitFP() { return offsetof(WasmActivation, exitFP_); }
static unsigned offsetOfExitReason() { return offsetof(WasmActivation, exitReason_); }
// Read/written from SIGSEGV handler:
@@ -1767,7 +1765,7 @@ class WasmActivation : public Activation
void* resumePC() const { return resumePC_; }
// Used by wasm::FrameIterator during stack unwinding.
- void unwindFP(uint8_t* fp) { fp_ = fp; }
+ void unwindExitFP(uint8_t* exitFP) { exitFP_ = exitFP; exitReason_ = wasm::ExitReason::None; }
};
// A FrameIter walks over a context's stack of JS script activations,
diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp
index cd6ab32fd434..c367bfa573d8 100644
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -97,7 +97,6 @@
#endif
#include "wasm/WasmBinaryIterator.h"
-#include "wasm/WasmDebugFrame.h"
#include "wasm/WasmGenerator.h"
#include "wasm/WasmSignalHandlers.h"
#include "wasm/WasmValidate.h"
@@ -199,31 +198,19 @@ static constexpr int32_t TlsSlotSize = sizeof(void*);
static constexpr int32_t TlsSlotOffset = TlsSlotSize;
BaseLocalIter::BaseLocalIter(const ValTypeVector& locals,
- size_t argsLength,
- bool debugEnabled)
+ size_t argsLength,
+ bool debugEnabled)
: locals_(locals),
argsLength_(argsLength),
argsRange_(locals.begin(), argsLength),
argsIter_(argsRange_),
index_(0),
- localSize_(0),
+ localSize_(debugEnabled ? DebugFrame::offsetOfFrame() : 0),
+ reservedSize_(localSize_),
done_(false)
{
MOZ_ASSERT(argsLength <= locals.length());
- // Reserve a stack slot for the TLS pointer outside the locals range so it
- // isn't zero-filled like the normal locals.
- DebugOnly tlsSlotOffset = pushLocal(TlsSlotSize);
- MOZ_ASSERT(tlsSlotOffset == TlsSlotOffset);
- if (debugEnabled) {
- // If debug information is generated, constructing DebugFrame record:
- // reserving some data before TLS pointer. The TLS pointer allocated
- // above and regular wasm::Frame data starts after locals.
- localSize_ += DebugFrame::offsetOfTlsData();
- MOZ_ASSERT(DebugFrame::offsetOfFrame() == localSize_);
- }
- reservedSize_ = localSize_;
-
settle();
}
@@ -628,10 +615,6 @@ class BaseCompiler
Vector localInfo_;
Vector outOfLine_;
- // Index into localInfo_ of the special local used for saving the TLS
- // pointer. This follows the function's real arguments and locals.
- uint32_t tlsSlot_;
-
// On specific platforms we sometimes need to use specific registers.
#ifdef JS_CODEGEN_X64
@@ -2229,9 +2212,6 @@ class BaseCompiler
maxFramePushed_ = localSize_;
- // The TLS pointer is always passed as a hidden argument in WasmTlsReg.
- // Save it into its assigned local slot.
- storeToFramePtr(WasmTlsReg, localInfo_[tlsSlot_].offs());
if (debugEnabled_) {
// Initialize funcIndex and flag fields of DebugFrame.
size_t debugFrame = masm.framePushed() - DebugFrame::offsetOfFrame();
@@ -2361,8 +2341,9 @@ class BaseCompiler
masm.breakpoint();
// Patch the add in the prologue so that it checks against the correct
- // frame size.
+ // frame size. Flush the constant pool in case it needs to be patched.
MOZ_ASSERT(maxFramePushed_ >= localSize_);
+ masm.flush();
masm.patchAdd32ToPtr(stackAddOffset_, Imm32(-int32_t(maxFramePushed_ - localSize_)));
// Since we just overflowed the stack, to be on the safe side, pop the
@@ -2390,9 +2371,6 @@ class BaseCompiler
restoreResult();
}
- // Restore the TLS register in case it was overwritten by the function.
- loadFromFramePtr(WasmTlsReg, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
-
GenerateFunctionEpilogue(masm, localSize_, &offsets_);
#if defined(JS_ION_PERF)
@@ -2481,7 +2459,7 @@ class BaseCompiler
// On x86 there are no pinned registers, so don't waste time
// reloading the Tls.
#ifndef JS_CODEGEN_X86
- loadFromFramePtr(WasmTlsReg, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
+ masm.loadWasmTlsRegFromFrame();
masm.loadWasmPinnedRegsFromTls();
#endif
}
@@ -2678,7 +2656,7 @@ class BaseCompiler
const FunctionCall& call)
{
// Builtin method calls assume the TLS register has been set.
- loadFromFramePtr(WasmTlsReg, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
+ masm.loadWasmTlsRegFromFrame();
CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Symbolic);
masm.wasmCallBuiltinInstanceMethod(instanceArg, builtin);
@@ -3317,56 +3295,56 @@ class BaseCompiler
void loadGlobalVarI32(unsigned globalDataOffset, RegI32 r)
{
ScratchI32 tmp(*this);
- loadFromFramePtr(tmp, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
+ masm.loadWasmTlsRegFromFrame(tmp);
masm.load32(Address(tmp, globalToTlsOffset(globalDataOffset)), r);
}
void loadGlobalVarI64(unsigned globalDataOffset, RegI64 r)
{
ScratchI32 tmp(*this);
- loadFromFramePtr(tmp, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
+ masm.loadWasmTlsRegFromFrame(tmp);
masm.load64(Address(tmp, globalToTlsOffset(globalDataOffset)), r);
}
void loadGlobalVarF32(unsigned globalDataOffset, RegF32 r)
{
ScratchI32 tmp(*this);
- loadFromFramePtr(tmp, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
+ masm.loadWasmTlsRegFromFrame(tmp);
masm.loadFloat32(Address(tmp, globalToTlsOffset(globalDataOffset)), r);
}
void loadGlobalVarF64(unsigned globalDataOffset, RegF64 r)
{
ScratchI32 tmp(*this);
- loadFromFramePtr(tmp, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
+ masm.loadWasmTlsRegFromFrame(tmp);
masm.loadDouble(Address(tmp, globalToTlsOffset(globalDataOffset)), r);
}
void storeGlobalVarI32(unsigned globalDataOffset, RegI32 r)
{
ScratchI32 tmp(*this);
- loadFromFramePtr(tmp, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
+ masm.loadWasmTlsRegFromFrame(tmp);
masm.store32(r, Address(tmp, globalToTlsOffset(globalDataOffset)));
}
void storeGlobalVarI64(unsigned globalDataOffset, RegI64 r)
{
ScratchI32 tmp(*this);
- loadFromFramePtr(tmp, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
+ masm.loadWasmTlsRegFromFrame(tmp);
masm.store64(r, Address(tmp, globalToTlsOffset(globalDataOffset)));
}
void storeGlobalVarF32(unsigned globalDataOffset, RegF32 r)
{
ScratchI32 tmp(*this);
- loadFromFramePtr(tmp, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
+ masm.loadWasmTlsRegFromFrame(tmp);
masm.storeFloat32(r, Address(tmp, globalToTlsOffset(globalDataOffset)));
}
void storeGlobalVarF64(unsigned globalDataOffset, RegF64 r)
{
ScratchI32 tmp(*this);
- loadFromFramePtr(tmp, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
+ masm.loadWasmTlsRegFromFrame(tmp);
masm.storeDouble(r, Address(tmp, globalToTlsOffset(globalDataOffset)));
}
@@ -5187,8 +5165,15 @@ BaseCompiler::sniffConditionalControlCmp(Cond compareOp, ValType operandType)
MOZ_ASSERT(latentOp_ == LatentOp::None, "Latent comparison state not properly reset");
switch (iter_.peekOp()) {
- case uint16_t(Op::BrIf):
case uint16_t(Op::Select):
+#ifdef JS_CODEGEN_X86
+ // On x86, with only 5 available registers, a latent i64 binary
+ // comparison takes 4 leaving only 1 which is not enough for select.
+ if (operandType == ValType::I64)
+ return false;
+#endif
+ MOZ_FALLTHROUGH;
+ case uint16_t(Op::BrIf):
case uint16_t(Op::If):
setLatentCompare(compareOp, operandType);
return true;
@@ -5804,11 +5789,8 @@ BaseCompiler::emitCallArgs(const ValTypeVector& argTypes, FunctionCall& baseline
for (size_t i = 0; i < numArgs; ++i)
passArg(baselineCall, argTypes[i], peek(numArgs - 1 - i));
- // Pass the TLS pointer as a hidden argument in WasmTlsReg. Load
- // it directly out if its stack slot so we don't interfere with
- // the stk_.
if (baselineCall.loadTlsBefore)
- loadFromFramePtr(WasmTlsReg, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
+ masm.loadWasmTlsRegFromFrame();
return true;
}
@@ -6450,7 +6432,7 @@ BaseCompiler::maybeLoadTlsForAccess(bool omitBoundsCheck)
RegI32 tls = invalidI32();
if (needTlsForAccess(omitBoundsCheck)) {
tls = needI32();
- loadFromFramePtr(tls, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
+ masm.loadWasmTlsRegFromFrame(tls);
}
return tls;
}
@@ -7473,7 +7455,6 @@ BaseCompiler::BaseCompiler(const ModuleEnvironment& env,
#ifdef DEBUG
scratchRegisterTaken_(false),
#endif
- tlsSlot_(0),
#ifdef JS_CODEGEN_X64
specific_rax(RegI64(Register64(rax))),
specific_rcx(RegI64(Register64(rcx))),
@@ -7511,6 +7492,7 @@ BaseCompiler::BaseCompiler(const ModuleEnvironment& env,
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
availGPR_.take(HeapReg);
#endif
+ availGPR_.take(FramePointer);
#ifdef DEBUG
setupRegisterLeakCheck();
@@ -7533,15 +7515,9 @@ BaseCompiler::init()
const ValTypeVector& args = func_.sig().args();
- // localInfo_ contains an entry for every local in locals_, followed by
- // entries for special locals. Currently the only special local is the TLS
- // pointer.
- tlsSlot_ = locals_.length();
- if (!localInfo_.resize(locals_.length() + 1))
+ if (!localInfo_.resize(locals_.length()))
return false;
- localInfo_[tlsSlot_].init(MIRType::Pointer, TlsSlotOffset);
-
BaseLocalIter i(locals_, args.length(), debugEnabled_);
varLow_ = i.reservedSize();
for (; !i.done() && i.index() < args.length(); i++) {
diff --git a/js/src/wasm/WasmCode.cpp b/js/src/wasm/WasmCode.cpp
index 4d928ceaeda3..697390e2b384 100644
--- a/js/src/wasm/WasmCode.cpp
+++ b/js/src/wasm/WasmCode.cpp
@@ -111,7 +111,7 @@ StaticallyLink(CodeSegment& cs, const LinkData& linkData, JSContext* cx)
const Uint32Vector& offsets = linkData.symbolicLinks[imm];
for (size_t i = 0; i < offsets.length(); i++) {
uint8_t* patchAt = cs.base() + offsets[i];
- void* target = AddressOf(imm, cx);
+ void* target = AddressOf(imm);
Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
PatchedImmPtr(target),
PatchedImmPtr((void*)-1));
@@ -299,60 +299,44 @@ FuncImport::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
CodeRange::CodeRange(Kind kind, Offsets offsets)
: begin_(offsets.begin),
- profilingReturn_(0),
+ ret_(0),
end_(offsets.end),
funcIndex_(0),
funcLineOrBytecode_(0),
- funcBeginToTableEntry_(0),
- funcBeginToTableProfilingJump_(0),
- funcBeginToNonProfilingEntry_(0),
- funcProfilingJumpToProfilingReturn_(0),
- funcProfilingEpilogueToProfilingReturn_(0),
+ funcBeginToNormalEntry_(0),
kind_(kind)
{
MOZ_ASSERT(begin_ <= end_);
- MOZ_ASSERT(kind_ == Entry || kind_ == Inline ||
+ MOZ_ASSERT(kind_ == Entry || kind_ == Inline || kind_ == Throw ||
kind_ == FarJumpIsland || kind_ == DebugTrap);
}
-CodeRange::CodeRange(Kind kind, ProfilingOffsets offsets)
+CodeRange::CodeRange(Kind kind, CallableOffsets offsets)
: begin_(offsets.begin),
- profilingReturn_(offsets.profilingReturn),
+ ret_(offsets.ret),
end_(offsets.end),
funcIndex_(0),
funcLineOrBytecode_(0),
- funcBeginToTableEntry_(0),
- funcBeginToTableProfilingJump_(0),
- funcBeginToNonProfilingEntry_(0),
- funcProfilingJumpToProfilingReturn_(0),
- funcProfilingEpilogueToProfilingReturn_(0),
+ funcBeginToNormalEntry_(0),
kind_(kind)
{
- MOZ_ASSERT(begin_ < profilingReturn_);
- MOZ_ASSERT(profilingReturn_ < end_);
+ MOZ_ASSERT(begin_ < ret_);
+ MOZ_ASSERT(ret_ < end_);
MOZ_ASSERT(kind_ == ImportJitExit || kind_ == ImportInterpExit || kind_ == TrapExit);
}
CodeRange::CodeRange(uint32_t funcIndex, uint32_t funcLineOrBytecode, FuncOffsets offsets)
: begin_(offsets.begin),
- profilingReturn_(offsets.profilingReturn),
+ ret_(offsets.ret),
end_(offsets.end),
funcIndex_(funcIndex),
funcLineOrBytecode_(funcLineOrBytecode),
- funcBeginToTableEntry_(offsets.tableEntry - begin_),
- funcBeginToTableProfilingJump_(offsets.tableProfilingJump - begin_),
- funcBeginToNonProfilingEntry_(offsets.nonProfilingEntry - begin_),
- funcProfilingJumpToProfilingReturn_(profilingReturn_ - offsets.profilingJump),
- funcProfilingEpilogueToProfilingReturn_(profilingReturn_ - offsets.profilingEpilogue),
+ funcBeginToNormalEntry_(offsets.normalEntry - begin_),
kind_(Function)
{
- MOZ_ASSERT(begin_ < profilingReturn_);
- MOZ_ASSERT(profilingReturn_ < end_);
- MOZ_ASSERT(offsets.tableEntry - begin_ <= UINT8_MAX);
- MOZ_ASSERT(offsets.tableProfilingJump - begin_ <= UINT8_MAX);
- MOZ_ASSERT(offsets.nonProfilingEntry - begin_ <= UINT8_MAX);
- MOZ_ASSERT(profilingReturn_ - offsets.profilingJump <= UINT8_MAX);
- MOZ_ASSERT(profilingReturn_ - offsets.profilingEpilogue <= UINT8_MAX);
+ MOZ_ASSERT(begin_ < ret_);
+ MOZ_ASSERT(ret_ < end_);
+ MOZ_ASSERT(offsets.normalEntry - begin_ <= UINT8_MAX);
}
static size_t
@@ -413,7 +397,6 @@ Metadata::serializedSize() const
SerializedPodVectorSize(memoryAccesses) +
SerializedPodVectorSize(codeRanges) +
SerializedPodVectorSize(callSites) +
- SerializedPodVectorSize(callThunks) +
SerializedPodVectorSize(funcNames) +
SerializedPodVectorSize(customSections) +
filename.serializedSize();
@@ -434,7 +417,6 @@ Metadata::serialize(uint8_t* cursor) const
cursor = SerializePodVector(cursor, memoryAccesses);
cursor = SerializePodVector(cursor, codeRanges);
cursor = SerializePodVector(cursor, callSites);
- cursor = SerializePodVector(cursor, callThunks);
cursor = SerializePodVector(cursor, funcNames);
cursor = SerializePodVector(cursor, customSections);
cursor = filename.serialize(cursor);
@@ -453,7 +435,6 @@ Metadata::deserialize(const uint8_t* cursor)
(cursor = DeserializePodVector(cursor, &memoryAccesses)) &&
(cursor = DeserializePodVector(cursor, &codeRanges)) &&
(cursor = DeserializePodVector(cursor, &callSites)) &&
- (cursor = DeserializePodVector(cursor, &callThunks)) &&
(cursor = DeserializePodVector(cursor, &funcNames)) &&
(cursor = DeserializePodVector(cursor, &customSections)) &&
(cursor = filename.deserialize(cursor));
@@ -476,7 +457,6 @@ Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
memoryAccesses.sizeOfExcludingThis(mallocSizeOf) +
codeRanges.sizeOfExcludingThis(mallocSizeOf) +
callSites.sizeOfExcludingThis(mallocSizeOf) +
- callThunks.sizeOfExcludingThis(mallocSizeOf) +
funcNames.sizeOfExcludingThis(mallocSizeOf) +
customSections.sizeOfExcludingThis(mallocSizeOf) +
filename.sizeOfExcludingThis(mallocSizeOf);
@@ -578,7 +558,6 @@ Code::Code(UniqueCodeSegment segment,
: segment_(Move(segment)),
metadata_(&metadata),
maybeBytecode_(maybeBytecode),
- profilingEnabled_(false),
enterAndLeaveFrameTrapsCounter_(0)
{
MOZ_ASSERT_IF(metadata_->debugEnabled, maybeBytecode);
@@ -986,77 +965,67 @@ Code::clearBreakpointsIn(JSContext* cx, WasmInstanceObject* instance, js::Debugg
}
-bool
-Code::ensureProfilingState(JSRuntime* rt, bool newProfilingEnabled)
+// When enabled, generate profiling labels for every name in funcNames_ that is
+// the name of some Function CodeRange. This involves malloc() so do it now
+// since, once we start sampling, we'll be in a signal-handling context where we
+// cannot malloc.
+void
+Code::ensureProfilingLabels(bool profilingEnabled)
{
- if (profilingEnabled_ == newProfilingEnabled)
- return true;
+ if (!profilingEnabled) {
+ profilingLabels_.clear();
+ return;
+ }
- // When enabled, generate profiling labels for every name in funcNames_
- // that is the name of some Function CodeRange. This involves malloc() so
- // do it now since, once we start sampling, we'll be in a signal-handing
- // context where we cannot malloc.
- if (newProfilingEnabled) {
- for (const CodeRange& codeRange : metadata_->codeRanges) {
- if (!codeRange.isFunction())
- continue;
+ if (!profilingLabels_.empty())
+ return;
- ToCStringBuf cbuf;
- const char* bytecodeStr = NumberToCString(nullptr, &cbuf, codeRange.funcLineOrBytecode());
- MOZ_ASSERT(bytecodeStr);
+ for (const CodeRange& codeRange : metadata_->codeRanges) {
+ if (!codeRange.isFunction())
+ continue;
- UTF8Bytes name;
- if (!getFuncName(codeRange.funcIndex(), &name) || !name.append(" (", 2))
- return false;
+ ToCStringBuf cbuf;
+ const char* bytecodeStr = NumberToCString(nullptr, &cbuf, codeRange.funcLineOrBytecode());
+ MOZ_ASSERT(bytecodeStr);
- if (const char* filename = metadata_->filename.get()) {
- if (!name.append(filename, strlen(filename)))
- return false;
- } else {
- if (!name.append('?'))
- return false;
- }
+ UTF8Bytes name;
+ if (!getFuncName(codeRange.funcIndex(), &name) || !name.append(" (", 2))
+ return;
- if (!name.append(':') ||
- !name.append(bytecodeStr, strlen(bytecodeStr)) ||
- !name.append(")\0", 2))
- {
- return false;
- }
-
- UniqueChars label(name.extractOrCopyRawBuffer());
- if (!label)
- return false;
-
- if (codeRange.funcIndex() >= funcLabels_.length()) {
- if (!funcLabels_.resize(codeRange.funcIndex() + 1))
- return false;
- }
-
- funcLabels_[codeRange.funcIndex()] = Move(label);
+ if (const char* filename = metadata_->filename.get()) {
+ if (!name.append(filename, strlen(filename)))
+ return;
+ } else {
+ if (!name.append('?'))
+ return;
}
- } else {
- funcLabels_.clear();
+
+ if (!name.append(':') ||
+ !name.append(bytecodeStr, strlen(bytecodeStr)) ||
+ !name.append(")\0", 2))
+ {
+ return;
+ }
+
+ UniqueChars label(name.extractOrCopyRawBuffer());
+ if (!label)
+ return;
+
+ if (codeRange.funcIndex() >= profilingLabels_.length()) {
+ if (!profilingLabels_.resize(codeRange.funcIndex() + 1))
+ return;
+ }
+
+ profilingLabels_[codeRange.funcIndex()] = Move(label);
}
+}
- // Only mutate the code after the fallible operations are complete to avoid
- // the need to rollback.
- profilingEnabled_ = newProfilingEnabled;
-
- {
- AutoWritableJitCode awjc(segment_->base(), segment_->length());
- AutoFlushICache afc("Code::ensureProfilingState");
- AutoFlushICache::setRange(uintptr_t(segment_->base()), segment_->length());
-
- for (const CallSite& callSite : metadata_->callSites)
- ToggleProfiling(*this, callSite, newProfilingEnabled);
- for (const CallThunk& callThunk : metadata_->callThunks)
- ToggleProfiling(*this, callThunk, newProfilingEnabled);
- for (const CodeRange& codeRange : metadata_->codeRanges)
- ToggleProfiling(*this, codeRange, newProfilingEnabled);
- }
-
- return true;
+const char*
+Code::profilingLabel(uint32_t funcIndex) const
+{
+ if (funcIndex >= profilingLabels_.length() || !profilingLabels_[funcIndex])
+ return "?";
+ return profilingLabels_[funcIndex].get();
}
void
diff --git a/js/src/wasm/WasmCode.h b/js/src/wasm/WasmCode.h
index 6c26ec8043a6..0acb8c3f3c9d 100644
--- a/js/src/wasm/WasmCode.h
+++ b/js/src/wasm/WasmCode.h
@@ -57,9 +57,6 @@ class CodeSegment
uint8_t* outOfBoundsCode_;
uint8_t* unalignedAccessCode_;
- // The profiling mode may be changed dynamically.
- bool profilingEnabled_;
-
public:
#ifdef MOZ_VTUNE
unsigned vtune_method_id_; // Zero if unset.
@@ -242,28 +239,24 @@ class CodeRange
DebugTrap, // calls C++ to handle debug event such as
// enter/leave frame or breakpoint
FarJumpIsland, // inserted to connect otherwise out-of-range insns
- Inline // stub that is jumped-to, not called, and thus
- // replaces/loses preceding innermost frame
+ Inline, // stub that is jumped-to within prologue/epilogue
+ Throw // special stack-unwinding stub
};
private:
// All fields are treated as cacheable POD:
uint32_t begin_;
- uint32_t profilingReturn_;
+ uint32_t ret_;
uint32_t end_;
uint32_t funcIndex_;
uint32_t funcLineOrBytecode_;
- uint8_t funcBeginToTableEntry_;
- uint8_t funcBeginToTableProfilingJump_;
- uint8_t funcBeginToNonProfilingEntry_;
- uint8_t funcProfilingJumpToProfilingReturn_;
- uint8_t funcProfilingEpilogueToProfilingReturn_;
+ uint8_t funcBeginToNormalEntry_;
Kind kind_ : 8;
public:
CodeRange() = default;
CodeRange(Kind kind, Offsets offsets);
- CodeRange(Kind kind, ProfilingOffsets offsets);
+ CodeRange(Kind kind, CallableOffsets offsets);
CodeRange(uint32_t funcIndex, uint32_t lineOrBytecode, FuncOffsets offsets);
// All CodeRanges have a begin and end.
@@ -293,41 +286,30 @@ class CodeRange
bool isInline() const {
return kind() == Inline;
}
+ bool isThunk() const {
+ return kind() == FarJumpIsland;
+ }
- // Every CodeRange except entry and inline stubs has a profiling return
- // which is used for asynchronous profiling to determine the frame pointer.
+ // Every CodeRange except entry and inline stubs is callable and has a
+ // return statement. Asynchronous frame iteration needs to know the offset
+ // of the return instruction to calculate the frame pointer.
- uint32_t profilingReturn() const {
+ uint32_t ret() const {
MOZ_ASSERT(isFunction() || isImportExit() || isTrapExit());
- return profilingReturn_;
+ return ret_;
}
- // Functions have offsets which allow patching to selectively execute
- // profiling prologues/epilogues.
+ // Function CodeRanges have two entry points: one for normal calls (with a
+ // known signature) and one for table calls (which involves dynamic
+ // signature checking).
- uint32_t funcProfilingEntry() const {
- MOZ_ASSERT(isFunction());
- return begin();
- }
uint32_t funcTableEntry() const {
MOZ_ASSERT(isFunction());
- return begin_ + funcBeginToTableEntry_;
+ return begin_;
}
- uint32_t funcTableProfilingJump() const {
+ uint32_t funcNormalEntry() const {
MOZ_ASSERT(isFunction());
- return begin_ + funcBeginToTableProfilingJump_;
- }
- uint32_t funcNonProfilingEntry() const {
- MOZ_ASSERT(isFunction());
- return begin_ + funcBeginToNonProfilingEntry_;
- }
- uint32_t funcProfilingJump() const {
- MOZ_ASSERT(isFunction());
- return profilingReturn_ - funcProfilingJumpToProfilingReturn_;
- }
- uint32_t funcProfilingEpilogue() const {
- MOZ_ASSERT(isFunction());
- return profilingReturn_ - funcProfilingEpilogueToProfilingReturn_;
+ return begin_ + funcBeginToNormalEntry_;
}
uint32_t funcIndex() const {
MOZ_ASSERT(isFunction());
@@ -354,25 +336,6 @@ class CodeRange
WASM_DECLARE_POD_VECTOR(CodeRange, CodeRangeVector)
-// A CallThunk describes the offset and target of thunks so that they may be
-// patched at runtime when profiling is toggled. Thunks are emitted to connect
-// callsites that are too far away from callees to fit in a single call
-// instruction's relative offset.
-
-struct CallThunk
-{
- uint32_t offset;
- union {
- uint32_t funcIndex;
- uint32_t codeRangeIndex;
- } u;
-
- CallThunk(uint32_t offset, uint32_t funcIndex) : offset(offset) { u.funcIndex = funcIndex; }
- CallThunk() = default;
-};
-
-WASM_DECLARE_POD_VECTOR(CallThunk, CallThunkVector)
-
// A wasm module can either use no memory, a unshared memory (ArrayBuffer) or
// shared memory (SharedArrayBuffer).
@@ -463,7 +426,6 @@ struct Metadata : ShareableBase, MetadataCacheablePod
MemoryAccessVector memoryAccesses;
CodeRangeVector codeRanges;
CallSiteVector callSites;
- CallThunkVector callThunks;
NameInBytecodeVector funcNames;
CustomSectionVector customSections;
CacheableChars filename;
@@ -560,8 +522,9 @@ class Code
const SharedMetadata metadata_;
const SharedBytes maybeBytecode_;
UniqueGeneratedSourceMap maybeSourceMap_;
- CacheableCharsVector funcLabels_;
- bool profilingEnabled_;
+
+ // Mutated at runtime:
+ CacheableCharsVector profilingLabels_;
// State maintained when debugging is enabled:
@@ -602,15 +565,11 @@ class Code
bool getOffsetLocation(JSContext* cx, uint32_t offset, bool* found, size_t* lineno, size_t* column);
bool totalSourceLines(JSContext* cx, uint32_t* count);
- // Each Code has a profiling mode that is updated to match the runtime's
- // profiling mode when there are no other activations of the code live on
- // the stack. Once in profiling mode, ProfilingFrameIterator can be used to
- // asynchronously walk the stack. Otherwise, the ProfilingFrameIterator will
- // skip any activations of this code.
+ // To save memory, profilingLabels_ are generated lazily when profiling mode
+ // is enabled.
- MOZ_MUST_USE bool ensureProfilingState(JSRuntime* rt, bool enabled);
- bool profilingEnabled() const { return profilingEnabled_; }
- const char* profilingLabel(uint32_t funcIndex) const { return funcLabels_[funcIndex].get(); }
+ void ensureProfilingLabels(bool profilingEnabled);
+ const char* profilingLabel(uint32_t funcIndex) const;
// The Code can track enter/leave frame events. Any such event triggers
// debug trap. The enter/leave frame events enabled or disabled across
diff --git a/js/src/wasm/WasmCompartment.cpp b/js/src/wasm/WasmCompartment.cpp
index 60a1b566f2bd..7c6279a053c2 100644
--- a/js/src/wasm/WasmCompartment.cpp
+++ b/js/src/wasm/WasmCompartment.cpp
@@ -29,13 +29,12 @@ using namespace wasm;
Compartment::Compartment(Zone* zone)
: mutatingInstances_(false),
- activationCount_(0),
- profilingEnabled_(false)
+ interruptedCount_(0)
{}
Compartment::~Compartment()
{
- MOZ_ASSERT(activationCount_ == 0);
+ MOZ_ASSERT(interruptedCount_ == 0);
MOZ_ASSERT(instances_.empty());
MOZ_ASSERT(!mutatingInstances_);
}
@@ -58,10 +57,14 @@ void
Compartment::trace(JSTracer* trc)
{
// A WasmInstanceObject that was initially reachable when called can become
- // unreachable while executing on the stack. Since wasm does not otherwise
- // scan the stack during GC to identify live instances, we mark all instance
- // objects live if there is any running wasm in the compartment.
- if (activationCount_) {
+ // unreachable while executing on the stack. When execution in a compartment
+ // is interrupted inside wasm code, wasm::TraceActivations() may miss frames
+ // due to its use of FrameIterator which assumes wasm has exited through an
+ // exit stub. This could be fixed by changing wasm::TraceActivations() to
+ // use a ProfilingFrameIterator, which inspects register state, but for now
+ // just mark everything in the compartment in this super-rare case.
+
+ if (interruptedCount_) {
for (Instance* i : instances_)
i->trace(trc);
}
@@ -73,8 +76,7 @@ Compartment::registerInstance(JSContext* cx, HandleWasmInstanceObject instanceOb
Instance& instance = instanceObj->instance();
MOZ_ASSERT(this == &instance.compartment()->wasm);
- if (!instance.ensureProfilingState(cx, profilingEnabled_))
- return false;
+ instance.code().ensureProfilingLabels(cx->runtime()->geckoProfiler().enabled());
size_t index;
if (BinarySearchIf(instances_, 0, instances_.length(), InstanceComparator(instance), &index))
@@ -139,38 +141,22 @@ Compartment::lookupInstanceDeprecated(const void* pc) const
return instances_[index];
}
-bool
-Compartment::ensureProfilingState(JSContext* cx)
+void
+Compartment::setInterrupted(bool interrupted)
{
- bool newProfilingEnabled = cx->runtime()->geckoProfiler().enabled();
- if (profilingEnabled_ == newProfilingEnabled)
- return true;
-
- // Since one Instance can call another Instance in the same compartment
- // directly without calling through Instance::callExport(), when profiling
- // is enabled, enable it for the entire compartment at once. It is only safe
- // to enable profiling when the wasm is not on the stack, so delay enabling
- // profiling until there are no live WasmActivations in this compartment.
-
- if (activationCount_ > 0)
- return true;
-
- for (Instance* instance : instances_) {
- if (!instance->ensureProfilingState(cx, newProfilingEnabled))
- return false;
+ if (interrupted) {
+ interruptedCount_++;
+ } else {
+ MOZ_ASSERT(interruptedCount_ > 0);
+ interruptedCount_--;
}
-
- profilingEnabled_ = newProfilingEnabled;
- return true;
}
-bool
-Compartment::profilingEnabled() const
+void
+Compartment::ensureProfilingLabels(bool profilingEnabled)
{
- // Profiling can asynchronously interrupt the mutation of the instances_
- // vector which is used by lookupCode() during stack-walking. To handle
- // this rare case, disable profiling during mutation.
- return profilingEnabled_ && !mutatingInstances_;
+ for (Instance* instance : instances_)
+ instance->code().ensureProfilingLabels(profilingEnabled);
}
void
diff --git a/js/src/wasm/WasmCompartment.h b/js/src/wasm/WasmCompartment.h
index dcdd75d0c317..3ef43f12dedd 100644
--- a/js/src/wasm/WasmCompartment.h
+++ b/js/src/wasm/WasmCompartment.h
@@ -39,8 +39,7 @@ class Compartment
{
InstanceVector instances_;
volatile bool mutatingInstances_;
- size_t activationCount_;
- bool profilingEnabled_;
+ size_t interruptedCount_;
friend class js::WasmActivation;
@@ -89,12 +88,14 @@ class Compartment
Instance* lookupInstanceDeprecated(const void* pc) const;
- // To ensure profiling is enabled (so that wasm frames are not lost in
- // profiling callstacks), ensureProfilingState must be called before calling
- // the first wasm function in a compartment.
+ // The wasm::Compartment must be notified when execution is interrupted
+ // while executing in wasm code in this compartment.
- bool ensureProfilingState(JSContext* cx);
- bool profilingEnabled() const;
+ void setInterrupted(bool interrupted);
+
+ // Ensure all Instances in this JSCompartment have profiling labels created.
+
+ void ensureProfilingLabels(bool profilingEnabled);
// about:memory reporting
diff --git a/js/src/wasm/WasmDebugFrame.cpp b/js/src/wasm/WasmDebugFrame.cpp
deleted file mode 100644
index 274677d92068..000000000000
--- a/js/src/wasm/WasmDebugFrame.cpp
+++ /dev/null
@@ -1,136 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- *
- * Copyright 2016 Mozilla Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "wasm/WasmDebugFrame.h"
-
-#include "vm/EnvironmentObject.h"
-#include "wasm/WasmBaselineCompile.h"
-#include "wasm/WasmInstance.h"
-
-#include "jsobjinlines.h"
-
-using namespace js;
-using namespace js::wasm;
-
-Instance*
-DebugFrame::instance() const
-{
- return tlsData_->instance;
-}
-
-GlobalObject*
-DebugFrame::global() const
-{
- return &instance()->object()->global();
-}
-
-JSObject*
-DebugFrame::environmentChain() const
-{
- return &global()->lexicalEnvironment();
-}
-
-void
-DebugFrame::observeFrame(JSContext* cx)
-{
- if (observing_)
- return;
-
- instance()->code().adjustEnterAndLeaveFrameTrapsState(cx, /* enabled = */ true);
- observing_ = true;
-}
-
-void
-DebugFrame::leaveFrame(JSContext* cx)
-{
- if (!observing_)
- return;
-
- instance()->code().adjustEnterAndLeaveFrameTrapsState(cx, /* enabled = */ false);
- observing_ = false;
-}
-
-void
-DebugFrame::clearReturnJSValue()
-{
- hasCachedReturnJSValue_ = true;
- cachedReturnJSValue_.setUndefined();
-}
-
-void
-DebugFrame::updateReturnJSValue()
-{
- hasCachedReturnJSValue_ = true;
- ExprType returnType = instance()->code().debugGetResultType(funcIndex());
- switch (returnType) {
- case ExprType::Void:
- cachedReturnJSValue_.setUndefined();
- break;
- case ExprType::I32:
- cachedReturnJSValue_.setInt32(resultI32_);
- break;
- case ExprType::I64:
- // Just display as a Number; it's ok if we lose some precision
- cachedReturnJSValue_.setDouble((double)resultI64_);
- break;
- case ExprType::F32:
- cachedReturnJSValue_.setDouble(JS::CanonicalizeNaN(resultF32_));
- break;
- case ExprType::F64:
- cachedReturnJSValue_.setDouble(JS::CanonicalizeNaN(resultF64_));
- break;
- default:
- MOZ_CRASH("result type");
- }
-}
-
-bool
-DebugFrame::getLocal(uint32_t localIndex, MutableHandleValue vp)
-{
- ValTypeVector locals;
- size_t argsLength;
- if (!instance()->code().debugGetLocalTypes(funcIndex(), &locals, &argsLength))
- return false;
-
- BaseLocalIter iter(locals, argsLength, /* debugEnabled = */ true);
- while (!iter.done() && iter.index() < localIndex)
- iter++;
- MOZ_ALWAYS_TRUE(!iter.done());
-
- uint8_t* frame = static_cast<uint8_t*>((void*)this) + offsetOfFrame();
- void* dataPtr = frame - iter.frameOffset();
- switch (iter.mirType()) {
- case jit::MIRType::Int32:
- vp.set(Int32Value(*static_cast<int32_t*>(dataPtr)));
- break;
- case jit::MIRType::Int64:
- // Just display as a Number; it's ok if we lose some precision
- vp.set(NumberValue((double)*static_cast<int64_t*>(dataPtr)));
- break;
- case jit::MIRType::Float32:
- vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<float*>(dataPtr))));
- break;
- case jit::MIRType::Double:
- vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<double*>(dataPtr))));
- break;
- default:
- MOZ_CRASH("local type");
- }
- return true;
-}
-
diff --git a/js/src/wasm/WasmDebugFrame.h b/js/src/wasm/WasmDebugFrame.h
deleted file mode 100644
index 963f54026365..000000000000
--- a/js/src/wasm/WasmDebugFrame.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * vim: set ts=8 sts=4 et sw=4 tw=99:
- *
- * Copyright 2016 Mozilla Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef wasmdebugframe_js_h
-#define wasmdebugframe_js_h
-
-#include "gc/Barrier.h"
-#include "js/RootingAPI.h"
-#include "js/TracingAPI.h"
-#include "wasm/WasmTypes.h"
-
-namespace js {
-
-class WasmFunctionCallObject;
-
-namespace wasm {
-
-class DebugFrame
-{
- union
- {
- int32_t resultI32_;
- int64_t resultI64_;
- float resultF32_;
- double resultF64_;
- };
-
- js::Value cachedReturnJSValue_;
-
- // The fields below are initialized by the baseline compiler.
- uint32_t funcIndex_;
- uint32_t reserved0_;
-
- union
- {
- struct
- {
- bool observing_ : 1;
- bool isDebuggee_ : 1;
- bool prevUpToDate_ : 1;
- bool hasCachedSavedFrame_ : 1;
- bool hasCachedReturnJSValue_ : 1;
- };
- void* reserved1_;
- };
-
- TlsData* tlsData_;
- Frame frame_;
-
- explicit DebugFrame() {}
-
- void StaticAsserts() {
- // VS2017 doesn't consider offsetOfResults() etc. to be constexpr, so we have to use
- // offsetof directly. These asserts can't be at class-level because the type is incomplete.
- static_assert(offsetof(DebugFrame, resultI32_) == 0, "results shall be at offset 0");
- static_assert(offsetof(DebugFrame, tlsData_) + sizeof(TlsData*) ==
- offsetof(DebugFrame, frame_),
- "TLS pointer must be a field just before the wasm frame");
- static_assert(sizeof(DebugFrame) % 8 == 0 && offsetof(DebugFrame, frame_) % 8 == 0,
- "DebugFrame and its portion is 8-bytes aligned for AbstractFramePtr");
- }
-
- public:
- inline uint32_t funcIndex() const { return funcIndex_; }
- inline TlsData* tlsData() const { return tlsData_; }
- inline Frame& frame() { return frame_; }
-
- Instance* instance() const;
- GlobalObject* global() const;
-
- JSObject* environmentChain() const;
-
- void observeFrame(JSContext* cx);
- void leaveFrame(JSContext* cx);
-
- void trace(JSTracer* trc);
-
- // These are opaque boolean flags used by the debugger and
- // saved-frame-chains code.
- inline bool isDebuggee() const { return isDebuggee_; }
- inline void setIsDebuggee() { isDebuggee_ = true; }
- inline void unsetIsDebuggee() { isDebuggee_ = false; }
-
- inline bool prevUpToDate() const { return prevUpToDate_; }
- inline void setPrevUpToDate() { prevUpToDate_ = true; }
- inline void unsetPrevUpToDate() { prevUpToDate_ = false; }
-
- inline bool hasCachedSavedFrame() const { return hasCachedSavedFrame_; }
- inline void setHasCachedSavedFrame() { hasCachedSavedFrame_ = true; }
-
- inline void* resultsPtr() { return &resultI32_; }
-
- inline HandleValue returnValue() const {
- MOZ_ASSERT(hasCachedReturnJSValue_);
- return HandleValue::fromMarkedLocation(&cachedReturnJSValue_);
- }
- void updateReturnJSValue();
- void clearReturnJSValue();
-
- bool getLocal(uint32_t localIndex, MutableHandleValue vp);
-
- static constexpr size_t offsetOfResults() { return offsetof(DebugFrame, resultI32_); }
- static constexpr size_t offsetOfFlagsWord() { return offsetof(DebugFrame, reserved1_); }
- static constexpr size_t offsetOfFuncIndex() { return offsetof(DebugFrame, funcIndex_); }
- static constexpr size_t offsetOfTlsData() { return offsetof(DebugFrame, tlsData_); }
- static constexpr size_t offsetOfFrame() { return offsetof(DebugFrame, frame_); }
-};
-
-} // namespace wasm
-} // namespace js
-
-#endif // wasmdebugframe_js_h
diff --git a/js/src/wasm/WasmFrameIterator.cpp b/js/src/wasm/WasmFrameIterator.cpp
index 232774966aac..35dde46a53a7 100644
--- a/js/src/wasm/WasmFrameIterator.cpp
+++ b/js/src/wasm/WasmFrameIterator.cpp
@@ -18,7 +18,6 @@
#include "wasm/WasmFrameIterator.h"
-#include "wasm/WasmDebugFrame.h"
#include "wasm/WasmInstance.h"
#include "jit/MacroAssembler-inl.h"
@@ -45,11 +44,10 @@ CallerFPFromFP(void* fp)
return reinterpret_cast<Frame*>(fp)->callerFP;
}
-static TlsData*
-TlsDataFromFP(void *fp)
+static DebugFrame*
+FrameToDebugFrame(void* fp)
{
- void* debugFrame = (uint8_t*)fp - DebugFrame::offsetOfFrame();
- return reinterpret_cast<DebugFrame*>(debugFrame)->tlsData();
+ return reinterpret_cast<DebugFrame*>((uint8_t*)fp - DebugFrame::offsetOfFrame());
}
FrameIterator::FrameIterator()
@@ -69,33 +67,30 @@ FrameIterator::FrameIterator(WasmActivation* activation, Unwind unwind)
code_(nullptr),
callsite_(nullptr),
codeRange_(nullptr),
- fp_(activation->fp()),
+ fp_(nullptr),
unwind_(unwind),
missingFrameMessage_(false)
{
- if (fp_) {
- settle();
+ // When execution is interrupted, the embedding may capture a stack trace.
+ // Since we've lost all the register state, we can't unwind the full stack
+ // like ProfilingFrameIterator does. However, we can recover the interrupted
+ // function via the resumePC and at least print that frame.
+ if (void* resumePC = activation->resumePC()) {
+ code_ = activation->compartment()->wasm.lookupCode(resumePC);
+ codeRange_ = code_->lookupRange(resumePC);
+ MOZ_ASSERT(codeRange_->kind() == CodeRange::Function);
+ MOZ_ASSERT(!done());
return;
}
- void* pc = activation_->resumePC();
- if (!pc) {
+ fp_ = activation->exitFP();
+
+ if (!fp_) {
MOZ_ASSERT(done());
return;
}
- code_ = activation_->compartment()->wasm.lookupCode(pc);
- MOZ_ASSERT(code_);
-
- const CodeRange* codeRange = code_->lookupRange(pc);
- MOZ_ASSERT(codeRange);
-
- if (codeRange->kind() == CodeRange::Function)
- codeRange_ = codeRange;
- else
- missingFrameMessage_ = true;
-
- MOZ_ASSERT(!done());
+ settle();
}
bool
@@ -123,38 +118,33 @@ void
FrameIterator::settle()
{
if (unwind_ == Unwind::True)
- activation_->unwindFP(fp_);
+ activation_->unwindExitFP(fp_);
void* returnAddress = ReturnAddressFromFP(fp_);
- code_ = activation_->compartment()->wasm.lookupCode(returnAddress);
- MOZ_ASSERT(code_);
+ fp_ = CallerFPFromFP(fp_);
- codeRange_ = code_->lookupRange(returnAddress);
- MOZ_ASSERT(codeRange_);
-
- if (codeRange_->kind() == CodeRange::Entry) {
- fp_ = nullptr;
+ if (!fp_) {
code_ = nullptr;
codeRange_ = nullptr;
callsite_ = nullptr;
if (unwind_ == Unwind::True)
- activation_->unwindFP(nullptr);
+ activation_->unwindExitFP(nullptr);
MOZ_ASSERT(done());
return;
}
- MOZ_RELEASE_ASSERT(codeRange_->kind() == CodeRange::Function);
+ code_ = activation_->compartment()->wasm.lookupCode(returnAddress);
+ MOZ_ASSERT(code_);
+
+ codeRange_ = code_->lookupRange(returnAddress);
+ MOZ_ASSERT(codeRange_->kind() == CodeRange::Function);
callsite_ = code_->lookupCallSite(returnAddress);
MOZ_ASSERT(callsite_);
- DebugOnly<uint8_t*> oldfp = fp_;
- fp_ += callsite_->stackDepth();
- MOZ_ASSERT_IF(code_->profilingEnabled(), fp_ == CallerFPFromFP(oldfp));
-
MOZ_ASSERT(!done());
}
@@ -187,8 +177,7 @@ FrameIterator::functionDisplayAtom() const
JSContext* cx = activation_->cx();
if (missingFrameMessage_) {
- const char* msg = "asm.js/wasm frames may be missing; enable the profiler before running "
- "to see all frames";
+ const char* msg = "asm.js/wasm frames may be missing below this one";
JSAtom* atom = Atomize(cx, msg, strlen(msg));
if (!atom) {
cx->clearPendingException();
@@ -217,11 +206,19 @@ FrameIterator::lineOrBytecode() const
: (codeRange_ ? codeRange_->funcLineOrBytecode() : 0);
}
+bool
+FrameIterator::hasInstance() const
+{
+ MOZ_ASSERT(!done());
+ return !!fp_;
+}
+
Instance*
FrameIterator::instance() const
{
- MOZ_ASSERT(!done() && debugEnabled());
- return TlsDataFromFP(fp_)->instance;
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(hasInstance());
+ return FrameToDebugFrame(fp_)->instance();
}
bool
@@ -238,9 +235,7 @@ DebugFrame*
FrameIterator::debugFrame() const
{
MOZ_ASSERT(!done() && debugEnabled());
- // The fp() points to wasm::Frame.
- void* buf = static_cast<uint8_t*>(fp_) - DebugFrame::offsetOfFrame();
- return static_cast<DebugFrame*>(buf);
+ return FrameToDebugFrame(fp_);
}
const CallSite*
@@ -255,70 +250,64 @@ FrameIterator::debugTrapCallsite() const
/*****************************************************************************/
// Prologue/epilogue code generation
-// These constants reflect statically-determined offsets in the profiling
+// These constants reflect statically-determined offsets in the
// prologue/epilogue. The offsets are dynamically asserted during code
// generation.
#if defined(JS_CODEGEN_X64)
-# if defined(DEBUG)
static const unsigned PushedRetAddr = 0;
-static const unsigned PostStorePrePopFP = 0;
-# endif
-static const unsigned PushedFP = 26;
-static const unsigned StoredFP = 33;
+static const unsigned PushedFP = 1;
+static const unsigned PushedTLS = 3;
+static const unsigned PoppedTLS = 1;
#elif defined(JS_CODEGEN_X86)
-# if defined(DEBUG)
static const unsigned PushedRetAddr = 0;
-static const unsigned PostStorePrePopFP = 0;
-# endif
-static const unsigned PushedFP = 16;
-static const unsigned StoredFP = 19;
+static const unsigned PushedFP = 1;
+static const unsigned PushedTLS = 2;
+static const unsigned PoppedTLS = 1;
#elif defined(JS_CODEGEN_ARM)
+static const unsigned BeforePushRetAddr = 0;
static const unsigned PushedRetAddr = 4;
-static const unsigned PushedFP = 28;
-static const unsigned StoredFP = 32;
-static const unsigned PostStorePrePopFP = 4;
+static const unsigned PushedFP = 8;
+static const unsigned PushedTLS = 12;
+static const unsigned PoppedTLS = 4;
#elif defined(JS_CODEGEN_ARM64)
+static const unsigned BeforePushRetAddr = 0;
static const unsigned PushedRetAddr = 0;
static const unsigned PushedFP = 0;
-static const unsigned StoredFP = 0;
-static const unsigned PostStorePrePopFP = 0;
+static const unsigned PushedTLS = 0;
+static const unsigned PoppedTLS = 0;
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
-static const unsigned PushedRetAddr = 8;
-static const unsigned PushedFP = 36;
-static const unsigned StoredFP = 40;
-static const unsigned PostStorePrePopFP = 4;
+static const unsigned BeforePushRetAddr = 0;
+static const unsigned PushedRetAddr = 4;
+static const unsigned PushedFP = 8;
+static const unsigned PushedTLS = 12;
+static const unsigned PoppedTLS = 4;
#elif defined(JS_CODEGEN_NONE)
-# if defined(DEBUG)
static const unsigned PushedRetAddr = 0;
-static const unsigned PostStorePrePopFP = 0;
-# endif
-static const unsigned PushedFP = 1;
-static const unsigned StoredFP = 1;
+static const unsigned PushedFP = 0;
+static const unsigned PushedTLS = 0;
+static const unsigned PoppedTLS = 0;
#else
# error "Unknown architecture!"
#endif
static void
-PushRetAddr(MacroAssembler& masm)
+PushRetAddr(MacroAssembler& masm, unsigned entry)
{
#if defined(JS_CODEGEN_ARM)
+ MOZ_ASSERT(masm.currentOffset() - entry == BeforePushRetAddr);
masm.push(lr);
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ MOZ_ASSERT(masm.currentOffset() - entry == BeforePushRetAddr);
masm.push(ra);
#else
// The x86/x64 call instruction pushes the return address.
#endif
}
-// Generate a prologue that maintains WasmActivation::fp as the virtual frame
-// pointer so that ProfilingFrameIterator can walk the stack at any pc in
-// generated code.
static void
-GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
- ProfilingOffsets* offsets)
+GenerateCallablePrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+ uint32_t* entry)
{
- Register scratch = ABINonArgReg0;
-
// ProfilingFrameIterator needs to know the offsets of several key
// instructions from entry. To save space, we make these offsets static
// constants and assert that they match the actual codegen below. On ARM,
@@ -329,102 +318,75 @@ GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason
AutoForbidPools afp(&masm, /* number of instructions in scope = */ 8);
#endif
- offsets->begin = masm.currentOffset();
+ *entry = masm.currentOffset();
- PushRetAddr(masm);
- MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - offsets->begin);
-
- masm.loadWasmActivationFromSymbolicAddress(scratch);
- masm.push(Address(scratch, WasmActivation::offsetOfFP()));
- MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - offsets->begin);
-
- masm.storePtr(masm.getStackPointer(), Address(scratch, WasmActivation::offsetOfFP()));
- MOZ_ASSERT_IF(!masm.oom(), StoredFP == masm.currentOffset() - offsets->begin);
+ PushRetAddr(masm, *entry);
+ MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry);
+ masm.push(FramePointer);
+ MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
+ masm.push(WasmTlsReg);
+ MOZ_ASSERT_IF(!masm.oom(), PushedTLS == masm.currentOffset() - *entry);
+ masm.moveStackPtrTo(FramePointer);
}
- if (reason != ExitReason::None)
- masm.store32(Imm32(int32_t(reason)), Address(scratch, WasmActivation::offsetOfExitReason()));
+ if (reason != ExitReason::None) {
+ Register scratch = ABINonArgReg0;
+ masm.loadWasmActivationFromTls(scratch);
+ masm.wasmAssertNonExitInvariants(scratch);
+ Address exitReason(scratch, WasmActivation::offsetOfExitReason());
+ masm.store32(Imm32(int32_t(reason)), exitReason);
+ Address exitFP(scratch, WasmActivation::offsetOfExitFP());
+ masm.storePtr(FramePointer, exitFP);
+ }
if (framePushed)
masm.subFromStackPtr(Imm32(framePushed));
}
-// Generate the inverse of GenerateProfilingPrologue.
static void
-GenerateProfilingEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
- ProfilingOffsets* offsets)
+GenerateCallableEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+ uint32_t* ret)
{
- Register scratch = ABINonArgReturnReg0;
-#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
- defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
- Register scratch2 = ABINonArgReturnReg1;
-#endif
-
if (framePushed)
masm.addToStackPtr(Imm32(framePushed));
- masm.loadWasmActivationFromSymbolicAddress(scratch);
if (reason != ExitReason::None) {
- masm.store32(Imm32(int32_t(ExitReason::None)),
- Address(scratch, WasmActivation::offsetOfExitReason()));
+ Register scratch = ABINonArgReturnReg0;
+ masm.loadWasmActivationFromTls(scratch);
+ Address exitFP(scratch, WasmActivation::offsetOfExitFP());
+ masm.storePtr(ImmWord(0), exitFP);
+ Address exitReason(scratch, WasmActivation::offsetOfExitReason());
+ masm.store32(Imm32(int32_t(ExitReason::None)), exitReason);
}
- // ProfilingFrameIterator assumes fixed offsets of the last few
- // instructions from profilingReturn, so AutoForbidPools to ensure that
- // unintended instructions are not automatically inserted.
- {
+ // Forbid pools for the same reason as described in GenerateCallablePrologue.
#if defined(JS_CODEGEN_ARM)
- AutoForbidPools afp(&masm, /* number of instructions in scope = */ 4);
+ AutoForbidPools afp(&masm, /* number of instructions in scope = */ 3);
#endif
- // sp protects the stack from clobber via asynchronous signal handlers
- // and the async interrupt exit. Since activation.fp can be read at any
- // time and still points to the current frame, be careful to only update
- // sp after activation.fp has been repointed to the caller's frame.
-#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
- defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
- masm.loadPtr(Address(masm.getStackPointer(), 0), scratch2);
- masm.storePtr(scratch2, Address(scratch, WasmActivation::offsetOfFP()));
- DebugOnly<uint32_t> prePop = masm.currentOffset();
- masm.addToStackPtr(Imm32(sizeof(void *)));
- MOZ_ASSERT_IF(!masm.oom(), PostStorePrePopFP == masm.currentOffset() - prePop);
-#else
- masm.pop(Address(scratch, WasmActivation::offsetOfFP()));
- MOZ_ASSERT(PostStorePrePopFP == 0);
-#endif
-
- offsets->profilingReturn = masm.currentOffset();
- masm.ret();
- }
+ masm.pop(WasmTlsReg);
+ DebugOnly<uint32_t> poppedTLS = masm.currentOffset();
+ masm.pop(FramePointer);
+ *ret = masm.currentOffset();
+ masm.ret();
+ MOZ_ASSERT_IF(!masm.oom(), PoppedTLS == *ret - poppedTLS);
}
-// In profiling mode, we need to maintain fp so that we can unwind the stack at
-// any pc. In non-profiling mode, the only way to observe WasmActivation::fp is
-// to call out to C++ so, as an optimization, we don't update fp. To avoid
-// recompilation when the profiling mode is toggled, we generate both prologues
-// a priori and switch between prologues when the profiling mode is toggled.
-// Specifically, ToggleProfiling patches all callsites to either call the
-// profiling or non-profiling entry point.
void
wasm::GenerateFunctionPrologue(MacroAssembler& masm, unsigned framePushed, const SigIdDesc& sigId,
FuncOffsets* offsets)
{
#if defined(JS_CODEGEN_ARM)
// Flush pending pools so they do not get dumped between the 'begin' and
- // 'entry' offsets since the difference must be less than UINT8_MAX.
+ // 'normalEntry' offsets since the difference must be less than UINT8_MAX
+ // to be stored in CodeRange::funcBeginToNormalEntry_.
masm.flushBuffer();
#endif
-
masm.haltingAlign(CodeAlignment);
- GenerateProfilingPrologue(masm, framePushed, ExitReason::None, offsets);
- Label body;
- masm.jump(&body);
-
- // Generate table entry thunk:
- masm.haltingAlign(CodeAlignment);
- offsets->tableEntry = masm.currentOffset();
+ // Generate table entry:
+ offsets->begin = masm.currentOffset();
TrapOffset trapOffset(0); // ignored by masm.wasmEmitTrapOutOfLineCode
TrapDesc trap(trapOffset, Trap::IndirectCallBadSig, masm.framePushed());
switch (sigId.kind()) {
@@ -440,66 +402,38 @@ wasm::GenerateFunctionPrologue(MacroAssembler& masm, unsigned framePushed, const
case SigIdDesc::Kind::None:
break;
}
- offsets->tableProfilingJump = masm.nopPatchableToNearJump().offset();
- // Generate normal prologue:
+ // Generate normal entry:
masm.nopAlign(CodeAlignment);
- offsets->nonProfilingEntry = masm.currentOffset();
- PushRetAddr(masm);
- masm.subFromStackPtr(Imm32(framePushed + FrameBytesAfterReturnAddress));
+ GenerateCallablePrologue(masm, framePushed, ExitReason::None, &offsets->normalEntry);
- // Prologue join point, body begin:
- masm.bind(&body);
masm.setFramePushed(framePushed);
}
-// Similar to GenerateFunctionPrologue (see comment), we generate both a
-// profiling and non-profiling epilogue a priori. When the profiling mode is
-// toggled, ToggleProfiling patches the 'profiling jump' to either be a nop
-// (falling through to the normal prologue) or a jump (jumping to the profiling
-// epilogue).
void
wasm::GenerateFunctionEpilogue(MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets)
{
MOZ_ASSERT(masm.framePushed() == framePushed);
-
-#if defined(JS_CODEGEN_ARM)
- // Flush pending pools so they do not get dumped between the profilingReturn
- // and profilingJump/profilingEpilogue offsets since the difference must be
- // less than UINT8_MAX.
- masm.flushBuffer();
-#endif
-
- // Generate a nop that is overwritten by a jump to the profiling epilogue
- // when profiling is enabled.
- offsets->profilingJump = masm.nopPatchableToNearJump().offset();
-
- // Normal epilogue:
- masm.addToStackPtr(Imm32(framePushed + FrameBytesAfterReturnAddress));
- masm.ret();
+ GenerateCallableEpilogue(masm, framePushed, ExitReason::None, &offsets->ret);
masm.setFramePushed(0);
-
- // Profiling epilogue:
- offsets->profilingEpilogue = masm.currentOffset();
- GenerateProfilingEpilogue(masm, framePushed, ExitReason::None, offsets);
}
void
wasm::GenerateExitPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
- ProfilingOffsets* offsets)
+ CallableOffsets* offsets)
{
masm.haltingAlign(CodeAlignment);
- GenerateProfilingPrologue(masm, framePushed, reason, offsets);
+ GenerateCallablePrologue(masm, framePushed, reason, &offsets->begin);
masm.setFramePushed(framePushed);
}
void
wasm::GenerateExitEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
- ProfilingOffsets* offsets)
+ CallableOffsets* offsets)
{
// Inverse of GenerateExitPrologue:
MOZ_ASSERT(masm.framePushed() == framePushed);
- GenerateProfilingEpilogue(masm, framePushed, reason, offsets);
+ GenerateCallableEpilogue(masm, framePushed, reason, &offsets->ret);
masm.setFramePushed(0);
}
@@ -527,21 +461,11 @@ ProfilingFrameIterator::ProfilingFrameIterator(const WasmActivation& activation)
stackAddress_(nullptr),
exitReason_(ExitReason::None)
{
- // If profiling hasn't been enabled for this instance, then CallerFPFromFP
- // will be trash, so ignore the entire activation. In practice, this only
- // happens if profiling is enabled while the instance is on the stack (in
- // which case profiling will be enabled when the instance becomes inactive
- // and gets called again).
- if (!activation_->compartment()->wasm.profilingEnabled()) {
- MOZ_ASSERT(done());
- return;
- }
-
- initFromFP();
+ initFromExitFP();
}
static inline void
-AssertMatchesCallSite(const WasmActivation& activation, void* callerPC, void* callerFP, void* fp)
+AssertMatchesCallSite(const WasmActivation& activation, void* callerPC, void* callerFP)
{
#ifdef DEBUG
Code* code = activation.compartment()->wasm.lookupCode(callerPC);
@@ -557,15 +481,13 @@ AssertMatchesCallSite(const WasmActivation& activation, void* callerPC, void* ca
const CallSite* callsite = code->lookupCallSite(callerPC);
MOZ_ASSERT(callsite);
-
- MOZ_ASSERT(callerFP == (uint8_t*)fp + callsite->stackDepth());
#endif
}
void
-ProfilingFrameIterator::initFromFP()
+ProfilingFrameIterator::initFromExitFP()
{
- uint8_t* fp = activation_->fp();
+ uint8_t* fp = activation_->exitFP();
stackAddress_ = fp;
// If a signal was handled while entering an activation, the frame will
@@ -600,13 +522,14 @@ ProfilingFrameIterator::initFromFP()
fp = CallerFPFromFP(fp);
callerPC_ = ReturnAddressFromFP(fp);
callerFP_ = CallerFPFromFP(fp);
- AssertMatchesCallSite(*activation_, callerPC_, callerFP_, fp);
+ AssertMatchesCallSite(*activation_, callerPC_, callerFP_);
break;
case CodeRange::ImportJitExit:
case CodeRange::ImportInterpExit:
case CodeRange::TrapExit:
case CodeRange::DebugTrap:
case CodeRange::Inline:
+ case CodeRange::Throw:
case CodeRange::FarJumpIsland:
MOZ_CRASH("Unexpected CodeRange kind");
}
@@ -615,28 +538,11 @@ ProfilingFrameIterator::initFromFP()
// This allows the variety of exit reasons to show up in the callstack.
exitReason_ = activation_->exitReason();
- // In the case of calls to builtins or asynchronous interrupts, no exit path
- // is taken so the exitReason is None. Coerce these to the Native exit
- // reason so that self-time is accounted for.
- if (exitReason_ == ExitReason::None)
- exitReason_ = ExitReason::Native;
-
MOZ_ASSERT(!done());
}
typedef JS::ProfilingFrameIterator::RegisterState RegisterState;
-static bool
-InThunk(const CodeRange& codeRange, uint32_t offsetInModule)
-{
- if (codeRange.kind() == CodeRange::FarJumpIsland)
- return true;
-
- return codeRange.isFunction() &&
- offsetInModule >= codeRange.funcTableEntry() &&
- offsetInModule < codeRange.funcNonProfilingEntry();
-}
-
ProfilingFrameIterator::ProfilingFrameIterator(const WasmActivation& activation,
const RegisterState& state)
: activation_(&activation),
@@ -647,13 +553,10 @@ ProfilingFrameIterator::ProfilingFrameIterator(const WasmActivation& activation,
stackAddress_(nullptr),
exitReason_(ExitReason::None)
{
- // If profiling hasn't been enabled for this instance, then CallerFPFromFP
- // will be trash, so ignore the entire activation. In practice, this only
- // happens if profiling is enabled while the instance is on the stack (in
- // which case profiling will be enabled when the instance becomes inactive
- // and gets called again).
- if (!activation_->compartment()->wasm.profilingEnabled()) {
- MOZ_ASSERT(done());
+ // In the case of ImportJitExit, the fp register may be temporarily
+ // clobbered on return from Ion so always use activation.fp when it is set.
+ if (activation.exitFP()) {
+ initFromExitFP();
return;
}
@@ -661,87 +564,98 @@ ProfilingFrameIterator::ProfilingFrameIterator(const WasmActivation& activation,
// exit trampoline or signal handler.
code_ = activation_->compartment()->wasm.lookupCode(state.pc);
if (!code_) {
- initFromFP();
+ MOZ_ASSERT(done());
return;
}
- // Note: fp may be null while entering and leaving the activation.
- uint8_t* fp = activation.fp();
+ // When the pc is inside the prologue/epilogue, the innermost call's Frame
+ // is not complete and thus fp points to the second-to-innermost call's
+ // Frame. Since fp can only tell you about its caller, naively unwinding
+ // while pc is in the prologue/epilogue would skip the second-to-innermost
+ // call. To avoid this problem, we use the static structure of the code in
+ // the prologue and epilogue to do the Right Thing.
+ uint8_t* fp = (uint8_t*)state.fp;
+ uint8_t* pc = (uint8_t*)state.pc;
+ void** sp = (void**)state.sp;
+
+ const CodeRange* codeRange = code_->lookupRange(pc);
+ uint32_t offsetInModule = pc - code_->segment().base();
+ MOZ_ASSERT(offsetInModule >= codeRange->begin());
+ MOZ_ASSERT(offsetInModule < codeRange->end());
+
+ // Compute the offset of the pc from the (normal) entry of the code range.
+ // The stack state of the pc for the entire table-entry is equivalent to
+ // that of the first pc of the normal-entry. Thus, we can simplify the below
+ // case analysis by redirecting all pc-in-table-entry cases to the
+ // pc-at-normal-entry case.
+ uint32_t offsetFromEntry;
+ if (codeRange->isFunction()) {
+ if (offsetInModule < codeRange->funcNormalEntry())
+ offsetFromEntry = 0;
+ else
+ offsetFromEntry = offsetInModule - codeRange->funcNormalEntry();
+ } else {
+ offsetFromEntry = offsetInModule - codeRange->begin();
+ }
- const CodeRange* codeRange = code_->lookupRange(state.pc);
switch (codeRange->kind()) {
case CodeRange::Function:
case CodeRange::FarJumpIsland:
case CodeRange::ImportJitExit:
case CodeRange::ImportInterpExit:
- case CodeRange::TrapExit: {
- // When the pc is inside the prologue/epilogue, the innermost call's
- // Frame is not complete and thus fp points to the second-to-innermost
- // call's Frame. Since fp can only tell you about its caller (via
- // ReturnAddressFromFP(fp)), naively unwinding while pc is in the
- // prologue/epilogue would skip the second-to- innermost call. To avoid
- // this problem, we use the static structure of the code in the prologue
- // and epilogue to do the Right Thing.
- uint32_t offsetInModule = (uint8_t*)state.pc - code_->segment().base();
- MOZ_ASSERT(offsetInModule >= codeRange->begin());
- MOZ_ASSERT(offsetInModule < codeRange->end());
- uint32_t offsetInCodeRange = offsetInModule - codeRange->begin();
- void** sp = (void**)state.sp;
-#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
- if (offsetInCodeRange < PushedRetAddr || InThunk(*codeRange, offsetInModule)) {
- // First instruction of the ARM/MIPS function; the return address is
- // still in lr and fp still holds the caller's fp.
+ case CodeRange::TrapExit:
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ if (offsetFromEntry == BeforePushRetAddr || codeRange->isThunk()) {
+ // The return address is still in lr and fp holds the caller's fp.
callerPC_ = state.lr;
callerFP_ = fp;
- AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp - 2);
- } else if (offsetInModule == codeRange->profilingReturn() - PostStorePrePopFP) {
- // Second-to-last instruction of the ARM/MIPS function; fp points to
- // the caller's fp; have not yet popped Frame.
- callerPC_ = ReturnAddressFromFP(sp);
- callerFP_ = CallerFPFromFP(sp);
- AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp);
+ AssertMatchesCallSite(*activation_, callerPC_, callerFP_);
} else
#endif
- if (offsetInCodeRange < PushedFP || offsetInModule == codeRange->profilingReturn() ||
- InThunk(*codeRange, offsetInModule))
- {
- // The return address has been pushed on the stack but not fp; fp
- // still points to the caller's fp.
- callerPC_ = *sp;
+ if (offsetFromEntry == PushedRetAddr || codeRange->isThunk()) {
+ // The return address has been pushed on the stack but fp still
+ // points to the caller's fp.
+ callerPC_ = sp[0];
callerFP_ = fp;
- AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp - 1);
- } else if (offsetInCodeRange < StoredFP) {
- // The full Frame has been pushed; fp still points to the caller's
- // frame.
+ AssertMatchesCallSite(*activation_, callerPC_, callerFP_);
+ } else if (offsetFromEntry == PushedFP) {
+ // The return address and caller's fp have been pushed on the stack; fp
+ // is still the caller's fp.
+ callerPC_ = sp[1];
+ callerFP_ = sp[0];
+ AssertMatchesCallSite(*activation_, callerPC_, callerFP_);
+ } else if (offsetFromEntry == PushedTLS) {
+ // The full Frame has been pushed; fp is still the caller's fp.
MOZ_ASSERT(fp == CallerFPFromFP(sp));
callerPC_ = ReturnAddressFromFP(sp);
- callerFP_ = CallerFPFromFP(sp);
- AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp);
+ callerFP_ = fp;
+ AssertMatchesCallSite(*activation_, callerPC_, callerFP_);
+ } else if (offsetInModule == codeRange->ret() - PoppedTLS) {
+ // The TLS field of the Frame has been popped.
+ callerPC_ = sp[1];
+ callerFP_ = sp[0];
+ } else if (offsetInModule == codeRange->ret()) {
+ // Both the TLS and callerFP fields have been popped and fp now
+ // points to the caller's frame.
+ callerPC_ = sp[0];
+ callerFP_ = fp;
+ AssertMatchesCallSite(*activation_, callerPC_, callerFP_);
} else {
// Not in the prologue/epilogue.
callerPC_ = ReturnAddressFromFP(fp);
callerFP_ = CallerFPFromFP(fp);
- AssertMatchesCallSite(*activation_, callerPC_, callerFP_, fp);
+ AssertMatchesCallSite(*activation_, callerPC_, callerFP_);
}
break;
- }
- case CodeRange::Entry: {
+ case CodeRange::Entry:
// The entry trampoline is the final frame in an WasmActivation. The entry
// trampoline also doesn't GeneratePrologue/Epilogue so we can't use
// the general unwinding logic above.
- MOZ_ASSERT(!fp);
callerPC_ = nullptr;
callerFP_ = nullptr;
break;
- }
case CodeRange::DebugTrap:
- case CodeRange::Inline: {
- // The throw stub clears WasmActivation::fp on it's way out.
- if (!fp) {
- MOZ_ASSERT(done());
- return;
- }
-
+ case CodeRange::Inline:
// Most inline code stubs execute after the prologue/epilogue have
// completed so we can simply unwind based on fp. The only exception is
// the async interrupt stub, since it can be executed at any time.
@@ -749,13 +663,18 @@ ProfilingFrameIterator::ProfilingFrameIterator(const WasmActivation& activation,
// skipped frames. Thus, we use simply unwind based on fp.
callerPC_ = ReturnAddressFromFP(fp);
callerFP_ = CallerFPFromFP(fp);
- AssertMatchesCallSite(*activation_, callerPC_, callerFP_, fp);
+ AssertMatchesCallSite(*activation_, callerPC_, callerFP_);
break;
- }
+ case CodeRange::Throw:
+ // The throw stub executes a small number of instructions before popping
+ // the entire activation. To simplify testing, we simply pretend throw
+ // stubs have already popped the entire stack.
+ MOZ_ASSERT(done());
+ return;
}
codeRange_ = codeRange;
- stackAddress_ = state.sp;
+ stackAddress_ = sp;
MOZ_ASSERT(!done());
}
@@ -784,6 +703,7 @@ ProfilingFrameIterator::operator++()
switch (codeRange_->kind()) {
case CodeRange::Entry:
+ case CodeRange::Throw:
MOZ_ASSERT(callerFP_ == nullptr);
callerPC_ = nullptr;
break;
@@ -796,7 +716,7 @@ ProfilingFrameIterator::operator++()
case CodeRange::FarJumpIsland:
stackAddress_ = callerFP_;
callerPC_ = ReturnAddressFromFP(callerFP_);
- AssertMatchesCallSite(*activation_, callerPC_, CallerFPFromFP(callerFP_), callerFP_);
+ AssertMatchesCallSite(*activation_, callerPC_, CallerFPFromFP(callerFP_));
callerFP_ = CallerFPFromFP(callerFP_);
break;
}
@@ -816,7 +736,6 @@ ProfilingFrameIterator::label() const
// devtools/client/performance/modules/logic/frame-utils.js
const char* importJitDescription = "fast FFI trampoline (in asm.js)";
const char* importInterpDescription = "slow FFI trampoline (in asm.js)";
- const char* nativeDescription = "native call (in asm.js)";
const char* trapDescription = "trap handling (in asm.js)";
const char* debugTrapDescription = "debug trap handling (in asm.js)";
@@ -827,8 +746,6 @@ ProfilingFrameIterator::label() const
return importJitDescription;
case ExitReason::ImportInterp:
return importInterpDescription;
- case ExitReason::Native:
- return nativeDescription;
case ExitReason::Trap:
return trapDescription;
case ExitReason::DebugTrap:
@@ -844,99 +761,21 @@ ProfilingFrameIterator::label() const
case CodeRange::DebugTrap: return debugTrapDescription;
case CodeRange::Inline: return "inline stub (in asm.js)";
case CodeRange::FarJumpIsland: return "interstitial (in asm.js)";
+ case CodeRange::Throw: MOZ_CRASH("no frame for throw stubs");
}
MOZ_CRASH("bad code range kind");
}
-/*****************************************************************************/
-// Runtime patching to enable/disable profiling
-
void
-wasm::ToggleProfiling(const Code& code, const CallSite& callSite, bool enabled)
+wasm::TraceActivations(JSContext* cx, const CooperatingContext& target, JSTracer* trc)
{
- if (callSite.kind() != CallSite::Func)
- return;
-
- uint8_t* callerRetAddr = code.segment().base() + callSite.returnAddressOffset();
-
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
- void* callee = X86Encoding::GetRel32Target(callerRetAddr);
-#elif defined(JS_CODEGEN_ARM)
- uint8_t* caller = callerRetAddr - 4;
- Instruction* callerInsn = reinterpret_cast<Instruction*>(caller);
- BOffImm calleeOffset;
- callerInsn->as<InstBLImm>()->extractImm(&calleeOffset);
- void* callee = calleeOffset.getDest(callerInsn);
-#elif defined(JS_CODEGEN_ARM64)
- MOZ_CRASH();
- void* callee = nullptr;
- (void)callerRetAddr;
-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
- uint8_t* caller = callerRetAddr - 2 * sizeof(uint32_t);
- InstImm* callerInsn = reinterpret_cast<InstImm*>(caller);
- BOffImm16 calleeOffset;
- callerInsn->extractImm16(&calleeOffset);
- void* callee = calleeOffset.getDest(reinterpret_cast<Instruction*>(caller));
-#elif defined(JS_CODEGEN_NONE)
- MOZ_CRASH();
- void* callee = nullptr;
-#else
-# error "Missing architecture"
-#endif
-
- const CodeRange* codeRange = code.lookupRange(callee);
- if (!codeRange->isFunction())
- return;
-
- uint8_t* from = code.segment().base() + codeRange->funcNonProfilingEntry();
- uint8_t* to = code.segment().base() + codeRange->funcProfilingEntry();
- if (!enabled)
- Swap(from, to);
-
- MOZ_ASSERT(callee == from);
-
-#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
- X86Encoding::SetRel32(callerRetAddr, to);
-#elif defined(JS_CODEGEN_ARM)
- new (caller) InstBLImm(BOffImm(to - caller), Assembler::Always);
-#elif defined(JS_CODEGEN_ARM64)
- (void)to;
- MOZ_CRASH();
-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
- new (caller) InstImm(op_regimm, zero, rt_bgezal, BOffImm16(to - caller));
-#elif defined(JS_CODEGEN_NONE)
- MOZ_CRASH();
-#else
-# error "Missing architecture"
-#endif
-}
-
-void
-wasm::ToggleProfiling(const Code& code, const CallThunk& callThunk, bool enabled)
-{
- const CodeRange& cr = code.metadata().codeRanges[callThunk.u.codeRangeIndex];
- uint32_t calleeOffset = enabled ? cr.funcProfilingEntry() : cr.funcNonProfilingEntry();
- MacroAssembler::repatchFarJump(code.segment().base(), callThunk.offset, calleeOffset);
-}
-
-void
-wasm::ToggleProfiling(const Code& code, const CodeRange& codeRange, bool enabled)
-{
- if (!codeRange.isFunction())
- return;
-
- uint8_t* codeBase = code.segment().base();
- uint8_t* profilingEntry = codeBase + codeRange.funcProfilingEntry();
- uint8_t* tableProfilingJump = codeBase + codeRange.funcTableProfilingJump();
- uint8_t* profilingJump = codeBase + codeRange.funcProfilingJump();
- uint8_t* profilingEpilogue = codeBase + codeRange.funcProfilingEpilogue();
-
- if (enabled) {
- MacroAssembler::patchNopToNearJump(tableProfilingJump, profilingEntry);
- MacroAssembler::patchNopToNearJump(profilingJump, profilingEpilogue);
- } else {
- MacroAssembler::patchNearJumpToNop(tableProfilingJump);
- MacroAssembler::patchNearJumpToNop(profilingJump);
+ for (ActivationIterator iter(cx, target); !iter.done(); ++iter) {
+ if (iter.activation()->isWasm()) {
+ for (FrameIterator fi(iter.activation()->asWasm()); !fi.done(); ++fi) {
+ if (fi.hasInstance())
+ fi.instance()->trace(trc);
+ }
+ }
}
}
diff --git a/js/src/wasm/WasmFrameIterator.h b/js/src/wasm/WasmFrameIterator.h
index 7c8e440b61cc..1f780c73ef96 100644
--- a/js/src/wasm/WasmFrameIterator.h
+++ b/js/src/wasm/WasmFrameIterator.h
@@ -36,9 +36,8 @@ class CodeRange;
class DebugFrame;
class Instance;
class SigIdDesc;
-struct CallThunk;
struct FuncOffsets;
-struct ProfilingOffsets;
+struct CallableOffsets;
struct TrapOffset;
// Iterates over the frames of a single WasmActivation, called synchronously
@@ -76,6 +75,7 @@ class FrameIterator
JSAtom* functionDisplayAtom() const;
unsigned lineOrBytecode() const;
const CodeRange* codeRange() const { return codeRange_; }
+ bool hasInstance() const;
Instance* instance() const;
bool debugEnabled() const;
DebugFrame* debugFrame() const;
@@ -89,25 +89,23 @@ enum class ExitReason : uint32_t
None, // default state, the pc is in wasm code
ImportJit, // fast-path call directly into JIT code
ImportInterp, // slow-path call into C++ Invoke()
- Native, // call to native C++ code (e.g., Math.sin, ToInt32(), interrupt)
Trap, // call to trap handler for the trap in WasmActivation::trap
DebugTrap // call to debug trap handler
};
// Iterates over the frames of a single WasmActivation, given an
-// asynchrously-interrupted thread's state. If the activation's
-// module is not in profiling mode, the activation is skipped.
+// asynchronously-interrupted thread's state.
class ProfilingFrameIterator
{
const WasmActivation* activation_;
const Code* code_;
const CodeRange* codeRange_;
- uint8_t* callerFP_;
+ void* callerFP_;
void* callerPC_;
void* stackAddress_;
ExitReason exitReason_;
- void initFromFP();
+ void initFromExitFP();
public:
ProfilingFrameIterator();
@@ -125,26 +123,20 @@ class ProfilingFrameIterator
void
GenerateExitPrologue(jit::MacroAssembler& masm, unsigned framePushed, ExitReason reason,
- ProfilingOffsets* offsets);
+ CallableOffsets* offsets);
void
GenerateExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed, ExitReason reason,
- ProfilingOffsets* offsets);
+ CallableOffsets* offsets);
void
GenerateFunctionPrologue(jit::MacroAssembler& masm, unsigned framePushed, const SigIdDesc& sigId,
FuncOffsets* offsets);
void
GenerateFunctionEpilogue(jit::MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets);
-// Runtime patching to enable/disable profiling
+// Mark all instance objects live on the stack.
void
-ToggleProfiling(const Code& code, const CallSite& callSite, bool enabled);
-
-void
-ToggleProfiling(const Code& code, const CallThunk& callThunk, bool enabled);
-
-void
-ToggleProfiling(const Code& code, const CodeRange& codeRange, bool enabled);
+TraceActivations(JSContext* cx, const CooperatingContext& target, JSTracer* trc);
} // namespace wasm
} // namespace js
diff --git a/js/src/wasm/WasmGenerator.cpp b/js/src/wasm/WasmGenerator.cpp
index e1f3a7639d17..9cb645c91270 100644
--- a/js/src/wasm/WasmGenerator.cpp
+++ b/js/src/wasm/WasmGenerator.cpp
@@ -309,7 +309,7 @@ JumpRange()
typedef HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy> OffsetMap;
bool
-ModuleGenerator::patchCallSites(TrapExitOffsetArray* maybeTrapExits)
+ModuleGenerator::patchCallSites()
{
masm_.haltingAlign(CodeAlignment);
@@ -338,7 +338,7 @@ ModuleGenerator::patchCallSites(TrapExitOffsetArray* maybeTrapExits)
break;
case CallSiteDesc::Func: {
if (funcIsCompiled(cs.funcIndex())) {
- uint32_t calleeOffset = funcCodeRange(cs.funcIndex()).funcNonProfilingEntry();
+ uint32_t calleeOffset = funcCodeRange(cs.funcIndex()).funcNormalEntry();
MOZ_RELEASE_ASSERT(calleeOffset < INT32_MAX);
if (uint32_t(abs(int32_t(calleeOffset) - int32_t(callerOffset))) < JumpRange()) {
@@ -351,7 +351,7 @@ ModuleGenerator::patchCallSites(TrapExitOffsetArray* maybeTrapExits)
if (!p) {
Offsets offsets;
offsets.begin = masm_.currentOffset();
- uint32_t jumpOffset = masm_.farJumpWithPatch().offset();
+ masm_.append(CallFarJump(cs.funcIndex(), masm_.farJumpWithPatch()));
offsets.end = masm_.currentOffset();
if (masm_.oom())
return false;
@@ -360,30 +360,18 @@ ModuleGenerator::patchCallSites(TrapExitOffsetArray* maybeTrapExits)
return false;
if (!existingCallFarJumps.add(p, cs.funcIndex(), offsets.begin))
return false;
-
- // Record calls' far jumps in metadata since they must be
- // repatched at runtime when profiling mode is toggled.
- if (!metadata_->callThunks.emplaceBack(jumpOffset, cs.funcIndex()))
- return false;
}
masm_.patchCall(callerOffset, p->value());
break;
}
case CallSiteDesc::TrapExit: {
- if (maybeTrapExits) {
- uint32_t calleeOffset = (*maybeTrapExits)[cs.trap()].begin;
- MOZ_RELEASE_ASSERT(calleeOffset < INT32_MAX);
-
- if (uint32_t(abs(int32_t(calleeOffset) - int32_t(callerOffset))) < JumpRange()) {
- masm_.patchCall(callerOffset, calleeOffset);
- break;
- }
- }
-
if (!existingTrapFarJumps[cs.trap()]) {
+ // See MacroAssembler::wasmEmitTrapOutOfLineCode for why we must
+ // reload the TLS register on this path.
Offsets offsets;
offsets.begin = masm_.currentOffset();
+ masm_.loadPtr(Address(FramePointer, offsetof(Frame, tls)), WasmTlsReg);
masm_.append(TrapFarJump(cs.trap(), masm_.farJumpWithPatch()));
offsets.end = masm_.currentOffset();
if (masm_.oom())
@@ -429,12 +417,8 @@ ModuleGenerator::patchCallSites(TrapExitOffsetArray* maybeTrapExits)
bool
ModuleGenerator::patchFarJumps(const TrapExitOffsetArray& trapExits, const Offsets& debugTrapStub)
{
- for (CallThunk& callThunk : metadata_->callThunks) {
- uint32_t funcIndex = callThunk.u.funcIndex;
- callThunk.u.codeRangeIndex = funcToCodeRange_[funcIndex];
- CodeOffset farJump(callThunk.offset);
- masm_.patchFarJump(farJump, funcCodeRange(funcIndex).funcNonProfilingEntry());
- }
+ for (const CallFarJump& farJump : masm_.callFarJumps())
+ masm_.patchFarJump(farJump.jump, funcCodeRange(farJump.funcIndex).funcNormalEntry());
for (const TrapFarJump& farJump : masm_.trapFarJumps())
masm_.patchFarJump(farJump.jump, trapExits[farJump.trap].begin);
@@ -534,7 +518,7 @@ ModuleGenerator::finishFuncExports()
}
typedef Vector<Offsets, 0, SystemAllocPolicy> OffsetVector;
-typedef Vector<ProfilingOffsets, 0, SystemAllocPolicy> ProfilingOffsetVector;
+typedef Vector<CallableOffsets, 0, SystemAllocPolicy> CallableOffsetVector;
bool
ModuleGenerator::finishCodegen()
@@ -550,8 +534,8 @@ ModuleGenerator::finishCodegen()
// due to the large absolute offsets temporarily stored by Label::bind().
OffsetVector entries;
- ProfilingOffsetVector interpExits;
- ProfilingOffsetVector jitExits;
+ CallableOffsetVector interpExits;
+ CallableOffsetVector jitExits;
TrapExitOffsetArray trapExits;
Offsets outOfBoundsExit;
Offsets unalignedAccessExit;
@@ -632,7 +616,7 @@ ModuleGenerator::finishCodegen()
return false;
throwStub.offsetBy(offsetInWhole);
- if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, throwStub))
+ if (!metadata_->codeRanges.emplaceBack(CodeRange::Throw, throwStub))
return false;
debugTrapStub.offsetBy(offsetInWhole);
@@ -649,7 +633,7 @@ ModuleGenerator::finishCodegen()
// then far jumps. Patching callsites can generate far jumps so there is an
// ordering dependency.
- if (!patchCallSites(&trapExits))
+ if (!patchCallSites())
return false;
if (!patchFarJumps(trapExits, debugTrapStub))
@@ -1168,7 +1152,6 @@ ModuleGenerator::finish(const ShareableBytes& bytecode)
metadata_->memoryAccesses.podResizeToFit();
metadata_->codeRanges.podResizeToFit();
metadata_->callSites.podResizeToFit();
- metadata_->callThunks.podResizeToFit();
metadata_->debugTrapFarJumpOffsets.podResizeToFit();
metadata_->debugFuncToCodeRange.podResizeToFit();
diff --git a/js/src/wasm/WasmGenerator.h b/js/src/wasm/WasmGenerator.h
index b7a1630a55d6..6a820525b27a 100644
--- a/js/src/wasm/WasmGenerator.h
+++ b/js/src/wasm/WasmGenerator.h
@@ -212,7 +212,7 @@ class MOZ_STACK_CLASS ModuleGenerator
typedef HashSet<uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy> Uint32Set;
typedef Vector<CompileTask, 0, SystemAllocPolicy> CompileTaskVector;
typedef Vector<CompileTask*, 0, SystemAllocPolicy> CompileTaskPtrVector;
- typedef EnumeratedArray<Trap, Trap::Limit, ProfilingOffsets> TrapExitOffsetArray;
+ typedef EnumeratedArray<Trap, Trap::Limit, CallableOffsets> TrapExitOffsetArray;
// Constant parameters
CompileMode compileMode_;
@@ -257,7 +257,7 @@ class MOZ_STACK_CLASS ModuleGenerator
bool funcIsCompiled(uint32_t funcIndex) const;
const CodeRange& funcCodeRange(uint32_t funcIndex) const;
uint32_t numFuncImports() const;
- MOZ_MUST_USE bool patchCallSites(TrapExitOffsetArray* maybeTrapExits = nullptr);
+ MOZ_MUST_USE bool patchCallSites();
MOZ_MUST_USE bool patchFarJumps(const TrapExitOffsetArray& trapExits, const Offsets& debugTrapStub);
MOZ_MUST_USE bool finishTask(CompileTask* task);
MOZ_MUST_USE bool finishOutstandingTask();
diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp
index ba8e759008b5..019bd1239648 100644
--- a/js/src/wasm/WasmInstance.cpp
+++ b/js/src/wasm/WasmInstance.cpp
@@ -352,7 +352,7 @@ Instance::Instance(JSContext* cx,
const CodeRange& codeRange = calleeInstanceObj->getExportedFunctionCodeRange(f);
Instance& calleeInstance = calleeInstanceObj->instance();
import.tls = calleeInstance.tlsData();
- import.code = calleeInstance.codeSegment().base() + codeRange.funcNonProfilingEntry();
+ import.code = calleeInstance.codeSegment().base() + codeRange.funcNormalEntry();
import.baselineScript = nullptr;
import.obj = calleeInstanceObj;
} else {
@@ -538,9 +538,6 @@ Instance::callExport(JSContext* cx, uint32_t funcIndex, CallArgs args)
// If there has been a moving grow, this Instance should have been notified.
MOZ_RELEASE_ASSERT(!memory_ || tlsData()->memoryBase == memory_->buffer().dataPointerEither());
- if (!cx->compartment()->wasm.ensureProfilingState(cx))
- return false;
-
const FuncExport& func = metadata().lookupFuncExport(funcIndex);
// The calling convention for an external call into wasm is to pass an
@@ -782,61 +779,6 @@ Instance::deoptimizeImportExit(uint32_t funcImportIndex)
import.baselineScript = nullptr;
}
-static void
-UpdateEntry(const Code& code, bool profilingEnabled, void** entry)
-{
- const CodeRange& codeRange = *code.lookupRange(*entry);
- void* from = code.segment().base() + codeRange.funcNonProfilingEntry();
- void* to = code.segment().base() + codeRange.funcProfilingEntry();
-
- if (!profilingEnabled)
- Swap(from, to);
-
- MOZ_ASSERT(*entry == from);
- *entry = to;
-}
-
-bool
-Instance::ensureProfilingState(JSContext* cx, bool newProfilingEnabled)
-{
- if (code_->profilingEnabled() == newProfilingEnabled)
- return true;
-
- if (!code_->ensureProfilingState(cx->runtime(), newProfilingEnabled))
- return false;
-
- // Imported wasm functions and typed function tables point directly to
- // either the profiling or non-profiling prologue and must therefore be
- // updated when the profiling mode is toggled.
-
- for (const FuncImport& fi : metadata().funcImports) {
- FuncImportTls& import = funcImportTls(fi);
- if (import.obj && import.obj->is<WasmInstanceObject>()) {
- Code& code = import.obj->as<WasmInstanceObject>().instance().code();
- UpdateEntry(code, newProfilingEnabled, &import.code);
- }
- }
-
- for (const SharedTable& table : tables_) {
- if (!table->isTypedFunction())
- continue;
-
- // This logic will have to be generalized to match the import logic
- // above if wasm can create typed function tables since a single table
- // can contain elements from multiple instances.
- MOZ_ASSERT(metadata().kind == ModuleKind::AsmJS);
-
- void** array = table->internalArray();
- uint32_t length = table->length();
- for (size_t i = 0; i < length; i++) {
- if (array[i])
- UpdateEntry(*code_, newProfilingEnabled, &array[i]);
- }
- }
-
- return true;
-}
-
void
Instance::ensureEnterFrameTrapsState(JSContext* cx, bool enabled)
{
diff --git a/js/src/wasm/WasmInstance.h b/js/src/wasm/WasmInstance.h
index 753e89560b45..46b8a622154f 100644
--- a/js/src/wasm/WasmInstance.h
+++ b/js/src/wasm/WasmInstance.h
@@ -78,7 +78,7 @@ class Instance
TableTls& tableTls(const TableDesc& td) const;
// Import call slow paths which are called directly from wasm code.
- friend void* AddressOf(SymbolicAddress, JSContext*);
+ friend void* AddressOf(SymbolicAddress);
static int32_t callImport_void(Instance*, int32_t, int32_t, uint64_t*);
static int32_t callImport_i32(Instance*, int32_t, int32_t, uint64_t*);
static int32_t callImport_i64(Instance*, int32_t, int32_t, uint64_t*);
@@ -152,10 +152,6 @@ class Instance
void onMovingGrowMemory(uint8_t* prevMemoryBase);
void onMovingGrowTable();
- // See Code::ensureProfilingState comment.
-
- MOZ_MUST_USE bool ensureProfilingState(JSContext* cx, bool enabled);
-
// Debug support:
bool debugEnabled() const { return code_->metadata().debugEnabled; }
bool enterFrameTrapsEnabled() const { return enterFrameTrapsEnabled_; }
diff --git a/js/src/wasm/WasmIonCompile.cpp b/js/src/wasm/WasmIonCompile.cpp
index 66d1c788f64d..7938395974cc 100644
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
typedef OpIter<IonCompilePolicy> IonOpIter;
class FunctionCompiler;
-// TlsUsage describes how the TLS register is used during a function call.
-
-enum class TlsUsage
-{
- Unused, // No particular action is taken with respect to the TLS register.
- Need, // The TLS register must be reloaded just before the call.
- CallerSaved // Same, plus space must be allocated to save/restore the TLS
- // register.
-};
-
-static bool
-NeedsTls(TlsUsage usage)
-{
- return usage == TlsUsage::Need || usage == TlsUsage::CallerSaved;
-}
-
// CallCompileState describes a call that is being compiled. Due to expression
// nesting, multiple calls can be in the middle of compilation at the same time
// and these are tracked in a stack by FunctionCompiler.
@@ -93,11 +77,6 @@ class CallCompileState
// FunctionCompiler::startCall() comment below.
uint32_t spIncrement_;
- // Set by FunctionCompiler::finishCall(), tells a potentially-inter-module
- // call the offset of the reserved space in which it can save the caller's
- // WasmTlsReg.
- uint32_t tlsStackOffset_;
-
// Accumulates the register arguments while compiling arguments.
MWasmCall::Args regArgs_;
@@ -123,7 +102,6 @@ class CallCompileState
: lineOrBytecode_(lineOrBytecode),
maxChildStackBytes_(0),
spIncrement_(0),
- tlsStackOffset_(MWasmCall::DontSaveTls),
childClobbers_(false)
{ }
};
@@ -995,7 +973,7 @@ class FunctionCompiler
outer->childClobbers_ = true;
}
- bool finishCall(CallCompileState* call, TlsUsage tls)
+ bool finishCall(CallCompileState* call)
{
MOZ_ALWAYS_TRUE(callStack_.popCopy() == call);
@@ -1004,22 +982,10 @@ class FunctionCompiler
return true;
}
- if (NeedsTls(tls)) {
- if (!call->regArgs_.append(MWasmCall::Arg(AnyRegister(WasmTlsReg), tlsPointer_)))
- return false;
- }
+ if (!call->regArgs_.append(MWasmCall::Arg(AnyRegister(WasmTlsReg), tlsPointer_)))
+ return false;
uint32_t stackBytes = call->abi_.stackBytesConsumedSoFar();
-
- // If this is a potentially-inter-module call, allocate an extra word of
- // stack space to save/restore the caller's WasmTlsReg during the call.
- // Record the stack offset before including spIncrement since MWasmCall
- // will use this offset after having bumped the stack pointer.
- if (tls == TlsUsage::CallerSaved) {
- call->tlsStackOffset_ = stackBytes;
- stackBytes += sizeof(void*);
- }
-
if (call->childClobbers_) {
call->spIncrement_ = AlignBytes(call->maxChildStackBytes_, WasmStackAlignment);
for (MWasmStackArg* stackArg : call->stackArgs_)
@@ -1052,8 +1018,7 @@ class FunctionCompiler
CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Func);
MIRType ret = ToMIRType(sig.ret());
auto callee = CalleeDesc::function(funcIndex);
- auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ret,
- call.spIncrement_, MWasmCall::DontSaveTls);
+ auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ret, call.spIncrement_);
if (!ins)
return false;
@@ -1078,7 +1043,6 @@ class FunctionCompiler
const TableDesc& table = env_.tables[env_.asmJSSigToTableIndex[sigIndex]];
MOZ_ASSERT(IsPowerOfTwo(table.limits.initial));
MOZ_ASSERT(!table.external);
- MOZ_ASSERT(call.tlsStackOffset_ == MWasmCall::DontSaveTls);
MConstant* mask = MConstant::New(alloc(), Int32Value(table.limits.initial - 1));
curBlock_->add(mask);
@@ -1091,14 +1055,12 @@ class FunctionCompiler
MOZ_ASSERT(sig.id.kind() != SigIdDesc::Kind::None);
MOZ_ASSERT(env_.tables.length() == 1);
const TableDesc& table = env_.tables[0];
- MOZ_ASSERT(table.external == (call.tlsStackOffset_ != MWasmCall::DontSaveTls));
-
callee = CalleeDesc::wasmTable(table, sig.id);
}
CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Dynamic);
auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ToMIRType(sig.ret()),
- call.spIncrement_, call.tlsStackOffset_, index);
+ call.spIncrement_, index);
if (!ins)
return false;
@@ -1115,12 +1077,10 @@ class FunctionCompiler
return true;
}
- MOZ_ASSERT(call.tlsStackOffset_ != MWasmCall::DontSaveTls);
-
CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Dynamic);
auto callee = CalleeDesc::import(globalDataOffset);
auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ToMIRType(ret),
- call.spIncrement_, call.tlsStackOffset_);
+ call.spIncrement_);
if (!ins)
return false;
@@ -1140,7 +1100,7 @@ class FunctionCompiler
CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Symbolic);
auto callee = CalleeDesc::builtin(builtin);
auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ToMIRType(ret),
- call.spIncrement_, MWasmCall::DontSaveTls);
+ call.spIncrement_);
if (!ins)
return false;
@@ -1160,8 +1120,7 @@ class FunctionCompiler
CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Symbolic);
auto* ins = MWasmCall::NewBuiltinInstanceMethodCall(alloc(), desc, builtin,
call.instanceArg_, call.regArgs_,
- ToMIRType(ret), call.spIncrement_,
- call.tlsStackOffset_);
+ ToMIRType(ret), call.spIncrement_);
if (!ins)
return false;
@@ -1181,7 +1140,7 @@ class FunctionCompiler
if (inDeadCode())
return;
- MWasmReturn* ins = MWasmReturn::New(alloc(), operand, tlsPointer_);
+ MWasmReturn* ins = MWasmReturn::New(alloc(), operand);
curBlock_->end(ins);
curBlock_ = nullptr;
}
@@ -1191,7 +1150,7 @@ class FunctionCompiler
if (inDeadCode())
return;
- MWasmReturnVoid* ins = MWasmReturnVoid::New(alloc(), tlsPointer_);
+ MWasmReturnVoid* ins = MWasmReturnVoid::New(alloc());
curBlock_->end(ins);
curBlock_ = nullptr;
}
@@ -2018,11 +1977,8 @@ EmitUnreachable(FunctionCompiler& f)
typedef IonOpIter::ValueVector DefVector;
static bool
-EmitCallArgs(FunctionCompiler& f, const Sig& sig, const DefVector& args, TlsUsage tls,
- CallCompileState* call)
+EmitCallArgs(FunctionCompiler& f, const Sig& sig, const DefVector& args, CallCompileState* call)
{
- MOZ_ASSERT(NeedsTls(tls));
-
if (!f.startCall(call))
return false;
@@ -2031,7 +1987,7 @@ EmitCallArgs(FunctionCompiler& f, const Sig& sig, const DefVector& args, TlsUsag
return false;
}
- return f.finishCall(call, tls);
+ return f.finishCall(call);
}
static bool
@@ -2048,15 +2004,13 @@ EmitCall(FunctionCompiler& f)
return true;
const Sig& sig = *f.env().funcSigs[funcIndex];
- bool import = f.env().funcIsImport(funcIndex);
- TlsUsage tls = import ? TlsUsage::CallerSaved : TlsUsage::Need;
CallCompileState call(f, lineOrBytecode);
- if (!EmitCallArgs(f, sig, args, tls, &call))
+ if (!EmitCallArgs(f, sig, args, &call))
return false;
MDefinition* def;
- if (import) {
+ if (f.env().funcIsImport(funcIndex)) {
uint32_t globalDataOffset = f.env().funcImportGlobalDataOffsets[funcIndex];
if (!f.callImport(globalDataOffset, call, sig.ret(), &def))
return false;
@@ -2093,12 +2047,8 @@ EmitCallIndirect(FunctionCompiler& f, bool oldStyle)
const Sig& sig = f.env().sigs[sigIndex];
- TlsUsage tls = !f.env().isAsmJS() && f.env().tables[0].external
- ? TlsUsage::CallerSaved
- : TlsUsage::Need;
-
CallCompileState call(f, lineOrBytecode);
- if (!EmitCallArgs(f, sig, args, tls, &call))
+ if (!EmitCallArgs(f, sig, args, &call))
return false;
MDefinition* def;
@@ -2581,7 +2531,7 @@ EmitUnaryMathBuiltinCall(FunctionCompiler& f, SymbolicAddress callee, ValType op
if (!f.passArg(input, operandType, &call))
return false;
- if (!f.finishCall(&call, TlsUsage::Unused))
+ if (!f.finishCall(&call))
return false;
MDefinition* def;
@@ -2612,7 +2562,7 @@ EmitBinaryMathBuiltinCall(FunctionCompiler& f, SymbolicAddress callee, ValType o
if (!f.passArg(rhs, operandType, &call))
return false;
- if (!f.finishCall(&call, TlsUsage::Unused))
+ if (!f.finishCall(&call))
return false;
MDefinition* def;
@@ -3217,10 +3167,7 @@ EmitGrowMemory(FunctionCompiler& f)
if (!f.passArg(delta, ValType::I32, &args))
return false;
- // As a short-cut, pretend this is an inter-module call so that any pinned
- // heap pointer will be reloaded after the call. This hack will go away once
- // we can stop pinning registers.
- f.finishCall(&args, TlsUsage::CallerSaved);
+ f.finishCall(&args);
MDefinition* ret;
if (!f.builtinInstanceMethodCall(SymbolicAddress::GrowMemory, args, ValType::I32, &ret))
@@ -3246,7 +3193,7 @@ EmitCurrentMemory(FunctionCompiler& f)
if (!f.passInstance(&args))
return false;
- f.finishCall(&args, TlsUsage::Need);
+ f.finishCall(&args);
MDefinition* ret;
if (!f.builtinInstanceMethodCall(SymbolicAddress::CurrentMemory, args, ValType::I32, &ret))
diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp
index 1bbf5e336d3c..7bd49a7fbf42 100644
--- a/js/src/wasm/WasmJS.cpp
+++ b/js/src/wasm/WasmJS.cpp
@@ -65,13 +65,6 @@ wasm::HasCompilerSupport(JSContext* cx)
if (!wasm::HaveSignalHandlers())
return false;
-#if defined(JS_CODEGEN_ARM)
- // movw/t are required for the loadWasmActivationFromSymbolicAddress in
- // GenerateProfilingPrologue/Epilogue to avoid using the constant pool.
- if (!HasMOVWT())
- return false;
-#endif
-
#if defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_ARM64)
return false;
#else
diff --git a/js/src/wasm/WasmModule.cpp b/js/src/wasm/WasmModule.cpp
index bbf926a2ebe1..b4c65082941d 100644
--- a/js/src/wasm/WasmModule.cpp
+++ b/js/src/wasm/WasmModule.cpp
@@ -440,11 +440,11 @@ Module::extractCode(JSContext* cx, MutableHandleValue vp)
if (!JS_DefineProperty(cx, segment, "funcIndex", value, JSPROP_ENUMERATE))
return false;
- value.setNumber((uint32_t)p.funcNonProfilingEntry());
+ value.setNumber((uint32_t)p.funcNormalEntry());
if (!JS_DefineProperty(cx, segment, "funcBodyBegin", value, JSPROP_ENUMERATE))
return false;
- value.setNumber((uint32_t)p.funcProfilingEpilogue());
+ value.setNumber((uint32_t)p.end());
if (!JS_DefineProperty(cx, segment, "funcBodyEnd", value, JSPROP_ENUMERATE))
return false;
}
@@ -521,7 +521,6 @@ Module::initSegments(JSContext* cx,
for (const ElemSegment& seg : elemSegments_) {
Table& table = *tables[seg.tableIndex];
uint32_t offset = EvaluateInitExpr(globalImports, seg.offset);
- bool profilingEnabled = instance.code().profilingEnabled();
const CodeRangeVector& codeRanges = metadata().codeRanges;
uint8_t* codeBase = instance.codeBase();
@@ -539,9 +538,7 @@ Module::initSegments(JSContext* cx,
} else {
const CodeRange& cr = codeRanges[seg.elemCodeRangeIndices[i]];
uint32_t entryOffset = table.isTypedFunction()
- ? profilingEnabled
- ? cr.funcProfilingEntry()
- : cr.funcNonProfilingEntry()
+ ? cr.funcNormalEntry()
: cr.funcTableEntry();
table.set(offset + i, codeBase + entryOffset, instance);
}
diff --git a/js/src/wasm/WasmStubs.cpp b/js/src/wasm/WasmStubs.cpp
index ebe446a7eeb9..8c28d98981ac 100644
--- a/js/src/wasm/WasmStubs.cpp
+++ b/js/src/wasm/WasmStubs.cpp
@@ -122,7 +122,7 @@ wasm::GenerateEntry(MacroAssembler& masm, const FuncExport& fe)
Register scratch = ABINonArgReturnReg1;
// Read the arguments of wasm::ExportFuncPtr according to the native ABI.
- // The entry stub's frame is only 1 word, not the usual 2 for wasm::Frame.
+ // The entry stub's frame is 1 word.
const unsigned argBase = sizeof(void*) + masm.framePushed();
ABIArgGenerator abi;
ABIArg arg;
@@ -262,12 +262,25 @@ wasm::GenerateEntry(MacroAssembler& masm, const FuncExport& fe)
}
}
+ // Set the FramePointer to null for the benefit of debugging.
+ masm.movePtr(ImmWord(0), FramePointer);
+
// Call into the real function.
masm.assertStackAlignment(WasmStackAlignment);
masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());
+ masm.assertStackAlignment(WasmStackAlignment);
+
+#ifdef DEBUG
+ // Assert FramePointer was returned to null by the callee.
+ Label ok;
+ masm.branchTestPtr(Assembler::Zero, FramePointer, FramePointer, &ok);
+ masm.breakpoint();
+ masm.bind(&ok);
+#endif
// Recover the stack pointer value before dynamic alignment.
masm.loadWasmActivationFromTls(scratch);
+ masm.wasmAssertNonExitInvariants(scratch);
masm.loadStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
masm.setFramePushed(FramePushedForEntrySP);
@@ -445,15 +458,14 @@ FillArgumentArray(MacroAssembler& masm, const ValTypeVector& args, unsigned argO
// normal wasm function for the purposes of exports and table calls. In
// particular, the wrapper function provides:
// - a table entry, so JS imports can be put into tables
-// - normal (non-)profiling entries, so that, if the import is re-exported,
-// an entry stub can be generated and called without any special cases
+// - normal entries, so that, if the import is re-exported, an entry stub can
+// be generated and called without any special cases
FuncOffsets
wasm::GenerateImportFunction(jit::MacroAssembler& masm, const FuncImport& fi, SigIdDesc sigId)
{
masm.setFramePushed(0);
- unsigned tlsBytes = sizeof(void*);
- unsigned framePushed = StackDecrementForCall(masm, WasmStackAlignment, fi.sig().args(), tlsBytes);
+ unsigned framePushed = StackDecrementForCall(masm, WasmStackAlignment, fi.sig().args());
FuncOffsets offsets;
GenerateFunctionPrologue(masm, framePushed, sigId, &offsets);
@@ -474,16 +486,12 @@ wasm::GenerateImportFunction(jit::MacroAssembler& masm, const FuncImport& fi, Si
StackCopy(masm, i.mirType(), scratch, src, dst);
}
- // Save the TLS register so it can be restored later.
- uint32_t tlsStackOffset = i.stackBytesConsumedSoFar();
- masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), tlsStackOffset));
-
// Call the import exit stub.
CallSiteDesc desc(CallSiteDesc::Dynamic);
masm.wasmCallImport(desc, CalleeDesc::import(fi.tlsDataOffset()));
// Restore the TLS register and pinned regs, per wasm function ABI.
- masm.loadPtr(Address(masm.getStackPointer(), tlsStackOffset), WasmTlsReg);
+ masm.loadWasmTlsRegFromFrame();
masm.loadWasmPinnedRegsFromTls();
GenerateFunctionEpilogue(masm, framePushed, &offsets);
@@ -497,7 +505,7 @@ wasm::GenerateImportFunction(jit::MacroAssembler& masm, const FuncImport& fi, Si
// Generate a stub that is called via the internal ABI derived from the
// signature of the import and calls into an appropriate callImport C++
// function, having boxed all the ABI arguments into a homogeneous Value array.
-ProfilingOffsets
+CallableOffsets
wasm::GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi, uint32_t funcImportIndex,
Label* throwLabel)
{
@@ -519,7 +527,7 @@ wasm::GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi, uint3
unsigned argBytes = Max(1, fi.sig().args().length()) * sizeof(Value);
unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes);
- ProfilingOffsets offsets;
+ CallableOffsets offsets;
GenerateExitPrologue(masm, framePushed, ExitReason::ImportInterp, &offsets);
// Fill the argument array.
@@ -621,12 +629,10 @@ wasm::GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi, uint3
return offsets;
}
-static const unsigned SavedTlsReg = sizeof(void*);
-
// Generate a stub that is called via the internal ABI derived from the
// signature of the import and calls into a compatible JIT function,
// having boxed all the ABI arguments into the JIT stack frame layout.
-ProfilingOffsets
+CallableOffsets
wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLabel)
{
masm.setFramePushed(0);
@@ -640,11 +646,11 @@ wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* t
static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");
unsigned sizeOfRetAddr = sizeof(void*);
unsigned jitFrameBytes = 3 * sizeof(void*) + (1 + fi.sig().args().length()) * sizeof(Value);
- unsigned totalJitFrameBytes = sizeOfRetAddr + jitFrameBytes + SavedTlsReg;
+ unsigned totalJitFrameBytes = sizeOfRetAddr + jitFrameBytes;
unsigned jitFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalJitFrameBytes) -
sizeOfRetAddr;
- ProfilingOffsets offsets;
+ CallableOffsets offsets;
GenerateExitPrologue(masm, jitFramePushed, ExitReason::ImportJit, &offsets);
// 1. Descriptor
@@ -684,12 +690,6 @@ wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* t
argOffset += fi.sig().args().length() * sizeof(Value);
MOZ_ASSERT(argOffset == jitFrameBytes);
- // 6. Jit code will clobber all registers, even non-volatiles. WasmTlsReg
- // must be kept live for the benefit of the epilogue, so push it on the
- // stack so that it can be restored before the epilogue.
- static_assert(SavedTlsReg == sizeof(void*), "stack frame accounting");
- masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), jitFrameBytes));
-
{
// Enable Activation.
//
@@ -700,8 +700,7 @@ wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* t
Register act = WasmIonExitRegE1;
// JitActivation* act = cx->activation();
- masm.movePtr(SymbolicAddress::ContextPtr, cx);
- masm.loadPtr(Address(cx, 0), cx);
+ masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, cx)), cx);
masm.loadPtr(Address(cx, JSContext::offsetOfActivation()), act);
// act.active_ = true;
@@ -718,20 +717,26 @@ wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* t
masm.callJitNoProfiler(callee);
AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddr);
+ // The JIT callee clobbers all registers, including WasmTlsReg and
+ // FramePointer, so restore those here.
+ masm.loadWasmTlsRegFromFrame();
+ masm.moveStackPtrTo(FramePointer);
+ masm.addPtr(Imm32(masm.framePushed()), FramePointer);
+
{
// Disable Activation.
//
- // This sequence needs three registers, and must preserve the JSReturnReg_Data and
- // JSReturnReg_Type, so there are five live registers.
+ // This sequence needs three registers and must preserve WasmTlsReg,
+ // JSReturnReg_Data and JSReturnReg_Type.
MOZ_ASSERT(JSReturnReg_Data == WasmIonExitRegReturnData);
MOZ_ASSERT(JSReturnReg_Type == WasmIonExitRegReturnType);
+ MOZ_ASSERT(WasmTlsReg == WasmIonExitTlsReg);
Register cx = WasmIonExitRegD0;
Register act = WasmIonExitRegD1;
Register tmp = WasmIonExitRegD2;
// JitActivation* act = cx->activation();
- masm.movePtr(SymbolicAddress::ContextPtr, cx);
- masm.loadPtr(Address(cx, 0), cx);
+ masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, cx)), cx);
masm.loadPtr(Address(cx, JSContext::offsetOfActivation()), act);
// cx->jitTop = act->prevJitTop_;
@@ -795,12 +800,6 @@ wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* t
Label done;
masm.bind(&done);
- // Ion code does not respect the system ABI's callee-saved register
- // conventions so reload any assumed-non-volatile registers. Note that the
- // reserveStack(sizeOfRetAddr) above means that the stack pointer is at a
- // different offset than when WasmTlsReg was stored.
- masm.loadPtr(Address(masm.getStackPointer(), jitFrameBytes + sizeOfRetAddr), WasmTlsReg);
-
GenerateExitEpilogue(masm, masm.framePushed(), ExitReason::ImportJit, &offsets);
if (oolConvert.used()) {
@@ -864,10 +863,10 @@ wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* t
}
// Generate a stub that calls into ReportTrap with the right trap reason.
-// This stub is called with ABIStackAlignment by a trap out-of-line path. A
-// profiling prologue/epilogue is used so that stack unwinding picks up the
+// This stub is called with ABIStackAlignment by a trap out-of-line path. An
+// exit prologue/epilogue is used so that stack unwinding picks up the
// current WasmActivation. Unwinding will begin at the caller of this trap exit.
-ProfilingOffsets
+CallableOffsets
wasm::GenerateTrapExit(MacroAssembler& masm, Trap trap, Label* throwLabel)
{
masm.haltingAlign(CodeAlignment);
@@ -879,7 +878,7 @@ wasm::GenerateTrapExit(MacroAssembler& masm, Trap trap, Label* throwLabel)
uint32_t framePushed = StackDecrementForCall(masm, ABIStackAlignment, args);
- ProfilingOffsets offsets;
+ CallableOffsets offsets;
GenerateExitPrologue(masm, framePushed, ExitReason::Trap, &offsets);
ABIArgMIRTypeIter i(args);
@@ -904,11 +903,11 @@ wasm::GenerateTrapExit(MacroAssembler& masm, Trap trap, Label* throwLabel)
// Generate a stub which is only used by the signal handlers to handle out of
// bounds access by experimental SIMD.js and Atomics and unaligned accesses on
// ARM. This stub is executed by direct PC transfer from the faulting memory
-// access and thus the stack depth is unknown. Since WasmActivation::fp is not
-// set before calling the error reporter, the current wasm activation will be
-// lost. This stub should be removed when SIMD.js and Atomics are moved to wasm
-// and given proper traps and when we use a non-faulting strategy for unaligned
-// ARM access.
+// access and thus the stack depth is unknown. Since WasmActivation::exitFP is
+// not set before calling the error reporter, the current wasm activation will
+// be lost. This stub should be removed when SIMD.js and Atomics are moved to
+// wasm and given proper traps and when we use a non-faulting strategy for
+// unaligned ARM access.
static Offsets
GenerateGenericMemoryAccessTrap(MacroAssembler& masm, SymbolicAddress reporter, Label* throwLabel)
{
@@ -943,13 +942,17 @@ wasm::GenerateUnalignedExit(MacroAssembler& masm, Label* throwLabel)
return GenerateGenericMemoryAccessTrap(masm, SymbolicAddress::ReportUnalignedAccess, throwLabel);
}
+#if defined(JS_CODEGEN_ARM)
+static const LiveRegisterSet AllRegsExceptPCSP(
+ GeneralRegisterSet(Registers::AllMask & ~((uint32_t(1) << Registers::sp) |
+ (uint32_t(1) << Registers::pc))),
+ FloatRegisterSet(FloatRegisters::AllDoubleMask));
+static_assert(!SupportsSimd, "high lanes of SIMD registers need to be saved too.");
+#else
static const LiveRegisterSet AllRegsExceptSP(
GeneralRegisterSet(Registers::AllMask & ~(uint32_t(1) << Registers::StackPointer)),
FloatRegisterSet(FloatRegisters::AllMask));
-
-static const LiveRegisterSet AllAllocatableRegs = LiveRegisterSet(
- GeneralRegisterSet(Registers::AllocatableMask),
- FloatRegisterSet(FloatRegisters::AllMask));
+#endif
// The async interrupt-callback exit is called from arbitrarily-interrupted wasm
// code. That means we must first save *all* registers and restore *all*
@@ -971,18 +974,11 @@ wasm::GenerateInterruptExit(MacroAssembler& masm, Label* throwLabel)
// Be very careful here not to perturb the machine state before saving it
// to the stack. In particular, add/sub instructions may set conditions in
// the flags register.
- masm.push(Imm32(0)); // space for resumePC
- masm.pushFlags(); // after this we are safe to use sub
- masm.setFramePushed(0); // set to zero so we can use masm.framePushed() below
+ masm.push(Imm32(0)); // space used as return address, updated below
+ masm.setFramePushed(0); // set to 0 now so that framePushed is offset of return address
+ masm.PushFlags(); // after this we are safe to use sub
masm.PushRegsInMask(AllRegsExceptSP); // save all GP/FP registers (except SP)
- Register scratch = ABINonArgReturnReg0;
-
- // Store resumePC into the reserved space.
- masm.loadWasmActivationFromSymbolicAddress(scratch);
- masm.loadPtr(Address(scratch, WasmActivation::offsetOfResumePC()), scratch);
- masm.storePtr(scratch, Address(masm.getStackPointer(), masm.framePushed() + sizeof(void*)));
-
// We know that StackPointer is word-aligned, but not necessarily
// stack-aligned, so we need to align it dynamically.
masm.moveStackPtrTo(ABINonVolatileReg);
@@ -990,18 +986,27 @@ wasm::GenerateInterruptExit(MacroAssembler& masm, Label* throwLabel)
if (ShadowStackSpace)
masm.subFromStackPtr(Imm32(ShadowStackSpace));
+ // Make the call to C++, which preserves ABINonVolatileReg.
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::HandleExecutionInterrupt);
- masm.branchIfFalseBool(ReturnReg, throwLabel);
+ // HandleExecutionInterrupt returns null if execution is interrupted and
+ // the resumption pc otherwise.
+ masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
- // Restore the StackPointer to its position before the call.
+ // Restore the stack pointer then store resumePC into the stack slot that
+ // will be popped by the 'ret' below.
masm.moveToStackPtr(ABINonVolatileReg);
+ masm.storePtr(ReturnReg, Address(StackPointer, masm.framePushed()));
- // Restore the machine state to before the interrupt.
- masm.PopRegsInMask(AllRegsExceptSP); // restore all GP/FP registers (except SP)
- masm.popFlags(); // after this, nothing that sets conditions
- masm.ret(); // pop resumePC into PC
+ // Restore the machine state to before the interrupt. After popping flags,
+ // no instructions can be executed which set flags.
+ masm.PopRegsInMask(AllRegsExceptSP);
+ masm.PopFlags();
+
+ // Return to the resumePC stored into this stack slot above.
+ MOZ_ASSERT(masm.framePushed() == 0);
+ masm.ret();
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
// Reserve space to store resumePC and HeapReg.
masm.subFromStackPtr(Imm32(2 * sizeof(intptr_t)));
@@ -1049,61 +1054,40 @@ wasm::GenerateInterruptExit(MacroAssembler& masm, Label* throwLabel)
masm.as_jr(HeapReg);
masm.loadPtr(Address(StackPointer, -sizeof(intptr_t)), HeapReg);
#elif defined(JS_CODEGEN_ARM)
- masm.setFramePushed(0); // set to zero so we can use masm.framePushed() below
+ masm.push(Imm32(0)); // space used as return address, updated below
+ masm.setFramePushed(0); // set to 0 now so that framePushed is offset of return address
+ masm.PushRegsInMask(AllRegsExceptPCSP); // save all GP/FP registers (except PC and SP)
- // Save all GPR, except the stack pointer.
- masm.PushRegsInMask(LiveRegisterSet(
- GeneralRegisterSet(Registers::AllMask & ~(1<
Maybe maximum() const { return maximum_; }
uint8_t* base() const { return array_.get(); }
- // All updates must go through a set() function with the exception of
- // (profiling) updates to the callee pointer that do not change which
- // logical function is being called.
+ // All table updates must go through set() or setNull().
void** internalArray() const;
ExternalTableElem* externalArray() const;
diff --git a/js/src/wasm/WasmTypes.cpp b/js/src/wasm/WasmTypes.cpp
index 6347f00a56fb..b117f65a530c 100644
--- a/js/src/wasm/WasmTypes.cpp
+++ b/js/src/wasm/WasmTypes.cpp
@@ -28,6 +28,7 @@
#include "jit/MacroAssembler.h"
#include "js/Conversions.h"
#include "vm/Interpreter.h"
+#include "wasm/WasmBaselineCompile.h"
#include "wasm/WasmInstance.h"
#include "wasm/WasmSerialize.h"
#include "wasm/WasmSignalHandlers.h"
@@ -78,25 +79,26 @@ __aeabi_uidivmod(int, int);
}
#endif
-static void
-WasmReportOverRecursed()
-{
- ReportOverRecursed(JSContext::innermostWasmActivation()->cx());
-}
-
-static bool
+static void*
WasmHandleExecutionInterrupt()
{
WasmActivation* activation = JSContext::innermostWasmActivation();
+
+ // wasm::Compartment requires notification when execution is interrupted in
+ // the compartment. Only the innermost compartment has been interrupted;
+ // enclosing compartments necessarily exited through an exit stub.
+ activation->compartment()->wasm.setInterrupted(true);
bool success = CheckForInterrupt(activation->cx());
+ activation->compartment()->wasm.setInterrupted(false);
// Preserve the invariant that having a non-null resumePC means that we are
- // handling an interrupt. Note that resumePC has already been copied onto
- // the stack by the interrupt stub, so we can clear it before returning
- // to the stub.
+ // handling an interrupt.
+ void* resumePC = activation->resumePC();
activation->setResumePC(nullptr);
- return success;
+ // Return the resumePC if execution can continue or null if execution should
+ // jump to the throw stub.
+ return success ? resumePC : nullptr;
}
static bool
@@ -115,7 +117,7 @@ WasmHandleDebugTrap()
return true;
DebugFrame* frame = iter.debugFrame();
frame->setIsDebuggee();
- frame->observeFrame(cx);
+ frame->observe(cx);
// TODO call onEnterFrame
JSTrapStatus status = Debugger::onEnterFrame(cx, frame);
if (status == JSTRAP_RETURN) {
@@ -131,7 +133,7 @@ WasmHandleDebugTrap()
DebugFrame* frame = iter.debugFrame();
frame->updateReturnJSValue();
bool ok = Debugger::onLeaveFrame(cx, frame, nullptr, true);
- frame->leaveFrame(cx);
+ frame->leave(cx);
return ok;
}
@@ -163,14 +165,36 @@ WasmHandleDebugTrap()
return true;
}
-static void
+static WasmActivation*
WasmHandleThrow()
{
- WasmActivation* activation = JSContext::innermostWasmActivation();
- MOZ_ASSERT(activation);
- JSContext* cx = activation->cx();
+ JSContext* cx = TlsContext.get();
- for (FrameIterator iter(activation, FrameIterator::Unwind::True); !iter.done(); ++iter) {
+ WasmActivation* activation = cx->wasmActivationStack();
+ MOZ_ASSERT(activation);
+
+ // FrameIterator iterates down wasm frames in the activation starting at
+ // WasmActivation::exitFP. Pass Unwind::True to pop WasmActivation::exitFP
+ // once each time FrameIterator is incremented, ultimately leaving exitFP
+ // null when the FrameIterator is done(). This is necessary to prevent a
+ // DebugFrame from being observed again after we just called onLeaveFrame
+ // (which would lead to the frame being re-added to the map of live frames,
+ // right as it becomes trash).
+ FrameIterator iter(activation, FrameIterator::Unwind::True);
+ if (iter.done())
+ return activation;
+
+ // Live wasm code on the stack is kept alive (in wasm::TraceActivations) by
+ // marking the instance of every wasm::Frame found by FrameIterator.
+ // However, as explained above, we're popping frames while iterating which
+ // means that a GC during this loop could collect the code of frames whose
+ // code is still on the stack. This is actually mostly fine: as soon as we
+ // return to the throw stub, the entire stack will be popped as a whole,
+ // returning to the C++ caller. However, we must keep the throw stub alive
+ // itself which is owned by the innermost instance.
+ RootedWasmInstanceObject keepAlive(cx, iter.instance()->object());
+
+ for (; !iter.done(); ++iter) {
if (!iter.debugEnabled())
continue;
@@ -196,8 +220,10 @@ WasmHandleThrow()
// TODO properly handle success and resume wasm execution.
JS_ReportErrorASCII(cx, "Unexpected success from onLeaveFrame");
}
- frame->leaveFrame(cx);
+ frame->leave(cx);
}
+
+ return activation;
}
static void
@@ -407,13 +433,9 @@ wasm::IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode)
}
void*
-wasm::AddressOf(SymbolicAddress imm, JSContext* cx)
+wasm::AddressOf(SymbolicAddress imm)
{
switch (imm) {
- case SymbolicAddress::ContextPtr:
- return cx->zone()->group()->addressOfOwnerContext();
- case SymbolicAddress::ReportOverRecursed:
- return FuncCast(WasmReportOverRecursed, Args_General0);
case SymbolicAddress::HandleExecutionInterrupt:
return FuncCast(WasmHandleExecutionInterrupt, Args_General0);
case SymbolicAddress::HandleDebugTrap:
@@ -998,3 +1020,122 @@ wasm::ComputeMappedSize(uint32_t maxSize)
}
#endif // WASM_HUGE_MEMORY
+
+void
+DebugFrame::alignmentStaticAsserts()
+{
+ // VS2017 doesn't consider offsetOfFrame() to be a constexpr, so we have
+ // to use offsetof directly. These asserts can't be at class-level
+ // because the type is incomplete.
+
+ static_assert(WasmStackAlignment >= Alignment,
+ "Aligned by ABI before pushing DebugFrame");
+ static_assert((offsetof(DebugFrame, frame_) + sizeof(Frame)) % Alignment == 0,
+ "Aligned after pushing DebugFrame");
+}
+
+GlobalObject*
+DebugFrame::global() const
+{
+ return &instance()->object()->global();
+}
+
+JSObject*
+DebugFrame::environmentChain() const
+{
+ return &global()->lexicalEnvironment();
+}
+
+bool
+DebugFrame::getLocal(uint32_t localIndex, MutableHandleValue vp)
+{
+ ValTypeVector locals;
+ size_t argsLength;
+ if (!instance()->code().debugGetLocalTypes(funcIndex(), &locals, &argsLength))
+ return false;
+
+ BaseLocalIter iter(locals, argsLength, /* debugEnabled = */ true);
+ while (!iter.done() && iter.index() < localIndex)
+ iter++;
+ MOZ_ALWAYS_TRUE(!iter.done());
+
+ uint8_t* frame = static_cast<uint8_t*>((void*)this) + offsetOfFrame();
+ void* dataPtr = frame - iter.frameOffset();
+ switch (iter.mirType()) {
+ case jit::MIRType::Int32:
+ vp.set(Int32Value(*static_cast<int32_t*>(dataPtr)));
+ break;
+ case jit::MIRType::Int64:
+ // Just display as a Number; it's ok if we lose some precision
+ vp.set(NumberValue((double)*static_cast<int64_t*>(dataPtr)));
+ break;
+ case jit::MIRType::Float32:
+ vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<float*>(dataPtr))));
+ break;
+ case jit::MIRType::Double:
+ vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<double*>(dataPtr))));
+ break;
+ default:
+ MOZ_CRASH("local type");
+ }
+ return true;
+}
+
+void
+DebugFrame::updateReturnJSValue()
+{
+ hasCachedReturnJSValue_ = true;
+ ExprType returnType = instance()->code().debugGetResultType(funcIndex());
+ switch (returnType) {
+ case ExprType::Void:
+ cachedReturnJSValue_.setUndefined();
+ break;
+ case ExprType::I32:
+ cachedReturnJSValue_.setInt32(resultI32_);
+ break;
+ case ExprType::I64:
+ // Just display as a Number; it's ok if we lose some precision
+ cachedReturnJSValue_.setDouble((double)resultI64_);
+ break;
+ case ExprType::F32:
+ cachedReturnJSValue_.setDouble(JS::CanonicalizeNaN(resultF32_));
+ break;
+ case ExprType::F64:
+ cachedReturnJSValue_.setDouble(JS::CanonicalizeNaN(resultF64_));
+ break;
+ default:
+ MOZ_CRASH("result type");
+ }
+}
+
+HandleValue
+DebugFrame::returnValue() const
+{
+ MOZ_ASSERT(hasCachedReturnJSValue_);
+ return HandleValue::fromMarkedLocation(&cachedReturnJSValue_);
+}
+
+void
+DebugFrame::clearReturnJSValue()
+{
+ hasCachedReturnJSValue_ = true;
+ cachedReturnJSValue_.setUndefined();
+}
+
+void
+DebugFrame::observe(JSContext* cx)
+{
+ if (!observing_) {
+ instance()->code().adjustEnterAndLeaveFrameTrapsState(cx, /* enabled = */ true);
+ observing_ = true;
+ }
+}
+
+void
+DebugFrame::leave(JSContext* cx)
+{
+ if (observing_) {
+ instance()->code().adjustEnterAndLeaveFrameTrapsState(cx, /* enabled = */ false);
+ observing_ = false;
+ }
+}
diff --git a/js/src/wasm/WasmTypes.h b/js/src/wasm/WasmTypes.h
index 910b8d9aaa55..899c6ae23fbb 100644
--- a/js/src/wasm/WasmTypes.h
+++ b/js/src/wasm/WasmTypes.h
@@ -41,6 +41,8 @@
namespace js {
class PropertyName;
+class WasmActivation;
+class WasmFunctionCallObject;
namespace jit {
struct BaselineScript;
enum class RoundingMode;
@@ -762,7 +764,7 @@ struct SigWithId : Sig
typedef Vector SigWithIdVector;
typedef Vector SigWithIdPtrVector;
-// The (,Profiling,Func)Offsets classes are used to record the offsets of
+// The (,Callable,Func)Offsets classes are used to record the offsets of
// different key points in a CodeRange during compilation.
struct Offsets
@@ -782,62 +784,39 @@ struct Offsets
}
};
-struct ProfilingOffsets : Offsets
+struct CallableOffsets : Offsets
{
- MOZ_IMPLICIT ProfilingOffsets(uint32_t profilingReturn = 0)
- : Offsets(), profilingReturn(profilingReturn)
+ MOZ_IMPLICIT CallableOffsets(uint32_t ret = 0)
+ : Offsets(), ret(ret)
{}
- // For CodeRanges with ProfilingOffsets, 'begin' is the offset of the
- // profiling entry.
- uint32_t profilingEntry() const { return begin; }
-
- // The profiling return is the offset of the return instruction, which
- // precedes the 'end' by a variable number of instructions due to
- // out-of-line codegen.
- uint32_t profilingReturn;
+ // The offset of the return instruction precedes 'end' by a variable number
+ // of instructions due to out-of-line codegen.
+ uint32_t ret;
void offsetBy(uint32_t offset) {
Offsets::offsetBy(offset);
- profilingReturn += offset;
+ ret += offset;
}
};
-struct FuncOffsets : ProfilingOffsets
+struct FuncOffsets : CallableOffsets
{
MOZ_IMPLICIT FuncOffsets()
- : ProfilingOffsets(),
- tableEntry(0),
- tableProfilingJump(0),
- nonProfilingEntry(0),
- profilingJump(0),
- profilingEpilogue(0)
+ : CallableOffsets(),
+ normalEntry(0)
{}
// Function CodeRanges have a table entry which takes an extra signature
// argument which is checked against the callee's signature before falling
- // through to the normal prologue. When profiling is enabled, a nop on the
- // fallthrough is patched to instead jump to the profiling epilogue.
- uint32_t tableEntry;
- uint32_t tableProfilingJump;
-
- // Function CodeRanges have an additional non-profiling entry that comes
- // after the profiling entry and a non-profiling epilogue that comes before
- // the profiling epilogue.
- uint32_t nonProfilingEntry;
-
- // When profiling is enabled, the 'nop' at offset 'profilingJump' is
- // overwritten to be a jump to 'profilingEpilogue'.
- uint32_t profilingJump;
- uint32_t profilingEpilogue;
+ // through to the normal prologue. The table entry is thus at the beginning
+ // of the CodeRange and the normal entry is at some offset after the table
+ // entry.
+ uint32_t normalEntry;
void offsetBy(uint32_t offset) {
- ProfilingOffsets::offsetBy(offset);
- tableEntry += offset;
- tableProfilingJump += offset;
- nonProfilingEntry += offset;
- profilingJump += offset;
- profilingEpilogue += offset;
+ CallableOffsets::offsetBy(offset);
+ normalEntry += offset;
}
};
@@ -926,26 +905,18 @@ class CallSiteDesc
class CallSite : public CallSiteDesc
{
uint32_t returnAddressOffset_;
- uint32_t stackDepth_;
public:
CallSite() {}
- CallSite(CallSiteDesc desc, uint32_t returnAddressOffset, uint32_t stackDepth)
+ CallSite(CallSiteDesc desc, uint32_t returnAddressOffset)
: CallSiteDesc(desc),
- returnAddressOffset_(returnAddressOffset),
- stackDepth_(stackDepth)
+ returnAddressOffset_(returnAddressOffset)
{ }
void setReturnAddressOffset(uint32_t r) { returnAddressOffset_ = r; }
void offsetReturnAddressBy(int32_t o) { returnAddressOffset_ += o; }
uint32_t returnAddressOffset() const { return returnAddressOffset_; }
-
- // The stackDepth measures the amount of stack space pushed since the
- // function was called. In particular, this includes the pushed return
- // address on all archs (whether or not the call instruction pushes the
- // return address (x86/x64) or the prologue does (ARM/MIPS)).
- uint32_t stackDepth() const { return stackDepth_; }
};
WASM_DECLARE_POD_VECTOR(CallSite, CallSiteVector)
@@ -978,12 +949,12 @@ class CallSiteAndTarget : public CallSite
typedef Vector CallSiteAndTargetVector;
-// A wasm::SymbolicAddress represents a pointer to a well-known function or
-// object that is embedded in wasm code. Since wasm code is serialized and
-// later deserialized into a different address space, symbolic addresses must be
-// used for *all* pointers into the address space. The MacroAssembler records a
-// list of all SymbolicAddresses and the offsets of their use in the code for
-// later patching during static linking.
+// A wasm::SymbolicAddress represents a pointer to a well-known function that is
+// embedded in wasm code. Since wasm code is serialized and later deserialized
+// into a different address space, symbolic addresses must be used for *all*
+// pointers into the address space. The MacroAssembler records a list of all
+// SymbolicAddresses and the offsets of their use in the code for later patching
+// during static linking.
enum class SymbolicAddress
{
@@ -1018,8 +989,6 @@ enum class SymbolicAddress
LogD,
PowD,
ATan2D,
- ContextPtr,
- ReportOverRecursed,
HandleExecutionInterrupt,
HandleDebugTrap,
HandleThrow,
@@ -1051,7 +1020,7 @@ bool
IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode);
void*
-AddressOf(SymbolicAddress imm, JSContext* cx);
+AddressOf(SymbolicAddress imm);
// Assumptions captures ambient state that must be the same when compiling and
// deserializing a module for the compiled code to be valid. If it's not, then
@@ -1485,19 +1454,127 @@ WASM_DECLARE_POD_VECTOR(MemoryAccess, MemoryAccessVector)
struct Frame
{
- // The caller's saved frame pointer. In non-profiling mode, internal
- // wasm-to-wasm calls don't update fp and thus don't save the caller's
- // frame pointer; the space is reserved, however, so that profiling mode can
- // reuse the same function body without recompiling.
+ // The saved value of WasmTlsReg on entry to the function. This is
+ // effectively the callee's instance.
+ TlsData* tls;
+
+ // The caller's Frame*.
uint8_t* callerFP;
// The return address pushed by the call (in the case of ARM/MIPS the return
// address is pushed by the first instruction of the prologue).
void* returnAddress;
+
+ // Helper functions:
+
+ Instance* instance() const { return tls->instance; }
};
-static_assert(sizeof(Frame) == 2 * sizeof(void*), "?!");
-static const uint32_t FrameBytesAfterReturnAddress = sizeof(void*);
+// A DebugFrame is a Frame with additional fields that are added after the
+// normal function prologue by the baseline compiler. If a Module is compiled
+// with debugging enabled, then all its code creates DebugFrames on the stack
+// instead of just Frames. These extra fields are used by the Debugger API.
+
+class DebugFrame
+{
+ // The results field is left uninitialized and is only used during the
+ // baseline compiler's return sequence to allow the debugger to inspect and
+ // modify the return value of a frame being debugged.
+ union
+ {
+ int32_t resultI32_;
+ int64_t resultI64_;
+ float resultF32_;
+ double resultF64_;
+ };
+
+ // The returnValue() method returns a HandleValue pointing to this field.
+ js::Value cachedReturnJSValue_;
+
+ // The function index of this frame. Technically, this could be derived
+ // given a PC into this frame (which could lookup the CodeRange which has
+ // the function index), but this isn't always readily available.
+ uint32_t funcIndex_;
+
+ // Flags whose meaning are described below.
+ union
+ {
+ struct
+ {
+ bool observing_ : 1;
+ bool isDebuggee_ : 1;
+ bool prevUpToDate_ : 1;
+ bool hasCachedSavedFrame_ : 1;
+ bool hasCachedReturnJSValue_ : 1;
+ };
+ void* flagsWord_;
+ };
+
+ // Padding so that DebugFrame has Alignment.
+#if JS_BITS_PER_WORD == 32
+ void* padding_;
+#endif
+
+ // The Frame goes at the end since the stack grows down.
+ Frame frame_;
+
+ public:
+ Frame& frame() { return frame_; }
+ uint32_t funcIndex() const { return funcIndex_; }
+ Instance* instance() const { return frame_.instance(); }
+ GlobalObject* global() const;
+ JSObject* environmentChain() const;
+ bool getLocal(uint32_t localIndex, MutableHandleValue vp);
+
+ // The return value must be written from the unboxed representation in the
+ // results union into cachedReturnJSValue_ by updateReturnJSValue() before
+ // returnValue() can return a Handle to it.
+
+ void updateReturnJSValue();
+ HandleValue returnValue() const;
+ void clearReturnJSValue();
+
+ // Once the debugger observes a frame, it must be notified via
+ // onLeaveFrame() before the frame is popped. Calling observe() ensures the
+ // leave frame traps are enabled. Both methods are idempotent so the caller
+ // doesn't have to worry about calling them more than once.
+
+ void observe(JSContext* cx);
+ void leave(JSContext* cx);
+
+ // The 'isDebuggee' bit is initialized to false and set by the WebAssembly
+ // runtime right before a frame is exposed to the debugger, as required by
+ // the Debugger API. The bit is then used for Debugger-internal purposes
+ // afterwards.
+
+ bool isDebuggee() const { return isDebuggee_; }
+ void setIsDebuggee() { isDebuggee_ = true; }
+ void unsetIsDebuggee() { isDebuggee_ = false; }
+
+ // These are opaque boolean flags used by the debugger to implement
+ // AbstractFramePtr. They are initialized to false and not otherwise read or
+ // written by wasm code or runtime.
+
+ bool prevUpToDate() const { return prevUpToDate_; }
+ void setPrevUpToDate() { prevUpToDate_ = true; }
+ void unsetPrevUpToDate() { prevUpToDate_ = false; }
+
+ bool hasCachedSavedFrame() const { return hasCachedSavedFrame_; }
+ void setHasCachedSavedFrame() { hasCachedSavedFrame_ = true; }
+
+ // DebugFrame is accessed directly by JIT code.
+
+ static constexpr size_t offsetOfResults() { return offsetof(DebugFrame, resultI32_); }
+ static constexpr size_t offsetOfFlagsWord() { return offsetof(DebugFrame, flagsWord_); }
+ static constexpr size_t offsetOfFuncIndex() { return offsetof(DebugFrame, funcIndex_); }
+ static constexpr size_t offsetOfFrame() { return offsetof(DebugFrame, frame_); }
+
+ // DebugFrames are 8-byte aligned, allowing them to be placed in an
+ // AbstractFramePtr.
+
+ static const unsigned Alignment = 8;
+ static void alignmentStaticAsserts();
+};
} // namespace wasm
} // namespace js
diff --git a/layout/generic/nsIFrame.h b/layout/generic/nsIFrame.h
index a716601df3b9..ba49c928543e 100644
--- a/layout/generic/nsIFrame.h
+++ b/layout/generic/nsIFrame.h
@@ -838,9 +838,30 @@ public:
nsIFrame* aSubFrame) const;
/**
- * Bounding rect of the frame. The values are in app units, and the origin is
- * relative to the upper-left of the geometric parent. The size includes the
- * content area, borders, and padding.
+ * Bounding rect of the frame.
+ *
+ * For frames that are laid out according to CSS box model rules the values
+ * are in app units, and the origin is relative to the upper-left of the
+ * geometric parent. The size includes the content area, borders, and
+ * padding.
+ *
+ * Frames that are laid out according to SVG's coordinate space based rules
+ * (frames with the NS_FRAME_SVG_LAYOUT bit set, which *excludes*
+ * nsSVGOuterSVGFrame) are different. Many frames of this type do not set or
+ * use mRect, in which case the frame rect is undefined. The exceptions are:
+ *
+ * - nsSVGInnerSVGFrame
+ * - SVGGeometryFrame (used for <rect>, <circle>, etc.)
+ * - nsSVGImageFrame
+ * - nsSVGForeignObjectFrame
+ *
+ * For these frames the frame rect contains the frame's element's userspace
+ * bounds including fill, stroke and markers, but converted to app units
+ * rather than being in user units (CSS px). In the SVG code "userspace" is
+ * defined to be the coordinate system for the attributes that define an
+ * element's geometry (such as the 'cx' attribute for <circle>). For more
+ * precise details see these frames' implementations of the ReflowSVG method
+ * where mRect is set.
*
* Note: moving or sizing the frame does not affect the view's size or
* position.
diff --git a/layout/svg/nsSVGFilterFrame.cpp b/layout/svg/nsSVGFilterFrame.cpp
index 13ce16993d78..60817bd7d050 100644
--- a/layout/svg/nsSVGFilterFrame.cpp
+++ b/layout/svg/nsSVGFilterFrame.cpp
@@ -7,6 +7,7 @@
#include "nsSVGFilterFrame.h"
// Keep others in (case-insensitive) order:
+#include "AutoReferenceChainGuard.h"
#include "gfxUtils.h"
#include "nsGkAtoms.h"
#include "nsSVGEffects.h"
@@ -17,6 +18,7 @@
#include "nsSVGUtils.h"
#include "nsContentUtils.h"
+using namespace mozilla;
using namespace mozilla::dom;
nsIFrame*
@@ -27,26 +29,6 @@ NS_NewSVGFilterFrame(nsIPresShell* aPresShell, nsStyleContext* aContext)
NS_IMPL_FRAMEARENA_HELPERS(nsSVGFilterFrame)
-class MOZ_RAII nsSVGFilterFrame::AutoFilterReferencer
-{
-public:
- explicit AutoFilterReferencer(nsSVGFilterFrame *aFrame MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
- : mFrame(aFrame)
- {
- MOZ_GUARD_OBJECT_NOTIFIER_INIT;
- // Reference loops should normally be detected in advance and handled, so
- // we're not expecting to encounter them here
- MOZ_ASSERT(!mFrame->mLoopFlag, "Undetected reference loop!");
- mFrame->mLoopFlag = true;
- }
- ~AutoFilterReferencer() {
- mFrame->mLoopFlag = false;
- }
-private:
- nsSVGFilterFrame *mFrame;
- MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
-};
-
uint16_t
nsSVGFilterFrame::GetEnumValue(uint32_t aIndex, nsIContent *aDefault)
{
@@ -56,12 +38,22 @@ nsSVGFilterFrame::GetEnumValue(uint32_t aIndex, nsIContent *aDefault)
if (thisEnum.IsExplicitlySet())
return thisEnum.GetAnimValue();
- AutoFilterReferencer filterRef(this);
+ // Before we recurse, make sure we'll break reference loops and over long
+ // reference chains:
+ static int16_t sRefChainLengthCounter = AutoReferenceChainGuard::noChain;
+ AutoReferenceChainGuard refChainGuard(this, &mLoopFlag,
+ &sRefChainLengthCounter);
+ if (MOZ_UNLIKELY(!refChainGuard.Reference())) {
+ // Break reference chain
+ return static_cast<SVGFilterElement*>(aDefault)->
+ mEnumAttributes[aIndex].GetAnimValue();
+ }
- nsSVGFilterFrame *next = GetReferencedFilterIfNotInUse();
- return next ? next->GetEnumValue(aIndex, aDefault) :
- static_cast<SVGFilterElement*>(aDefault)->
- mEnumAttributes[aIndex].GetAnimValue();
+ nsSVGFilterFrame *next = GetReferencedFilter();
+
+ return next ? next->GetEnumValue(aIndex, aDefault)
+ : static_cast<SVGFilterElement*>(aDefault)->
+ mEnumAttributes[aIndex].GetAnimValue();
}
const nsSVGLength2 *
@@ -73,11 +65,20 @@ nsSVGFilterFrame::GetLengthValue(uint32_t aIndex, nsIContent *aDefault)
if (thisLength->IsExplicitlySet())
return thisLength;
- AutoFilterReferencer filterRef(this);
+ // Before we recurse, make sure we'll break reference loops and over long
+ // reference chains:
+ static int16_t sRefChainLengthCounter = AutoReferenceChainGuard::noChain;
+ AutoReferenceChainGuard refChainGuard(this, &mLoopFlag,
+ &sRefChainLengthCounter);
+ if (MOZ_UNLIKELY(!refChainGuard.Reference())) {
+ // Break reference chain
+ return &static_cast<SVGFilterElement*>(aDefault)->mLengthAttributes[aIndex];
+ }
- nsSVGFilterFrame *next = GetReferencedFilterIfNotInUse();
- return next ? next->GetLengthValue(aIndex, aDefault) :
- &static_cast<SVGFilterElement*>(aDefault)->mLengthAttributes[aIndex];
+ nsSVGFilterFrame *next = GetReferencedFilter();
+
+ return next ? next->GetLengthValue(aIndex, aDefault)
+ : &static_cast<SVGFilterElement*>(aDefault)->mLengthAttributes[aIndex];
}
const SVGFilterElement *
@@ -93,11 +94,20 @@ nsSVGFilterFrame::GetFilterContent(nsIContent *aDefault)
}
}
- AutoFilterReferencer filterRef(this);
+ // Before we recurse, make sure we'll break reference loops and over long
+ // reference chains:
+ static int16_t sRefChainLengthCounter = AutoReferenceChainGuard::noChain;
+ AutoReferenceChainGuard refChainGuard(this, &mLoopFlag,
+ &sRefChainLengthCounter);
+ if (MOZ_UNLIKELY(!refChainGuard.Reference())) {
+ // Break reference chain
+ return static_cast<SVGFilterElement*>(aDefault);
+ }
- nsSVGFilterFrame *next = GetReferencedFilterIfNotInUse();
- return next ? next->GetFilterContent(aDefault) :
- static_cast<SVGFilterElement*>(aDefault);
+ nsSVGFilterFrame *next = GetReferencedFilter();
+
+ return next ? next->GetFilterContent(aDefault)
+ : static_cast<SVGFilterElement*>(aDefault);
}
nsSVGFilterFrame *
@@ -150,22 +160,6 @@ nsSVGFilterFrame::GetReferencedFilter()
return static_cast<nsSVGFilterFrame*>(result);
}
-nsSVGFilterFrame *
-nsSVGFilterFrame::GetReferencedFilterIfNotInUse()
-{
- nsSVGFilterFrame *referenced = GetReferencedFilter();
- if (!referenced)
- return nullptr;
-
- if (referenced->mLoopFlag) {
- // XXXjwatt: we should really send an error to the JavaScript Console here:
- NS_WARNING("Filter reference loop detected while inheriting attribute!");
- return nullptr;
- }
-
- return referenced;
-}
-
nsresult
nsSVGFilterFrame::AttributeChanged(int32_t aNameSpaceID,
nsIAtom* aAttribute,
diff --git a/layout/svg/nsSVGFilterFrame.h b/layout/svg/nsSVGFilterFrame.h
index 223c787f6e85..eac06ce7c9ec 100644
--- a/layout/svg/nsSVGFilterFrame.h
+++ b/layout/svg/nsSVGFilterFrame.h
@@ -69,10 +69,8 @@ private:
// Parse our xlink:href and set up our nsSVGPaintingProperty if we
// reference another filter and we don't have a property. Return
// the referenced filter's frame if available, null otherwise.
- class AutoFilterReferencer;
friend class nsSVGFilterInstance;
nsSVGFilterFrame* GetReferencedFilter();
- nsSVGFilterFrame* GetReferencedFilterIfNotInUse();
// Accessors to lookup filter attributes
uint16_t GetEnumValue(uint32_t aIndex, nsIContent *aDefault);
diff --git a/layout/svg/nsSVGGradientFrame.cpp b/layout/svg/nsSVGGradientFrame.cpp
index e3b627ea102c..f6eaf174a986 100644
--- a/layout/svg/nsSVGGradientFrame.cpp
+++ b/layout/svg/nsSVGGradientFrame.cpp
@@ -8,6 +8,7 @@
#include <algorithm>
// Keep others in (case-insensitive) order:
+#include "AutoReferenceChainGuard.h"
#include "gfxPattern.h"
#include "mozilla/dom/SVGGradientElement.h"
#include "mozilla/dom/SVGStopElement.h"
@@ -21,30 +22,6 @@ using namespace mozilla;
using namespace mozilla::dom;
using namespace mozilla::gfx;
-//----------------------------------------------------------------------
-// Helper classes
-
-class MOZ_RAII nsSVGGradientFrame::AutoGradientReferencer
-{
-public:
- explicit AutoGradientReferencer(nsSVGGradientFrame *aFrame
- MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
- : mFrame(aFrame)
- {
- MOZ_GUARD_OBJECT_NOTIFIER_INIT;
- // Reference loops should normally be detected in advance and handled, so
- // we're not expecting to encounter them here
- MOZ_ASSERT(!mFrame->mLoopFlag, "Undetected reference loop!");
- mFrame->mLoopFlag = true;
- }
- ~AutoGradientReferencer() {
- mFrame->mLoopFlag = false;
- }
-private:
- nsSVGGradientFrame *mFrame;
- MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
-};
-
//----------------------------------------------------------------------
// Implementation
@@ -93,12 +70,22 @@ nsSVGGradientFrame::GetEnumValue(uint32_t aIndex, nsIContent *aDefault)
if (thisEnum.IsExplicitlySet())
return thisEnum.GetAnimValue();
- AutoGradientReferencer gradientRef(this);
+ // Before we recurse, make sure we'll break reference loops and over long
+ // reference chains:
+ static int16_t sRefChainLengthCounter = AutoReferenceChainGuard::noChain;
+ AutoReferenceChainGuard refChainGuard(this, &mLoopFlag,
+ &sRefChainLengthCounter);
+ if (MOZ_UNLIKELY(!refChainGuard.Reference())) {
+ // Break reference chain
+ return static_cast<SVGGradientElement*>(aDefault)->
+ mEnumAttributes[aIndex].GetAnimValue();
+ }
- nsSVGGradientFrame *next = GetReferencedGradientIfNotInUse();
- return next ? next->GetEnumValue(aIndex, aDefault) :
- static_cast<SVGGradientElement*>(aDefault)->
- mEnumAttributes[aIndex].GetAnimValue();
+ nsSVGGradientFrame *next = GetReferencedGradient();
+
+ return next ? next->GetEnumValue(aIndex, aDefault)
+ : static_cast<SVGGradientElement*>(aDefault)->
+ mEnumAttributes[aIndex].GetAnimValue();
}
uint16_t
@@ -123,12 +110,22 @@ nsSVGGradientFrame::GetGradientTransformList(nsIContent* aDefault)
if (thisTransformList && thisTransformList->IsExplicitlySet())
return thisTransformList;
- AutoGradientReferencer gradientRef(this);
+ // Before we recurse, make sure we'll break reference loops and over long
+ // reference chains:
+ static int16_t sRefChainLengthCounter = AutoReferenceChainGuard::noChain;
+ AutoReferenceChainGuard refChainGuard(this, &mLoopFlag,
+ &sRefChainLengthCounter);
+ if (MOZ_UNLIKELY(!refChainGuard.Reference())) {
+ // Break reference chain
+ return static_cast<SVGGradientElement*>(aDefault)->
+ mGradientTransform.get();
+ }
- nsSVGGradientFrame *next = GetReferencedGradientIfNotInUse();
- return next ? next->GetGradientTransformList(aDefault) :
- static_cast<SVGGradientElement*>(aDefault)
- ->mGradientTransform.get();
+ nsSVGGradientFrame *next = GetReferencedGradient();
+
+ return next ? next->GetGradientTransformList(aDefault)
+ : static_cast<SVGGradientElement*>(aDefault)->
+ mGradientTransform.get();
}
gfxMatrix
@@ -167,9 +164,17 @@ nsSVGGradientFrame::GetLinearGradientWithLength(uint32_t aIndex,
// already found it in nsSVGLinearGradientFrame::GetLinearGradientWithLength.
// Since we didn't find the length, continue looking down the chain.
- AutoGradientReferencer gradientRef(this);
+ // Before we recurse, make sure we'll break reference loops and over long
+ // reference chains:
+ static int16_t sRefChainLengthCounter = AutoReferenceChainGuard::noChain;
+ AutoReferenceChainGuard refChainGuard(this, &mLoopFlag,
+ &sRefChainLengthCounter);
+ if (MOZ_UNLIKELY(!refChainGuard.Reference())) {
+ // Break reference chain
+ return aDefault;
+ }
- nsSVGGradientFrame *next = GetReferencedGradientIfNotInUse();
+ nsSVGGradientFrame *next = GetReferencedGradient();
return next ? next->GetLinearGradientWithLength(aIndex, aDefault) : aDefault;
}
@@ -181,9 +186,17 @@ nsSVGGradientFrame::GetRadialGradientWithLength(uint32_t aIndex,
// already found it in nsSVGRadialGradientFrame::GetRadialGradientWithLength.
// Since we didn't find the length, continue looking down the chain.
- AutoGradientReferencer gradientRef(this);
+ // Before we recurse, make sure we'll break reference loops and over long
+ // reference chains:
+ static int16_t sRefChainLengthCounter = AutoReferenceChainGuard::noChain;
+ AutoReferenceChainGuard refChainGuard(this, &mLoopFlag,
+ &sRefChainLengthCounter);
+ if (MOZ_UNLIKELY(!refChainGuard.Reference())) {
+ // Break reference chain
+ return aDefault;
+ }
- nsSVGGradientFrame *next = GetReferencedGradientIfNotInUse();
+ nsSVGGradientFrame *next = GetReferencedGradient();
return next ? next->GetRadialGradientWithLength(aIndex, aDefault) : aDefault;
}
@@ -362,22 +375,6 @@ nsSVGGradientFrame::GetReferencedGradient()
return static_cast<nsSVGGradientFrame*>(result);
}
-nsSVGGradientFrame *
-nsSVGGradientFrame::GetReferencedGradientIfNotInUse()
-{
- nsSVGGradientFrame *referenced = GetReferencedGradient();
- if (!referenced)
- return nullptr;
-
- if (referenced->mLoopFlag) {
- // XXXjwatt: we should really send an error to the JavaScript Console here:
- NS_WARNING("gradient reference loop detected while inheriting attribute!");
- return nullptr;
- }
-
- return referenced;
-}
-
void
nsSVGGradientFrame::GetStopFrames(nsTArray<nsIFrame*>* aStopFrames)
{
@@ -394,13 +391,20 @@ nsSVGGradientFrame::GetStopFrames(nsTArray<nsIFrame*>* aStopFrames)
// Our gradient element doesn't have stops - try to "inherit" them
- AutoGradientReferencer gradientRef(this);
- nsSVGGradientFrame* next = GetReferencedGradientIfNotInUse();
- if (!next) {
+ // Before we recurse, make sure we'll break reference loops and over long
+ // reference chains:
+ static int16_t sRefChainLengthCounter = AutoReferenceChainGuard::noChain;
+ AutoReferenceChainGuard refChainGuard(this, &mLoopFlag,
+ &sRefChainLengthCounter);
+ if (MOZ_UNLIKELY(!refChainGuard.Reference())) {
+ // Break reference chain
return;
}
- return next->GetStopFrames(aStopFrames);
+ nsSVGGradientFrame* next = GetReferencedGradient();
+ if (next) {
+ next->GetStopFrames(aStopFrames);
+ }
}
// -------------------------------------------------------------------------
diff --git a/layout/svg/nsSVGGradientFrame.h b/layout/svg/nsSVGGradientFrame.h
index f12b132533a3..c9bc6c2ca5d4 100644
--- a/layout/svg/nsSVGGradientFrame.h
+++ b/layout/svg/nsSVGGradientFrame.h
@@ -86,10 +86,6 @@ protected:
virtual bool GradientVectorLengthIsZero() = 0;
virtual already_AddRefed<gfxPattern> CreateGradient() = 0;
- // Internal methods for handling referenced gradients
- class AutoGradientReferencer;
- nsSVGGradientFrame* GetReferencedGradientIfNotInUse();
-
// Accessors to lookup gradient attributes
uint16_t GetEnumValue(uint32_t aIndex, nsIContent *aDefault);
uint16_t GetEnumValue(uint32_t aIndex)
diff --git a/layout/svg/nsSVGMaskFrame.cpp b/layout/svg/nsSVGMaskFrame.cpp
index a9887e57348c..47346d0f5ecd 100644
--- a/layout/svg/nsSVGMaskFrame.cpp
+++ b/layout/svg/nsSVGMaskFrame.cpp
@@ -7,6 +7,7 @@
#include "nsSVGMaskFrame.h"
// Keep others in (case-insensitive) order:
+#include "AutoReferenceChainGuard.h"
#include "gfx2DGlue.h"
#include "gfxContext.h"
#include "mozilla/gfx/2D.h"
@@ -204,14 +205,14 @@ NS_IMPL_FRAMEARENA_HELPERS(nsSVGMaskFrame)
mozilla::Pair<DrawResult, RefPtr<SourceSurface>>
nsSVGMaskFrame::GetMaskForMaskedFrame(MaskParams& aParams)
{
- // If the flag is set when we get here, it means this mask frame
- // has already been used painting the current mask, and the document
- // has a mask reference loop.
- if (mInUse) {
- NS_WARNING("Mask loop detected!");
+ // Make sure we break reference loops and over long reference chains:
+ static int16_t sRefChainLengthCounter = AutoReferenceChainGuard::noChain;
+ AutoReferenceChainGuard refChainGuard(this, &mInUse,
+ &sRefChainLengthCounter);
+ if (MOZ_UNLIKELY(!refChainGuard.Reference())) {
+ // Break reference chain
return MakePair(DrawResult::SUCCESS, RefPtr<SourceSurface>());
}
- AutoMaskReferencer maskRef(this);
gfxRect maskArea = GetMaskArea(aParams.maskedFrame);
gfxContext* context = aParams.ctx;
diff --git a/layout/svg/nsSVGMaskFrame.h b/layout/svg/nsSVGMaskFrame.h
index 40ba0b555af0..32c23ad083d9 100644
--- a/layout/svg/nsSVGMaskFrame.h
+++ b/layout/svg/nsSVGMaskFrame.h
@@ -111,28 +111,6 @@ private:
*/
gfxMatrix GetMaskTransform(nsIFrame* aMaskedFrame);
- // A helper class to allow us to paint masks safely. The helper
- // automatically sets and clears the mInUse flag on the mask frame
- // (to prevent nasty reference loops). It's easy to mess this up
- // and break things, so this helper makes the code far more robust.
- class MOZ_RAII AutoMaskReferencer
- {
- public:
- explicit AutoMaskReferencer(nsSVGMaskFrame *aFrame
- MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
- : mFrame(aFrame) {
- MOZ_GUARD_OBJECT_NOTIFIER_INIT;
- NS_ASSERTION(!mFrame->mInUse, "reference loop!");
- mFrame->mInUse = true;
- }
- ~AutoMaskReferencer() {
- mFrame->mInUse = false;
- }
- private:
- nsSVGMaskFrame *mFrame;
- MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
- };
-
gfxMatrix mMatrixForChildren;
// recursion prevention flag
bool mInUse;
diff --git a/layout/svg/nsSVGPatternFrame.cpp b/layout/svg/nsSVGPatternFrame.cpp
index 67f0abf8bc68..68cf1fd775e8 100644
--- a/layout/svg/nsSVGPatternFrame.cpp
+++ b/layout/svg/nsSVGPatternFrame.cpp
@@ -7,6 +7,7 @@
#include "nsSVGPatternFrame.h"
// Keep others in (case-insensitive) order:
+#include "AutoReferenceChainGuard.h"
#include "gfx2DGlue.h"
#include "gfxContext.h"
#include "gfxMatrix.h"
@@ -29,30 +30,6 @@ using namespace mozilla::dom;
using namespace mozilla::gfx;
using namespace mozilla::image;
-//----------------------------------------------------------------------
-// Helper classes
-
-class MOZ_RAII nsSVGPatternFrame::AutoPatternReferencer
-{
-public:
- explicit AutoPatternReferencer(nsSVGPatternFrame *aFrame
- MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
- : mFrame(aFrame)
- {
- MOZ_GUARD_OBJECT_NOTIFIER_INIT;
- // Reference loops should normally be detected in advance and handled, so
- // we're not expecting to encounter them here
- MOZ_ASSERT(!mFrame->mLoopFlag, "Undetected reference loop!");
- mFrame->mLoopFlag = true;
- }
- ~AutoPatternReferencer() {
- mFrame->mLoopFlag = false;
- }
-private:
- nsSVGPatternFrame *mFrame;
- MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
-};
-
//----------------------------------------------------------------------
// Implementation
@@ -443,9 +420,18 @@ nsSVGPatternFrame::GetPatternWithChildren()
return this;
// No, see if we chain to someone who does
- AutoPatternReferencer patternRef(this);
- nsSVGPatternFrame* next = GetReferencedPatternIfNotInUse();
+ // Before we recurse, make sure we'll break reference loops and over long
+ // reference chains:
+ static int16_t sRefChainLengthCounter = AutoReferenceChainGuard::noChain;
+ AutoReferenceChainGuard refChainGuard(this, &mLoopFlag,
+ &sRefChainLengthCounter);
+ if (MOZ_UNLIKELY(!refChainGuard.Reference())) {
+ // Break reference chain
+ return nullptr;
+ }
+
+ nsSVGPatternFrame* next = GetReferencedPattern();
if (!next)
return nullptr;
@@ -461,12 +447,21 @@ nsSVGPatternFrame::GetEnumValue(uint32_t aIndex, nsIContent *aDefault)
if (thisEnum.IsExplicitlySet())
return thisEnum.GetAnimValue();
- AutoPatternReferencer patternRef(this);
+ // Before we recurse, make sure we'll break reference loops and over long
+ // reference chains:
+ static int16_t sRefChainLengthCounter = AutoReferenceChainGuard::noChain;
+ AutoReferenceChainGuard refChainGuard(this, &mLoopFlag,
+ &sRefChainLengthCounter);
+ if (MOZ_UNLIKELY(!refChainGuard.Reference())) {
+ // Break reference chain
+ return static_cast<SVGPatternElement*>(aDefault)->
+ mEnumAttributes[aIndex].GetAnimValue();
+ }
- nsSVGPatternFrame *next = GetReferencedPatternIfNotInUse();
- return next ? next->GetEnumValue(aIndex, aDefault) :
- static_cast<SVGPatternElement*>(aDefault)->
- mEnumAttributes[aIndex].GetAnimValue();
+ nsSVGPatternFrame *next = GetReferencedPattern();
+ return next ? next->GetEnumValue(aIndex, aDefault)
+ : static_cast<SVGPatternElement*>(aDefault)->
+ mEnumAttributes[aIndex].GetAnimValue();
}
nsSVGAnimatedTransformList*
@@ -478,11 +473,19 @@ nsSVGPatternFrame::GetPatternTransformList(nsIContent* aDefault)
if (thisTransformList && thisTransformList->IsExplicitlySet())
return thisTransformList;
- AutoPatternReferencer patternRef(this);
+ // Before we recurse, make sure we'll break reference loops and over long
+ // reference chains:
+ static int16_t sRefChainLengthCounter = AutoReferenceChainGuard::noChain;
+ AutoReferenceChainGuard refChainGuard(this, &mLoopFlag,
+ &sRefChainLengthCounter);
+ if (MOZ_UNLIKELY(!refChainGuard.Reference())) {
+ // Break reference chain
+ return static_cast<SVGPatternElement*>(aDefault)->mPatternTransform.get();
+ }
- nsSVGPatternFrame *next = GetReferencedPatternIfNotInUse();
- return next ? next->GetPatternTransformList(aDefault) :
- static_cast<SVGPatternElement*>(aDefault)->mPatternTransform.get();
+ nsSVGPatternFrame *next = GetReferencedPattern();
+ return next ? next->GetPatternTransformList(aDefault)
+ : static_cast<SVGPatternElement*>(aDefault)->mPatternTransform.get();
}
gfxMatrix
@@ -505,11 +508,19 @@ nsSVGPatternFrame::GetViewBox(nsIContent* aDefault)
if (thisViewBox.IsExplicitlySet())
return thisViewBox;
- AutoPatternReferencer patternRef(this);
+ // Before we recurse, make sure we'll break reference loops and over long
+ // reference chains:
+ static int16_t sRefChainLengthCounter = AutoReferenceChainGuard::noChain;
+ AutoReferenceChainGuard refChainGuard(this, &mLoopFlag,
+ &sRefChainLengthCounter);
+ if (MOZ_UNLIKELY(!refChainGuard.Reference())) {
+ // Break reference chain
+ return static_cast<SVGPatternElement*>(aDefault)->mViewBox;
+ }
- nsSVGPatternFrame *next = GetReferencedPatternIfNotInUse();
- return next ? next->GetViewBox(aDefault) :
- static_cast<SVGPatternElement*>(aDefault)->mViewBox;
+ nsSVGPatternFrame *next = GetReferencedPattern();
+ return next ? next->GetViewBox(aDefault)
+ : static_cast<SVGPatternElement*>(aDefault)->mViewBox;
}
const SVGAnimatedPreserveAspectRatio &
@@ -521,11 +532,19 @@ nsSVGPatternFrame::GetPreserveAspectRatio(nsIContent *aDefault)
if (thisPar.IsExplicitlySet())
return thisPar;
- AutoPatternReferencer patternRef(this);
+ // Before we recurse, make sure we'll break reference loops and over long
+ // reference chains:
+ static int16_t sRefChainLengthCounter = AutoReferenceChainGuard::noChain;
+ AutoReferenceChainGuard refChainGuard(this, &mLoopFlag,
+ &sRefChainLengthCounter);
+ if (MOZ_UNLIKELY(!refChainGuard.Reference())) {
+ // Break reference chain
+ return static_cast<SVGPatternElement*>(aDefault)->mPreserveAspectRatio;
+ }
- nsSVGPatternFrame *next = GetReferencedPatternIfNotInUse();
- return next ? next->GetPreserveAspectRatio(aDefault) :
- static_cast<SVGPatternElement*>(aDefault)->mPreserveAspectRatio;
+ nsSVGPatternFrame *next = GetReferencedPattern();
+ return next ? next->GetPreserveAspectRatio(aDefault)
+ : static_cast<SVGPatternElement*>(aDefault)->mPreserveAspectRatio;
}
const nsSVGLength2 *
@@ -537,11 +556,19 @@ nsSVGPatternFrame::GetLengthValue(uint32_t aIndex, nsIContent *aDefault)
if (thisLength->IsExplicitlySet())
return thisLength;
- AutoPatternReferencer patternRef(this);
+ // Before we recurse, make sure we'll break reference loops and over long
+ // reference chains:
+ static int16_t sRefChainLengthCounter = AutoReferenceChainGuard::noChain;
+ AutoReferenceChainGuard refChainGuard(this, &mLoopFlag,
+ &sRefChainLengthCounter);
+ if (MOZ_UNLIKELY(!refChainGuard.Reference())) {
+ // Break reference chain
+ return &static_cast<SVGPatternElement*>(aDefault)->mLengthAttributes[aIndex];
+ }
- nsSVGPatternFrame *next = GetReferencedPatternIfNotInUse();
- return next ? next->GetLengthValue(aIndex, aDefault) :
- &static_cast<SVGPatternElement*>(aDefault)->mLengthAttributes[aIndex];
+ nsSVGPatternFrame *next = GetReferencedPattern();
+ return next ? next->GetLengthValue(aIndex, aDefault)
+ : &static_cast<SVGPatternElement*>(aDefault)->mLengthAttributes[aIndex];
}
// Private (helper) methods
@@ -595,22 +622,6 @@ nsSVGPatternFrame::GetReferencedPattern()
return static_cast<nsSVGPatternFrame*>(result);
}
-nsSVGPatternFrame *
-nsSVGPatternFrame::GetReferencedPatternIfNotInUse()
-{
- nsSVGPatternFrame *referenced = GetReferencedPattern();
- if (!referenced)
- return nullptr;
-
- if (referenced->mLoopFlag) {
- // XXXjwatt: we should really send an error to the JavaScript Console here:
- NS_WARNING("pattern reference loop detected while inheriting attribute!");
- return nullptr;
- }
-
- return referenced;
-}
-
gfxRect
nsSVGPatternFrame::GetPatternRect(uint16_t aPatternUnits,
const gfxRect &aTargetBBox,
diff --git a/layout/svg/nsSVGPatternFrame.h b/layout/svg/nsSVGPatternFrame.h
index 5182e55385eb..3443d630f3c7 100644
--- a/layout/svg/nsSVGPatternFrame.h
+++ b/layout/svg/nsSVGPatternFrame.h
@@ -81,9 +81,7 @@ public:
protected:
// Internal methods for handling referenced patterns
- class AutoPatternReferencer;
nsSVGPatternFrame* GetReferencedPattern();
- nsSVGPatternFrame* GetReferencedPatternIfNotInUse();
// Accessors to lookup pattern attributes
uint16_t GetEnumValue(uint32_t aIndex, nsIContent *aDefault);
diff --git a/modules/libpref/init/all.js b/modules/libpref/init/all.js
index 8887ee65770e..da28a1d79860 100644
--- a/modules/libpref/init/all.js
+++ b/modules/libpref/init/all.js
@@ -818,7 +818,11 @@ pref("gfx.logging.peak-texture-usage.enabled", false);
pref("gfx.ycbcr.accurate-conversion", false);
+#ifdef MOZ_ENABLE_WEBRENDER
pref("gfx.webrender.enabled", true);
+#else
+pref("gfx.webrender.enabled", false);
+#endif
pref("accessibility.browsewithcaret", false);
pref("accessibility.warn_on_browsewithcaret", true);
diff --git a/modules/libpref/moz.build b/modules/libpref/moz.build
index cd8b526bc93e..4cfa17fb8a9d 100644
--- a/modules/libpref/moz.build
+++ b/modules/libpref/moz.build
@@ -45,6 +45,8 @@ FINAL_LIBRARY = 'xul'
DEFINES['OS_ARCH'] = CONFIG['OS_ARCH']
DEFINES['MOZ_WIDGET_TOOLKIT'] = CONFIG['MOZ_WIDGET_TOOLKIT']
+if CONFIG['MOZ_ENABLE_WEBRENDER']:
+ DEFINES['MOZ_ENABLE_WEBRENDER'] = True
FINAL_TARGET_PP_FILES += [
'greprefs.js',
diff --git a/python/mozbuild/mozbuild/mach_commands.py b/python/mozbuild/mozbuild/mach_commands.py
index 4ae31ba3d79d..6f9323be70fa 100644
--- a/python/mozbuild/mozbuild/mach_commands.py
+++ b/python/mozbuild/mozbuild/mach_commands.py
@@ -210,6 +210,8 @@ class BuildOutputManager(LoggingMixin):
# TODO convert terminal footer to config file setting.
if not terminal or os.environ.get('MACH_NO_TERMINAL_FOOTER', None):
return
+ if os.environ.get('INSIDE_EMACS', None):
+ return
self.t = terminal
self.footer = BuildProgressFooter(terminal, monitor)
diff --git a/toolkit/components/telemetry/Histograms.json b/toolkit/components/telemetry/Histograms.json
index 3bbb706dfa8e..73216b6393d7 100644
--- a/toolkit/components/telemetry/Histograms.json
+++ b/toolkit/components/telemetry/Histograms.json
@@ -1444,6 +1444,16 @@
"n_buckets": 100,
"description": "Time spent decoding an image (us)"
},
+ "IMAGE_ANIMATED_DECODE_TIME": {
+ "alert_emails": ["gfx-telemetry-alerts@mozilla.com"],
+ "bug_numbers": [1347302],
+ "expires_in_version": "57",
+ "kind": "exponential",
+ "low": 50,
+ "high": 50000000,
+ "n_buckets": 100,
+ "description": "Time spent decoding an animated image (us)"
+ },
"IMAGE_DECODE_ON_DRAW_LATENCY": {
"expires_in_version": "never",
"kind": "exponential",
@@ -1452,6 +1462,16 @@
"n_buckets": 100,
"description": "Time from starting a decode to it showing up on the screen (us)"
},
+ "IMAGE_ANIMATED_DECODE_ON_DRAW_LATENCY": {
+ "alert_emails": ["gfx-telemetry-alerts@mozilla.com"],
+ "bug_numbers": [1347302],
+ "expires_in_version": "57",
+ "kind": "exponential",
+ "low": 50,
+ "high": 50000000,
+ "n_buckets": 100,
+ "description": "Time from starting a decode of an animated image to it showing up on the screen (us)"
+ },
"IMAGE_DECODE_CHUNKS": {
"expires_in_version": "never",
"kind": "exponential",
@@ -1466,6 +1486,15 @@
"n_buckets": 50,
"description": "Decode count"
},
+ "IMAGE_ANIMATED_DECODE_COUNT": {
+ "alert_emails": ["gfx-telemetry-alerts@mozilla.com"],
+ "bug_numbers": [1347302],
+ "expires_in_version": "57",
+ "kind": "exponential",
+ "high": 500,
+ "n_buckets": 50,
+ "description": "Decode count of animated images"
+ },
"IMAGE_DECODE_SPEED_JPEG": {
"expires_in_version": "never",
"kind": "exponential",
diff --git a/toolkit/components/telemetry/TelemetrySession.jsm b/toolkit/components/telemetry/TelemetrySession.jsm
index f9542729f715..42bd06448097 100644
--- a/toolkit/components/telemetry/TelemetrySession.jsm
+++ b/toolkit/components/telemetry/TelemetrySession.jsm
@@ -17,7 +17,6 @@ Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
Cu.import("resource://gre/modules/Promise.jsm", this);
Cu.import("resource://gre/modules/DeferredTask.jsm", this);
Cu.import("resource://gre/modules/Preferences.jsm");
-Cu.import("resource://gre/modules/Task.jsm");
Cu.import("resource://gre/modules/Timer.jsm");
Cu.import("resource://gre/modules/TelemetrySend.jsm", this);
Cu.import("resource://gre/modules/TelemetryUtils.jsm", this);
@@ -1529,14 +1528,14 @@ var Impl = {
delayedInit() {
this._log.trace("delayedInit");
- this._delayedInitTask = Task.spawn(function* () {
+ this._delayedInitTask = (async function() {
try {
this._initialized = true;
- yield this._loadSessionData();
+ await this._loadSessionData();
// Update the session data to keep track of new subsessions created before
// the initialization.
- yield TelemetryStorage.saveSessionData(this._getSessionDataObject());
+ await TelemetryStorage.saveSessionData(this._getSessionDataObject());
this.attachObservers();
this.gatherMemory();
@@ -1548,18 +1547,18 @@ var Impl = {
Telemetry.asyncFetchTelemetryData(function() {});
// Update the crash annotation with the proper client ID.
- annotateCrashReport(this._sessionId, yield ClientID.getClientID(),
+ annotateCrashReport(this._sessionId, await ClientID.getClientID(),
Preferences.get(PREF_SERVER, undefined));
if (IS_UNIFIED_TELEMETRY) {
// Check for a previously written aborted session ping.
- yield TelemetryController.checkAbortedSessionPing();
+ await TelemetryController.checkAbortedSessionPing();
// Write the first aborted-session ping as early as possible. Just do that
// if we are not testing, since calling Telemetry.reset() will make a previous
// aborted ping a pending ping.
if (!this._testing) {
- yield this._saveAbortedSessionPing();
+ await this._saveAbortedSessionPing();
}
// The last change date for the environment, used to throttle environment changes.
@@ -1578,7 +1577,7 @@ var Impl = {
this._delayedInitTask = null;
throw e;
}
- }.bind(this));
+ }.bind(this))();
return this._delayedInitTask;
},
@@ -1988,15 +1987,15 @@ var Impl = {
this._initialized = false;
};
- return Task.spawn(function*() {
- yield this.saveShutdownPings();
+ return (async function() {
+ await this.saveShutdownPings();
if (IS_UNIFIED_TELEMETRY) {
- yield TelemetryController.removeAbortedSessionPing();
+ await TelemetryController.removeAbortedSessionPing();
}
reset();
- }.bind(this));
+ }.bind(this))();
};
// We can be in one the following states here:
@@ -2049,8 +2048,8 @@ var Impl = {
* @return {Promise