merge mozilla-inbound to mozilla-central a=merge

Carsten "Tomcat" Book 2017-03-23 13:44:09 +01:00
commit 492970c342
144 changed files with 2457 additions and 2006 deletions

View file

@ -595,6 +595,7 @@ nsAccessibilityService::ContentRemoved(nsIPresShell* aPresShell,
}
if (child) {
MOZ_DIAGNOSTIC_ASSERT(child->Parent(), "Unattached accessible from tree");
document->ContentRemoved(child->Parent(), aChildNode);
#ifdef A11Y_LOG
if (logging::IsEnabled(logging::eTree))

View file

@ -32,6 +32,11 @@ fi
ldflags="$ldflags -Wl,-no_data_in_code_info"
export LDFLAGS="$ldflags"
# Until bug 1342503 is fixed, we can't build some of the webrender dependencies
# on buildbot OS X builders, because rustc will use some random system toolchain
# instead of the one we package with tooltool.
ac_add_options --disable-webrender
# If not set, use the system default clang
if [ -z "$CC" ]; then
export CC=clang

View file

@ -16,7 +16,7 @@ module.exports = createClass({
propTypes: {
box: PropTypes.string.isRequired,
direction: PropTypes.string.isRequired,
direction: PropTypes.string,
property: PropTypes.string.isRequired,
textContent: PropTypes.oneOfType([PropTypes.string, PropTypes.number]).isRequired,
onShowBoxModelEditor: PropTypes.func.isRequired,
@ -42,13 +42,15 @@ module.exports = createClass({
textContent,
} = this.props;
let rotate = (direction == "left" || direction == "right") &&
let rotate = direction &&
(direction == "left" || direction == "right") &&
textContent.toString().length > LONG_TEXT_ROTATE_LIMIT;
return dom.p(
{
className: `boxmodel-${box} boxmodel-${direction}
${rotate ? "boxmodel-rotate" : ""}`,
className: `boxmodel-${box}
${direction ? " boxmodel-" + direction : "boxmodel-" + property}
${rotate ? " boxmodel-rotate" : ""}`,
},
dom.span(
{

View file

@ -134,6 +134,41 @@ module.exports = createClass({
height = this.getHeightValue(height);
width = this.getWidthValue(width);
let contentBox = layout["box-sizing"] == "content-box" ?
dom.p(
{
className: "boxmodel-size",
},
BoxModelEditable({
box: "content",
property: "width",
textContent: width,
onShowBoxModelEditor
}),
dom.span(
{},
"\u00D7"
),
BoxModelEditable({
box: "content",
property: "height",
textContent: height,
onShowBoxModelEditor
})
)
:
dom.p(
{
className: "boxmodel-size",
},
dom.span(
{
title: BOXMODEL_L10N.getStr("boxmodel.content"),
},
SHARED_L10N.getFormatStr("dimensions", width, height)
)
);
return dom.div(
{
className: "boxmodel-main",
@ -198,7 +233,7 @@ module.exports = createClass({
title: BOXMODEL_L10N.getStr("boxmodel.padding"),
},
dom.div({
className: "boxmodel-content",
className: "boxmodel-contents",
"data-box": "content",
title: BOXMODEL_L10N.getStr("boxmodel.content"),
})
@ -330,18 +365,7 @@ module.exports = createClass({
textContent: paddingLeft,
onShowBoxModelEditor,
}),
dom.p(
{
className: "boxmodel-size",
},
dom.span(
{
"data-box": "content",
title: BOXMODEL_L10N.getStr("boxmodel.content"),
},
SHARED_L10N.getFormatStr("dimensions", width, height)
)
)
contentBox
);
},

View file

@ -28,6 +28,7 @@ module.exports = createClass({
return dom.div(
{
className: "property-view",
"data-property-name": name,
tabIndex: "0",
ref: container => {
this.container = container;

View file

@ -22,6 +22,7 @@ support-files =
[browser_boxmodel_guides.js]
[browser_boxmodel_navigation.js]
skip-if = true # Bug 1336198
[browser_boxmodel_properties.js]
[browser_boxmodel_rotate-labels-on-sides.js]
[browser_boxmodel_sync.js]
[browser_boxmodel_tooltips.js]

View file

@ -14,8 +14,12 @@ var res1 = [
value: "160" + "\u00D7" + "160.117"
},
{
selector: ".boxmodel-size > span",
value: "100" + "\u00D7" + "100.117"
selector: ".boxmodel-size > .boxmodel-width",
value: "100"
},
{
selector: ".boxmodel-size > .boxmodel-height",
value: "100.117"
},
{
selector: ".boxmodel-margin.boxmodel-top > span",
@ -73,8 +77,12 @@ var res2 = [
value: "190" + "\u00D7" + "210"
},
{
selector: ".boxmodel-size > span",
value: "100" + "\u00D7" + "150"
selector: ".boxmodel-size > .boxmodel-width",
value: "100"
},
{
selector: ".boxmodel-size > .boxmodel-height",
value: "150"
},
{
selector: ".boxmodel-margin.boxmodel-top > span",

View file

@ -0,0 +1,120 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
"use strict";
// Test that the box model properties list displays the right values
// and that it updates when the node's style is changed.
const TEST_URI = `
<style type='text/css'>
div {
box-sizing: border-box;
display: block;
float: left;
line-height: 20px;
position: relative;
z-index: 2;
height: 100px;
width: 100px;
border: 10px solid black;
padding: 20px;
margin: 30px auto;
}
</style>
<div>Test Node</div>
`;
const res1 = [
{
property: "box-sizing",
value: "border-box"
},
{
property: "display",
value: "block"
},
{
property: "float",
value: "left"
},
{
property: "line-height",
value: "20px"
},
{
property: "position",
value: "relative"
},
{
property: "z-index",
value: 2
},
];
const res2 = [
{
property: "box-sizing",
value: "content-box"
},
{
property: "display",
value: "block"
},
{
property: "float",
value: "right"
},
{
property: "line-height",
value: "10px"
},
{
property: "position",
value: "static"
},
{
property: "z-index",
value: 5
},
];
add_task(function* () {
yield addTab("data:text/html;charset=utf-8," + encodeURIComponent(TEST_URI));
let { inspector, boxmodel, testActor } = yield openLayoutView();
yield selectNode("div", inspector);
yield testInitialValues(inspector, boxmodel);
yield testChangingValues(inspector, boxmodel, testActor);
});
function* testInitialValues(inspector, boxmodel) {
info("Test that the initial values of the box model are correct");
let doc = boxmodel.document;
for (let { property, value } of res1) {
let elt = doc.querySelector(getPropertySelector(property));
is(elt.textContent, value, property + " has the right value.");
}
}
function* testChangingValues(inspector, boxmodel, testActor) {
info("Test that changing the document updates the box model");
let doc = boxmodel.document;
let onUpdated = waitForUpdate(inspector);
yield testActor.setAttribute("div", "style",
"box-sizing:content-box;float:right;" +
"line-height:10px;position:static;z-index:5;");
yield onUpdated;
for (let { property, value } of res2) {
let elt = doc.querySelector(getPropertySelector(property));
is(elt.textContent, value, property + " has the right value after style update.");
}
}
function getPropertySelector(propertyName) {
return `.boxmodel-properties-wrapper .property-view` +
`[data-property-name=${propertyName}] .property-value`;
}

View file

@ -1,3 +1,3 @@
<!DOCTYPE html>
<p style="width:100px;height:100px;background:red;">iframe 1</p>
<iframe src="data:text/html,<div style='width:400px;height:200px;background:yellow;'>iframe 2</div>"></iframe>
<p style="width:100px;height:100px;background:red;box-sizing:border-box">iframe 1</p>
<iframe src="data:text/html,<div style='width:400px;height:200px;background:yellow;box-sizing:border-box'>iframe 2</div>"></iframe>

View file

@ -11,8 +11,10 @@ Services.scriptloader.loadSubScript(
"chrome://mochitests/content/browser/devtools/client/inspector/test/head.js",
this);
Services.prefs.setBoolPref("devtools.layoutview.enabled", true);
Services.prefs.setIntPref("devtools.toolbox.footer.height", 350);
registerCleanupFunction(() => {
Services.prefs.clearUserPref("devtools.layoutview.enabled");
Services.prefs.clearUserPref("devtools.toolbox.footer.height");
});
@ -66,6 +68,36 @@ function openBoxModelView() {
});
}
/**
* Open the toolbox, with the inspector tool visible, and the layout view
* sidebar tab selected to display the box model view with properties.
*
* @return {Promise} a promise that resolves when the inspector is ready and the box model
* view is visible and ready.
*/
function openLayoutView() {
return openInspectorSidebarTab("layoutview").then(data => {
// The actual highlighter show/hide methods are mocked in box model tests.
// The highlighter is tested in devtools/inspector/test.
function mockHighlighter({highlighter}) {
highlighter.showBoxModel = function () {
return promise.resolve();
};
highlighter.hideBoxModel = function () {
return promise.resolve();
};
}
mockHighlighter(data.toolbox);
return {
toolbox: data.toolbox,
inspector: data.inspector,
boxmodel: data.inspector.boxmodel,
testActor: data.testActor
};
});
}
/**
* Wait for the boxmodel-view-updated event.
*

View file

@ -926,10 +926,18 @@ Inspector.prototype = {
this.ruleview.destroy();
}
if (this.boxmodel) {
this.boxmodel.destroy();
}
if (this.computedview) {
this.computedview.destroy();
}
if (this.gridInspector) {
this.gridInspector.destroy();
}
if (this.layoutview) {
this.layoutview.destroy();
}

View file

@ -52,7 +52,7 @@
/* Regions are 3 nested elements with wide borders and outlines */
.boxmodel-content {
.boxmodel-contents {
height: 18px;
}
@ -84,7 +84,7 @@
border-color: #6a5acd;
}
.boxmodel-content {
.boxmodel-contents {
background-color: #87ceeb;
}
@ -104,7 +104,8 @@
/* Editable region sizes are contained in absolutely positioned <p> */
.boxmodel-main > p {
.boxmodel-main > p,
.boxmodel-size {
position: absolute;
pointer-events: none;
margin: 0;
@ -112,7 +113,8 @@
}
.boxmodel-main > p > span,
.boxmodel-main > p > input {
.boxmodel-main > p > input,
.boxmodel-content {
vertical-align: middle;
pointer-events: auto;
}
@ -172,8 +174,8 @@
.boxmodel-position.boxmodel-right,
.boxmodel-margin.boxmodel-right,
.boxmodel-margin.boxmodel-left,
.boxmodel-border.boxmodel-left,
.boxmodel-border.boxmodel-right,
.boxmodel-border.boxmodel-left,
.boxmodel-padding.boxmodel-right,
.boxmodel-padding.boxmodel-left {
width: 21px;
@ -218,6 +220,12 @@
height: 30px;
}
.boxmodel-size > p {
display: inline-block;
margin: auto;
line-height: 0;
}
.boxmodel-rotate.boxmodel-right.boxmodel-position:not(.boxmodel-editing) {
border-top: none;
border-left: 1px solid var(--theme-highlight-purple);
@ -290,8 +298,6 @@
border-bottom-color: hsl(0, 0%, 50%);
}
/* Make sure the content size doesn't appear as editable like the other sizes */
.boxmodel-size > span {
cursor: default;
}

View file

@ -384,10 +384,9 @@ FindD3D9BlacklistedDLL()
class CreateDXVAManagerEvent : public Runnable
{
public:
CreateDXVAManagerEvent(LayersBackend aBackend,
layers::KnowsCompositor* aKnowsCompositor,
CreateDXVAManagerEvent(layers::KnowsCompositor* aKnowsCompositor,
nsCString& aFailureReason)
: mBackend(aBackend)
: mBackend(LayersBackend::LAYERS_D3D11)
, mKnowsCompositor(aKnowsCompositor)
, mFailureReason(aFailureReason)
{
@ -435,7 +434,7 @@ public:
};
bool
WMFVideoMFTManager::InitializeDXVA(bool aForceD3D9)
WMFVideoMFTManager::InitializeDXVA()
{
// If we use DXVA but aren't running with a D3D layer manager then the
// readback of decoded video frames from GPU to CPU memory grinds painting
@ -447,17 +446,14 @@ WMFVideoMFTManager::InitializeDXVA(bool aForceD3D9)
}
MOZ_ASSERT(!mDXVA2Manager);
LayersBackend backend = GetCompositorBackendType(mKnowsCompositor);
if (backend != LayersBackend::LAYERS_D3D9
&& backend != LayersBackend::LAYERS_D3D11) {
if (backend != LayersBackend::LAYERS_D3D11) {
mDXVAFailureReason.AssignLiteral("Unsupported layers backend");
return false;
}
// The DXVA manager must be created on the main thread.
RefPtr<CreateDXVAManagerEvent> event =
new CreateDXVAManagerEvent(aForceD3D9 ? LayersBackend::LAYERS_D3D9
: backend,
mKnowsCompositor,
new CreateDXVAManagerEvent(mKnowsCompositor,
mDXVAFailureReason);
if (NS_IsMainThread()) {
@ -499,7 +495,7 @@ WMFVideoMFTManager::Init()
return false;
}
bool success = InitInternal(/* aForceD3D9 = */ false);
bool success = InitInternal();
if (success && mDXVA2Manager) {
// If we had some failures but eventually made it work,
@ -515,10 +511,10 @@ WMFVideoMFTManager::Init()
}
bool
WMFVideoMFTManager::InitInternal(bool aForceD3D9)
WMFVideoMFTManager::InitInternal()
{
mUseHwAccel = false; // default value; changed if D3D setup succeeds.
bool useDxva = InitializeDXVA(aForceD3D9);
bool useDxva = InitializeDXVA();
RefPtr<MFTDecoder> decoder(new MFTDecoder());
@ -836,8 +832,7 @@ WMFVideoMFTManager::CreateBasicVideoFrame(IMFSample* aSample,
nsIntRect pictureRegion = mVideoInfo.ScaledImageRect(videoWidth, videoHeight);
LayersBackend backend = GetCompositorBackendType(mKnowsCompositor);
if (backend != LayersBackend::LAYERS_D3D9 &&
backend != LayersBackend::LAYERS_D3D11) {
if (backend != LayersBackend::LAYERS_D3D11) {
RefPtr<VideoData> v =
VideoData::CreateAndCopyData(mVideoInfo,
mImageContainer,

View file

@ -69,9 +69,9 @@ public:
private:
bool ValidateVideoInfo();
bool InitializeDXVA(bool aForceD3D9);
bool InitializeDXVA();
bool InitInternal(bool aForceD3D9);
bool InitInternal();
HRESULT CreateBasicVideoFrame(IMFSample* aSample,
int64_t aStreamOffset,

View file

@ -87,7 +87,10 @@ txExecutionState::~txExecutionState()
txStackIterator handlerIter(&mResultHandlerStack);
while (handlerIter.hasNext()) {
delete (txAXMLEventHandler*)handlerIter.next();
txAXMLEventHandler* handler = (txAXMLEventHandler*)handlerIter.next();
if (handler != mObsoleteHandler) {
delete handler;
}
}
txStackIterator paramIter(&mParamStack);
@ -159,6 +162,17 @@ txExecutionState::end(nsresult aResult)
return mOutputHandler->endDocument(aResult);
}
void
txExecutionState::popAndDeleteEvalContext()
{
if (!mEvalContextStack.isEmpty()) {
auto ctx = popEvalContext();
if (ctx != mInitialEvalContext) {
delete ctx;
}
}
}
void
txExecutionState::popAndDeleteEvalContextUntil(txIEvalContext* aContext)
{
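Both changes in this file fix the same double-delete pattern: each stack contains one entry the stack does not own (the obsolete handler, the initial eval context), so unwinding must skip it. A minimal standalone sketch of that rule, with hypothetical types rather than the real txStack API:

#include <vector>

struct EvalContext { virtual ~EvalContext() = default; };

struct EvalContextStack {
    std::vector<EvalContext*> stack;
    EvalContext* initial = nullptr;  // caller-owned sentinel; do not delete

    void popAndDelete() {
        if (stack.empty())
            return;
        EvalContext* ctx = stack.back();
        stack.pop_back();
        if (ctx != initial)          // skip the entry the stack does not own
            delete ctx;
    }
};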

View file

@ -95,6 +95,8 @@ public:
nsresult pushEvalContext(txIEvalContext* aContext);
txIEvalContext* popEvalContext();
void popAndDeleteEvalContext();
/**
* Helper that deletes all entries before |aContext| and then
* pops it off the stack. The caller must delete |aContext| if

View file

@ -37,16 +37,7 @@ txApplyDefaultElementTemplate::execute(txExecutionState& aEs)
}
nsresult
txApplyImportsEnd::execute(txExecutionState& aEs)
{
aEs.popTemplateRule();
aEs.popParamMap();
return NS_OK;
}
nsresult
txApplyImportsStart::execute(txExecutionState& aEs)
txApplyImports::execute(txExecutionState& aEs)
{
txExecutionState::TemplateRule* rule = aEs.getCurrentTemplateRule();
// The frame is set to null when there is no current template rule, or
@ -68,7 +59,12 @@ txApplyImportsStart::execute(txExecutionState& aEs)
aEs.pushTemplateRule(frame, mode, rule->mParams);
return aEs.runTemplate(templ);
rv = aEs.runTemplate(templ);
aEs.popTemplateRule();
aEs.popParamMap();
return rv;
}
txApplyTemplates::txApplyTemplates(const txExpandedName& aMode)
@ -474,7 +470,7 @@ txLoopNodeSet::execute(txExecutionState& aEs)
txNodeSetContext* context =
static_cast<txNodeSetContext*>(aEs.getEvalContext());
if (!context->hasNext()) {
delete aEs.popEvalContext();
aEs.popAndDeleteEvalContext();
return NS_OK;
}

View file

@ -47,13 +47,7 @@ public:
TX_DECL_TXINSTRUCTION
};
class txApplyImportsEnd : public txInstruction
{
public:
TX_DECL_TXINSTRUCTION
};
class txApplyImportsStart : public txInstruction
class txApplyImports : public txInstruction
{
public:
TX_DECL_TXINSTRUCTION

View file

@ -1312,8 +1312,7 @@ txFnText(const nsAString& aStr, txStylesheetCompilerState& aState)
/*
xsl:apply-imports
txApplyImportsStart
txApplyImportsEnd
txApplyImports
*/
static nsresult
txFnStartApplyImports(int32_t aNamespaceID,
@ -1325,11 +1324,7 @@ txFnStartApplyImports(int32_t aNamespaceID,
{
nsresult rv = NS_OK;
nsAutoPtr<txInstruction> instr(new txApplyImportsStart);
rv = aState.addInstruction(Move(instr));
NS_ENSURE_SUCCESS(rv, rv);
instr = new txApplyImportsEnd;
nsAutoPtr<txInstruction> instr(new txApplyImports);
rv = aState.addInstruction(Move(instr));
NS_ENSURE_SUCCESS(rv, rv);

View file

@ -154,7 +154,7 @@ IsAccelAngleSupported(const nsCOMPtr<nsIGfxInfo>& gfxInfo,
if (CompositorThreadHolder::IsInCompositorThread()) {
// We can only enter here with WebRender, so assert that this is a
// WebRender-enabled build.
#ifndef MOZ_ENABLE_WEBRENDER
#ifndef MOZ_BUILD_WEBRENDER
MOZ_ASSERT(false);
#endif
return true;

View file

@ -46,7 +46,6 @@ enum class LayersBackend : int8_t {
LAYERS_NONE = 0,
LAYERS_BASIC,
LAYERS_OPENGL,
LAYERS_D3D9,
LAYERS_D3D11,
LAYERS_CLIENT,
LAYERS_WR,

View file

@ -336,9 +336,7 @@ TexClientFromReadback(SharedSurface* src, CompositableForwarder* allocator,
// RB_SWAPPED doesn't work with D3D11. (bug 1051010)
// RB_SWAPPED doesn't work with Basic. (bug ???????)
// RB_SWAPPED doesn't work with D3D9. (bug ???????)
bool layersNeedsManualSwap = layersBackend == LayersBackend::LAYERS_BASIC ||
layersBackend == LayersBackend::LAYERS_D3D9 ||
layersBackend == LayersBackend::LAYERS_D3D11;
if (texClient->HasFlags(TextureFlags::RB_SWAPPED) &&
layersNeedsManualSwap)

View file

@ -826,7 +826,6 @@ ClientLayerManager::GetBackendName(nsAString& aName)
case LayersBackend::LAYERS_NONE: aName.AssignLiteral("None"); return;
case LayersBackend::LAYERS_BASIC: aName.AssignLiteral("Basic"); return;
case LayersBackend::LAYERS_OPENGL: aName.AssignLiteral("OpenGL"); return;
case LayersBackend::LAYERS_D3D9: aName.AssignLiteral("Direct3D 9"); return;
case LayersBackend::LAYERS_D3D11: {
#ifdef XP_WIN
if (DeviceManagerDx::Get()->IsWARP()) {

View file

@ -61,7 +61,6 @@ ContentClient::CreateContentClient(CompositableForwarder* aForwarder)
{
LayersBackend backend = aForwarder->GetCompositorBackendType();
if (backend != LayersBackend::LAYERS_OPENGL &&
backend != LayersBackend::LAYERS_D3D9 &&
backend != LayersBackend::LAYERS_D3D11 &&
backend != LayersBackend::LAYERS_WR &&
backend != LayersBackend::LAYERS_BASIC) {

View file

@ -1579,7 +1579,7 @@ CompositorBridgeParent::AllocPWebRenderBridgeParent(const wr::PipelineId& aPipel
TextureFactoryIdentifier* aTextureFactoryIdentifier,
uint32_t* aIdNamespace)
{
#ifndef MOZ_ENABLE_WEBRENDER
#ifndef MOZ_BUILD_WEBRENDER
// Extra guard since this is in the parent process and we don't want a malicious
// child process invoking this codepath before it's ready
MOZ_RELEASE_ASSERT(false);
@ -1614,7 +1614,7 @@ CompositorBridgeParent::AllocPWebRenderBridgeParent(const wr::PipelineId& aPipel
bool
CompositorBridgeParent::DeallocPWebRenderBridgeParent(PWebRenderBridgeParent* aActor)
{
#ifndef MOZ_ENABLE_WEBRENDER
#ifndef MOZ_BUILD_WEBRENDER
// Extra guard since this is in the parent process and we don't want a malicious
// child process invoking this codepath before it's ready
MOZ_RELEASE_ASSERT(false);

View file

@ -203,7 +203,7 @@ CrossProcessCompositorBridgeParent::AllocPWebRenderBridgeParent(const wr::Pipeli
TextureFactoryIdentifier* aTextureFactoryIdentifier,
uint32_t *aIdNamespace)
{
#ifndef MOZ_ENABLE_WEBRENDER
#ifndef MOZ_BUILD_WEBRENDER
// Extra guard since this is in the parent process and we don't want a malicious
// child process invoking this codepath before it's ready
MOZ_RELEASE_ASSERT(false);
@ -238,7 +238,7 @@ CrossProcessCompositorBridgeParent::AllocPWebRenderBridgeParent(const wr::Pipeli
bool
CrossProcessCompositorBridgeParent::DeallocPWebRenderBridgeParent(PWebRenderBridgeParent* aActor)
{
#ifndef MOZ_ENABLE_WEBRENDER
#ifndef MOZ_BUILD_WEBRENDER
// Extra guard since this is in the parent process and we don't want a malicious
// child process invoking this codepath before it's ready
MOZ_RELEASE_ASSERT(false);

View file

@ -63,9 +63,6 @@ static already_AddRefed<Compositor> CreateTestCompositor(LayersBackend backend,
} else if (backend == LayersBackend::LAYERS_D3D11) {
//compositor = new CompositorD3D11();
MOZ_CRASH(); // No support yet
} else if (backend == LayersBackend::LAYERS_D3D9) {
//compositor = new CompositorD3D9(this, mWidget);
MOZ_CRASH(); // No support yet
#endif
}
nsCString failureReason;

View file

@ -2289,12 +2289,13 @@ gfxPlatform::InitWebRenderConfig()
{
FeatureState& featureWebRender = gfxConfig::GetFeature(Feature::WEBRENDER);
featureWebRender.EnableByDefault();
featureWebRender.DisableByDefault(
FeatureStatus::OptIn,
"WebRender is an opt-in feature",
NS_LITERAL_CSTRING("FEATURE_FAILURE_DEFAULT_OFF"));
if (!Preferences::GetBool("gfx.webrender.enabled", false)) {
featureWebRender.UserDisable(
"User disabled WebRender",
NS_LITERAL_CSTRING("FEATURE_FAILURE_WEBRENDER_DISABLED"));
if (Preferences::GetBool("gfx.webrender.enabled", false)) {
featureWebRender.UserEnable("Enabled by pref");
}
// WebRender relies on the GPU process when on Windows
@ -2314,7 +2315,7 @@ gfxPlatform::InitWebRenderConfig()
NS_LITERAL_CSTRING("FEATURE_FAILURE_SAFE_MODE"));
}
#ifndef MOZ_ENABLE_WEBRENDER
#ifndef MOZ_BUILD_WEBRENDER
featureWebRender.ForceDisable(
FeatureStatus::Unavailable,
"Build doesn't include WebRender",

View file

@ -1217,6 +1217,14 @@ gfxWindowsPlatform::SetupClearTypeParams()
}
}
if (GetDefaultContentBackend() == BackendType::SKIA) {
// Skia doesn't support a contrast value outside of 0-1, so default to 1.0
if (contrast < 0.0 || contrast > 1.0) {
NS_WARNING("Custom dwrite contrast not supported in Skia. Defaulting to 1.0.");
contrast = 1.0;
}
}
// For parameters that have not been explicitly set,
// we copy values from default params (or our overridden value for contrast)
if (gamma < 1.0 || gamma > 2.2) {

View file

@ -452,7 +452,7 @@ struct WrVecU8 {
// an error and causes the build to fail. So for wr_* functions called by
// destructors in C++ classes, use WR_DESTRUCTOR_SAFE_FUNC instead, which omits
// the unreachable annotation.
#ifdef MOZ_ENABLE_WEBRENDER
#ifdef MOZ_BUILD_WEBRENDER
# define WR_INLINE
# define WR_FUNC
# define WR_DESTRUCTOR_SAFE_FUNC

View file

@ -141,28 +141,33 @@ AnimationState::LoopLength() const
// FrameAnimator implementation.
///////////////////////////////////////////////////////////////////////////////
TimeStamp
Maybe<TimeStamp>
FrameAnimator::GetCurrentImgFrameEndTime(AnimationState& aState) const
{
TimeStamp currentFrameTime = aState.mCurrentAnimationFrameTime;
FrameTimeout timeout = GetTimeoutForFrame(aState.mCurrentAnimationFrameIndex);
Maybe<FrameTimeout> timeout = GetTimeoutForFrame(aState, aState.mCurrentAnimationFrameIndex);
if (timeout == FrameTimeout::Forever()) {
if (timeout.isNothing()) {
MOZ_ASSERT(aState.GetHasBeenDecoded() && !aState.GetIsCurrentlyDecoded());
return Nothing();
}
if (*timeout == FrameTimeout::Forever()) {
// We need to return a sentinel value in this case, because our logic
// doesn't work correctly if we have an infinitely long timeout. We use one
// year in the future as the sentinel because it works with the loop in
// RequestRefresh() below.
// XXX(seth): It'd be preferable to make our logic work correctly with
// infinitely long timeouts.
return TimeStamp::NowLoRes() +
TimeDuration::FromMilliseconds(31536000.0);
return Some(TimeStamp::NowLoRes() +
TimeDuration::FromMilliseconds(31536000.0));
}
TimeDuration durationOfTimeout =
TimeDuration::FromMilliseconds(double(timeout.AsMilliseconds()));
TimeDuration::FromMilliseconds(double(timeout->AsMilliseconds()));
TimeStamp currentFrameEndTime = currentFrameTime + durationOfTimeout;
return currentFrameEndTime;
return Some(currentFrameEndTime);
}
RefreshResult
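A minimal sketch of the new contract, using std::optional as a stand-in for mozilla::Maybe (a hypothetical helper, not the real FrameAnimator API): an empty result means the frame was discarded and its timeout is unknown until it is re-decoded, while an infinite timeout keeps the one-year sentinel described above.

#include <chrono>
#include <optional>

using Clock = std::chrono::steady_clock;
using Ms = std::chrono::milliseconds;

std::optional<Clock::time_point>
FrameEndTime(std::optional<Ms> timeout, Clock::time_point frameStart)
{
    if (!timeout)                  // frame discarded; caller must wait for a re-decode
        return std::nullopt;
    if (*timeout == Ms::max())     // stand-in for FrameTimeout::Forever()
        return Clock::now() + std::chrono::hours(24 * 365);  // one-year sentinel
    return frameStart + *timeout;  // normal case: frame start plus its timeout
}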
@ -238,7 +243,11 @@ FrameAnimator::AdvanceFrame(AnimationState& aState, TimeStamp aTime)
return ret;
}
if (GetTimeoutForFrame(nextFrameIndex) == FrameTimeout::Forever()) {
Maybe<FrameTimeout> nextFrameTimeout = GetTimeoutForFrame(aState, nextFrameIndex);
// GetTimeoutForFrame can only return Nothing() if the frame doesn't exist,
// but we just got it above.
MOZ_ASSERT(nextFrameTimeout.isSome());
if (*nextFrameTimeout == FrameTimeout::Forever()) {
ret.mAnimationFinished = true;
}
@ -252,7 +261,9 @@ FrameAnimator::AdvanceFrame(AnimationState& aState, TimeStamp aTime)
// something went wrong, move on to next
NS_WARNING("FrameAnimator::AdvanceFrame(): Compositing of frame failed");
nextFrame->SetCompositingFailed(true);
aState.mCurrentAnimationFrameTime = GetCurrentImgFrameEndTime(aState);
Maybe<TimeStamp> currentFrameEndTime = GetCurrentImgFrameEndTime(aState);
MOZ_ASSERT(currentFrameEndTime.isSome());
aState.mCurrentAnimationFrameTime = *currentFrameEndTime;
aState.mCurrentAnimationFrameIndex = nextFrameIndex;
return ret;
@ -261,7 +272,9 @@ FrameAnimator::AdvanceFrame(AnimationState& aState, TimeStamp aTime)
nextFrame->SetCompositingFailed(false);
}
aState.mCurrentAnimationFrameTime = GetCurrentImgFrameEndTime(aState);
Maybe<TimeStamp> currentFrameEndTime = GetCurrentImgFrameEndTime(aState);
MOZ_ASSERT(currentFrameEndTime.isSome());
aState.mCurrentAnimationFrameTime = *currentFrameEndTime;
// If we can get closer to the current time by a multiple of the image's loop
// time, we should. We can only do this if we're done decoding; otherwise, we
@ -301,10 +314,18 @@ FrameAnimator::RequestRefresh(AnimationState& aState, const TimeStamp& aTime)
// only advance the frame if the current time is greater than or
// equal to the current frame's end time.
TimeStamp currentFrameEndTime = GetCurrentImgFrameEndTime(aState);
Maybe<TimeStamp> currentFrameEndTime = GetCurrentImgFrameEndTime(aState);
if (currentFrameEndTime.isNothing()) {
MOZ_ASSERT(gfxPrefs::ImageMemAnimatedDiscardable());
MOZ_ASSERT(aState.GetHasBeenDecoded() && !aState.GetIsCurrentlyDecoded());
MOZ_ASSERT(aState.mCompositedFrameInvalid);
// Nothing we can do but wait for our previous current frame to be decoded
// again so we can determine what to do next.
return ret;
}
while (currentFrameEndTime <= aTime) {
TimeStamp oldFrameEndTime = currentFrameEndTime;
while (*currentFrameEndTime <= aTime) {
TimeStamp oldFrameEndTime = *currentFrameEndTime;
RefreshResult frameRes = AdvanceFrame(aState, aTime);
@ -312,17 +333,19 @@ FrameAnimator::RequestRefresh(AnimationState& aState, const TimeStamp& aTime)
ret.Accumulate(frameRes);
currentFrameEndTime = GetCurrentImgFrameEndTime(aState);
// AdvanceFrame can't advance to a frame that doesn't exist yet.
MOZ_ASSERT(currentFrameEndTime.isSome());
// If we didn't advance a frame, and our frame end time didn't change,
// then we need to break out of this loop & wait for the frame(s)
// to finish downloading.
if (!frameRes.mFrameAdvanced && (currentFrameEndTime == oldFrameEndTime)) {
if (!frameRes.mFrameAdvanced && (*currentFrameEndTime == oldFrameEndTime)) {
break;
}
}
// We advanced to the correct frame, so the composited frame is now valid to be drawn.
if (currentFrameEndTime > aTime) {
if (*currentFrameEndTime > aTime) {
aState.mCompositedFrameInvalid = false;
}
@ -371,17 +394,18 @@ FrameAnimator::GetCompositedFrame(AnimationState& aState)
return result;
}
FrameTimeout
FrameAnimator::GetTimeoutForFrame(uint32_t aFrameNum) const
Maybe<FrameTimeout>
FrameAnimator::GetTimeoutForFrame(AnimationState& aState,
uint32_t aFrameNum) const
{
RawAccessFrameRef frame = GetRawFrame(aFrameNum);
if (frame) {
AnimationData data = frame->GetAnimationData();
return data.mTimeout;
return Some(data.mTimeout);
}
NS_WARNING("No frame; called GetTimeoutForFrame too early?");
return FrameTimeout::FromRawMilliseconds(100);
MOZ_ASSERT(aState.mHasBeenDecoded && !aState.mIsCurrentlyDecoded);
return Nothing();
}
static void

View file

@ -312,15 +312,17 @@ private: // methods
*/
RawAccessFrameRef GetRawFrame(uint32_t aFrameNum) const;
/// @return the given frame's timeout.
FrameTimeout GetTimeoutForFrame(uint32_t aFrameNum) const;
/// @return the given frame's timeout if it is available
Maybe<FrameTimeout> GetTimeoutForFrame(AnimationState& aState,
uint32_t aFrameNum) const;
/**
* Get the time the frame we're currently displaying is supposed to end.
*
* In the error case, returns an "infinity" timestamp.
* In the error case (e.g. if the requested frame is not currently
* decoded), returns Nothing().
*/
TimeStamp GetCurrentImgFrameEndTime(AnimationState& aState) const;
Maybe<TimeStamp> GetCurrentImgFrameEndTime(AnimationState& aState) const;
bool DoBlend(gfx::IntRect* aDirtyRect,
uint32_t aPrevFrameIndex,

View file

@ -107,6 +107,9 @@ RasterImage::~RasterImage()
// Record Telemetry.
Telemetry::Accumulate(Telemetry::IMAGE_DECODE_COUNT, mDecodeCount);
if (mAnimationState) {
Telemetry::Accumulate(Telemetry::IMAGE_ANIMATED_DECODE_COUNT, mDecodeCount);
}
}
nsresult
@ -1428,6 +1431,11 @@ RasterImage::Draw(gfxContext* aContext,
TimeDuration drawLatency = TimeStamp::Now() - mDrawStartTime;
Telemetry::Accumulate(Telemetry::IMAGE_DECODE_ON_DRAW_LATENCY,
int32_t(drawLatency.ToMicroseconds()));
if (mAnimationState) {
Telemetry::Accumulate(Telemetry::IMAGE_ANIMATED_DECODE_ON_DRAW_LATENCY,
int32_t(drawLatency.ToMicroseconds()));
}
mDrawStartTime = TimeStamp();
}
@ -1677,6 +1685,11 @@ RasterImage::NotifyDecodeComplete(const DecoderFinalStatus& aStatus,
Telemetry::Accumulate(Telemetry::IMAGE_DECODE_TIME,
int32_t(aTelemetry.mDecodeTime.ToMicroseconds()));
if (mAnimationState) {
Telemetry::Accumulate(Telemetry::IMAGE_ANIMATED_DECODE_TIME,
int32_t(aTelemetry.mDecodeTime.ToMicroseconds()));
}
if (aTelemetry.mSpeedHistogram) {
Telemetry::Accumulate(*aTelemetry.mSpeedHistogram, aTelemetry.Speed());
}

View file

@ -151,7 +151,9 @@ for f in files:
log(3, ' pretty printed code:')
ipdl.genipdl(ast, codedir)
ipdl.checkFixedSyncMessages(parser)
if not ipdl.checkFixedSyncMessages(parser):
# Errors have already been printed to stderr; just exit
sys.exit(1)
# Second pass: generate code
for f in files:

View file

@ -55,6 +55,7 @@ def checkSyncMessage(tu, syncMsgList, errout=sys.stderr):
def checkFixedSyncMessages(config, errout=sys.stderr):
fixed = SyncMessageChecker.getFixedSyncMessages()
error_free = True
for item in fixed:
protocol = item.split('::')[0]
# Ignore things like sync messages in test protocols we didn't compile.
@ -63,3 +64,5 @@ def checkFixedSyncMessages(config, errout=sys.stderr):
'platform' not in config.options(item):
print >>errout, 'Error: Sync IPC message %s not found, it appears to be fixed.\n' \
'Please remove it from sync-messages.ini.' % item
error_free = False
return error_free

View file

@ -233,244 +233,364 @@ description =
# A11y code
[PDocAccessible::State]
description =
platform = notwin
[PDocAccessible::NativeState]
description =
platform = notwin
[PDocAccessible::Name]
description =
platform = notwin
[PDocAccessible::Value]
description =
platform = notwin
[PDocAccessible::Help]
description =
platform = notwin
[PDocAccessible::Description]
description =
platform = notwin
[PDocAccessible::Attributes]
description =
platform = notwin
[PDocAccessible::RelationByType]
description =
platform = notwin
[PDocAccessible::Relations]
description =
platform = notwin
[PDocAccessible::IsSearchbox]
description =
platform = notwin
[PDocAccessible::LandmarkRole]
description =
platform = notwin
[PDocAccessible::ARIARoleAtom]
description =
platform = notwin
[PDocAccessible::GetLevelInternal]
description =
platform = notwin
[PDocAccessible::CaretLineNumber]
description =
platform = notwin
[PDocAccessible::CaretOffset]
description =
platform = notwin
[PDocAccessible::CharacterCount]
description =
platform = notwin
[PDocAccessible::SelectionCount]
description =
platform = notwin
[PDocAccessible::TextSubstring]
description =
platform = notwin
[PDocAccessible::GetTextAfterOffset]
description =
platform = notwin
[PDocAccessible::GetTextAtOffset]
description =
platform = notwin
[PDocAccessible::GetTextBeforeOffset]
description =
platform = notwin
[PDocAccessible::CharAt]
description =
platform = notwin
[PDocAccessible::TextAttributes]
description =
platform = notwin
[PDocAccessible::DefaultTextAttributes]
description =
platform = notwin
[PDocAccessible::TextBounds]
description =
platform = notwin
[PDocAccessible::CharBounds]
description =
platform = notwin
[PDocAccessible::OffsetAtPoint]
description =
platform = notwin
[PDocAccessible::SelectionBoundsAt]
description =
platform = notwin
[PDocAccessible::SetSelectionBoundsAt]
description =
platform = notwin
[PDocAccessible::AddToSelection]
description =
platform = notwin
[PDocAccessible::RemoveFromSelection]
description =
platform = notwin
[PDocAccessible::Text]
description =
platform = notwin
[PDocAccessible::ReplaceText]
description =
platform = notwin
[PDocAccessible::InsertText]
description =
platform = notwin
[PDocAccessible::CopyText]
description =
platform = notwin
[PDocAccessible::CutText]
description =
platform = notwin
[PDocAccessible::DeleteText]
description =
platform = notwin
[PDocAccessible::PasteText]
description =
platform = notwin
[PDocAccessible::ImagePosition]
description =
platform = notwin
[PDocAccessible::ImageSize]
description =
platform = notwin
[PDocAccessible::StartOffset]
description =
platform = notwin
[PDocAccessible::EndOffset]
description =
platform = notwin
[PDocAccessible::IsLinkValid]
description =
platform = notwin
[PDocAccessible::AnchorCount]
description =
platform = notwin
[PDocAccessible::AnchorURIAt]
description =
platform = notwin
[PDocAccessible::AnchorAt]
description =
platform = notwin
[PDocAccessible::LinkCount]
description =
platform = notwin
[PDocAccessible::LinkAt]
description =
platform = notwin
[PDocAccessible::LinkIndexOf]
description =
platform = notwin
[PDocAccessible::LinkIndexAtOffset]
description =
platform = notwin
[PDocAccessible::TableOfACell]
description =
platform = notwin
[PDocAccessible::ColIdx]
description =
platform = notwin
[PDocAccessible::RowIdx]
description =
platform = notwin
[PDocAccessible::GetPosition]
description =
platform = notwin
[PDocAccessible::ColExtent]
description =
platform = notwin
[PDocAccessible::RowExtent]
description =
platform = notwin
[PDocAccessible::GetColRowExtents]
description =
platform = notwin
[PDocAccessible::ColHeaderCells]
description =
platform = notwin
[PDocAccessible::RowHeaderCells]
description =
platform = notwin
[PDocAccessible::IsCellSelected]
description =
platform = notwin
[PDocAccessible::TableCaption]
description =
platform = notwin
[PDocAccessible::TableSummary]
description =
platform = notwin
[PDocAccessible::TableColumnCount]
description =
platform = notwin
[PDocAccessible::TableRowCount]
description =
platform = notwin
[PDocAccessible::TableCellAt]
description =
platform = notwin
[PDocAccessible::TableCellIndexAt]
description =
platform = notwin
[PDocAccessible::TableColumnIndexAt]
description =
platform = notwin
[PDocAccessible::TableRowIndexAt]
description =
platform = notwin
[PDocAccessible::TableRowAndColumnIndicesAt]
description =
platform = notwin
[PDocAccessible::TableColumnExtentAt]
description =
platform = notwin
[PDocAccessible::TableRowExtentAt]
description =
platform = notwin
[PDocAccessible::TableColumnDescription]
description =
platform = notwin
[PDocAccessible::TableRowDescription]
description =
platform = notwin
[PDocAccessible::TableColumnSelected]
description =
platform = notwin
[PDocAccessible::TableRowSelected]
description =
platform = notwin
[PDocAccessible::TableCellSelected]
description =
platform = notwin
[PDocAccessible::TableSelectedCellCount]
description =
platform = notwin
[PDocAccessible::TableSelectedColumnCount]
description =
platform = notwin
[PDocAccessible::TableSelectedRowCount]
description =
platform = notwin
[PDocAccessible::TableSelectedCells]
description =
platform = notwin
[PDocAccessible::TableSelectedCellIndices]
description =
platform = notwin
[PDocAccessible::TableSelectedColumnIndices]
description =
platform = notwin
[PDocAccessible::TableSelectedRowIndices]
description =
platform = notwin
[PDocAccessible::TableSelectColumn]
description =
platform = notwin
[PDocAccessible::TableSelectRow]
description =
platform = notwin
[PDocAccessible::TableUnselectColumn]
description =
platform = notwin
[PDocAccessible::TableUnselectRow]
description =
platform = notwin
[PDocAccessible::TableIsProbablyForLayout]
description =
platform = notwin
[PDocAccessible::AtkTableColumnHeader]
description =
platform = notwin
[PDocAccessible::AtkTableRowHeader]
description =
platform = notwin
[PDocAccessible::SelectedItems]
description =
platform = notwin
[PDocAccessible::SelectedItemCount]
description =
platform = notwin
[PDocAccessible::GetSelectedItem]
description =
platform = notwin
[PDocAccessible::IsItemSelected]
description =
platform = notwin
[PDocAccessible::AddItemToSelection]
description =
platform = notwin
[PDocAccessible::RemoveItemFromSelection]
description =
platform = notwin
[PDocAccessible::SelectAll]
description =
platform = notwin
[PDocAccessible::UnselectAll]
description =
platform = notwin
[PDocAccessible::DoAction]
description =
platform = notwin
[PDocAccessible::ActionCount]
description =
platform = notwin
[PDocAccessible::ActionDescriptionAt]
description =
platform = notwin
[PDocAccessible::ActionNameAt]
description =
platform = notwin
[PDocAccessible::AccessKey]
description =
platform = notwin
[PDocAccessible::KeyboardShortcut]
description =
platform = notwin
[PDocAccessible::AtkKeyBinding]
description =
platform = notwin
[PDocAccessible::CurValue]
description =
platform = notwin
[PDocAccessible::SetCurValue]
description =
platform = notwin
[PDocAccessible::MinValue]
description =
platform = notwin
[PDocAccessible::MaxValue]
description =
platform = notwin
[PDocAccessible::Step]
description =
platform = notwin
[PDocAccessible::FocusedChild]
description =
platform = notwin
[PDocAccessible::Language]
description =
platform = notwin
[PDocAccessible::DocType]
description =
platform = notwin
[PDocAccessible::Title]
description =
platform = notwin
[PDocAccessible::URL]
description =
platform = notwin
[PDocAccessible::MimeType]
description =
platform = notwin
[PDocAccessible::URLDocTypeMimeType]
description =
platform = notwin
[PDocAccessible::AccessibleAtPoint]
description =
platform = notwin
[PDocAccessible::Extents]
description =
platform = notwin
[PDocAccessible::DOMNodeID]
description =
platform = notwin
[PDocAccessible::GetWindowedPluginIAccessible]
description =
platform = win

View file

@ -93,9 +93,10 @@ class MOZ_NON_PARAM JS_PUBLIC_API(ProfilingFrameIterator)
public:
struct RegisterState
{
RegisterState() : pc(nullptr), sp(nullptr), lr(nullptr) {}
RegisterState() : pc(nullptr), sp(nullptr), fp(nullptr), lr(nullptr) {}
void* pc;
void* sp;
void* fp;
void* lr;
};

View file

@ -330,6 +330,7 @@ js::gc::GCRuntime::traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrM
// Trace active interpreter and JIT stack roots.
TraceInterpreterActivations(cx, target, trc);
jit::TraceJitActivations(cx, target, trc);
wasm::TraceActivations(cx, target, trc);
// Trace legacy C stack roots.
AutoGCRooter::traceAll(target, trc);

View file

@ -1872,15 +1872,51 @@ setARMHwCapFlags('vfp');
asmCompile('stdlib', 'ffi', 'heap',
USE_ASM + `
var atomic_cmpxchg = stdlib.Atomics.compareExchange;
var atomic_exchange = stdlib.Atomics.exchange;
var atomic_add = stdlib.Atomics.add;
var atomic_sub = stdlib.Atomics.sub;
var atomic_and = stdlib.Atomics.and;
var atomic_or = stdlib.Atomics.or;
var atomic_xor = stdlib.Atomics.xor;
var i8a = new stdlib.Int8Array(heap);
function do_cas() {
var v = 0;
v = atomic_cmpxchg(i8a, 100, 0, -1);
return v|0;
}
function do_xchg() {
var v = 0;
v = atomic_exchange(i8a, 200, 37);
return v|0;
}
function do_add() {
var v = 0;
v = atomic_add(i8a, 10, 37);
return v|0;
}
function do_sub() {
var v = 0;
v = atomic_sub(i8a, 10, 37);
return v|0;
}
function do_and() {
var v = 0;
v = atomic_and(i8a, 10, 37);
return v|0;
}
function do_or() {
var v = 0;
v = atomic_or(i8a, 10, 37);
return v|0;
}
function do_xor() {
var v = 0;
v = atomic_xor(i8a, 10, 37);
return v|0;
}
return { xchg: do_xchg }
return { cas:do_cas, xchg: do_xchg, add: do_add, sub: do_sub, and: do_and, or: do_or, xor: do_xor }
`);

View file

@ -69,7 +69,7 @@ var f = asmLink(asmCompile('global','ffis',USE_ASM + "var ffi=ffis.ffi; function
f(0);
assertStackContainsSeq(stacks, "");
f(+1);
assertStackContainsSeq(stacks, "");
assertStackContainsSeq(stacks, "<,g,f,>");
f(0);
assertStackContainsSeq(stacks, "<,g,f,>");
f(-1);
@ -112,7 +112,7 @@ function testBuiltinD2D(name) {
enableSingleStepProfiling();
assertEq(f(.1), eval("Math." + name + "(.1)"));
var stacks = disableSingleStepProfiling();
assertStackContainsSeq(stacks, ">,f,>,native call,>,f,>,>");
assertStackContainsSeq(stacks, ">,f,>,f,>,>");
}
}
for (name of ['sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'ceil', 'floor', 'exp', 'log'])
@ -125,7 +125,7 @@ function testBuiltinF2F(name) {
enableSingleStepProfiling();
assertEq(f(.1), eval("Math.fround(Math." + name + "(Math.fround(.1)))"));
var stacks = disableSingleStepProfiling();
assertStackContainsSeq(stacks, ">,f,>,native call,>,f,>,>");
assertStackContainsSeq(stacks, ">,f,>,f,>,>");
}
}
for (name of ['ceil', 'floor'])
@ -138,7 +138,7 @@ function testBuiltinDD2D(name) {
enableSingleStepProfiling();
assertEq(f(.1, .2), eval("Math." + name + "(.1, .2)"));
var stacks = disableSingleStepProfiling();
assertStackContainsSeq(stacks, ">,f,>,native call,>,f,>,>");
assertStackContainsSeq(stacks, ">,f,>,f,>,>");
}
}
for (name of ['atan2', 'pow'])

View file

@ -0,0 +1,50 @@
if (!wasmIsSupported())
quit();
var sandbox = newGlobal();
var dbg = new Debugger(sandbox);
var counter = 0;
dbg.onExceptionUnwind = (frame, value) => {
if (frame.type !== "wasmcall")
return;
if (++counter != 2)
return;
gc();
};
sandbox.innerCode = wasmTextToBinary(`(module
(import "imports" "tbl" (table 1 anyfunc))
(import $setNull "imports" "setNull" (func))
(func $trap
call $setNull
unreachable
)
(elem (i32.const 0) $trap)
)`);
sandbox.outerCode = wasmTextToBinary(`(module
(import "imports" "tbl" (table 1 anyfunc))
(type $v2v (func))
(func (export "run")
i32.const 0
call_indirect $v2v
)
)`);
sandbox.eval(`
(function() {
var tbl = new WebAssembly.Table({initial:1, element:"anyfunc"});
function setNull() { tbl.set(0, null) }
new WebAssembly.Instance(new WebAssembly.Module(innerCode), {imports:{tbl,setNull}});
var outer = new WebAssembly.Instance(new WebAssembly.Module(outerCode), {imports:{tbl}});
var caught;
try {
outer.exports.run();
} catch (e) {
caught = e;
}
assertEq(caught instanceof WebAssembly.RuntimeError, true);
})();
`);

View file

@ -125,7 +125,7 @@ testError(
(func (export "") (call $foo))
)`,
WebAssembly.RuntimeError,
["", ">", "1,>", "0,1,>", "trap handling,0,1,>", "inline stub,0,1,>", "trap handling,0,1,>", ""]);
["", ">", "1,>", "0,1,>", "interstitial,0,1,>", "trap handling,0,1,>", ""]);
testError(
`(module
@ -140,7 +140,7 @@ WebAssembly.RuntimeError,
// Technically we have this one *one-instruction* interval where
// the caller is lost (the stack with "1,>"). It's annoying to fix and shouldn't
// mess up profiles in practice so we ignore it.
["", ">", "0,>", "1,0,>", "1,>", "trap handling,0,>", "inline stub,0,>", "trap handling,0,>", ""]);
["", ">", "0,>", "1,0,>", "1,>", "trap handling,0,>", ""]);
(function() {
var e = wasmEvalText(`

View file

@ -0,0 +1,17 @@
// |jit-test| exitstatus: 6;
// Don't include wasm.js in timeout tests: when wasm isn't supported, it will
// quit(0) which will cause the test to fail.
if (!wasmIsSupported())
quit(6);
var code = wasmTextToBinary(`(module
(func (export "iloop")
(loop $top br $top)
)
)`);
var i = new WebAssembly.Instance(new WebAssembly.Module(code));
timeout(1);
i.exports.iloop();
assertEq(true, false);

View file

@ -0,0 +1,31 @@
// |jit-test| exitstatus: 6;
// Don't include wasm.js in timeout tests: when wasm isn't supported, it will
// quit(0) which will cause the test to fail.
if (!wasmIsSupported())
quit(6);
var tbl = new WebAssembly.Table({initial:1, element:"anyfunc"});
new WebAssembly.Instance(new WebAssembly.Module(wasmTextToBinary(`(module
(func $iloop
loop $top
br $top
end
)
(import "imports" "tbl" (table 1 anyfunc))
(elem (i32.const 0) $iloop)
)`)), {imports:{tbl}});
var outer = new WebAssembly.Instance(new WebAssembly.Module(wasmTextToBinary(`(module
(import "imports" "tbl" (table 1 anyfunc))
(type $v2v (func))
(func (export "run")
i32.const 0
call_indirect $v2v
)
)`)), {imports:{tbl}});
timeout(1, () => { tbl.set(0, null); gc() });
outer.exports.run();
assertEq(true, false);

View file

@ -0,0 +1,2 @@
|jit-test| test-also-wasm-baseline

View file

@ -447,14 +447,18 @@ class CompileInfo
// the frame is active on the stack. This implies that these definitions
// would have to be executed and that they cannot be removed even if they
// are unused.
bool isObservableSlot(uint32_t slot) const {
if (isObservableFrameSlot(slot))
return true;
inline bool isObservableSlot(uint32_t slot) const {
if (slot >= firstLocalSlot()) {
// The |this| slot for a derived class constructor is a local slot.
if (thisSlotForDerivedClassConstructor_)
return *thisSlotForDerivedClassConstructor_ == slot;
return false;
}
if (isObservableArgumentSlot(slot))
return true;
if (slot < firstArgSlot())
return isObservableFrameSlot(slot);
return false;
return isObservableArgumentSlot(slot);
}
bool isObservableFrameSlot(uint32_t slot) const {

View file

@ -1861,6 +1861,17 @@ OptimizeMIR(MIRGenerator* mir)
return false;
}
// BCE marks bounds checks as dead, so do BCE before DCE.
if (mir->compilingWasm() && !JitOptions.wasmAlwaysCheckBounds) {
if (!EliminateBoundsChecks(mir, graph))
return false;
gs.spewPass("Redundant Bounds Check Elimination");
AssertGraphCoherency(graph);
if (mir->shouldCancel("BCE"))
return false;
}
{
AutoTraceLog log(logger, TraceLogger_EliminateDeadCode);
if (!EliminateDeadCode(mir, graph))
@ -1933,13 +1944,6 @@ OptimizeMIR(MIRGenerator* mir)
AssertGraphCoherency(graph);
}
if (mir->compilingWasm()) {
if (!EliminateBoundsChecks(mir, graph))
return false;
gs.spewPass("Redundant Bounds Check Elimination");
AssertGraphCoherency(graph);
}
AssertGraphCoherency(graph, /* force = */ true);
DumpMIRExpressions(graph);

View file

@ -196,6 +196,8 @@ FlagPhiInputsAsHavingRemovedUses(MIRGenerator* mir, MBasicBlock* block, MBasicBl
static bool
FlagAllOperandsAsHavingRemovedUses(MIRGenerator* mir, MBasicBlock* block)
{
const CompileInfo& info = block->info();
// Flag all instructions operands as having removed uses.
MInstructionIterator end = block->end();
for (MInstructionIterator it = block->begin(); it != end; it++) {
@ -210,8 +212,10 @@ FlagAllOperandsAsHavingRemovedUses(MIRGenerator* mir, MBasicBlock* block)
if (MResumePoint* rp = ins->resumePoint()) {
// Note: no need to iterate over the caller's of the resume point as
// this is the same as the entry resume point.
for (size_t i = 0, e = rp->numOperands(); i < e; i++)
rp->getOperand(i)->setUseRemovedUnchecked();
for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
if (info.isObservableSlot(i))
rp->getOperand(i)->setUseRemovedUnchecked();
}
}
}
@ -221,8 +225,10 @@ FlagAllOperandsAsHavingRemovedUses(MIRGenerator* mir, MBasicBlock* block)
if (mir->shouldCancel("FlagAllOperandsAsHavingRemovedUses loop 2"))
return false;
for (size_t i = 0, e = rp->numOperands(); i < e; i++)
rp->getOperand(i)->setUseRemovedUnchecked();
for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
if (info.isObservableSlot(i))
rp->getOperand(i)->setUseRemovedUnchecked();
}
rp = rp->caller();
}

View file

@ -4278,10 +4278,7 @@ LIRGenerator::visitWasmBoundsCheck(MWasmBoundsCheck* ins)
#ifdef WASM_HUGE_MEMORY
MOZ_CRASH("No bounds checking on huge memory");
#else
if (ins->isRedundant()) {
if (MOZ_LIKELY(!JitOptions.wasmAlwaysCheckBounds))
return;
}
MOZ_ASSERT(!ins->isRedundant());
MDefinition* index = ins->index();
MOZ_ASSERT(index->type() == MIRType::Int32);
@ -4369,13 +4366,7 @@ LIRGenerator::visitWasmReturn(MWasmReturn* ins)
MDefinition* rval = ins->getOperand(0);
if (rval->type() == MIRType::Int64) {
LWasmReturnI64* lir = new(alloc()) LWasmReturnI64(useInt64Fixed(rval, ReturnReg64));
// Preserve the TLS pointer we were passed in `WasmTlsReg`.
MDefinition* tlsPtr = ins->getOperand(1);
lir->setOperand(INT64_PIECES, useFixed(tlsPtr, WasmTlsReg));
add(lir);
add(new(alloc()) LWasmReturnI64(useInt64Fixed(rval, ReturnReg64)));
return;
}
@ -4391,23 +4382,13 @@ LIRGenerator::visitWasmReturn(MWasmReturn* ins)
else
MOZ_CRASH("Unexpected wasm return type");
// Preserve the TLS pointer we were passed in `WasmTlsReg`.
MDefinition* tlsPtr = ins->getOperand(1);
lir->setOperand(1, useFixed(tlsPtr, WasmTlsReg));
add(lir);
}
void
LIRGenerator::visitWasmReturnVoid(MWasmReturnVoid* ins)
{
auto* lir = new(alloc()) LWasmReturnVoid;
// Preserve the TLS pointer we were passed in `WasmTlsReg`.
MDefinition* tlsPtr = ins->getOperand(0);
lir->setOperand(0, useFixed(tlsPtr, WasmTlsReg));
add(lir);
add(new(alloc()) LWasmReturnVoid);
}
void

View file

@ -3171,7 +3171,8 @@ MBinaryArithInstruction::foldsTo(TempAllocator& alloc)
if (isTruncated()) {
if (!folded->block())
block()->insertBefore(this, folded);
return MTruncateToInt32::New(alloc, folded);
if (folded->type() != MIRType::Int32)
return MTruncateToInt32::New(alloc, folded);
}
return folded;
}
@ -5700,10 +5701,9 @@ MWasmUnsignedToFloat32::foldsTo(TempAllocator& alloc)
MWasmCall*
MWasmCall::New(TempAllocator& alloc, const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee,
const Args& args, MIRType resultType, uint32_t spIncrement, uint32_t tlsStackOffset,
MDefinition* tableIndex)
const Args& args, MIRType resultType, uint32_t spIncrement, MDefinition* tableIndex)
{
MWasmCall* call = new(alloc) MWasmCall(desc, callee, spIncrement, tlsStackOffset);
MWasmCall* call = new(alloc) MWasmCall(desc, callee, spIncrement);
call->setResultType(resultType);
if (!call->argRegs_.init(alloc, args.length()))
@ -5729,12 +5729,10 @@ MWasmCall::NewBuiltinInstanceMethodCall(TempAllocator& alloc,
const ABIArg& instanceArg,
const Args& args,
MIRType resultType,
uint32_t spIncrement,
uint32_t tlsStackOffset)
uint32_t spIncrement)
{
auto callee = wasm::CalleeDesc::builtinInstanceMethod(builtin);
MWasmCall* call = MWasmCall::New(alloc, desc, callee, args, resultType, spIncrement,
tlsStackOffset, nullptr);
MWasmCall* call = MWasmCall::New(alloc, desc, callee, args, resultType, spIncrement, nullptr);
if (!call)
return nullptr;

View file

@ -13718,15 +13718,14 @@ class MWasmBoundsCheck
: public MBinaryInstruction,
public NoTypePolicy::Data
{
bool redundant_;
wasm::TrapOffset trapOffset_;
explicit MWasmBoundsCheck(MDefinition* index, MDefinition* boundsCheckLimit, wasm::TrapOffset trapOffset)
: MBinaryInstruction(index, boundsCheckLimit),
redundant_(false),
trapOffset_(trapOffset)
{
setGuard(); // Effectful: throws for OOB.
// Bounds check is effectful: it throws for OOB.
setGuard();
}
public:
@ -13739,11 +13738,11 @@ class MWasmBoundsCheck
}
bool isRedundant() const {
return redundant_;
return !isGuard();
}
void setRedundant(bool val) {
redundant_ = val;
void setRedundant() {
setNotGuard();
}
wasm::TrapOffset trapOffset() const {
@ -14247,12 +14246,11 @@ class MWasmParameter : public MNullaryInstruction
};
class MWasmReturn
: public MAryControlInstruction<2, 0>,
: public MAryControlInstruction<1, 0>,
public NoTypePolicy::Data
{
explicit MWasmReturn(MDefinition* ins, MDefinition* tlsPtr) {
explicit MWasmReturn(MDefinition* ins) {
initOperand(0, ins);
initOperand(1, tlsPtr);
}
public:
@ -14261,13 +14259,9 @@ class MWasmReturn
};
class MWasmReturnVoid
: public MAryControlInstruction<1, 0>,
: public MAryControlInstruction<0, 0>,
public NoTypePolicy::Data
{
explicit MWasmReturnVoid(MDefinition* tlsPtr) {
initOperand(0, tlsPtr);
}
public:
INSTRUCTION_HEADER(WasmReturnVoid)
TRIVIAL_NEW_WRAPPERS
@ -14305,15 +14299,12 @@ class MWasmCall final
wasm::CalleeDesc callee_;
FixedList<AnyRegister> argRegs_;
uint32_t spIncrement_;
uint32_t tlsStackOffset_;
ABIArg instanceArg_;
MWasmCall(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee, uint32_t spIncrement,
uint32_t tlsStackOffset)
MWasmCall(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee, uint32_t spIncrement)
: desc_(desc),
callee_(callee),
spIncrement_(spIncrement),
tlsStackOffset_(tlsStackOffset)
spIncrement_(spIncrement)
{ }
public:
@ -14326,15 +14317,12 @@ class MWasmCall final
};
typedef Vector<Arg, 8, SystemAllocPolicy> Args;
static const uint32_t DontSaveTls = UINT32_MAX;
static MWasmCall* New(TempAllocator& alloc,
const wasm::CallSiteDesc& desc,
const wasm::CalleeDesc& callee,
const Args& args,
MIRType resultType,
uint32_t spIncrement,
uint32_t tlsStackOffset,
MDefinition* tableIndex = nullptr);
static MWasmCall* NewBuiltinInstanceMethodCall(TempAllocator& alloc,
@ -14343,8 +14331,7 @@ class MWasmCall final
const ABIArg& instanceArg,
const Args& args,
MIRType resultType,
uint32_t spIncrement,
uint32_t tlsStackOffset);
uint32_t spIncrement);
size_t numArgs() const {
return argRegs_.length();
@ -14362,13 +14349,6 @@ class MWasmCall final
uint32_t spIncrement() const {
return spIncrement_;
}
bool saveTls() const {
return tlsStackOffset_ != DontSaveTls;
}
uint32_t tlsStackOffset() const {
MOZ_ASSERT(saveTls());
return tlsStackOffset_;
}
bool possiblyCalls() const override {
return true;

View file

@ -84,21 +84,21 @@ void
MacroAssembler::call(const wasm::CallSiteDesc& desc, const Register reg)
{
CodeOffset l = call(reg);
append(desc, l, framePushed());
append(desc, l);
}
void
MacroAssembler::call(const wasm::CallSiteDesc& desc, uint32_t funcDefIndex)
{
CodeOffset l = callWithPatch();
append(desc, l, framePushed(), funcDefIndex);
append(desc, l, funcDefIndex);
}
void
MacroAssembler::call(const wasm::CallSiteDesc& desc, wasm::Trap trap)
{
CodeOffset l = callWithPatch();
append(desc, l, framePushed(), trap);
append(desc, l, trap);
}
// ===============================================================

View file

@ -2931,6 +2931,14 @@ MacroAssembler::wasmEmitTrapOutOfLineCode()
if (size_t dec = StackDecrementForCall(ABIStackAlignment, alreadyPushed, toPush))
reserveStack(dec);
// To call the trap handler function, we must have the WasmTlsReg
// filled since this is the normal calling ABI. To avoid requiring
// every trapping operation to have the TLS register filled for the
// rare case that it takes a trap, we restore it from the frame on
// the out-of-line path. However, there are millions of out-of-line
// paths (viz. for loads/stores), so the load is factored out into
// the shared FarJumpIsland generated by patchCallSites.
// Call the trap's exit, using the bytecode offset of the trap site.
// Note that this code is inside the same CodeRange::Function as the
// trap site so it's as if the trapping instruction called the
@ -2955,8 +2963,34 @@ MacroAssembler::wasmEmitTrapOutOfLineCode()
clearTrapSites();
}
void
MacroAssembler::wasmAssertNonExitInvariants(Register activation)
{
#ifdef DEBUG
// WasmActivation.exitFP should be null when outside any exit frame.
Label ok;
Address exitFP(activation, WasmActivation::offsetOfExitFP());
branchPtr(Assembler::Equal, exitFP, ImmWord(0), &ok);
breakpoint();
bind(&ok);
#endif
}
//}}} check_macroassembler_style
void
MacroAssembler::loadWasmActivationFromTls(Register dest)
{
loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, cx)), dest);
loadPtr(Address(dest, JSContext::offsetOfWasmActivation()), dest);
}
void
MacroAssembler::loadWasmTlsRegFromFrame(Register dest)
{
loadPtr(Address(getStackPointer(), framePushed() + offsetof(wasm::Frame, tls)), dest);
}
void
MacroAssembler::BranchType::emit(MacroAssembler& masm)
{
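The comment added above is the crux of this change: WasmTlsReg is not guaranteed live at trap sites, but every wasm frame spills the TLS pointer, so the shared out-of-line path can reload it at a fixed offset from the frame base. A plain-C++ model of loadWasmTlsRegFromFrame (the field names and layout here are illustrative, not the actual wasm::Frame definition):

#include <cstddef>
#include <cstdint>

struct TlsData { void* cx; };

struct Frame {           // illustrative layout only
    void* callerFP;
    void* returnAddress;
    TlsData* tls;        // spilled by the prologue of every wasm function
};

TlsData* LoadTlsFromFrame(uint8_t* sp, size_t framePushed) {
    // The frame base is the stack pointer plus everything pushed since entry.
    auto* frame = reinterpret_cast<Frame*>(sp + framePushed);
    return frame->tls;
}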

View file

@ -447,6 +447,7 @@ class MacroAssembler : public MacroAssemblerSpecific
void Push(const ImmPtr imm) PER_SHARED_ARCH;
void Push(const ImmGCPtr ptr) PER_SHARED_ARCH;
void Push(FloatRegister reg) PER_SHARED_ARCH;
void PushFlags() DEFINED_ON(x86_shared);
void Push(jsid id, Register scratchReg);
void Push(TypedOrValueRegister v);
void Push(const ConstantOrRegister& v);
@ -462,6 +463,7 @@ class MacroAssembler : public MacroAssemblerSpecific
void Pop(Register reg) PER_SHARED_ARCH;
void Pop(FloatRegister t) PER_SHARED_ARCH;
void Pop(const ValueOperand& val) PER_SHARED_ARCH;
void PopFlags() DEFINED_ON(x86_shared);
void popRooted(VMFunction::RootType rootType, Register cellReg, const ValueOperand& valueReg);
// Move the stack pointer based on the requested amount.
@ -1462,6 +1464,9 @@ class MacroAssembler : public MacroAssemblerSpecific
// including "normal" OutOfLineCode.
void wasmEmitTrapOutOfLineCode();
// Assert invariants that should be true within any non-exit-stub wasm code.
void wasmAssertNonExitInvariants(Register activation);
public:
// ========================================================================
// Clamping functions.
@ -1517,15 +1522,9 @@ class MacroAssembler : public MacroAssemblerSpecific
loadJSContext(dest);
loadPtr(Address(dest, offsetof(JSContext, activation_)), dest);
}
void loadWasmActivationFromTls(Register dest) {
loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, cx)), dest);
loadPtr(Address(dest, JSContext::offsetOfWasmActivation()), dest);
}
void loadWasmActivationFromSymbolicAddress(Register dest) {
movePtr(wasm::SymbolicAddress::ContextPtr, dest);
loadPtr(Address(dest, 0), dest);
loadPtr(Address(dest, JSContext::offsetOfWasmActivation()), dest);
}
void loadWasmActivationFromTls(Register dest);
void loadWasmTlsRegFromFrame(Register dest = WasmTlsReg);
template<typename T>
void loadTypedOrValue(const T& src, TypedOrValueRegister dest) {

View file

@ -288,9 +288,12 @@ class RegisterAllocator
allRegisters_.take(AnyRegister(HeapReg));
allRegisters_.take(AnyRegister(HeapLenReg));
#endif
allRegisters_.take(FramePointer);
} else {
if (FramePointer != InvalidReg && mir->instrumentedProfiling())
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64)
if (mir->instrumentedProfiling())
allRegisters_.take(AnyRegister(FramePointer));
#endif
}
}

View file

@ -6,6 +6,7 @@
#include "jit/WasmBCE.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "wasm/WasmTypes.h"
using namespace js;
using namespace js::jit;
@ -42,15 +43,34 @@ jit::EliminateBoundsChecks(MIRGenerator* mir, MIRGraph& graph)
switch (def->op()) {
case MDefinition::Op_WasmBoundsCheck: {
MWasmBoundsCheck* bc = def->toWasmBoundsCheck();
MDefinition* addr = def->getOperand(0);
MDefinition* addr = bc->index();
LastSeenMap::AddPtr ptr = lastSeen.lookupForAdd(addr->id());
if (ptr) {
if (ptr->value()->block()->dominates(block))
bc->setRedundant(true);
} else {
if (!lastSeen.add(ptr, addr->id(), def))
return false;
// Eliminate constant-address bounds checks to addresses below
// the heap minimum.
//
// The payload of the MConstant will be Double if the constant
// result is above 2^31-1, but we don't care about that for BCE.
#ifndef WASM_HUGE_MEMORY
MOZ_ASSERT(wasm::MaxMemoryAccessSize < wasm::GuardSize,
"Guard page handles partial out-of-bounds");
#endif
if (addr->isConstant() && addr->toConstant()->type() == MIRType::Int32 &&
uint32_t(addr->toConstant()->toInt32()) < mir->minWasmHeapLength())
{
bc->setRedundant();
}
else
{
LastSeenMap::AddPtr ptr = lastSeen.lookupForAdd(addr->id());
if (ptr) {
if (ptr->value()->block()->dominates(block))
bc->setRedundant();
} else {
if (!lastSeen.add(ptr, addr->id(), def))
return false;
}
}
break;
}
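
The hunk above combines two elimination rules. The following standalone sketch models them outside of MIR, under the simplifying assumption that lower block ids dominate higher ones (straight-line code); all names are illustrative, not SpiderMonkey's.

#include <cstdint>
#include <cstdio>
#include <unordered_map>
#include <vector>

struct BoundsCheck {
    uint32_t id;          // id of the index definition being checked
    bool isConstant;      // whether the index is a compile-time constant
    uint32_t constValue;  // the constant address, when isConstant
    uint32_t block;       // block containing the check
    bool redundant = false;
};

// Stand-in for MBasicBlock::dominates(): in this toy model, lower block
// ids dominate higher ones, i.e. straight-line code.
static bool dominates(uint32_t a, uint32_t b) { return a <= b; }

static void eliminateBoundsChecks(std::vector<BoundsCheck>& checks,
                                  uint32_t minHeapLength)
{
    std::unordered_map<uint32_t, const BoundsCheck*> lastSeen;
    for (BoundsCheck& bc : checks) {
        // Rule 1: a constant address below the verified heap minimum is
        // always in bounds (the guard page catches partial out-of-bounds).
        if (bc.isConstant && bc.constValue < minHeapLength) {
            bc.redundant = true;
            continue;
        }
        // Rule 2: an earlier check on the same index that dominates this
        // one makes this one redundant.
        auto it = lastSeen.find(bc.id);
        if (it != lastSeen.end() && dominates(it->second->block, bc.block))
            bc.redundant = true;
        else
            lastSeen[bc.id] = &bc;
    }
}

int main() {
    std::vector<BoundsCheck> checks = {
        {1, true, 8, 0},   // constant 8 < heap minimum: removed by rule 1
        {2, false, 0, 0},  // first dynamic check of def 2: kept
        {2, false, 0, 1},  // dominated re-check of def 2: removed by rule 2
    };
    eliminateBoundsChecks(checks, 64 * 1024);
    for (const BoundsCheck& bc : checks)
        printf("check of def %u: %s\n", bc.id, bc.redundant ? "redundant" : "kept");
}
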

View file

@ -73,7 +73,7 @@ static constexpr Register IntArgReg0 = r0;
static constexpr Register IntArgReg1 = r1;
static constexpr Register IntArgReg2 = r2;
static constexpr Register IntArgReg3 = r3;
static constexpr Register HeapReg = r11;
static constexpr Register HeapReg = r10;
static constexpr Register CallTempNonArgRegs[] = { r5, r6, r7, r8 };
static const uint32_t NumCallTempNonArgRegs =
mozilla::ArrayLength(CallTempNonArgRegs);
@ -134,7 +134,7 @@ static constexpr FloatRegister InvalidFloatReg;
static constexpr Register JSReturnReg_Type = r3;
static constexpr Register JSReturnReg_Data = r2;
static constexpr Register StackPointer = sp;
static constexpr Register FramePointer = InvalidReg;
static constexpr Register FramePointer = r11;
static constexpr Register ReturnReg = r0;
static constexpr Register64 ReturnReg64(r1, r0);
static constexpr FloatRegister ReturnFloat32Reg = { FloatRegisters::d0, VFPRegister::Single };
@ -168,6 +168,7 @@ static constexpr Register WasmIonExitRegE1 = r1;
// None of these may be the second scratch register (lr).
static constexpr Register WasmIonExitRegReturnData = r2;
static constexpr Register WasmIonExitRegReturnType = r3;
static constexpr Register WasmIonExitTlsReg = r9;
static constexpr Register WasmIonExitRegD0 = r0;
static constexpr Register WasmIonExitRegD1 = r1;
static constexpr Register WasmIonExitRegD2 = r4;

View file

@ -893,11 +893,12 @@ LIRGeneratorARM::visitAsmJSCompareExchangeHeap(MAsmJSCompareExchangeHeap* ins)
if (byteSize(ins->access().type()) != 4 && !HasLDSTREXBHD()) {
LAsmJSCompareExchangeCallout* lir =
new(alloc()) LAsmJSCompareExchangeCallout(useRegisterAtStart(base),
useRegisterAtStart(ins->oldValue()),
useRegisterAtStart(ins->newValue()),
useFixed(ins->tls(), WasmTlsReg),
temp(), temp());
new(alloc()) LAsmJSCompareExchangeCallout(useFixedAtStart(base, IntArgReg2),
useFixedAtStart(ins->oldValue(), IntArgReg3),
useFixedAtStart(ins->newValue(), CallTempReg0),
useFixedAtStart(ins->tls(), WasmTlsReg),
tempFixed(IntArgReg0),
tempFixed(IntArgReg1));
defineReturn(lir, ins);
return;
}
@ -917,17 +918,18 @@ LIRGeneratorARM::visitAsmJSAtomicExchangeHeap(MAsmJSAtomicExchangeHeap* ins)
MOZ_ASSERT(ins->access().type() < Scalar::Float32);
MOZ_ASSERT(ins->access().offset() == 0);
const LAllocation base = useRegisterAtStart(ins->base());
const LAllocation value = useRegisterAtStart(ins->value());
if (byteSize(ins->access().type()) < 4 && !HasLDSTREXBHD()) {
// Call out on ARMv6.
defineReturn(new(alloc()) LAsmJSAtomicExchangeCallout(base, value,
useFixed(ins->tls(), WasmTlsReg),
temp(), temp()), ins);
defineReturn(new(alloc()) LAsmJSAtomicExchangeCallout(useFixedAtStart(ins->base(), IntArgReg2),
useFixedAtStart(ins->value(), IntArgReg3),
useFixedAtStart(ins->tls(), WasmTlsReg),
tempFixed(IntArgReg0),
tempFixed(IntArgReg1)), ins);
return;
}
const LAllocation base = useRegisterAtStart(ins->base());
const LAllocation value = useRegisterAtStart(ins->value());
define(new(alloc()) LAsmJSAtomicExchangeHeap(base, value), ins);
}
@ -942,10 +944,11 @@ LIRGeneratorARM::visitAsmJSAtomicBinopHeap(MAsmJSAtomicBinopHeap* ins)
if (byteSize(ins->access().type()) != 4 && !HasLDSTREXBHD()) {
LAsmJSAtomicBinopCallout* lir =
new(alloc()) LAsmJSAtomicBinopCallout(useRegisterAtStart(base),
useRegisterAtStart(ins->value()),
useFixed(ins->tls(), WasmTlsReg),
temp(), temp());
new(alloc()) LAsmJSAtomicBinopCallout(useFixedAtStart(base, IntArgReg2),
useFixedAtStart(ins->value(), IntArgReg3),
useFixedAtStart(ins->tls(), WasmTlsReg),
tempFixed(IntArgReg0),
tempFixed(IntArgReg1));
defineReturn(lir, ins);
return;
}

View file

@ -5138,7 +5138,7 @@ MacroAssembler::nopPatchableToCall(const wasm::CallSiteDesc& desc)
{
CodeOffset offset(currentOffset());
ma_nop();
append(desc, CodeOffset(currentOffset()), framePushed());
append(desc, CodeOffset(currentOffset()));
return offset;
}

View file

@ -81,6 +81,8 @@ class Simulator
r0 = 0, r1, r2, r3, r4, r5, r6, r7,
r8, r9, r10, r11, r12, r13, r14, r15,
num_registers,
fp = 11,
ip = 12,
sp = 13,
lr = 14,
pc = 15,

View file

@ -126,6 +126,7 @@ static constexpr Register WasmIonExitRegE1 = r1;
// None of these may be the second scratch register.
static constexpr Register WasmIonExitRegReturnData = r2;
static constexpr Register WasmIonExitRegReturnType = r3;
static constexpr Register WasmIonExitTlsReg = r17;
static constexpr Register WasmIonExitRegD0 = r0;
static constexpr Register WasmIonExitRegD1 = r1;
static constexpr Register WasmIonExitRegD2 = r4;

View file

@ -77,6 +77,7 @@ static constexpr FloatRegister SecondScratchDoubleReg = { FloatRegisters::f16, F
// None of these may be the second scratch register (t8).
static constexpr Register WasmIonExitRegReturnData = JSReturnReg_Data;
static constexpr Register WasmIonExitRegReturnType = JSReturnReg_Type;
static constexpr Register WasmIonExitTlsReg = s5;
static constexpr FloatRegister f0 = { FloatRegisters::f0, FloatRegister::Double };
static constexpr FloatRegister f2 = { FloatRegisters::f2, FloatRegister::Double };

View file

@ -71,6 +71,7 @@ static constexpr FloatRegister SecondScratchDoubleReg = { FloatRegisters::f21, F
// None of these may be the second scratch register (t8).
static constexpr Register WasmIonExitRegReturnData = JSReturnReg_Data;
static constexpr Register WasmIonExitRegReturnType = JSReturnReg_Type;
static constexpr Register WasmIonExitTlsReg = s5;
static constexpr FloatRegister f0 = { FloatRegisters::f0, FloatRegisters::Double };
static constexpr FloatRegister f1 = { FloatRegisters::f1, FloatRegisters::Double };

View file

@ -48,6 +48,7 @@ static constexpr Register WasmIonExitRegE1 { Registers::invalid_reg };
static constexpr Register WasmIonExitRegReturnData { Registers::invalid_reg };
static constexpr Register WasmIonExitRegReturnType { Registers::invalid_reg };
static constexpr Register WasmIonExitTlsReg = { Registers::invalid_reg };
static constexpr Register WasmIonExitRegD0 { Registers::invalid_reg };
static constexpr Register WasmIonExitRegD1 { Registers::invalid_reg };
static constexpr Register WasmIonExitRegD2 { Registers::invalid_reg };
@ -415,7 +416,6 @@ class MacroAssemblerNone : public Assembler
bool buildOOLFakeExitFrame(void*) { MOZ_CRASH(); }
void loadWasmGlobalPtr(uint32_t, Register) { MOZ_CRASH(); }
void loadWasmActivationFromTls(Register) { MOZ_CRASH(); }
void loadWasmActivationFromSymbolicAddress(Register) { MOZ_CRASH(); }
void loadWasmPinnedRegsFromTls() { MOZ_CRASH(); }
void setPrinter(Sprinter*) { MOZ_CRASH(); }

View file

@ -749,6 +749,25 @@ struct GlobalAccess
typedef Vector<GlobalAccess, 0, SystemAllocPolicy> GlobalAccessVector;
// A CallFarJump records the offset of a jump that needs to be patched to a
// call at the end of the module when all calls have been emitted.
struct CallFarJump
{
uint32_t funcIndex;
jit::CodeOffset jump;
CallFarJump(uint32_t funcIndex, jit::CodeOffset jump)
: funcIndex(funcIndex), jump(jump)
{}
void offsetBy(size_t delta) {
jump.offsetBy(delta);
}
};
typedef Vector<CallFarJump, 0, SystemAllocPolicy> CallFarJumpVector;
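
The comment above describes a record-now-patch-later scheme. Below is a hedged, self-contained sketch of that pattern: placeholder jumps are recorded while emitting, shifted when assembler buffers are merged (offsetBy), and rewritten once final function offsets are known. The funcEntry table and the patching step are stand-ins for the real linker logic.

#include <cstdint>
#include <cstdio>
#include <vector>

struct CallFarJump {
    uint32_t funcIndex;   // callee whose final offset is not yet known
    uint32_t jumpOffset;  // where the placeholder jump was emitted
    void offsetBy(uint32_t delta) { jumpOffset += delta; }
};

int main() {
    std::vector<CallFarJump> farJumps;
    farJumps.push_back({3, 0x40});  // recorded while emitting function code
    farJumps.push_back({7, 0x9c});

    // Merging another assembler's buffer shifts its recorded offsets,
    // mirroring the offsetBy() loop in AssemblerShared.
    const uint32_t delta = 0x1000;
    for (CallFarJump& fj : farJumps)
        fj.offsetBy(delta);

    // At the end of the module, every callee's entry offset is known and
    // each recorded jump can be patched into a call (patching elided).
    const uint32_t funcEntry[] = {0, 0, 0, 0x2000, 0, 0, 0, 0x3400};
    for (const CallFarJump& fj : farJumps)
        printf("patch jump at %#x -> call %#x\n", fj.jumpOffset, funcEntry[fj.funcIndex]);
}
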
// The TrapDesc struct describes a wasm trap that is about to be emitted. This
// includes the logical wasm bytecode offset to report, the kind of instruction
// causing the trap, and the stack depth right before control is transferred to
@ -808,6 +827,7 @@ namespace jit {
class AssemblerShared
{
wasm::CallSiteAndTargetVector callSites_;
wasm::CallFarJumpVector callFarJumps_;
wasm::TrapSiteVector trapSites_;
wasm::TrapFarJumpVector trapFarJumps_;
wasm::MemoryAccessVector memoryAccesses_;
@ -842,16 +862,18 @@ class AssemblerShared
}
template <typename... Args>
void append(const wasm::CallSiteDesc& desc, CodeOffset retAddr, size_t framePushed,
Args&&... args)
void append(const wasm::CallSiteDesc& desc, CodeOffset retAddr, Args&&... args)
{
// framePushed does not include sizeof(wasm::Frame), so add it in explicitly when
// setting the CallSite::stackDepth.
wasm::CallSite cs(desc, retAddr.offset(), framePushed + sizeof(wasm::Frame));
wasm::CallSite cs(desc, retAddr.offset());
enoughMemory_ &= callSites_.emplaceBack(cs, mozilla::Forward<Args>(args)...);
}
wasm::CallSiteAndTargetVector& callSites() { return callSites_; }
void append(wasm::CallFarJump jmp) {
enoughMemory_ &= callFarJumps_.append(jmp);
}
const wasm::CallFarJumpVector& callFarJumps() const { return callFarJumps_; }
void append(wasm::TrapSite trapSite) {
enoughMemory_ &= trapSites_.append(trapSite);
}
@ -911,6 +933,11 @@ class AssemblerShared
MOZ_ASSERT(other.trapSites_.empty(), "should have been cleared by wasmEmitTrapOutOfLineCode");
i = callFarJumps_.length();
enoughMemory_ &= callFarJumps_.appendAll(other.callFarJumps_);
for (; i < callFarJumps_.length(); i++)
callFarJumps_[i].offsetBy(delta);
i = trapFarJumps_.length();
enoughMemory_ &= trapFarJumps_.appendAll(other.trapFarJumps_);
for (; i < trapFarJumps_.length(); i++)

View file

@ -1499,35 +1499,37 @@ CodeGeneratorShared::emitWasmCallBase(LWasmCallBase* ins)
masm.bind(&ok);
#endif
// Save the caller's TLS register in a reserved stack slot (below the
// call's stack arguments) for retrieval after the call.
if (mir->saveTls())
masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), mir->tlsStackOffset()));
// LWasmCallBase::isCallPreserved() assumes that all MWasmCalls preserve the
// TLS and pinned regs. The only case where we don't have to reload
// the TLS and pinned regs is when the callee preserves them.
bool reloadRegs = true;
const wasm::CallSiteDesc& desc = mir->desc();
const wasm::CalleeDesc& callee = mir->callee();
switch (callee.which()) {
case wasm::CalleeDesc::Func:
masm.call(desc, callee.funcIndex());
reloadRegs = false;
break;
case wasm::CalleeDesc::Import:
masm.wasmCallImport(desc, callee);
break;
case wasm::CalleeDesc::WasmTable:
case wasm::CalleeDesc::AsmJSTable:
case wasm::CalleeDesc::WasmTable:
masm.wasmCallIndirect(desc, callee, ins->needsBoundsCheck());
reloadRegs = callee.which() == wasm::CalleeDesc::WasmTable && callee.wasmTableIsExternal();
break;
case wasm::CalleeDesc::Builtin:
masm.call(callee.builtin());
reloadRegs = false;
break;
case wasm::CalleeDesc::BuiltinInstanceMethod:
masm.wasmCallBuiltinInstanceMethod(mir->instanceArg(), callee.builtin());
break;
}
// After return, restore the caller's TLS and pinned registers.
if (mir->saveTls()) {
masm.loadPtr(Address(masm.getStackPointer(), mir->tlsStackOffset()), WasmTlsReg);
if (reloadRegs) {
masm.loadWasmTlsRegFromFrame();
masm.loadWasmPinnedRegsFromTls();
}

View file

@ -8605,13 +8605,13 @@ class LWasmParameterI64 : public LInstructionHelper<INT64_PIECES, 0, 0>
LIR_HEADER(WasmParameterI64);
};
class LWasmReturn : public LInstructionHelper<0, 2, 0>
class LWasmReturn : public LInstructionHelper<0, 1, 0>
{
public:
LIR_HEADER(WasmReturn);
};
class LWasmReturnI64 : public LInstructionHelper<0, INT64_PIECES + 1, 0>
class LWasmReturnI64 : public LInstructionHelper<0, INT64_PIECES, 0>
{
public:
LIR_HEADER(WasmReturnI64)
@ -8621,7 +8621,7 @@ class LWasmReturnI64 : public LInstructionHelper<0, INT64_PIECES + 1, 0>
}
};
class LWasmReturnVoid : public LInstructionHelper<0, 1, 0>
class LWasmReturnVoid : public LInstructionHelper<0, 0, 0>
{
public:
LIR_HEADER(WasmReturnVoid);
@ -8683,6 +8683,7 @@ class LWasmCallBase : public LInstruction
// - internal/indirect calls do by the internal wasm ABI
// - import calls do by explicitly saving/restoring at the callsite
// - builtin calls do because the TLS reg is non-volatile
// See also CodeGeneratorShared::emitWasmCallBase.
return !reg.isFloat() && reg.gpr() == WasmTlsReg;
}

View file

@ -817,6 +817,12 @@ LIRGeneratorShared::useInt64Fixed(MDefinition* mir, Register64 regs, bool useAtS
#endif
}
LInt64Allocation
LIRGeneratorShared::useInt64FixedAtStart(MDefinition* mir, Register64 regs)
{
return useInt64Fixed(mir, regs, true);
}
LInt64Allocation
LIRGeneratorShared::useInt64(MDefinition* mir, bool useAtStart)
{

View file

@ -208,6 +208,7 @@ class LIRGeneratorShared : public MDefinitionVisitor
inline LInt64Allocation useInt64Register(MDefinition* mir, bool useAtStart = false);
inline LInt64Allocation useInt64RegisterOrConstant(MDefinition* mir, bool useAtStart = false);
inline LInt64Allocation useInt64Fixed(MDefinition* mir, Register64 regs, bool useAtStart = false);
inline LInt64Allocation useInt64FixedAtStart(MDefinition* mir, Register64 regs);
LInt64Allocation useInt64RegisterAtStart(MDefinition* mir) {
return useInt64Register(mir, /* useAtStart = */ true);

View file

@ -157,6 +157,7 @@ static constexpr Register WasmIonExitRegE1 = rdi;
// Registers used in the GenerateFFIIonExit Disable Activation block.
static constexpr Register WasmIonExitRegReturnData = ecx;
static constexpr Register WasmIonExitRegReturnType = ecx;
static constexpr Register WasmIonExitTlsReg = r14;
static constexpr Register WasmIonExitRegD0 = rax;
static constexpr Register WasmIonExitRegD1 = rdi;
static constexpr Register WasmIonExitRegD2 = rbx;

View file

@ -474,9 +474,8 @@ MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm)
stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
ABIStackAlignment);
} else {
static_assert(sizeof(wasm::Frame) % ABIStackAlignment == 0,
"wasm::Frame should be part of the stack alignment.");
stackForCall += ComputeByteAlignment(stackForCall + framePushed(),
uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
stackForCall += ComputeByteAlignment(stackForCall + framePushed() + alignmentAtPrologue,
ABIStackAlignment);
}
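
To make the arithmetic concrete, here is a small sketch of the alignment computation above, with a ComputeByteAlignment matching the usual round-up-padding definition. sizeof(wasm::Frame) and the byte counts are made-up example values: when entering from wasm, the frame pushed after the ABI-aligned prologue must be added in before computing the padding.

#include <cstdint>
#include <cstdio>

// Round-up padding: bytes needed so that (address + padding) % alignment == 0.
static uint32_t ComputeByteAlignment(uint32_t address, uint32_t alignment) {
    return (alignment - (address % alignment)) % alignment;
}

int main() {
    const uint32_t ABIStackAlignment = 16;  // typical, platform-dependent
    const uint32_t sizeofWasmFrame = 8;     // assumed size for the example
    uint32_t stackForCall = 20;             // outgoing stack-argument bytes
    uint32_t framePushed = 28;              // bytes pushed since the prologue

    uint32_t pad = ComputeByteAlignment(stackForCall + framePushed + sizeofWasmFrame,
                                        ABIStackAlignment);
    printf("padding %u -> total %u is %u-byte aligned\n", pad,
           stackForCall + framePushed + sizeofWasmFrame + pad, ABIStackAlignment);
}
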

View file

@ -600,6 +600,13 @@ MacroAssembler::Push(FloatRegister t)
adjustFrame(sizeof(double));
}
void
MacroAssembler::PushFlags()
{
pushFlags();
adjustFrame(sizeof(intptr_t));
}
void
MacroAssembler::Pop(const Operand op)
{
@ -628,6 +635,13 @@ MacroAssembler::Pop(const ValueOperand& val)
implicitPop(sizeof(Value));
}
void
MacroAssembler::PopFlags()
{
popFlags();
implicitPop(sizeof(intptr_t));
}
// ===============================================================
// Simple call functions.
@ -741,7 +755,7 @@ MacroAssembler::nopPatchableToCall(const wasm::CallSiteDesc& desc)
{
CodeOffset offset(currentOffset());
masm.nop_five();
append(desc, CodeOffset(currentOffset()), framePushed());
append(desc, CodeOffset(currentOffset()));
MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
return offset;
}

View file

@ -112,9 +112,10 @@ static constexpr Register WasmIonExitRegE1 = eax;
// Registers used in the GenerateFFIIonExit Disable Activation block.
static constexpr Register WasmIonExitRegReturnData = edx;
static constexpr Register WasmIonExitRegReturnType = ecx;
static constexpr Register WasmIonExitTlsReg = esi;
static constexpr Register WasmIonExitRegD0 = edi;
static constexpr Register WasmIonExitRegD1 = eax;
static constexpr Register WasmIonExitRegD2 = esi;
static constexpr Register WasmIonExitRegD2 = ebx;
// Registers used in RegExpMatcher instruction (do not use JSReturnOperand).
static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;

View file

@ -949,20 +949,11 @@ CodeGeneratorX86::visitDivOrModI64(LDivOrModI64* lir)
{
Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
Register temp = ToRegister(lir->temp());
Register64 output = ToOutRegister64(lir);
MOZ_ASSERT(output == ReturnReg64);
// We are free to clobber all registers, since this is a call instruction.
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
regs.take(lhs.low);
regs.take(lhs.high);
if (lhs != rhs) {
regs.take(rhs.low);
regs.take(rhs.high);
}
Register temp = regs.takeAny();
Label done;
// Handle divide by zero.
@ -1006,20 +997,11 @@ CodeGeneratorX86::visitUDivOrModI64(LUDivOrModI64* lir)
{
Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
Register temp = ToRegister(lir->temp());
Register64 output = ToOutRegister64(lir);
MOZ_ASSERT(output == ReturnReg64);
// We are free to clobber all registers, since this is a call instruction.
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
regs.take(lhs.low);
regs.take(lhs.high);
if (lhs != rhs) {
regs.take(rhs.low);
regs.take(rhs.high);
}
Register temp = regs.takeAny();
// Prevent divide by zero.
if (lir->canBeDivideByZero())
masm.branchTest64(Assembler::Zero, rhs, rhs, temp, trap(lir, wasm::Trap::IntegerDivideByZero));

View file

@ -109,7 +109,7 @@ class LWasmUint32ToFloat32: public LInstructionHelper<1, 1, 1>
}
};
class LDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 0>
class LDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 1>
{
public:
LIR_HEADER(DivOrModI64)
@ -117,10 +117,11 @@ class LDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2,
static const size_t Lhs = 0;
static const size_t Rhs = INT64_PIECES;
LDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
LDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs, const LDefinition& temp)
{
setInt64Operand(Lhs, lhs);
setInt64Operand(Rhs, rhs);
setTemp(0, temp);
}
MBinaryArithInstruction* mir() const {
@ -143,9 +144,12 @@ class LDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2,
return mir_->toMod()->trapOffset();
return mir_->toDiv()->trapOffset();
}
const LDefinition* temp() {
return getTemp(0);
}
};
class LUDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 0>
class LUDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2, 1>
{
public:
LIR_HEADER(UDivOrModI64)
@ -153,10 +157,11 @@ class LUDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2
static const size_t Lhs = 0;
static const size_t Rhs = INT64_PIECES;
LUDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
LUDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs, const LDefinition& temp)
{
setInt64Operand(Lhs, lhs);
setInt64Operand(Rhs, rhs);
setTemp(0, temp);
}
MBinaryArithInstruction* mir() const {
@ -179,6 +184,9 @@ class LUDivOrModI64 : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES*2
return mir_->toMod()->trapOffset();
return mir_->toDiv()->trapOffset();
}
const LDefinition* temp() {
return getTemp(0);
}
};
class LWasmTruncateToInt64 : public LInstructionHelper<INT64_PIECES, 1, 1>

View file

@ -618,8 +618,9 @@ LIRGeneratorX86::lowerDivI64(MDiv* div)
return;
}
LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64RegisterAtStart(div->lhs()),
useInt64RegisterAtStart(div->rhs()));
LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64FixedAtStart(div->lhs(), Register64(eax, ebx)),
useInt64FixedAtStart(div->rhs(), Register64(ecx, edx)),
tempFixed(esi));
defineReturn(lir, div);
}
@ -631,24 +632,27 @@ LIRGeneratorX86::lowerModI64(MMod* mod)
return;
}
LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
useInt64RegisterAtStart(mod->rhs()));
LDivOrModI64* lir = new(alloc()) LDivOrModI64(useInt64FixedAtStart(mod->lhs(), Register64(eax, ebx)),
useInt64FixedAtStart(mod->rhs(), Register64(ecx, edx)),
tempFixed(esi));
defineReturn(lir, mod);
}
void
LIRGeneratorX86::lowerUDivI64(MDiv* div)
{
LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64RegisterAtStart(div->lhs()),
useInt64RegisterAtStart(div->rhs()));
LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64FixedAtStart(div->lhs(), Register64(eax, ebx)),
useInt64FixedAtStart(div->rhs(), Register64(ecx, edx)),
tempFixed(esi));
defineReturn(lir, div);
}
void
LIRGeneratorX86::lowerUModI64(MMod* mod)
{
LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64RegisterAtStart(mod->lhs()),
useInt64RegisterAtStart(mod->rhs()));
LUDivOrModI64* lir = new(alloc()) LUDivOrModI64(useInt64FixedAtStart(mod->lhs(), Register64(eax, ebx)),
useInt64FixedAtStart(mod->rhs(), Register64(ecx, edx)),
tempFixed(esi));
defineReturn(lir, mod);
}

View file

@ -381,7 +381,6 @@ UNIFIED_SOURCES += [
'wasm/WasmCode.cpp',
'wasm/WasmCompartment.cpp',
'wasm/WasmCompile.cpp',
'wasm/WasmDebugFrame.cpp',
'wasm/WasmFrameIterator.cpp',
'wasm/WasmGenerator.cpp',
'wasm/WasmInstance.cpp',

View file

@ -5274,9 +5274,11 @@ SingleStepCallback(void* arg, jit::Simulator* sim, void* pc)
#if defined(JS_SIMULATOR_ARM)
state.sp = (void*)sim->get_register(jit::Simulator::sp);
state.lr = (void*)sim->get_register(jit::Simulator::lr);
state.fp = (void*)sim->get_register(jit::Simulator::fp);
#elif defined(JS_SIMULATOR_MIPS64)
state.sp = (void*)sim->getRegister(jit::Simulator::sp);
state.lr = (void*)sim->getRegister(jit::Simulator::ra);
state.fp = (void*)sim->getRegister(jit::Simulator::fp);
#else
# error "NYI: Single-step profiling support"
#endif

View file

@ -2549,7 +2549,7 @@ Debugger::updateExecutionObservabilityOfFrames(JSContext* cx, const ExecutionObs
oldestEnabledFrame.setIsDebuggee();
}
if (iter.abstractFramePtr().isWasmDebugFrame())
iter.abstractFramePtr().asWasmDebugFrame()->observeFrame(cx);
iter.abstractFramePtr().asWasmDebugFrame()->observe(cx);
} else {
#ifdef DEBUG
// Debugger.Frame lifetimes are managed by the debug epilogue,

View file

@ -141,6 +141,12 @@ GeckoProfiler::enable(bool enabled)
}
}
// WebAssembly code does not need to be released, but profiling string
// labels have to be generated so that they are available during async
// profiling stack iteration.
for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
c->wasm.ensureProfilingLabels(enabled);
return true;
}

View file

@ -19,7 +19,6 @@
#include "js/Debug.h"
#include "vm/EnvironmentObject.h"
#include "vm/GeneratorObject.h"
#include "wasm/WasmDebugFrame.h"
#include "wasm/WasmInstance.h"
#include "jsobjinlines.h"
@ -455,7 +454,7 @@ AbstractFramePtr::environmentChain() const
if (isBaselineFrame())
return asBaselineFrame()->environmentChain();
if (isWasmDebugFrame())
return asWasmDebugFrame()->environmentChain();
return &global()->lexicalEnvironment();
return asRematerializedFrame()->environmentChain();
}

View file

@ -17,7 +17,6 @@
#include "js/GCAPI.h"
#include "vm/Debugger.h"
#include "vm/Opcodes.h"
#include "wasm/WasmDebugFrame.h"
#include "jit/JitFrameIterator-inl.h"
#include "vm/EnvironmentObject-inl.h"
@ -1647,7 +1646,7 @@ WasmActivation::WasmActivation(JSContext* cx)
: Activation(cx, Wasm),
entrySP_(nullptr),
resumePC_(nullptr),
fp_(nullptr),
exitFP_(nullptr),
exitReason_(wasm::ExitReason::None)
{
(void) entrySP_; // silence "unused private member" warning
@ -1655,8 +1654,6 @@ WasmActivation::WasmActivation(JSContext* cx)
prevWasm_ = cx->wasmActivationStack_;
cx->wasmActivationStack_ = this;
cx->compartment()->wasm.activationCount_++;
// Now that the WasmActivation is fully initialized, make it visible to
// asynchronous profiling.
registerProfiling();
@ -1667,13 +1664,11 @@ WasmActivation::~WasmActivation()
// Hide this activation from the profiler before it is destroyed.
unregisterProfiling();
MOZ_ASSERT(fp_ == nullptr);
MOZ_ASSERT(exitFP_ == nullptr);
MOZ_ASSERT(exitReason_ == wasm::ExitReason::None);
MOZ_ASSERT(cx_->wasmActivationStack_ == this);
cx_->wasmActivationStack_ = prevWasm_;
MOZ_ASSERT(cx_->compartment()->wasm.activationCount_ > 0);
cx_->compartment()->wasm.activationCount_--;
}
InterpreterFrameIterator&

View file

@ -25,6 +25,7 @@
#include "vm/ArgumentsObject.h"
#include "vm/SavedFrame.h"
#include "wasm/WasmFrameIterator.h"
#include "wasm/WasmTypes.h"
struct JSCompartment;
@ -166,6 +167,7 @@ class AbstractFramePtr
MOZ_IMPLICIT AbstractFramePtr(wasm::DebugFrame* fp)
: ptr_(fp ? uintptr_t(fp) | Tag_WasmDebugFrame : 0)
{
static_assert(wasm::DebugFrame::Alignment >= TagMask, "aligned");
MOZ_ASSERT_IF(fp, asWasmDebugFrame() == fp);
}
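
The new static_assert guards a low-bit pointer-tagging scheme. This sketch shows the underlying idea with assumed tag values: alignment guarantees the low bits of a DebugFrame pointer are zero, so they can carry a frame-kind tag and be masked off on the way out.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Tag values are illustrative; the point is only that alignment frees the
// low bits for tagging, which the static_assert above guarantees.
constexpr uintptr_t Tag_WasmDebugFrame = 0x4;
constexpr uintptr_t TagMask = 0x7;

struct alignas(8) DebugFrameSketch { int funcIndex; };

uintptr_t tagFrame(DebugFrameSketch* fp) {
    assert((uintptr_t(fp) & TagMask) == 0);  // low bits free thanks to alignas(8)
    return uintptr_t(fp) | Tag_WasmDebugFrame;
}

DebugFrameSketch* untagFrame(uintptr_t ptr) {
    assert((ptr & TagMask) == Tag_WasmDebugFrame);
    return reinterpret_cast<DebugFrameSketch*>(ptr & ~TagMask);
}

int main() {
    DebugFrameSketch frame{42};
    uintptr_t tagged = tagFrame(&frame);
    printf("round-trips: %d\n", untagFrame(tagged)->funcIndex);
}
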
@ -1733,7 +1735,7 @@ class WasmActivation : public Activation
WasmActivation* prevWasm_;
void* entrySP_;
void* resumePC_;
uint8_t* fp_;
uint8_t* exitFP_;
wasm::ExitReason exitReason_;
public:
@ -1746,20 +1748,16 @@ class WasmActivation : public Activation
return true;
}
// Returns a pointer to the base of the innermost stack frame of wasm code
// in this activation.
uint8_t* fp() const { return fp_; }
// Returns null or the final wasm::Frame* when wasm exited this
// WasmActivation.
uint8_t* exitFP() const { return exitFP_; }
// Returns the reason why wasm code called out of wasm code.
wasm::ExitReason exitReason() const { return exitReason_; }
// Read by JIT code:
static unsigned offsetOfContext() { return offsetof(WasmActivation, cx_); }
static unsigned offsetOfResumePC() { return offsetof(WasmActivation, resumePC_); }
// Written by JIT code:
static unsigned offsetOfEntrySP() { return offsetof(WasmActivation, entrySP_); }
static unsigned offsetOfFP() { return offsetof(WasmActivation, fp_); }
static unsigned offsetOfExitFP() { return offsetof(WasmActivation, exitFP_); }
static unsigned offsetOfExitReason() { return offsetof(WasmActivation, exitReason_); }
// Read/written from SIGSEGV handler:
@ -1767,7 +1765,7 @@ class WasmActivation : public Activation
void* resumePC() const { return resumePC_; }
// Used by wasm::FrameIterator during stack unwinding.
void unwindFP(uint8_t* fp) { fp_ = fp; }
void unwindExitFP(uint8_t* exitFP) { exitFP_ = exitFP; exitReason_ = wasm::ExitReason::None; }
};
// A FrameIter walks over a context's stack of JS script activations,

View file

@ -97,7 +97,6 @@
#endif
#include "wasm/WasmBinaryIterator.h"
#include "wasm/WasmDebugFrame.h"
#include "wasm/WasmGenerator.h"
#include "wasm/WasmSignalHandlers.h"
#include "wasm/WasmValidate.h"
@ -199,31 +198,19 @@ static constexpr int32_t TlsSlotSize = sizeof(void*);
static constexpr int32_t TlsSlotOffset = TlsSlotSize;
BaseLocalIter::BaseLocalIter(const ValTypeVector& locals,
size_t argsLength,
bool debugEnabled)
size_t argsLength,
bool debugEnabled)
: locals_(locals),
argsLength_(argsLength),
argsRange_(locals.begin(), argsLength),
argsIter_(argsRange_),
index_(0),
localSize_(0),
localSize_(debugEnabled ? DebugFrame::offsetOfFrame() : 0),
reservedSize_(localSize_),
done_(false)
{
MOZ_ASSERT(argsLength <= locals.length());
// Reserve a stack slot for the TLS pointer outside the locals range so it
// isn't zero-filled like the normal locals.
DebugOnly<int32_t> tlsSlotOffset = pushLocal(TlsSlotSize);
MOZ_ASSERT(tlsSlotOffset == TlsSlotOffset);
if (debugEnabled) {
// If debug information is generated, construct the DebugFrame record by
// reserving some data just before the TLS pointer. The TLS pointer is
// allocated above and the regular wasm::Frame data starts after the locals.
localSize_ += DebugFrame::offsetOfTlsData();
MOZ_ASSERT(DebugFrame::offsetOfFrame() == localSize_);
}
reservedSize_ = localSize_;
settle();
}
@ -628,10 +615,6 @@ class BaseCompiler
Vector<Local, 8, SystemAllocPolicy> localInfo_;
Vector<OutOfLineCode*, 8, SystemAllocPolicy> outOfLine_;
// Index into localInfo_ of the special local used for saving the TLS
// pointer. This follows the function's real arguments and locals.
uint32_t tlsSlot_;
// On specific platforms we sometimes need to use specific registers.
#ifdef JS_CODEGEN_X64
@ -2229,9 +2212,6 @@ class BaseCompiler
maxFramePushed_ = localSize_;
// The TLS pointer is always passed as a hidden argument in WasmTlsReg.
// Save it into its assigned local slot.
storeToFramePtr(WasmTlsReg, localInfo_[tlsSlot_].offs());
if (debugEnabled_) {
// Initialize funcIndex and flag fields of DebugFrame.
size_t debugFrame = masm.framePushed() - DebugFrame::offsetOfFrame();
@ -2361,8 +2341,9 @@ class BaseCompiler
masm.breakpoint();
// Patch the add in the prologue so that it checks against the correct
// frame size.
// frame size. Flush the constant pool in case it needs to be patched.
MOZ_ASSERT(maxFramePushed_ >= localSize_);
masm.flush();
masm.patchAdd32ToPtr(stackAddOffset_, Imm32(-int32_t(maxFramePushed_ - localSize_)));
// Since we just overflowed the stack, to be on the safe side, pop the
@ -2390,9 +2371,6 @@ class BaseCompiler
restoreResult();
}
// Restore the TLS register in case it was overwritten by the function.
loadFromFramePtr(WasmTlsReg, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
GenerateFunctionEpilogue(masm, localSize_, &offsets_);
#if defined(JS_ION_PERF)
@ -2481,7 +2459,7 @@ class BaseCompiler
// On x86 there are no pinned registers, so don't waste time
// reloading the Tls.
#ifndef JS_CODEGEN_X86
loadFromFramePtr(WasmTlsReg, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
masm.loadWasmTlsRegFromFrame();
masm.loadWasmPinnedRegsFromTls();
#endif
}
@ -2678,7 +2656,7 @@ class BaseCompiler
const FunctionCall& call)
{
// Builtin method calls assume the TLS register has been set.
loadFromFramePtr(WasmTlsReg, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
masm.loadWasmTlsRegFromFrame();
CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Symbolic);
masm.wasmCallBuiltinInstanceMethod(instanceArg, builtin);
@ -3317,56 +3295,56 @@ class BaseCompiler
void loadGlobalVarI32(unsigned globalDataOffset, RegI32 r)
{
ScratchI32 tmp(*this);
loadFromFramePtr(tmp, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
masm.loadWasmTlsRegFromFrame(tmp);
masm.load32(Address(tmp, globalToTlsOffset(globalDataOffset)), r);
}
void loadGlobalVarI64(unsigned globalDataOffset, RegI64 r)
{
ScratchI32 tmp(*this);
loadFromFramePtr(tmp, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
masm.loadWasmTlsRegFromFrame(tmp);
masm.load64(Address(tmp, globalToTlsOffset(globalDataOffset)), r);
}
void loadGlobalVarF32(unsigned globalDataOffset, RegF32 r)
{
ScratchI32 tmp(*this);
loadFromFramePtr(tmp, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
masm.loadWasmTlsRegFromFrame(tmp);
masm.loadFloat32(Address(tmp, globalToTlsOffset(globalDataOffset)), r);
}
void loadGlobalVarF64(unsigned globalDataOffset, RegF64 r)
{
ScratchI32 tmp(*this);
loadFromFramePtr(tmp, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
masm.loadWasmTlsRegFromFrame(tmp);
masm.loadDouble(Address(tmp, globalToTlsOffset(globalDataOffset)), r);
}
void storeGlobalVarI32(unsigned globalDataOffset, RegI32 r)
{
ScratchI32 tmp(*this);
loadFromFramePtr(tmp, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
masm.loadWasmTlsRegFromFrame(tmp);
masm.store32(r, Address(tmp, globalToTlsOffset(globalDataOffset)));
}
void storeGlobalVarI64(unsigned globalDataOffset, RegI64 r)
{
ScratchI32 tmp(*this);
loadFromFramePtr(tmp, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
masm.loadWasmTlsRegFromFrame(tmp);
masm.store64(r, Address(tmp, globalToTlsOffset(globalDataOffset)));
}
void storeGlobalVarF32(unsigned globalDataOffset, RegF32 r)
{
ScratchI32 tmp(*this);
loadFromFramePtr(tmp, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
masm.loadWasmTlsRegFromFrame(tmp);
masm.storeFloat32(r, Address(tmp, globalToTlsOffset(globalDataOffset)));
}
void storeGlobalVarF64(unsigned globalDataOffset, RegF64 r)
{
ScratchI32 tmp(*this);
loadFromFramePtr(tmp, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
masm.loadWasmTlsRegFromFrame(tmp);
masm.storeDouble(r, Address(tmp, globalToTlsOffset(globalDataOffset)));
}
@ -5187,8 +5165,15 @@ BaseCompiler::sniffConditionalControlCmp(Cond compareOp, ValType operandType)
MOZ_ASSERT(latentOp_ == LatentOp::None, "Latent comparison state not properly reset");
switch (iter_.peekOp()) {
case uint16_t(Op::BrIf):
case uint16_t(Op::Select):
#ifdef JS_CODEGEN_X86
// On x86, with only 5 available registers, a latent i64 binary
// comparison takes 4 of them, leaving only 1, which is not enough for select.
if (operandType == ValType::I64)
return false;
#endif
MOZ_FALLTHROUGH;
case uint16_t(Op::BrIf):
case uint16_t(Op::If):
setLatentCompare(compareOp, operandType);
return true;
@ -5804,11 +5789,8 @@ BaseCompiler::emitCallArgs(const ValTypeVector& argTypes, FunctionCall& baseline
for (size_t i = 0; i < numArgs; ++i)
passArg(baselineCall, argTypes[i], peek(numArgs - 1 - i));
// Pass the TLS pointer as a hidden argument in WasmTlsReg. Load
// it directly out of its stack slot so we don't interfere with
// the stk_.
if (baselineCall.loadTlsBefore)
loadFromFramePtr(WasmTlsReg, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
masm.loadWasmTlsRegFromFrame();
return true;
}
@ -6450,7 +6432,7 @@ BaseCompiler::maybeLoadTlsForAccess(bool omitBoundsCheck)
RegI32 tls = invalidI32();
if (needTlsForAccess(omitBoundsCheck)) {
tls = needI32();
loadFromFramePtr(tls, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
masm.loadWasmTlsRegFromFrame(tls);
}
return tls;
}
@ -7473,7 +7455,6 @@ BaseCompiler::BaseCompiler(const ModuleEnvironment& env,
#ifdef DEBUG
scratchRegisterTaken_(false),
#endif
tlsSlot_(0),
#ifdef JS_CODEGEN_X64
specific_rax(RegI64(Register64(rax))),
specific_rcx(RegI64(Register64(rcx))),
@ -7511,6 +7492,7 @@ BaseCompiler::BaseCompiler(const ModuleEnvironment& env,
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
availGPR_.take(HeapReg);
#endif
availGPR_.take(FramePointer);
#ifdef DEBUG
setupRegisterLeakCheck();
@ -7533,15 +7515,9 @@ BaseCompiler::init()
const ValTypeVector& args = func_.sig().args();
// localInfo_ contains an entry for every local in locals_, followed by
// entries for special locals. Currently the only special local is the TLS
// pointer.
tlsSlot_ = locals_.length();
if (!localInfo_.resize(locals_.length() + 1))
if (!localInfo_.resize(locals_.length()))
return false;
localInfo_[tlsSlot_].init(MIRType::Pointer, TlsSlotOffset);
BaseLocalIter i(locals_, args.length(), debugEnabled_);
varLow_ = i.reservedSize();
for (; !i.done() && i.index() < args.length(); i++) {

View file

@ -111,7 +111,7 @@ StaticallyLink(CodeSegment& cs, const LinkData& linkData, JSContext* cx)
const Uint32Vector& offsets = linkData.symbolicLinks[imm];
for (size_t i = 0; i < offsets.length(); i++) {
uint8_t* patchAt = cs.base() + offsets[i];
void* target = AddressOf(imm, cx);
void* target = AddressOf(imm);
Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
PatchedImmPtr(target),
PatchedImmPtr((void*)-1));
@ -299,60 +299,44 @@ FuncImport::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
CodeRange::CodeRange(Kind kind, Offsets offsets)
: begin_(offsets.begin),
profilingReturn_(0),
ret_(0),
end_(offsets.end),
funcIndex_(0),
funcLineOrBytecode_(0),
funcBeginToTableEntry_(0),
funcBeginToTableProfilingJump_(0),
funcBeginToNonProfilingEntry_(0),
funcProfilingJumpToProfilingReturn_(0),
funcProfilingEpilogueToProfilingReturn_(0),
funcBeginToNormalEntry_(0),
kind_(kind)
{
MOZ_ASSERT(begin_ <= end_);
MOZ_ASSERT(kind_ == Entry || kind_ == Inline ||
MOZ_ASSERT(kind_ == Entry || kind_ == Inline || kind_ == Throw ||
kind_ == FarJumpIsland || kind_ == DebugTrap);
}
CodeRange::CodeRange(Kind kind, ProfilingOffsets offsets)
CodeRange::CodeRange(Kind kind, CallableOffsets offsets)
: begin_(offsets.begin),
profilingReturn_(offsets.profilingReturn),
ret_(offsets.ret),
end_(offsets.end),
funcIndex_(0),
funcLineOrBytecode_(0),
funcBeginToTableEntry_(0),
funcBeginToTableProfilingJump_(0),
funcBeginToNonProfilingEntry_(0),
funcProfilingJumpToProfilingReturn_(0),
funcProfilingEpilogueToProfilingReturn_(0),
funcBeginToNormalEntry_(0),
kind_(kind)
{
MOZ_ASSERT(begin_ < profilingReturn_);
MOZ_ASSERT(profilingReturn_ < end_);
MOZ_ASSERT(begin_ < ret_);
MOZ_ASSERT(ret_ < end_);
MOZ_ASSERT(kind_ == ImportJitExit || kind_ == ImportInterpExit || kind_ == TrapExit);
}
CodeRange::CodeRange(uint32_t funcIndex, uint32_t funcLineOrBytecode, FuncOffsets offsets)
: begin_(offsets.begin),
profilingReturn_(offsets.profilingReturn),
ret_(offsets.ret),
end_(offsets.end),
funcIndex_(funcIndex),
funcLineOrBytecode_(funcLineOrBytecode),
funcBeginToTableEntry_(offsets.tableEntry - begin_),
funcBeginToTableProfilingJump_(offsets.tableProfilingJump - begin_),
funcBeginToNonProfilingEntry_(offsets.nonProfilingEntry - begin_),
funcProfilingJumpToProfilingReturn_(profilingReturn_ - offsets.profilingJump),
funcProfilingEpilogueToProfilingReturn_(profilingReturn_ - offsets.profilingEpilogue),
funcBeginToNormalEntry_(offsets.normalEntry - begin_),
kind_(Function)
{
MOZ_ASSERT(begin_ < profilingReturn_);
MOZ_ASSERT(profilingReturn_ < end_);
MOZ_ASSERT(offsets.tableEntry - begin_ <= UINT8_MAX);
MOZ_ASSERT(offsets.tableProfilingJump - begin_ <= UINT8_MAX);
MOZ_ASSERT(offsets.nonProfilingEntry - begin_ <= UINT8_MAX);
MOZ_ASSERT(profilingReturn_ - offsets.profilingJump <= UINT8_MAX);
MOZ_ASSERT(profilingReturn_ - offsets.profilingEpilogue <= UINT8_MAX);
MOZ_ASSERT(begin_ < ret_);
MOZ_ASSERT(ret_ < end_);
MOZ_ASSERT(offsets.normalEntry - begin_ <= UINT8_MAX);
}
static size_t
@ -413,7 +397,6 @@ Metadata::serializedSize() const
SerializedPodVectorSize(memoryAccesses) +
SerializedPodVectorSize(codeRanges) +
SerializedPodVectorSize(callSites) +
SerializedPodVectorSize(callThunks) +
SerializedPodVectorSize(funcNames) +
SerializedPodVectorSize(customSections) +
filename.serializedSize();
@ -434,7 +417,6 @@ Metadata::serialize(uint8_t* cursor) const
cursor = SerializePodVector(cursor, memoryAccesses);
cursor = SerializePodVector(cursor, codeRanges);
cursor = SerializePodVector(cursor, callSites);
cursor = SerializePodVector(cursor, callThunks);
cursor = SerializePodVector(cursor, funcNames);
cursor = SerializePodVector(cursor, customSections);
cursor = filename.serialize(cursor);
@ -453,7 +435,6 @@ Metadata::deserialize(const uint8_t* cursor)
(cursor = DeserializePodVector(cursor, &memoryAccesses)) &&
(cursor = DeserializePodVector(cursor, &codeRanges)) &&
(cursor = DeserializePodVector(cursor, &callSites)) &&
(cursor = DeserializePodVector(cursor, &callThunks)) &&
(cursor = DeserializePodVector(cursor, &funcNames)) &&
(cursor = DeserializePodVector(cursor, &customSections)) &&
(cursor = filename.deserialize(cursor));
@ -476,7 +457,6 @@ Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
memoryAccesses.sizeOfExcludingThis(mallocSizeOf) +
codeRanges.sizeOfExcludingThis(mallocSizeOf) +
callSites.sizeOfExcludingThis(mallocSizeOf) +
callThunks.sizeOfExcludingThis(mallocSizeOf) +
funcNames.sizeOfExcludingThis(mallocSizeOf) +
customSections.sizeOfExcludingThis(mallocSizeOf) +
filename.sizeOfExcludingThis(mallocSizeOf);
@ -578,7 +558,6 @@ Code::Code(UniqueCodeSegment segment,
: segment_(Move(segment)),
metadata_(&metadata),
maybeBytecode_(maybeBytecode),
profilingEnabled_(false),
enterAndLeaveFrameTrapsCounter_(0)
{
MOZ_ASSERT_IF(metadata_->debugEnabled, maybeBytecode);
@ -986,77 +965,67 @@ Code::clearBreakpointsIn(JSContext* cx, WasmInstanceObject* instance, js::Debugg
}
bool
Code::ensureProfilingState(JSRuntime* rt, bool newProfilingEnabled)
// When enabled, generate profiling labels for every name in funcNames_ that is
// the name of some Function CodeRange. This involves malloc() so do it now
// since, once we start sampling, we'll be in a signal-handling context where we
// cannot malloc.
void
Code::ensureProfilingLabels(bool profilingEnabled)
{
if (profilingEnabled_ == newProfilingEnabled)
return true;
if (!profilingEnabled) {
profilingLabels_.clear();
return;
}
// When enabled, generate profiling labels for every name in funcNames_
// that is the name of some Function CodeRange. This involves malloc() so
// do it now since, once we start sampling, we'll be in a signal-handling
// context where we cannot malloc.
if (newProfilingEnabled) {
for (const CodeRange& codeRange : metadata_->codeRanges) {
if (!codeRange.isFunction())
continue;
if (!profilingLabels_.empty())
return;
ToCStringBuf cbuf;
const char* bytecodeStr = NumberToCString(nullptr, &cbuf, codeRange.funcLineOrBytecode());
MOZ_ASSERT(bytecodeStr);
for (const CodeRange& codeRange : metadata_->codeRanges) {
if (!codeRange.isFunction())
continue;
UTF8Bytes name;
if (!getFuncName(codeRange.funcIndex(), &name) || !name.append(" (", 2))
return false;
ToCStringBuf cbuf;
const char* bytecodeStr = NumberToCString(nullptr, &cbuf, codeRange.funcLineOrBytecode());
MOZ_ASSERT(bytecodeStr);
if (const char* filename = metadata_->filename.get()) {
if (!name.append(filename, strlen(filename)))
return false;
} else {
if (!name.append('?'))
return false;
}
UTF8Bytes name;
if (!getFuncName(codeRange.funcIndex(), &name) || !name.append(" (", 2))
return;
if (!name.append(':') ||
!name.append(bytecodeStr, strlen(bytecodeStr)) ||
!name.append(")\0", 2))
{
return false;
}
UniqueChars label(name.extractOrCopyRawBuffer());
if (!label)
return false;
if (codeRange.funcIndex() >= funcLabels_.length()) {
if (!funcLabels_.resize(codeRange.funcIndex() + 1))
return false;
}
funcLabels_[codeRange.funcIndex()] = Move(label);
if (const char* filename = metadata_->filename.get()) {
if (!name.append(filename, strlen(filename)))
return;
} else {
if (!name.append('?'))
return;
}
} else {
funcLabels_.clear();
if (!name.append(':') ||
!name.append(bytecodeStr, strlen(bytecodeStr)) ||
!name.append(")\0", 2))
{
return;
}
UniqueChars label(name.extractOrCopyRawBuffer());
if (!label)
return;
if (codeRange.funcIndex() >= profilingLabels_.length()) {
if (!profilingLabels_.resize(codeRange.funcIndex() + 1))
return;
}
profilingLabels_[codeRange.funcIndex()] = Move(label);
}
}
// Only mutate the code after the fallible operations are complete to avoid
// the need to rollback.
profilingEnabled_ = newProfilingEnabled;
{
AutoWritableJitCode awjc(segment_->base(), segment_->length());
AutoFlushICache afc("Code::ensureProfilingState");
AutoFlushICache::setRange(uintptr_t(segment_->base()), segment_->length());
for (const CallSite& callSite : metadata_->callSites)
ToggleProfiling(*this, callSite, newProfilingEnabled);
for (const CallThunk& callThunk : metadata_->callThunks)
ToggleProfiling(*this, callThunk, newProfilingEnabled);
for (const CodeRange& codeRange : metadata_->codeRanges)
ToggleProfiling(*this, codeRange, newProfilingEnabled);
}
return true;
const char*
Code::profilingLabel(uint32_t funcIndex) const
{
if (funcIndex >= profilingLabels_.length() || !profilingLabels_[funcIndex])
return "?";
return profilingLabels_[funcIndex].get();
}
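
The rewritten ensureProfilingLabels is all about doing fallible, allocating work eagerly so the async sampler never has to allocate. A rough, self-contained approximation of that scheme (not the SpiderMonkey types) follows; ensure() is idempotent and label() is total and allocation-free.

#include <cstdio>
#include <string>
#include <vector>

struct FuncInfo { std::string name; unsigned bytecodeOffset; };

class ProfilingLabels {
    std::vector<std::string> labels_;
  public:
    void ensure(bool enabled, const std::vector<FuncInfo>& funcs,
                const char* filename) {
        if (!enabled) { labels_.clear(); return; }
        if (!labels_.empty())  // already generated; idempotent
            return;
        for (const FuncInfo& f : funcs) {
            char buf[256];
            snprintf(buf, sizeof(buf), "%s (%s:%u)", f.name.c_str(),
                     filename ? filename : "?", f.bytecodeOffset);
            labels_.push_back(buf);
        }
    }
    // Safe to call from a sampling context: no allocation, total.
    const char* label(size_t funcIndex) const {
        return funcIndex < labels_.size() ? labels_[funcIndex].c_str() : "?";
    }
};

int main() {
    ProfilingLabels labels;
    labels.ensure(true, {{"fib", 42}, {"main", 7}}, "demo.wasm");
    printf("%s\n%s\n", labels.label(0), labels.label(1));
    printf("%s\n", labels.label(9));  // out of range -> "?"
}
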
void

View file

@ -57,9 +57,6 @@ class CodeSegment
uint8_t* outOfBoundsCode_;
uint8_t* unalignedAccessCode_;
// The profiling mode may be changed dynamically.
bool profilingEnabled_;
public:
#ifdef MOZ_VTUNE
unsigned vtune_method_id_; // Zero if unset.
@ -242,28 +239,24 @@ class CodeRange
DebugTrap, // calls C++ to handle debug event such as
// enter/leave frame or breakpoint
FarJumpIsland, // inserted to connect otherwise out-of-range insns
Inline // stub that is jumped-to, not called, and thus
// replaces/loses preceding innermost frame
Inline, // stub that is jumped-to within prologue/epilogue
Throw // special stack-unwinding stub
};
private:
// All fields are treated as cacheable POD:
uint32_t begin_;
uint32_t profilingReturn_;
uint32_t ret_;
uint32_t end_;
uint32_t funcIndex_;
uint32_t funcLineOrBytecode_;
uint8_t funcBeginToTableEntry_;
uint8_t funcBeginToTableProfilingJump_;
uint8_t funcBeginToNonProfilingEntry_;
uint8_t funcProfilingJumpToProfilingReturn_;
uint8_t funcProfilingEpilogueToProfilingReturn_;
uint8_t funcBeginToNormalEntry_;
Kind kind_ : 8;
public:
CodeRange() = default;
CodeRange(Kind kind, Offsets offsets);
CodeRange(Kind kind, ProfilingOffsets offsets);
CodeRange(Kind kind, CallableOffsets offsets);
CodeRange(uint32_t funcIndex, uint32_t lineOrBytecode, FuncOffsets offsets);
// All CodeRanges have a begin and end.
@ -293,41 +286,30 @@ class CodeRange
bool isInline() const {
return kind() == Inline;
}
bool isThunk() const {
return kind() == FarJumpIsland;
}
// Every CodeRange except entry and inline stubs has a profiling return
// which is used for asynchronous profiling to determine the frame pointer.
// Every CodeRange except entry and inline stubs is callable and has a
// return statement. Asynchronous frame iteration needs to know the offset
// of the return instruction to calculate the frame pointer.
uint32_t profilingReturn() const {
uint32_t ret() const {
MOZ_ASSERT(isFunction() || isImportExit() || isTrapExit());
return profilingReturn_;
return ret_;
}
// Functions have offsets which allow patching to selectively execute
// profiling prologues/epilogues.
// Function CodeRanges have two entry points: one for normal calls (with a
// known signature) and one for table calls (which involves dynamic
// signature checking).
uint32_t funcProfilingEntry() const {
MOZ_ASSERT(isFunction());
return begin();
}
uint32_t funcTableEntry() const {
MOZ_ASSERT(isFunction());
return begin_ + funcBeginToTableEntry_;
return begin_;
}
uint32_t funcTableProfilingJump() const {
uint32_t funcNormalEntry() const {
MOZ_ASSERT(isFunction());
return begin_ + funcBeginToTableProfilingJump_;
}
uint32_t funcNonProfilingEntry() const {
MOZ_ASSERT(isFunction());
return begin_ + funcBeginToNonProfilingEntry_;
}
uint32_t funcProfilingJump() const {
MOZ_ASSERT(isFunction());
return profilingReturn_ - funcProfilingJumpToProfilingReturn_;
}
uint32_t funcProfilingEpilogue() const {
MOZ_ASSERT(isFunction());
return profilingReturn_ - funcProfilingEpilogueToProfilingReturn_;
return begin_ + funcBeginToNormalEntry_;
}
uint32_t funcIndex() const {
MOZ_ASSERT(isFunction());
@ -354,25 +336,6 @@ class CodeRange
WASM_DECLARE_POD_VECTOR(CodeRange, CodeRangeVector)
// A CallThunk describes the offset and target of thunks so that they may be
// patched at runtime when profiling is toggled. Thunks are emitted to connect
// callsites that are too far away from callees to fit in a single call
// instruction's relative offset.
struct CallThunk
{
uint32_t offset;
union {
uint32_t funcIndex;
uint32_t codeRangeIndex;
} u;
CallThunk(uint32_t offset, uint32_t funcIndex) : offset(offset) { u.funcIndex = funcIndex; }
CallThunk() = default;
};
WASM_DECLARE_POD_VECTOR(CallThunk, CallThunkVector)
// A wasm module can either use no memory, a unshared memory (ArrayBuffer) or
// shared memory (SharedArrayBuffer).
@ -463,7 +426,6 @@ struct Metadata : ShareableBase<Metadata>, MetadataCacheablePod
MemoryAccessVector memoryAccesses;
CodeRangeVector codeRanges;
CallSiteVector callSites;
CallThunkVector callThunks;
NameInBytecodeVector funcNames;
CustomSectionVector customSections;
CacheableChars filename;
@ -560,8 +522,9 @@ class Code
const SharedMetadata metadata_;
const SharedBytes maybeBytecode_;
UniqueGeneratedSourceMap maybeSourceMap_;
CacheableCharsVector funcLabels_;
bool profilingEnabled_;
// Mutated at runtime:
CacheableCharsVector profilingLabels_;
// State maintained when debugging is enabled:
@ -602,15 +565,11 @@ class Code
bool getOffsetLocation(JSContext* cx, uint32_t offset, bool* found, size_t* lineno, size_t* column);
bool totalSourceLines(JSContext* cx, uint32_t* count);
// Each Code has a profiling mode that is updated to match the runtime's
// profiling mode when there are no other activations of the code live on
// the stack. Once in profiling mode, ProfilingFrameIterator can be used to
// asynchronously walk the stack. Otherwise, the ProfilingFrameIterator will
// skip any activations of this code.
// To save memory, profilingLabels_ are generated lazily when profiling mode
// is enabled.
MOZ_MUST_USE bool ensureProfilingState(JSRuntime* rt, bool enabled);
bool profilingEnabled() const { return profilingEnabled_; }
const char* profilingLabel(uint32_t funcIndex) const { return funcLabels_[funcIndex].get(); }
void ensureProfilingLabels(bool profilingEnabled);
const char* profilingLabel(uint32_t funcIndex) const;
// The Code can track enter/leave frame events. Any such event triggers
// debug trap. The enter/leave frame events enabled or disabled across

View file

@ -29,13 +29,12 @@ using namespace wasm;
Compartment::Compartment(Zone* zone)
: mutatingInstances_(false),
activationCount_(0),
profilingEnabled_(false)
interruptedCount_(0)
{}
Compartment::~Compartment()
{
MOZ_ASSERT(activationCount_ == 0);
MOZ_ASSERT(interruptedCount_ == 0);
MOZ_ASSERT(instances_.empty());
MOZ_ASSERT(!mutatingInstances_);
}
@ -58,10 +57,14 @@ void
Compartment::trace(JSTracer* trc)
{
// A WasmInstanceObject that was initially reachable when called can become
// unreachable while executing on the stack. Since wasm does not otherwise
// scan the stack during GC to identify live instances, we mark all instance
// objects live if there is any running wasm in the compartment.
if (activationCount_) {
// unreachable while executing on the stack. When execution in a compartment
// is interrupted inside wasm code, wasm::TraceActivations() may miss frames
// due to its use of FrameIterator which assumes wasm has exited through an
// exit stub. This could be fixed by changing wasm::TraceActivations() to
// use a ProfilingFrameIterator, which inspects register state, but for now
// just mark everything in the compartment in this super-rare case.
if (interruptedCount_) {
for (Instance* i : instances_)
i->trace(trc);
}
@ -73,8 +76,7 @@ Compartment::registerInstance(JSContext* cx, HandleWasmInstanceObject instanceOb
Instance& instance = instanceObj->instance();
MOZ_ASSERT(this == &instance.compartment()->wasm);
if (!instance.ensureProfilingState(cx, profilingEnabled_))
return false;
instance.code().ensureProfilingLabels(cx->runtime()->geckoProfiler().enabled());
size_t index;
if (BinarySearchIf(instances_, 0, instances_.length(), InstanceComparator(instance), &index))
@ -139,38 +141,22 @@ Compartment::lookupInstanceDeprecated(const void* pc) const
return instances_[index];
}
bool
Compartment::ensureProfilingState(JSContext* cx)
void
Compartment::setInterrupted(bool interrupted)
{
bool newProfilingEnabled = cx->runtime()->geckoProfiler().enabled();
if (profilingEnabled_ == newProfilingEnabled)
return true;
// Since one Instance can call another Instance in the same compartment
// directly without calling through Instance::callExport(), when profiling
// is enabled, enable it for the entire compartment at once. It is only safe
// to enable profiling when the wasm is not on the stack, so delay enabling
// profiling until there are no live WasmActivations in this compartment.
if (activationCount_ > 0)
return true;
for (Instance* instance : instances_) {
if (!instance->ensureProfilingState(cx, newProfilingEnabled))
return false;
if (interrupted) {
interruptedCount_++;
} else {
MOZ_ASSERT(interruptedCount_ > 0);
interruptedCount_--;
}
profilingEnabled_ = newProfilingEnabled;
return true;
}
bool
Compartment::profilingEnabled() const
void
Compartment::ensureProfilingLabels(bool profilingEnabled)
{
// Profiling can asynchronously interrupt the mutation of the instances_
// vector which is used by lookupCode() during stack-walking. To handle
// this rare case, disable profiling during mutation.
return profilingEnabled_ && !mutatingInstances_;
for (Instance* instance : instances_)
instance->code().ensureProfilingLabels(profilingEnabled);
}
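
setInterrupted uses a counter rather than a bool because interruptions can nest. A small sketch of that discipline, with a hypothetical RAII helper keeping the count balanced (the real code calls setInterrupted directly):

#include <cassert>
#include <cstddef>
#include <cstdio>

class CompartmentSketch {
    size_t interruptedCount_ = 0;
  public:
    void setInterrupted(bool interrupted) {
        if (interrupted) {
            interruptedCount_++;
        } else {
            assert(interruptedCount_ > 0);
            interruptedCount_--;
        }
    }
    bool anyInterrupted() const { return interruptedCount_ > 0; }
};

// Hypothetical RAII helper; keeps the counter balanced across nesting.
class AutoInterrupted {
    CompartmentSketch& c_;
  public:
    explicit AutoInterrupted(CompartmentSketch& c) : c_(c) { c_.setInterrupted(true); }
    ~AutoInterrupted() { c_.setInterrupted(false); }
};

int main() {
    CompartmentSketch c;
    {
        AutoInterrupted outer(c);
        AutoInterrupted inner(c);   // nesting is why a counter is needed
        assert(c.anyInterrupted());
    }
    printf("balanced after unwinding: %s\n", c.anyInterrupted() ? "no" : "yes");
}
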
void

View file

@ -39,8 +39,7 @@ class Compartment
{
InstanceVector instances_;
volatile bool mutatingInstances_;
size_t activationCount_;
bool profilingEnabled_;
size_t interruptedCount_;
friend class js::WasmActivation;
@ -89,12 +88,14 @@ class Compartment
Instance* lookupInstanceDeprecated(const void* pc) const;
// To ensure profiling is enabled (so that wasm frames are not lost in
// profiling callstacks), ensureProfilingState must be called before calling
// the first wasm function in a compartment.
// The wasm::Compartment must be notified when execution is interrupted
// while executing in wasm code in this compartment.
bool ensureProfilingState(JSContext* cx);
bool profilingEnabled() const;
void setInterrupted(bool interrupted);
// Ensure all Instances in this JSCompartment have profiling labels created.
void ensureProfilingLabels(bool profilingEnabled);
// about:memory reporting

View file

@ -1,136 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* Copyright 2016 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "wasm/WasmDebugFrame.h"
#include "vm/EnvironmentObject.h"
#include "wasm/WasmBaselineCompile.h"
#include "wasm/WasmInstance.h"
#include "jsobjinlines.h"
using namespace js;
using namespace js::wasm;
Instance*
DebugFrame::instance() const
{
return tlsData_->instance;
}
GlobalObject*
DebugFrame::global() const
{
return &instance()->object()->global();
}
JSObject*
DebugFrame::environmentChain() const
{
return &global()->lexicalEnvironment();
}
void
DebugFrame::observeFrame(JSContext* cx)
{
if (observing_)
return;
instance()->code().adjustEnterAndLeaveFrameTrapsState(cx, /* enabled = */ true);
observing_ = true;
}
void
DebugFrame::leaveFrame(JSContext* cx)
{
if (!observing_)
return;
instance()->code().adjustEnterAndLeaveFrameTrapsState(cx, /* enabled = */ false);
observing_ = false;
}
void
DebugFrame::clearReturnJSValue()
{
hasCachedReturnJSValue_ = true;
cachedReturnJSValue_.setUndefined();
}
void
DebugFrame::updateReturnJSValue()
{
hasCachedReturnJSValue_ = true;
ExprType returnType = instance()->code().debugGetResultType(funcIndex());
switch (returnType) {
case ExprType::Void:
cachedReturnJSValue_.setUndefined();
break;
case ExprType::I32:
cachedReturnJSValue_.setInt32(resultI32_);
break;
case ExprType::I64:
// Just display as a Number; it's ok if we lose some precision
cachedReturnJSValue_.setDouble((double)resultI64_);
break;
case ExprType::F32:
cachedReturnJSValue_.setDouble(JS::CanonicalizeNaN(resultF32_));
break;
case ExprType::F64:
cachedReturnJSValue_.setDouble(JS::CanonicalizeNaN(resultF64_));
break;
default:
MOZ_CRASH("result type");
}
}
bool
DebugFrame::getLocal(uint32_t localIndex, MutableHandleValue vp)
{
ValTypeVector locals;
size_t argsLength;
if (!instance()->code().debugGetLocalTypes(funcIndex(), &locals, &argsLength))
return false;
BaseLocalIter iter(locals, argsLength, /* debugEnabled = */ true);
while (!iter.done() && iter.index() < localIndex)
iter++;
MOZ_ALWAYS_TRUE(!iter.done());
uint8_t* frame = static_cast<uint8_t*>((void*)this) + offsetOfFrame();
void* dataPtr = frame - iter.frameOffset();
switch (iter.mirType()) {
case jit::MIRType::Int32:
vp.set(Int32Value(*static_cast<int32_t*>(dataPtr)));
break;
case jit::MIRType::Int64:
// Just display as a Number; it's ok if we lose some precision
vp.set(NumberValue((double)*static_cast<int64_t*>(dataPtr)));
break;
case jit::MIRType::Float32:
vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<float*>(dataPtr))));
break;
case jit::MIRType::Double:
vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<double*>(dataPtr))));
break;
default:
MOZ_CRASH("local type");
}
return true;
}

View file

@ -1,127 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
*
* Copyright 2016 Mozilla Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef wasmdebugframe_js_h
#define wasmdebugframe_js_h
#include "gc/Barrier.h"
#include "js/RootingAPI.h"
#include "js/TracingAPI.h"
#include "wasm/WasmTypes.h"
namespace js {
class WasmFunctionCallObject;
namespace wasm {
class DebugFrame
{
union
{
int32_t resultI32_;
int64_t resultI64_;
float resultF32_;
double resultF64_;
};
js::Value cachedReturnJSValue_;
// The fields below are initialized by the baseline compiler.
uint32_t funcIndex_;
uint32_t reserved0_;
union
{
struct
{
bool observing_ : 1;
bool isDebuggee_ : 1;
bool prevUpToDate_ : 1;
bool hasCachedSavedFrame_ : 1;
bool hasCachedReturnJSValue_ : 1;
};
void* reserved1_;
};
TlsData* tlsData_;
Frame frame_;
explicit DebugFrame() {}
void StaticAsserts() {
// VS2017 doesn't consider offsetOfResults() etc. to be constexpr, so we have to use
// offsetof directly. These asserts can't be at class-level because the type is incomplete.
static_assert(offsetof(DebugFrame, resultI32_) == 0, "results shall be at offset 0");
static_assert(offsetof(DebugFrame, tlsData_) + sizeof(TlsData*) ==
offsetof(DebugFrame, frame_),
"TLS pointer must be a field just before the wasm frame");
static_assert(sizeof(DebugFrame) % 8 == 0 && offsetof(DebugFrame, frame_) % 8 == 0,
"DebugFrame and its portion is 8-bytes aligned for AbstractFramePtr");
}
public:
inline uint32_t funcIndex() const { return funcIndex_; }
inline TlsData* tlsData() const { return tlsData_; }
inline Frame& frame() { return frame_; }
Instance* instance() const;
GlobalObject* global() const;
JSObject* environmentChain() const;
void observeFrame(JSContext* cx);
void leaveFrame(JSContext* cx);
void trace(JSTracer* trc);
// These are opaque boolean flags used by the debugger and
// saved-frame-chains code.
inline bool isDebuggee() const { return isDebuggee_; }
inline void setIsDebuggee() { isDebuggee_ = true; }
inline void unsetIsDebuggee() { isDebuggee_ = false; }
inline bool prevUpToDate() const { return prevUpToDate_; }
inline void setPrevUpToDate() { prevUpToDate_ = true; }
inline void unsetPrevUpToDate() { prevUpToDate_ = false; }
inline bool hasCachedSavedFrame() const { return hasCachedSavedFrame_; }
inline void setHasCachedSavedFrame() { hasCachedSavedFrame_ = true; }
inline void* resultsPtr() { return &resultI32_; }
inline HandleValue returnValue() const {
MOZ_ASSERT(hasCachedReturnJSValue_);
return HandleValue::fromMarkedLocation(&cachedReturnJSValue_);
}
void updateReturnJSValue();
void clearReturnJSValue();
bool getLocal(uint32_t localIndex, MutableHandleValue vp);
static constexpr size_t offsetOfResults() { return offsetof(DebugFrame, resultI32_); }
static constexpr size_t offsetOfFlagsWord() { return offsetof(DebugFrame, reserved1_); }
static constexpr size_t offsetOfFuncIndex() { return offsetof(DebugFrame, funcIndex_); }
static constexpr size_t offsetOfTlsData() { return offsetof(DebugFrame, tlsData_); }
static constexpr size_t offsetOfFrame() { return offsetof(DebugFrame, frame_); }
};
} // namespace wasm
} // namespace js
#endif // wasmdebugframe_js_h

View file

@ -18,7 +18,6 @@
#include "wasm/WasmFrameIterator.h"
#include "wasm/WasmDebugFrame.h"
#include "wasm/WasmInstance.h"
#include "jit/MacroAssembler-inl.h"
@ -45,11 +44,10 @@ CallerFPFromFP(void* fp)
return reinterpret_cast<Frame*>(fp)->callerFP;
}
static TlsData*
TlsDataFromFP(void *fp)
static DebugFrame*
FrameToDebugFrame(void* fp)
{
void* debugFrame = (uint8_t*)fp - DebugFrame::offsetOfFrame();
return reinterpret_cast<DebugFrame*>(debugFrame)->tlsData();
return reinterpret_cast<DebugFrame*>((uint8_t*)fp - DebugFrame::offsetOfFrame());
}
FrameIterator::FrameIterator()
@ -69,33 +67,30 @@ FrameIterator::FrameIterator(WasmActivation* activation, Unwind unwind)
code_(nullptr),
callsite_(nullptr),
codeRange_(nullptr),
fp_(activation->fp()),
fp_(nullptr),
unwind_(unwind),
missingFrameMessage_(false)
{
if (fp_) {
settle();
// When execution is interrupted, the embedding may capture a stack trace.
// Since we've lost all the register state, we can't unwind the full stack
// like ProfilingFrameIterator does. However, we can recover the interrupted
// function via the resumePC and at least print that frame.
if (void* resumePC = activation->resumePC()) {
code_ = activation->compartment()->wasm.lookupCode(resumePC);
codeRange_ = code_->lookupRange(resumePC);
MOZ_ASSERT(codeRange_->kind() == CodeRange::Function);
MOZ_ASSERT(!done());
return;
}
void* pc = activation_->resumePC();
if (!pc) {
fp_ = activation->exitFP();
if (!fp_) {
MOZ_ASSERT(done());
return;
}
code_ = activation_->compartment()->wasm.lookupCode(pc);
MOZ_ASSERT(code_);
const CodeRange* codeRange = code_->lookupRange(pc);
MOZ_ASSERT(codeRange);
if (codeRange->kind() == CodeRange::Function)
codeRange_ = codeRange;
else
missingFrameMessage_ = true;
MOZ_ASSERT(!done());
settle();
}
bool
@ -123,38 +118,33 @@ void
FrameIterator::settle()
{
if (unwind_ == Unwind::True)
activation_->unwindFP(fp_);
activation_->unwindExitFP(fp_);
void* returnAddress = ReturnAddressFromFP(fp_);
code_ = activation_->compartment()->wasm.lookupCode(returnAddress);
MOZ_ASSERT(code_);
fp_ = CallerFPFromFP(fp_);
codeRange_ = code_->lookupRange(returnAddress);
MOZ_ASSERT(codeRange_);
if (codeRange_->kind() == CodeRange::Entry) {
fp_ = nullptr;
if (!fp_) {
code_ = nullptr;
codeRange_ = nullptr;
callsite_ = nullptr;
if (unwind_ == Unwind::True)
activation_->unwindFP(nullptr);
activation_->unwindExitFP(nullptr);
MOZ_ASSERT(done());
return;
}
MOZ_RELEASE_ASSERT(codeRange_->kind() == CodeRange::Function);
code_ = activation_->compartment()->wasm.lookupCode(returnAddress);
MOZ_ASSERT(code_);
codeRange_ = code_->lookupRange(returnAddress);
MOZ_ASSERT(codeRange_->kind() == CodeRange::Function);
callsite_ = code_->lookupCallSite(returnAddress);
MOZ_ASSERT(callsite_);
DebugOnly<uint8_t*> oldfp = fp_;
fp_ += callsite_->stackDepth();
MOZ_ASSERT_IF(code_->profilingEnabled(), fp_ == CallerFPFromFP(oldfp));
MOZ_ASSERT(!done());
}
@ -187,8 +177,7 @@ FrameIterator::functionDisplayAtom() const
JSContext* cx = activation_->cx();
if (missingFrameMessage_) {
const char* msg = "asm.js/wasm frames may be missing; enable the profiler before running "
"to see all frames";
const char* msg = "asm.js/wasm frames may be missing below this one";
JSAtom* atom = Atomize(cx, msg, strlen(msg));
if (!atom) {
cx->clearPendingException();
@ -217,11 +206,19 @@ FrameIterator::lineOrBytecode() const
: (codeRange_ ? codeRange_->funcLineOrBytecode() : 0);
}
bool
FrameIterator::hasInstance() const
{
MOZ_ASSERT(!done());
return !!fp_;
}
Instance*
FrameIterator::instance() const
{
MOZ_ASSERT(!done() && debugEnabled());
return TlsDataFromFP(fp_)->instance;
MOZ_ASSERT(!done());
MOZ_ASSERT(hasInstance());
return FrameToDebugFrame(fp_)->instance();
}
bool
@ -238,9 +235,7 @@ DebugFrame*
FrameIterator::debugFrame() const
{
MOZ_ASSERT(!done() && debugEnabled());
// The fp() points to wasm::Frame.
void* buf = static_cast<uint8_t*>(fp_) - DebugFrame::offsetOfFrame();
return static_cast<DebugFrame*>(buf);
return FrameToDebugFrame(fp_);
}
const CallSite*
@ -255,70 +250,64 @@ FrameIterator::debugTrapCallsite() const
/*****************************************************************************/
// Prologue/epilogue code generation
// These constants reflect statically-determined offsets in the profiling
// These constants reflect statically-determined offsets in the
// prologue/epilogue. The offsets are dynamically asserted during code
// generation.
#if defined(JS_CODEGEN_X64)
# if defined(DEBUG)
static const unsigned PushedRetAddr = 0;
static const unsigned PostStorePrePopFP = 0;
# endif
static const unsigned PushedFP = 26;
static const unsigned StoredFP = 33;
static const unsigned PushedFP = 1;
static const unsigned PushedTLS = 3;
static const unsigned PoppedTLS = 1;
#elif defined(JS_CODEGEN_X86)
# if defined(DEBUG)
static const unsigned PushedRetAddr = 0;
static const unsigned PostStorePrePopFP = 0;
# endif
static const unsigned PushedFP = 16;
static const unsigned StoredFP = 19;
static const unsigned PushedFP = 1;
static const unsigned PushedTLS = 2;
static const unsigned PoppedTLS = 1;
#elif defined(JS_CODEGEN_ARM)
static const unsigned BeforePushRetAddr = 0;
static const unsigned PushedRetAddr = 4;
static const unsigned PushedFP = 28;
static const unsigned StoredFP = 32;
static const unsigned PostStorePrePopFP = 4;
static const unsigned PushedFP = 8;
static const unsigned PushedTLS = 12;
static const unsigned PoppedTLS = 4;
#elif defined(JS_CODEGEN_ARM64)
static const unsigned BeforePushRetAddr = 0;
static const unsigned PushedRetAddr = 0;
static const unsigned PushedFP = 0;
static const unsigned StoredFP = 0;
static const unsigned PostStorePrePopFP = 0;
static const unsigned PushedTLS = 0;
static const unsigned PoppedTLS = 0;
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
static const unsigned PushedRetAddr = 8;
static const unsigned PushedFP = 36;
static const unsigned StoredFP = 40;
static const unsigned PostStorePrePopFP = 4;
static const unsigned BeforePushRetAddr = 0;
static const unsigned PushedRetAddr = 4;
static const unsigned PushedFP = 8;
static const unsigned PushedTLS = 12;
static const unsigned PoppedTLS = 4;
#elif defined(JS_CODEGEN_NONE)
# if defined(DEBUG)
static const unsigned PushedRetAddr = 0;
static const unsigned PostStorePrePopFP = 0;
# endif
static const unsigned PushedFP = 1;
static const unsigned StoredFP = 1;
static const unsigned PushedFP = 0;
static const unsigned PushedTLS = 0;
static const unsigned PoppedTLS = 0;
#else
# error "Unknown architecture!"
#endif
static void
PushRetAddr(MacroAssembler& masm)
PushRetAddr(MacroAssembler& masm, unsigned entry)
{
#if defined(JS_CODEGEN_ARM)
MOZ_ASSERT(masm.currentOffset() - entry == BeforePushRetAddr);
masm.push(lr);
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
MOZ_ASSERT(masm.currentOffset() - entry == BeforePushRetAddr);
masm.push(ra);
#else
// The x86/x64 call instruction pushes the return address.
#endif
}
// Generate a prologue that maintains WasmActivation::fp as the virtual frame
// pointer so that ProfilingFrameIterator can walk the stack at any pc in
// generated code.
static void
GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
ProfilingOffsets* offsets)
GenerateCallablePrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
uint32_t* entry)
{
Register scratch = ABINonArgReg0;
// ProfilingFrameIterator needs to know the offsets of several key
// instructions from entry. To save space, we make these offsets static
// constants and assert that they match the actual codegen below. On ARM,
@ -329,102 +318,75 @@ GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason
AutoForbidPools afp(&masm, /* number of instructions in scope = */ 8);
#endif
offsets->begin = masm.currentOffset();
*entry = masm.currentOffset();
PushRetAddr(masm);
MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - offsets->begin);
masm.loadWasmActivationFromSymbolicAddress(scratch);
masm.push(Address(scratch, WasmActivation::offsetOfFP()));
MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - offsets->begin);
masm.storePtr(masm.getStackPointer(), Address(scratch, WasmActivation::offsetOfFP()));
MOZ_ASSERT_IF(!masm.oom(), StoredFP == masm.currentOffset() - offsets->begin);
PushRetAddr(masm, *entry);
MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry);
masm.push(FramePointer);
MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
masm.push(WasmTlsReg);
MOZ_ASSERT_IF(!masm.oom(), PushedTLS == masm.currentOffset() - *entry);
masm.moveStackPtrTo(FramePointer);
}
if (reason != ExitReason::None)
masm.store32(Imm32(int32_t(reason)), Address(scratch, WasmActivation::offsetOfExitReason()));
if (reason != ExitReason::None) {
Register scratch = ABINonArgReg0;
masm.loadWasmActivationFromTls(scratch);
masm.wasmAssertNonExitInvariants(scratch);
Address exitReason(scratch, WasmActivation::offsetOfExitReason());
masm.store32(Imm32(int32_t(reason)), exitReason);
Address exitFP(scratch, WasmActivation::offsetOfExitFP());
masm.storePtr(FramePointer, exitFP);
}
if (framePushed)
masm.subFromStackPtr(Imm32(framePushed));
}
// Generate the inverse of GenerateProfilingPrologue.
static void
GenerateProfilingEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
ProfilingOffsets* offsets)
GenerateCallableEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
uint32_t* ret)
{
Register scratch = ABINonArgReturnReg0;
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
Register scratch2 = ABINonArgReturnReg1;
#endif
if (framePushed)
masm.addToStackPtr(Imm32(framePushed));
masm.loadWasmActivationFromSymbolicAddress(scratch);
if (reason != ExitReason::None) {
masm.store32(Imm32(int32_t(ExitReason::None)),
Address(scratch, WasmActivation::offsetOfExitReason()));
Register scratch = ABINonArgReturnReg0;
masm.loadWasmActivationFromTls(scratch);
Address exitFP(scratch, WasmActivation::offsetOfExitFP());
masm.storePtr(ImmWord(0), exitFP);
Address exitReason(scratch, WasmActivation::offsetOfExitReason());
masm.store32(Imm32(int32_t(ExitReason::None)), exitReason);
}
// ProfilingFrameIterator assumes fixed offsets of the last few
// instructions from profilingReturn, so we use AutoForbidPools to ensure that
// unintended instructions are not automatically inserted.
{
// Forbid pools for the same reason as described in GenerateCallablePrologue.
#if defined(JS_CODEGEN_ARM)
AutoForbidPools afp(&masm, /* number of instructions in scope = */ 4);
AutoForbidPools afp(&masm, /* number of instructions in scope = */ 3);
#endif
// sp protects the stack from clobber via asynchronous signal handlers
// and the async interrupt exit. Since activation.fp can be read at any
// time and still points to the current frame, be careful to only update
// sp after activation.fp has been repointed to the caller's frame.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
masm.loadPtr(Address(masm.getStackPointer(), 0), scratch2);
masm.storePtr(scratch2, Address(scratch, WasmActivation::offsetOfFP()));
DebugOnly<uint32_t> prePop = masm.currentOffset();
masm.addToStackPtr(Imm32(sizeof(void *)));
MOZ_ASSERT_IF(!masm.oom(), PostStorePrePopFP == masm.currentOffset() - prePop);
#else
masm.pop(Address(scratch, WasmActivation::offsetOfFP()));
MOZ_ASSERT(PostStorePrePopFP == 0);
#endif
offsets->profilingReturn = masm.currentOffset();
masm.ret();
}
masm.pop(WasmTlsReg);
DebugOnly<uint32_t> poppedTLS = masm.currentOffset();
masm.pop(FramePointer);
*ret = masm.currentOffset();
masm.ret();
MOZ_ASSERT_IF(!masm.oom(), PoppedTLS == *ret - poppedTLS);
}
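// Sketch of the frame that GenerateCallablePrologue above leaves behind
// (inferred from the push order; the stack grows down):
//
//   [ return address ]   pushed by the call instruction / PushRetAddr
//   [ caller FP      ]   masm.push(FramePointer)
//   [ WasmTlsReg     ]   masm.push(WasmTlsReg); FramePointer points here
//   [ framePushed bytes of locals and spills ]
//
// This layout is why CallerFPFromFP(fp) reads Frame::callerFP one word above
// fp, why FrameToDebugFrame(fp) subtracts DebugFrame::offsetOfFrame(), and
// why PushedRetAddr/PushedFP/PushedTLS/PoppedTLS pin the exact instruction
// offsets ProfilingFrameIterator uses to tell these transient states apart.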
// In profiling mode, we need to maintain fp so that we can unwind the stack at
// any pc. In non-profiling mode, the only way to observe WasmActivation::fp is
// to call out to C++ so, as an optimization, we don't update fp. To avoid
// recompilation when the profiling mode is toggled, we generate both prologues
// a priori and switch between prologues when the profiling mode is toggled.
// Specifically, ToggleProfiling patches all callsites to either call the
// profiling or non-profiling entry point.
void
wasm::GenerateFunctionPrologue(MacroAssembler& masm, unsigned framePushed, const SigIdDesc& sigId,
FuncOffsets* offsets)
{
#if defined(JS_CODEGEN_ARM)
// Flush pending pools so they do not get dumped between the 'begin' and
// 'entry' offsets since the difference must be less than UINT8_MAX.
// 'normalEntry' offsets since the difference must be less than UINT8_MAX
// to be stored in CodeRange::funcBeginToNormalEntry_.
masm.flushBuffer();
#endif
masm.haltingAlign(CodeAlignment);
GenerateProfilingPrologue(masm, framePushed, ExitReason::None, offsets);
Label body;
masm.jump(&body);
// Generate table entry thunk:
masm.haltingAlign(CodeAlignment);
offsets->tableEntry = masm.currentOffset();
// Generate table entry:
offsets->begin = masm.currentOffset();
TrapOffset trapOffset(0); // ignored by masm.wasmEmitTrapOutOfLineCode
TrapDesc trap(trapOffset, Trap::IndirectCallBadSig, masm.framePushed());
switch (sigId.kind()) {
@ -440,66 +402,38 @@ wasm::GenerateFunctionPrologue(MacroAssembler& masm, unsigned framePushed, const
case SigIdDesc::Kind::None:
break;
}
offsets->tableProfilingJump = masm.nopPatchableToNearJump().offset();
// Generate normal prologue:
// Generate normal entry:
masm.nopAlign(CodeAlignment);
offsets->nonProfilingEntry = masm.currentOffset();
PushRetAddr(masm);
masm.subFromStackPtr(Imm32(framePushed + FrameBytesAfterReturnAddress));
GenerateCallablePrologue(masm, framePushed, ExitReason::None, &offsets->normalEntry);
// Prologue join point, body begin:
masm.bind(&body);
masm.setFramePushed(framePushed);
}
// Similar to GenerateFunctionPrologue (see comment), we generate both a
// profiling and non-profiling epilogue a priori. When the profiling mode is
// toggled, ToggleProfiling patches the 'profiling jump' to either be a nop
// (falling through to the normal epilogue) or a jump (jumping to the profiling
// epilogue).
void
wasm::GenerateFunctionEpilogue(MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets)
{
MOZ_ASSERT(masm.framePushed() == framePushed);
#if defined(JS_CODEGEN_ARM)
// Flush pending pools so they do not get dumped between the profilingReturn
// and profilingJump/profilingEpilogue offsets since the difference must be
// less than UINT8_MAX.
masm.flushBuffer();
#endif
// Generate a nop that is overwritten by a jump to the profiling epilogue
// when profiling is enabled.
offsets->profilingJump = masm.nopPatchableToNearJump().offset();
// Normal epilogue:
masm.addToStackPtr(Imm32(framePushed + FrameBytesAfterReturnAddress));
masm.ret();
GenerateCallableEpilogue(masm, framePushed, ExitReason::None, &offsets->ret);
masm.setFramePushed(0);
// Profiling epilogue:
offsets->profilingEpilogue = masm.currentOffset();
GenerateProfilingEpilogue(masm, framePushed, ExitReason::None, offsets);
}
void
wasm::GenerateExitPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
ProfilingOffsets* offsets)
CallableOffsets* offsets)
{
masm.haltingAlign(CodeAlignment);
GenerateProfilingPrologue(masm, framePushed, reason, offsets);
GenerateCallablePrologue(masm, framePushed, reason, &offsets->begin);
masm.setFramePushed(framePushed);
}
void
wasm::GenerateExitEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
ProfilingOffsets* offsets)
CallableOffsets* offsets)
{
// Inverse of GenerateExitPrologue:
MOZ_ASSERT(masm.framePushed() == framePushed);
GenerateProfilingEpilogue(masm, framePushed, reason, offsets);
GenerateCallableEpilogue(masm, framePushed, reason, &offsets->ret);
masm.setFramePushed(0);
}
@ -527,21 +461,11 @@ ProfilingFrameIterator::ProfilingFrameIterator(const WasmActivation& activation)
stackAddress_(nullptr),
exitReason_(ExitReason::None)
{
// If profiling hasn't been enabled for this instance, then CallerFPFromFP
// will be trash, so ignore the entire activation. In practice, this only
// happens if profiling is enabled while the instance is on the stack (in
// which case profiling will be enabled when the instance becomes inactive
// and gets called again).
if (!activation_->compartment()->wasm.profilingEnabled()) {
MOZ_ASSERT(done());
return;
}
initFromFP();
initFromExitFP();
}
static inline void
AssertMatchesCallSite(const WasmActivation& activation, void* callerPC, void* callerFP, void* fp)
AssertMatchesCallSite(const WasmActivation& activation, void* callerPC, void* callerFP)
{
#ifdef DEBUG
Code* code = activation.compartment()->wasm.lookupCode(callerPC);
@ -557,15 +481,13 @@ AssertMatchesCallSite(const WasmActivation& activation, void* callerPC, void* ca
const CallSite* callsite = code->lookupCallSite(callerPC);
MOZ_ASSERT(callsite);
MOZ_ASSERT(callerFP == (uint8_t*)fp + callsite->stackDepth());
#endif
}
void
ProfilingFrameIterator::initFromFP()
ProfilingFrameIterator::initFromExitFP()
{
uint8_t* fp = activation_->fp();
uint8_t* fp = activation_->exitFP();
stackAddress_ = fp;
// If a signal was handled while entering an activation, the frame will
@ -600,13 +522,14 @@ ProfilingFrameIterator::initFromFP()
fp = CallerFPFromFP(fp);
callerPC_ = ReturnAddressFromFP(fp);
callerFP_ = CallerFPFromFP(fp);
AssertMatchesCallSite(*activation_, callerPC_, callerFP_, fp);
AssertMatchesCallSite(*activation_, callerPC_, callerFP_);
break;
case CodeRange::ImportJitExit:
case CodeRange::ImportInterpExit:
case CodeRange::TrapExit:
case CodeRange::DebugTrap:
case CodeRange::Inline:
case CodeRange::Throw:
case CodeRange::FarJumpIsland:
MOZ_CRASH("Unexpected CodeRange kind");
}
@ -615,28 +538,11 @@ ProfilingFrameIterator::initFromFP()
// This allows the variety of exit reasons to show up in the callstack.
exitReason_ = activation_->exitReason();
// In the case of calls to builtins or asynchronous interrupts, no exit path
// is taken so the exitReason is None. Coerce these to the Native exit
// reason so that self-time is accounted for.
if (exitReason_ == ExitReason::None)
exitReason_ = ExitReason::Native;
MOZ_ASSERT(!done());
}
typedef JS::ProfilingFrameIterator::RegisterState RegisterState;
static bool
InThunk(const CodeRange& codeRange, uint32_t offsetInModule)
{
if (codeRange.kind() == CodeRange::FarJumpIsland)
return true;
return codeRange.isFunction() &&
offsetInModule >= codeRange.funcTableEntry() &&
offsetInModule < codeRange.funcNonProfilingEntry();
}
ProfilingFrameIterator::ProfilingFrameIterator(const WasmActivation& activation,
const RegisterState& state)
: activation_(&activation),
@ -647,13 +553,10 @@ ProfilingFrameIterator::ProfilingFrameIterator(const WasmActivation& activation,
stackAddress_(nullptr),
exitReason_(ExitReason::None)
{
// If profiling hasn't been enabled for this instance, then CallerFPFromFP
// will be trash, so ignore the entire activation. In practice, this only
// happens if profiling is enabled while the instance is on the stack (in
// which case profiling will be enabled when the instance becomes inactive
// and gets called again).
if (!activation_->compartment()->wasm.profilingEnabled()) {
MOZ_ASSERT(done());
// In the case of ImportJitExit, the fp register may be temporarily
// clobbered on return from Ion so always use activation.fp when it is set.
if (activation.exitFP()) {
initFromExitFP();
return;
}
@ -661,87 +564,98 @@ ProfilingFrameIterator::ProfilingFrameIterator(const WasmActivation& activation,
// exit trampoline or signal handler.
code_ = activation_->compartment()->wasm.lookupCode(state.pc);
if (!code_) {
initFromFP();
MOZ_ASSERT(done());
return;
}
// Note: fp may be null while entering and leaving the activation.
uint8_t* fp = activation.fp();
// When the pc is inside the prologue/epilogue, the innermost call's Frame
// is not complete and thus fp points to the second-to-innermost call's
// Frame. Since fp can only tell you about its caller, naively unwinding
// while pc is in the prologue/epilogue would skip the second-to-innermost
// call. To avoid this problem, we use the static structure of the code in
// the prologue and epilogue to do the Right Thing.
uint8_t* fp = (uint8_t*)state.fp;
uint8_t* pc = (uint8_t*)state.pc;
void** sp = (void**)state.sp;
const CodeRange* codeRange = code_->lookupRange(pc);
uint32_t offsetInModule = pc - code_->segment().base();
MOZ_ASSERT(offsetInModule >= codeRange->begin());
MOZ_ASSERT(offsetInModule < codeRange->end());
// Compute the offset of the pc from the (normal) entry of the code range.
// The stack state of the pc for the entire table-entry is equivalent to
// that of the first pc of the normal-entry. Thus, we can simplify the below
// case analysis by redirecting all pc-in-table-entry cases to the
// pc-at-normal-entry case.
uint32_t offsetFromEntry;
if (codeRange->isFunction()) {
if (offsetInModule < codeRange->funcNormalEntry())
offsetFromEntry = 0;
else
offsetFromEntry = offsetInModule - codeRange->funcNormalEntry();
} else {
offsetFromEntry = offsetInModule - codeRange->begin();
}
const CodeRange* codeRange = code_->lookupRange(state.pc);
switch (codeRange->kind()) {
case CodeRange::Function:
case CodeRange::FarJumpIsland:
case CodeRange::ImportJitExit:
case CodeRange::ImportInterpExit:
case CodeRange::TrapExit: {
// When the pc is inside the prologue/epilogue, the innermost call's
// Frame is not complete and thus fp points to the second-to-innermost
// call's Frame. Since fp can only tell you about its caller (via
// ReturnAddressFromFP(fp)), naively unwinding while pc is in the
// prologue/epilogue would skip the second-to-innermost call. To avoid
// this problem, we use the static structure of the code in the prologue
// and epilogue to do the Right Thing.
uint32_t offsetInModule = (uint8_t*)state.pc - code_->segment().base();
MOZ_ASSERT(offsetInModule >= codeRange->begin());
MOZ_ASSERT(offsetInModule < codeRange->end());
uint32_t offsetInCodeRange = offsetInModule - codeRange->begin();
void** sp = (void**)state.sp;
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
if (offsetInCodeRange < PushedRetAddr || InThunk(*codeRange, offsetInModule)) {
// First instruction of the ARM/MIPS function; the return address is
// still in lr and fp still holds the caller's fp.
case CodeRange::TrapExit:
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
if (offsetFromEntry == BeforePushRetAddr || codeRange->isThunk()) {
// The return address is still in lr and fp holds the caller's fp.
callerPC_ = state.lr;
callerFP_ = fp;
AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp - 2);
} else if (offsetInModule == codeRange->profilingReturn() - PostStorePrePopFP) {
// Second-to-last instruction of the ARM/MIPS function; fp points to
// the caller's fp; have not yet popped Frame.
callerPC_ = ReturnAddressFromFP(sp);
callerFP_ = CallerFPFromFP(sp);
AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp);
AssertMatchesCallSite(*activation_, callerPC_, callerFP_);
} else
#endif
if (offsetInCodeRange < PushedFP || offsetInModule == codeRange->profilingReturn() ||
InThunk(*codeRange, offsetInModule))
{
// The return address has been pushed on the stack but not fp; fp
// still points to the caller's fp.
callerPC_ = *sp;
if (offsetFromEntry == PushedRetAddr || codeRange->isThunk()) {
// The return address has been pushed on the stack but fp still
// points to the caller's fp.
callerPC_ = sp[0];
callerFP_ = fp;
AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp - 1);
} else if (offsetInCodeRange < StoredFP) {
// The full Frame has been pushed; fp still points to the caller's
// frame.
AssertMatchesCallSite(*activation_, callerPC_, callerFP_);
} else if (offsetFromEntry == PushedFP) {
// The return address and caller's fp have been pushed on the stack; fp
// is still the caller's fp.
callerPC_ = sp[1];
callerFP_ = sp[0];
AssertMatchesCallSite(*activation_, callerPC_, callerFP_);
} else if (offsetFromEntry == PushedTLS) {
// The full Frame has been pushed; fp is still the caller's fp.
MOZ_ASSERT(fp == CallerFPFromFP(sp));
callerPC_ = ReturnAddressFromFP(sp);
callerFP_ = CallerFPFromFP(sp);
AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp);
callerFP_ = fp;
AssertMatchesCallSite(*activation_, callerPC_, callerFP_);
} else if (offsetInModule == codeRange->ret() - PoppedTLS) {
// The TLS field of the Frame has been popped.
callerPC_ = sp[1];
callerFP_ = sp[0];
} else if (offsetInModule == codeRange->ret()) {
// Both the TLS and callerFP fields have been popped and fp now
// points to the caller's frame.
callerPC_ = sp[0];
callerFP_ = fp;
AssertMatchesCallSite(*activation_, callerPC_, callerFP_);
} else {
// Not in the prologue/epilogue.
callerPC_ = ReturnAddressFromFP(fp);
callerFP_ = CallerFPFromFP(fp);
AssertMatchesCallSite(*activation_, callerPC_, callerFP_, fp);
AssertMatchesCallSite(*activation_, callerPC_, callerFP_);
}
break;
}
case CodeRange::Entry: {
case CodeRange::Entry:
// The entry trampoline is the final frame in a WasmActivation. The entry
// trampoline also doesn't use GeneratePrologue/Epilogue so we can't use
// the general unwinding logic above.
MOZ_ASSERT(!fp);
callerPC_ = nullptr;
callerFP_ = nullptr;
break;
}
case CodeRange::DebugTrap:
case CodeRange::Inline: {
// The throw stub clears WasmActivation::fp on its way out.
if (!fp) {
MOZ_ASSERT(done());
return;
}
case CodeRange::Inline:
// Most inline code stubs execute after the prologue/epilogue have
// completed so we can simply unwind based on fp. The only exception is
// the async interrupt stub, since it can be executed at any time.
@ -749,13 +663,18 @@ ProfilingFrameIterator::ProfilingFrameIterator(const WasmActivation& activation,
// skipped frames. Thus, we simply unwind based on fp.
callerPC_ = ReturnAddressFromFP(fp);
callerFP_ = CallerFPFromFP(fp);
AssertMatchesCallSite(*activation_, callerPC_, callerFP_, fp);
AssertMatchesCallSite(*activation_, callerPC_, callerFP_);
break;
}
case CodeRange::Throw:
// The throw stub executes a small number of instructions before popping
// the entire activation. To simplify testing, we simply pretend throw
// stubs have already popped the entire stack.
MOZ_ASSERT(done());
return;
}
codeRange_ = codeRange;
stackAddress_ = state.sp;
stackAddress_ = sp;
MOZ_ASSERT(!done());
}
@ -784,6 +703,7 @@ ProfilingFrameIterator::operator++()
switch (codeRange_->kind()) {
case CodeRange::Entry:
case CodeRange::Throw:
MOZ_ASSERT(callerFP_ == nullptr);
callerPC_ = nullptr;
break;
@ -796,7 +716,7 @@ ProfilingFrameIterator::operator++()
case CodeRange::FarJumpIsland:
stackAddress_ = callerFP_;
callerPC_ = ReturnAddressFromFP(callerFP_);
AssertMatchesCallSite(*activation_, callerPC_, CallerFPFromFP(callerFP_), callerFP_);
AssertMatchesCallSite(*activation_, callerPC_, CallerFPFromFP(callerFP_));
callerFP_ = CallerFPFromFP(callerFP_);
break;
}
@ -816,7 +736,6 @@ ProfilingFrameIterator::label() const
// devtools/client/performance/modules/logic/frame-utils.js
const char* importJitDescription = "fast FFI trampoline (in asm.js)";
const char* importInterpDescription = "slow FFI trampoline (in asm.js)";
const char* nativeDescription = "native call (in asm.js)";
const char* trapDescription = "trap handling (in asm.js)";
const char* debugTrapDescription = "debug trap handling (in asm.js)";
@ -827,8 +746,6 @@ ProfilingFrameIterator::label() const
return importJitDescription;
case ExitReason::ImportInterp:
return importInterpDescription;
case ExitReason::Native:
return nativeDescription;
case ExitReason::Trap:
return trapDescription;
case ExitReason::DebugTrap:
@ -844,99 +761,21 @@ ProfilingFrameIterator::label() const
case CodeRange::DebugTrap: return debugTrapDescription;
case CodeRange::Inline: return "inline stub (in asm.js)";
case CodeRange::FarJumpIsland: return "interstitial (in asm.js)";
case CodeRange::Throw: MOZ_CRASH("no frame for throw stubs");
}
MOZ_CRASH("bad code range kind");
}
/*****************************************************************************/
// Runtime patching to enable/disable profiling
void
wasm::ToggleProfiling(const Code& code, const CallSite& callSite, bool enabled)
wasm::TraceActivations(JSContext* cx, const CooperatingContext& target, JSTracer* trc)
{
if (callSite.kind() != CallSite::Func)
return;
uint8_t* callerRetAddr = code.segment().base() + callSite.returnAddressOffset();
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
void* callee = X86Encoding::GetRel32Target(callerRetAddr);
#elif defined(JS_CODEGEN_ARM)
uint8_t* caller = callerRetAddr - 4;
Instruction* callerInsn = reinterpret_cast<Instruction*>(caller);
BOffImm calleeOffset;
callerInsn->as<InstBLImm>()->extractImm(&calleeOffset);
void* callee = calleeOffset.getDest(callerInsn);
#elif defined(JS_CODEGEN_ARM64)
MOZ_CRASH();
void* callee = nullptr;
(void)callerRetAddr;
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
uint8_t* caller = callerRetAddr - 2 * sizeof(uint32_t);
InstImm* callerInsn = reinterpret_cast<InstImm*>(caller);
BOffImm16 calleeOffset;
callerInsn->extractImm16(&calleeOffset);
void* callee = calleeOffset.getDest(reinterpret_cast<Instruction*>(caller));
#elif defined(JS_CODEGEN_NONE)
MOZ_CRASH();
void* callee = nullptr;
#else
# error "Missing architecture"
#endif
const CodeRange* codeRange = code.lookupRange(callee);
if (!codeRange->isFunction())
return;
uint8_t* from = code.segment().base() + codeRange->funcNonProfilingEntry();
uint8_t* to = code.segment().base() + codeRange->funcProfilingEntry();
if (!enabled)
Swap(from, to);
MOZ_ASSERT(callee == from);
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
X86Encoding::SetRel32(callerRetAddr, to);
#elif defined(JS_CODEGEN_ARM)
new (caller) InstBLImm(BOffImm(to - caller), Assembler::Always);
#elif defined(JS_CODEGEN_ARM64)
(void)to;
MOZ_CRASH();
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
new (caller) InstImm(op_regimm, zero, rt_bgezal, BOffImm16(to - caller));
#elif defined(JS_CODEGEN_NONE)
MOZ_CRASH();
#else
# error "Missing architecture"
#endif
}
void
wasm::ToggleProfiling(const Code& code, const CallThunk& callThunk, bool enabled)
{
const CodeRange& cr = code.metadata().codeRanges[callThunk.u.codeRangeIndex];
uint32_t calleeOffset = enabled ? cr.funcProfilingEntry() : cr.funcNonProfilingEntry();
MacroAssembler::repatchFarJump(code.segment().base(), callThunk.offset, calleeOffset);
}
void
wasm::ToggleProfiling(const Code& code, const CodeRange& codeRange, bool enabled)
{
if (!codeRange.isFunction())
return;
uint8_t* codeBase = code.segment().base();
uint8_t* profilingEntry = codeBase + codeRange.funcProfilingEntry();
uint8_t* tableProfilingJump = codeBase + codeRange.funcTableProfilingJump();
uint8_t* profilingJump = codeBase + codeRange.funcProfilingJump();
uint8_t* profilingEpilogue = codeBase + codeRange.funcProfilingEpilogue();
if (enabled) {
MacroAssembler::patchNopToNearJump(tableProfilingJump, profilingEntry);
MacroAssembler::patchNopToNearJump(profilingJump, profilingEpilogue);
} else {
MacroAssembler::patchNearJumpToNop(tableProfilingJump);
MacroAssembler::patchNearJumpToNop(profilingJump);
for (ActivationIterator iter(cx, target); !iter.done(); ++iter) {
if (iter.activation()->isWasm()) {
for (FrameIterator fi(iter.activation()->asWasm()); !fi.done(); ++fi) {
if (fi.hasInstance())
fi.instance()->trace(trc);
}
}
}
}
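// Illustrative sketch, an assumption about the caller rather than part of
// this patch: TraceActivations would be invoked from GC root marking, once
// per cooperating context, so every Instance with a frame on some wasm stack
// keeps its objects alive for the duration of the collection:
//
//   for (const CooperatingContext& target : cx->runtime()->cooperatingContexts())
//       wasm::TraceActivations(cx, target, trc);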

View file

@ -36,9 +36,8 @@ class CodeRange;
class DebugFrame;
class Instance;
class SigIdDesc;
struct CallThunk;
struct FuncOffsets;
struct ProfilingOffsets;
struct CallableOffsets;
struct TrapOffset;
// Iterates over the frames of a single WasmActivation, called synchronously
@ -76,6 +75,7 @@ class FrameIterator
JSAtom* functionDisplayAtom() const;
unsigned lineOrBytecode() const;
const CodeRange* codeRange() const { return codeRange_; }
bool hasInstance() const;
Instance* instance() const;
bool debugEnabled() const;
DebugFrame* debugFrame() const;
@ -89,25 +89,23 @@ enum class ExitReason : uint32_t
None, // default state, the pc is in wasm code
ImportJit, // fast-path call directly into JIT code
ImportInterp, // slow-path call into C++ Invoke()
Native, // call to native C++ code (e.g., Math.sin, ToInt32(), interrupt)
Trap, // call to trap handler for the trap in WasmActivation::trap
DebugTrap // call to debug trap handler
};
// Iterates over the frames of a single WasmActivation, given an
// asynchrously-interrupted thread's state. If the activation's
// module is not in profiling mode, the activation is skipped.
// asynchronously-interrupted thread's state.
class ProfilingFrameIterator
{
const WasmActivation* activation_;
const Code* code_;
const CodeRange* codeRange_;
uint8_t* callerFP_;
void* callerFP_;
void* callerPC_;
void* stackAddress_;
ExitReason exitReason_;
void initFromFP();
void initFromExitFP();
public:
ProfilingFrameIterator();
@ -125,26 +123,20 @@ class ProfilingFrameIterator
void
GenerateExitPrologue(jit::MacroAssembler& masm, unsigned framePushed, ExitReason reason,
ProfilingOffsets* offsets);
CallableOffsets* offsets);
void
GenerateExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed, ExitReason reason,
ProfilingOffsets* offsets);
CallableOffsets* offsets);
void
GenerateFunctionPrologue(jit::MacroAssembler& masm, unsigned framePushed, const SigIdDesc& sigId,
FuncOffsets* offsets);
void
GenerateFunctionEpilogue(jit::MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets);
// Runtime patching to enable/disable profiling
// Mark all instance objects live on the stack.
void
ToggleProfiling(const Code& code, const CallSite& callSite, bool enabled);
void
ToggleProfiling(const Code& code, const CallThunk& callThunk, bool enabled);
void
ToggleProfiling(const Code& code, const CodeRange& codeRange, bool enabled);
TraceActivations(JSContext* cx, const CooperatingContext& target, JSTracer* trc);
} // namespace wasm
} // namespace js

View file

@ -309,7 +309,7 @@ JumpRange()
typedef HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy> OffsetMap;
bool
ModuleGenerator::patchCallSites(TrapExitOffsetArray* maybeTrapExits)
ModuleGenerator::patchCallSites()
{
masm_.haltingAlign(CodeAlignment);
@ -338,7 +338,7 @@ ModuleGenerator::patchCallSites(TrapExitOffsetArray* maybeTrapExits)
break;
case CallSiteDesc::Func: {
if (funcIsCompiled(cs.funcIndex())) {
uint32_t calleeOffset = funcCodeRange(cs.funcIndex()).funcNonProfilingEntry();
uint32_t calleeOffset = funcCodeRange(cs.funcIndex()).funcNormalEntry();
MOZ_RELEASE_ASSERT(calleeOffset < INT32_MAX);
if (uint32_t(abs(int32_t(calleeOffset) - int32_t(callerOffset))) < JumpRange()) {
@ -351,7 +351,7 @@ ModuleGenerator::patchCallSites(TrapExitOffsetArray* maybeTrapExits)
if (!p) {
Offsets offsets;
offsets.begin = masm_.currentOffset();
uint32_t jumpOffset = masm_.farJumpWithPatch().offset();
masm_.append(CallFarJump(cs.funcIndex(), masm_.farJumpWithPatch()));
offsets.end = masm_.currentOffset();
if (masm_.oom())
return false;
@ -360,30 +360,18 @@ ModuleGenerator::patchCallSites(TrapExitOffsetArray* maybeTrapExits)
return false;
if (!existingCallFarJumps.add(p, cs.funcIndex(), offsets.begin))
return false;
// Record calls' far jumps in metadata since they must be
// repatched at runtime when profiling mode is toggled.
if (!metadata_->callThunks.emplaceBack(jumpOffset, cs.funcIndex()))
return false;
}
masm_.patchCall(callerOffset, p->value());
break;
}
case CallSiteDesc::TrapExit: {
if (maybeTrapExits) {
uint32_t calleeOffset = (*maybeTrapExits)[cs.trap()].begin;
MOZ_RELEASE_ASSERT(calleeOffset < INT32_MAX);
if (uint32_t(abs(int32_t(calleeOffset) - int32_t(callerOffset))) < JumpRange()) {
masm_.patchCall(callerOffset, calleeOffset);
break;
}
}
if (!existingTrapFarJumps[cs.trap()]) {
// See MacroAssembler::wasmEmitTrapOutOfLineCode for why we must
// reload the TLS register on this path.
Offsets offsets;
offsets.begin = masm_.currentOffset();
masm_.loadPtr(Address(FramePointer, offsetof(Frame, tls)), WasmTlsReg);
masm_.append(TrapFarJump(cs.trap(), masm_.farJumpWithPatch()));
offsets.end = masm_.currentOffset();
if (masm_.oom())
@ -429,12 +417,8 @@ ModuleGenerator::patchCallSites(TrapExitOffsetArray* maybeTrapExits)
bool
ModuleGenerator::patchFarJumps(const TrapExitOffsetArray& trapExits, const Offsets& debugTrapStub)
{
for (CallThunk& callThunk : metadata_->callThunks) {
uint32_t funcIndex = callThunk.u.funcIndex;
callThunk.u.codeRangeIndex = funcToCodeRange_[funcIndex];
CodeOffset farJump(callThunk.offset);
masm_.patchFarJump(farJump, funcCodeRange(funcIndex).funcNonProfilingEntry());
}
for (const CallFarJump& farJump : masm_.callFarJumps())
masm_.patchFarJump(farJump.jump, funcCodeRange(farJump.funcIndex).funcNormalEntry());
for (const TrapFarJump& farJump : masm_.trapFarJumps())
masm_.patchFarJump(farJump.jump, trapExits[farJump.trap].begin);
@ -534,7 +518,7 @@ ModuleGenerator::finishFuncExports()
}
typedef Vector<Offsets, 0, SystemAllocPolicy> OffsetVector;
typedef Vector<ProfilingOffsets, 0, SystemAllocPolicy> ProfilingOffsetVector;
typedef Vector<CallableOffsets, 0, SystemAllocPolicy> CallableOffsetVector;
bool
ModuleGenerator::finishCodegen()
@ -550,8 +534,8 @@ ModuleGenerator::finishCodegen()
// due to the large absolute offsets temporarily stored by Label::bind().
OffsetVector entries;
ProfilingOffsetVector interpExits;
ProfilingOffsetVector jitExits;
CallableOffsetVector interpExits;
CallableOffsetVector jitExits;
TrapExitOffsetArray trapExits;
Offsets outOfBoundsExit;
Offsets unalignedAccessExit;
@ -632,7 +616,7 @@ ModuleGenerator::finishCodegen()
return false;
throwStub.offsetBy(offsetInWhole);
if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, throwStub))
if (!metadata_->codeRanges.emplaceBack(CodeRange::Throw, throwStub))
return false;
debugTrapStub.offsetBy(offsetInWhole);
@ -649,7 +633,7 @@ ModuleGenerator::finishCodegen()
// then far jumps. Patching callsites can generate far jumps so there is an
// ordering dependency.
if (!patchCallSites(&trapExits))
if (!patchCallSites())
return false;
if (!patchFarJumps(trapExits, debugTrapStub))
@ -1168,7 +1152,6 @@ ModuleGenerator::finish(const ShareableBytes& bytecode)
metadata_->memoryAccesses.podResizeToFit();
metadata_->codeRanges.podResizeToFit();
metadata_->callSites.podResizeToFit();
metadata_->callThunks.podResizeToFit();
metadata_->debugTrapFarJumpOffsets.podResizeToFit();
metadata_->debugFuncToCodeRange.podResizeToFit();

View file

@ -212,7 +212,7 @@ class MOZ_STACK_CLASS ModuleGenerator
typedef HashSet<uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy> Uint32Set;
typedef Vector<CompileTask, 0, SystemAllocPolicy> CompileTaskVector;
typedef Vector<CompileTask*, 0, SystemAllocPolicy> CompileTaskPtrVector;
typedef EnumeratedArray<Trap, Trap::Limit, ProfilingOffsets> TrapExitOffsetArray;
typedef EnumeratedArray<Trap, Trap::Limit, CallableOffsets> TrapExitOffsetArray;
// Constant parameters
CompileMode compileMode_;
@ -257,7 +257,7 @@ class MOZ_STACK_CLASS ModuleGenerator
bool funcIsCompiled(uint32_t funcIndex) const;
const CodeRange& funcCodeRange(uint32_t funcIndex) const;
uint32_t numFuncImports() const;
MOZ_MUST_USE bool patchCallSites(TrapExitOffsetArray* maybeTrapExits = nullptr);
MOZ_MUST_USE bool patchCallSites();
MOZ_MUST_USE bool patchFarJumps(const TrapExitOffsetArray& trapExits, const Offsets& debugTrapStub);
MOZ_MUST_USE bool finishTask(CompileTask* task);
MOZ_MUST_USE bool finishOutstandingTask();

View file

@ -352,7 +352,7 @@ Instance::Instance(JSContext* cx,
const CodeRange& codeRange = calleeInstanceObj->getExportedFunctionCodeRange(f);
Instance& calleeInstance = calleeInstanceObj->instance();
import.tls = calleeInstance.tlsData();
import.code = calleeInstance.codeSegment().base() + codeRange.funcNonProfilingEntry();
import.code = calleeInstance.codeSegment().base() + codeRange.funcNormalEntry();
import.baselineScript = nullptr;
import.obj = calleeInstanceObj;
} else {
@ -538,9 +538,6 @@ Instance::callExport(JSContext* cx, uint32_t funcIndex, CallArgs args)
// If there has been a moving grow, this Instance should have been notified.
MOZ_RELEASE_ASSERT(!memory_ || tlsData()->memoryBase == memory_->buffer().dataPointerEither());
if (!cx->compartment()->wasm.ensureProfilingState(cx))
return false;
const FuncExport& func = metadata().lookupFuncExport(funcIndex);
// The calling convention for an external call into wasm is to pass an
@ -782,61 +779,6 @@ Instance::deoptimizeImportExit(uint32_t funcImportIndex)
import.baselineScript = nullptr;
}
static void
UpdateEntry(const Code& code, bool profilingEnabled, void** entry)
{
const CodeRange& codeRange = *code.lookupRange(*entry);
void* from = code.segment().base() + codeRange.funcNonProfilingEntry();
void* to = code.segment().base() + codeRange.funcProfilingEntry();
if (!profilingEnabled)
Swap(from, to);
MOZ_ASSERT(*entry == from);
*entry = to;
}
bool
Instance::ensureProfilingState(JSContext* cx, bool newProfilingEnabled)
{
if (code_->profilingEnabled() == newProfilingEnabled)
return true;
if (!code_->ensureProfilingState(cx->runtime(), newProfilingEnabled))
return false;
// Imported wasm functions and typed function tables point directly to
// either the profiling or non-profiling prologue and must therefore be
// updated when the profiling mode is toggled.
for (const FuncImport& fi : metadata().funcImports) {
FuncImportTls& import = funcImportTls(fi);
if (import.obj && import.obj->is<WasmInstanceObject>()) {
Code& code = import.obj->as<WasmInstanceObject>().instance().code();
UpdateEntry(code, newProfilingEnabled, &import.code);
}
}
for (const SharedTable& table : tables_) {
if (!table->isTypedFunction())
continue;
// This logic will have to be generalized to match the import logic
// above if wasm can create typed function tables since a single table
// can contain elements from multiple instances.
MOZ_ASSERT(metadata().kind == ModuleKind::AsmJS);
void** array = table->internalArray();
uint32_t length = table->length();
for (size_t i = 0; i < length; i++) {
if (array[i])
UpdateEntry(*code_, newProfilingEnabled, &array[i]);
}
}
return true;
}
void
Instance::ensureEnterFrameTrapsState(JSContext* cx, bool enabled)
{

View file

@ -78,7 +78,7 @@ class Instance
TableTls& tableTls(const TableDesc& td) const;
// Import call slow paths which are called directly from wasm code.
friend void* AddressOf(SymbolicAddress, JSContext*);
friend void* AddressOf(SymbolicAddress);
static int32_t callImport_void(Instance*, int32_t, int32_t, uint64_t*);
static int32_t callImport_i32(Instance*, int32_t, int32_t, uint64_t*);
static int32_t callImport_i64(Instance*, int32_t, int32_t, uint64_t*);
@ -152,10 +152,6 @@ class Instance
void onMovingGrowMemory(uint8_t* prevMemoryBase);
void onMovingGrowTable();
// See Code::ensureProfilingState comment.
MOZ_MUST_USE bool ensureProfilingState(JSContext* cx, bool enabled);
// Debug support:
bool debugEnabled() const { return code_->metadata().debugEnabled; }
bool enterFrameTrapsEnabled() const { return enterFrameTrapsEnabled_; }

View file

@ -55,22 +55,6 @@ typedef OpIter<IonCompilePolicy> IonOpIter;
class FunctionCompiler;
// TlsUsage describes how the TLS register is used during a function call.
enum class TlsUsage
{
Unused, // No particular action is taken with respect to the TLS register.
Need, // The TLS register must be reloaded just before the call.
CallerSaved // Same, plus space must be allocated to save/restore the TLS
// register.
};
static bool
NeedsTls(TlsUsage usage)
{
return usage == TlsUsage::Need || usage == TlsUsage::CallerSaved;
}
// CallCompileState describes a call that is being compiled. Due to expression
// nesting, multiple calls can be in the middle of compilation at the same time
// and these are tracked in a stack by FunctionCompiler.
@ -93,11 +77,6 @@ class CallCompileState
// FunctionCompiler::startCall() comment below.
uint32_t spIncrement_;
// Set by FunctionCompiler::finishCall(), tells a potentially-inter-module
// call the offset of the reserved space in which it can save the caller's
// WasmTlsReg.
uint32_t tlsStackOffset_;
// Accumulates the register arguments while compiling arguments.
MWasmCall::Args regArgs_;
@ -123,7 +102,6 @@ class CallCompileState
: lineOrBytecode_(lineOrBytecode),
maxChildStackBytes_(0),
spIncrement_(0),
tlsStackOffset_(MWasmCall::DontSaveTls),
childClobbers_(false)
{ }
};
@ -995,7 +973,7 @@ class FunctionCompiler
outer->childClobbers_ = true;
}
bool finishCall(CallCompileState* call, TlsUsage tls)
bool finishCall(CallCompileState* call)
{
MOZ_ALWAYS_TRUE(callStack_.popCopy() == call);
@ -1004,22 +982,10 @@ class FunctionCompiler
return true;
}
if (NeedsTls(tls)) {
if (!call->regArgs_.append(MWasmCall::Arg(AnyRegister(WasmTlsReg), tlsPointer_)))
return false;
}
if (!call->regArgs_.append(MWasmCall::Arg(AnyRegister(WasmTlsReg), tlsPointer_)))
return false;
uint32_t stackBytes = call->abi_.stackBytesConsumedSoFar();
// If this is a potentially-inter-module call, allocate an extra word of
// stack space to save/restore the caller's WasmTlsReg during the call.
// Record the stack offset before including spIncrement since MWasmCall
// will use this offset after having bumped the stack pointer.
if (tls == TlsUsage::CallerSaved) {
call->tlsStackOffset_ = stackBytes;
stackBytes += sizeof(void*);
}
if (call->childClobbers_) {
call->spIncrement_ = AlignBytes(call->maxChildStackBytes_, WasmStackAlignment);
for (MWasmStackArg* stackArg : call->stackArgs_)
@ -1052,8 +1018,7 @@ class FunctionCompiler
CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Func);
MIRType ret = ToMIRType(sig.ret());
auto callee = CalleeDesc::function(funcIndex);
auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ret,
call.spIncrement_, MWasmCall::DontSaveTls);
auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ret, call.spIncrement_);
if (!ins)
return false;
@ -1078,7 +1043,6 @@ class FunctionCompiler
const TableDesc& table = env_.tables[env_.asmJSSigToTableIndex[sigIndex]];
MOZ_ASSERT(IsPowerOfTwo(table.limits.initial));
MOZ_ASSERT(!table.external);
MOZ_ASSERT(call.tlsStackOffset_ == MWasmCall::DontSaveTls);
MConstant* mask = MConstant::New(alloc(), Int32Value(table.limits.initial - 1));
curBlock_->add(mask);
@ -1091,14 +1055,12 @@ class FunctionCompiler
MOZ_ASSERT(sig.id.kind() != SigIdDesc::Kind::None);
MOZ_ASSERT(env_.tables.length() == 1);
const TableDesc& table = env_.tables[0];
MOZ_ASSERT(table.external == (call.tlsStackOffset_ != MWasmCall::DontSaveTls));
callee = CalleeDesc::wasmTable(table, sig.id);
}
CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Dynamic);
auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ToMIRType(sig.ret()),
call.spIncrement_, call.tlsStackOffset_, index);
call.spIncrement_, index);
if (!ins)
return false;
@ -1115,12 +1077,10 @@ class FunctionCompiler
return true;
}
MOZ_ASSERT(call.tlsStackOffset_ != MWasmCall::DontSaveTls);
CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Dynamic);
auto callee = CalleeDesc::import(globalDataOffset);
auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ToMIRType(ret),
call.spIncrement_, call.tlsStackOffset_);
call.spIncrement_);
if (!ins)
return false;
@ -1140,7 +1100,7 @@ class FunctionCompiler
CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Symbolic);
auto callee = CalleeDesc::builtin(builtin);
auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ToMIRType(ret),
call.spIncrement_, MWasmCall::DontSaveTls);
call.spIncrement_);
if (!ins)
return false;
@ -1160,8 +1120,7 @@ class FunctionCompiler
CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Symbolic);
auto* ins = MWasmCall::NewBuiltinInstanceMethodCall(alloc(), desc, builtin,
call.instanceArg_, call.regArgs_,
ToMIRType(ret), call.spIncrement_,
call.tlsStackOffset_);
ToMIRType(ret), call.spIncrement_);
if (!ins)
return false;
@ -1181,7 +1140,7 @@ class FunctionCompiler
if (inDeadCode())
return;
MWasmReturn* ins = MWasmReturn::New(alloc(), operand, tlsPointer_);
MWasmReturn* ins = MWasmReturn::New(alloc(), operand);
curBlock_->end(ins);
curBlock_ = nullptr;
}
@ -1191,7 +1150,7 @@ class FunctionCompiler
if (inDeadCode())
return;
MWasmReturnVoid* ins = MWasmReturnVoid::New(alloc(), tlsPointer_);
MWasmReturnVoid* ins = MWasmReturnVoid::New(alloc());
curBlock_->end(ins);
curBlock_ = nullptr;
}
@ -2018,11 +1977,8 @@ EmitUnreachable(FunctionCompiler& f)
typedef IonOpIter::ValueVector DefVector;
static bool
EmitCallArgs(FunctionCompiler& f, const Sig& sig, const DefVector& args, TlsUsage tls,
CallCompileState* call)
EmitCallArgs(FunctionCompiler& f, const Sig& sig, const DefVector& args, CallCompileState* call)
{
MOZ_ASSERT(NeedsTls(tls));
if (!f.startCall(call))
return false;
@ -2031,7 +1987,7 @@ EmitCallArgs(FunctionCompiler& f, const Sig& sig, const DefVector& args, TlsUsag
return false;
}
return f.finishCall(call, tls);
return f.finishCall(call);
}
static bool
@ -2048,15 +2004,13 @@ EmitCall(FunctionCompiler& f)
return true;
const Sig& sig = *f.env().funcSigs[funcIndex];
bool import = f.env().funcIsImport(funcIndex);
TlsUsage tls = import ? TlsUsage::CallerSaved : TlsUsage::Need;
CallCompileState call(f, lineOrBytecode);
if (!EmitCallArgs(f, sig, args, tls, &call))
if (!EmitCallArgs(f, sig, args, &call))
return false;
MDefinition* def;
if (import) {
if (f.env().funcIsImport(funcIndex)) {
uint32_t globalDataOffset = f.env().funcImportGlobalDataOffsets[funcIndex];
if (!f.callImport(globalDataOffset, call, sig.ret(), &def))
return false;
@ -2093,12 +2047,8 @@ EmitCallIndirect(FunctionCompiler& f, bool oldStyle)
const Sig& sig = f.env().sigs[sigIndex];
TlsUsage tls = !f.env().isAsmJS() && f.env().tables[0].external
? TlsUsage::CallerSaved
: TlsUsage::Need;
CallCompileState call(f, lineOrBytecode);
if (!EmitCallArgs(f, sig, args, tls, &call))
if (!EmitCallArgs(f, sig, args, &call))
return false;
MDefinition* def;
@ -2581,7 +2531,7 @@ EmitUnaryMathBuiltinCall(FunctionCompiler& f, SymbolicAddress callee, ValType op
if (!f.passArg(input, operandType, &call))
return false;
if (!f.finishCall(&call, TlsUsage::Unused))
if (!f.finishCall(&call))
return false;
MDefinition* def;
@ -2612,7 +2562,7 @@ EmitBinaryMathBuiltinCall(FunctionCompiler& f, SymbolicAddress callee, ValType o
if (!f.passArg(rhs, operandType, &call))
return false;
if (!f.finishCall(&call, TlsUsage::Unused))
if (!f.finishCall(&call))
return false;
MDefinition* def;
@ -3217,10 +3167,7 @@ EmitGrowMemory(FunctionCompiler& f)
if (!f.passArg(delta, ValType::I32, &args))
return false;
// As a short-cut, pretend this is an inter-module call so that any pinned
// heap pointer will be reloaded after the call. This hack will go away once
// we can stop pinning registers.
f.finishCall(&args, TlsUsage::CallerSaved);
f.finishCall(&args);
MDefinition* ret;
if (!f.builtinInstanceMethodCall(SymbolicAddress::GrowMemory, args, ValType::I32, &ret))
@ -3246,7 +3193,7 @@ EmitCurrentMemory(FunctionCompiler& f)
if (!f.passInstance(&args))
return false;
f.finishCall(&args, TlsUsage::Need);
f.finishCall(&args);
MDefinition* ret;
if (!f.builtinInstanceMethodCall(SymbolicAddress::CurrentMemory, args, ValType::I32, &ret))

View file

@ -65,13 +65,6 @@ wasm::HasCompilerSupport(JSContext* cx)
if (!wasm::HaveSignalHandlers())
return false;
#if defined(JS_CODEGEN_ARM)
// movw/t are required for the loadWasmActivationFromSymbolicAddress in
// GenerateProfilingPrologue/Epilogue to avoid using the constant pool.
if (!HasMOVWT())
return false;
#endif
#if defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_ARM64)
return false;
#else

View file

@ -440,11 +440,11 @@ Module::extractCode(JSContext* cx, MutableHandleValue vp)
if (!JS_DefineProperty(cx, segment, "funcIndex", value, JSPROP_ENUMERATE))
return false;
value.setNumber((uint32_t)p.funcNonProfilingEntry());
value.setNumber((uint32_t)p.funcNormalEntry());
if (!JS_DefineProperty(cx, segment, "funcBodyBegin", value, JSPROP_ENUMERATE))
return false;
value.setNumber((uint32_t)p.funcProfilingEpilogue());
value.setNumber((uint32_t)p.end());
if (!JS_DefineProperty(cx, segment, "funcBodyEnd", value, JSPROP_ENUMERATE))
return false;
}
@ -521,7 +521,6 @@ Module::initSegments(JSContext* cx,
for (const ElemSegment& seg : elemSegments_) {
Table& table = *tables[seg.tableIndex];
uint32_t offset = EvaluateInitExpr(globalImports, seg.offset);
bool profilingEnabled = instance.code().profilingEnabled();
const CodeRangeVector& codeRanges = metadata().codeRanges;
uint8_t* codeBase = instance.codeBase();
@ -539,9 +538,7 @@ Module::initSegments(JSContext* cx,
} else {
const CodeRange& cr = codeRanges[seg.elemCodeRangeIndices[i]];
uint32_t entryOffset = table.isTypedFunction()
? profilingEnabled
? cr.funcProfilingEntry()
: cr.funcNonProfilingEntry()
? cr.funcNormalEntry()
: cr.funcTableEntry();
table.set(offset + i, codeBase + entryOffset, instance);
}

View file

@ -122,7 +122,7 @@ wasm::GenerateEntry(MacroAssembler& masm, const FuncExport& fe)
Register scratch = ABINonArgReturnReg1;
// Read the arguments of wasm::ExportFuncPtr according to the native ABI.
// The entry stub's frame is only 1 word, not the usual 2 for wasm::Frame.
// The entry stub's frame is 1 word.
const unsigned argBase = sizeof(void*) + masm.framePushed();
ABIArgGenerator abi;
ABIArg arg;
@ -262,12 +262,25 @@ wasm::GenerateEntry(MacroAssembler& masm, const FuncExport& fe)
}
}
// Set the FramePointer to null for the benefit of debugging.
masm.movePtr(ImmWord(0), FramePointer);
// Call into the real function.
masm.assertStackAlignment(WasmStackAlignment);
masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());
masm.assertStackAlignment(WasmStackAlignment);
#ifdef DEBUG
// Assert that the callee restored FramePointer to null.
Label ok;
masm.branchTestPtr(Assembler::Zero, FramePointer, FramePointer, &ok);
masm.breakpoint();
masm.bind(&ok);
#endif
// Recover the stack pointer value before dynamic alignment.
masm.loadWasmActivationFromTls(scratch);
masm.wasmAssertNonExitInvariants(scratch);
masm.loadStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
masm.setFramePushed(FramePushedForEntrySP);
@ -445,15 +458,14 @@ FillArgumentArray(MacroAssembler& masm, const ValTypeVector& args, unsigned argO
// normal wasm function for the purposes of exports and table calls. In
// particular, the wrapper function provides:
// - a table entry, so JS imports can be put into tables
// - normal (non-)profiling entries, so that, if the import is re-exported,
// an entry stub can be generated and called without any special cases
// - normal entries, so that, if the import is re-exported, an entry stub can
// be generated and called without any special cases
FuncOffsets
wasm::GenerateImportFunction(jit::MacroAssembler& masm, const FuncImport& fi, SigIdDesc sigId)
{
masm.setFramePushed(0);
unsigned tlsBytes = sizeof(void*);
unsigned framePushed = StackDecrementForCall(masm, WasmStackAlignment, fi.sig().args(), tlsBytes);
unsigned framePushed = StackDecrementForCall(masm, WasmStackAlignment, fi.sig().args());
FuncOffsets offsets;
GenerateFunctionPrologue(masm, framePushed, sigId, &offsets);
@ -474,16 +486,12 @@ wasm::GenerateImportFunction(jit::MacroAssembler& masm, const FuncImport& fi, Si
StackCopy(masm, i.mirType(), scratch, src, dst);
}
// Save the TLS register so it can be restored later.
uint32_t tlsStackOffset = i.stackBytesConsumedSoFar();
masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), tlsStackOffset));
// Call the import exit stub.
CallSiteDesc desc(CallSiteDesc::Dynamic);
masm.wasmCallImport(desc, CalleeDesc::import(fi.tlsDataOffset()));
// Restore the TLS register and pinned regs, per wasm function ABI.
masm.loadPtr(Address(masm.getStackPointer(), tlsStackOffset), WasmTlsReg);
masm.loadWasmTlsRegFromFrame();
masm.loadWasmPinnedRegsFromTls();
GenerateFunctionEpilogue(masm, framePushed, &offsets);
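GenerateImportFunction no longer reserves a word for the TLS register, so the stack decrement is computed from the argument area alone. The decrement itself is an align-up; roughly, under assumed semantics of StackDecrementForCall:

```cpp
#include <cassert>
#include <cstdint>

uint32_t alignUp(uint32_t v, uint32_t align) {
    assert((align & (align - 1)) == 0);  // alignment must be a power of two
    return (v + align - 1) & ~(align - 1);
}

// Assumed semantics: grow the frame so that (framePushed + decrement) is
// aligned, given bytesToPush bytes of outgoing arguments.
uint32_t stackDecrementForCall(uint32_t framePushed, uint32_t alignment,
                               uint32_t bytesToPush) {
    return alignUp(framePushed + bytesToPush, alignment) - framePushed;
}

int main() {
    // 8 bytes already pushed, 16-byte alignment, 20 bytes of args -> 24
    assert(stackDecrementForCall(8, 16, 20) == 24);
}
```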
@ -497,7 +505,7 @@ wasm::GenerateImportFunction(jit::MacroAssembler& masm, const FuncImport& fi, Si
// Generate a stub that is called via the internal ABI derived from the
// signature of the import and calls into an appropriate callImport C++
// function, having boxed all the ABI arguments into a homogeneous Value array.
ProfilingOffsets
CallableOffsets
wasm::GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi, uint32_t funcImportIndex,
Label* throwLabel)
{
@ -519,7 +527,7 @@ wasm::GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi, uint3
unsigned argBytes = Max<size_t>(1, fi.sig().args().length()) * sizeof(Value);
unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes);
ProfilingOffsets offsets;
CallableOffsets offsets;
GenerateExitPrologue(masm, framePushed, ExitReason::ImportInterp, &offsets);
// Fill the argument array.
@ -621,12 +629,10 @@ wasm::GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi, uint3
return offsets;
}
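The recurring ProfilingOffsets to CallableOffsets rename throughout these stubs suggests the type now describes any stub with a prologue and a return point, profiling or not. A guessed sketch of the shape, based only on how it is used here:

```cpp
#include <cassert>
#include <cstdint>

struct Offsets {
    uint32_t begin = 0;  // start of the stub's code
    uint32_t end = 0;    // one past its last byte
};

// Assumed layout: a callable stub additionally records its return point,
// which stack unwinding needs whether or not profiling is active.
struct CallableOffsets : Offsets {
    uint32_t ret = 0;
};

int main() {
    CallableOffsets o;
    o.begin = 0; o.ret = 24; o.end = 32;
    assert(o.begin <= o.ret && o.ret < o.end);
}
```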
static const unsigned SavedTlsReg = sizeof(void*);
// Generate a stub that is called via the internal ABI derived from the
// signature of the import and calls into a compatible JIT function,
// having boxed all the ABI arguments into the JIT stack frame layout.
ProfilingOffsets
CallableOffsets
wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLabel)
{
masm.setFramePushed(0);
@ -640,11 +646,11 @@ wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* t
static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");
unsigned sizeOfRetAddr = sizeof(void*);
unsigned jitFrameBytes = 3 * sizeof(void*) + (1 + fi.sig().args().length()) * sizeof(Value);
unsigned totalJitFrameBytes = sizeOfRetAddr + jitFrameBytes + SavedTlsReg;
unsigned totalJitFrameBytes = sizeOfRetAddr + jitFrameBytes;
unsigned jitFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalJitFrameBytes) -
sizeOfRetAddr;
ProfilingOffsets offsets;
CallableOffsets offsets;
GenerateExitPrologue(masm, jitFramePushed, ExitReason::ImportJit, &offsets);
// 1. Descriptor
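Dropping SavedTlsReg also shrinks the JIT exit frame by one word. Illustrative arithmetic for the sizes above, assuming a 64-bit target with 8-byte pointers and Values:

```cpp
#include <cassert>
#include <cstdint>

int main() {
    // Example signature with two arguments.
    const uint32_t numArgs = 2;
    const uint32_t sizeOfRetAddr = 8;                          // sizeof(void*)
    const uint32_t jitFrameBytes = 3 * 8 + (1 + numArgs) * 8;  // descriptor etc.
    const uint32_t totalJitFrameBytes = sizeOfRetAddr + jitFrameBytes;
    assert(totalJitFrameBytes == 56);  // one word less than with SavedTlsReg
}
```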
@ -684,12 +690,6 @@ wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* t
argOffset += fi.sig().args().length() * sizeof(Value);
MOZ_ASSERT(argOffset == jitFrameBytes);
// 6. Jit code will clobber all registers, even non-volatiles. WasmTlsReg
// must be kept live for the benefit of the epilogue, so push it on the
// stack so that it can be restored before the epilogue.
static_assert(SavedTlsReg == sizeof(void*), "stack frame accounting");
masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), jitFrameBytes));
{
// Enable Activation.
//
@ -700,8 +700,7 @@ wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* t
Register act = WasmIonExitRegE1;
// JitActivation* act = cx->activation();
masm.movePtr(SymbolicAddress::ContextPtr, cx);
masm.loadPtr(Address(cx, 0), cx);
masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, cx)), cx);
masm.loadPtr(Address(cx, JSContext::offsetOfActivation()), act);
// act.active_ = true;
@ -718,20 +717,26 @@ wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* t
masm.callJitNoProfiler(callee);
AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddr);
// The JIT callee clobbers all registers, including WasmTlsReg and
// FrameRegister, so restore those here.
masm.loadWasmTlsRegFromFrame();
masm.moveStackPtrTo(FramePointer);
masm.addPtr(Imm32(masm.framePushed()), FramePointer);
{
// Disable Activation.
//
// This sequence needs three registers, and must preserve the JSReturnReg_Data and
// JSReturnReg_Type, so there are five live registers.
// This sequence needs three registers and must preserve WasmTlsReg,
// JSReturnReg_Data and JSReturnReg_Type.
MOZ_ASSERT(JSReturnReg_Data == WasmIonExitRegReturnData);
MOZ_ASSERT(JSReturnReg_Type == WasmIonExitRegReturnType);
MOZ_ASSERT(WasmTlsReg == WasmIonExitTlsReg);
Register cx = WasmIonExitRegD0;
Register act = WasmIonExitRegD1;
Register tmp = WasmIonExitRegD2;
// JitActivation* act = cx->activation();
masm.movePtr(SymbolicAddress::ContextPtr, cx);
masm.loadPtr(Address(cx, 0), cx);
masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, cx)), cx);
masm.loadPtr(Address(cx, JSContext::offsetOfActivation()), act);
// cx->jitTop = act->prevJitTop_;
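Both activation toggles now read the JSContext out of the wasm TLS data instead of dereferencing a process-global ContextPtr. In sketch form (field names assumed from the diff):

```cpp
#include <cassert>

struct JSContextSketch { int activation; };
struct TlsDataSketch { JSContextSketch* cx; };  // assumed TlsData field

JSContextSketch* loadCxFromTls(TlsDataSketch* tls) {
    // masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, cx)), cx);
    return tls->cx;
}

int main() {
    JSContextSketch cx{0};
    TlsDataSketch tls{&cx};
    assert(loadCxFromTls(&tls) == &cx);
}
```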
@ -795,12 +800,6 @@ wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* t
Label done;
masm.bind(&done);
// Ion code does not respect the system ABI's callee-saved register
// conventions so reload any assumed-non-volatile registers. Note that the
// reserveStack(sizeOfRetAddr) above means that the stack pointer is at a
// different offset than when WasmTlsReg was stored.
masm.loadPtr(Address(masm.getStackPointer(), jitFrameBytes + sizeOfRetAddr), WasmTlsReg);
GenerateExitEpilogue(masm, masm.framePushed(), ExitReason::ImportJit, &offsets);
if (oolConvert.used()) {
@ -864,10 +863,10 @@ wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* t
}
// Generate a stub that calls into ReportTrap with the right trap reason.
// This stub is called with ABIStackAlignment by a trap out-of-line path. A
// profiling prologue/epilogue is used so that stack unwinding picks up the
// This stub is called with ABIStackAlignment by a trap out-of-line path. An
// exit prologue/epilogue is used so that stack unwinding picks up the
// current WasmActivation. Unwinding will begin at the caller of this trap exit.
ProfilingOffsets
CallableOffsets
wasm::GenerateTrapExit(MacroAssembler& masm, Trap trap, Label* throwLabel)
{
masm.haltingAlign(CodeAlignment);
@ -879,7 +878,7 @@ wasm::GenerateTrapExit(MacroAssembler& masm, Trap trap, Label* throwLabel)
uint32_t framePushed = StackDecrementForCall(masm, ABIStackAlignment, args);
ProfilingOffsets offsets;
CallableOffsets offsets;
GenerateExitPrologue(masm, framePushed, ExitReason::Trap, &offsets);
ABIArgMIRTypeIter i(args);
@ -904,11 +903,11 @@ wasm::GenerateTrapExit(MacroAssembler& masm, Trap trap, Label* throwLabel)
// Generate a stub which is only used by the signal handlers to handle out of
// bounds access by experimental SIMD.js and Atomics and unaligned accesses on
// ARM. This stub is executed by direct PC transfer from the faulting memory
// access and thus the stack depth is unknown. Since WasmActivation::fp is not
// set before calling the error reporter, the current wasm activation will be
// lost. This stub should be removed when SIMD.js and Atomics are moved to wasm
// and given proper traps and when we use a non-faulting strategy for unaligned
// ARM access.
// access and thus the stack depth is unknown. Since WasmActivation::exitFP is
// not set before calling the error reporter, the current wasm activation will
// be lost. This stub should be removed when SIMD.js and Atomics are moved to
// wasm and given proper traps and when we use a non-faulting strategy for
// unaligned ARM access.
static Offsets
GenerateGenericMemoryAccessTrap(MacroAssembler& masm, SymbolicAddress reporter, Label* throwLabel)
{
@ -943,13 +942,17 @@ wasm::GenerateUnalignedExit(MacroAssembler& masm, Label* throwLabel)
return GenerateGenericMemoryAccessTrap(masm, SymbolicAddress::ReportUnalignedAccess, throwLabel);
}
#if defined(JS_CODEGEN_ARM)
static const LiveRegisterSet AllRegsExceptPCSP(
GeneralRegisterSet(Registers::AllMask & ~((uint32_t(1) << Registers::sp) |
(uint32_t(1) << Registers::pc))),
FloatRegisterSet(FloatRegisters::AllDoubleMask));
static_assert(!SupportsSimd, "high lanes of SIMD registers need to be saved too.");
#else
static const LiveRegisterSet AllRegsExceptSP(
GeneralRegisterSet(Registers::AllMask & ~(uint32_t(1) << Registers::StackPointer)),
FloatRegisterSet(FloatRegisters::AllMask));
static const LiveRegisterSet AllAllocatableRegs = LiveRegisterSet(
GeneralRegisterSet(Registers::AllocatableMask),
FloatRegisterSet(FloatRegisters::AllMask));
#endif
// The async interrupt-callback exit is called from arbitrarily-interrupted wasm
// code. That means we must first save *all* registers and restore *all*
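The register sets above are plain bitmasks over register encodings with the stack pointer (and, on ARM, the program counter) masked out. A standalone sketch of the construction, with encodings invented for illustration:

```cpp
#include <cstdint>

constexpr uint32_t AllMask = 0xffffffffu;
constexpr uint32_t spBit = 13;  // invented register encodings
constexpr uint32_t pcBit = 15;

constexpr uint32_t AllExceptSP = AllMask & ~(1u << spBit);
constexpr uint32_t AllExceptPCSP = AllMask & ~((1u << spBit) | (1u << pcBit));

static_assert((AllExceptSP & (1u << spBit)) == 0, "sp excluded");
static_assert((AllExceptPCSP & (1u << pcBit)) == 0, "pc excluded");

int main() {}
```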
@ -971,18 +974,11 @@ wasm::GenerateInterruptExit(MacroAssembler& masm, Label* throwLabel)
// Be very careful here not to perturb the machine state before saving it
// to the stack. In particular, add/sub instructions may set conditions in
// the flags register.
masm.push(Imm32(0)); // space for resumePC
masm.pushFlags(); // after this we are safe to use sub
masm.setFramePushed(0); // set to zero so we can use masm.framePushed() below
masm.push(Imm32(0)); // space used as return address, updated below
masm.setFramePushed(0); // set to 0 now so that framePushed is the offset of the return address
masm.PushFlags(); // after this we are safe to use sub
masm.PushRegsInMask(AllRegsExceptSP); // save all GP/FP registers (except SP)
Register scratch = ABINonArgReturnReg0;
// Store resumePC into the reserved space.
masm.loadWasmActivationFromSymbolicAddress(scratch);
masm.loadPtr(Address(scratch, WasmActivation::offsetOfResumePC()), scratch);
masm.storePtr(scratch, Address(masm.getStackPointer(), masm.framePushed() + sizeof(void*)));
// We know that StackPointer is word-aligned, but not necessarily
// stack-aligned, so we need to align it dynamically.
masm.moveStackPtrTo(ABINonVolatileReg);
@ -990,18 +986,27 @@ wasm::GenerateInterruptExit(MacroAssembler& masm, Label* throwLabel)
if (ShadowStackSpace)
masm.subFromStackPtr(Imm32(ShadowStackSpace));
// Make the call to C++, which preserves ABINonVolatileReg.
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::HandleExecutionInterrupt);
masm.branchIfFalseBool(ReturnReg, throwLabel);
// HandleExecutionInterrupt returns null if execution is interrupted and
// the resumption pc otherwise.
masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
// Restore the StackPointer to its position before the call.
// Restore the stack pointer, then store resumePC into the stack slot that
// will be popped by the 'ret' below.
masm.moveToStackPtr(ABINonVolatileReg);
masm.storePtr(ReturnReg, Address(StackPointer, masm.framePushed()));
// Restore the machine state to before the interrupt.
masm.PopRegsInMask(AllRegsExceptSP); // restore all GP/FP registers (except SP)
masm.popFlags(); // after this, nothing that sets conditions
masm.ret(); // pop resumePC into PC
// Restore the machine state to before the interrupt. After popping flags,
// no instructions can be executed which set flags.
masm.PopRegsInMask(AllRegsExceptSP);
masm.PopFlags();
// Return to the resumePC stored in this stack slot above.
MOZ_ASSERT(masm.framePushed() == 0);
masm.ret();
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
// Reserve space to store resumePC and HeapReg.
masm.subFromStackPtr(Imm32(2 * sizeof(intptr_t)));
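The x86/x64 path above is worth restating: a placeholder word is pushed on entry, HandleExecutionInterrupt's non-null result is written into that slot, and the final ret pops it as the resume PC. A simulation of the slot discipline (a vector stands in for the machine stack, which really grows downward):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
    std::vector<uintptr_t> stack;
    stack.push_back(0);                    // push(Imm32(0)): placeholder slot
    size_t resumeSlot = stack.size() - 1;  // framePushed == 0 names this slot

    // ... registers saved, C++ handler called ...
    uintptr_t resumePC = 0x1234;           // pretend non-null handler result
    stack[resumeSlot] = resumePC;          // storePtr(ReturnReg, Address(sp, framePushed))

    // ... registers and flags restored ...
    uintptr_t pc = stack.back();           // ret pops the slot into PC
    stack.pop_back();
    assert(pc == resumePC);
}
```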
@ -1049,61 +1054,40 @@ wasm::GenerateInterruptExit(MacroAssembler& masm, Label* throwLabel)
masm.as_jr(HeapReg);
masm.loadPtr(Address(StackPointer, -sizeof(intptr_t)), HeapReg);
#elif defined(JS_CODEGEN_ARM)
masm.setFramePushed(0); // set to zero so we can use masm.framePushed() below
masm.push(Imm32(0)); // space used as return address, updated below
masm.setFramePushed(0); // set to 0 now so that framePushed is the offset of the return address
masm.PushRegsInMask(AllRegsExceptPCSP); // save all GP/FP registers (except PC and SP)
// Save all GPR, except the stack pointer.
masm.PushRegsInMask(LiveRegisterSet(
GeneralRegisterSet(Registers::AllMask & ~(1<<Registers::sp)),
FloatRegisterSet(uint32_t(0))));
// Save both the APSR and FPSCR in non-volatile registers.
// Save SP, APSR and FPSCR in non-volatile registers.
masm.as_mrs(r4);
masm.as_vmrs(r5);
// Save the stack pointer in a non-volatile register.
masm.mov(sp,r6);
// Align the stack.
masm.as_bic(sp, sp, Imm8(7));
masm.mov(sp, r6);
// Store resumePC into the return PC stack slot.
masm.loadWasmActivationFromSymbolicAddress(IntArgReg0);
masm.loadPtr(Address(IntArgReg0, WasmActivation::offsetOfResumePC()), IntArgReg1);
masm.storePtr(IntArgReg1, Address(r6, 14 * sizeof(uint32_t*)));
// Save all FP registers
static_assert(!SupportsSimd, "high lanes of SIMD registers need to be saved too.");
masm.PushRegsInMask(LiveRegisterSet(GeneralRegisterSet(0),
FloatRegisterSet(FloatRegisters::AllDoubleMask)));
// We know that StackPointer is word-aligned, but not necessarily
// stack-aligned, so we need to align it dynamically.
masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
// Make the call to C++, which preserves the non-volatile registers.
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::HandleExecutionInterrupt);
masm.branchIfFalseBool(ReturnReg, throwLabel);
// HandleExecutionInterrupt returns null if execution is interrupted and
// the resumption pc otherwise.
masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
// Restore the machine state to before the interrupt. this will set the pc!
// Restore the stack pointer, then store resumePC into the stack slot that
// will be popped by the 'ret' below.
masm.mov(r6, sp);
masm.storePtr(ReturnReg, Address(sp, masm.framePushed()));
// Restore all FP registers
masm.PopRegsInMask(LiveRegisterSet(GeneralRegisterSet(0),
FloatRegisterSet(FloatRegisters::AllDoubleMask)));
masm.mov(r6,sp);
// Restore the machine state to before the interrupt. After popping flags,
// no instructions can be executed which set flags.
masm.as_vmsr(r5);
masm.as_msr(r4);
// Restore all GP registers
masm.startDataTransferM(IsLoad, sp, IA, WriteBack);
masm.transferReg(r0);
masm.transferReg(r1);
masm.transferReg(r2);
masm.transferReg(r3);
masm.transferReg(r4);
masm.transferReg(r5);
masm.transferReg(r6);
masm.transferReg(r7);
masm.transferReg(r8);
masm.transferReg(r9);
masm.transferReg(r10);
masm.transferReg(r11);
masm.transferReg(r12);
masm.transferReg(lr);
masm.finishDataTransfer();
masm.PopRegsInMask(AllRegsExceptPCSP);
// Return to the resumePC stored in this stack slot above.
MOZ_ASSERT(masm.framePushed() == 0);
masm.ret();
#elif defined(JS_CODEGEN_ARM64)
MOZ_CRASH();
@ -1138,22 +1122,13 @@ wasm::GenerateThrowStub(MacroAssembler& masm, Label* throwLabel)
masm.subFromStackPtr(Imm32(ShadowStackSpace));
masm.call(SymbolicAddress::HandleThrow);
Register scratch = ABINonArgReturnReg0;
masm.loadWasmActivationFromSymbolicAddress(scratch);
#ifdef DEBUG
// We are about to pop all frames in this WasmActivation. Check that fp is
// null, maintaining the invariant that fp is either null or pointing to a
// valid frame.
Label ok;
masm.branchPtr(Assembler::Equal, Address(scratch, WasmActivation::offsetOfFP()), ImmWord(0), &ok);
masm.breakpoint();
masm.bind(&ok);
#endif
// HandleThrow returns the innermost WasmActivation* in ReturnReg.
Register act = ReturnReg;
masm.wasmAssertNonExitInvariants(act);
masm.setFramePushed(FramePushedForEntrySP);
masm.loadStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
masm.Pop(scratch);
masm.loadStackPtr(Address(act, WasmActivation::offsetOfEntrySP()));
masm.Pop(ReturnReg);
masm.PopRegsInMask(NonVolatileRegs);
MOZ_ASSERT(masm.framePushed() == 0);
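The throw stub likewise stops loading the activation from a symbolic address: HandleThrow now hands back the innermost WasmActivation in ReturnReg, and the stub restores the stack pointer from its entrySP. A minimal model of that contract (types assumed):

```cpp
#include <cassert>
#include <cstdint>

struct WasmActivationSketch {
    uintptr_t entrySP;  // assumed field, cf. offsetOfEntrySP() above
};

// Assumed contract: the real HandleThrow also pops wasm frames; it returns
// the innermost activation, whose entrySP the stub loads into SP.
WasmActivationSketch* handleThrowSketch(WasmActivationSketch* innermost) {
    return innermost;
}

int main() {
    WasmActivationSketch act{0x7000};
    uintptr_t sp = handleThrowSketch(&act)->entrySP;
    assert(sp == 0x7000);
}
```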
@ -1164,6 +1139,10 @@ wasm::GenerateThrowStub(MacroAssembler& masm, Label* throwLabel)
return offsets;
}
static const LiveRegisterSet AllAllocatableRegs = LiveRegisterSet(
GeneralRegisterSet(Registers::AllocatableMask),
FloatRegisterSet(FloatRegisters::AllMask));
// Generate a stub that handles toggleable enter/leave frame traps or breakpoints.
// The trap records the frame pointer (via GenerateExitPrologue) and saves most
// registers so as not to affect the code generated by WasmBaselineCompile.
@ -1174,7 +1153,7 @@ wasm::GenerateDebugTrapStub(MacroAssembler& masm, Label* throwLabel)
masm.setFramePushed(0);
ProfilingOffsets offsets;
CallableOffsets offsets;
GenerateExitPrologue(masm, 0, ExitReason::DebugTrap, &offsets);
// Save all registers used between baseline compiler operations.

Some files were not shown because too many files have changed in this diff