/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- * vim: set ts=8 sts=4 et sw=4 tw=99: * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "jit/CodeGenerator.h" #include "mozilla/Assertions.h" #include "mozilla/Attributes.h" #include "mozilla/Casting.h" #include "mozilla/DebugOnly.h" #include "mozilla/EnumeratedArray.h" #include "mozilla/EnumeratedRange.h" #include "mozilla/MathAlgorithms.h" #include "mozilla/ScopeExit.h" #include "mozilla/Unused.h" #include #include "jslibmath.h" #include "jsmath.h" #include "jsnum.h" #include "builtin/Eval.h" #include "builtin/RegExp.h" #include "builtin/SelfHostingDefines.h" #include "builtin/String.h" #include "builtin/TypedObject.h" #include "gc/Nursery.h" #include "irregexp/NativeRegExpMacroAssembler.h" #include "jit/AtomicOperations.h" #include "jit/BaselineCompiler.h" #include "jit/IonBuilder.h" #include "jit/IonIC.h" #include "jit/IonOptimizationLevels.h" #include "jit/JitcodeMap.h" #include "jit/JitSpewer.h" #include "jit/Linker.h" #include "jit/Lowering.h" #include "jit/MIRGenerator.h" #include "jit/MoveEmitter.h" #include "jit/RangeAnalysis.h" #include "jit/SharedICHelpers.h" #include "jit/StackSlotAllocator.h" #include "jit/VMFunctions.h" #include "util/Unicode.h" #include "vm/AsyncFunction.h" #include "vm/AsyncIteration.h" #include "vm/MatchPairs.h" #include "vm/RegExpObject.h" #include "vm/RegExpStatics.h" #include "vm/StringType.h" #include "vm/TraceLogging.h" #include "vm/TypedArrayObject.h" #include "vtune/VTuneWrapper.h" #include "wasm/WasmStubs.h" #include "builtin/Boolean-inl.h" #include "jit/MacroAssembler-inl.h" #include "jit/shared/CodeGenerator-shared-inl.h" #include "jit/shared/Lowering-shared-inl.h" #include "jit/TemplateObject-inl.h" #include "vm/Interpreter-inl.h" #include "vm/JSScript-inl.h" using namespace js; using 
namespace js::jit; using mozilla::AssertedCast; using mozilla::DebugOnly; using mozilla::FloatingPoint; using mozilla::Maybe; using mozilla::NegativeInfinity; using mozilla::PositiveInfinity; using JS::GenericNaN; namespace js { namespace jit { class OutOfLineICFallback : public OutOfLineCodeBase { private: LInstruction* lir_; size_t cacheIndex_; size_t cacheInfoIndex_; public: OutOfLineICFallback(LInstruction* lir, size_t cacheIndex, size_t cacheInfoIndex) : lir_(lir), cacheIndex_(cacheIndex), cacheInfoIndex_(cacheInfoIndex) { } void bind(MacroAssembler* masm) override { // The binding of the initial jump is done in // CodeGenerator::visitOutOfLineICFallback. } size_t cacheIndex() const { return cacheIndex_; } size_t cacheInfoIndex() const { return cacheInfoIndex_; } LInstruction* lir() const { return lir_; } void accept(CodeGenerator* codegen) override { codegen->visitOutOfLineICFallback(this); } }; void CodeGeneratorShared::addIC(LInstruction* lir, size_t cacheIndex) { if (cacheIndex == SIZE_MAX) { masm.setOOM(); return; } DataPtr cache(this, cacheIndex); MInstruction* mir = lir->mirRaw()->toInstruction(); if (mir->resumePoint()) { cache->setScriptedLocation(mir->block()->info().script(), mir->resumePoint()->pc()); } else { cache->setIdempotent(); } Register temp = cache->scratchRegisterForEntryJump(); icInfo_.back().icOffsetForJump = masm.movWithPatch(ImmWord(-1), temp); masm.jump(Address(temp, 0)); MOZ_ASSERT(!icInfo_.empty()); OutOfLineICFallback* ool = new(alloc()) OutOfLineICFallback(lir, cacheIndex, icInfo_.length() - 1); addOutOfLineCode(ool, mir); masm.bind(ool->rejoin()); cache->setRejoinLabel(CodeOffset(ool->rejoin()->offset())); } typedef bool (*IonGetPropertyICFn)(JSContext*, HandleScript, IonGetPropertyIC*, HandleValue, HandleValue, MutableHandleValue); static const VMFunction IonGetPropertyICInfo = FunctionInfo(IonGetPropertyIC::update, "IonGetPropertyIC::update"); typedef bool (*IonSetPropertyICFn)(JSContext*, HandleScript, IonSetPropertyIC*, 
HandleObject, HandleValue, HandleValue); static const VMFunction IonSetPropertyICInfo = FunctionInfo(IonSetPropertyIC::update, "IonSetPropertyIC::update"); typedef bool (*IonGetPropSuperICFn)(JSContext*, HandleScript, IonGetPropSuperIC*, HandleObject, HandleValue, HandleValue, MutableHandleValue); static const VMFunction IonGetPropSuperICInfo = FunctionInfo(IonGetPropSuperIC::update, "IonGetPropSuperIC::update"); typedef bool (*IonGetNameICFn)(JSContext*, HandleScript, IonGetNameIC*, HandleObject, MutableHandleValue); static const VMFunction IonGetNameICInfo = FunctionInfo(IonGetNameIC::update, "IonGetNameIC::update"); typedef bool (*IonHasOwnICFn)(JSContext*, HandleScript, IonHasOwnIC*, HandleValue, HandleValue, int32_t*); static const VMFunction IonHasOwnICInfo = FunctionInfo(IonHasOwnIC::update, "IonHasOwnIC::update"); typedef JSObject* (*IonBindNameICFn)(JSContext*, HandleScript, IonBindNameIC*, HandleObject); static const VMFunction IonBindNameICInfo = FunctionInfo(IonBindNameIC::update, "IonBindNameIC::update"); typedef JSObject* (*IonGetIteratorICFn)(JSContext*, HandleScript, IonGetIteratorIC*, HandleValue); static const VMFunction IonGetIteratorICInfo = FunctionInfo(IonGetIteratorIC::update, "IonGetIteratorIC::update"); typedef bool (*IonInICFn)(JSContext*, HandleScript, IonInIC*, HandleValue, HandleObject, bool*); static const VMFunction IonInICInfo = FunctionInfo(IonInIC::update, "IonInIC::update"); typedef bool (*IonInstanceOfICFn)(JSContext*, HandleScript, IonInstanceOfIC*, HandleValue lhs, HandleObject rhs, bool* res); static const VMFunction IonInstanceOfInfo = FunctionInfo(IonInstanceOfIC::update, "IonInstanceOfIC::update"); typedef bool (*IonUnaryArithICFn)(JSContext* cx, HandleScript outerScript, IonUnaryArithIC* stub, HandleValue val, MutableHandleValue res); static const VMFunction IonUnaryArithICInfo = FunctionInfo(IonUnaryArithIC::update, "IonUnaryArithIC::update"); typedef bool (*IonBinaryArithICFn)(JSContext* cx, HandleScript outerScript, 
IonBinaryArithIC* stub, HandleValue lhs, HandleValue rhs, MutableHandleValue res); static const VMFunction IonBinaryArithICInfo = FunctionInfo(IonBinaryArithIC::update, "IonBinaryArithIC::update"); typedef bool (*IonCompareICFn)(JSContext* cx, HandleScript outerScript, IonCompareIC* stub, HandleValue lhs, HandleValue rhs, MutableHandleValue res); static const VMFunction IonCompareICInfo = FunctionInfo(IonCompareIC::update, "IonCompareIC::update"); void CodeGenerator::visitOutOfLineICFallback(OutOfLineICFallback* ool) { LInstruction* lir = ool->lir(); size_t cacheIndex = ool->cacheIndex(); size_t cacheInfoIndex = ool->cacheInfoIndex(); DataPtr ic(this, cacheIndex); // Register the location of the OOL path in the IC. ic->setFallbackLabel(masm.labelForPatch()); switch (ic->kind()) { case CacheKind::GetProp: case CacheKind::GetElem: { IonGetPropertyIC* getPropIC = ic->asGetPropertyIC(); saveLive(lir); pushArg(getPropIC->id()); pushArg(getPropIC->value()); icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1)); pushArg(ImmGCPtr(gen->info().script())); callVM(IonGetPropertyICInfo, lir); StoreValueTo(getPropIC->output()).generate(this); restoreLiveIgnore(lir, StoreValueTo(getPropIC->output()).clobbered()); masm.jump(ool->rejoin()); return; } case CacheKind::GetPropSuper: case CacheKind::GetElemSuper: { IonGetPropSuperIC* getPropSuperIC = ic->asGetPropSuperIC(); saveLive(lir); pushArg(getPropSuperIC->id()); pushArg(getPropSuperIC->receiver()); pushArg(getPropSuperIC->object()); icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1)); pushArg(ImmGCPtr(gen->info().script())); callVM(IonGetPropSuperICInfo, lir); StoreValueTo(getPropSuperIC->output()).generate(this); restoreLiveIgnore(lir, StoreValueTo(getPropSuperIC->output()).clobbered()); masm.jump(ool->rejoin()); return; } case CacheKind::SetProp: case CacheKind::SetElem: { IonSetPropertyIC* setPropIC = ic->asSetPropertyIC(); saveLive(lir); pushArg(setPropIC->rhs()); 
pushArg(setPropIC->id()); pushArg(setPropIC->object()); icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1)); pushArg(ImmGCPtr(gen->info().script())); callVM(IonSetPropertyICInfo, lir); restoreLive(lir); masm.jump(ool->rejoin()); return; } case CacheKind::GetName: { IonGetNameIC* getNameIC = ic->asGetNameIC(); saveLive(lir); pushArg(getNameIC->environment()); icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1)); pushArg(ImmGCPtr(gen->info().script())); callVM(IonGetNameICInfo, lir); StoreValueTo(getNameIC->output()).generate(this); restoreLiveIgnore(lir, StoreValueTo(getNameIC->output()).clobbered()); masm.jump(ool->rejoin()); return; } case CacheKind::BindName: { IonBindNameIC* bindNameIC = ic->asBindNameIC(); saveLive(lir); pushArg(bindNameIC->environment()); icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1)); pushArg(ImmGCPtr(gen->info().script())); callVM(IonBindNameICInfo, lir); StoreRegisterTo(bindNameIC->output()).generate(this); restoreLiveIgnore(lir, StoreRegisterTo(bindNameIC->output()).clobbered()); masm.jump(ool->rejoin()); return; } case CacheKind::GetIterator: { IonGetIteratorIC* getIteratorIC = ic->asGetIteratorIC(); saveLive(lir); pushArg(getIteratorIC->value()); icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1)); pushArg(ImmGCPtr(gen->info().script())); callVM(IonGetIteratorICInfo, lir); StoreRegisterTo(getIteratorIC->output()).generate(this); restoreLiveIgnore(lir, StoreRegisterTo(getIteratorIC->output()).clobbered()); masm.jump(ool->rejoin()); return; } case CacheKind::In: { IonInIC* inIC = ic->asInIC(); saveLive(lir); pushArg(inIC->object()); pushArg(inIC->key()); icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1)); pushArg(ImmGCPtr(gen->info().script())); callVM(IonInICInfo, lir); StoreRegisterTo(inIC->output()).generate(this); restoreLiveIgnore(lir, StoreRegisterTo(inIC->output()).clobbered()); masm.jump(ool->rejoin()); return; } case 
CacheKind::HasOwn: { IonHasOwnIC* hasOwnIC = ic->asHasOwnIC(); saveLive(lir); pushArg(hasOwnIC->id()); pushArg(hasOwnIC->value()); icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1)); pushArg(ImmGCPtr(gen->info().script())); callVM(IonHasOwnICInfo, lir); StoreRegisterTo(hasOwnIC->output()).generate(this); restoreLiveIgnore(lir, StoreRegisterTo(hasOwnIC->output()).clobbered()); masm.jump(ool->rejoin()); return; } case CacheKind::InstanceOf: { IonInstanceOfIC* hasInstanceOfIC = ic->asInstanceOfIC(); saveLive(lir); pushArg(hasInstanceOfIC->rhs()); pushArg(hasInstanceOfIC->lhs()); icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1)); pushArg(ImmGCPtr(gen->info().script())); callVM(IonInstanceOfInfo, lir); StoreRegisterTo(hasInstanceOfIC->output()).generate(this); restoreLiveIgnore(lir, StoreRegisterTo(hasInstanceOfIC->output()).clobbered()); masm.jump(ool->rejoin()); return; } case CacheKind::UnaryArith: { IonUnaryArithIC* unaryArithIC = ic->asUnaryArithIC(); saveLive(lir); pushArg(unaryArithIC->input()); icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1)); pushArg(ImmGCPtr(gen->info().script())); callVM(IonUnaryArithICInfo, lir); StoreValueTo(unaryArithIC->output()).generate(this); restoreLiveIgnore(lir, StoreValueTo(unaryArithIC->output()).clobbered()); masm.jump(ool->rejoin()); return; } case CacheKind::BinaryArith: { IonBinaryArithIC* binaryArithIC = ic->asBinaryArithIC(); saveLive(lir); pushArg(binaryArithIC->rhs()); pushArg(binaryArithIC->lhs()); icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1)); pushArg(ImmGCPtr(gen->info().script())); callVM(IonBinaryArithICInfo, lir); StoreValueTo(binaryArithIC->output()).generate(this); restoreLiveIgnore(lir, StoreValueTo(binaryArithIC->output()).clobbered()); masm.jump(ool->rejoin()); return; } case CacheKind::Compare: { IonCompareIC* compareIC = ic->asCompareIC(); saveLive(lir); pushArg(compareIC->rhs()); pushArg(compareIC->lhs()); 
icInfo_[cacheInfoIndex].icOffsetForPush = pushArgWithPatch(ImmWord(-1)); pushArg(ImmGCPtr(gen->info().script())); callVM(IonCompareICInfo, lir); StoreValueTo(compareIC->output()).generate(this); restoreLiveIgnore(lir, StoreValueTo(compareIC->output()).clobbered()); masm.jump(ool->rejoin()); return; } case CacheKind::Call: case CacheKind::TypeOf: case CacheKind::ToBool: case CacheKind::GetIntrinsic: case CacheKind::NewObject: MOZ_CRASH("Unsupported IC"); } MOZ_CRASH(); } StringObject* MNewStringObject::templateObj() const { return &templateObj_->as(); } CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm) : CodeGeneratorSpecific(gen, graph, masm) , ionScriptLabels_(gen->alloc()) , scriptCounts_(nullptr) , realmStubsToReadBarrier_(0) { } CodeGenerator::~CodeGenerator() { js_delete(scriptCounts_); } typedef bool (*StringToNumberFn)(JSContext*, JSString*, double*); static const VMFunction StringToNumberInfo = FunctionInfo(StringToNumber, "StringToNumber"); void CodeGenerator::visitValueToInt32(LValueToInt32* lir) { ValueOperand operand = ToValue(lir, LValueToInt32::Input); Register output = ToRegister(lir->output()); FloatRegister temp = ToFloatRegister(lir->tempFloat()); MDefinition* input; if (lir->mode() == LValueToInt32::NORMAL) input = lir->mirNormal()->input(); else input = lir->mirTruncate()->input(); Label fails; if (lir->mode() == LValueToInt32::TRUNCATE) { OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir()); // We can only handle strings in truncation contexts, like bitwise // operations. 
        // Set up an OOL VM call for string inputs only when the MIR says a
        // string is possible; otherwise pass null labels so the masm helper
        // skips the string path entirely.
        Label* stringEntry;
        Label* stringRejoin;
        Register stringReg;
        if (input->mightBeType(MIRType::String)) {
            stringReg = ToRegister(lir->temp());
            OutOfLineCode* oolString = oolCallVM(StringToNumberInfo, lir, ArgList(stringReg),
                                                 StoreFloatRegisterTo(temp));
            stringEntry = oolString->entry();
            stringRejoin = oolString->rejoin();
        } else {
            stringReg = InvalidReg;
            stringEntry = nullptr;
            stringRejoin = nullptr;
        }

        masm.truncateValueToInt32(operand, input, stringEntry, stringRejoin, oolDouble->entry(),
                                  stringReg, temp, output, &fails);
        masm.bind(oolDouble->rejoin());
    } else {
        masm.convertValueToInt32(operand, input, temp, output, &fails,
                                 lir->mirNormal()->canBeNegativeZero(),
                                 lir->mirNormal()->conversion());
    }

    // Any unconvertible input bails out to the interpreter.
    bailoutFrom(&fails, lir->snapshot());
}

// Convert a boxed Value to a double. Which tags are accepted depends on the
// MIR conversion mode; any tag not branched on above falls through to the
// unconditional bailout below the tag dispatch.
void
CodeGenerator::visitValueToDouble(LValueToDouble* lir)
{
    MToDouble* mir = lir->mir();
    ValueOperand operand = ToValue(lir, LValueToDouble::Input);
    FloatRegister output = ToFloatRegister(lir->output());

    Label isDouble, isInt32, isBool, isNull, isUndefined, done;
    bool hasBoolean = false, hasNull = false, hasUndefined = false;

    {
        ScratchTagScope tag(masm, operand);
        masm.splitTagForTest(operand, tag);

        masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
        masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

        if (mir->conversion() != MToFPInstruction::NumbersOnly) {
            masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
            masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
            hasBoolean = true;
            hasUndefined = true;
            if (mir->conversion() != MToFPInstruction::NonNullNonStringPrimitives) {
                masm.branchTestNull(Assembler::Equal, tag, &isNull);
                hasNull = true;
            }
        }
    }

    // Reached only when no accepted tag matched.
    bailout(lir->snapshot());

    if (hasNull) {
        masm.bind(&isNull);
        masm.loadConstantDouble(0.0, output);
        masm.jump(&done);
    }

    if (hasUndefined) {
        masm.bind(&isUndefined);
        masm.loadConstantDouble(GenericNaN(), output);
        masm.jump(&done);
    }

    if (hasBoolean) {
        masm.bind(&isBool);
        masm.boolValueToDouble(operand, output);
        masm.jump(&done);
    }

    masm.bind(&isInt32);
    masm.int32ValueToDouble(operand, output);
    masm.jump(&done);

    masm.bind(&isDouble);
    masm.unboxDouble(operand, output);
    masm.bind(&done);
}

// Convert a boxed Value to a float32. Mirrors visitValueToDouble: the set of
// accepted tags depends on the MIR conversion mode, and any other tag falls
// through the tag dispatch into the unconditional bailout.
void
CodeGenerator::visitValueToFloat32(LValueToFloat32* lir)
{
    MToFloat32* mir = lir->mir();
    ValueOperand operand = ToValue(lir, LValueToFloat32::Input);
    FloatRegister output = ToFloatRegister(lir->output());

    Label isDouble, isInt32, isBool, isNull, isUndefined, done;
    bool hasBoolean = false, hasNull = false, hasUndefined = false;

    {
        ScratchTagScope tag(masm, operand);
        masm.splitTagForTest(operand, tag);

        masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
        masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

        if (mir->conversion() != MToFPInstruction::NumbersOnly) {
            masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
            masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
            hasBoolean = true;
            hasUndefined = true;
            if (mir->conversion() != MToFPInstruction::NonNullNonStringPrimitives) {
                masm.branchTestNull(Assembler::Equal, tag, &isNull);
                hasNull = true;
            }
        }
    }

    // Reached only when no accepted tag matched.
    bailout(lir->snapshot());

    if (hasNull) {
        masm.bind(&isNull);
        masm.loadConstantFloat32(0.0f, output);
        masm.jump(&done);
    }

    if (hasUndefined) {
        masm.bind(&isUndefined);
        masm.loadConstantFloat32(float(GenericNaN()), output);
        masm.jump(&done);
    }

    if (hasBoolean) {
        masm.bind(&isBool);
        masm.boolValueToFloat32(operand, output);
        masm.jump(&done);
    }

    masm.bind(&isInt32);
    masm.int32ValueToFloat32(operand, output);
    masm.jump(&done);

    masm.bind(&isDouble);
    // ARM and MIPS may not have a double register available if we've
    // allocated output as a float32.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) masm.unboxDouble(operand, ScratchDoubleReg); masm.convertDoubleToFloat32(ScratchDoubleReg, output); #else masm.unboxDouble(operand, output); masm.convertDoubleToFloat32(output, output); #endif masm.bind(&done); } void CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir) { masm.convertInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output())); } void CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir) { masm.convertFloat32ToDouble(ToFloatRegister(lir->input()), ToFloatRegister(lir->output())); } void CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir) { masm.convertDoubleToFloat32(ToFloatRegister(lir->input()), ToFloatRegister(lir->output())); } void CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir) { masm.convertInt32ToFloat32(ToRegister(lir->input()), ToFloatRegister(lir->output())); } void CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir) { Label fail; FloatRegister input = ToFloatRegister(lir->input()); Register output = ToRegister(lir->output()); masm.convertDoubleToInt32(input, output, &fail, lir->mir()->canBeNegativeZero()); bailoutFrom(&fail, lir->snapshot()); } void CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir) { Label fail; FloatRegister input = ToFloatRegister(lir->input()); Register output = ToRegister(lir->output()); masm.convertFloat32ToInt32(input, output, &fail, lir->mir()->canBeNegativeZero()); bailoutFrom(&fail, lir->snapshot()); } void CodeGenerator::emitOOLTestObject(Register objreg, Label* ifEmulatesUndefined, Label* ifDoesntEmulateUndefined, Register scratch) { saveVolatile(scratch); masm.setupUnalignedABICall(scratch); masm.passABIArg(objreg); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::EmulatesUndefined)); masm.storeCallBoolResult(scratch); restoreVolatile(scratch); masm.branchIfTrueBool(scratch, ifEmulatesUndefined); masm.jump(ifDoesntEmulateUndefined); } // Base out-of-line code generator for all tests of the truthiness of an // 
object, where the object might not be truthy. (Recall that per spec all // objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class // flag to permit objects to look like |undefined| in certain contexts, // including in object truthiness testing.) We check truthiness inline except // when we're testing it on a proxy (or if TI guarantees us that the specified // object will never emulate |undefined|), in which case out-of-line code will // call EmulatesUndefined for a conclusive answer. class OutOfLineTestObject : public OutOfLineCodeBase { Register objreg_; Register scratch_; Label* ifEmulatesUndefined_; Label* ifDoesntEmulateUndefined_; #ifdef DEBUG bool initialized() { return ifEmulatesUndefined_ != nullptr; } #endif public: OutOfLineTestObject() : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr) { } void accept(CodeGenerator* codegen) final { MOZ_ASSERT(initialized()); codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_, ifDoesntEmulateUndefined_, scratch_); } // Specify the register where the object to be tested is found, labels to // jump to if the object is truthy or falsy, and a scratch register for // use in the out-of-line path. void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined, Label* ifDoesntEmulateUndefined, Register scratch) { MOZ_ASSERT(!initialized()); MOZ_ASSERT(ifEmulatesUndefined); objreg_ = objreg; scratch_ = scratch; ifEmulatesUndefined_ = ifEmulatesUndefined; ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined; } }; // A subclass of OutOfLineTestObject containing two extra labels, for use when // the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line // code. The user should bind these labels in inline code, and specify them as // targets via setInputAndTargets, as appropriate. 
class OutOfLineTestObjectWithLabels : public OutOfLineTestObject { Label label1_; Label label2_; public: OutOfLineTestObjectWithLabels() { } Label* label1() { return &label1_; } Label* label2() { return &label2_; } }; void CodeGenerator::testObjectEmulatesUndefinedKernel(Register objreg, Label* ifEmulatesUndefined, Label* ifDoesntEmulateUndefined, Register scratch, OutOfLineTestObject* ool) { ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined, scratch); // Perform a fast-path check of the object's class flags if the object's // not a proxy. Let out-of-line code handle the slow cases that require // saving registers, making a function call, and restoring registers. masm.branchIfObjectEmulatesUndefined(objreg, scratch, ool->entry(), ifEmulatesUndefined); } void CodeGenerator::branchTestObjectEmulatesUndefined(Register objreg, Label* ifEmulatesUndefined, Label* ifDoesntEmulateUndefined, Register scratch, OutOfLineTestObject* ool) { MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(), "ifDoesntEmulateUndefined will be bound to the fallthrough path"); testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined, scratch, ool); masm.bind(ifDoesntEmulateUndefined); } void CodeGenerator::testObjectEmulatesUndefined(Register objreg, Label* ifEmulatesUndefined, Label* ifDoesntEmulateUndefined, Register scratch, OutOfLineTestObject* ool) { testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined, scratch, ool); masm.jump(ifDoesntEmulateUndefined); } void CodeGenerator::testValueTruthyKernel(const ValueOperand& value, const LDefinition* scratch1, const LDefinition* scratch2, FloatRegister fr, Label* ifTruthy, Label* ifFalsy, OutOfLineTestObject* ool, MDefinition* valueMIR) { // Count the number of possible type tags we might have, so we'll know when // we've checked them all and hence can avoid emitting a tag check for the // last one. 
In particular, whenever tagCount is 1 that means we've tried // all but one of them already so we know exactly what's left based on the // mightBe* booleans. bool mightBeUndefined = valueMIR->mightBeType(MIRType::Undefined); bool mightBeNull = valueMIR->mightBeType(MIRType::Null); bool mightBeBoolean = valueMIR->mightBeType(MIRType::Boolean); bool mightBeInt32 = valueMIR->mightBeType(MIRType::Int32); bool mightBeObject = valueMIR->mightBeType(MIRType::Object); bool mightBeString = valueMIR->mightBeType(MIRType::String); bool mightBeSymbol = valueMIR->mightBeType(MIRType::Symbol); bool mightBeDouble = valueMIR->mightBeType(MIRType::Double); int tagCount = int(mightBeUndefined) + int(mightBeNull) + int(mightBeBoolean) + int(mightBeInt32) + int(mightBeObject) + int(mightBeString) + int(mightBeSymbol) + int(mightBeDouble); MOZ_ASSERT_IF(!valueMIR->emptyResultTypeSet(), tagCount > 0); // If we know we're null or undefined, we're definitely falsy, no // need to even check the tag. if (int(mightBeNull) + int(mightBeUndefined) == tagCount) { masm.jump(ifFalsy); return; } ScratchTagScope tag(masm, value); masm.splitTagForTest(value, tag); if (mightBeUndefined) { MOZ_ASSERT(tagCount > 1); masm.branchTestUndefined(Assembler::Equal, tag, ifFalsy); --tagCount; } if (mightBeNull) { MOZ_ASSERT(tagCount > 1); masm.branchTestNull(Assembler::Equal, tag, ifFalsy); --tagCount; } if (mightBeBoolean) { MOZ_ASSERT(tagCount != 0); Label notBoolean; if (tagCount != 1) masm.branchTestBoolean(Assembler::NotEqual, tag, ¬Boolean); { ScratchTagScopeRelease _(&tag); masm.branchTestBooleanTruthy(false, value, ifFalsy); } if (tagCount != 1) masm.jump(ifTruthy); // Else just fall through to truthiness. 
masm.bind(¬Boolean); --tagCount; } if (mightBeInt32) { MOZ_ASSERT(tagCount != 0); Label notInt32; if (tagCount != 1) masm.branchTestInt32(Assembler::NotEqual, tag, ¬Int32); { ScratchTagScopeRelease _(&tag); masm.branchTestInt32Truthy(false, value, ifFalsy); } if (tagCount != 1) masm.jump(ifTruthy); // Else just fall through to truthiness. masm.bind(¬Int32); --tagCount; } if (mightBeObject) { MOZ_ASSERT(tagCount != 0); if (ool) { Label notObject; if (tagCount != 1) masm.branchTestObject(Assembler::NotEqual, tag, ¬Object); { ScratchTagScopeRelease _(&tag); Register objreg = masm.extractObject(value, ToRegister(scratch1)); testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, ToRegister(scratch2), ool); } masm.bind(¬Object); } else { if (tagCount != 1) masm.branchTestObject(Assembler::Equal, tag, ifTruthy); // Else just fall through to truthiness. } --tagCount; } else { MOZ_ASSERT(!ool, "We better not have an unused OOL path, since the code generator will try to " "generate code for it but we never set up its labels, which will cause null " "derefs of those labels."); } if (mightBeString) { // Test if a string is non-empty. MOZ_ASSERT(tagCount != 0); Label notString; if (tagCount != 1) masm.branchTestString(Assembler::NotEqual, tag, ¬String); { ScratchTagScopeRelease _(&tag); masm.branchTestStringTruthy(false, value, ifFalsy); } if (tagCount != 1) masm.jump(ifTruthy); // Else just fall through to truthiness. masm.bind(¬String); --tagCount; } if (mightBeSymbol) { // All symbols are truthy. MOZ_ASSERT(tagCount != 0); if (tagCount != 1) masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy); // Else fall through to ifTruthy. --tagCount; } if (mightBeDouble) { MOZ_ASSERT(tagCount == 1); // If we reach here the value is a double. { ScratchTagScopeRelease _(&tag); masm.unboxDouble(value, fr); masm.branchTestDoubleTruthy(false, fr, ifFalsy); } --tagCount; } MOZ_ASSERT(tagCount == 0); // Fall through for truthy. 
}

// Like testValueTruthyKernel, but ends with an explicit jump to ifTruthy
// instead of falling through.
void
CodeGenerator::testValueTruthy(const ValueOperand& value,
                               const LDefinition* scratch1, const LDefinition* scratch2,
                               FloatRegister fr,
                               Label* ifTruthy, Label* ifFalsy,
                               OutOfLineTestObject* ool,
                               MDefinition* valueMIR)
{
    testValueTruthyKernel(value, scratch1, scratch2, fr, ifTruthy, ifFalsy, ool, valueMIR);
    masm.jump(ifTruthy);
}

// Branch on the truthiness of an object (or object-or-null) input.
void
CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir)
{
    MIRType inputType = lir->mir()->input()->type();
    MOZ_ASSERT(inputType == MIRType::ObjectOrNull || lir->mir()->operandMightEmulateUndefined(),
               "If the object couldn't emulate undefined, this should have been folded.");

    Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
    Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
    Register input = ToRegister(lir->input());

    if (lir->mir()->operandMightEmulateUndefined()) {
        // Null (a zero pointer in ObjectOrNull inputs) is falsy.
        if (inputType == MIRType::ObjectOrNull)
            masm.branchTestPtr(Assembler::Zero, input, input, falsy);

        OutOfLineTestObject* ool = new(alloc()) OutOfLineTestObject();
        addOutOfLineCode(ool, lir->mir());

        testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()), ool);
    } else {
        MOZ_ASSERT(inputType == MIRType::ObjectOrNull);
        testZeroEmitBranch(Assembler::NotEqual, input, lir->ifTruthy(), lir->ifFalsy());
    }
}

// Branch on the truthiness of a boxed Value input.
void
CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir)
{
    OutOfLineTestObject* ool = nullptr;
    MDefinition* input = lir->mir()->input();
    // Unfortunately, it's possible that someone (e.g. phi elimination) switched
    // out our input after we did cacheOperandMightEmulateUndefined. So we
    // might think it can emulate undefined _and_ know that it can't be an
    // object.
    // Only allocate the OOL emulates-undefined path when an object is
    // actually possible (see the caveat in the comment above).
    if (lir->mir()->operandMightEmulateUndefined() && input->mightBeType(MIRType::Object)) {
        ool = new(alloc()) OutOfLineTestObject();
        addOutOfLineCode(ool, lir->mir());
    }

    Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
    Label* falsy = getJumpLabelForBranch(lir->ifFalsy());

    testValueTruthy(ToValue(lir, LTestVAndBranch::Input),
                    lir->temp1(), lir->temp2(),
                    ToFloatRegister(lir->tempFloat()),
                    truthy, falsy, ool, input);
}

// Dispatch on a function pointer (or its ObjectGroup): compare against each
// case and jump to that case's block, with the last case (or the fallback)
// taken unconditionally.
void
CodeGenerator::visitFunctionDispatch(LFunctionDispatch* lir)
{
    MFunctionDispatch* mir = lir->mir();
    Register input = ToRegister(lir->input());
    Label* lastLabel;
    size_t casesWithFallback;

    // Determine if the last case is fallback or an ordinary case.
    if (!mir->hasFallback()) {
        MOZ_ASSERT(mir->numCases() > 0);
        casesWithFallback = mir->numCases();
        lastLabel = skipTrivialBlocks(mir->getCaseBlock(mir->numCases() - 1))->lir()->label();
    } else {
        casesWithFallback = mir->numCases() + 1;
        lastLabel = skipTrivialBlocks(mir->getFallback())->lir()->label();
    }

    // Compare function pointers, except for the last case.
    for (size_t i = 0; i < casesWithFallback - 1; i++) {
        MOZ_ASSERT(i < mir->numCases());

        LBlock* target = skipTrivialBlocks(mir->getCaseBlock(i))->lir();
        if (ObjectGroup* funcGroup = mir->getCaseObjectGroup(i)) {
            masm.branchTestObjGroupUnsafe(Assembler::Equal, input, funcGroup, target->label());
        } else {
            JSFunction* func = mir->getCase(i);
            masm.branchPtr(Assembler::Equal, input, ImmGCPtr(func), target->label());
        }
    }

    // Jump to the last case.
    masm.jump(lastLabel);
}

// Dispatch on an object's ObjectGroup via the inline property table.
void
CodeGenerator::visitObjectGroupDispatch(LObjectGroupDispatch* lir)
{
    MObjectGroupDispatch* mir = lir->mir();
    Register input = ToRegister(lir->input());
    Register temp = ToRegister(lir->temp());

    // Load the incoming ObjectGroup in temp.
    masm.loadObjGroupUnsafe(input, temp);

    // Compare ObjectGroups.
MacroAssembler::BranchGCPtr lastBranch; LBlock* lastBlock = nullptr; InlinePropertyTable* propTable = mir->propTable(); for (size_t i = 0; i < mir->numCases(); i++) { JSFunction* func = mir->getCase(i); LBlock* target = skipTrivialBlocks(mir->getCaseBlock(i))->lir(); DebugOnly found = false; for (size_t j = 0; j < propTable->numEntries(); j++) { if (propTable->getFunction(j) != func) continue; if (lastBranch.isInitialized()) lastBranch.emit(masm); ObjectGroup* group = propTable->getObjectGroup(j); lastBranch = MacroAssembler::BranchGCPtr(Assembler::Equal, temp, ImmGCPtr(group), target->label()); lastBlock = target; found = true; } MOZ_ASSERT(found); } // Jump to fallback block if we have an unknown ObjectGroup. If there's no // fallback block, we should have handled all cases. if (!mir->hasFallback()) { MOZ_ASSERT(lastBranch.isInitialized()); #ifdef DEBUG Label ok; lastBranch.relink(&ok); lastBranch.emit(masm); masm.assumeUnreachable("Unexpected ObjectGroup"); masm.bind(&ok); #endif if (!isNextBlock(lastBlock)) masm.jump(lastBlock->label()); return; } LBlock* fallback = skipTrivialBlocks(mir->getFallback())->lir(); if (!lastBranch.isInitialized()) { if (!isNextBlock(fallback)) masm.jump(fallback->label()); return; } lastBranch.invertCondition(); lastBranch.relink(fallback->label()); lastBranch.emit(masm); if (!isNextBlock(lastBlock)) masm.jump(lastBlock->label()); } void CodeGenerator::visitBooleanToString(LBooleanToString* lir) { Register input = ToRegister(lir->input()); Register output = ToRegister(lir->output()); const JSAtomState& names = gen->runtime->names(); Label true_, done; masm.branchTest32(Assembler::NonZero, input, input, &true_); masm.movePtr(ImmGCPtr(names.false_), output); masm.jump(&done); masm.bind(&true_); masm.movePtr(ImmGCPtr(names.true_), output); masm.bind(&done); } void CodeGenerator::emitIntToString(Register input, Register output, Label* ool) { masm.boundsCheck32PowerOfTwo(input, StaticStrings::INT_STATIC_LIMIT, ool); // Fast path for 
small integers. masm.movePtr(ImmPtr(&gen->runtime->staticStrings().intStaticTable), output); masm.loadPtr(BaseIndex(output, input, ScalePointer), output); } typedef JSFlatString* (*IntToStringFn)(JSContext*, int); static const VMFunction IntToStringInfo = FunctionInfo(Int32ToString, "Int32ToString"); void CodeGenerator::visitIntToString(LIntToString* lir) { Register input = ToRegister(lir->input()); Register output = ToRegister(lir->output()); OutOfLineCode* ool = oolCallVM(IntToStringInfo, lir, ArgList(input), StoreRegisterTo(output)); emitIntToString(input, output, ool->entry()); masm.bind(ool->rejoin()); } typedef JSString* (*DoubleToStringFn)(JSContext*, double); static const VMFunction DoubleToStringInfo = FunctionInfo(NumberToString, "NumberToString"); void CodeGenerator::visitDoubleToString(LDoubleToString* lir) { FloatRegister input = ToFloatRegister(lir->input()); Register temp = ToRegister(lir->tempInt()); Register output = ToRegister(lir->output()); OutOfLineCode* ool = oolCallVM(DoubleToStringInfo, lir, ArgList(input), StoreRegisterTo(output)); // Try double to integer conversion and run integer to string code. 
masm.convertDoubleToInt32(input, temp, ool->entry(), true); emitIntToString(temp, output, ool->entry()); masm.bind(ool->rejoin()); } typedef JSString* (*PrimitiveToStringFn)(JSContext*, HandleValue); static const VMFunction PrimitiveToStringInfo = FunctionInfo(ToStringSlow, "ToStringSlow"); void CodeGenerator::visitValueToString(LValueToString* lir) { ValueOperand input = ToValue(lir, LValueToString::Input); Register output = ToRegister(lir->output()); OutOfLineCode* ool = oolCallVM(PrimitiveToStringInfo, lir, ArgList(input), StoreRegisterTo(output)); Label done; Register tag = masm.extractTag(input, output); const JSAtomState& names = gen->runtime->names(); // String if (lir->mir()->input()->mightBeType(MIRType::String)) { Label notString; masm.branchTestString(Assembler::NotEqual, tag, ¬String); masm.unboxString(input, output); masm.jump(&done); masm.bind(¬String); } // Integer if (lir->mir()->input()->mightBeType(MIRType::Int32)) { Label notInteger; masm.branchTestInt32(Assembler::NotEqual, tag, ¬Integer); Register unboxed = ToTempUnboxRegister(lir->tempToUnbox()); unboxed = masm.extractInt32(input, unboxed); emitIntToString(unboxed, output, ool->entry()); masm.jump(&done); masm.bind(¬Integer); } // Double if (lir->mir()->input()->mightBeType(MIRType::Double)) { // Note: no fastpath. Need two extra registers and can only convert doubles // that fit integers and are smaller than StaticStrings::INT_STATIC_LIMIT. 
masm.branchTestDouble(Assembler::Equal, tag, ool->entry()); } // Undefined if (lir->mir()->input()->mightBeType(MIRType::Undefined)) { Label notUndefined; masm.branchTestUndefined(Assembler::NotEqual, tag, ¬Undefined); masm.movePtr(ImmGCPtr(names.undefined), output); masm.jump(&done); masm.bind(¬Undefined); } // Null if (lir->mir()->input()->mightBeType(MIRType::Null)) { Label notNull; masm.branchTestNull(Assembler::NotEqual, tag, ¬Null); masm.movePtr(ImmGCPtr(names.null), output); masm.jump(&done); masm.bind(¬Null); } // Boolean if (lir->mir()->input()->mightBeType(MIRType::Boolean)) { Label notBoolean, true_; masm.branchTestBoolean(Assembler::NotEqual, tag, ¬Boolean); masm.branchTestBooleanTruthy(true, input, &true_); masm.movePtr(ImmGCPtr(names.false_), output); masm.jump(&done); masm.bind(&true_); masm.movePtr(ImmGCPtr(names.true_), output); masm.jump(&done); masm.bind(¬Boolean); } // Object if (lir->mir()->input()->mightBeType(MIRType::Object)) { // Bail. MOZ_ASSERT(lir->mir()->fallible()); Label bail; masm.branchTestObject(Assembler::Equal, tag, &bail); bailoutFrom(&bail, lir->snapshot()); } // Symbol if (lir->mir()->input()->mightBeType(MIRType::Symbol)) { // Bail. 
MOZ_ASSERT(lir->mir()->fallible()); Label bail; masm.branchTestSymbol(Assembler::Equal, tag, &bail); bailoutFrom(&bail, lir->snapshot()); } #ifdef DEBUG masm.assumeUnreachable("Unexpected type for MValueToString."); #endif masm.bind(&done); masm.bind(ool->rejoin()); } typedef JSObject* (*ToObjectFn)(JSContext*, HandleValue, bool); static const VMFunction ToObjectInfo = FunctionInfo(ToObjectSlow, "ToObjectSlow"); void CodeGenerator::visitValueToObject(LValueToObject* lir) { ValueOperand input = ToValue(lir, LValueToObject::Input); Register output = ToRegister(lir->output()); OutOfLineCode* ool = oolCallVM(ToObjectInfo, lir, ArgList(input, Imm32(0)), StoreRegisterTo(output)); masm.branchTestObject(Assembler::NotEqual, input, ool->entry()); masm.unboxObject(input, output); masm.bind(ool->rejoin()); } void CodeGenerator::visitValueToObjectOrNull(LValueToObjectOrNull* lir) { ValueOperand input = ToValue(lir, LValueToObjectOrNull::Input); Register output = ToRegister(lir->output()); OutOfLineCode* ool = oolCallVM(ToObjectInfo, lir, ArgList(input, Imm32(0)), StoreRegisterTo(output)); Label isObject; masm.branchTestObject(Assembler::Equal, input, &isObject); masm.branchTestNull(Assembler::NotEqual, input, ool->entry()); masm.movePtr(ImmWord(0), output); masm.jump(ool->rejoin()); masm.bind(&isObject); masm.unboxObject(input, output); masm.bind(ool->rejoin()); } static void EmitStoreBufferMutation(MacroAssembler& masm, Register holder, size_t offset, Register buffer, LiveGeneralRegisterSet& liveVolatiles, void (*fun)(js::gc::StoreBuffer*, js::gc::Cell**)) { Label callVM; Label exit; // Call into the VM to barrier the write. The only registers that need to // be preserved are those in liveVolatiles, so once they are saved on the // stack all volatile registers are available for use. 
masm.bind(&callVM); masm.PushRegsInMask(liveVolatiles); AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile()); regs.takeUnchecked(buffer); regs.takeUnchecked(holder); Register addrReg = regs.takeAny(); masm.computeEffectiveAddress(Address(holder, offset), addrReg); bool needExtraReg = !regs.hasAny(); if (needExtraReg) { masm.push(holder); masm.setupUnalignedABICall(holder); } else { masm.setupUnalignedABICall(regs.takeAny()); } masm.passABIArg(buffer); masm.passABIArg(addrReg); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, fun), MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther); if (needExtraReg) masm.pop(holder); masm.PopRegsInMask(liveVolatiles); masm.bind(&exit); } // Warning: this function modifies prev and next. static void EmitPostWriteBarrierS(MacroAssembler& masm, Register holder, size_t offset, Register prev, Register next, LiveGeneralRegisterSet& liveVolatiles) { Label exit; Label checkRemove, putCell; // if (next && (buffer = next->storeBuffer())) // but we never pass in nullptr for next. 
Register storebuffer = next; masm.loadStoreBuffer(next, storebuffer); masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &checkRemove); // if (prev && prev->storeBuffer()) masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &putCell); masm.loadStoreBuffer(prev, prev); masm.branchPtr(Assembler::NotEqual, prev, ImmWord(0), &exit); // buffer->putCell(cellp) masm.bind(&putCell); EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles, JSString::addCellAddressToStoreBuffer); masm.jump(&exit); // if (prev && (buffer = prev->storeBuffer())) masm.bind(&checkRemove); masm.branchPtr(Assembler::Equal, prev, ImmWord(0), &exit); masm.loadStoreBuffer(prev, storebuffer); masm.branchPtr(Assembler::Equal, storebuffer, ImmWord(0), &exit); EmitStoreBufferMutation(masm, holder, offset, storebuffer, liveVolatiles, JSString::removeCellAddressFromStoreBuffer); masm.bind(&exit); } typedef JSObject* (*CloneRegExpObjectFn)(JSContext*, Handle); static const VMFunction CloneRegExpObjectInfo = FunctionInfo(CloneRegExpObject, "CloneRegExpObject"); void CodeGenerator::visitRegExp(LRegExp* lir) { Register output = ToRegister(lir->output()); Register temp = ToRegister(lir->temp()); JSObject* source = lir->mir()->source(); OutOfLineCode* ool = oolCallVM(CloneRegExpObjectInfo, lir, ArgList(ImmGCPtr(source)), StoreRegisterTo(output)); if (lir->mir()->hasShared()) { TemplateObject templateObject(source); masm.createGCObject(output, temp, templateObject, gc::DefaultHeap, ool->entry()); } else { masm.jump(ool->entry()); } masm.bind(ool->rejoin()); } // Amount of space to reserve on the stack when executing RegExps inline. 
// NOTE(review): this region was recovered from a mangled extraction. Several
// template-argument lists appear to have been stripped (flagged inline below)
// and a few "&not..." label references were corrupted into "¬..." by entity
// decoding — confirm each flagged site against upstream before relying on it.

// Amount of stack needed to run a RegExp inline: the InputOutputData header,
// the MatchPairs header, and room for the maximum number of MatchPair slots.
static const size_t RegExpReservedStack = sizeof(irregexp::InputOutputData)
                                        + sizeof(MatchPairs)
                                        + RegExpObject::MaxPairCount * sizeof(MatchPair);

// Stack offset of the first MatchPair, given where the InputOutputData starts.
static size_t
RegExpPairsVectorStartOffset(size_t inputOutputDataStartOffset)
{
    return inputOutputDataStartOffset + sizeof(irregexp::InputOutputData) + sizeof(MatchPairs);
}

// Stack address of MatchPairs::count, given where the InputOutputData starts.
static Address
RegExpPairCountAddress(MacroAssembler& masm, size_t inputOutputDataStartOffset)
{
    return Address(masm.getStackPointer(), inputOutputDataStartOffset
                                           + sizeof(irregexp::InputOutputData)
                                           + MatchPairs::offsetOfPairCount());
}

// Prepare an InputOutputData and optional MatchPairs which space has been
// allocated for on the stack, and try to execute a RegExp on a string input.
// If the RegExp was successfully executed and matched the input, fallthrough,
// otherwise jump to notFound or failure.
static bool
PrepareAndExecuteRegExp(JSContext* cx, MacroAssembler& masm, Register regexp, Register input,
                        Register lastIndex, Register temp1, Register temp2, Register temp3,
                        size_t inputOutputDataStartOffset, RegExpShared::CompilationMode mode,
                        bool stringsCanBeInNursery, Label* notFound, Label* failure)
{
    JitSpew(JitSpew_Codegen, "# Emitting PrepareAndExecuteRegExp");

    /*
     * [SMDOC] Stack layout for PrepareAndExecuteRegExp
     *
     * inputOutputDataStartOffset +-----> +---------------+
     *                                    |InputOutputData|
     *          inputStartAddress +---------->  inputStart|
     *            inputEndAddress +---------->    inputEnd|
     *          startIndexAddress +---------->  startIndex|
     *            endIndexAddress +---------->    endIndex|
     *      matchesPointerAddress +---------->     matches|
     *         matchResultAddress +---------->      result|
     *                                    +---------------+
     *      matchPairsStartOffset +-----> +---------------+
     *                                    |  MatchPairs   |
     *           pairCountAddress +----------->      count|
     *        pairsPointerAddress +----------->      pairs|
     *                                    +---------------+
     *     pairsVectorStartOffset +-----> +---------------+
     *                                    |   MatchPair   |
     *                                    |       start   |  <-------+
     *                                    |       limit   |          | Reserved space for
     *                                    +---------------+          | `RegExpObject::MaxPairCount`
     *                                           .                   | MatchPair objects.
     *                                           .                   |
     *                                    +---------------+          |
     *                                    |   MatchPair   |          |
     *                                    |       start   |  <-------+
     *                                    |       limit   |
     *                                    +---------------+
     */

    size_t matchPairsStartOffset = inputOutputDataStartOffset + sizeof(irregexp::InputOutputData);
    size_t pairsVectorStartOffset = RegExpPairsVectorStartOffset(inputOutputDataStartOffset);

    // Addresses of the individual InputOutputData fields on the stack.
    Address inputStartAddress(masm.getStackPointer(),
        inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, inputStart));
    Address inputEndAddress(masm.getStackPointer(),
        inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, inputEnd));
    Address matchesPointerAddress(masm.getStackPointer(),
        inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, matches));
    Address startIndexAddress(masm.getStackPointer(),
        inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, startIndex));
    Address endIndexAddress(masm.getStackPointer(),
        inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, endIndex));
    Address matchResultAddress(masm.getStackPointer(),
        inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, result));
    Address pairCountAddress = RegExpPairCountAddress(masm, inputOutputDataStartOffset);
    Address pairsPointerAddress(masm.getStackPointer(),
        matchPairsStartOffset + MatchPairs::offsetOfPairs());

    RegExpStatics* res = GlobalObject::getRegExpStatics(cx, cx->global());
    if (!res)
        return false;
#ifdef JS_USE_LINK_REGISTER
    if (mode != RegExpShared::MatchOnly)
        masm.pushReturnAddress();
#endif
    if (mode == RegExpShared::Normal) {
        // First, fill in a skeletal MatchPairs instance on the stack. This will be
        // passed to the OOL stub in the caller if we aren't able to execute the
        // RegExp inline, and that stub needs to be able to determine whether the
        // execution finished successfully.

        // Initialize MatchPairs::pairCount to 1, the correct value can only
        // be determined after loading the RegExpShared.
        masm.store32(Imm32(1), pairCountAddress);

        // Initialize MatchPairs::pairs[0]::start to MatchPair::NoMatch.
        Address firstMatchPairStartAddress(masm.getStackPointer(),
            pairsVectorStartOffset + offsetof(MatchPair, start));
        masm.store32(Imm32(MatchPair::NoMatch), firstMatchPairStartAddress);

        // Assign the MatchPairs::pairs pointer to the first MatchPair object.
        Address pairsVectorAddress(masm.getStackPointer(), pairsVectorStartOffset);
        masm.computeEffectiveAddress(pairsVectorAddress, temp1);
        masm.storePtr(temp1, pairsPointerAddress);
    }

    // Check for a linear input string.
    masm.branchIfRopeOrExternal(input, temp1, failure);

    // Get the RegExpShared for the RegExp.
    masm.loadPtr(Address(regexp, NativeObject::getFixedSlotOffset(RegExpObject::PRIVATE_SLOT)),
                 temp1);
    masm.branchPtr(Assembler::Equal, temp1, ImmWord(0), failure);

    // ES6 21.2.2.2 step 2.
    // See RegExp.cpp ExecuteRegExp for more detail.
    {
        Label done;

        masm.branchTest32(Assembler::Zero, Address(temp1, RegExpShared::offsetOfFlags()),
                          Imm32(UnicodeFlag), &done);

        // If input is latin1, there should not be surrogate pair.
        masm.branchLatin1String(input, &done);

        // Check if |lastIndex > 0 && lastIndex < input->length()|.
        // lastIndex should already have no sign here.
        masm.branchTest32(Assembler::Zero, lastIndex, lastIndex, &done);
        masm.loadStringLength(input, temp2);
        masm.branch32(Assembler::AboveOrEqual, lastIndex, temp2, &done);

        // For TrailSurrogateMin ≤ x ≤ TrailSurrogateMax and
        // LeadSurrogateMin ≤ x ≤ LeadSurrogateMax, the following
        // equations hold.
        //
        //    SurrogateMin ≤ x ≤ SurrogateMax
        // <> SurrogateMin ≤ x ≤ SurrogateMin + 2^10 - 1
        // <> ((x - SurrogateMin) >>> 10) = 0    where >>> is an unsigned-shift
        // See Hacker's Delight, section 4-1 for details.
        //
        //    ((x - SurrogateMin) >>> 10) = 0
        // <> floor((x - SurrogateMin) / 1024) = 0
        // <> floor((x / 1024) - (SurrogateMin / 1024)) = 0
        // <> floor(x / 1024) = SurrogateMin / 1024
        // <> floor(x / 1024) * 1024 = SurrogateMin
        // <> (x >>> 10) << 10 = SurrogateMin
        // <> x & ~(2^10 - 1) = SurrogateMin
        constexpr char16_t SurrogateMask = 0xFC00;

        // Check if input[lastIndex] is trail surrogate.
        masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
        masm.loadChar(temp2, lastIndex, temp3, CharEncoding::TwoByte);

        masm.and32(Imm32(SurrogateMask), temp3);
        masm.branch32(Assembler::NotEqual, temp3, Imm32(unicode::TrailSurrogateMin), &done);

        // Check if input[lastIndex-1] is lead surrogate.
        masm.loadChar(temp2, lastIndex, temp3, CharEncoding::TwoByte,
                      -int32_t(sizeof(char16_t)));

        masm.and32(Imm32(SurrogateMask), temp3);
        masm.branch32(Assembler::NotEqual, temp3, Imm32(unicode::LeadSurrogateMin), &done);

        // Move lastIndex to lead surrogate.
        masm.sub32(Imm32(1), lastIndex);

        masm.bind(&done);
    }

    if (mode == RegExpShared::Normal) {
        // Don't handle RegExps with excessive parens.
        masm.load32(Address(temp1, RegExpShared::offsetOfParenCount()), temp2);
        masm.branch32(Assembler::AboveOrEqual, temp2, Imm32(RegExpObject::MaxPairCount), failure);

        // Fill in the paren count in the MatchPairs on the stack.
        masm.add32(Imm32(1), temp2);
        masm.store32(temp2, pairCountAddress);
    }

    // Load the code pointer for the type of input string we have, and compute
    // the input start/end pointers in the InputOutputData.
    Register codePointer = temp1;
    {
        masm.loadStringLength(input, temp3);

        Label isLatin1, done;
        masm.branchLatin1String(input, &isLatin1);
        {
            masm.loadStringChars(input, temp2, CharEncoding::TwoByte);
            masm.storePtr(temp2, inputStartAddress);
            // Two-byte: length in chars -> length in bytes.
            masm.lshiftPtr(Imm32(1), temp3);
            masm.loadPtr(Address(temp1, RegExpShared::offsetOfTwoByteJitCode(mode)),
                         codePointer);
            masm.jump(&done);
        }
        masm.bind(&isLatin1);
        {
            masm.loadStringChars(input, temp2, CharEncoding::Latin1);
            masm.storePtr(temp2, inputStartAddress);
            masm.loadPtr(Address(temp1, RegExpShared::offsetOfLatin1JitCode(mode)),
                         codePointer);
        }
        masm.bind(&done);

        masm.addPtr(temp3, temp2);
        masm.storePtr(temp2, inputEndAddress);
    }

    // Check the RegExpShared has been compiled for this type of input.
    masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
    masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);

    // Finish filling in the InputOutputData instance on the stack.
    if (mode == RegExpShared::Normal) {
        masm.computeEffectiveAddress(Address(masm.getStackPointer(), matchPairsStartOffset),
                                     temp2);
        masm.storePtr(temp2, matchesPointerAddress);
    } else {
        // Use InputOutputData.endIndex itself for output.
        masm.computeEffectiveAddress(endIndexAddress, temp2);
        masm.storePtr(temp2, endIndexAddress);
    }
    masm.storePtr(lastIndex, startIndexAddress);
    masm.store32(Imm32(RegExpRunStatus_Error), matchResultAddress);

    // Save any volatile inputs.
    LiveGeneralRegisterSet volatileRegs;
    if (lastIndex.volatile_())
        volatileRegs.add(lastIndex);
    if (input.volatile_())
        volatileRegs.add(input);
    if (regexp.volatile_())
        volatileRegs.add(regexp);

#ifdef JS_TRACE_LOGGING
    if (TraceLogTextIdEnabled(TraceLogger_IrregexpExecute)) {
        masm.push(temp1);
        masm.loadTraceLogger(temp1);
        masm.tracelogStartId(temp1, TraceLogger_IrregexpExecute);
        masm.pop(temp1);
    }
#endif

    // Execute the RegExp.
    masm.computeEffectiveAddress(Address(masm.getStackPointer(), inputOutputDataStartOffset),
                                 temp2);
    masm.PushRegsInMask(volatileRegs);
    masm.setupUnalignedABICall(temp3);
    masm.passABIArg(temp2);
    masm.callWithABI(codePointer);
    masm.PopRegsInMask(volatileRegs);

#ifdef JS_TRACE_LOGGING
    if (TraceLogTextIdEnabled(TraceLogger_IrregexpExecute)) {
        masm.loadTraceLogger(temp1);
        masm.tracelogStopId(temp1, TraceLogger_IrregexpExecute);
    }
#endif

    Label success;
    masm.branch32(Assembler::Equal, matchResultAddress,
                  Imm32(RegExpRunStatus_Success_NotFound), notFound);
    masm.branch32(Assembler::Equal, matchResultAddress,
                  Imm32(RegExpRunStatus_Error), failure);

    // Lazily update the RegExpStatics.
    masm.movePtr(ImmPtr(res), temp1);

    Address pendingInputAddress(temp1, RegExpStatics::offsetOfPendingInput());
    Address matchesInputAddress(temp1, RegExpStatics::offsetOfMatchesInput());
    Address lazySourceAddress(temp1, RegExpStatics::offsetOfLazySource());
    Address lazyIndexAddress(temp1, RegExpStatics::offsetOfLazyIndex());

    masm.guardedCallPreBarrier(pendingInputAddress, MIRType::String);
    masm.guardedCallPreBarrier(matchesInputAddress, MIRType::String);
    masm.guardedCallPreBarrier(lazySourceAddress, MIRType::String);

    if (stringsCanBeInNursery) {
        // Writing into RegExpStatics tenured memory; must post-barrier.
        if (temp1.volatile_())
            volatileRegs.add(temp1);

        masm.loadPtr(pendingInputAddress, temp2);
        masm.storePtr(input, pendingInputAddress);
        masm.movePtr(input, temp3);
        EmitPostWriteBarrierS(masm, temp1, RegExpStatics::offsetOfPendingInput(),
                              temp2 /* prev */, temp3 /* next */, volatileRegs);

        masm.loadPtr(matchesInputAddress, temp2);
        masm.storePtr(input, matchesInputAddress);
        masm.movePtr(input, temp3);
        EmitPostWriteBarrierS(masm, temp1, RegExpStatics::offsetOfMatchesInput(),
                              temp2 /* prev */, temp3 /* next */, volatileRegs);
    } else {
        masm.storePtr(input, pendingInputAddress);
        masm.storePtr(input, matchesInputAddress);
    }

    masm.storePtr(lastIndex, Address(temp1, RegExpStatics::offsetOfLazyIndex()));
    masm.store32(Imm32(1), Address(temp1, RegExpStatics::offsetOfPendingLazyEvaluation()));

    // Copy the RegExpShared's source and flags into the statics' lazy fields.
    masm.loadPtr(Address(regexp, NativeObject::getFixedSlotOffset(RegExpObject::PRIVATE_SLOT)),
                 temp2);
    masm.loadPtr(Address(temp2, RegExpShared::offsetOfSource()), temp3);
    masm.storePtr(temp3, lazySourceAddress);
    masm.load32(Address(temp2, RegExpShared::offsetOfFlags()), temp3);
    masm.store32(temp3, Address(temp1, RegExpStatics::offsetOfLazyFlags()));

    if (mode == RegExpShared::MatchOnly) {
        // endIndex is passed via temp3.
        masm.load32(endIndexAddress, temp3);
    }

    return true;
}

static void
CopyStringChars(MacroAssembler& masm, Register to, Register from, Register len,
                Register byteOpScratch, CharEncoding encoding);

// Helper that emits the inline code to materialize one capture-group
// substring as a thin-inline, fat-inline, or dependent string.
class CreateDependentString
{
    CharEncoding encoding_;
    Register string_;
    Register temp1_;
    Register temp2_;
    Label* failure_;

    enum class FallbackKind : uint8_t {
        InlineString,
        FatInlineString,
        NotInlineString,
        Count
    };
    // Per-kind OOL entry and rejoin labels for the allocation fallbacks.
    // NOTE(review): template arguments appear lost in extraction — likely
    // mozilla::EnumeratedArray<FallbackKind, FallbackKind::Count, Label>;
    // confirm against upstream.
    mozilla::EnumeratedArray fallbacks_, joins_;

  public:
    CreateDependentString(CharEncoding encoding, Register string, Register temp1, Register temp2,
                          Label* failure)
      : encoding_(encoding), string_(string), temp1_(temp1), temp2_(temp2), failure_(failure)
    { }

    Register string() const { return string_; }
    CharEncoding encoding() const { return encoding_; }

    // Generate code that creates DependentString.
    // Caller should call generateFallback after masm.ret(), to generate
    // fallback path.
    void generate(MacroAssembler& masm, const JSAtomState& names, CompileRuntime* runtime,
                  Register base, BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
                  bool stringsCanBeInNursery);

    // Generate fallback path for creating DependentString.
    void generateFallback(MacroAssembler& masm);
};

void
CreateDependentString::generate(MacroAssembler& masm, const JSAtomState& names,
                                CompileRuntime* runtime, Register base,
                                BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
                                bool stringsCanBeInNursery)
{
    JitSpew(JitSpew_Codegen, "# Emitting CreateDependentString (encoding=%s)",
            (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

    // Allocate a JSString of the requested kind into string_, setting its
    // flags word; on allocation failure jumps to the per-kind fallback label
    // and rejoins at joins_[kind].
    auto newGCString = [&](FallbackKind kind) {
        uint32_t flags = kind == FallbackKind::InlineString ? JSString::INIT_THIN_INLINE_FLAGS :
                         kind == FallbackKind::FatInlineString ? JSString::INIT_FAT_INLINE_FLAGS :
                         JSString::DEPENDENT_FLAGS;
        if (encoding_ == CharEncoding::Latin1)
            flags |= JSString::LATIN1_CHARS_BIT;

        if (kind != FallbackKind::FatInlineString)
            masm.newGCString(string_, temp2_, &fallbacks_[kind], stringsCanBeInNursery);
        else
            masm.newGCFatInlineString(string_, temp2_, &fallbacks_[kind], stringsCanBeInNursery);
        masm.bind(&joins_[kind]);
        masm.store32(Imm32(flags), Address(string_, JSString::offsetOfFlags()));
    };

    // Compute the string length.
    masm.load32(startIndexAddress, temp2_);
    masm.load32(limitIndexAddress, temp1_);
    masm.sub32(temp2_, temp1_);

    Label done, nonEmpty;

    // Zero length matches use the empty string.
    masm.branchTest32(Assembler::NonZero, temp1_, temp1_, &nonEmpty);
    masm.movePtr(ImmGCPtr(names.empty), string_);
    masm.jump(&done);

    masm.bind(&nonEmpty);

    Label notInline;

    int32_t maxInlineLength = encoding_ == CharEncoding::Latin1
                              ? JSFatInlineString::MAX_LENGTH_LATIN1
                              : JSFatInlineString::MAX_LENGTH_TWO_BYTE;
    // NOTE(review): "¬Inline" below is entity-corrupted "&notInline".
    masm.branch32(Assembler::Above, temp1_, Imm32(maxInlineLength), ¬Inline);
    {
        // Make a thin or fat inline string.
        Label stringAllocated, fatInline;

        int32_t maxThinInlineLength = encoding_ == CharEncoding::Latin1
                                      ? JSThinInlineString::MAX_LENGTH_LATIN1
                                      : JSThinInlineString::MAX_LENGTH_TWO_BYTE;
        masm.branch32(Assembler::Above, temp1_, Imm32(maxThinInlineLength), &fatInline);
        {
            newGCString(FallbackKind::InlineString);
            masm.jump(&stringAllocated);
        }
        masm.bind(&fatInline);
        {
            newGCString(FallbackKind::FatInlineString);
        }
        masm.bind(&stringAllocated);

        masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));

        masm.push(string_);
        masm.push(base);

        // Adjust the start index address for the above pushes.
        MOZ_ASSERT(startIndexAddress.base == masm.getStackPointer());
        BaseIndex newStartIndexAddress = startIndexAddress;
        newStartIndexAddress.offset += 2 * sizeof(void*);

        // Load chars pointer for the new string.
        masm.loadInlineStringCharsForStore(string_, string_);

        // Load the source characters pointer.
        masm.loadStringChars(base, temp2_, encoding_);
        masm.load32(newStartIndexAddress, base);
        masm.addToCharPtr(temp2_, base, encoding_);

        CopyStringChars(masm, string_, temp2_, temp1_, base, encoding_);

        // Null-terminate.
        masm.storeChar(Imm32(0), Address(string_, 0), encoding_);

        masm.pop(base);
        masm.pop(string_);

        masm.jump(&done);
    }

    // NOTE(review): "¬Inline" below is entity-corrupted "&notInline".
    masm.bind(¬Inline);
    {
        // Make a dependent string.
        // Warning: string may be tenured (if the fallback case is hit), so
        // stores into it must be post barriered.
        newGCString(FallbackKind::NotInlineString);

        masm.store32(temp1_, Address(string_, JSString::offsetOfLength()));

        masm.loadNonInlineStringChars(base, temp1_, encoding_);
        masm.load32(startIndexAddress, temp2_);
        masm.addToCharPtr(temp1_, temp2_, encoding_);
        masm.storeNonInlineStringChars(temp1_, string_);
        masm.storeDependentStringBase(base, string_);
        masm.movePtr(base, temp1_);

        // Follow any base pointer if the input is itself a dependent string.
        // Watch for undepended strings, which have a base pointer but don't
        // actually share their characters with it.
        Label noBase;
        masm.load32(Address(base, JSString::offsetOfFlags()), temp2_);
        masm.and32(Imm32(JSString::TYPE_FLAGS_MASK), temp2_);
        masm.branch32(Assembler::NotEqual, temp2_, Imm32(JSString::DEPENDENT_FLAGS), &noBase);
        masm.loadDependentStringBase(base, temp1_);
        masm.storeDependentStringBase(temp1_, string_);
        masm.bind(&noBase);

        // Post-barrier the base store, whether it was the direct or indirect
        // base (both will end up in temp1 here).
        masm.branchPtrInNurseryChunk(Assembler::Equal, string_, temp2_, &done);
        masm.branchPtrInNurseryChunk(Assembler::NotEqual, temp1_, temp2_, &done);

        LiveRegisterSet regsToSave(RegisterSet::Volatile());
        regsToSave.takeUnchecked(temp1_);
        regsToSave.takeUnchecked(temp2_);

        masm.PushRegsInMask(regsToSave);

        masm.mov(ImmPtr(runtime), temp1_);

        masm.setupUnalignedABICall(temp2_);
        masm.passABIArg(temp1_);
        masm.passABIArg(string_);
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostWriteBarrier));

        masm.PopRegsInMask(regsToSave);
    }

    masm.bind(&done);
}

// ABI-callable tenured JSString allocator used by the OOL fallback below.
// NOTE(review): template arguments appear lost in extraction — likely
// js::Allocate<JSString, NoGC>; confirm against upstream.
static void*
AllocateString(JSContext* cx)
{
    AutoUnsafeCallWithABI unsafe;
    return js::Allocate(cx, js::gc::TenuredHeap);
}

// ABI-callable tenured JSFatInlineString allocator used by the OOL fallback.
// NOTE(review): template arguments appear lost in extraction — likely
// js::Allocate<JSFatInlineString, NoGC>; confirm against upstream.
static void*
AllocateFatInlineString(JSContext* cx)
{
    AutoUnsafeCallWithABI unsafe;
    return js::Allocate(cx, js::gc::TenuredHeap);
}

void
CreateDependentString::generateFallback(MacroAssembler& masm)
{
    JitSpew(JitSpew_Codegen, "# Emitting CreateDependentString fallback (encoding=%s)",
            (encoding_ == CharEncoding::Latin1 ? "Latin-1" : "Two-Byte"));

    LiveRegisterSet regsToSave(RegisterSet::Volatile());
    regsToSave.takeUnchecked(string_);
    regsToSave.takeUnchecked(temp2_);

    // Emit one ABI-call fallback per allocation kind, each rejoining the
    // inline path at joins_[kind].
    for (FallbackKind kind : mozilla::MakeEnumeratedRange(FallbackKind::Count)) {
        masm.bind(&fallbacks_[kind]);

        masm.PushRegsInMask(regsToSave);

        masm.setupUnalignedABICall(string_);
        masm.loadJSContext(string_);
        masm.passABIArg(string_);
        masm.callWithABI(kind == FallbackKind::FatInlineString
                         ? JS_FUNC_TO_DATA_PTR(void*, AllocateFatInlineString)
                         : JS_FUNC_TO_DATA_PTR(void*, AllocateString));
        masm.storeCallPointerResult(string_);

        masm.PopRegsInMask(regsToSave);

        masm.branchPtr(Assembler::Equal, string_, ImmWord(0), failure_);

        masm.jump(&joins_[kind]);
    }
}

// ABI-callable allocator for the match-result ArrayObject.
// NOTE(review): template arguments appear lost in extraction — likely
// js::Allocate<JSObject, NoGC>; confirm against upstream.
static void*
CreateMatchResultFallbackFunc(JSContext* cx, gc::AllocKind kind, size_t nDynamicSlots)
{
    AutoUnsafeCallWithABI unsafe;
    return js::Allocate(cx, kind, nDynamicSlots, gc::DefaultHeap,
                        &ArrayObject::class_);
}

// OOL path for allocating the match-result object when the inline nursery
// allocation in generateRegExpMatcherStub fails.
static void
CreateMatchResultFallback(MacroAssembler& masm, Register object, Register temp1, Register temp2,
                          const TemplateObject& templateObject, Label* fail)
{
    JitSpew(JitSpew_Codegen, "# Emitting CreateMatchResult fallback");

    MOZ_ASSERT(templateObject.isArrayObject());

    LiveRegisterSet regsToSave(RegisterSet::Volatile());
    regsToSave.takeUnchecked(object);
    regsToSave.takeUnchecked(temp1);
    regsToSave.takeUnchecked(temp2);

    masm.PushRegsInMask(regsToSave);

    masm.setupUnalignedABICall(object);
    masm.loadJSContext(object);
    masm.passABIArg(object);
    masm.move32(Imm32(int32_t(templateObject.getAllocKind())), temp1);
    masm.passABIArg(temp1);
    masm.move32(Imm32(int32_t(templateObject.asNativeTemplateObject().numDynamicSlots())), temp2);
    masm.passABIArg(temp2);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, CreateMatchResultFallbackFunc));
    masm.storeCallPointerResult(object);

    masm.PopRegsInMask(regsToSave);

    masm.branchPtr(Assembler::Equal, object, ImmWord(0), fail);

    masm.initGCThing(object, temp1, templateObject, true);
}

JitCode*
JitRealm::generateRegExpMatcherStub(JSContext* cx)
{
    JitSpew(JitSpew_Codegen, "# Emitting RegExpMatcher stub");

    Register regexp = RegExpMatcherRegExpReg;
    Register input = RegExpMatcherStringReg;
    Register lastIndex = RegExpMatcherLastIndexReg;
    ValueOperand result = JSReturnOperand;

    // We are free to clobber all registers, as LRegExpMatcher is a call instruction.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    regs.take(input);
    regs.take(regexp);
    regs.take(lastIndex);

    Register temp1 = regs.takeAny();
    Register temp2 = regs.takeAny();
    Register temp3 = regs.takeAny();
    Register temp4 = regs.takeAny();
    Register maybeTemp5 = InvalidReg;
    if (!regs.empty()) {
        // There are not enough registers on x86.
        maybeTemp5 = regs.takeAny();
    }

    ArrayObject* templateObject = cx->realm()->regExps.getOrCreateMatchResultTemplateObject(cx);
    if (!templateObject)
        return nullptr;
    TemplateObject templateObj(templateObject);
    const NativeTemplateObject& nativeTemplateObj = templateObj.asNativeTemplateObject();

    // The template object should have enough space for the maximum number of
    // pairs this stub can handle.
    MOZ_ASSERT(ObjectElements::VALUES_PER_HEADER + RegExpObject::MaxPairCount ==
               gc::GetGCKindSlots(templateObj.getAllocKind()));

    StackMacroAssembler masm(cx);

    // The InputOutputData is placed above the return address on the stack.
    size_t inputOutputDataStartOffset = sizeof(void*);

    Label notFound, oolEntry;
    // NOTE(review): "¬Found" below is entity-corrupted "&notFound", and
    // |stringsCanBeInNursery| has no visible local definition in this
    // excerpt — presumably defined in a stripped portion; confirm upstream.
    if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex, temp1, temp2, temp3,
                                 inputOutputDataStartOffset, RegExpShared::Normal,
                                 stringsCanBeInNursery, ¬Found, &oolEntry))
    {
        return nullptr;
    }

    // Construct the result.
    Register object = temp1;
    Label matchResultFallback, matchResultJoin;
    masm.createGCObject(object, temp2, templateObj, gc::DefaultHeap, &matchResultFallback);
    masm.bind(&matchResultJoin);

    // Initialize slots of result object.
    MOZ_ASSERT(nativeTemplateObj.numFixedSlots() == 0);
    MOZ_ASSERT(nativeTemplateObj.numDynamicSlots() == 2);
    static_assert(RegExpRealm::MatchResultObjectIndexSlot == 0,
                  "First slot holds the 'index' property");
    static_assert(RegExpRealm::MatchResultObjectInputSlot == 1,
                  "Second slot holds the 'input' property");

    masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);
    masm.storeValue(nativeTemplateObj.getSlot(RegExpRealm::MatchResultObjectIndexSlot),
                    Address(temp2, 0));
    masm.storeValue(nativeTemplateObj.getSlot(RegExpRealm::MatchResultObjectInputSlot),
                    Address(temp2, sizeof(Value)));

    /*
     * [SMDOC] Stack layout for the RegExpMatcher stub
     *
     *                                    +---------------+
     *                                    |Return-Address |
     *                                    +---------------+
     * inputOutputDataStartOffset +-----> +---------------+
     *                                    |InputOutputData|
     *                                    +---------------+
     *                                    +---------------+
     *                                    |  MatchPairs   |
     *           pairsCountAddress +----------->  count   |
     *                                    |       pairs   |
     *                                    |               |
     *                                    +---------------+
     *     pairsVectorStartOffset +-----> +---------------+
     *                                    |   MatchPair   |
     *             matchPairStart +------------>  start   |  <-------+
     *             matchPairLimit +------------>  limit   |          | Reserved space for
     *                                    +---------------+          | `RegExpObject::MaxPairCount`
     *                                           .                   | MatchPair objects.
     *                                           .                   |
     *                                           .                   | `count` objects will be
     *                                    +---------------+          | initialized and can be
     *                                    |   MatchPair   |          | accessed below.
     *                                    |       start   |  <-------+
     *                                    |       limit   |
     *                                    +---------------+
     */

    static_assert(sizeof(MatchPair) == 2 * sizeof(int32_t),
                  "MatchPair consists of two int32 values representing the start"
                  "and the end offset of the match");

    Address pairCountAddress = RegExpPairCountAddress(masm, inputOutputDataStartOffset);

    size_t pairsVectorStartOffset = RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
    Address firstMatchPairStartAddress(masm.getStackPointer(),
                                       pairsVectorStartOffset + offsetof(MatchPair, start));

    // Incremented by one below for each match pair.
    Register matchIndex = temp2;
    masm.move32(Imm32(0), matchIndex);

    // The element in which to store the result of the current match.
    size_t elementsOffset = NativeObject::offsetOfFixedElements();
    BaseObjectElementIndex objectMatchElement(object, matchIndex, elementsOffset);

    // The current match pair's "start" and "limit" member.
    BaseIndex matchPairStart(masm.getStackPointer(), matchIndex, TimesEight,
                             pairsVectorStartOffset + offsetof(MatchPair, start));
    BaseIndex matchPairLimit(masm.getStackPointer(), matchIndex, TimesEight,
                             pairsVectorStartOffset + offsetof(MatchPair, limit));

    Register temp5;
    if (maybeTemp5 == InvalidReg) {
        // We don't have enough registers for a fifth temporary. Reuse
        // |lastIndex| as a temporary. We don't need to restore its value,
        // because |lastIndex| is no longer used after a successful match.
        // (Neither here nor in the OOL path, cf. js::RegExpMatcherRaw.)
        temp5 = lastIndex;
    } else {
        temp5 = maybeTemp5;
    }

    // Loop to construct the match strings. There are two different loops,
    // depending on whether the input is a Two-Byte or a Latin-1 string.
    CreateDependentString depStrs[] {
        { CharEncoding::TwoByte, temp3, temp4, temp5, &oolEntry },
        { CharEncoding::Latin1, temp3, temp4, temp5, &oolEntry },
    };

    {
        Label isLatin1, done;
        masm.branchLatin1String(input, &isLatin1);

        for (auto& depStr : depStrs) {
            if (depStr.encoding() == CharEncoding::Latin1)
                masm.bind(&isLatin1);

            Label matchLoop;
            masm.bind(&matchLoop);

            static_assert(MatchPair::NoMatch == -1,
                          "MatchPair::start is negative if no match was found");

            Label isUndefined, storeDone;
            masm.branch32(Assembler::LessThan, matchPairStart, Imm32(0), &isUndefined);
            {
                depStr.generate(masm, cx->names(), CompileRuntime::get(cx->runtime()), input,
                                matchPairStart, matchPairLimit, stringsCanBeInNursery);

                // Storing into nursery-allocated results object's elements; no post barrier.
                masm.storeValue(JSVAL_TYPE_STRING, depStr.string(), objectMatchElement);
                masm.jump(&storeDone);
            }
            masm.bind(&isUndefined);
            {
                masm.storeValue(UndefinedValue(), objectMatchElement);
            }
            masm.bind(&storeDone);

            masm.add32(Imm32(1), matchIndex);
            masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex, &done);
            masm.jump(&matchLoop);
        }

#ifdef DEBUG
        masm.assumeUnreachable("The match string loop doesn't fall through.");
#endif

        masm.bind(&done);
    }

    // Fill in the rest of the output object.
    masm.store32(matchIndex,
                 Address(object, elementsOffset + ObjectElements::offsetOfInitializedLength()));
    masm.store32(matchIndex,
                 Address(object, elementsOffset + ObjectElements::offsetOfLength()));

    masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);

    masm.load32(firstMatchPairStartAddress, temp3);
    masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));

    // No post barrier needed (address is within nursery object.)
    masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));

    // All done!
    masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
    masm.ret();

    // NOTE(review): "¬Found" below is entity-corrupted "&notFound".
    masm.bind(¬Found);
    masm.moveValue(NullValue(), result);
    masm.ret();

    // Fallback paths for CreateDependentString.
    for (auto& depStr : depStrs)
        depStr.generateFallback(masm);

    // Fallback path for createGCObject.
    masm.bind(&matchResultFallback);
    CreateMatchResultFallback(masm, object, temp2, temp3, templateObj, &oolEntry);
    masm.jump(&matchResultJoin);

    // Use an undefined value to signal to the caller that the OOL stub needs to be called.
    masm.bind(&oolEntry);
    masm.moveValue(UndefinedValue(), result);
    masm.ret();

    Linker linker(masm);
    AutoFlushICache afc("RegExpMatcherStub");
    // NOTE(review): template argument appears lost in extraction — likely
    // linker.newCode<NoGC>(cx, CodeKind::Other) or plain newCode; confirm.
    JitCode* code = linker.newCode(cx, CodeKind::Other);
    if (!code)
        return nullptr;

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "RegExpMatcherStub");
#endif
#ifdef MOZ_VTUNE
    vtune::MarkStub(code, "RegExpMatcherStub");
#endif

    return code;
}

// OOL path container for LRegExpMatcher; dispatches back into the
// CodeGenerator visitor.
// NOTE(review): base-class template argument appears lost in extraction —
// likely OutOfLineCodeBase<CodeGenerator>; confirm against upstream.
class OutOfLineRegExpMatcher : public OutOfLineCodeBase
{
    LRegExpMatcher* lir_;

  public:
    explicit OutOfLineRegExpMatcher(LRegExpMatcher* lir)
      : lir_(lir)
    { }

    void accept(CodeGenerator* codegen) override {
        codegen->visitOutOfLineRegExpMatcher(this);
    }

    LRegExpMatcher* lir() const {
        return lir_;
    }
};

typedef bool (*RegExpMatcherRawFn)(JSContext* cx, HandleObject regexp, HandleString input,
                                   int32_t lastIndex, MatchPairs* pairs,
                                   MutableHandleValue output);
// NOTE(review): template argument appears lost in extraction — likely
// FunctionInfo<RegExpMatcherRawFn>; confirm against upstream.
static const VMFunction RegExpMatcherRawInfo =
    FunctionInfo(RegExpMatcherRaw, "RegExpMatcherRaw");

// (Continues past this excerpt.)
void
CodeGenerator::visitOutOfLineRegExpMatcher(OutOfLineRegExpMatcher* ool)
{
    LRegExpMatcher* lir = ool->lir();
    Register lastIndex = ToRegister(lir->lastIndex());
    Register input = ToRegister(lir->string());
    Register regexp = ToRegister(lir->regexp());

    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    regs.take(lastIndex);
    regs.take(input);
    regs.take(regexp);
    Register temp = regs.takeAny();

    // The MatchPairs sit just above the InputOutputData on the stack; pass
    // their effective address to the VM function.
    masm.computeEffectiveAddress(Address(masm.getStackPointer(),
                                         sizeof(irregexp::InputOutputData)), temp);

    pushArg(temp);
    pushArg(lastIndex);
    pushArg(input);
    pushArg(regexp);

    // We are not using oolCallVM because we are in a Call, and that live
    // registers are already saved by the register allocator.
callVM(RegExpMatcherRawInfo, lir); masm.jump(ool->rejoin()); } void CodeGenerator::visitRegExpMatcher(LRegExpMatcher* lir) { MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpMatcherRegExpReg); MOZ_ASSERT(ToRegister(lir->string()) == RegExpMatcherStringReg); MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpMatcherLastIndexReg); MOZ_ASSERT(ToOutValue(lir) == JSReturnOperand); #if defined(JS_NUNBOX32) MOZ_ASSERT(RegExpMatcherRegExpReg != JSReturnReg_Type); MOZ_ASSERT(RegExpMatcherRegExpReg != JSReturnReg_Data); MOZ_ASSERT(RegExpMatcherStringReg != JSReturnReg_Type); MOZ_ASSERT(RegExpMatcherStringReg != JSReturnReg_Data); MOZ_ASSERT(RegExpMatcherLastIndexReg != JSReturnReg_Type); MOZ_ASSERT(RegExpMatcherLastIndexReg != JSReturnReg_Data); #elif defined(JS_PUNBOX64) MOZ_ASSERT(RegExpMatcherRegExpReg != JSReturnReg); MOZ_ASSERT(RegExpMatcherStringReg != JSReturnReg); MOZ_ASSERT(RegExpMatcherLastIndexReg != JSReturnReg); #endif masm.reserveStack(RegExpReservedStack); OutOfLineRegExpMatcher* ool = new(alloc()) OutOfLineRegExpMatcher(lir); addOutOfLineCode(ool, lir->mir()); const JitRealm* jitRealm = gen->realm->jitRealm(); JitCode* regExpMatcherStub = jitRealm->regExpMatcherStubNoBarrier(&realmStubsToReadBarrier_); masm.call(regExpMatcherStub); masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry()); masm.bind(ool->rejoin()); masm.freeStack(RegExpReservedStack); } static const int32_t RegExpSearcherResultNotFound = -1; static const int32_t RegExpSearcherResultFailed = -2; JitCode* JitRealm::generateRegExpSearcherStub(JSContext* cx) { JitSpew(JitSpew_Codegen, "# Emitting RegExpSearcher stub"); Register regexp = RegExpTesterRegExpReg; Register input = RegExpTesterStringReg; Register lastIndex = RegExpTesterLastIndexReg; Register result = ReturnReg; // We are free to clobber all registers, as LRegExpSearcher is a call instruction. 
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); regs.take(input); regs.take(regexp); regs.take(lastIndex); Register temp1 = regs.takeAny(); Register temp2 = regs.takeAny(); Register temp3 = regs.takeAny(); StackMacroAssembler masm(cx); // The InputOutputData is placed above the return address on the stack. size_t inputOutputDataStartOffset = sizeof(void*); Label notFound, oolEntry; if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex, temp1, temp2, temp3, inputOutputDataStartOffset, RegExpShared::Normal, stringsCanBeInNursery, ¬Found, &oolEntry)) { return nullptr; } /* * [SMDOC] Stack layout for the RegExpSearcher stub * * +---------------+ * |Return-Address | * +---------------+ * inputOutputDataStartOffset +-----> +---------------+ * |InputOutputData| * +---------------+ * +---------------+ * | MatchPairs | * | count | * | pairs | * | | * +---------------+ * pairsVectorStartOffset +-----> +---------------+ * | MatchPair | * matchPairStart +------------> start | <-------+ * matchPairLimit +------------> limit | | Reserved space for * +---------------+ | `RegExpObject::MaxPairCount` * . | MatchPair objects. * . | * . | Only a single object will * +---------------+ | be initialized and can be * | MatchPair | | accessed below. 
* | start | <-------+ * | limit | * +---------------+ */ size_t pairsVectorStartOffset = RegExpPairsVectorStartOffset(inputOutputDataStartOffset); Address matchPairStart(masm.getStackPointer(), pairsVectorStartOffset + offsetof(MatchPair, start)); Address matchPairLimit(masm.getStackPointer(), pairsVectorStartOffset + offsetof(MatchPair, limit)); masm.load32(matchPairStart, result); masm.load32(matchPairLimit, input); masm.lshiftPtr(Imm32(15), input); masm.or32(input, result); masm.ret(); masm.bind(¬Found); masm.move32(Imm32(RegExpSearcherResultNotFound), result); masm.ret(); masm.bind(&oolEntry); masm.move32(Imm32(RegExpSearcherResultFailed), result); masm.ret(); Linker linker(masm); AutoFlushICache afc("RegExpSearcherStub"); JitCode* code = linker.newCode(cx, CodeKind::Other); if (!code) return nullptr; #ifdef JS_ION_PERF writePerfSpewerJitCodeProfile(code, "RegExpSearcherStub"); #endif #ifdef MOZ_VTUNE vtune::MarkStub(code, "RegExpSearcherStub"); #endif return code; } class OutOfLineRegExpSearcher : public OutOfLineCodeBase { LRegExpSearcher* lir_; public: explicit OutOfLineRegExpSearcher(LRegExpSearcher* lir) : lir_(lir) { } void accept(CodeGenerator* codegen) override { codegen->visitOutOfLineRegExpSearcher(this); } LRegExpSearcher* lir() const { return lir_; } }; typedef bool (*RegExpSearcherRawFn)(JSContext* cx, HandleObject regexp, HandleString input, int32_t lastIndex, MatchPairs* pairs, int32_t* result); static const VMFunction RegExpSearcherRawInfo = FunctionInfo(RegExpSearcherRaw, "RegExpSearcherRaw"); void CodeGenerator::visitOutOfLineRegExpSearcher(OutOfLineRegExpSearcher* ool) { LRegExpSearcher* lir = ool->lir(); Register lastIndex = ToRegister(lir->lastIndex()); Register input = ToRegister(lir->string()); Register regexp = ToRegister(lir->regexp()); AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); regs.take(lastIndex); regs.take(input); regs.take(regexp); Register temp = regs.takeAny(); 
masm.computeEffectiveAddress(Address(masm.getStackPointer(), sizeof(irregexp::InputOutputData)), temp); pushArg(temp); pushArg(lastIndex); pushArg(input); pushArg(regexp); // We are not using oolCallVM because we are in a Call, and that live // registers are already saved by the the register allocator. callVM(RegExpSearcherRawInfo, lir); masm.jump(ool->rejoin()); } void CodeGenerator::visitRegExpSearcher(LRegExpSearcher* lir) { MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpTesterRegExpReg); MOZ_ASSERT(ToRegister(lir->string()) == RegExpTesterStringReg); MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpTesterLastIndexReg); MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg); MOZ_ASSERT(RegExpTesterRegExpReg != ReturnReg); MOZ_ASSERT(RegExpTesterStringReg != ReturnReg); MOZ_ASSERT(RegExpTesterLastIndexReg != ReturnReg); masm.reserveStack(RegExpReservedStack); OutOfLineRegExpSearcher* ool = new(alloc()) OutOfLineRegExpSearcher(lir); addOutOfLineCode(ool, lir->mir()); const JitRealm* jitRealm = gen->realm->jitRealm(); JitCode* regExpSearcherStub = jitRealm->regExpSearcherStubNoBarrier(&realmStubsToReadBarrier_); masm.call(regExpSearcherStub); masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpSearcherResultFailed), ool->entry()); masm.bind(ool->rejoin()); masm.freeStack(RegExpReservedStack); } static const int32_t RegExpTesterResultNotFound = -1; static const int32_t RegExpTesterResultFailed = -2; JitCode* JitRealm::generateRegExpTesterStub(JSContext* cx) { JitSpew(JitSpew_Codegen, "# Emitting RegExpTester stub"); Register regexp = RegExpTesterRegExpReg; Register input = RegExpTesterStringReg; Register lastIndex = RegExpTesterLastIndexReg; Register result = ReturnReg; StackMacroAssembler masm(cx); #ifdef JS_USE_LINK_REGISTER masm.pushReturnAddress(); #endif // We are free to clobber all registers, as LRegExpTester is a call instruction. 
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); regs.take(input); regs.take(regexp); regs.take(lastIndex); Register temp1 = regs.takeAny(); Register temp2 = regs.takeAny(); Register temp3 = regs.takeAny(); masm.reserveStack(sizeof(irregexp::InputOutputData)); Label notFound, oolEntry; if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, lastIndex, temp1, temp2, temp3, 0, RegExpShared::MatchOnly, stringsCanBeInNursery, ¬Found, &oolEntry)) { return nullptr; } Label done; // temp3 contains endIndex. masm.move32(temp3, result); masm.jump(&done); masm.bind(¬Found); masm.move32(Imm32(RegExpTesterResultNotFound), result); masm.jump(&done); masm.bind(&oolEntry); masm.move32(Imm32(RegExpTesterResultFailed), result); masm.bind(&done); masm.freeStack(sizeof(irregexp::InputOutputData)); masm.ret(); Linker linker(masm); AutoFlushICache afc("RegExpTesterStub"); JitCode* code = linker.newCode(cx, CodeKind::Other); if (!code) return nullptr; #ifdef JS_ION_PERF writePerfSpewerJitCodeProfile(code, "RegExpTesterStub"); #endif #ifdef MOZ_VTUNE vtune::MarkStub(code, "RegExpTesterStub"); #endif return code; } class OutOfLineRegExpTester : public OutOfLineCodeBase { LRegExpTester* lir_; public: explicit OutOfLineRegExpTester(LRegExpTester* lir) : lir_(lir) { } void accept(CodeGenerator* codegen) override { codegen->visitOutOfLineRegExpTester(this); } LRegExpTester* lir() const { return lir_; } }; typedef bool (*RegExpTesterRawFn)(JSContext* cx, HandleObject regexp, HandleString input, int32_t lastIndex, int32_t* result); static const VMFunction RegExpTesterRawInfo = FunctionInfo(RegExpTesterRaw, "RegExpTesterRaw"); void CodeGenerator::visitOutOfLineRegExpTester(OutOfLineRegExpTester* ool) { LRegExpTester* lir = ool->lir(); Register lastIndex = ToRegister(lir->lastIndex()); Register input = ToRegister(lir->string()); Register regexp = ToRegister(lir->regexp()); pushArg(lastIndex); pushArg(input); pushArg(regexp); // We are not using oolCallVM because we are in a Call, and 
that live // registers are already saved by the the register allocator. callVM(RegExpTesterRawInfo, lir); masm.jump(ool->rejoin()); } void CodeGenerator::visitRegExpTester(LRegExpTester* lir) { MOZ_ASSERT(ToRegister(lir->regexp()) == RegExpTesterRegExpReg); MOZ_ASSERT(ToRegister(lir->string()) == RegExpTesterStringReg); MOZ_ASSERT(ToRegister(lir->lastIndex()) == RegExpTesterLastIndexReg); MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg); MOZ_ASSERT(RegExpTesterRegExpReg != ReturnReg); MOZ_ASSERT(RegExpTesterStringReg != ReturnReg); MOZ_ASSERT(RegExpTesterLastIndexReg != ReturnReg); OutOfLineRegExpTester* ool = new(alloc()) OutOfLineRegExpTester(lir); addOutOfLineCode(ool, lir->mir()); const JitRealm* jitRealm = gen->realm->jitRealm(); JitCode* regExpTesterStub = jitRealm->regExpTesterStubNoBarrier(&realmStubsToReadBarrier_); masm.call(regExpTesterStub); masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpTesterResultFailed), ool->entry()); masm.bind(ool->rejoin()); } class OutOfLineRegExpPrototypeOptimizable : public OutOfLineCodeBase { LRegExpPrototypeOptimizable* ins_; public: explicit OutOfLineRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins) : ins_(ins) { } void accept(CodeGenerator* codegen) override { codegen->visitOutOfLineRegExpPrototypeOptimizable(this); } LRegExpPrototypeOptimizable* ins() const { return ins_; } }; void CodeGenerator::visitRegExpPrototypeOptimizable(LRegExpPrototypeOptimizable* ins) { Register object = ToRegister(ins->object()); Register output = ToRegister(ins->output()); Register temp = ToRegister(ins->temp()); OutOfLineRegExpPrototypeOptimizable* ool = new(alloc()) OutOfLineRegExpPrototypeOptimizable(ins); addOutOfLineCode(ool, ins->mir()); masm.loadJSContext(temp); masm.loadPtr(Address(temp, JSContext::offsetOfRealm()), temp); size_t offset = Realm::offsetOfRegExps() + RegExpRealm::offsetOfOptimizableRegExpPrototypeShape(); masm.loadPtr(Address(temp, offset), temp); masm.branchTestObjShapeUnsafe(Assembler::NotEqual, 
object, temp, ool->entry()); masm.move32(Imm32(0x1), output); masm.bind(ool->rejoin()); } void CodeGenerator::visitOutOfLineRegExpPrototypeOptimizable(OutOfLineRegExpPrototypeOptimizable* ool) { LRegExpPrototypeOptimizable* ins = ool->ins(); Register object = ToRegister(ins->object()); Register output = ToRegister(ins->output()); saveVolatile(output); masm.setupUnalignedABICall(output); masm.loadJSContext(output); masm.passABIArg(output); masm.passABIArg(object); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, RegExpPrototypeOptimizableRaw)); masm.storeCallBoolResult(output); restoreVolatile(output); masm.jump(ool->rejoin()); } class OutOfLineRegExpInstanceOptimizable : public OutOfLineCodeBase { LRegExpInstanceOptimizable* ins_; public: explicit OutOfLineRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins) : ins_(ins) { } void accept(CodeGenerator* codegen) override { codegen->visitOutOfLineRegExpInstanceOptimizable(this); } LRegExpInstanceOptimizable* ins() const { return ins_; } }; void CodeGenerator::visitRegExpInstanceOptimizable(LRegExpInstanceOptimizable* ins) { Register object = ToRegister(ins->object()); Register output = ToRegister(ins->output()); Register temp = ToRegister(ins->temp()); OutOfLineRegExpInstanceOptimizable* ool = new(alloc()) OutOfLineRegExpInstanceOptimizable(ins); addOutOfLineCode(ool, ins->mir()); masm.loadJSContext(temp); masm.loadPtr(Address(temp, JSContext::offsetOfRealm()), temp); size_t offset = Realm::offsetOfRegExps() + RegExpRealm::offsetOfOptimizableRegExpInstanceShape(); masm.loadPtr(Address(temp, offset), temp); masm.branchTestObjShapeUnsafe(Assembler::NotEqual, object, temp, ool->entry()); masm.move32(Imm32(0x1), output); masm.bind(ool->rejoin()); } void CodeGenerator::visitOutOfLineRegExpInstanceOptimizable(OutOfLineRegExpInstanceOptimizable* ool) { LRegExpInstanceOptimizable* ins = ool->ins(); Register object = ToRegister(ins->object()); Register proto = ToRegister(ins->proto()); Register output = 
ToRegister(ins->output()); saveVolatile(output); masm.setupUnalignedABICall(output); masm.loadJSContext(output); masm.passABIArg(output); masm.passABIArg(object); masm.passABIArg(proto); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, RegExpInstanceOptimizableRaw)); masm.storeCallBoolResult(output); restoreVolatile(output); masm.jump(ool->rejoin()); } static void FindFirstDollarIndex(MacroAssembler& masm, Register str, Register len, Register temp0, Register temp1, Register output, CharEncoding encoding) { #ifdef DEBUG Label ok; masm.branch32(Assembler::GreaterThan, len, Imm32(0), &ok); masm.assumeUnreachable("Length should be greater than 0."); masm.bind(&ok); #endif Register chars = temp0; masm.loadStringChars(str, chars, encoding); masm.move32(Imm32(0), output); Label start, done; masm.bind(&start); Register currentChar = temp1; masm.loadChar(chars, output, currentChar, encoding); masm.branch32(Assembler::Equal, currentChar, Imm32('$'), &done); masm.add32(Imm32(1), output); masm.branch32(Assembler::NotEqual, output, len, &start); masm.move32(Imm32(-1), output); masm.bind(&done); } typedef bool (*GetFirstDollarIndexRawFn)(JSContext*, JSString*, int32_t*); static const VMFunction GetFirstDollarIndexRawInfo = FunctionInfo(GetFirstDollarIndexRaw, "GetFirstDollarIndexRaw"); void CodeGenerator::visitGetFirstDollarIndex(LGetFirstDollarIndex* ins) { Register str = ToRegister(ins->str()); Register output = ToRegister(ins->output()); Register temp0 = ToRegister(ins->temp0()); Register temp1 = ToRegister(ins->temp1()); Register len = ToRegister(ins->temp2()); OutOfLineCode* ool = oolCallVM(GetFirstDollarIndexRawInfo, ins, ArgList(str), StoreRegisterTo(output)); masm.branchIfRope(str, ool->entry()); masm.loadStringLength(str, len); Label isLatin1, done; masm.branchLatin1String(str, &isLatin1); { FindFirstDollarIndex(masm, str, len, temp0, temp1, output, CharEncoding::TwoByte); masm.jump(&done); } masm.bind(&isLatin1); { FindFirstDollarIndex(masm, str, len, temp0, temp1, output, 
CharEncoding::Latin1); } masm.bind(&done); masm.bind(ool->rejoin()); } typedef JSString* (*StringReplaceFn)(JSContext*, HandleString, HandleString, HandleString); static const VMFunction StringFlatReplaceInfo = FunctionInfo(js::str_flat_replace_string, "str_flat_replace_string"); static const VMFunction StringReplaceInfo = FunctionInfo(StringReplace, "StringReplace"); void CodeGenerator::visitStringReplace(LStringReplace* lir) { if (lir->replacement()->isConstant()) pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString())); else pushArg(ToRegister(lir->replacement())); if (lir->pattern()->isConstant()) pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString())); else pushArg(ToRegister(lir->pattern())); if (lir->string()->isConstant()) pushArg(ImmGCPtr(lir->string()->toConstant()->toString())); else pushArg(ToRegister(lir->string())); if (lir->mir()->isFlatReplacement()) callVM(StringFlatReplaceInfo, lir); else callVM(StringReplaceInfo, lir); } void CodeGenerator::visitBinaryCache(LBinaryCache* lir) { LiveRegisterSet liveRegs = lir->safepoint()->liveRegs(); TypedOrValueRegister lhs = TypedOrValueRegister(ToValue(lir, LBinaryCache::LhsInput)); TypedOrValueRegister rhs = TypedOrValueRegister(ToValue(lir, LBinaryCache::RhsInput)); ValueOperand output = ToOutValue(lir); JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc()); switch (jsop) { case JSOP_ADD: case JSOP_SUB: case JSOP_MUL: case JSOP_DIV: case JSOP_MOD: case JSOP_POW: { IonBinaryArithIC ic(liveRegs, lhs, rhs, output); addIC(lir, allocateIC(ic)); return; } case JSOP_LT: case JSOP_LE: case JSOP_GT: case JSOP_GE: case JSOP_EQ: case JSOP_NE: case JSOP_STRICTEQ: case JSOP_STRICTNE: { IonCompareIC ic(liveRegs, lhs, rhs, output); addIC(lir, allocateIC(ic)); return; } default: MOZ_CRASH("Unsupported jsop in MBinaryCache"); } } void CodeGenerator::visitUnaryCache(LUnaryCache* lir) { LiveRegisterSet liveRegs = lir->safepoint()->liveRegs(); TypedOrValueRegister input = 
TypedOrValueRegister(ToValue(lir, LUnaryCache::Input)); ValueOperand output = ToOutValue(lir); IonUnaryArithIC ic(liveRegs, input, output); addIC(lir, allocateIC(ic)); } typedef JSFunction* (*MakeDefaultConstructorFn)(JSContext*, HandleScript, jsbytecode*, HandleObject); static const VMFunction MakeDefaultConstructorInfo = FunctionInfo(js::MakeDefaultConstructor, "MakeDefaultConstructor"); void CodeGenerator::visitClassConstructor(LClassConstructor* lir) { pushArg(ImmPtr(nullptr)); pushArg(ImmPtr(lir->mir()->pc())); pushArg(ImmGCPtr(current->mir()->info().script())); callVM(MakeDefaultConstructorInfo, lir); } typedef JSObject* (*LambdaFn)(JSContext*, HandleFunction, HandleObject); static const VMFunction LambdaInfo = FunctionInfo(js::Lambda, "Lambda"); void CodeGenerator::visitLambdaForSingleton(LLambdaForSingleton* lir) { pushArg(ToRegister(lir->environmentChain())); pushArg(ImmGCPtr(lir->mir()->info().funUnsafe())); callVM(LambdaInfo, lir); } void CodeGenerator::visitLambda(LLambda* lir) { Register envChain = ToRegister(lir->environmentChain()); Register output = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); const LambdaFunctionInfo& info = lir->mir()->info(); OutOfLineCode* ool = oolCallVM(LambdaInfo, lir, ArgList(ImmGCPtr(info.funUnsafe()), envChain), StoreRegisterTo(output)); MOZ_ASSERT(!info.singletonType); TemplateObject templateObject(info.funUnsafe()); masm.createGCObject(output, tempReg, templateObject, gc::DefaultHeap, ool->entry()); emitLambdaInit(output, envChain, info); if (info.flags & JSFunction::EXTENDED) { static_assert(FunctionExtended::NUM_EXTENDED_SLOTS == 2, "All slots must be initialized"); masm.storeValue(UndefinedValue(), Address(output, FunctionExtended::offsetOfExtendedSlot(0))); masm.storeValue(UndefinedValue(), Address(output, FunctionExtended::offsetOfExtendedSlot(1))); } masm.bind(ool->rejoin()); } class OutOfLineLambdaArrow : public OutOfLineCodeBase { public: LLambdaArrow* lir; Label entryNoPop_; explicit 
OutOfLineLambdaArrow(LLambdaArrow* lir) : lir(lir) { } void accept(CodeGenerator* codegen) override { codegen->visitOutOfLineLambdaArrow(this); } Label* entryNoPop() { return &entryNoPop_; } }; typedef JSObject* (*LambdaArrowFn)(JSContext*, HandleFunction, HandleObject, HandleValue); static const VMFunction LambdaArrowInfo = FunctionInfo(js::LambdaArrow, "LambdaArrow"); void CodeGenerator::visitOutOfLineLambdaArrow(OutOfLineLambdaArrow* ool) { Register envChain = ToRegister(ool->lir->environmentChain()); ValueOperand newTarget = ToValue(ool->lir, LLambdaArrow::NewTargetValue); Register output = ToRegister(ool->lir->output()); const LambdaFunctionInfo& info = ool->lir->mir()->info(); // When we get here, we may need to restore part of the newTarget, // which has been conscripted into service as a temp register. masm.pop(newTarget.scratchReg()); masm.bind(ool->entryNoPop()); saveLive(ool->lir); pushArg(newTarget); pushArg(envChain); pushArg(ImmGCPtr(info.funUnsafe())); callVM(LambdaArrowInfo, ool->lir); StoreRegisterTo(output).generate(this); restoreLiveIgnore(ool->lir, StoreRegisterTo(output).clobbered()); masm.jump(ool->rejoin()); } void CodeGenerator::visitLambdaArrow(LLambdaArrow* lir) { Register envChain = ToRegister(lir->environmentChain()); ValueOperand newTarget = ToValue(lir, LLambdaArrow::NewTargetValue); Register output = ToRegister(lir->output()); const LambdaFunctionInfo& info = lir->mir()->info(); OutOfLineLambdaArrow* ool = new (alloc()) OutOfLineLambdaArrow(lir); addOutOfLineCode(ool, lir->mir()); MOZ_ASSERT(!info.useSingletonForClone); if (info.singletonType) { // If the function has a singleton type, this instruction will only be // executed once so we don't bother inlining it. masm.jump(ool->entryNoPop()); masm.bind(ool->rejoin()); return; } // There's not enough registers on x86 with the profiler enabled to request // a temp. Instead, spill part of one of the values, being prepared to // restore it if necessary on the out of line path. 
Register tempReg = newTarget.scratchReg(); masm.push(newTarget.scratchReg()); TemplateObject templateObject(info.funUnsafe()); masm.createGCObject(output, tempReg, templateObject, gc::DefaultHeap, ool->entry()); masm.pop(newTarget.scratchReg()); emitLambdaInit(output, envChain, info); // Initialize extended slots. Lexical |this| is stored in the first one. MOZ_ASSERT(info.flags & JSFunction::EXTENDED); static_assert(FunctionExtended::NUM_EXTENDED_SLOTS == 2, "All slots must be initialized"); static_assert(FunctionExtended::ARROW_NEWTARGET_SLOT == 0, "|new.target| must be stored in first slot"); masm.storeValue(newTarget, Address(output, FunctionExtended::offsetOfExtendedSlot(0))); masm.storeValue(UndefinedValue(), Address(output, FunctionExtended::offsetOfExtendedSlot(1))); masm.bind(ool->rejoin()); } void CodeGenerator::emitLambdaInit(Register output, Register envChain, const LambdaFunctionInfo& info) { // Initialize nargs and flags. We do this with a single uint32 to avoid // 16-bit writes. union { struct S { uint16_t nargs; uint16_t flags; } s; uint32_t word; } u; u.s.nargs = info.nargs; u.s.flags = info.flags; static_assert(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2, "the code below needs to be adapted"); masm.store32(Imm32(u.word), Address(output, JSFunction::offsetOfNargs())); masm.storePtr(ImmGCPtr(info.scriptOrLazyScript), Address(output, JSFunction::offsetOfScriptOrLazyScript())); masm.storePtr(envChain, Address(output, JSFunction::offsetOfEnvironment())); // No post barrier needed because output is guaranteed to be allocated in // the nursery. 
masm.storePtr(ImmGCPtr(info.funUnsafe()->displayAtom()), Address(output, JSFunction::offsetOfAtom())); } typedef bool (*SetFunNameFn)(JSContext*, HandleFunction, HandleValue, FunctionPrefixKind); static const VMFunction SetFunNameInfo = FunctionInfo(js::SetFunctionNameIfNoOwnName, "SetFunName"); void CodeGenerator::visitSetFunName(LSetFunName* lir) { pushArg(Imm32(lir->mir()->prefixKind())); pushArg(ToValue(lir, LSetFunName::NameValue)); pushArg(ToRegister(lir->fun())); callVM(SetFunNameInfo, lir); } void CodeGenerator::visitOsiPoint(LOsiPoint* lir) { // Note: markOsiPoint ensures enough space exists between the last // LOsiPoint and this one to patch adjacent call instructions. MOZ_ASSERT(masm.framePushed() == frameSize()); uint32_t osiCallPointOffset = markOsiPoint(lir); LSafepoint* safepoint = lir->associatedSafepoint(); MOZ_ASSERT(!safepoint->osiCallPointOffset()); safepoint->setOsiCallPointOffset(osiCallPointOffset); #ifdef DEBUG // There should be no movegroups or other instructions between // an instruction and its OsiPoint. This is necessary because // we use the OsiPoint's snapshot from within VM calls. 
for (LInstructionReverseIterator iter(current->rbegin(lir)); iter != current->rend(); iter++) { if (*iter == lir) continue; MOZ_ASSERT(!iter->isMoveGroup()); MOZ_ASSERT(iter->safepoint() == safepoint); break; } #endif #ifdef CHECK_OSIPOINT_REGISTERS if (shouldVerifyOsiPointRegs(safepoint)) verifyOsiPointRegs(safepoint); #endif } void CodeGenerator::visitPhi(LPhi* lir) { MOZ_CRASH("Unexpected LPhi in CodeGenerator"); } void CodeGenerator::visitGoto(LGoto* lir) { jumpToBlock(lir->target()); } typedef bool (*InterruptCheckFn)(JSContext*); static const VMFunction InterruptCheckInfo = FunctionInfo(InterruptCheck, "InterruptCheck"); void CodeGenerator::visitTableSwitch(LTableSwitch* ins) { MTableSwitch* mir = ins->mir(); Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label(); const LAllocation* temp; if (mir->getOperand(0)->type() != MIRType::Int32) { temp = ins->tempInt()->output(); // The input is a double, so try and convert it to an integer. // If it does not fit in an integer, take the default case. 
masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp), defaultcase, false); } else { temp = ins->index(); } emitTableSwitchDispatch(mir, ToRegister(temp), ToRegisterOrInvalid(ins->tempPointer())); } void CodeGenerator::visitTableSwitchV(LTableSwitchV* ins) { MTableSwitch* mir = ins->mir(); Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label(); Register index = ToRegister(ins->tempInt()); ValueOperand value = ToValue(ins, LTableSwitchV::InputValue); Register tag = masm.extractTag(value, index); masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase); Label unboxInt, isInt; masm.branchTestInt32(Assembler::Equal, tag, &unboxInt); { FloatRegister floatIndex = ToFloatRegister(ins->tempFloat()); masm.unboxDouble(value, floatIndex); masm.convertDoubleToInt32(floatIndex, index, defaultcase, false); masm.jump(&isInt); } masm.bind(&unboxInt); masm.unboxInt32(value, index); masm.bind(&isInt); emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer())); } typedef JSObject* (*DeepCloneObjectLiteralFn)(JSContext*, HandleObject, NewObjectKind); static const VMFunction DeepCloneObjectLiteralInfo = FunctionInfo(DeepCloneObjectLiteral, "DeepCloneObjectLiteral"); void CodeGenerator::visitCloneLiteral(LCloneLiteral* lir) { pushArg(ImmWord(TenuredObject)); pushArg(ToRegister(lir->getObjectLiteral())); callVM(DeepCloneObjectLiteralInfo, lir); } void CodeGenerator::visitParameter(LParameter* lir) { } void CodeGenerator::visitCallee(LCallee* lir) { Register callee = ToRegister(lir->output()); Address ptr(masm.getStackPointer(), frameSize() + JitFrameLayout::offsetOfCalleeToken()); masm.loadFunctionFromCalleeToken(ptr, callee); } void CodeGenerator::visitIsConstructing(LIsConstructing* lir) { Register output = ToRegister(lir->output()); Address calleeToken(masm.getStackPointer(), frameSize() + JitFrameLayout::offsetOfCalleeToken()); masm.loadPtr(calleeToken, output); // We must be inside a function. 
MOZ_ASSERT(current->mir()->info().script()->functionNonDelazifying()); // The low bit indicates whether this call is constructing, just clear the // other bits. static_assert(CalleeToken_Function == 0x0, "CalleeTokenTag value should match"); static_assert(CalleeToken_FunctionConstructing == 0x1, "CalleeTokenTag value should match"); masm.andPtr(Imm32(0x1), output); } void CodeGenerator::visitStart(LStart* lir) { } void CodeGenerator::visitReturn(LReturn* lir) { #if defined(JS_NUNBOX32) DebugOnly type = lir->getOperand(TYPE_INDEX); DebugOnly payload = lir->getOperand(PAYLOAD_INDEX); MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type); MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data); #elif defined(JS_PUNBOX64) DebugOnly result = lir->getOperand(0); MOZ_ASSERT(ToRegister(result) == JSReturnReg); #endif // Don't emit a jump to the return label if this is the last block. if (current->mir() != *gen->graph().poBegin()) masm.jump(&returnLabel_); } void CodeGenerator::visitOsrEntry(LOsrEntry* lir) { Register temp = ToRegister(lir->temp()); // Remember the OSR entry offset into the code buffer. masm.flushBuffer(); setOsrEntryOffset(masm.size()); #ifdef JS_TRACE_LOGGING emitTracelogStopEvent(TraceLogger_Baseline); emitTracelogStartEvent(TraceLogger_IonMonkey); #endif // If profiling, save the current frame pointer to a per-thread global field. if (isProfilerInstrumentationEnabled()) masm.profilerEnterFrame(masm.getStackPointer(), temp); // Allocate the full frame for this function // Note we have a new entry here. So we reset MacroAssembler::framePushed() // to 0, before reserving the stack. MOZ_ASSERT(masm.framePushed() == frameSize()); masm.setFramePushed(0); // Ensure that the Ion frames is properly aligned. 
masm.assertStackAlignment(JitStackAlignment, 0); masm.reserveStack(frameSize()); } void CodeGenerator::visitOsrEnvironmentChain(LOsrEnvironmentChain* lir) { const LAllocation* frame = lir->getOperand(0); const LDefinition* object = lir->getDef(0); const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfEnvironmentChain(); masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object)); } void CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir) { const LAllocation* frame = lir->getOperand(0); const LDefinition* object = lir->getDef(0); const ptrdiff_t frameOffset = BaselineFrame::reverseOffsetOfArgsObj(); masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object)); } void CodeGenerator::visitOsrValue(LOsrValue* value) { const LAllocation* frame = value->getOperand(0); const ValueOperand out = ToOutValue(value); const ptrdiff_t frameOffset = value->mir()->frameOffset(); masm.loadValue(Address(ToRegister(frame), frameOffset), out); } void CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir) { const LAllocation* frame = lir->getOperand(0); const ValueOperand out = ToOutValue(lir); Address flags = Address(ToRegister(frame), BaselineFrame::reverseOffsetOfFlags()); Address retval = Address(ToRegister(frame), BaselineFrame::reverseOffsetOfReturnValue()); masm.moveValue(UndefinedValue(), out); Label done; masm.branchTest32(Assembler::Zero, flags, Imm32(BaselineFrame::HAS_RVAL), &done); masm.loadValue(retval, out); masm.bind(&done); } void CodeGenerator::visitStackArgT(LStackArgT* lir) { const LAllocation* arg = lir->getArgument(); MIRType argType = lir->type(); uint32_t argslot = lir->argslot(); MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount()); int32_t stack_offset = StackOffsetOfPassedArg(argslot); Address dest(masm.getStackPointer(), stack_offset); if (arg->isFloatReg()) masm.storeDouble(ToFloatRegister(arg), dest); else if (arg->isRegister()) masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest); else 
masm.storeValue(arg->toConstant()->toJSValue(), dest); } void CodeGenerator::visitStackArgV(LStackArgV* lir) { ValueOperand val = ToValue(lir, 0); uint32_t argslot = lir->argslot(); MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount()); int32_t stack_offset = StackOffsetOfPassedArg(argslot); masm.storeValue(val, Address(masm.getStackPointer(), stack_offset)); } void CodeGenerator::visitMoveGroup(LMoveGroup* group) { if (!group->numMoves()) return; MoveResolver& resolver = masm.moveResolver(); for (size_t i = 0; i < group->numMoves(); i++) { const LMove& move = group->getMove(i); LAllocation from = move.from(); LAllocation to = move.to(); LDefinition::Type type = move.type(); // No bogus moves. MOZ_ASSERT(from != to); MOZ_ASSERT(!from.isConstant()); MoveOp::Type moveType; switch (type) { case LDefinition::OBJECT: case LDefinition::SLOTS: #ifdef JS_NUNBOX32 case LDefinition::TYPE: case LDefinition::PAYLOAD: #else case LDefinition::BOX: #endif case LDefinition::GENERAL: moveType = MoveOp::GENERAL; break; case LDefinition::INT32: moveType = MoveOp::INT32; break; case LDefinition::FLOAT32: moveType = MoveOp::FLOAT32; break; case LDefinition::DOUBLE: moveType = MoveOp::DOUBLE; break; case LDefinition::SIMD128INT: moveType = MoveOp::SIMD128INT; break; case LDefinition::SIMD128FLOAT: moveType = MoveOp::SIMD128FLOAT; break; default: MOZ_CRASH("Unexpected move type"); } masm.propagateOOM(resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType)); } masm.propagateOOM(resolver.resolve()); if (masm.oom()) return; MoveEmitter emitter(masm); #ifdef JS_CODEGEN_X86 if (group->maybeScratchRegister().isGeneralReg()) emitter.setScratchRegister(group->maybeScratchRegister().toGeneralReg()->reg()); else resolver.sortMemoryToMemoryMoves(); #endif emitter.emit(resolver); emitter.finish(); } void CodeGenerator::visitInteger(LInteger* lir) { masm.move32(Imm32(lir->getValue()), ToRegister(lir->output())); } void CodeGenerator::visitInteger64(LInteger64* lir) { 
masm.move64(Imm64(lir->getValue()), ToOutRegister64(lir)); } void CodeGenerator::visitPointer(LPointer* lir) { if (lir->kind() == LPointer::GC_THING) masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output())); else masm.movePtr(ImmPtr(lir->ptr()), ToRegister(lir->output())); } void CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir) { // No-op. } void CodeGenerator::visitSlots(LSlots* lir) { Address slots(ToRegister(lir->object()), NativeObject::offsetOfSlots()); masm.loadPtr(slots, ToRegister(lir->output())); } void CodeGenerator::visitLoadSlotT(LLoadSlotT* lir) { Register base = ToRegister(lir->slots()); int32_t offset = lir->mir()->slot() * sizeof(js::Value); AnyRegister result = ToAnyRegister(lir->output()); masm.loadUnboxedValue(Address(base, offset), lir->mir()->type(), result); } void CodeGenerator::visitLoadSlotV(LLoadSlotV* lir) { ValueOperand dest = ToOutValue(lir); Register base = ToRegister(lir->input()); int32_t offset = lir->mir()->slot() * sizeof(js::Value); masm.loadValue(Address(base, offset), dest); } void CodeGenerator::visitStoreSlotT(LStoreSlotT* lir) { Register base = ToRegister(lir->slots()); int32_t offset = lir->mir()->slot() * sizeof(js::Value); Address dest(base, offset); if (lir->mir()->needsBarrier()) emitPreBarrier(dest); MIRType valueType = lir->mir()->value()->type(); if (valueType == MIRType::ObjectOrNull) { masm.storeObjectOrNull(ToRegister(lir->value()), dest); } else { mozilla::Maybe value; if (lir->value()->isConstant()) value.emplace(ConstantOrRegister(lir->value()->toConstant()->toJSValue())); else value.emplace(TypedOrValueRegister(valueType, ToAnyRegister(lir->value()))); masm.storeUnboxedValue(value.ref(), valueType, dest, lir->mir()->slotType()); } } void CodeGenerator::visitStoreSlotV(LStoreSlotV* lir) { Register base = ToRegister(lir->slots()); int32_t offset = lir->mir()->slot() * sizeof(Value); const ValueOperand value = ToValue(lir, LStoreSlotV::Value); if (lir->mir()->needsBarrier()) 
        emitPreBarrier(Address(base, offset));

    masm.storeValue(value, Address(base, offset));
}

// Branch to |miss| unless |obj| matches |guard|. If the guard has both a group
// and a shape, the shape guard applies to the unboxed expando object, which is
// loaded into |expandoScratch| (and must be non-null).
static void
GuardReceiver(MacroAssembler& masm, const ReceiverGuard& guard,
              Register obj, Register expandoScratch, Register scratch,
              Label* miss, bool checkNullExpando)
{
    if (guard.group) {
        masm.branchTestObjGroup(Assembler::NotEqual, obj, guard.group, scratch, obj, miss);

        Address expandoAddress(obj, UnboxedPlainObject::offsetOfExpando());
        if (guard.shape) {
            masm.loadPtr(expandoAddress, expandoScratch);
            masm.branchPtr(Assembler::Equal, expandoScratch, ImmWord(0), miss);
            masm.branchTestObjShape(Assembler::NotEqual, expandoScratch, guard.shape,
                                    scratch, expandoScratch, miss);
        } else if (checkNullExpando) {
            masm.branchPtr(Assembler::NotEqual, expandoAddress, ImmWord(0), miss);
        }
    } else {
        masm.branchTestObjShape(Assembler::NotEqual, obj, guard.shape, scratch, obj, miss);
    }
}

// Emit a chain of receiver guards; each matching receiver loads the property
// (fixed slot, dynamic slot, or unboxed) into |output|. Falling off the last
// guard bails out.
void
CodeGenerator::emitGetPropertyPolymorphic(LInstruction* ins, Register obj,
                                          Register expandoScratch, Register scratch,
                                          const TypedOrValueRegister& output)
{
    MGetPropertyPolymorphic* mir = ins->mirRaw()->toGetPropertyPolymorphic();

    Label done;

    for (size_t i = 0; i < mir->numReceivers(); i++) {
        ReceiverGuard receiver = mir->receiver(i);

        Label next;
        masm.comment("GuardReceiver");
        GuardReceiver(masm, receiver, obj, expandoScratch, scratch, &next,
                      /* checkNullExpando = */ false);

        if (receiver.shape) {
            masm.comment("loadTypedOrValue");
            // If this is an unboxed expando access, GuardReceiver loaded the
            // expando object into expandoScratch.
            Register target = receiver.group ? expandoScratch : obj;

            Shape* shape = mir->shape(i);
            if (shape->slot() < shape->numFixedSlots()) {
                // Fixed slot.
                masm.loadTypedOrValue(Address(target,
                                              NativeObject::getFixedSlotOffset(shape->slot())),
                                      output);
            } else {
                // Dynamic slot.
                uint32_t offset = (shape->slot() - shape->numFixedSlots()) * sizeof(js::Value);
                masm.loadPtr(Address(target, NativeObject::offsetOfSlots()), scratch);
                masm.loadTypedOrValue(Address(scratch, offset), output);
            }
        } else {
            masm.comment("loadUnboxedProperty");
            const UnboxedLayout::Property* property =
                receiver.group->unboxedLayoutDontCheckGeneration().lookup(mir->name());
            Address propertyAddr(obj, UnboxedPlainObject::offsetOfData() + property->offset);

            masm.loadUnboxedProperty(propertyAddr, property->type, output);
        }

        if (i == mir->numReceivers() - 1) {
            // Last receiver: a guard miss is a bailout.
            bailoutFrom(&next, ins->snapshot());
        } else {
            masm.jump(&done);
            masm.bind(&next);
        }
    }

    masm.bind(&done);
}

void
CodeGenerator::visitGetPropertyPolymorphicV(LGetPropertyPolymorphicV* ins)
{
    Register obj = ToRegister(ins->obj());
    ValueOperand output = ToOutValue(ins);
    Register temp = ToRegister(ins->temp());
    emitGetPropertyPolymorphic(ins, obj, output.scratchReg(), temp, output);
}

void
CodeGenerator::visitGetPropertyPolymorphicT(LGetPropertyPolymorphicT* ins)
{
    Register obj = ToRegister(ins->obj());
    TypedOrValueRegister output(ins->mir()->type(), ToAnyRegister(ins->output()));
    Register temp1 = ToRegister(ins->temp1());
    // A double output can't double as a scratch GPR, so use the extra temp.
    Register temp2 = (output.type() == MIRType::Double) ?
ToRegister(ins->temp2()) : output.typedReg().gpr(); emitGetPropertyPolymorphic(ins, obj, temp1, temp2, output); } template static void EmitUnboxedPreBarrier(MacroAssembler &masm, T address, JSValueType type) { if (type == JSVAL_TYPE_OBJECT) masm.guardedCallPreBarrier(address, MIRType::Object); else if (type == JSVAL_TYPE_STRING) masm.guardedCallPreBarrier(address, MIRType::String); else MOZ_ASSERT(!UnboxedTypeNeedsPreBarrier(type)); } void CodeGenerator::emitSetPropertyPolymorphic(LInstruction* ins, Register obj, Register expandoScratch, Register scratch, const ConstantOrRegister& value) { MSetPropertyPolymorphic* mir = ins->mirRaw()->toSetPropertyPolymorphic(); Label done; for (size_t i = 0; i < mir->numReceivers(); i++) { ReceiverGuard receiver = mir->receiver(i); Label next; GuardReceiver(masm, receiver, obj, expandoScratch, scratch, &next, /* checkNullExpando = */ false); if (receiver.shape) { // If this is an unboxed expando access, GuardReceiver loaded the // expando object into expandoScratch. Register target = receiver.group ? expandoScratch : obj; Shape* shape = mir->shape(i); if (shape->slot() < shape->numFixedSlots()) { // Fixed slot. Address addr(target, NativeObject::getFixedSlotOffset(shape->slot())); if (mir->needsBarrier()) emitPreBarrier(addr); masm.storeConstantOrRegister(value, addr); } else { // Dynamic slot. 
masm.loadPtr(Address(target, NativeObject::offsetOfSlots()), scratch); Address addr(scratch, (shape->slot() - shape->numFixedSlots()) * sizeof(js::Value)); if (mir->needsBarrier()) emitPreBarrier(addr); masm.storeConstantOrRegister(value, addr); } } else { const UnboxedLayout::Property* property = receiver.group->unboxedLayoutDontCheckGeneration().lookup(mir->name()); Address propertyAddr(obj, UnboxedPlainObject::offsetOfData() + property->offset); EmitUnboxedPreBarrier(masm, propertyAddr, property->type); masm.storeUnboxedProperty(propertyAddr, property->type, value, nullptr); } if (i == mir->numReceivers() - 1) { bailoutFrom(&next, ins->snapshot()); } else { masm.jump(&done); masm.bind(&next); } } masm.bind(&done); } void CodeGenerator::visitSetPropertyPolymorphicV(LSetPropertyPolymorphicV* ins) { Register obj = ToRegister(ins->obj()); Register temp1 = ToRegister(ins->temp1()); Register temp2 = ToRegister(ins->temp2()); ValueOperand value = ToValue(ins, LSetPropertyPolymorphicV::Value); emitSetPropertyPolymorphic(ins, obj, temp1, temp2, TypedOrValueRegister(value)); } void CodeGenerator::visitSetPropertyPolymorphicT(LSetPropertyPolymorphicT* ins) { Register obj = ToRegister(ins->obj()); Register temp1 = ToRegister(ins->temp1()); Register temp2 = ToRegister(ins->temp2()); mozilla::Maybe value; if (ins->mir()->value()->isConstant()) value.emplace(ConstantOrRegister(ins->mir()->value()->toConstant()->toJSValue())); else value.emplace(TypedOrValueRegister(ins->mir()->value()->type(), ToAnyRegister(ins->value()))); emitSetPropertyPolymorphic(ins, obj, temp1, temp2, value.ref()); } void CodeGenerator::visitElements(LElements* lir) { Address elements(ToRegister(lir->object()), NativeObject::offsetOfElements()); masm.loadPtr(elements, ToRegister(lir->output())); } typedef void (*ConvertElementsToDoublesFn)(JSContext*, uintptr_t); static const VMFunction ConvertElementsToDoublesInfo = FunctionInfo(ObjectElements::ConvertElementsToDoubles, 
"ObjectElements::ConvertElementsToDoubles"); void CodeGenerator::visitConvertElementsToDoubles(LConvertElementsToDoubles* lir) { Register elements = ToRegister(lir->elements()); OutOfLineCode* ool = oolCallVM(ConvertElementsToDoublesInfo, lir, ArgList(elements), StoreNothing()); Address convertedAddress(elements, ObjectElements::offsetOfFlags()); Imm32 bit(ObjectElements::CONVERT_DOUBLE_ELEMENTS); masm.branchTest32(Assembler::Zero, convertedAddress, bit, ool->entry()); masm.bind(ool->rejoin()); } void CodeGenerator::visitMaybeToDoubleElement(LMaybeToDoubleElement* lir) { Register elements = ToRegister(lir->elements()); Register value = ToRegister(lir->value()); ValueOperand out = ToOutValue(lir); FloatRegister temp = ToFloatRegister(lir->tempFloat()); Label convert, done; // If the CONVERT_DOUBLE_ELEMENTS flag is set, convert the int32 // value to double. Else, just box it. masm.branchTest32(Assembler::NonZero, Address(elements, ObjectElements::offsetOfFlags()), Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS), &convert); masm.tagValue(JSVAL_TYPE_INT32, value, out); masm.jump(&done); masm.bind(&convert); masm.convertInt32ToDouble(value, temp); masm.boxDouble(temp, out, temp); masm.bind(&done); } typedef bool (*CopyElementsForWriteFn)(JSContext*, NativeObject*); static const VMFunction CopyElementsForWriteInfo = FunctionInfo(NativeObject::CopyElementsForWrite, "NativeObject::CopyElementsForWrite"); void CodeGenerator::visitMaybeCopyElementsForWrite(LMaybeCopyElementsForWrite* lir) { Register object = ToRegister(lir->object()); Register temp = ToRegister(lir->temp()); OutOfLineCode* ool = oolCallVM(CopyElementsForWriteInfo, lir, ArgList(object), StoreNothing()); if (lir->mir()->checkNative()) masm.branchIfNonNativeObj(object, temp, ool->rejoin()); masm.loadPtr(Address(object, NativeObject::offsetOfElements()), temp); masm.branchTest32(Assembler::NonZero, Address(temp, ObjectElements::offsetOfFlags()), Imm32(ObjectElements::COPY_ON_WRITE), ool->entry()); 
masm.bind(ool->rejoin()); } void CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment* lir) { Address environment(ToRegister(lir->function()), JSFunction::offsetOfEnvironment()); masm.loadPtr(environment, ToRegister(lir->output())); } void CodeGenerator::visitHomeObject(LHomeObject* lir) { Address homeObject(ToRegister(lir->function()), FunctionExtended::offsetOfMethodHomeObjectSlot()); #ifdef DEBUG Label isObject; masm.branchTestObject(Assembler::Equal, homeObject, &isObject); masm.assumeUnreachable("[[HomeObject]] must be Object"); masm.bind(&isObject); #endif masm.unboxObject(homeObject, ToRegister(lir->output())); } typedef JSObject* (*HomeObjectSuperBaseFn)(JSContext*, HandleObject); static const VMFunction HomeObjectSuperBaseInfo = FunctionInfo(HomeObjectSuperBase, "HomeObjectSuperBase"); void CodeGenerator::visitHomeObjectSuperBase(LHomeObjectSuperBase* lir) { Register homeObject = ToRegister(lir->homeObject()); Register output = ToRegister(lir->output()); OutOfLineCode* ool = oolCallVM(HomeObjectSuperBaseInfo, lir, ArgList(homeObject), StoreRegisterTo(output)); masm.loadObjProto(homeObject, output); masm.branchPtr(Assembler::BelowOrEqual, output, ImmWord(1), ool->entry()); masm.bind(ool->rejoin()); } typedef LexicalEnvironmentObject* (*NewLexicalEnvironmentObjectFn)(JSContext*, Handle, HandleObject, gc::InitialHeap); static const VMFunction NewLexicalEnvironmentObjectInfo = FunctionInfo(LexicalEnvironmentObject::create, "LexicalEnvironmentObject::create"); void CodeGenerator::visitNewLexicalEnvironmentObject(LNewLexicalEnvironmentObject* lir) { pushArg(Imm32(gc::DefaultHeap)); pushArg(ToRegister(lir->enclosing())); pushArg(ImmGCPtr(lir->mir()->scope())); callVM(NewLexicalEnvironmentObjectInfo, lir); } typedef JSObject* (*CopyLexicalEnvironmentObjectFn)(JSContext*, HandleObject, bool); static const VMFunction CopyLexicalEnvironmentObjectInfo = FunctionInfo(js::jit::CopyLexicalEnvironmentObject, "js::jit::CopyLexicalEnvironmentObject"); void 
CodeGenerator::visitCopyLexicalEnvironmentObject(LCopyLexicalEnvironmentObject* lir)
{
    pushArg(Imm32(lir->mir()->copySlots()));
    pushArg(ToRegister(lir->env()));
    callVM(CopyLexicalEnvironmentObjectInfo, lir);
}

void
CodeGenerator::visitGuardShape(LGuardShape* guard)
{
    Register obj = ToRegister(guard->input());
    Register temp = ToTempRegisterOrInvalid(guard->temp());
    Label bail;
    masm.branchTestObjShape(Assembler::NotEqual, obj, guard->mir()->shape(), temp, obj, &bail);
    bailoutFrom(&bail, guard->snapshot());
}

void
CodeGenerator::visitGuardObjectGroup(LGuardObjectGroup* guard)
{
    Register obj = ToRegister(guard->input());
    Register temp = ToTempRegisterOrInvalid(guard->temp());
    // bailOnEquality inverts the guard: bail when the group *does* match.
    Assembler::Condition cond =
        guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
    Label bail;
    masm.branchTestObjGroup(cond, obj, guard->mir()->group(), temp, obj, &bail);
    bailoutFrom(&bail, guard->snapshot());
}

void
CodeGenerator::visitGuardObjectIdentity(LGuardObjectIdentity* guard)
{
    Register input = ToRegister(guard->input());
    Register expected = ToRegister(guard->expected());

    Assembler::Condition cond =
        guard->mir()->bailOnEquality() ? Assembler::Equal : Assembler::NotEqual;
    bailoutCmpPtr(cond, input, expected, guard->snapshot());
}

void
CodeGenerator::visitGuardReceiverPolymorphic(LGuardReceiverPolymorphic* lir)
{
    const MGuardReceiverPolymorphic* mir = lir->mir();
    Register obj = ToRegister(lir->object());
    Register temp1 = ToRegister(lir->temp1());
    Register temp2 = ToRegister(lir->temp2());

    Label done;

    // Accept the object if it matches any of the receivers; bail out if the
    // last guard also misses.
    for (size_t i = 0; i < mir->numReceivers(); i++) {
        const ReceiverGuard& receiver = mir->receiver(i);

        Label next;
        GuardReceiver(masm, receiver, obj, temp1, temp2, &next, /* checkNullExpando = */ true);

        if (i == mir->numReceivers() - 1) {
            bailoutFrom(&next, lir->snapshot());
        } else {
            masm.jump(&done);
            masm.bind(&next);
        }
    }

    masm.bind(&done);
}

void
CodeGenerator::visitGuardUnboxedExpando(LGuardUnboxedExpando* lir)
{
    Label miss;

    Register obj = ToRegister(lir->object());
    masm.branchPtr(lir->mir()->requireExpando() ? Assembler::Equal : Assembler::NotEqual,
                   Address(obj, UnboxedPlainObject::offsetOfExpando()),
                   ImmWord(0), &miss);

    bailoutFrom(&miss, lir->snapshot());
}

void
CodeGenerator::visitLoadUnboxedExpando(LLoadUnboxedExpando* lir)
{
    Register obj = ToRegister(lir->object());
    Register result = ToRegister(lir->getDef(0));

    masm.loadPtr(Address(obj, UnboxedPlainObject::offsetOfExpando()), result);
}

void
CodeGenerator::visitTypeBarrierV(LTypeBarrierV* lir)
{
    ValueOperand operand = ToValue(lir, LTypeBarrierV::Input);
    Register unboxScratch = ToTempRegisterOrInvalid(lir->unboxTemp());
    Register objScratch = ToTempRegisterOrInvalid(lir->objTemp());

    // guardObjectType may zero the payload/Value register on speculative paths
    // (we should have a defineReuseInput allocation in this case).
    Register spectreRegToZero = operand.payloadOrValueReg();

    Label miss;
    masm.guardTypeSet(operand, lir->mir()->resultTypeSet(), lir->mir()->barrierKind(),
                      unboxScratch, objScratch, spectreRegToZero, &miss);
    bailoutFrom(&miss, lir->snapshot());
}

void
CodeGenerator::visitTypeBarrierO(LTypeBarrierO* lir)
{
    Register obj = ToRegister(lir->object());
    Register scratch = ToTempRegisterOrInvalid(lir->temp());
    Label miss, ok;

    if (lir->mir()->type() == MIRType::ObjectOrNull) {
        masm.comment("Object or Null");
        // A null pointer either satisfies the barrier (if the type set allows
        // null) or is an immediate miss.
        Label* nullTarget =
            lir->mir()->resultTypeSet()->mightBeMIRType(MIRType::Null) ? &ok : &miss;
        masm.branchTestPtr(Assembler::Zero, obj, obj, nullTarget);
    } else {
        MOZ_ASSERT(lir->mir()->type() == MIRType::Object);
        MOZ_ASSERT(lir->mir()->barrierKind() != BarrierKind::TypeTagOnly);
    }

    if (lir->mir()->barrierKind() != BarrierKind::TypeTagOnly) {
        masm.comment("Type tag only");
        // guardObjectType may zero the object register on speculative paths
        // (we should have a defineReuseInput allocation in this case).
        Register spectreRegToZero = obj;
        masm.guardObjectType(obj, lir->mir()->resultTypeSet(), scratch, spectreRegToZero, &miss);
    }

    bailoutFrom(&miss, lir->snapshot());
    masm.bind(&ok);
}

// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase { LInstruction* lir_; const LAllocation* object_; public: OutOfLineCallPostWriteBarrier(LInstruction* lir, const LAllocation* object) : lir_(lir), object_(object) { } void accept(CodeGenerator* codegen) override { codegen->visitOutOfLineCallPostWriteBarrier(this); } LInstruction* lir() const { return lir_; } const LAllocation* object() const { return object_; } }; static void EmitStoreBufferCheckForConstant(MacroAssembler& masm, const gc::TenuredCell* cell, AllocatableGeneralRegisterSet& regs, Label* exit, Label* callVM) { Register temp = regs.takeAny(); gc::Arena* arena = cell->arena(); Register cells = temp; masm.loadPtr(AbsoluteAddress(&arena->bufferedCells()), cells); size_t index = gc::ArenaCellSet::getCellIndex(cell); size_t word; uint32_t mask; gc::ArenaCellSet::getWordIndexAndMask(index, &word, &mask); size_t offset = gc::ArenaCellSet::offsetOfBits() + word * sizeof(uint32_t); masm.branchTest32(Assembler::NonZero, Address(cells, offset), Imm32(mask), exit); // Check whether this is the sentinel set and if so call the VM to allocate // one for this arena. masm.branchPtr(Assembler::Equal, Address(cells, gc::ArenaCellSet::offsetOfArena()), ImmPtr(nullptr), callVM); // Add the cell to the set. masm.or32(Imm32(mask), Address(cells, offset)); masm.jump(exit); regs.add(temp); } static void EmitPostWriteBarrier(MacroAssembler& masm, CompileRuntime* runtime, Register objreg, JSObject* maybeConstant, bool isGlobal, AllocatableGeneralRegisterSet& regs) { MOZ_ASSERT_IF(isGlobal, maybeConstant); Label callVM; Label exit; // We already have a fast path to check whether a global is in the store // buffer. if (!isGlobal && maybeConstant) EmitStoreBufferCheckForConstant(masm, &maybeConstant->asTenured(), regs, &exit, &callVM); // Call into the VM to barrier the write. 
masm.bind(&callVM); Register runtimereg = regs.takeAny(); masm.mov(ImmPtr(runtime), runtimereg); masm.setupUnalignedABICall(regs.takeAny()); masm.passABIArg(runtimereg); masm.passABIArg(objreg); if (isGlobal) masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostGlobalWriteBarrier)); else masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostWriteBarrier)); masm.bind(&exit); } void CodeGenerator::emitPostWriteBarrier(const LAllocation* obj) { AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile()); Register objreg; JSObject* object = nullptr; bool isGlobal = false; if (obj->isConstant()) { object = &obj->toConstant()->toObject(); isGlobal = isGlobalObject(object); objreg = regs.takeAny(); masm.movePtr(ImmGCPtr(object), objreg); } else { objreg = ToRegister(obj); regs.takeUnchecked(objreg); } EmitPostWriteBarrier(masm, gen->runtime, objreg, object, isGlobal, regs); } void CodeGenerator::emitPostWriteBarrier(Register objreg) { AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile()); regs.takeUnchecked(objreg); EmitPostWriteBarrier(masm, gen->runtime, objreg, nullptr, false, regs); } void CodeGenerator::visitOutOfLineCallPostWriteBarrier(OutOfLineCallPostWriteBarrier* ool) { saveLiveVolatile(ool->lir()); const LAllocation* obj = ool->object(); emitPostWriteBarrier(obj); restoreLiveVolatile(ool->lir()); masm.jump(ool->rejoin()); } void CodeGenerator::maybeEmitGlobalBarrierCheck(const LAllocation* maybeGlobal, OutOfLineCode* ool) { // Check whether an object is a global that we have already barriered before // calling into the VM. // // We only check for the script's global, not other globals within the same // compartment, because we bake in a pointer to realm->globalWriteBarriered // and doing that would be invalid for other realms because they could be // collected before the Ion code is discarded. 
if (!maybeGlobal->isConstant()) return; JSObject* obj = &maybeGlobal->toConstant()->toObject(); if (gen->realm->maybeGlobal() != obj) return; const uint32_t* addr = gen->realm->addressOfGlobalWriteBarriered(); masm.branch32(Assembler::NotEqual, AbsoluteAddress(addr), Imm32(0), ool->rejoin()); } template void CodeGenerator::visitPostWriteBarrierCommon(LPostBarrierType* lir, OutOfLineCode* ool) { addOutOfLineCode(ool, lir->mir()); Register temp = ToTempRegisterOrInvalid(lir->temp()); if (lir->object()->isConstant()) { // Constant nursery objects cannot appear here, see // LIRGenerator::visitPostWriteElementBarrier. MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject())); } else { masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()), temp, ool->rejoin()); } maybeEmitGlobalBarrierCheck(lir->object(), ool); Register value = ToRegister(lir->value()); if (nurseryType == MIRType::Object) { if (lir->mir()->value()->type() == MIRType::ObjectOrNull) masm.branchTestPtr(Assembler::Zero, value, value, ool->rejoin()); else MOZ_ASSERT(lir->mir()->value()->type() == MIRType::Object); } else { MOZ_ASSERT(nurseryType == MIRType::String); MOZ_ASSERT(lir->mir()->value()->type() == MIRType::String); } masm.branchPtrInNurseryChunk(Assembler::Equal, value, temp, ool->entry()); masm.bind(ool->rejoin()); } template void CodeGenerator::visitPostWriteBarrierCommonV(LPostBarrierType* lir, OutOfLineCode* ool) { addOutOfLineCode(ool, lir->mir()); Register temp = ToTempRegisterOrInvalid(lir->temp()); if (lir->object()->isConstant()) { // Constant nursery objects cannot appear here, see LIRGenerator::visitPostWriteElementBarrier. 
MOZ_ASSERT(!IsInsideNursery(&lir->object()->toConstant()->toObject())); } else { masm.branchPtrInNurseryChunk(Assembler::Equal, ToRegister(lir->object()), temp, ool->rejoin()); } maybeEmitGlobalBarrierCheck(lir->object(), ool); ValueOperand value = ToValue(lir, LPostBarrierType::Input); // Bug 1386094 - most callers only need to check for object or string, not // both. masm.branchValueIsNurseryCell(Assembler::Equal, value, temp, ool->entry()); masm.bind(ool->rejoin()); } void CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO* lir) { auto ool = new(alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object()); visitPostWriteBarrierCommon(lir, ool); } void CodeGenerator::visitPostWriteBarrierS(LPostWriteBarrierS* lir) { auto ool = new(alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object()); visitPostWriteBarrierCommon(lir, ool); } void CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV* lir) { auto ool = new(alloc()) OutOfLineCallPostWriteBarrier(lir, lir->object()); visitPostWriteBarrierCommonV(lir, ool); } // Out-of-line path to update the store buffer. class OutOfLineCallPostWriteElementBarrier : public OutOfLineCodeBase { LInstruction* lir_; const LAllocation* object_; const LAllocation* index_; public: OutOfLineCallPostWriteElementBarrier(LInstruction* lir, const LAllocation* object, const LAllocation* index) : lir_(lir), object_(object), index_(index) { } void accept(CodeGenerator* codegen) override { codegen->visitOutOfLineCallPostWriteElementBarrier(this); } LInstruction* lir() const { return lir_; } const LAllocation* object() const { return object_; } const LAllocation* index() const { return index_; } }; void CodeGenerator::visitOutOfLineCallPostWriteElementBarrier(OutOfLineCallPostWriteElementBarrier* ool) { saveLiveVolatile(ool->lir()); const LAllocation* obj = ool->object(); const LAllocation* index = ool->index(); Register objreg = obj->isConstant() ? 
InvalidReg : ToRegister(obj); Register indexreg = ToRegister(index); AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile()); regs.takeUnchecked(indexreg); if (obj->isConstant()) { objreg = regs.takeAny(); masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg); } else { regs.takeUnchecked(objreg); } Register runtimereg = regs.takeAny(); masm.setupUnalignedABICall(runtimereg); masm.mov(ImmPtr(gen->runtime), runtimereg); masm.passABIArg(runtimereg); masm.passABIArg(objreg); masm.passABIArg(indexreg); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, (PostWriteElementBarrier))); restoreLiveVolatile(ool->lir()); masm.jump(ool->rejoin()); } void CodeGenerator::visitPostWriteElementBarrierO(LPostWriteElementBarrierO* lir) { auto ool = new(alloc()) OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index()); visitPostWriteBarrierCommon(lir, ool); } void CodeGenerator::visitPostWriteElementBarrierS(LPostWriteElementBarrierS* lir) { auto ool = new(alloc()) OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index()); visitPostWriteBarrierCommon(lir, ool); } void CodeGenerator::visitPostWriteElementBarrierV(LPostWriteElementBarrierV* lir) { auto ool = new(alloc()) OutOfLineCallPostWriteElementBarrier(lir, lir->object(), lir->index()); visitPostWriteBarrierCommonV(lir, ool); } void CodeGenerator::visitCallNative(LCallNative* call) { WrappedFunction* target = call->getSingleTarget(); MOZ_ASSERT(target); MOZ_ASSERT(target->isNativeWithCppEntry()); int callargslot = call->argslot(); int unusedStack = StackOffsetOfPassedArg(callargslot); // Registers used for callWithABI() argument-passing. const Register argContextReg = ToRegister(call->getArgContextReg()); const Register argUintNReg = ToRegister(call->getArgUintNReg()); const Register argVpReg = ToRegister(call->getArgVpReg()); // Misc. temporary registers. 
const Register tempReg = ToRegister(call->getTempReg());

    // NOTE(review): template argument lost in extraction — upstream is
    // DebugOnly<uint32_t>; confirm.
    DebugOnly initialStack = masm.framePushed();

    masm.checkStackAlignment();

    // Native functions have the signature:
    //  bool (*)(JSContext*, unsigned, Value* vp)
    // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
    // are the function arguments.

    // Allocate space for the outparam, moving the StackPointer to what will be &vp[1].
    masm.adjustStack(unusedStack);

    // Push a Value containing the callee object: natives are allowed to access
    // their callee before setting the return value. The StackPointer is moved
    // to &vp[0].
    masm.Push(ObjectValue(*target->rawJSFunction()));

    // Preload arguments into registers.
    masm.loadJSContext(argContextReg);
    masm.move32(Imm32(call->numActualArgs()), argUintNReg);
    masm.moveStackPtrTo(argVpReg);

    masm.Push(argUintNReg);

    if (call->mir()->maybeCrossRealm()) {
        masm.movePtr(ImmGCPtr(target->rawJSFunction()), tempReg);
        masm.switchToObjectRealm(tempReg, tempReg);
    }

    // Construct native exit frame.
    uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
    masm.enterFakeExitFrameForNative(argContextReg, tempReg, call->mir()->isConstructing());

    markSafepointAt(safepointOffset, call);

    emitTracelogStartEvent(TraceLogger_Call);

    // Construct and execute call.
    masm.setupUnalignedABICall(tempReg);
    masm.passABIArg(argContextReg);
    masm.passABIArg(argUintNReg);
    masm.passABIArg(argVpReg);

    JSNative native = target->native();
    if (call->ignoresReturnValue() && target->hasJitInfo()) {
        // Prefer the variant that skips building the return value.
        const JSJitInfo* jitInfo = target->jitInfo();
        if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative)
            native = jitInfo->ignoresReturnValueMethod;
    }
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, native), MoveOp::GENERAL,
                     CheckUnsafeCallWithABI::DontCheckHasExitFrame);

    emitTracelogStopEvent(TraceLogger_Call);

    // Test for failure.
    masm.branchIfFalseBool(ReturnReg, masm.failureLabel());

    if (call->mir()->maybeCrossRealm())
        masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);

    // Load the outparam vp[0] into output register(s).
    masm.loadValue(Address(masm.getStackPointer(), NativeExitFrameLayout::offsetOfResult()),
                   JSReturnOperand);

    // Until C++ code is instrumented against Spectre, prevent speculative
    // execution from returning any private data.
    if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
        call->mir()->hasLiveDefUses())
    {
        masm.speculationBarrier();
    }

    // The next instruction is removing the footer of the exit frame, so there
    // is no need for leaveFakeExitFrame.

    // Move the StackPointer back to its original location, unwinding the native exit frame.
    masm.adjustStack(NativeExitFrameLayout::Size() - unusedStack);
    MOZ_ASSERT(masm.framePushed() == initialStack);
}

// Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object into
// |priv|. The slot is always the first one but may be fixed or non-fixed.
static void
LoadDOMPrivate(MacroAssembler& masm, Register obj, Register priv, DOMObjectKind kind)
{
    // Load the value in DOM_OBJECT_SLOT for a native or proxy DOM object. This
    // will be in the first slot but may be fixed or non-fixed.
    MOZ_ASSERT(obj != priv);

    // Check if it's a proxy.
    Label isProxy, done;
    if (kind == DOMObjectKind::Unknown)
        masm.branchTestObjectIsProxy(true, obj, priv, &isProxy);

    if (kind != DOMObjectKind::Proxy) {
        // If it's a native object, the value must be in a fixed slot.
        masm.debugAssertObjHasFixedSlots(obj, priv);
        masm.loadPrivate(Address(obj, NativeObject::getFixedSlotOffset(0)), priv);
        if (kind == DOMObjectKind::Unknown)
            masm.jump(&done);
    }

    if (kind != DOMObjectKind::Native) {
        masm.bind(&isProxy);
#ifdef DEBUG
        // Sanity check: it must be a DOM proxy.
Label isDOMProxy;
        masm.branchTestProxyHandlerFamily(Assembler::Equal, obj, priv,
                                          GetDOMProxyHandlerFamily(), &isDOMProxy);
        masm.assumeUnreachable("Expected a DOM proxy");
        masm.bind(&isDOMProxy);
#endif
        // Proxy private lives in the proxy's reserved slot 0.
        masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), priv);
        masm.loadPrivate(Address(priv, detail::ProxyReservedSlots::offsetOfSlot(0)), priv);
    }

    masm.bind(&done);
}

// Call a DOM native using the JSJitMethodCallArgs calling convention: build an
// IonDOMMethod exit frame and invoke the JIT-info method directly.
void
CodeGenerator::visitCallDOMNative(LCallDOMNative* call)
{
    WrappedFunction* target = call->getSingleTarget();
    MOZ_ASSERT(target);
    MOZ_ASSERT(target->isNative());
    MOZ_ASSERT(target->hasJitInfo());
    MOZ_ASSERT(call->mir()->isCallDOMNative());

    int callargslot = call->argslot();
    int unusedStack = StackOffsetOfPassedArg(callargslot);

    // Registers used for callWithABI() argument-passing.
    const Register argJSContext = ToRegister(call->getArgJSContext());
    const Register argObj = ToRegister(call->getArgObj());
    const Register argPrivate = ToRegister(call->getArgPrivate());
    const Register argArgs = ToRegister(call->getArgArgs());

    // NOTE(review): template argument lost in extraction — upstream is
    // DebugOnly<uint32_t>; confirm.
    DebugOnly initialStack = masm.framePushed();

    masm.checkStackAlignment();

    // DOM methods have the signature:
    //  bool (*)(JSContext*, HandleObject, void* private, const JSJitMethodCallArgs& args)
    // Where args is initialized from an argc and a vp, vp[0] is space for an
    // outparam and the callee, vp[1] is |this|, and vp[2] onward are the
    // function arguments.  Note that args stores the argv, not the vp, and
    // argv == vp + 2.

    // Nestle the stack up against the pushed arguments, leaving StackPointer at
    // &vp[1]
    masm.adjustStack(unusedStack);
    // argObj is filled with the extracted object, then returned.
    Register obj = masm.extractObject(Address(masm.getStackPointer(), 0), argObj);
    MOZ_ASSERT(obj == argObj);

    // Push a Value containing the callee object: natives are allowed to access their callee before
    // setting the return value. After this the StackPointer points to &vp[0].
    masm.Push(ObjectValue(*target->rawJSFunction()));

    // Now compute the argv value.  Since StackPointer is pointing to &vp[0] and
    // argv is &vp[2] we just need to add 2*sizeof(Value) to the current
    // StackPointer.
    JS_STATIC_ASSERT(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
    JS_STATIC_ASSERT(JSJitMethodCallArgsTraits::offsetOfArgc ==
                     IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
    masm.computeEffectiveAddress(Address(masm.getStackPointer(), 2 * sizeof(Value)), argArgs);

    // NOTE(review): static_cast's target type lost in extraction — likely
    // static_cast<MCallDOMNative*>; confirm against upstream.
    LoadDOMPrivate(masm, obj, argPrivate, static_cast(call->mir())->objectKind());

    // Push argc from the call instruction into what will become the IonExitFrame
    masm.Push(Imm32(call->numActualArgs()));

    // Push our argv onto the stack
    masm.Push(argArgs);
    // And store our JSJitMethodCallArgs* in argArgs.
    masm.moveStackPtrTo(argArgs);

    // Push |this| object for passing HandleObject. We push after argc to
    // maintain the same sp-relative location of the object pointer with other
    // DOMExitFrames.
    masm.Push(argObj);
    masm.moveStackPtrTo(argObj);

    if (call->mir()->maybeCrossRealm()) {
        // We use argJSContext as scratch register here.
        masm.movePtr(ImmGCPtr(target->rawJSFunction()), argJSContext);
        masm.switchToObjectRealm(argJSContext, argJSContext);
    }

    // Construct native exit frame.
    uint32_t safepointOffset = masm.buildFakeExitFrame(argJSContext);
    masm.loadJSContext(argJSContext);
    masm.enterFakeExitFrame(argJSContext, argJSContext, ExitFrameType::IonDOMMethod);

    markSafepointAt(safepointOffset, call);

    // Construct and execute call.
    masm.setupUnalignedABICall(argJSContext);
    masm.loadJSContext(argJSContext);
    masm.passABIArg(argJSContext);
    masm.passABIArg(argObj);
    masm.passABIArg(argPrivate);
    masm.passABIArg(argArgs);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, target->jitInfo()->method), MoveOp::GENERAL,
                     CheckUnsafeCallWithABI::DontCheckHasExitFrame);

    if (target->jitInfo()->isInfallible) {
        masm.loadValue(Address(masm.getStackPointer(), IonDOMMethodExitFrameLayout::offsetOfResult()),
                       JSReturnOperand);
    } else {
        // Test for failure.
masm.branchIfFalseBool(ReturnReg, masm.exceptionLabel());

        // Load the outparam vp[0] into output register(s).
        masm.loadValue(Address(masm.getStackPointer(),
                               IonDOMMethodExitFrameLayout::offsetOfResult()),
                       JSReturnOperand);
    }

    // Switch back to the current realm if needed. Note: if the DOM method threw
    // an exception, the exception handler will do this.
    if (call->mir()->maybeCrossRealm()) {
        static_assert(!JSReturnOperand.aliases(ReturnReg),
                      "Clobbering ReturnReg should not affect the return value");
        masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
    }

    // Until C++ code is instrumented against Spectre, prevent speculative
    // execution from returning any private data.
    if (JitOptions.spectreJitToCxxCalls && call->mir()->hasLiveDefUses())
        masm.speculationBarrier();

    // The next instruction is removing the footer of the exit frame, so there
    // is no need for leaveFakeExitFrame.

    // Move the StackPointer back to its original location, unwinding the native exit frame.
    masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
    MOZ_ASSERT(masm.framePushed() == initialStack);
}

// NOTE(review): FunctionInfo's template argument lists were stripped in
// extraction — upstream is FunctionInfo<GetIntrinsicValueFn>(...) etc.;
// confirm.
typedef bool (*GetIntrinsicValueFn)(JSContext* cx, HandlePropertyName, MutableHandleValue);
static const VMFunction GetIntrinsicValueInfo =
    FunctionInfo(GetIntrinsicValue, "GetIntrinsicValue");

// Fetch a self-hosting intrinsic by name through a VM call.
void
CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue* lir)
{
    pushArg(ImmGCPtr(lir->mir()->name()));
    callVM(GetIntrinsicValueInfo, lir);
}

typedef bool (*InvokeFunctionFn)(JSContext*, HandleObject, bool, bool, uint32_t, Value*,
                                 MutableHandleValue);
static const VMFunction InvokeFunctionInfo =
    FunctionInfo(InvokeFunction, "InvokeFunction");

// Slow-path call through the interpreter's InvokeFunction. |unusedStack| is
// the gap between the stack pointer and the pushed argument vector.
void
CodeGenerator::emitCallInvokeFunction(LInstruction* call, Register calleereg,
                                      bool constructing, bool ignoresReturnValue,
                                      uint32_t argc, uint32_t unusedStack)
{
    // Nestle %esp up to the argument vector.
    // Each path must account for framePushed_ separately, for callVM to be valid.
masm.freeStack(unusedStack);

    pushArg(masm.getStackPointer()); // argv.
    pushArg(Imm32(argc));            // argc.
    pushArg(Imm32(ignoresReturnValue));
    pushArg(Imm32(constructing));    // constructing.
    pushArg(calleereg);              // JSFunction*.

    callVM(InvokeFunctionInfo, call);

    // Un-nestle %esp from the argument vector. No prefix was pushed.
    masm.reserveStack(unusedStack);
}

// Call a function whose target is not known at compile time: dynamically
// dispatch to JIT code when available, falling back to InvokeFunction.
void
CodeGenerator::visitCallGeneric(LCallGeneric* call)
{
    Register calleereg = ToRegister(call->getFunction());
    Register objreg = ToRegister(call->getTempObject());
    Register nargsreg = ToRegister(call->getNargsReg());
    uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
    Label invoke, thunk, makeCall, end;

    // Known-target case is handled by LCallKnown.
    MOZ_ASSERT(!call->hasSingleTarget());

    masm.checkStackAlignment();

    // Guard that calleereg is actually a function object.
    if (call->mir()->needsClassCheck()) {
        masm.branchTestObjClass(Assembler::NotEqual, calleereg, &JSFunction::class_, nargsreg,
                                calleereg, &invoke);
    }

    // Guard that calleereg is an interpreted function with a JSScript or a
    // wasm function.
    // If we are constructing, also ensure the callee is a constructor.
    if (call->mir()->isConstructing()) {
        masm.branchIfNotInterpretedConstructor(calleereg, nargsreg, &invoke);
    } else {
        masm.branchIfFunctionHasNoJitEntry(calleereg, /* isConstructing */ false, &invoke);
        masm.branchFunctionKind(Assembler::Equal, JSFunction::ClassConstructor, calleereg,
                                objreg, &invoke);
    }

    if (call->mir()->maybeCrossRealm())
        masm.switchToObjectRealm(calleereg, objreg);

    if (call->mir()->needsArgCheck())
        masm.loadJitCodeRaw(calleereg, objreg);
    else
        masm.loadJitCodeNoArgCheck(calleereg, objreg);

    // Nestle the StackPointer up to the argument vector.
    masm.freeStack(unusedStack);

    // Construct the IonFramePrefix.
    uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS,
                                              JitFrameLayout::Size());
    masm.Push(Imm32(call->numActualArgs()));
    masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
    masm.Push(Imm32(descriptor));

    // Check whether the provided arguments satisfy target argc.
    // We cannot have lowered to LCallGeneric with a known target. Assert that we didn't
    // add any undefineds in IonBuilder. NB: MCall::numStackArgs includes |this|.
    // NOTE(review): template argument lost in extraction — upstream is
    // DebugOnly<unsigned>; confirm.
    DebugOnly numNonArgsOnStack = 1 + call->isConstructing();
    MOZ_ASSERT(call->numActualArgs() == call->mir()->numStackArgs() - numNonArgsOnStack);
    masm.load16ZeroExtend(Address(calleereg, JSFunction::offsetOfNargs()), nargsreg);
    masm.branch32(Assembler::Above, nargsreg, Imm32(call->numActualArgs()), &thunk);
    masm.jump(&makeCall);

    // Argument fixup needed. Load the ArgumentsRectifier.
    masm.bind(&thunk);
    {
        TrampolinePtr argumentsRectifier = gen->jitRuntime()->getArgumentsRectifier();
        masm.movePtr(argumentsRectifier, objreg);
    }

    // Finally call the function in objreg.
    masm.bind(&makeCall);
    uint32_t callOffset = masm.callJit(objreg);
    markSafepointAt(callOffset, call);

    if (call->mir()->maybeCrossRealm()) {
        static_assert(!JSReturnOperand.aliases(ReturnReg),
                      "ReturnReg available as scratch after scripted calls");
        masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
    }

    // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
    // The return address has already been removed from the Ion frame.
    int prefixGarbage = sizeof(JitFrameLayout) - sizeof(void*);
    masm.adjustStack(prefixGarbage - unusedStack);
    masm.jump(&end);

    // Handle uncompiled or native functions.
    masm.bind(&invoke);
    emitCallInvokeFunction(call, calleereg, call->isConstructing(), call->ignoresReturnValue(),
                           call->numActualArgs(), unusedStack);

    masm.bind(&end);

    // If the return value of the constructing function is Primitive,
    // replace the return value with the Object from CreateThis.
if (call->mir()->isConstructing()) { Label notPrimitive; masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand, ¬Primitive); masm.loadValue(Address(masm.getStackPointer(), unusedStack), JSReturnOperand); masm.bind(¬Primitive); } } typedef bool (*InvokeFunctionShuffleFn)(JSContext*, HandleObject, uint32_t, uint32_t, Value*, MutableHandleValue); static const VMFunction InvokeFunctionShuffleInfo = FunctionInfo(InvokeFunctionShuffleNewTarget, "InvokeFunctionShuffleNewTarget"); void CodeGenerator::emitCallInvokeFunctionShuffleNewTarget(LCallKnown* call, Register calleeReg, uint32_t numFormals, uint32_t unusedStack) { masm.freeStack(unusedStack); pushArg(masm.getStackPointer()); pushArg(Imm32(numFormals)); pushArg(Imm32(call->numActualArgs())); pushArg(calleeReg); callVM(InvokeFunctionShuffleInfo, call); masm.reserveStack(unusedStack); } void CodeGenerator::visitCallKnown(LCallKnown* call) { Register calleereg = ToRegister(call->getFunction()); Register objreg = ToRegister(call->getTempObject()); uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot()); WrappedFunction* target = call->getSingleTarget(); // Native single targets (except wasm) are handled by LCallNative. MOZ_ASSERT(!target->isNativeWithCppEntry()); // Missing arguments must have been explicitly appended by the IonBuilder. DebugOnly numNonArgsOnStack = 1 + call->isConstructing(); MOZ_ASSERT(target->nargs() <= call->mir()->numStackArgs() - numNonArgsOnStack); MOZ_ASSERT_IF(call->isConstructing(), target->isConstructor()); masm.checkStackAlignment(); if (target->isClassConstructor() && !call->isConstructing()) { emitCallInvokeFunction(call, calleereg, call->isConstructing(), call->ignoresReturnValue(), call->numActualArgs(), unusedStack); return; } MOZ_ASSERT_IF(target->isClassConstructor(), call->isConstructing()); Label uncompiled; if (!target->isNativeWithJitEntry()) { // The calleereg is known to be a non-native function, but might point // to a LazyScript instead of a JSScript. 
masm.branchIfFunctionHasNoJitEntry(calleereg, call->isConstructing(), &uncompiled);
    }

    if (call->mir()->maybeCrossRealm())
        masm.switchToObjectRealm(calleereg, objreg);

    if (call->mir()->needsArgCheck())
        masm.loadJitCodeRaw(calleereg, objreg);
    else
        masm.loadJitCodeNoArgCheck(calleereg, objreg);

    // Nestle the StackPointer up to the argument vector.
    masm.freeStack(unusedStack);

    // Construct the IonFramePrefix.
    uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS,
                                              JitFrameLayout::Size());
    masm.Push(Imm32(call->numActualArgs()));
    masm.PushCalleeToken(calleereg, call->mir()->isConstructing());
    masm.Push(Imm32(descriptor));

    // Finally call the function in objreg.
    uint32_t callOffset = masm.callJit(objreg);
    markSafepointAt(callOffset, call);

    if (call->mir()->maybeCrossRealm()) {
        static_assert(!JSReturnOperand.aliases(ReturnReg),
                      "ReturnReg available as scratch after scripted calls");
        masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
    }

    // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
    // The return address has already been removed from the Ion frame.
    int prefixGarbage = sizeof(JitFrameLayout) - sizeof(void*);
    masm.adjustStack(prefixGarbage - unusedStack);

    if (uncompiled.used()) {
        Label end;
        masm.jump(&end);

        // Handle uncompiled functions.
        masm.bind(&uncompiled);
        if (call->isConstructing() && target->nargs() > call->numActualArgs()) {
            emitCallInvokeFunctionShuffleNewTarget(call, calleereg, target->nargs(),
                                                   unusedStack);
        } else {
            emitCallInvokeFunction(call, calleereg, call->isConstructing(),
                                   call->ignoresReturnValue(), call->numActualArgs(),
                                   unusedStack);
        }

        masm.bind(&end);
    }

    // If the return value of the constructing function is Primitive,
    // replace the return value with the Object from CreateThis.
if (call->mir()->isConstructing()) { Label notPrimitive; masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand, ¬Primitive); masm.loadValue(Address(masm.getStackPointer(), unusedStack), JSReturnOperand); masm.bind(¬Primitive); } } template void CodeGenerator::emitCallInvokeFunction(T* apply, Register extraStackSize) { Register objreg = ToRegister(apply->getTempObject()); MOZ_ASSERT(objreg != extraStackSize); // Push the space used by the arguments. masm.moveStackPtrTo(objreg); masm.Push(extraStackSize); pushArg(objreg); // argv. pushArg(ToRegister(apply->getArgc())); // argc. pushArg(Imm32(false)); // ignoresReturnValue. pushArg(Imm32(false)); // isConstrucing. pushArg(ToRegister(apply->getFunction())); // JSFunction*. // This specialization og callVM restore the extraStackSize after the call. callVM(InvokeFunctionInfo, apply, &extraStackSize); masm.Pop(extraStackSize); } // Do not bailout after the execution of this function since the stack no longer // correspond to what is expected by the snapshots. void CodeGenerator::emitAllocateSpaceForApply(Register argcreg, Register extraStackSpace, Label* end) { // Initialize the loop counter AND Compute the stack usage (if == 0) masm.movePtr(argcreg, extraStackSpace); // Align the JitFrameLayout on the JitStackAlignment. if (JitStackValueAlignment > 1) { MOZ_ASSERT(frameSize() % JitStackAlignment == 0, "Stack padding assumes that the frameSize is correct"); MOZ_ASSERT(JitStackValueAlignment == 2); Label noPaddingNeeded; // if the number of arguments is odd, then we do not need any padding. masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded); masm.addPtr(Imm32(1), extraStackSpace); masm.bind(&noPaddingNeeded); } // Reserve space for copying the arguments. NativeObject::elementsSizeMustNotOverflow(); masm.lshiftPtr(Imm32(ValueShift), extraStackSpace); masm.subFromStackPtr(extraStackSpace); #ifdef DEBUG // Put a magic value in the space reserved for padding. 
// Note, this code cannot be merged with the previous test, as not all
    // architectures can write below their stack pointers.
    if (JitStackValueAlignment > 1) {
        MOZ_ASSERT(JitStackValueAlignment == 2);
        Label noPaddingNeeded;
        // if the number of arguments is odd, then we do not need any padding.
        masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
        BaseValueIndex dstPtr(masm.getStackPointer(), argcreg);
        masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
        masm.bind(&noPaddingNeeded);
    }
#endif

    // Skip the copy of arguments if there are none.
    masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, end);
}

// Copy |argvIndex| Values from argvSrcBase+argvSrcOffset to the stack.
// Destroys argvIndex and copyreg.
void
CodeGenerator::emitCopyValuesForApply(Register argvSrcBase, Register argvIndex, Register copyreg,
                                      size_t argvSrcOffset, size_t argvDstOffset)
{
    Label loop;
    masm.bind(&loop);

    // As argvIndex is off by 1, and we use the decBranchPtr instruction
    // to loop back, we have to substract the size of the word which are
    // copied.
    BaseValueIndex srcPtr(argvSrcBase, argvIndex, argvSrcOffset - sizeof(void*));
    BaseValueIndex dstPtr(masm.getStackPointer(), argvIndex, argvDstOffset - sizeof(void*));
    masm.loadPtr(srcPtr, copyreg);
    masm.storePtr(copyreg, dstPtr);

    // Handle 32 bits architectures.
    if (sizeof(Value) == 2 * sizeof(void*)) {
        BaseValueIndex srcPtrLow(argvSrcBase, argvIndex, argvSrcOffset - 2 * sizeof(void*));
        BaseValueIndex dstPtrLow(masm.getStackPointer(), argvIndex,
                                 argvDstOffset - 2 * sizeof(void*));
        masm.loadPtr(srcPtrLow, copyreg);
        masm.storePtr(copyreg, dstPtrLow);
    }

    masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop);
}

// Release the stack space reserved by emitPushArguments.
void
CodeGenerator::emitPopArguments(Register extraStackSpace)
{
    // Pop |this| and Arguments.
    masm.freeStack(extraStackSpace);
}

// Push the caller's actual arguments (fun.apply(x, arguments)) onto the stack.
void
CodeGenerator::emitPushArguments(LApplyArgsGeneric* apply, Register extraStackSpace)
{
    // Holds the function nargs. Initially the number of args to the caller.
Register argcreg = ToRegister(apply->getArgc());
    Register copyreg = ToRegister(apply->getTempObject());

    Label end;
    emitAllocateSpaceForApply(argcreg, extraStackSpace, &end);

    // We are making a copy of the arguments which are above the JitFrameLayout
    // of the current Ion frame.
    //
    // [arg1] [arg0] <- src [this] [JitFrameLayout] [.. frameSize ..] [pad] [arg1] [arg0] <- dst

    // Compute the source and destination offsets into the stack.
    size_t argvSrcOffset = frameSize() + JitFrameLayout::offsetOfActualArgs();
    size_t argvDstOffset = 0;

    // Save the extra stack space, and re-use the register as a base.
    masm.push(extraStackSpace);
    Register argvSrcBase = extraStackSpace;
    argvSrcOffset += sizeof(void*);
    argvDstOffset += sizeof(void*);

    // Save the actual number of register, and re-use the register as an index register.
    masm.push(argcreg);
    Register argvIndex = argcreg;
    argvSrcOffset += sizeof(void*);
    argvDstOffset += sizeof(void*);

    // srcPtr = (StackPointer + extraStackSpace) + argvSrcOffset
    // dstPtr = (StackPointer                  ) + argvDstOffset
    masm.addStackPtrTo(argvSrcBase);

    // Copy arguments.
    emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, argvSrcOffset, argvDstOffset);

    // Restore argcreg and the extra stack space counter.
    masm.pop(argcreg);
    masm.pop(extraStackSpace);

    // Join with all arguments copied and the extra stack usage computed.
    masm.bind(&end);

    // Push |this|.
    masm.addPtr(Imm32(sizeof(Value)), extraStackSpace);
    masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex));
}

// Push the elements of an array (fun.apply(x, array)) onto the stack.
void
CodeGenerator::emitPushArguments(LApplyArrayGeneric* apply, Register extraStackSpace)
{
    Label noCopy, epilogue;
    Register tmpArgc = ToRegister(apply->getTempObject());
    Register elementsAndArgc = ToRegister(apply->getElements());

    // Invariants guarded in the caller:
    //  - the array is not too long
    //  - the array length equals its initialized length

    // The array length is our argc for the purposes of allocating space.
Address length(ToRegister(apply->getElements()), ObjectElements::offsetOfLength()); masm.load32(length, tmpArgc); // Allocate space for the values. emitAllocateSpaceForApply(tmpArgc, extraStackSpace, &noCopy); // Copy the values. This code is skipped entirely if there are // no values. size_t argvDstOffset = 0; Register argvSrcBase = elementsAndArgc; // Elements value masm.push(extraStackSpace); Register copyreg = extraStackSpace; argvDstOffset += sizeof(void*); masm.push(tmpArgc); Register argvIndex = tmpArgc; argvDstOffset += sizeof(void*); // Copy emitCopyValuesForApply(argvSrcBase, argvIndex, copyreg, 0, argvDstOffset); // Restore. masm.pop(elementsAndArgc); masm.pop(extraStackSpace); masm.jump(&epilogue); // Clear argc if we skipped the copy step. masm.bind(&noCopy); masm.movePtr(ImmPtr(0), elementsAndArgc); // Join with all arguments copied and the extra stack usage computed. // Note, "elements" has become "argc". masm.bind(&epilogue); // Push |this|. masm.addPtr(Imm32(sizeof(Value)), extraStackSpace); masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex)); } template void CodeGenerator::emitApplyGeneric(T* apply) { // Holds the function object. Register calleereg = ToRegister(apply->getFunction()); // Temporary register for modifying the function object. Register objreg = ToRegister(apply->getTempObject()); Register extraStackSpace = ToRegister(apply->getTempStackCounter()); // Holds the function nargs, computed in the invoker or (for // ApplyArray) in the argument pusher. Register argcreg = ToRegister(apply->getArgc()); // Unless already known, guard that calleereg is actually a function object. if (!apply->hasSingleTarget()) { Label bail; masm.branchTestObjClass(Assembler::NotEqual, calleereg, &JSFunction::class_, objreg, calleereg, &bail); bailoutFrom(&bail, apply->snapshot()); } // Copy the arguments of the current function. 
//
    // In the case of ApplyArray, also compute argc: the argc register
    // and the elements register are the same; argc must not be
    // referenced before the call to emitPushArguments() and elements
    // must not be referenced after it returns.
    //
    // objreg is dead across this call.
    //
    // extraStackSpace is garbage on entry and defined on exit.
    emitPushArguments(apply, extraStackSpace);

    masm.checkStackAlignment();

    // If the function is native, only emit the call to InvokeFunction.
    if (apply->hasSingleTarget() && apply->getSingleTarget()->isNativeWithCppEntry()) {
        emitCallInvokeFunction(apply, extraStackSpace);
        emitPopArguments(extraStackSpace);
        return;
    }

    Label end, invoke;

    // Guard that calleereg is an interpreted function with a JSScript.
    masm.branchIfFunctionHasNoJitEntry(calleereg, /* constructing */ false, &invoke);

    // Guard that calleereg is not a class constructor
    masm.branchFunctionKind(Assembler::Equal, JSFunction::ClassConstructor, calleereg, objreg,
                            &invoke);

    // Call with an Ion frame or a rectifier frame.
    {
        if (apply->mir()->maybeCrossRealm())
            masm.switchToObjectRealm(calleereg, objreg);

        // Knowing that calleereg is a non-native function, load jitcode.
        masm.loadJitCodeRaw(calleereg, objreg);

        // Create the frame descriptor.
        unsigned pushed = masm.framePushed();
        Register stackSpace = extraStackSpace;
        masm.addPtr(Imm32(pushed), stackSpace);
        masm.makeFrameDescriptor(stackSpace, JitFrame_IonJS, JitFrameLayout::Size());

        masm.Push(argcreg);
        masm.Push(calleereg);
        masm.Push(stackSpace); // descriptor

        Label underflow, rejoin;

        // Check whether the provided arguments satisfy target argc.
        if (!apply->hasSingleTarget()) {
            Register nformals = extraStackSpace;
            masm.load16ZeroExtend(Address(calleereg, JSFunction::offsetOfNargs()), nformals);
            masm.branch32(Assembler::Below, argcreg, nformals, &underflow);
        } else {
            masm.branch32(Assembler::Below, argcreg,
                          Imm32(apply->getSingleTarget()->nargs()), &underflow);
        }

        // Skip the construction of the rectifier frame because we have no
        // underflow.
        masm.jump(&rejoin);

        // Argument fixup needed. Get ready to call the argumentsRectifier.
        {
            masm.bind(&underflow);

            // Hardcode the address of the argumentsRectifier code.
            TrampolinePtr argumentsRectifier = gen->jitRuntime()->getArgumentsRectifier();
            masm.movePtr(argumentsRectifier, objreg);
        }

        masm.bind(&rejoin);

        // Finally call the function in objreg, as assigned by one of the paths above.
        uint32_t callOffset = masm.callJit(objreg);
        markSafepointAt(callOffset, apply);

        if (apply->mir()->maybeCrossRealm()) {
            static_assert(!JSReturnOperand.aliases(ReturnReg),
                          "ReturnReg available as scratch after scripted calls");
            masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
        }

        // Recover the number of arguments from the frame descriptor.
        masm.loadPtr(Address(masm.getStackPointer(), 0), stackSpace);
        masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), stackSpace);
        masm.subPtr(Imm32(pushed), stackSpace);

        // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
        // The return address has already been removed from the Ion frame.
        int prefixGarbage = sizeof(JitFrameLayout) - sizeof(void*);
        masm.adjustStack(prefixGarbage);
        masm.jump(&end);
    }

    // Handle uncompiled or native functions.
    {
        masm.bind(&invoke);
        emitCallInvokeFunction(apply, extraStackSpace);
    }

    // Pop arguments and continue.
masm.bind(&end);
    emitPopArguments(extraStackSpace);
}

// Entry point for fun.apply(x, arguments): bound the argument count, then use
// the generic apply path.
void
CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric* apply)
{
    // Limit the number of parameters we can handle to a number that does not risk
    // us allocating too much stack, notably on Windows where there is a 4K guard page
    // that has to be touched to extend the stack. See bug 1351278. The value "3000"
    // is the size of the guard page minus an arbitrary, but large, safety margin.

    LSnapshot* snapshot = apply->snapshot();
    Register argcreg = ToRegister(apply->getArgc());

    uint32_t limit = 3000 / sizeof(Value);
    bailoutCmp32(Assembler::Above, argcreg, Imm32(limit), snapshot);

    emitApplyGeneric(apply);
}

// Entry point for fun.apply(x, array): bound the array length and require a
// fully-initialized array, then use the generic apply path.
void
CodeGenerator::visitApplyArrayGeneric(LApplyArrayGeneric* apply)
{
    LSnapshot* snapshot = apply->snapshot();

    Register tmp = ToRegister(apply->getTempObject());
    Address length(ToRegister(apply->getElements()), ObjectElements::offsetOfLength());
    masm.load32(length, tmp);

    // See comment in visitApplyArgsGeneric, above.
    uint32_t limit = 3000 / sizeof(Value);
    bailoutCmp32(Assembler::Above, tmp, Imm32(limit), snapshot);

    // Ensure that the array does not contain an uninitialized tail.
Address initializedLength(ToRegister(apply->getElements()),
                              ObjectElements::offsetOfInitializedLength());
    masm.sub32(initializedLength, tmp);
    // Bail out unless length == initializedLength.
    bailoutCmp32(Assembler::NotEqual, tmp, Imm32(0), snapshot);

    emitApplyGeneric(apply);
}

// Unconditional bailout.
void
CodeGenerator::visitBail(LBail* lir)
{
    bailout(lir->snapshot());
}

void
CodeGenerator::visitUnreachable(LUnreachable* lir)
{
    masm.assumeUnreachable("end-of-block assumed unreachable");
}

// Encode the snapshot only; no code is emitted.
void
CodeGenerator::visitEncodeSnapshot(LEncodeSnapshot* lir)
{
    encode(lir->snapshot());
}

// Look up a dynamic name (e.g. for slow-path eval scope lookups) through an
// ABI call to GetDynamicName, writing the Value into a stack outparam.
// Bails out when the lookup produced |undefined|.
void
CodeGenerator::visitGetDynamicName(LGetDynamicName* lir)
{
    Register envChain = ToRegister(lir->getEnvironmentChain());
    Register name = ToRegister(lir->getName());
    Register temp1 = ToRegister(lir->temp1());
    Register temp2 = ToRegister(lir->temp2());
    Register temp3 = ToRegister(lir->temp3());

    masm.loadJSContext(temp3);

    /* Make space for the outparam. */
    masm.adjustStack(-int32_t(sizeof(Value)));
    masm.moveStackPtrTo(temp2);

    masm.setupUnalignedABICall(temp1);
    masm.passABIArg(temp3);
    masm.passABIArg(envChain);
    masm.passABIArg(name);
    masm.passABIArg(temp2);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, GetDynamicName));

    const ValueOperand out = ToOutValue(lir);

    masm.loadValue(Address(masm.getStackPointer(), 0), out);
    masm.adjustStack(sizeof(Value));

    Label undefined;
    masm.branchTestUndefined(Assembler::Equal, out, &undefined);
    bailoutFrom(&undefined, lir->snapshot());
}

// NOTE(review): FunctionInfo's template argument list was stripped in
// extraction — upstream is FunctionInfo<DirectEvalSFn>(...); confirm.
typedef bool (*DirectEvalSFn)(JSContext*, HandleObject, HandleScript, HandleValue, HandleString,
                              jsbytecode*, MutableHandleValue);
static const VMFunction DirectEvalStringInfo =
    FunctionInfo(DirectEvalStringFromIon, "DirectEvalStringFromIon");

// Direct eval of a string argument through a VM call.
void
CodeGenerator::visitCallDirectEval(LCallDirectEval* lir)
{
    Register envChain = ToRegister(lir->getEnvironmentChain());
    Register string = ToRegister(lir->getString());

    pushArg(ImmPtr(lir->mir()->pc()));
    pushArg(string);
    pushArg(ToValue(lir, LCallDirectEval::NewTarget));
    pushArg(ImmGCPtr(current->mir()->info().script()));
    pushArg(envChain);
callVM(DirectEvalStringInfo, lir);
}

// Emit type-set guards on all typed formal arguments. With assert=false a
// failed guard bails out; with assert=true (debug builds) it hits a breakpoint
// unless the miss is explained by a changed object group.
void
CodeGenerator::generateArgumentsChecks(bool assert)
{
    // This function can be used the normal way to check the argument types,
    // before entering the function and bailout when arguments don't match.
    // For debug purpose, this is can also be used to force/check that the
    // arguments are correct. Upon fail it will hit a breakpoint.

    MIRGraph& mir = gen->graph();
    MResumePoint* rp = mir.entryResumePoint();

    // No registers are allocated yet, so it's safe to grab anything.
    AllocatableGeneralRegisterSet temps(GeneralRegisterSet::All());
    Register temp1 = temps.takeAny();
    Register temp2 = temps.takeAny();

    masm.debugAssertContextRealm(gen->realm->realmPtr(), temp1);

    const CompileInfo& info = gen->info();

    Label miss;
    for (uint32_t i = info.startArgSlot(); i < info.endArgSlot(); i++) {
        // All initial parameters are guaranteed to be MParameters.
        MParameter* param = rp->getOperand(i)->toParameter();
        const TypeSet* types = param->resultTypeSet();
        if (!types || types->unknown())
            continue;

        // Calculate the offset on the stack of the argument.
        // (i - info.startArgSlot())    - Compute index of arg within arg vector.
        // ... * sizeof(Value)          - Scale by value size.
        // ArgToStackOffset(...)        - Compute displacement within arg vector.
        int32_t offset = ArgToStackOffset((i - info.startArgSlot()) * sizeof(Value));
        Address argAddr(masm.getStackPointer(), offset);

        // guardObjectType will zero the stack pointer register on speculative
        // paths.
        Register spectreRegToZero = AsRegister(masm.getStackPointer());
        masm.guardTypeSet(argAddr, types, BarrierKind::TypeSet, temp1, temp2,
                          spectreRegToZero, &miss);
    }

    if (miss.used()) {
        if (assert) {
#ifdef DEBUG
            Label success;
            masm.jump(&success);
            masm.bind(&miss);

            // Check for cases where the type set guard might have missed due to
            // changing object groups.
            for (uint32_t i = info.startArgSlot(); i < info.endArgSlot(); i++) {
                MParameter* param = rp->getOperand(i)->toParameter();
                const TemporaryTypeSet* types = param->resultTypeSet();
                if (!types || types->unknown())
                    continue;

                Label skip;
                Address addr(masm.getStackPointer(),
                             ArgToStackOffset((i - info.startArgSlot()) * sizeof(Value)));
                masm.branchTestObject(Assembler::NotEqual, addr, &skip);
                Register obj = masm.extractObject(addr, temp1);
                masm.guardTypeSetMightBeIncomplete(types, obj, temp1, &success);
                masm.bind(&skip);
            }

            masm.assumeUnreachable("Argument check fail.");
            masm.bind(&success);
#else
            MOZ_CRASH("Shouldn't get here in opt builds");
#endif
        } else {
            bailoutFrom(&miss, graph.entrySnapshot());
        }
    }
}

// Out-of-line path to report over-recursed error and fail.
// NOTE(review): the OutOfLineCodeBase template argument (likely
// <CodeGenerator>) was lost in extraction — confirm against upstream.
class CheckOverRecursedFailure : public OutOfLineCodeBase
{
    LInstruction* lir_;

  public:
    explicit CheckOverRecursedFailure(LInstruction* lir)
      : lir_(lir)
    { }

    void accept(CodeGenerator* codegen) override {
        codegen->visitCheckOverRecursedFailure(this);
    }

    LInstruction* lir() const {
        return lir_;
    }
};

void
CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed* lir)
{
    // If we don't push anything on the stack, skip the check.
    if (omitOverRecursedCheck())
        return;

    // Ensure that this frame will not cross the stack limit.
    // This is a weak check, justified by Ion using the C stack: we must always
    // be some distance away from the actual limit, since if the limit is
    // crossed, an error must be thrown, which requires more frames.
    //
    // It must always be possible to trespass past the stack limit.
    // Ion may legally place frames very close to the limit. Calling additional
    // C functions may then violate the limit without any checking.
    //
    // Since Ion frames exist on the C stack, the stack limit may be
    // dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota().
CheckOverRecursedFailure* ool = new(alloc()) CheckOverRecursedFailure(lir); addOutOfLineCode(ool, lir->mir()); // Conditional forward (unlikely) branch to failure. const void* limitAddr = gen->runtime->addressOfJitStackLimit(); masm.branchStackPtrRhs(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr), ool->entry()); masm.bind(ool->rejoin()); } typedef bool (*DefVarFn)(JSContext*, HandlePropertyName, unsigned, HandleObject); static const VMFunction DefVarInfo = FunctionInfo(DefVar, "DefVar"); void CodeGenerator::visitDefVar(LDefVar* lir) { Register envChain = ToRegister(lir->environmentChain()); pushArg(envChain); // JSObject* pushArg(Imm32(lir->mir()->attrs())); // unsigned pushArg(ImmGCPtr(lir->mir()->name())); // PropertyName* callVM(DefVarInfo, lir); } typedef bool (*DefLexicalFn)(JSContext*, HandlePropertyName, unsigned); static const VMFunction DefLexicalInfo = FunctionInfo(DefGlobalLexical, "DefGlobalLexical"); void CodeGenerator::visitDefLexical(LDefLexical* lir) { pushArg(Imm32(lir->mir()->attrs())); // unsigned pushArg(ImmGCPtr(lir->mir()->name())); // PropertyName* callVM(DefLexicalInfo, lir); } typedef bool (*DefFunOperationFn)(JSContext*, HandleScript, HandleObject, HandleFunction); static const VMFunction DefFunOperationInfo = FunctionInfo(DefFunOperation, "DefFunOperation"); void CodeGenerator::visitDefFun(LDefFun* lir) { Register envChain = ToRegister(lir->environmentChain()); Register fun = ToRegister(lir->fun()); pushArg(fun); pushArg(envChain); pushArg(ImmGCPtr(current->mir()->info().script())); callVM(DefFunOperationInfo, lir); } typedef bool (*CheckOverRecursedFn)(JSContext*); static const VMFunction CheckOverRecursedInfo = FunctionInfo(CheckOverRecursed, "CheckOverRecursed"); void CodeGenerator::visitCheckOverRecursedFailure(CheckOverRecursedFailure* ool) { // The OOL path is hit if the recursion depth has been exceeded. // Throw an InternalError for over-recursion. 
// LFunctionEnvironment can appear before LCheckOverRecursed, so we have // to save all live registers to avoid crashes if CheckOverRecursed triggers // a GC. saveLive(ool->lir()); callVM(CheckOverRecursedInfo, ool->lir()); restoreLive(ool->lir()); masm.jump(ool->rejoin()); } IonScriptCounts* CodeGenerator::maybeCreateScriptCounts() { // If scripts are being profiled, create a new IonScriptCounts for the // profiling data, which will be attached to the associated JSScript or // wasm module after code generation finishes. if (!gen->hasProfilingScripts()) return nullptr; // This test inhibits IonScriptCount creation for wasm code which is // currently incompatible with wasm codegen for two reasons: (1) wasm code // must be serializable and script count codegen bakes in absolute // addresses, (2) wasm code does not have a JSScript with which to associate // code coverage data. JSScript* script = gen->info().script(); if (!script) return nullptr; auto counts = MakeUnique(); if (!counts || !counts->init(graph.numBlocks())) return nullptr; for (size_t i = 0; i < graph.numBlocks(); i++) { MBasicBlock* block = graph.getBlock(i)->mir(); uint32_t offset = 0; char* description = nullptr; if (MResumePoint* resume = block->entryResumePoint()) { // Find a PC offset in the outermost script to use. If this // block is from an inlined script, find a location in the // outer script to associate information about the inlining // with. while (resume->caller()) resume = resume->caller(); offset = script->pcToOffset(resume->pc()); if (block->entryResumePoint()->caller()) { // Get the filename and line number of the inner script. 
JSScript* innerScript = block->info().script(); description = js_pod_calloc(200); if (description) { snprintf(description, 200, "%s:%u", innerScript->filename(), innerScript->lineno()); } } } if (!counts->block(i).init(block->id(), offset, description, block->numSuccessors())) return nullptr; for (size_t j = 0; j < block->numSuccessors(); j++) counts->block(i).setSuccessor(j, skipTrivialBlocks(block->getSuccessor(j))->id()); } scriptCounts_ = counts.release(); return scriptCounts_; } // Structure for managing the state tracked for a block by script counters. struct ScriptCountBlockState { IonBlockCounts& block; MacroAssembler& masm; Sprinter printer; public: ScriptCountBlockState(IonBlockCounts* block, MacroAssembler* masm) : block(*block), masm(*masm), printer(GetJitContext()->cx, false) { } bool init() { if (!printer.init()) return false; // Bump the hit count for the block at the start. This code is not // included in either the text for the block or the instruction byte // counts. masm.inc64(AbsoluteAddress(block.addressOfHitCount())); // Collect human readable assembly for the code generated in the block. masm.setPrinter(&printer); return true; } void visitInstruction(LInstruction* ins) { #ifdef JS_JITSPEW // Prefix stream of assembly instructions with their LIR instruction // name and any associated high level info. if (const char* extra = ins->getExtraName()) printer.printf("[%s:%s]\n", ins->opName(), extra); else printer.printf("[%s]\n", ins->opName()); #endif } ~ScriptCountBlockState() { masm.setPrinter(nullptr); if (!printer.hadOutOfMemory()) block.setCode(printer.string()); } }; void CodeGenerator::branchIfInvalidated(Register temp, Label* invalidated) { CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), temp); masm.propagateOOM(ionScriptLabels_.append(label)); // If IonScript::invalidationCount_ != 0, the script has been invalidated. 
masm.branch32(Assembler::NotEqual, Address(temp, IonScript::offsetOfInvalidationCount()), Imm32(0), invalidated); } #ifdef DEBUG void CodeGenerator::emitAssertObjectOrStringResult(Register input, MIRType type, const TemporaryTypeSet* typeset) { MOZ_ASSERT(type == MIRType::Object || type == MIRType::ObjectOrNull || type == MIRType::String || type == MIRType::Symbol); AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); regs.take(input); Register temp = regs.takeAny(); masm.push(temp); // Don't check if the script has been invalidated. In that case invalid // types are expected (until we reach the OsiPoint and bailout). Label done; branchIfInvalidated(temp, &done); if ((type == MIRType::Object || type == MIRType::ObjectOrNull) && typeset && !typeset->unknownObject()) { // We have a result TypeSet, assert this object is in it. Label miss, ok; if (type == MIRType::ObjectOrNull) masm.branchPtr(Assembler::Equal, input, ImmWord(0), &ok); if (typeset->getObjectCount() > 0) masm.guardObjectType(input, typeset, temp, input, &miss); else masm.jump(&miss); masm.jump(&ok); masm.bind(&miss); masm.guardTypeSetMightBeIncomplete(typeset, input, temp, &ok); masm.assumeUnreachable("MIR instruction returned object with unexpected type"); masm.bind(&ok); } // Check that we have a valid GC pointer. 
if (JitOptions.fullDebugChecks) { saveVolatile(); masm.setupUnalignedABICall(temp); masm.loadJSContext(temp); masm.passABIArg(temp); masm.passABIArg(input); void* callee; switch (type) { case MIRType::Object: callee = JS_FUNC_TO_DATA_PTR(void*, AssertValidObjectPtr); break; case MIRType::ObjectOrNull: callee = JS_FUNC_TO_DATA_PTR(void*, AssertValidObjectOrNullPtr); break; case MIRType::String: callee = JS_FUNC_TO_DATA_PTR(void*, AssertValidStringPtr); break; case MIRType::Symbol: callee = JS_FUNC_TO_DATA_PTR(void*, AssertValidSymbolPtr); break; default: MOZ_CRASH(); } masm.callWithABI(callee); restoreVolatile(); } masm.bind(&done); masm.pop(temp); } void CodeGenerator::emitAssertResultV(const ValueOperand input, const TemporaryTypeSet* typeset) { AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); regs.take(input); Register temp1 = regs.takeAny(); Register temp2 = regs.takeAny(); masm.push(temp1); masm.push(temp2); // Don't check if the script has been invalidated. In that case invalid // types are expected (until we reach the OsiPoint and bailout). Label done; branchIfInvalidated(temp1, &done); if (typeset && !typeset->unknown()) { // We have a result TypeSet, assert this value is in it. Label miss, ok; masm.guardTypeSet(input, typeset, BarrierKind::TypeSet, temp1, temp2, input.payloadOrValueReg(), &miss); masm.jump(&ok); masm.bind(&miss); // Check for cases where the type set guard might have missed due to // changing object groups. Label realMiss; masm.branchTestObject(Assembler::NotEqual, input, &realMiss); Register payload = masm.extractObject(input, temp1); masm.guardTypeSetMightBeIncomplete(typeset, payload, temp1, &ok); masm.bind(&realMiss); masm.assumeUnreachable("MIR instruction returned value with unexpected type"); masm.bind(&ok); } // Check that we have a valid GC pointer. 
if (JitOptions.fullDebugChecks) { saveVolatile(); masm.pushValue(input); masm.moveStackPtrTo(temp1); masm.setupUnalignedABICall(temp2); masm.loadJSContext(temp2); masm.passABIArg(temp2); masm.passABIArg(temp1); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, AssertValidValue)); masm.popValue(input); restoreVolatile(); } masm.bind(&done); masm.pop(temp2); masm.pop(temp1); } void CodeGenerator::emitObjectOrStringResultChecks(LInstruction* lir, MDefinition* mir) { if (lir->numDefs() == 0) return; MOZ_ASSERT(lir->numDefs() == 1); if (lir->getDef(0)->isBogusTemp()) return; Register output = ToRegister(lir->getDef(0)); emitAssertObjectOrStringResult(output, mir->type(), mir->resultTypeSet()); } void CodeGenerator::emitValueResultChecks(LInstruction* lir, MDefinition* mir) { if (lir->numDefs() == 0) return; MOZ_ASSERT(lir->numDefs() == BOX_PIECES); if (!lir->getDef(0)->output()->isRegister()) return; ValueOperand output = ToOutValue(lir); emitAssertResultV(output, mir->resultTypeSet()); } void CodeGenerator::emitDebugResultChecks(LInstruction* ins) { // In debug builds, check that LIR instructions return valid values. 
MDefinition* mir = ins->mirRaw(); if (!mir) return; switch (mir->type()) { case MIRType::Object: case MIRType::ObjectOrNull: case MIRType::String: case MIRType::Symbol: emitObjectOrStringResultChecks(ins, mir); break; case MIRType::Value: emitValueResultChecks(ins, mir); break; default: break; } } void CodeGenerator::emitDebugForceBailing(LInstruction* lir) { if (!lir->snapshot()) return; if (lir->isStart()) return; if (lir->isOsiPoint()) return; masm.comment("emitDebugForceBailing"); const void* bailAfterAddr = gen->realm->zone()->addressOfIonBailAfter(); AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); Label done, notBail, bail; masm.branch32(Assembler::Equal, AbsoluteAddress(bailAfterAddr), Imm32(0), &done); { Register temp = regs.takeAny(); masm.push(temp); masm.load32(AbsoluteAddress(bailAfterAddr), temp); masm.sub32(Imm32(1), temp); masm.store32(temp, AbsoluteAddress(bailAfterAddr)); masm.branch32(Assembler::NotEqual, temp, Imm32(0), ¬Bail); { masm.pop(temp); masm.jump(&bail); bailoutFrom(&bail, lir->snapshot()); } masm.bind(¬Bail); masm.pop(temp); } masm.bind(&done); } #endif static void DumpTrackedSite(const BytecodeSite* site) { if (!JitSpewEnabled(JitSpew_OptimizationTracking)) return; #ifdef JS_JITSPEW unsigned column = 0; unsigned lineNumber = PCToLineNumber(site->script(), site->pc(), &column); JitSpew(JitSpew_OptimizationTracking, "Types for %s at %s:%u:%u", CodeName[JSOp(*site->pc())], site->script()->filename(), lineNumber, column); #endif } static void DumpTrackedOptimizations(TrackedOptimizations* optimizations) { if (!JitSpewEnabled(JitSpew_OptimizationTracking)) return; optimizations->spew(JitSpew_OptimizationTracking); } bool CodeGenerator::generateBody() { IonScriptCounts* counts = maybeCreateScriptCounts(); #if defined(JS_ION_PERF) PerfSpewer* perfSpewer = &perfSpewer_; if (gen->compilingWasm()) perfSpewer = &gen->perfSpewer(); #endif for (size_t i = 0; i < graph.numBlocks(); i++) { current = graph.getBlock(i); // Don't emit any 
code for trivial blocks, containing just a goto. Such // blocks are created to split critical edges, and if we didn't end up // putting any instructions in them, we can skip them. if (current->isTrivial()) continue; #ifdef JS_JITSPEW const char* filename = nullptr; size_t lineNumber = 0; unsigned columnNumber = 0; if (current->mir()->info().script()) { filename = current->mir()->info().script()->filename(); if (current->mir()->pc()) lineNumber = PCToLineNumber(current->mir()->info().script(), current->mir()->pc(), &columnNumber); } else { #ifdef DEBUG lineNumber = current->mir()->lineno(); columnNumber = current->mir()->columnIndex(); #endif } JitSpew(JitSpew_Codegen, "# block%zu %s:%zu:%u%s:", i, filename ? filename : "?", lineNumber, columnNumber, current->mir()->isLoopHeader() ? " (loop header)" : ""); #endif masm.bind(current->label()); mozilla::Maybe blockCounts; if (counts) { blockCounts.emplace(&counts->block(i), &masm); if (!blockCounts->init()) return false; } TrackedOptimizations* last = nullptr; #if defined(JS_ION_PERF) if (!perfSpewer->startBasicBlock(current->mir(), masm)) return false; #endif for (LInstructionIterator iter = current->begin(); iter != current->end(); iter++) { if (!alloc().ensureBallast()) return false; #ifdef JS_JITSPEW JitSpewStart(JitSpew_Codegen, "instruction %s", iter->opName()); if (const char* extra = iter->getExtraName()) JitSpewCont(JitSpew_Codegen, ":%s", extra); JitSpewFin(JitSpew_Codegen); #endif if (counts) blockCounts->visitInstruction(*iter); #ifdef CHECK_OSIPOINT_REGISTERS if (iter->safepoint()) resetOsiPointRegs(iter->safepoint()); #endif if (iter->mirRaw()) { // Only add instructions that have a tracked inline script tree. if (iter->mirRaw()->trackedTree()) { if (!addNativeToBytecodeEntry(iter->mirRaw()->trackedSite())) return false; } // Track the start native offset of optimizations. 
if (iter->mirRaw()->trackedOptimizations()) { if (last != iter->mirRaw()->trackedOptimizations()) { DumpTrackedSite(iter->mirRaw()->trackedSite()); DumpTrackedOptimizations(iter->mirRaw()->trackedOptimizations()); last = iter->mirRaw()->trackedOptimizations(); } if (!addTrackedOptimizationsEntry(iter->mirRaw()->trackedOptimizations())) return false; } } setElement(*iter); // needed to encode correct snapshot location. #ifdef DEBUG emitDebugForceBailing(*iter); #endif switch (iter->op()) { #ifndef JS_CODEGEN_NONE # define LIROP(op) case LNode::Opcode::op: visit##op(iter->to##op()); break; LIR_OPCODE_LIST(LIROP) # undef LIROP #endif case LNode::Opcode::Invalid: default: MOZ_CRASH("Invalid LIR op"); } // Track the end native offset of optimizations. if (iter->mirRaw() && iter->mirRaw()->trackedOptimizations()) extendTrackedOptimizationsEntry(iter->mirRaw()->trackedOptimizations()); #ifdef DEBUG if (!counts) emitDebugResultChecks(*iter); #endif } if (masm.oom()) return false; #if defined(JS_ION_PERF) perfSpewer->endBasicBlock(masm); #endif } return true; } // Out-of-line object allocation for LNewArray. 
class OutOfLineNewArray : public OutOfLineCodeBase { LNewArray* lir_; public: explicit OutOfLineNewArray(LNewArray* lir) : lir_(lir) { } void accept(CodeGenerator* codegen) override { codegen->visitOutOfLineNewArray(this); } LNewArray* lir() const { return lir_; } }; typedef JSObject* (*NewArrayOperationFn)(JSContext*, HandleScript, jsbytecode*, uint32_t, NewObjectKind); static const VMFunction NewArrayOperationInfo = FunctionInfo(NewArrayOperation, "NewArrayOperation"); static JSObject* NewArrayWithGroup(JSContext* cx, uint32_t length, HandleObjectGroup group, bool convertDoubleElements) { ArrayObject* res = NewFullyAllocatedArrayTryUseGroup(cx, group, length); if (!res) return nullptr; if (convertDoubleElements) res->setShouldConvertDoubleElements(); return res; } typedef JSObject* (*NewArrayWithGroupFn)(JSContext*, uint32_t, HandleObjectGroup, bool); static const VMFunction NewArrayWithGroupInfo = FunctionInfo(NewArrayWithGroup, "NewArrayWithGroup"); void CodeGenerator::visitNewArrayCallVM(LNewArray* lir) { Register objReg = ToRegister(lir->output()); MOZ_ASSERT(!lir->isCall()); saveLive(lir); JSObject* templateObject = lir->mir()->templateObject(); if (templateObject) { pushArg(Imm32(lir->mir()->convertDoubleElements())); pushArg(ImmGCPtr(templateObject->group())); pushArg(Imm32(lir->mir()->length())); callVM(NewArrayWithGroupInfo, lir); } else { pushArg(Imm32(GenericObject)); pushArg(Imm32(lir->mir()->length())); pushArg(ImmPtr(lir->mir()->pc())); pushArg(ImmGCPtr(lir->mir()->block()->info().script())); callVM(NewArrayOperationInfo, lir); } if (ReturnReg != objReg) masm.movePtr(ReturnReg, objReg); restoreLive(lir); } typedef JSObject* (*NewDerivedTypedObjectFn)(JSContext*, HandleObject type, HandleObject owner, int32_t offset); static const VMFunction CreateDerivedTypedObjInfo = FunctionInfo(CreateDerivedTypedObj, "CreateDerivedTypedObj"); void CodeGenerator::visitNewDerivedTypedObject(LNewDerivedTypedObject* lir) { pushArg(ToRegister(lir->offset())); 
pushArg(ToRegister(lir->owner())); pushArg(ToRegister(lir->type())); callVM(CreateDerivedTypedObjInfo, lir); } void CodeGenerator::visitAtan2D(LAtan2D* lir) { Register temp = ToRegister(lir->temp()); FloatRegister y = ToFloatRegister(lir->y()); FloatRegister x = ToFloatRegister(lir->x()); masm.setupUnalignedABICall(temp); masm.passABIArg(y, MoveOp::DOUBLE); masm.passABIArg(x, MoveOp::DOUBLE); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ecmaAtan2), MoveOp::DOUBLE); MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg); } void CodeGenerator::visitHypot(LHypot* lir) { Register temp = ToRegister(lir->temp()); uint32_t numArgs = lir->numArgs(); masm.setupUnalignedABICall(temp); for (uint32_t i = 0 ; i < numArgs; ++i) masm.passABIArg(ToFloatRegister(lir->getOperand(i)), MoveOp::DOUBLE); switch(numArgs) { case 2: masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ecmaHypot), MoveOp::DOUBLE); break; case 3: masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, hypot3), MoveOp::DOUBLE); break; case 4: masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, hypot4), MoveOp::DOUBLE); break; default: MOZ_CRASH("Unexpected number of arguments to hypot function."); } MOZ_ASSERT(ToFloatRegister(lir->output()) == ReturnDoubleReg); } void CodeGenerator::visitNewArray(LNewArray* lir) { Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); DebugOnly length = lir->mir()->length(); MOZ_ASSERT(length <= NativeObject::MAX_DENSE_ELEMENTS_COUNT); if (lir->mir()->isVMCall()) { visitNewArrayCallVM(lir); return; } OutOfLineNewArray* ool = new(alloc()) OutOfLineNewArray(lir); addOutOfLineCode(ool, lir->mir()); TemplateObject templateObject(lir->mir()->templateObject()); if (lir->mir()->convertDoubleElements()) templateObject.setConvertDoubleElements(); masm.createGCObject(objReg, tempReg, templateObject, lir->mir()->initialHeap(), ool->entry()); masm.bind(ool->rejoin()); } void CodeGenerator::visitOutOfLineNewArray(OutOfLineNewArray* ool) { visitNewArrayCallVM(ool->lir()); 
masm.jump(ool->rejoin()); } void CodeGenerator::visitNewArrayCopyOnWrite(LNewArrayCopyOnWrite* lir) { Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); ArrayObject* templateObject = lir->mir()->templateObject(); gc::InitialHeap initialHeap = lir->mir()->initialHeap(); // If we have a template object, we can inline call object creation. OutOfLineCode* ool = oolCallVM(NewArrayCopyOnWriteInfo, lir, ArgList(ImmGCPtr(templateObject), Imm32(initialHeap)), StoreRegisterTo(objReg)); TemplateObject templateObj(templateObject); templateObj.setDenseElementsAreCopyOnWrite(); masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry()); masm.bind(ool->rejoin()); } typedef ArrayObject* (*ArrayConstructorOneArgFn)(JSContext*, HandleObjectGroup, int32_t length); static const VMFunction ArrayConstructorOneArgInfo = FunctionInfo(ArrayConstructorOneArg, "ArrayConstructorOneArg"); void CodeGenerator::visitNewArrayDynamicLength(LNewArrayDynamicLength* lir) { Register lengthReg = ToRegister(lir->length()); Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); JSObject* templateObject = lir->mir()->templateObject(); gc::InitialHeap initialHeap = lir->mir()->initialHeap(); OutOfLineCode* ool = oolCallVM(ArrayConstructorOneArgInfo, lir, ArgList(ImmGCPtr(templateObject->group()), lengthReg), StoreRegisterTo(objReg)); bool canInline = true; size_t inlineLength = 0; if (templateObject->as().hasFixedElements()) { size_t numSlots = gc::GetGCKindSlots(templateObject->asTenured().getAllocKind()); inlineLength = numSlots - ObjectElements::VALUES_PER_HEADER; } else { canInline = false; } if (canInline) { // Try to do the allocation inline if the template object is big enough // for the length in lengthReg. 
If the length is bigger we could still // use the template object and not allocate the elements, but it's more // efficient to do a single big allocation than (repeatedly) reallocating // the array later on when filling it. masm.branch32(Assembler::Above, lengthReg, Imm32(inlineLength), ool->entry()); TemplateObject templateObj(templateObject); masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry()); size_t lengthOffset = NativeObject::offsetOfFixedElements() + ObjectElements::offsetOfLength(); masm.store32(lengthReg, Address(objReg, lengthOffset)); } else { masm.jump(ool->entry()); } masm.bind(ool->rejoin()); } typedef ArrayIteratorObject* (*NewArrayIteratorObjectFn)(JSContext*, NewObjectKind); static const VMFunction NewArrayIteratorObjectInfo = FunctionInfo(NewArrayIteratorObject, "NewArrayIteratorObject"); typedef StringIteratorObject* (*NewStringIteratorObjectFn)(JSContext*, NewObjectKind); static const VMFunction NewStringIteratorObjectInfo = FunctionInfo(NewStringIteratorObject, "NewStringIteratorObject"); void CodeGenerator::visitNewIterator(LNewIterator* lir) { Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); OutOfLineCode* ool; switch (lir->mir()->type()) { case MNewIterator::ArrayIterator: ool = oolCallVM(NewArrayIteratorObjectInfo, lir, ArgList(Imm32(GenericObject)), StoreRegisterTo(objReg)); break; case MNewIterator::StringIterator: ool = oolCallVM(NewStringIteratorObjectInfo, lir, ArgList(Imm32(GenericObject)), StoreRegisterTo(objReg)); break; default: MOZ_CRASH("unexpected iterator type"); } TemplateObject templateObject(lir->mir()->templateObject()); masm.createGCObject(objReg, tempReg, templateObject, gc::DefaultHeap, ool->entry()); masm.bind(ool->rejoin()); } typedef TypedArrayObject* (*TypedArrayConstructorOneArgFn)(JSContext*, HandleObject, int32_t length); static const VMFunction TypedArrayConstructorOneArgInfo = FunctionInfo(TypedArrayCreateWithTemplate, 
"TypedArrayCreateWithTemplate"); void CodeGenerator::visitNewTypedArray(LNewTypedArray* lir) { Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp1()); Register lengthReg = ToRegister(lir->temp2()); LiveRegisterSet liveRegs = lir->safepoint()->liveRegs(); JSObject* templateObject = lir->mir()->templateObject(); gc::InitialHeap initialHeap = lir->mir()->initialHeap(); TypedArrayObject* ttemplate = &templateObject->as(); uint32_t n = ttemplate->length(); OutOfLineCode* ool = oolCallVM(TypedArrayConstructorOneArgInfo, lir, ArgList(ImmGCPtr(templateObject), Imm32(n)), StoreRegisterTo(objReg)); TemplateObject templateObj(templateObject); masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry()); masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(), ttemplate, MacroAssembler::TypedArrayLength::Fixed); masm.bind(ool->rejoin()); } void CodeGenerator::visitNewTypedArrayDynamicLength(LNewTypedArrayDynamicLength* lir) { Register lengthReg = ToRegister(lir->length()); Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); LiveRegisterSet liveRegs = lir->safepoint()->liveRegs(); JSObject* templateObject = lir->mir()->templateObject(); gc::InitialHeap initialHeap = lir->mir()->initialHeap(); TypedArrayObject* ttemplate = &templateObject->as(); OutOfLineCode* ool = oolCallVM(TypedArrayConstructorOneArgInfo, lir, ArgList(ImmGCPtr(templateObject), lengthReg), StoreRegisterTo(objReg)); TemplateObject templateObj(templateObject); masm.createGCObject(objReg, tempReg, templateObj, initialHeap, ool->entry()); masm.initTypedArraySlots(objReg, tempReg, lengthReg, liveRegs, ool->entry(), ttemplate, MacroAssembler::TypedArrayLength::Dynamic); masm.bind(ool->rejoin()); } // Out-of-line object allocation for JSOP_NEWOBJECT. 
class OutOfLineNewObject : public OutOfLineCodeBase { LNewObject* lir_; public: explicit OutOfLineNewObject(LNewObject* lir) : lir_(lir) { } void accept(CodeGenerator* codegen) override { codegen->visitOutOfLineNewObject(this); } LNewObject* lir() const { return lir_; } }; typedef JSObject* (*NewInitObjectWithTemplateFn)(JSContext*, HandleObject); static const VMFunction NewInitObjectWithTemplateInfo = FunctionInfo(NewObjectOperationWithTemplate, "NewObjectOperationWithTemplate"); typedef JSObject* (*NewInitObjectFn)(JSContext*, HandleScript, jsbytecode* pc, NewObjectKind); static const VMFunction NewInitObjectInfo = FunctionInfo(NewObjectOperation, "NewObjectOperation"); typedef PlainObject* (*ObjectCreateWithTemplateFn)(JSContext*, HandlePlainObject); static const VMFunction ObjectCreateWithTemplateInfo = FunctionInfo(ObjectCreateWithTemplate, "ObjectCreateWithTemplate"); void CodeGenerator::visitNewObjectVMCall(LNewObject* lir) { Register objReg = ToRegister(lir->output()); MOZ_ASSERT(!lir->isCall()); saveLive(lir); JSObject* templateObject = lir->mir()->templateObject(); // If we're making a new object with a class prototype (that is, an object // that derives its class from its prototype instead of being // PlainObject::class_'d) from self-hosted code, we need a different init // function. 
switch (lir->mir()->mode()) { case MNewObject::ObjectLiteral: if (templateObject) { pushArg(ImmGCPtr(templateObject)); callVM(NewInitObjectWithTemplateInfo, lir); } else { pushArg(Imm32(GenericObject)); pushArg(ImmPtr(lir->mir()->resumePoint()->pc())); pushArg(ImmGCPtr(lir->mir()->block()->info().script())); callVM(NewInitObjectInfo, lir); } break; case MNewObject::ObjectCreate: pushArg(ImmGCPtr(templateObject)); callVM(ObjectCreateWithTemplateInfo, lir); break; } if (ReturnReg != objReg) masm.movePtr(ReturnReg, objReg); restoreLive(lir); } static bool ShouldInitFixedSlots(LInstruction* lir, const TemplateObject& obj) { if (!obj.isNative()) return true; const NativeTemplateObject& templateObj = obj.asNativeTemplateObject(); // Look for StoreFixedSlot instructions following an object allocation // that write to this object before a GC is triggered or this object is // passed to a VM call. If all fixed slots will be initialized, the // allocation code doesn't need to set the slots to |undefined|. uint32_t nfixed = templateObj.numUsedFixedSlots(); if (nfixed == 0) return false; // Only optimize if all fixed slots are initially |undefined|, so that we // can assume incremental pre-barriers are not necessary. See also the // comment below. for (uint32_t slot = 0; slot < nfixed; slot++) { if (!templateObj.getSlot(slot).isUndefined()) return true; } // Keep track of the fixed slots that are initialized. initializedSlots is // a bit mask with a bit for each slot. MOZ_ASSERT(nfixed <= NativeObject::MAX_FIXED_SLOTS); static_assert(NativeObject::MAX_FIXED_SLOTS <= 32, "Slot bits must fit in 32 bits"); uint32_t initializedSlots = 0; uint32_t numInitialized = 0; MInstruction* allocMir = lir->mirRaw()->toInstruction(); MBasicBlock* block = allocMir->block(); // Skip the allocation instruction. 
MInstructionIterator iter = block->begin(allocMir); MOZ_ASSERT(*iter == allocMir); iter++; while (true) { for (; iter != block->end(); iter++) { if (iter->isNop() || iter->isConstant() || iter->isPostWriteBarrier()) { // These instructions won't trigger a GC or read object slots. continue; } if (iter->isStoreFixedSlot()) { MStoreFixedSlot* store = iter->toStoreFixedSlot(); if (store->object() != allocMir) return true; // We may not initialize this object slot on allocation, so the // pre-barrier could read uninitialized memory. Simply disable // the barrier for this store: the object was just initialized // so the barrier is not necessary. store->setNeedsBarrier(false); uint32_t slot = store->slot(); MOZ_ASSERT(slot < nfixed); if ((initializedSlots & (1 << slot)) == 0) { numInitialized++; initializedSlots |= (1 << slot); if (numInitialized == nfixed) { // All fixed slots will be initialized. MOZ_ASSERT(mozilla::CountPopulation32(initializedSlots) == nfixed); return false; } } continue; } if (iter->isGoto()) { block = iter->toGoto()->target(); if (block->numPredecessors() != 1) return true; break; } // Unhandled instruction, assume it bails or reads object slots. 
return true; } iter = block->begin(); } MOZ_CRASH("Shouldn't get here"); } void CodeGenerator::visitNewObject(LNewObject* lir) { Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); if (lir->mir()->isVMCall()) { visitNewObjectVMCall(lir); return; } OutOfLineNewObject* ool = new(alloc()) OutOfLineNewObject(lir); addOutOfLineCode(ool, lir->mir()); TemplateObject templateObject(lir->mir()->templateObject()); bool initContents = ShouldInitFixedSlots(lir, templateObject); masm.createGCObject(objReg, tempReg, templateObject, lir->mir()->initialHeap(), ool->entry(), initContents); masm.bind(ool->rejoin()); } void CodeGenerator::visitOutOfLineNewObject(OutOfLineNewObject* ool) { visitNewObjectVMCall(ool->lir()); masm.jump(ool->rejoin()); } typedef InlineTypedObject* (*NewTypedObjectFn)(JSContext*, Handle, gc::InitialHeap); static const VMFunction NewTypedObjectInfo = FunctionInfo(InlineTypedObject::createCopy, "InlineTypedObject::createCopy"); void CodeGenerator::visitNewTypedObject(LNewTypedObject* lir) { Register object = ToRegister(lir->output()); Register temp = ToRegister(lir->temp()); InlineTypedObject* templateObject = lir->mir()->templateObject(); gc::InitialHeap initialHeap = lir->mir()->initialHeap(); OutOfLineCode* ool = oolCallVM(NewTypedObjectInfo, lir, ArgList(ImmGCPtr(templateObject), Imm32(initialHeap)), StoreRegisterTo(object)); TemplateObject templateObj(templateObject); masm.createGCObject(object, temp, templateObj, initialHeap, ool->entry()); masm.bind(ool->rejoin()); } typedef js::NamedLambdaObject* (*NewNamedLambdaObjectFn)(JSContext*, HandleFunction, gc::InitialHeap); static const VMFunction NewNamedLambdaObjectInfo = FunctionInfo(NamedLambdaObject::createTemplateObject, "NamedLambdaObject::createTemplateObject"); void CodeGenerator::visitNewNamedLambdaObject(LNewNamedLambdaObject* lir) { Register objReg = ToRegister(lir->output()); Register tempReg = ToRegister(lir->temp()); const CompileInfo& info = 
    lir->mir()->block()->info();

    // If we have a template object, we can inline call object creation.
    OutOfLineCode* ool = oolCallVM(NewNamedLambdaObjectInfo, lir,
                                   ArgList(ImmGCPtr(info.funMaybeLazy()), Imm32(gc::DefaultHeap)),
                                   StoreRegisterTo(objReg));

    TemplateObject templateObject(lir->mir()->templateObj());
    bool initContents = ShouldInitFixedSlots(lir, templateObject);
    masm.createGCObject(objReg, tempReg, templateObject, gc::DefaultHeap, ool->entry(),
                        initContents);

    masm.bind(ool->rejoin());
}

typedef JSObject* (*NewCallObjectFn)(JSContext*, HandleShape, HandleObjectGroup);
static const VMFunction NewCallObjectInfo =
    FunctionInfo(NewCallObject, "NewCallObject");

// Inline-allocate a CallObject from its template shape/group.
void
CodeGenerator::visitNewCallObject(LNewCallObject* lir)
{
    Register objReg = ToRegister(lir->output());
    Register tempReg = ToRegister(lir->temp());

    CallObject* templateObj = lir->mir()->templateObject();

    OutOfLineCode* ool = oolCallVM(NewCallObjectInfo, lir,
                                   ArgList(ImmGCPtr(templateObj->lastProperty()),
                                           ImmGCPtr(templateObj->group())),
                                   StoreRegisterTo(objReg));

    // Inline call object creation, using the OOL path only for tricky cases.
    TemplateObject templateObject(templateObj);
    bool initContents = ShouldInitFixedSlots(lir, templateObject);
    masm.createGCObject(objReg, tempReg, templateObject, gc::DefaultHeap, ool->entry(),
                        initContents);

    masm.bind(ool->rejoin());
}

typedef JSObject* (*NewSingletonCallObjectFn)(JSContext*, HandleShape);
static const VMFunction NewSingletonCallObjectInfo =
    FunctionInfo(NewSingletonCallObject, "NewSingletonCallObject");

// Singleton call objects always go through the VM (no inline path).
void
CodeGenerator::visitNewSingletonCallObject(LNewSingletonCallObject* lir)
{
    Register objReg = ToRegister(lir->output());

    JSObject* templateObj = lir->mir()->templateObject();

    OutOfLineCode* ool;
    ool = oolCallVM(NewSingletonCallObjectInfo, lir,
                    ArgList(ImmGCPtr(templateObj->as().lastProperty())),
                    StoreRegisterTo(objReg));

    // Objects can only be given singleton types in VM calls.  We make the call
    // out of line to not bloat inline code, even if (naively) this seems like
    // extra work.
    masm.jump(ool->entry());
    masm.bind(ool->rejoin());
}

typedef JSObject* (*NewStringObjectFn)(JSContext*, HandleString);
static const VMFunction NewStringObjectInfo =
    FunctionInfo(NewStringObject, "NewStringObject");

// Allocate a String wrapper object and fill in its primitive-value and
// length slots inline.
void
CodeGenerator::visitNewStringObject(LNewStringObject* lir)
{
    Register input = ToRegister(lir->input());
    Register output = ToRegister(lir->output());
    Register temp = ToRegister(lir->temp());

    StringObject* templateObj = lir->mir()->templateObj();

    OutOfLineCode* ool = oolCallVM(NewStringObjectInfo, lir, ArgList(input),
                                   StoreRegisterTo(output));

    TemplateObject templateObject(templateObj);
    masm.createGCObject(output, temp, templateObject, gc::DefaultHeap, ool->entry());

    masm.loadStringLength(input, temp);

    masm.storeValue(JSVAL_TYPE_STRING, input,
                    Address(output, StringObject::offsetOfPrimitiveValue()));
    masm.storeValue(JSVAL_TYPE_INT32, temp,
                    Address(output, StringObject::offsetOfLength()));

    masm.bind(ool->rejoin());
}

typedef bool(*InitElemFn)(JSContext* cx, jsbytecode* pc, HandleObject obj,
                          HandleValue id, HandleValue value);
static const VMFunction InitElemInfo =
    FunctionInfo(InitElemOperation, "InitElemOperation");

// JSOP_INITELEM: always a VM call.
void
CodeGenerator::visitInitElem(LInitElem* lir)
{
    Register objReg = ToRegister(lir->getObject());

    pushArg(ToValue(lir, LInitElem::ValueIndex));
    pushArg(ToValue(lir, LInitElem::IdIndex));
    pushArg(objReg);
    pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));

    callVM(InitElemInfo, lir);
}

typedef bool (*InitElemGetterSetterFn)(JSContext*, jsbytecode*, HandleObject,
                                       HandleValue, HandleObject);
static const VMFunction InitElemGetterSetterInfo =
    FunctionInfo(InitGetterSetterOperation, "InitElemGetterSetterOperation");

void
CodeGenerator::visitInitElemGetterSetter(LInitElemGetterSetter* lir)
{
    Register obj = ToRegister(lir->object());
    Register value = ToRegister(lir->value());

    pushArg(value);
    pushArg(ToValue(lir,
    LInitElemGetterSetter::IdIndex));
    pushArg(obj);
    pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));

    callVM(InitElemGetterSetterInfo, lir);
}

typedef bool(*MutatePrototypeFn)(JSContext* cx, HandlePlainObject obj, HandleValue value);
static const VMFunction MutatePrototypeInfo =
    FunctionInfo(MutatePrototype, "MutatePrototype");

// JSOP_MUTATEPROTO: always a VM call.
void
CodeGenerator::visitMutateProto(LMutateProto* lir)
{
    Register objReg = ToRegister(lir->getObject());

    pushArg(ToValue(lir, LMutateProto::ValueIndex));
    pushArg(objReg);

    callVM(MutatePrototypeInfo, lir);
}

typedef bool (*InitPropGetterSetterFn)(JSContext*, jsbytecode*, HandleObject,
                                       HandlePropertyName, HandleObject);
static const VMFunction InitPropGetterSetterInfo =
    FunctionInfo(InitGetterSetterOperation, "InitPropGetterSetterOperation");

void
CodeGenerator::visitInitPropGetterSetter(LInitPropGetterSetter* lir)
{
    Register obj = ToRegister(lir->object());
    Register value = ToRegister(lir->value());

    pushArg(value);
    pushArg(ImmGCPtr(lir->mir()->name()));
    pushArg(obj);
    pushArg(ImmPtr(lir->mir()->resumePoint()->pc()));

    callVM(InitPropGetterSetterInfo, lir);
}

typedef bool (*CreateThisFn)(JSContext* cx, HandleObject callee, HandleObject newTarget,
                             MutableHandleValue rval);
static const VMFunction CreateThisInfoCodeGen = FunctionInfo(CreateThis, "CreateThis");

// Create a |this| object for a scripted constructor call via the VM.
// Callee and new.target may be either constants or registers.
void
CodeGenerator::visitCreateThis(LCreateThis* lir)
{
    const LAllocation* callee = lir->getCallee();
    const LAllocation* newTarget = lir->getNewTarget();

    // Arguments are pushed in reverse order of the VM signature.
    if (newTarget->isConstant())
        pushArg(ImmGCPtr(&newTarget->toConstant()->toObject()));
    else
        pushArg(ToRegister(newTarget));

    if (callee->isConstant())
        pushArg(ImmGCPtr(&callee->toConstant()->toObject()));
    else
        pushArg(ToRegister(callee));

    callVM(CreateThisInfoCodeGen, lir);
}

// Thin wrapper so the VM-function table can reference a non-overloaded symbol.
static JSObject*
CreateThisForFunctionWithProtoWrapper(JSContext* cx, HandleObject callee,
                                      HandleObject newTarget, HandleObject proto)
{
    return CreateThisForFunctionWithProto(cx, callee, newTarget, proto);
}

typedef JSObject* (*CreateThisWithProtoFn)(JSContext* cx,
                                           HandleObject callee,
                                           HandleObject newTarget,
                                           HandleObject proto);
static const VMFunction CreateThisWithProtoInfo =
    FunctionInfo(CreateThisForFunctionWithProtoWrapper,
                 "CreateThisForFunctionWithProtoWrapper");

// Create |this| with an explicit prototype, via the VM.
void
CodeGenerator::visitCreateThisWithProto(LCreateThisWithProto* lir)
{
    const LAllocation* callee = lir->getCallee();
    const LAllocation* newTarget = lir->getNewTarget();
    const LAllocation* proto = lir->getPrototype();

    if (proto->isConstant())
        pushArg(ImmGCPtr(&proto->toConstant()->toObject()));
    else
        pushArg(ToRegister(proto));

    if (newTarget->isConstant())
        pushArg(ImmGCPtr(&newTarget->toConstant()->toObject()));
    else
        pushArg(ToRegister(newTarget));

    if (callee->isConstant())
        pushArg(ImmGCPtr(&callee->toConstant()->toObject()));
    else
        pushArg(ToRegister(callee));

    callVM(CreateThisWithProtoInfo, lir);
}

// Inline-allocate |this| from a template object.
void
CodeGenerator::visitCreateThisWithTemplate(LCreateThisWithTemplate* lir)
{
    JSObject* templateObject = lir->mir()->templateObject();
    Register objReg = ToRegister(lir->output());
    Register tempReg = ToRegister(lir->temp());

    OutOfLineCode* ool = oolCallVM(NewInitObjectWithTemplateInfo, lir,
                                   ArgList(ImmGCPtr(templateObject)),
                                   StoreRegisterTo(objReg));

    // Allocate. If the FreeList is empty, call to VM, which may GC.
    TemplateObject templateObj(templateObject);
    bool initContents = !templateObj.isPlainObject() || ShouldInitFixedSlots(lir, templateObj);
    masm.createGCObject(objReg, tempReg, templateObj, lir->mir()->initialHeap(), ool->entry(),
                        initContents);

    masm.bind(ool->rejoin());
}

typedef JSObject* (*NewIonArgumentsObjectFn)(JSContext* cx, JitFrameLayout* frame, HandleObject);
static const VMFunction NewIonArgumentsObjectInfo =
    FunctionInfo((NewIonArgumentsObjectFn) ArgumentsObject::createForIon,
                 "ArgumentsObject::createForIon");

void
CodeGenerator::visitCreateArgumentsObject(LCreateArgumentsObject* lir)
{
    // This should be getting constructed in the first block only, and not any OSR entry blocks.
    MOZ_ASSERT(lir->mir()->block()->id() == 0);

    Register callObj = ToRegister(lir->getCallObject());
    Register temp = ToRegister(lir->temp0());
    Label done;

    if (ArgumentsObject* templateObj = lir->mir()->templateObject()) {
        Register objTemp = ToRegister(lir->temp1());
        Register cxTemp = ToRegister(lir->temp2());

        masm.Push(callObj);

        // Try to allocate an arguments object. This will leave the reserved
        // slots uninitialized, so it's important we don't GC until we
        // initialize these slots in ArgumentsObject::finishForIon.
        Label failure;
        TemplateObject templateObject(templateObj);
        masm.createGCObject(objTemp, temp, templateObject, gc::DefaultHeap, &failure,
                            /* initContents = */ false);

        // Pass a pointer to the frame (stack pointer + pushed bytes) so
        // finishForIon can read the actual arguments.
        masm.moveStackPtrTo(temp);
        masm.addPtr(Imm32(masm.framePushed()), temp);

        masm.setupUnalignedABICall(cxTemp);
        masm.loadJSContext(cxTemp);
        masm.passABIArg(cxTemp);
        masm.passABIArg(temp);
        masm.passABIArg(callObj);
        masm.passABIArg(objTemp);

        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ArgumentsObject::finishForIon));
        masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, &failure);

        // Discard saved callObj on the stack.
        masm.addToStackPtr(Imm32(sizeof(uintptr_t)));
        masm.jump(&done);

        masm.bind(&failure);
        masm.Pop(callObj);
    }

    // Fallback: create the arguments object through the VM.
    masm.moveStackPtrTo(temp);
    masm.addPtr(Imm32(frameSize()), temp);

    pushArg(callObj);
    pushArg(temp);
    callVM(NewIonArgumentsObjectInfo, lir);

    masm.bind(&done);
}

// Load a formal argument out of an arguments object's ArgumentsData.
void
CodeGenerator::visitGetArgumentsObjectArg(LGetArgumentsObjectArg* lir)
{
    Register temp = ToRegister(lir->getTemp(0));
    Register argsObj = ToRegister(lir->getArgsObject());
    ValueOperand out = ToOutValue(lir);

    masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()), temp);
    Address argAddr(temp, ArgumentsData::offsetOfArgs() + lir->mir()->argno() * sizeof(Value));
    masm.loadValue(argAddr, out);
#ifdef DEBUG
    Label success;
    masm.branchTestMagic(Assembler::NotEqual, out, &success);
    masm.assumeUnreachable("Result from ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
    masm.bind(&success);
#endif
}

// Store a formal argument into an arguments object's ArgumentsData,
// emitting the required pre-barrier on the overwritten slot.
void
CodeGenerator::visitSetArgumentsObjectArg(LSetArgumentsObjectArg* lir)
{
    Register temp = ToRegister(lir->getTemp(0));
    Register argsObj = ToRegister(lir->getArgsObject());
    ValueOperand value = ToValue(lir, LSetArgumentsObjectArg::ValueIndex);

    masm.loadPrivate(Address(argsObj, ArgumentsObject::getDataSlotOffset()), temp);
    Address argAddr(temp, ArgumentsData::offsetOfArgs() + lir->mir()->argno() * sizeof(Value));
    emitPreBarrier(argAddr);
#ifdef DEBUG
    Label success;
    masm.branchTestMagic(Assembler::NotEqual, argAddr, &success);
    masm.assumeUnreachable("Result in ArgumentObject shouldn't be JSVAL_TYPE_MAGIC.");
    masm.bind(&success);
#endif
    masm.storeValue(value, argAddr);
}

// Implement the constructor-return rule: if the returned value is an
// object use it, otherwise use the |this| object.
void
CodeGenerator::visitReturnFromCtor(LReturnFromCtor* lir)
{
    ValueOperand value = ToValue(lir, LReturnFromCtor::ValueIndex);
    Register obj = ToRegister(lir->getObject());
    Register output = ToRegister(lir->output());

    Label valueIsObject, end;

    masm.branchTestObject(Assembler::Equal, value, &valueIsObject);

    // Value is not an object. Return that other object.
    masm.movePtr(obj, output);
    masm.jump(&end);

    // Value is an object. Return unbox(Value).
    masm.bind(&valueIsObject);
    Register payload = masm.extractObject(value, output);
    if (payload != output)
        masm.movePtr(payload, output);

    masm.bind(&end);
}

typedef bool (*BoxNonStrictThisFn)(JSContext*, HandleValue, MutableHandleValue);
static const VMFunction BoxNonStrictThisInfo =
    FunctionInfo(BoxNonStrictThis, "BoxNonStrictThis");

// Non-strict |this| computation: objects pass through inline; primitives
// are boxed out of line via the VM.
void
CodeGenerator::visitComputeThis(LComputeThis* lir)
{
    ValueOperand value = ToValue(lir, LComputeThis::ValueIndex);
    ValueOperand output = ToOutValue(lir);

    OutOfLineCode* ool = oolCallVM(BoxNonStrictThisInfo, lir, ArgList(value),
                                   StoreValueTo(output));

    masm.branchTestObject(Assembler::NotEqual, value, ool->entry());
    masm.moveValue(value, output);
    masm.bind(ool->rejoin());
}

void
CodeGenerator::visitImplicitThis(LImplicitThis* lir)
{
    pushArg(ImmGCPtr(lir->mir()->name()));
    pushArg(ToRegister(lir->env()));
    callVM(ImplicitThisInfo, lir);
}

// Arrow functions store their new.target in an extended function slot.
void
CodeGenerator::visitArrowNewTarget(LArrowNewTarget* lir)
{
    Register callee = ToRegister(lir->callee());
    ValueOperand output = ToOutValue(lir);
    masm.loadValue(Address(callee, FunctionExtended::offsetOfArrowNewTargetSlot()), output);
}

void
CodeGenerator::visitArrayLength(LArrayLength* lir)
{
    Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength());
    masm.load32(length, ToRegister(lir->output()));
}

// Store index + 1 as the new length. When index is in a register we must
// restore it afterwards (sub32) because the register is still live.
static void
SetLengthFromIndex(MacroAssembler& masm, const LAllocation* index, const Address& length)
{
    if (index->isConstant()) {
        masm.store32(Imm32(ToInt32(index) + 1), length);
    } else {
        Register newLength = ToRegister(index);
        masm.add32(Imm32(1), newLength);
        masm.store32(newLength, length);
        masm.sub32(Imm32(1), newLength);
    }
}

void
CodeGenerator::visitSetArrayLength(LSetArrayLength* lir)
{
    Address length(ToRegister(lir->elements()), ObjectElements::offsetOfLength());
    SetLengthFromIndex(masm, lir->index(), length);
}

template  static void
RangeFront(MacroAssembler&, Register, Register, Register);

template <> void
RangeFront(MacroAssembler& masm, Register range,
           Register i, Register front)
{
    // front = &range.ht.data[i] for a ValueMap (entry size 24 = i*3*8).
    masm.loadPtr(Address(range, ValueMap::Range::offsetOfHashTable()), front);
    masm.loadPtr(Address(front, ValueMap::offsetOfImplData()), front);

    MOZ_ASSERT(ValueMap::offsetOfImplDataElement() == 0, "offsetof(Data, element) is 0");
    static_assert(ValueMap::sizeofImplData() == 24, "sizeof(Data) is 24");
    masm.mulBy3(i, i);
    masm.lshiftPtr(Imm32(3), i);
    masm.addPtr(i, front);
}

template <> void
RangeFront(MacroAssembler& masm, Register range, Register i, Register front)
{
    // front = &range.ht.data[i] for a ValueSet (entry size 16 = i*16).
    masm.loadPtr(Address(range, ValueSet::Range::offsetOfHashTable()), front);
    masm.loadPtr(Address(front, ValueSet::offsetOfImplData()), front);

    MOZ_ASSERT(ValueSet::offsetOfImplDataElement() == 0, "offsetof(Data, element) is 0");
    static_assert(ValueSet::sizeofImplData() == 16, "sizeof(Data) is 16");
    masm.lshiftPtr(Imm32(4), i);
    masm.addPtr(i, front);
}

// Advance |range| past the current entry, skipping empty (removed) entries.
template  static void
RangePopFront(MacroAssembler& masm, Register range, Register front, Register dataLength,
              Register temp)
{
    Register i = temp;

    masm.add32(Imm32(1), Address(range, OrderedHashTable::Range::offsetOfCount()));

    masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), i);

    Label done, seek;
    masm.bind(&seek);
    masm.add32(Imm32(1), i);
    masm.branch32(Assembler::AboveOrEqual, i, dataLength, &done);

    // We can add sizeof(Data) to |front| to select the next element, because
    // |front| and |range.ht.data[i]| point to the same location.
    MOZ_ASSERT(OrderedHashTable::offsetOfImplDataElement() == 0, "offsetof(Data, element) is 0");
    masm.addPtr(Imm32(OrderedHashTable::sizeofImplData()), front);

    masm.branchTestMagic(Assembler::Equal, Address(front, OrderedHashTable::offsetOfEntryKey()),
                         JS_HASH_KEY_EMPTY, &seek);

    masm.bind(&done);
    masm.store32(i, Address(range, OrderedHashTable::Range::offsetOfI()));
}

// Unlink |range| from its table's range list and free it (unless the
// iterator is nursery-allocated, in which case the nursery owns it).
template  static inline void
RangeDestruct(MacroAssembler& masm, Register iter, Register range, Register temp0,
              Register temp1)
{
    Register next = temp0;
    Register prevp = temp1;

    masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfNext()), next);
    masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfPrevP()), prevp);
    masm.storePtr(next, Address(prevp, 0));

    Label hasNoNext;
    masm.branchTestPtr(Assembler::Zero, next, next, &hasNoNext);

    masm.storePtr(prevp, Address(next, OrderedHashTable::Range::offsetOfPrevP()));

    masm.bind(&hasNoNext);

    Label nurseryAllocated;
    masm.branchPtrInNurseryChunk(Assembler::Equal, iter, temp0, &nurseryAllocated);

    masm.callFreeStub(range);

    masm.bind(&nurseryAllocated);
}

// Copy the current map entry's key and value into the result array's two
// fixed elements, with pre- and post-barriers as needed.
template <> void
CodeGenerator::emitLoadIteratorValues(Register result, Register temp, Register front)
{
    size_t elementsOffset = NativeObject::offsetOfFixedElements();

    Address keyAddress(front, ValueMap::Entry::offsetOfKey());
    Address valueAddress(front, ValueMap::Entry::offsetOfValue());
    Address keyElemAddress(result, elementsOffset);
    Address valueElemAddress(result, elementsOffset + sizeof(Value));
    masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
    masm.guardedCallPreBarrier(valueElemAddress, MIRType::Value);
    masm.storeValue(keyAddress, keyElemAddress, temp);
    masm.storeValue(valueAddress, valueElemAddress, temp);

    Label emitBarrier, skipBarrier;
    masm.branchValueIsNurseryCell(Assembler::Equal, keyAddress, temp, &emitBarrier);
    masm.branchValueIsNurseryCell(Assembler::NotEqual, valueAddress, temp, &skipBarrier);
    {
        masm.bind(&emitBarrier);
        saveVolatile(temp);
        emitPostWriteBarrier(result);
        restoreVolatile(temp);
    }
    masm.bind(&skipBarrier);
}

// Copy the current set entry's key into the result array's fixed element.
template <> void
CodeGenerator::emitLoadIteratorValues(Register result, Register temp, Register front)
{
    size_t elementsOffset = NativeObject::offsetOfFixedElements();

    Address keyAddress(front, ValueSet::offsetOfEntryKey());
    Address keyElemAddress(result, elementsOffset);
    masm.guardedCallPreBarrier(keyElemAddress, MIRType::Value);
    masm.storeValue(keyAddress, keyElemAddress, temp);

    Label skipBarrier;
    masm.branchValueIsNurseryCell(Assembler::NotEqual, keyAddress, temp, &skipBarrier);
    {
        saveVolatile(temp);
        emitPostWriteBarrier(result);
        restoreVolatile(temp);
    }
    masm.bind(&skipBarrier);
}

// Shared Map/Set iterator stepping: load the next live entry into |result|
// and set output=0, or tear down the exhausted range and set output=1.
template  void
CodeGenerator::emitGetNextEntryForIterator(LGetNextEntryForIterator* lir)
{
    Register iter = ToRegister(lir->iter());
    Register result = ToRegister(lir->result());
    Register temp = ToRegister(lir->temp0());
    Register dataLength = ToRegister(lir->temp1());
    Register range = ToRegister(lir->temp2());
    Register output = ToRegister(lir->output());

#ifdef DEBUG
    // Self-hosted code is responsible for ensuring GetNextEntryForIterator is
    // only called with the correct iterator class. Assert here all self-
    // hosted callers of GetNextEntryForIterator perform this class check.
    // No Spectre mitigations are needed because this is DEBUG-only code.
    Label success;
    masm.branchTestObjClassNoSpectreMitigations(Assembler::Equal, iter,
                                                &IteratorObject::class_, temp, &success);
    masm.assumeUnreachable("Iterator object should have the correct class.");
    masm.bind(&success);
#endif

    masm.loadPrivate(Address(iter, NativeObject::getFixedSlotOffset(IteratorObject::RangeSlot)),
                     range);

    Label iterAlreadyDone, iterDone, done;
    masm.branchTestPtr(Assembler::Zero, range, range, &iterAlreadyDone);

    masm.load32(Address(range, OrderedHashTable::Range::offsetOfI()), temp);
    masm.loadPtr(Address(range, OrderedHashTable::Range::offsetOfHashTable()), dataLength);
    masm.load32(Address(dataLength, OrderedHashTable::offsetOfImplDataLength()), dataLength);
    masm.branch32(Assembler::AboveOrEqual, temp, dataLength, &iterDone);
    {
        // |iter| is spilled so its register can serve as |front|.
        masm.push(iter);

        Register front = iter;
        RangeFront(masm, range, temp, front);

        emitLoadIteratorValues(result, temp, front);

        RangePopFront(masm, range, front, dataLength, temp);

        masm.pop(iter);
        masm.move32(Imm32(0), output);
    }
    masm.jump(&done);
    {
        masm.bind(&iterDone);

        RangeDestruct(masm, iter, range, temp, dataLength);

        masm.storeValue(PrivateValue(nullptr),
                        Address(iter, NativeObject::getFixedSlotOffset(IteratorObject::RangeSlot)));

        masm.bind(&iterAlreadyDone);

        masm.move32(Imm32(1), output);
    }
    masm.bind(&done);
}

void
CodeGenerator::visitGetNextEntryForIterator(LGetNextEntryForIterator* lir)
{
    if (lir->mir()->mode() == MGetNextEntryForIterator::Map) {
        emitGetNextEntryForIterator(lir);
    } else {
        MOZ_ASSERT(lir->mir()->mode() == MGetNextEntryForIterator::Set);
        emitGetNextEntryForIterator(lir);
    }
}

// Emit a wasm call of any callee kind, restoring TLS/pinned registers and
// the realm afterwards when the callee doesn't preserve them.
void
CodeGenerator::emitWasmCallBase(MWasmCall* mir, bool needsBoundsCheck)
{
    if (mir->spIncrement())
        masm.freeStack(mir->spIncrement());

    MOZ_ASSERT((sizeof(wasm::Frame) + masm.framePushed()) % WasmStackAlignment == 0);
    static_assert(WasmStackAlignment >= ABIStackAlignment &&
                  WasmStackAlignment % ABIStackAlignment == 0,
                  "The wasm stack alignment should subsume the ABI-required alignment");

#ifdef DEBUG
    Label ok;
    masm.branchTestStackPtr(Assembler::Zero, Imm32(WasmStackAlignment - 1), &ok);
    masm.breakpoint();
    masm.bind(&ok);
#endif

    // LWasmCallBase::isCallPreserved() assumes that all MWasmCalls preserve the
    // TLS and pinned regs. The only case where we don't have to reload
    // the TLS and pinned regs is when the callee preserves them.
    bool reloadRegs = true;
    bool switchRealm = true;

    const wasm::CallSiteDesc& desc = mir->desc();
    const wasm::CalleeDesc& callee = mir->callee();
    switch (callee.which()) {
      case wasm::CalleeDesc::Func:
        masm.call(desc, callee.funcIndex());
        reloadRegs = false;
        switchRealm = false;
        break;
      case wasm::CalleeDesc::Import:
        masm.wasmCallImport(desc, callee);
        break;
      case wasm::CalleeDesc::AsmJSTable:
      case wasm::CalleeDesc::WasmTable:
        masm.wasmCallIndirect(desc, callee, needsBoundsCheck);
        // External tables may call into a different instance/realm.
        reloadRegs = switchRealm = (callee.which() == wasm::CalleeDesc::WasmTable &&
                                    callee.wasmTableIsExternal());
        break;
      case wasm::CalleeDesc::Builtin:
        masm.call(desc, callee.builtin());
        reloadRegs = false;
        switchRealm = false;
        break;
      case wasm::CalleeDesc::BuiltinInstanceMethod:
        masm.wasmCallBuiltinInstanceMethod(desc, mir->instanceArg(), callee.builtin());
        switchRealm = false;
        break;
    }

    if (reloadRegs) {
        masm.loadWasmTlsRegFromFrame();
        masm.loadWasmPinnedRegsFromTls();
        if (switchRealm)
            masm.switchToWasmTlsRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
    } else {
        MOZ_ASSERT(!switchRealm);
    }

    if (mir->spIncrement())
        masm.reserveStack(mir->spIncrement());
}

void
CodeGenerator::visitWasmCall(LWasmCall* ins)
{
    emitWasmCallBase(ins->mir(), ins->needsBoundsCheck());
}

void
CodeGenerator::visitWasmCallVoid(LWasmCallVoid* ins)
{
    emitWasmCallBase(ins->mir(), ins->needsBoundsCheck());
}

void
CodeGenerator::visitWasmCallI64(LWasmCallI64* ins)
{
    emitWasmCallBase(ins->mir(), ins->needsBoundsCheck());
}

// Load a non-Int64 wasm value of the given MIRType from |addr| into |dst|.
static void
LoadPrimitiveValue(MacroAssembler& masm, MIRType type, const Address& addr, AnyRegister dst)
{
    switch (type) {
      case MIRType::Int32:
        masm.load32(addr, dst.gpr());
        break;
      case MIRType::Float32:
        masm.loadFloat32(addr, dst.fpu());
        break;
      case MIRType::Double:
        masm.loadDouble(addr, dst.fpu());
        break;
      case MIRType::Pointer:
        masm.loadPtr(addr, dst.gpr());
        break;
      // Aligned access: code is aligned on PageSize + there is padding
      // before the global data section.
      case MIRType::Int8x16:
      case MIRType::Int16x8:
      case MIRType::Int32x4:
      case MIRType::Bool8x16:
      case MIRType::Bool16x8:
      case MIRType::Bool32x4:
      case MIRType::Float32x4:
      default:
        MOZ_CRASH("unexpected type in LoadPrimitiveValue");
    }
}

// Load a wasm global stored inline in the TLS global area.
void
CodeGenerator::visitWasmLoadGlobalVar(LWasmLoadGlobalVar* ins)
{
    MWasmLoadGlobalVar* mir = ins->mir();

    MIRType type = mir->type();

    Register tls = ToRegister(ins->tlsPtr());
    Address addr(tls, offsetof(wasm::TlsData, globalArea) + mir->globalDataOffset());
    LoadPrimitiveValue(masm, type, addr, ToAnyRegister(ins->output()));
}

// Load a wasm global stored out-of-line in a cell.
void
CodeGenerator::visitWasmLoadGlobalCell(LWasmLoadGlobalCell* ins)
{
    MWasmLoadGlobalCell* mir = ins->mir();

    MIRType type = mir->type();
    MOZ_ASSERT(type != MIRType::Pointer);

    Register cell = ToRegister(ins->cellPtr());
    Address addr(cell, 0);
    LoadPrimitiveValue(masm, type, addr, ToAnyRegister(ins->output()));
}

// Store a non-Int64 wasm value of the given MIRType from |src| to |addr|.
static void
StorePrimitiveValue(MacroAssembler& masm, MIRType type, const Address& addr, AnyRegister src)
{
    switch (type) {
      case MIRType::Int32:
        masm.store32(src.gpr(), addr);
        break;
      case MIRType::Float32:
        masm.storeFloat32(src.fpu(), addr);
        break;
      case MIRType::Double:
        masm.storeDouble(src.fpu(), addr);
        break;
      // Aligned access: code is aligned on PageSize + there is padding
      // before the global data section.
      case MIRType::Int8x16:
      case MIRType::Int16x8:
      case MIRType::Int32x4:
      case MIRType::Bool8x16:
      case MIRType::Bool16x8:
      case MIRType::Bool32x4:
      case MIRType::Float32x4:
      default:
        MOZ_CRASH("unexpected type in StorePrimitiveValue");
    }
}

void
CodeGenerator::visitWasmStoreGlobalVar(LWasmStoreGlobalVar* ins)
{
    MWasmStoreGlobalVar* mir = ins->mir();
    MIRType type = mir->value()->type();
    Register tls = ToRegister(ins->tlsPtr());
    Address addr(tls, offsetof(wasm::TlsData, globalArea) + mir->globalDataOffset());
    StorePrimitiveValue(masm, type, addr, ToAnyRegister(ins->value()));
}

void
CodeGenerator::visitWasmStoreGlobalCell(LWasmStoreGlobalCell* ins)
{
    MWasmStoreGlobalCell* mir = ins->mir();
    MIRType type = mir->value()->type();
    Register cell = ToRegister(ins->cellPtr());
    Address addr(cell, 0);
    StorePrimitiveValue(masm, type, addr, ToAnyRegister(ins->value()));
}

// Int64 variants of the global loads/stores above.
void
CodeGenerator::visitWasmLoadGlobalVarI64(LWasmLoadGlobalVarI64* ins)
{
    MWasmLoadGlobalVar* mir = ins->mir();
    MIRType type = mir->type();
    MOZ_ASSERT(type == MIRType::Int64);

    Register tls = ToRegister(ins->tlsPtr());
    Address addr(tls, offsetof(wasm::TlsData, globalArea) + mir->globalDataOffset());
    if (type == MIRType::Int64) {
        Register64 output = ToOutRegister64(ins);
        masm.load64(addr, output);
    } else {
        masm.loadPtr(addr, ToRegister(ins->output()));
    }
}

void
CodeGenerator::visitWasmLoadGlobalCellI64(LWasmLoadGlobalCellI64* ins)
{
    DebugOnly mir = ins->mir();
    MOZ_ASSERT(mir->type() == MIRType::Int64);
    Register cell = ToRegister(ins->cellPtr());
    Address addr(cell, 0);
    Register64 output = ToOutRegister64(ins);
    masm.load64(addr, output);
}

void
CodeGenerator::visitWasmStoreGlobalVarI64(LWasmStoreGlobalVarI64* ins)
{
    MWasmStoreGlobalVar* mir = ins->mir();
    MOZ_ASSERT(mir->value()->type() == MIRType::Int64);
    Register tls = ToRegister(ins->tlsPtr());
    Address addr(tls, offsetof(wasm::TlsData, globalArea) + mir->globalDataOffset());
    Register64 value = ToRegister64(ins->value());
    masm.store64(value, addr);
}

void
CodeGenerator::visitWasmStoreGlobalCellI64(LWasmStoreGlobalCellI64* ins)
{
    MWasmStoreGlobalCell* mir = ins->mir();
    DebugOnly type = mir->value()->type();
    MOZ_ASSERT(type.value == MIRType::Int64);
    Register cell = ToRegister(ins->cellPtr());
    Address addr(cell, 0);
    Register64 value = ToRegister64(ins->value());
    masm.store64(value, addr);
}

void
CodeGenerator::visitTypedArrayLength(LTypedArrayLength* lir)
{
    Register obj = ToRegister(lir->object());
    Register out = ToRegister(lir->output());
    masm.unboxInt32(Address(obj, TypedArrayObject::lengthOffset()), out);
}

void
CodeGenerator::visitTypedArrayElements(LTypedArrayElements* lir)
{
    Register obj = ToRegister(lir->object());
    Register out = ToRegister(lir->output());
    masm.loadPtr(Address(obj, TypedArrayObject::dataOffset()), out);
}

// ABI call into the runtime to copy between disjoint typed-array buffers.
void
CodeGenerator::visitSetDisjointTypedElements(LSetDisjointTypedElements* lir)
{
    Register target = ToRegister(lir->target());
    Register targetOffset = ToRegister(lir->targetOffset());
    Register source = ToRegister(lir->source());

    Register temp = ToRegister(lir->temp());

    masm.setupUnalignedABICall(temp);
    masm.passABIArg(target);
    masm.passABIArg(targetOffset);
    masm.passABIArg(source);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::SetDisjointTypedElements));
}

void
CodeGenerator::visitTypedObjectDescr(LTypedObjectDescr* lir)
{
    Register obj = ToRegister(lir->object());
    Register out = ToRegister(lir->output());
    masm.loadTypedObjectDescr(obj, out);
}

// Load the data pointer of a typed object; inline typed objects keep their
// data inline, outline ones point at it.
void
CodeGenerator::visitTypedObjectElements(LTypedObjectElements* lir)
{
    Register obj = ToRegister(lir->object());
    Register out = ToRegister(lir->output());

    if (lir->mir()->definitelyOutline()) {
        masm.loadPtr(Address(obj, OutlineTypedObject::offsetOfData()), out);
    } else {
        Label inlineObject, done;
        masm.branchIfInlineTypedObject(obj, out, &inlineObject);
        masm.loadPtr(Address(obj, OutlineTypedObject::offsetOfData()), out);
        masm.jump(&done);
        masm.bind(&inlineObject);
        masm.computeEffectiveAddress(Address(obj, InlineTypedObject::offsetOfDataStart()), out);
        masm.bind(&done);
    }
}

// Re-point an outline typed object's data pointer at owner-base + offset.
void
CodeGenerator::visitSetTypedObjectOffset(LSetTypedObjectOffset* lir)
{
    Register object = ToRegister(lir->object());
    Register offset = ToRegister(lir->offset());
    Register temp0 = ToRegister(lir->temp0());
    Register temp1 = ToRegister(lir->temp1());

    // Compute the base pointer for the typed object's owner.
    masm.loadPtr(Address(object, OutlineTypedObject::offsetOfOwner()), temp0);

    Label inlineObject, done;
    masm.branchIfInlineTypedObject(temp0, temp1, &inlineObject);

    masm.loadPrivate(Address(temp0, ArrayBufferObject::offsetOfDataSlot()), temp0);
    masm.jump(&done);

    masm.bind(&inlineObject);
    masm.addPtr(ImmWord(InlineTypedObject::offsetOfDataStart()), temp0);

    masm.bind(&done);

    // Compute the new data pointer and set it in the object.
    masm.addPtr(offset, temp0);
    masm.storePtr(temp0, Address(object, OutlineTypedObject::offsetOfData()));
}

void
CodeGenerator::visitStringLength(LStringLength* lir)
{
    Register input = ToRegister(lir->string());
    Register output = ToRegister(lir->output());

    masm.loadStringLength(input, output);
}

// min/max of two int32s; |first| doubles as the output register.
void
CodeGenerator::visitMinMaxI(LMinMaxI* ins)
{
    Register first = ToRegister(ins->first());
    Register output = ToRegister(ins->output());

    MOZ_ASSERT(first == output);

    Label done;
    Assembler::Condition cond = ins->mir()->isMax() ?
Assembler::GreaterThan : Assembler::LessThan; if (ins->second()->isConstant()) { masm.branch32(cond, first, Imm32(ToInt32(ins->second())), &done); masm.move32(Imm32(ToInt32(ins->second())), output); } else { masm.branch32(cond, first, ToRegister(ins->second()), &done); masm.move32(ToRegister(ins->second()), output); } masm.bind(&done); } void CodeGenerator::visitAbsI(LAbsI* ins) { Register input = ToRegister(ins->input()); Label positive; MOZ_ASSERT(input == ToRegister(ins->output())); masm.branchTest32(Assembler::NotSigned, input, input, &positive); masm.neg32(input); LSnapshot* snapshot = ins->snapshot(); #if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) if (snapshot) bailoutCmp32(Assembler::Equal, input, Imm32(INT32_MIN), snapshot); #else if (snapshot) bailoutIf(Assembler::Overflow, snapshot); #endif masm.bind(&positive); } void CodeGenerator::visitPowI(LPowI* ins) { FloatRegister value = ToFloatRegister(ins->value()); Register power = ToRegister(ins->power()); Register temp = ToRegister(ins->temp()); MOZ_ASSERT(power != temp); masm.setupUnalignedABICall(temp); masm.passABIArg(value, MoveOp::DOUBLE); masm.passABIArg(power); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::powi), MoveOp::DOUBLE); MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg); } void CodeGenerator::visitPowD(LPowD* ins) { FloatRegister value = ToFloatRegister(ins->value()); FloatRegister power = ToFloatRegister(ins->power()); Register temp = ToRegister(ins->temp()); masm.setupUnalignedABICall(temp); masm.passABIArg(value, MoveOp::DOUBLE); masm.passABIArg(power, MoveOp::DOUBLE); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ecmaPow), MoveOp::DOUBLE); MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg); } using PowFn = bool (*)(JSContext*, MutableHandleValue, MutableHandleValue, MutableHandleValue); static const VMFunction PowInfo = FunctionInfo(js::PowValues, "PowValues"); void CodeGenerator::visitPowV(LPowV* ins) { pushArg(ToValue(ins, LPowV::PowerInput)); 
pushArg(ToValue(ins, LPowV::ValueInput)); callVM(PowInfo, ins); } void CodeGenerator::visitSignI(LSignI* ins) { Register input = ToRegister(ins->input()); Register output = ToRegister(ins->output()); Label done; masm.move32(input, output); masm.rshift32Arithmetic(Imm32(31), output); masm.branch32(Assembler::LessThanOrEqual, input, Imm32(0), &done); masm.move32(Imm32(1), output); masm.bind(&done); } void CodeGenerator::visitSignD(LSignD* ins) { FloatRegister input = ToFloatRegister(ins->input()); FloatRegister output = ToFloatRegister(ins->output()); Label done, zeroOrNaN, negative; masm.loadConstantDouble(0.0, output); masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, output, &zeroOrNaN); masm.branchDouble(Assembler::DoubleLessThan, input, output, &negative); masm.loadConstantDouble(1.0, output); masm.jump(&done); masm.bind(&negative); masm.loadConstantDouble(-1.0, output); masm.jump(&done); masm.bind(&zeroOrNaN); masm.moveDouble(input, output); masm.bind(&done); } void CodeGenerator::visitSignDI(LSignDI* ins) { FloatRegister input = ToFloatRegister(ins->input()); FloatRegister temp = ToFloatRegister(ins->temp()); Register output = ToRegister(ins->output()); Label done, zeroOrNaN, negative; masm.loadConstantDouble(0.0, temp); masm.branchDouble(Assembler::DoubleEqualOrUnordered, input, temp, &zeroOrNaN); masm.branchDouble(Assembler::DoubleLessThan, input, temp, &negative); masm.move32(Imm32(1), output); masm.jump(&done); masm.bind(&negative); masm.move32(Imm32(-1), output); masm.jump(&done); // Bailout for NaN and negative zero. Label bailout; masm.bind(&zeroOrNaN); masm.branchDouble(Assembler::DoubleUnordered, input, input, &bailout); // The easiest way to distinguish -0.0 from 0.0 is that 1.0/-0.0 // is -Infinity instead of Infinity. 
// temp <- 1.0 / input.  For input == -0.0 this produces -Infinity,
    // which is DoubleLessThan the input (-0.0), so we bail out; for
    // +0.0 it produces +Infinity and we fall through to return 0.
    masm.loadConstantDouble(1.0, temp);
    masm.divDouble(input, temp);
    masm.branchDouble(Assembler::DoubleLessThan, temp, input, &bailout);
    masm.move32(Imm32(0), output);

    bailoutFrom(&bailout, ins->snapshot());
    masm.bind(&done);
}

// Double->double math builtin: select the C implementation for the MIR
// function kind and call it through the ABI.  The result is returned in
// ReturnDoubleReg (asserted below).
void
CodeGenerator::visitMathFunctionD(LMathFunctionD* ins)
{
    Register temp = ToRegister(ins->temp());
    FloatRegister input = ToFloatRegister(ins->input());
    MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg);

    masm.setupUnalignedABICall(temp);
    masm.passABIArg(input, MoveOp::DOUBLE);

    // Map the MIR math-function kind to its double-precision C impl.
    void* funptr = nullptr;
    switch (ins->mir()->function()) {
      case MMathFunction::Log:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_log_impl);
        break;
      case MMathFunction::Sin:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_sin_impl);
        break;
      case MMathFunction::Cos:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_cos_impl);
        break;
      case MMathFunction::Exp:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_exp_impl);
        break;
      case MMathFunction::Tan:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_tan_impl);
        break;
      case MMathFunction::ATan:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_atan_impl);
        break;
      case MMathFunction::ASin:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_asin_impl);
        break;
      case MMathFunction::ACos:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_acos_impl);
        break;
      case MMathFunction::Log10:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_log10_impl);
        break;
      case MMathFunction::Log2:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_log2_impl);
        break;
      case MMathFunction::Log1P:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_log1p_impl);
        break;
      case MMathFunction::ExpM1:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_expm1_impl);
        break;
      case MMathFunction::CosH:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_cosh_impl);
        break;
      case MMathFunction::SinH:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_sinh_impl);
        break;
      case MMathFunction::TanH:
        funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_tanh_impl);
        break;
      case MMathFunction::ACosH:
        funptr = JS_FUNC_TO_DATA_PTR(void*,
js::math_acosh_impl); break; case MMathFunction::ASinH: funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_asinh_impl); break; case MMathFunction::ATanH: funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_atanh_impl); break; case MMathFunction::Trunc: funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_trunc_impl); break; case MMathFunction::Cbrt: funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_cbrt_impl); break; case MMathFunction::Floor: funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_floor_impl); break; case MMathFunction::Ceil: funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_ceil_impl); break; case MMathFunction::Round: funptr = JS_FUNC_TO_DATA_PTR(void*, js::math_round_impl); break; default: MOZ_CRASH("Unknown math function"); } # undef MAYBE_CACHED masm.callWithABI(funptr, MoveOp::DOUBLE); } void CodeGenerator::visitMathFunctionF(LMathFunctionF* ins) { Register temp = ToRegister(ins->temp()); FloatRegister input = ToFloatRegister(ins->input()); MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnFloat32Reg); masm.setupUnalignedABICall(temp); masm.passABIArg(input, MoveOp::FLOAT32); void* funptr = nullptr; CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check; switch (ins->mir()->function()) { case MMathFunction::Floor: funptr = JS_FUNC_TO_DATA_PTR(void*, floorf); check = CheckUnsafeCallWithABI::DontCheckOther; break; case MMathFunction::Round: funptr = JS_FUNC_TO_DATA_PTR(void*, math_roundf_impl); break; case MMathFunction::Trunc: funptr = JS_FUNC_TO_DATA_PTR(void*, math_truncf_impl); break; case MMathFunction::Ceil: funptr = JS_FUNC_TO_DATA_PTR(void*, ceilf); check = CheckUnsafeCallWithABI::DontCheckOther; break; default: MOZ_CRASH("Unknown or unsupported float32 math function"); } masm.callWithABI(funptr, MoveOp::FLOAT32, check); } void CodeGenerator::visitModD(LModD* ins) { FloatRegister lhs = ToFloatRegister(ins->lhs()); FloatRegister rhs = ToFloatRegister(ins->rhs()); MOZ_ASSERT(ToFloatRegister(ins->output()) == ReturnDoubleReg); MOZ_ASSERT(ins->temp()->isBogusTemp() == 
gen->compilingWasm()); if (gen->compilingWasm()) { masm.setupWasmABICall(); masm.passABIArg(lhs, MoveOp::DOUBLE); masm.passABIArg(rhs, MoveOp::DOUBLE); masm.callWithABI(ins->mir()->bytecodeOffset(), wasm::SymbolicAddress::ModD, MoveOp::DOUBLE); } else { masm.setupUnalignedABICall(ToRegister(ins->temp())); masm.passABIArg(lhs, MoveOp::DOUBLE); masm.passABIArg(rhs, MoveOp::DOUBLE); masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NumberMod), MoveOp::DOUBLE); } } typedef bool (*BinaryFn)(JSContext*, MutableHandleValue, MutableHandleValue, MutableHandleValue); static const VMFunction AddInfo = FunctionInfo(js::AddValues, "AddValues"); static const VMFunction SubInfo = FunctionInfo(js::SubValues, "SubValues"); static const VMFunction MulInfo = FunctionInfo(js::MulValues, "MulValues"); static const VMFunction DivInfo = FunctionInfo(js::DivValues, "DivValues"); static const VMFunction ModInfo = FunctionInfo(js::ModValues, "ModValues"); static const VMFunction UrshInfo = FunctionInfo(js::UrshValues, "UrshValues"); void CodeGenerator::visitBinaryV(LBinaryV* lir) { pushArg(ToValue(lir, LBinaryV::RhsInput)); pushArg(ToValue(lir, LBinaryV::LhsInput)); switch (lir->jsop()) { case JSOP_ADD: callVM(AddInfo, lir); break; case JSOP_SUB: callVM(SubInfo, lir); break; case JSOP_MUL: callVM(MulInfo, lir); break; case JSOP_DIV: callVM(DivInfo, lir); break; case JSOP_MOD: callVM(ModInfo, lir); break; case JSOP_URSH: callVM(UrshInfo, lir); break; default: MOZ_CRASH("Unexpected binary op"); } } void CodeGenerator::emitCompareS(LInstruction* lir, JSOp op, Register left, Register right, Register output) { MOZ_ASSERT(lir->isCompareS() || lir->isCompareStrictS()); OutOfLineCode* ool = nullptr; if (op == JSOP_EQ || op == JSOP_STRICTEQ) { ool = oolCallVM(StringsEqualInfo, lir, ArgList(left, right), StoreRegisterTo(output)); } else { MOZ_ASSERT(op == JSOP_NE || op == JSOP_STRICTNE); ool = oolCallVM(StringsNotEqualInfo, lir, ArgList(left, right), StoreRegisterTo(output)); } masm.compareStrings(op, 
left, right, output, ool->entry()); masm.bind(ool->rejoin()); } void CodeGenerator::visitCompareStrictS(LCompareStrictS* lir) { JSOp op = lir->mir()->jsop(); MOZ_ASSERT(op == JSOP_STRICTEQ || op == JSOP_STRICTNE); const ValueOperand leftV = ToValue(lir, LCompareStrictS::Lhs); Register right = ToRegister(lir->right()); Register output = ToRegister(lir->output()); Label string, done; masm.branchTestString(Assembler::Equal, leftV, &string); masm.move32(Imm32(op == JSOP_STRICTNE), output); masm.jump(&done); masm.bind(&string); #ifdef JS_NUNBOX32 Register left = leftV.payloadReg(); #else Register left = ToTempUnboxRegister(lir->tempToUnbox()); #endif masm.unboxString(leftV, left); emitCompareS(lir, op, left, right, output); masm.bind(&done); } void CodeGenerator::visitCompareS(LCompareS* lir) { JSOp op = lir->mir()->jsop(); Register left = ToRegister(lir->left()); Register right = ToRegister(lir->right()); Register output = ToRegister(lir->output()); emitCompareS(lir, op, left, right, output); } typedef bool (*CompareFn)(JSContext*, MutableHandleValue, MutableHandleValue, bool*); static const VMFunction EqInfo = FunctionInfo(jit::LooselyEqual, "LooselyEqual"); static const VMFunction NeInfo = FunctionInfo(jit::LooselyEqual, "LooselyEqual"); static const VMFunction StrictEqInfo = FunctionInfo(jit::StrictlyEqual, "StrictlyEqual"); static const VMFunction StrictNeInfo = FunctionInfo(jit::StrictlyEqual, "StrictlyEqual"); static const VMFunction LtInfo = FunctionInfo(jit::LessThan, "LessThan"); static const VMFunction LeInfo = FunctionInfo(jit::LessThanOrEqual, "LessThanOrEqual"); static const VMFunction GtInfo = FunctionInfo(jit::GreaterThan, "GreaterThan"); static const VMFunction GeInfo = FunctionInfo(jit::GreaterThanOrEqual, "GreaterThanOrEqual"); void CodeGenerator::visitCompareVM(LCompareVM* lir) { pushArg(ToValue(lir, LBinaryV::RhsInput)); pushArg(ToValue(lir, LBinaryV::LhsInput)); switch (lir->mir()->jsop()) { case JSOP_EQ: callVM(EqInfo, lir); break; case JSOP_NE: 
callVM(NeInfo, lir);
        break;

      case JSOP_STRICTEQ:
        callVM(StrictEqInfo, lir);
        break;

      case JSOP_STRICTNE:
        callVM(StrictNeInfo, lir);
        break;

      case JSOP_LT:
        callVM(LtInfo, lir);
        break;

      case JSOP_LE:
        callVM(LeInfo, lir);
        break;

      case JSOP_GT:
        callVM(GtInfo, lir);
        break;

      case JSOP_GE:
        callVM(GeInfo, lir);
        break;

      default:
        MOZ_CRASH("Unexpected compare op");
    }
}

// Compare a boxed Value against null or undefined (the compare type is
// asserted to be one of those two).
void
CodeGenerator::visitIsNullOrLikeUndefinedV(LIsNullOrLikeUndefinedV* lir)
{
    JSOp op = lir->mir()->jsop();
    MCompare::CompareType compareType = lir->mir()->compareType();
    MOZ_ASSERT(compareType == MCompare::Compare_Undefined ||
               compareType == MCompare::Compare_Null);

    const ValueOperand value = ToValue(lir, LIsNullOrLikeUndefinedV::Value);
    Register output = ToRegister(lir->output());

    if (op == JSOP_EQ || op == JSOP_NE) {
        // Loose comparison: an object lhs may emulate undefined; any
        // operand that provably cannot should have been constant-folded
        // before reaching codegen (asserted below).
        MOZ_ASSERT(lir->mir()->lhs()->type() != MIRType::Object ||
                   lir->mir()->operandMightEmulateUndefined(),
                   "Operands which can't emulate undefined should have been folded");

        OutOfLineTestObjectWithLabels* ool = nullptr;
        Maybe