/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/arm64/CodeGenerator-arm64.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"

#include <bit>  // for std::has_single_bit, used by the division helpers below

#include "builtin/Number.h"
#include "jit/CodeGenerator.h"
#include "jit/InlineScriptTree.h"
#include "jit/JitRuntime.h"
#include "jit/MIR-wasm.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "jit/ReciprocalMulConstants.h"
#include "vm/JSContext.h"
#include "vm/Realm.h"
#include "vm/Shape.h"

#include "jit/shared/CodeGenerator-shared-inl.h"
#include "vm/JSScript-inl.h"

using namespace js;
using namespace js::jit;

using JS::GenericNaN;
using mozilla::FloorLog2;
using mozilla::Maybe;
using mozilla::NegativeInfinity;
using mozilla::Nothing;
using mozilla::Some;

// shared
CodeGeneratorARM64::CodeGeneratorARM64(MIRGenerator* gen, LIRGraph* graph,
                                       MacroAssembler* masm,
                                       const wasm::CodeMetadata* wasmCodeMeta)
    : CodeGeneratorShared(gen, graph, masm, wasmCodeMeta) {}

bool CodeGeneratorARM64::generateOutOfLineCode() {
  AutoCreatedBy acb(masm, "CodeGeneratorARM64::generateOutOfLineCode");

  if (!CodeGeneratorShared::generateOutOfLineCode()) {
    return false;
  }

  if (deoptLabel_.used()) {
    // All non-table-based bailouts will go here.
    masm.bind(&deoptLabel_);

    // Store the frame size, so the handler can recover the IonScript.
    masm.push(Imm32(frameSize()));

    TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
    masm.jump(handler);
  }

  return !masm.oom();
}

void CodeGeneratorARM64::emitBranch(Assembler::Condition cond,
                                    MBasicBlock* mirTrue,
                                    MBasicBlock* mirFalse) {
  if (isNextBlock(mirFalse->lir())) {
    jumpToBlock(mirTrue, cond);
  } else {
    jumpToBlock(mirFalse, Assembler::InvertCondition(cond));
    jumpToBlock(mirTrue);
  }
}

void CodeGeneratorARM64::emitBailoutOOL(LSnapshot* snapshot) {
  masm.push(Imm32(snapshot->snapshotOffset()));
  masm.B(&deoptLabel_);
}

void CodeGeneratorARM64::bailoutIf(Assembler::Condition condition,
                                   LSnapshot* snapshot) {
  encode(snapshot);

  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  auto* ool = new (alloc()) LambdaOutOfLineCode(
      [=, this](OutOfLineCode& ool) { emitBailoutOOL(snapshot); });
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  masm.B(ool->entry(), condition);
}

void CodeGeneratorARM64::bailoutIfZero(Assembler::Condition condition,
                                       ARMRegister rt, LSnapshot* snapshot) {
  MOZ_ASSERT(condition == Assembler::Zero || condition == Assembler::NonZero);

  encode(snapshot);

  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  auto* ool = new (alloc()) LambdaOutOfLineCode(
      [=, this](OutOfLineCode& ool) { emitBailoutOOL(snapshot); });
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  if (condition == Assembler::Zero) {
    masm.Cbz(rt, ool->entry());
  } else {
    masm.Cbnz(rt, ool->entry());
  }
}

void CodeGeneratorARM64::bailoutFrom(Label* label, LSnapshot* snapshot) {
  MOZ_ASSERT_IF(!masm.oom(), label->used());
  MOZ_ASSERT_IF(!masm.oom(), !label->bound());

  encode(snapshot);

  InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
  auto* ool = new (alloc()) LambdaOutOfLineCode(
      [=, this](OutOfLineCode& ool) { emitBailoutOOL(snapshot); });
  addOutOfLineCode(ool,
                   new (alloc()) BytecodeSite(tree, tree->script()->code()));

  masm.retarget(label, ool->entry());
}
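
// The out-of-line bailout path works in two hops: emitBailoutOOL() pushes the
// snapshot offset and branches to deoptLabel_, and the code bound at
// deoptLabel_ (see generateOutOfLineCode() above) pushes the frame size
// before jumping to the generic bailout handler. The handler therefore finds
// the frame size on top of the stack, with the snapshot offset beneath it,
// and uses the frame size to recover the IonScript.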
void CodeGeneratorARM64::bailout(LSnapshot* snapshot) {
  Label label;
  masm.b(&label);
  bailoutFrom(&label, snapshot);
}

void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
  ARMFPRegister lhs(ToFloatRegister(ins->first()), 64);
  ARMFPRegister rhs(ToFloatRegister(ins->second()), 64);
  ARMFPRegister output(ToFloatRegister(ins->output()), 64);
  if (ins->mir()->isMax()) {
    masm.Fmax(output, lhs, rhs);
  } else {
    masm.Fmin(output, lhs, rhs);
  }
}

void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
  ARMFPRegister lhs(ToFloatRegister(ins->first()), 32);
  ARMFPRegister rhs(ToFloatRegister(ins->second()), 32);
  ARMFPRegister output(ToFloatRegister(ins->output()), 32);
  if (ins->mir()->isMax()) {
    masm.Fmax(output, lhs, rhs);
  } else {
    masm.Fmin(output, lhs, rhs);
  }
}

template <typename T>
static ARMRegister toWRegister(const T* a) {
  return ARMRegister(ToRegister(a), 32);
}

template <typename T>
static ARMRegister toXRegister(const T* a) {
  return ARMRegister(ToRegister(a), 64);
}

static ARMRegister toXRegister(const LInt64Allocation& a) {
  return ARMRegister(ToRegister64(a).reg, 64);
}

static Operand toWOperand(const LAllocation* a) {
  if (a->isConstant()) {
    return Operand(ToInt32(a));
  }
  return Operand(toWRegister(a));
}

static Operand toXOperand(const LAllocation* a) {
  if (a->isConstant()) {
    return Operand(ToIntPtr(a));
  }
  return Operand(toXRegister(a));
}

static Operand toXOperand(const LInt64Allocation& a) {
  if (IsConstant(a)) {
    return Operand(ToInt64(a));
  }
  return Operand(toXRegister(a));
}

void CodeGenerator::visitAddI(LAddI* ins) {
  const LAllocation* lhs = ins->lhs();
  const LAllocation* rhs = ins->rhs();
  const LDefinition* dest = ins->output();

  // Platforms with three-operand arithmetic ops don't need recovery.
  MOZ_ASSERT(!ins->recoversInput());

  if (ins->snapshot()) {
    masm.Adds(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
    bailoutIf(Assembler::Overflow, ins->snapshot());
  } else {
    masm.Add(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
  }
}

void CodeGenerator::visitSubI(LSubI* ins) {
  const LAllocation* lhs = ins->lhs();
  const LAllocation* rhs = ins->rhs();
  const LDefinition* dest = ins->output();

  // Platforms with three-operand arithmetic ops don't need recovery.
  MOZ_ASSERT(!ins->recoversInput());

  if (ins->snapshot()) {
    masm.Subs(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
    bailoutIf(Assembler::Overflow, ins->snapshot());
  } else {
    masm.Sub(toWRegister(dest), toWRegister(lhs), toWOperand(rhs));
  }
}

void CodeGenerator::visitMulI(LMulI* ins) {
  const LAllocation* lhs = ins->lhs();
  const LAllocation* rhs = ins->rhs();
  const LDefinition* dest = ins->output();
  MMul* mul = ins->mir();
  MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
                !mul->canBeNegativeZero() && !mul->canOverflow());

  Register lhsreg = ToRegister(lhs);
  const ARMRegister lhsreg32 = ARMRegister(lhsreg, 32);
  Register destreg = ToRegister(dest);
  const ARMRegister destreg32 = ARMRegister(destreg, 32);

  if (rhs->isConstant()) {
    // Bailout on -0.0.
    int32_t constant = ToInt32(rhs);
    if (mul->canBeNegativeZero() && constant <= 0) {
      Assembler::Condition bailoutCond =
          (constant == 0) ? Assembler::LessThan : Assembler::Equal;
      bailoutCmp32(bailoutCond, lhsreg, Imm32(0), ins->snapshot());
    }

    switch (constant) {
      case -1:
        masm.Negs(destreg32, Operand(lhsreg32));
        break;  // Go to overflow check.
      case 0:
        masm.Mov(destreg32, wzr);
        return;  // Avoid overflow check.
      case 1:
        if (destreg != lhsreg) {
          masm.Mov(destreg32, lhsreg32);
        }
        return;  // Avoid overflow check.
      case 2:
        if (!mul->canOverflow()) {
          masm.Add(destreg32, lhsreg32, Operand(lhsreg32));
          return;  // Avoid overflow check.
        }
        masm.Adds(destreg32, lhsreg32, Operand(lhsreg32));
        break;  // Go to overflow check.
      default:
        // Use shift if cannot overflow and constant is a power of 2.
        if (!mul->canOverflow() && constant > 0) {
          int32_t shift = FloorLog2(uint32_t(constant));
          if ((1 << shift) == constant) {
            masm.Lsl(destreg32, lhsreg32, shift);
            return;
          }
        }

        // Otherwise, just multiply. We have to check for overflow.
        // Negative zero was handled above.
        Label bailout;
        Label* onOverflow = mul->canOverflow() ? &bailout : nullptr;

        vixl::UseScratchRegisterScope temps(&masm.asVIXL());
        const Register scratch = temps.AcquireW().asUnsized();

        masm.move32(Imm32(constant), scratch);
        masm.mul32(lhsreg, scratch, destreg, onOverflow);

        if (onOverflow) {
          bailoutFrom(&bailout, ins->snapshot());
        }
        return;
    }

    // Overflow check.
    if (mul->canOverflow()) {
      bailoutIf(Assembler::Overflow, ins->snapshot());
    }
  } else {
    Register rhsreg = ToRegister(rhs);
    const ARMRegister rhsreg32 = ARMRegister(rhsreg, 32);

    Label bailout;
    Label* onOverflow = mul->canOverflow() ? &bailout : nullptr;

    if (mul->canBeNegativeZero()) {
      // The product of two integer operands is negative zero iff one
      // operand is zero, and the other is negative. Therefore, the
      // sum of the two operands will also be negative (specifically,
      // it will be the non-zero operand). If the result of the
      // multiplication is 0, we can check the sign of the sum to
      // determine whether we should bail out.

      // This code can bailout, so lowering guarantees that the input
      // operands are not overwritten.
      MOZ_ASSERT(destreg != lhsreg);
      MOZ_ASSERT(destreg != rhsreg);

      // Do the multiplication.
      masm.mul32(lhsreg, rhsreg, destreg, onOverflow);

      // Set Zero flag if destreg is 0.
      masm.test32(destreg, destreg);

      // ccmn is 'conditional compare negative'.
      // If the Zero flag is set:
      //   perform a compare negative (compute lhs+rhs and set flags)
      // else:
      //   clear flags
      masm.Ccmn(lhsreg32, rhsreg32, vixl::NoFlag, Assembler::Zero);

      // Bails out if (lhs * rhs == 0) && (lhs + rhs < 0):
      bailoutIf(Assembler::LessThan, ins->snapshot());
    } else {
      masm.mul32(lhsreg, rhsreg, destreg, onOverflow);
    }

    if (onOverflow) {
      bailoutFrom(&bailout, ins->snapshot());
    }
  }
}

template <typename LIR>
static void TrapIfDivideByZero(MacroAssembler& masm, LIR* lir,
                               ARMRegister rhs) {
  auto* mir = lir->mir();
  MOZ_ASSERT(mir->trapOnError());

  if (mir->canBeDivideByZero()) {
    Label nonZero;
    masm.Cbnz(rhs, &nonZero);
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->trapSiteDesc());
    masm.bind(&nonZero);
  }
}

void CodeGenerator::visitDivI(LDivI* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  ARMRegister lhs32 = toWRegister(ins->lhs());
  ARMRegister rhs32 = toWRegister(ins->rhs());
  ARMRegister output32 = toWRegister(ins->output());

  MDiv* mir = ins->mir();

  // Handle division by zero.
  if (mir->canBeDivideByZero()) {
    if (mir->trapOnError()) {
      TrapIfDivideByZero(masm, ins, rhs32);
    } else if (mir->canTruncateInfinities()) {
      // SDIV returns zero for division by zero, exactly what we want for
      // truncated division. Remainder computation expects a non-zero divisor,
      // so we must also be allowed to truncate the remainder.
      MOZ_ASSERT(mir->canTruncateRemainder(),
                 "remainder computation expects a non-zero divisor");
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutTest32(Assembler::Zero, rhs, rhs, ins->snapshot());
    }
  }

  // Handle an integer overflow from (INT32_MIN / -1).
  // The integer division gives INT32_MIN, but should be -(double)INT32_MIN.
  //
  // SDIV returns INT32_MIN for (INT32_MIN / -1), so no extra code is needed
  // when truncation is allowed.
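  //
  // Worked example of the AArch64 SDIV edge cases guarded here: SDIV computes
  // INT32_MIN for (INT32_MIN / -1), since +2^31 is not representable, and
  // computes 0 for (x / 0); neither case traps. Both results are exactly what
  // truncating division wants, so the extra checks are only emitted when the
  // operation must trap (wasm) or bail out to produce a non-int32 result.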
  if (mir->canBeNegativeOverflow() &&
      (mir->trapOnError() || !mir->canTruncateOverflow())) {
    Label notOverflow;

    // Branch to handle the non-overflow cases.
    masm.branch32(Assembler::NotEqual, lhs, Imm32(INT32_MIN), &notOverflow);

    // Handle overflow.
    if (mir->trapOnError()) {
      masm.branch32(Assembler::NotEqual, rhs, Imm32(-1), &notOverflow);
      masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->trapSiteDesc());
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutCmp32(Assembler::Equal, rhs, Imm32(-1), ins->snapshot());
    }
    masm.bind(&notOverflow);
  }

  // Handle negative zero: lhs == 0 && rhs < 0.
  if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
    Label nonZero;
    masm.branch32(Assembler::NotEqual, lhs, Imm32(0), &nonZero);
    bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot());
    masm.bind(&nonZero);
  }

  // Perform integer division.
  masm.Sdiv(output32, lhs32, rhs32);

  if (!mir->canTruncateRemainder()) {
    vixl::UseScratchRegisterScope temps(&masm.asVIXL());
    ARMRegister remainder32 = temps.AcquireW();
    Register remainder = remainder32.asUnsized();

    // Compute the remainder: remainder = lhs - (output * rhs).
    masm.Msub(remainder32, output32, rhs32, lhs32);

    bailoutTest32(Assembler::NonZero, remainder, remainder, ins->snapshot());
  }
}

void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
  const Register numerator = ToRegister(ins->numerator());
  const ARMRegister numerator32 = toWRegister(ins->numerator());
  const ARMRegister output32 = toWRegister(ins->output());

  int32_t shift = ins->shift();
  bool negativeDivisor = ins->negativeDivisor();
  MDiv* mir = ins->mir();

  if (!mir->isTruncated() && negativeDivisor) {
    // 0 divided by a negative number returns a -0 double.
    bailoutTest32(Assembler::Zero, numerator, numerator, ins->snapshot());
  }

  if (shift) {
    if (!mir->isTruncated()) {
      // If the remainder is != 0, bailout since this must be a double.
      bailoutTest32(Assembler::NonZero, numerator,
                    Imm32(UINT32_MAX >> (32 - shift)), ins->snapshot());
    }

    if (mir->isUnsigned()) {
      // Shift right.
      masm.Lsr(output32, numerator32, shift);
    } else {
      ARMRegister temp32 = numerator32;
      // Adjust the value so that shifting produces a correctly
      // rounded result when the numerator is negative. See 10-1
      // "Signed Division by a Known Power of 2" in Henry
      // S. Warren, Jr.'s Hacker's Delight.
      if (mir->canBeNegativeDividend() && mir->isTruncated()) {
        if (shift > 1) {
          // Copy the sign bit of the numerator. (= (2^32 - 1) or 0)
          masm.Asr(output32, numerator32, 31);
          temp32 = output32;
        }
        // Divide by 2^(32 - shift),
        // i.e. (= (2^32 - 1) / 2^(32 - shift) or 0)
        // i.e. (= (2^shift - 1) or 0).
        masm.Lsr(output32, temp32, 32 - shift);
        // If signed, make any 1 bit below the shifted bits bubble up, such
        // that once shifted the value would be rounded towards 0.
        masm.Add(output32, output32, numerator32);
        temp32 = output32;
      }
      masm.Asr(output32, temp32, shift);

      if (negativeDivisor) {
        masm.Neg(output32, output32);
      }
    }
    return;
  }

  if (negativeDivisor) {
    // INT32_MIN / -1 overflows.
    if (!mir->isTruncated()) {
      masm.Negs(output32, numerator32);
      bailoutIf(Assembler::Overflow, ins->snapshot());
    } else if (mir->trapOnError()) {
      Label ok;
      masm.Negs(output32, numerator32);
      masm.branch(Assembler::NoOverflow, &ok);
      masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->trapSiteDesc());
      masm.bind(&ok);
    } else {
      // Do not set condition flags.
      masm.Neg(output32, numerator32);
    }
  } else {
    if (mir->isUnsigned() && !mir->isTruncated()) {
      // Copy and set flags.
      masm.Adds(output32, numerator32, 0);
      // Unsigned division by 1 can overflow if output is not truncated, as we
      // do not have an Unsigned type for MIR instructions.
      bailoutIf(Assembler::Signed, ins->snapshot());
    } else {
      // Copy the result.
      masm.Mov(output32, numerator32);
    }
  }
}

void CodeGenerator::visitDivPowTwoI64(LDivPowTwoI64* ins) {
  ARMRegister numerator64 = toXRegister(ins->numerator());
  ARMRegister output64 = toXRegister(ins->output());
  int32_t shift = ins->shift();
  bool negativeDivisor = ins->negativeDivisor();

  MDiv* mir = ins->mir();

  if (shift) {
    if (mir->isUnsigned()) {
      // Shift right.
      masm.Lsr(output64, numerator64, shift);
    } else {
      ARMRegister temp64 = numerator64;
      // Adjust the value so that shifting produces a correctly
      // rounded result when the numerator is negative. See 10-1
      // "Signed Division by a Known Power of 2" in Henry
      // S. Warren, Jr.'s Hacker's Delight.
      if (mir->canBeNegativeDividend()) {
        if (shift > 1) {
          // Copy the sign bit of the numerator. (= (2^64 - 1) or 0)
          masm.Asr(output64, numerator64, 63);
          temp64 = output64;
        }
        // Divide by 2^(64 - shift),
        // i.e. (= (2^64 - 1) / 2^(64 - shift) or 0)
        // i.e. (= (2^shift - 1) or 0).
        masm.Lsr(output64, temp64, 64 - shift);
        // If signed, make any 1 bit below the shifted bits bubble up, such
        // that once shifted the value would be rounded towards 0.
        masm.Add(output64, output64, numerator64);
        temp64 = output64;
      }
      masm.Asr(output64, temp64, shift);

      if (negativeDivisor) {
        masm.Neg(output64, output64);
      }
    }
    return;
  }

  if (negativeDivisor) {
    // INT64_MIN / -1 overflows.
    Label ok;
    masm.Negs(output64, numerator64);
    masm.branch(Assembler::NoOverflow, &ok);
    masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->trapSiteDesc());
    masm.bind(&ok);
  } else {
    // Copy the result.
    masm.Mov(output64, numerator64);
  }
}

template <typename LDivOrMod>
static void DivideWithConstant(MacroAssembler& masm, LDivOrMod* ins) {
  ARMRegister lhs32 = toWRegister(ins->numerator());
  ARMRegister lhs64 = toXRegister(ins->numerator());
  ARMRegister output32 = toWRegister(ins->output());
  ARMRegister output64 = toXRegister(ins->output());
  int32_t d = ins->denominator();

  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  ARMRegister const32 = temps.AcquireW();

  // The absolute value of the denominator isn't a power of 2.
  MOZ_ASSERT(!std::has_single_bit(mozilla::Abs(d)));

  auto* mir = ins->mir();

  // We will first divide by Abs(d), and negate the answer if d is negative.
  // If desired, this can be avoided by generalizing computeDivisionConstants.
  auto rmc = ReciprocalMulConstants::computeSignedDivisionConstants(d);

  // We first compute (M * n) >> 32, where M = rmc.multiplier.
  masm.Mov(const32, int32_t(rmc.multiplier));
  if (rmc.multiplier > INT32_MAX) {
    MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 32));

    // We actually compute (int32_t(M) * n) instead, without the upper bit.
    // Thus, (M * n) = (int32_t(M) * n) + n << 32.
    //
    // ((int32_t(M) * n) + n << 32) can't overflow, as both operands have
    // opposite signs because int32_t(M) is negative.
    masm.Lsl(output64, lhs64, 32);

    // Store (M * n) in output64.
    masm.Smaddl(output64, const32, lhs32, output64);
  } else {
    // Store (M * n) in output64.
    masm.Smull(output64, const32, lhs32);
  }

  // (M * n) >> (32 + shift) is the truncated division answer if n is
  // non-negative, as proved in the comments of computeDivisionConstants. We
  // must add 1 later if n is negative to get the right answer in all cases.
  masm.Asr(output64, output64, 32 + rmc.shiftAmount);

  // We'll subtract -1 instead of adding 1, because (n < 0 ? -1 : 0) can be
  // computed with just a sign-extending shift of 31 bits.
  if (mir->canBeNegativeDividend()) {
    masm.Sub(output32, output32, Operand(lhs32, vixl::ASR, 31));
  }

  // After this, output32 contains the correct truncated division result.
  if (d < 0) {
    masm.Neg(output32, output32);
  }
}

void CodeGenerator::visitDivConstantI(LDivConstantI* ins) {
  ARMRegister lhs32 = toWRegister(ins->numerator());
  ARMRegister output32 = toWRegister(ins->output());
  int32_t d = ins->denominator();

  MDiv* mir = ins->mir();
  if (d == 0) {
    if (mir->trapOnError()) {
      masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->trapSiteDesc());
    } else if (mir->canTruncateInfinities()) {
      masm.Mov(output32, wzr);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailout(ins->snapshot());
    }
    return;
  }

  // Compute the truncated division result in output32.
  DivideWithConstant(masm, ins);

  if (!mir->isTruncated()) {
    vixl::UseScratchRegisterScope temps(&masm.asVIXL());
    ARMRegister temp32 = temps.AcquireW();
    Register temp = temp32.asUnsized();

    // This is a division op. Multiply the obtained value by d to check if
    // the correct answer is an integer. This cannot overflow, since |d| > 1.
    masm.Mov(temp32, d);
    masm.Msub(temp32, output32, temp32, lhs32);

    if (d > 0) {
      // Bailout if (lhs - output * d != 0).
      bailoutTest32(Assembler::NonZero, temp, temp, ins->snapshot());
    } else {
      MOZ_ASSERT(d < 0);

      // Bailout if (lhs - output * d != 0):
      masm.Cmp(temp32, wzr);

      // If lhs is zero and the divisor is negative, the answer should have
      // been -0.
      //
      //    or bailout if (lhs == 0).
      //    ^                ^
      //    |                '-- masm.Ccmp(lhs32, lhs32, .., ..)
      //    '-- masm.Ccmp(.., .., vixl::ZFlag, Assembler::Zero)
      masm.Ccmp(lhs32, wzr, vixl::ZFlag, Assembler::Zero);

      // Bailout if (lhs - output * d != 0) or (lhs == 0).
      bailoutIf(Assembler::Zero, ins->snapshot());
    }
  }
}

template <typename LUDivOrUMod>
static void UnsignedDivideWithConstant(MacroAssembler& masm,
                                       LUDivOrUMod* ins) {
  ARMRegister lhs32 = toWRegister(ins->numerator());
  ARMRegister lhs64 = toXRegister(ins->numerator());
  ARMRegister output64 = toXRegister(ins->output());
  uint32_t d = ins->denominator();

  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  ARMRegister const32 = temps.AcquireW();

  // The denominator isn't a power of 2 (see LDivPowTwoI).
  MOZ_ASSERT(!std::has_single_bit(d));

  auto rmc = ReciprocalMulConstants::computeUnsignedDivisionConstants(d);

  // We first compute (M * n), where M = rmc.multiplier.
  masm.Mov(const32, int32_t(rmc.multiplier));
  masm.Umull(output64, const32, lhs32);
  if (rmc.multiplier > UINT32_MAX) {
    // M >= 2^32 and shift == 0 is impossible, as d >= 2 implies that
    // ((M * n) >> (32 + shift)) >= n > floor(n/d) whenever n >= d,
    // contradicting the proof of correctness in computeDivisionConstants.
    MOZ_ASSERT(rmc.shiftAmount > 0);
    MOZ_ASSERT(rmc.multiplier < (int64_t(1) << 33));

    // We actually compute (uint32_t(M) * n) instead, without the upper bit.
    // Thus, (M * n) = (uint32_t(M) * n) + n << 32.
    //
    // ((uint32_t(M) * n) + n << 32) can overflow. Hacker's Delight explains a
    // trick to avoid this overflow case, but we can avoid it by computing the
    // addition on 64-bit registers.
    //
    // Compute ((uint32_t(M) * n) >> 32 + n).
    masm.Add(output64, lhs64, Operand(output64, vixl::LSR, 32));

    // (M * n) >> (32 + shift) is the truncated division answer.
    masm.Lsr(output64, output64, rmc.shiftAmount);
  } else {
    // (M * n) >> (32 + shift) is the truncated division answer.
    masm.Lsr(output64, output64, 32 + rmc.shiftAmount);
  }
}

void CodeGenerator::visitUDivConstant(LUDivConstant* ins) {
  ARMRegister lhs32 = toWRegister(ins->numerator());
  ARMRegister output32 = toWRegister(ins->output());
  uint32_t d = ins->denominator();

  MDiv* mir = ins->mir();
  if (d == 0) {
    if (ins->mir()->trapOnError()) {
      masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->trapSiteDesc());
    } else if (mir->canTruncateInfinities()) {
      masm.Mov(output32, wzr);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailout(ins->snapshot());
    }
    return;
  }

  // Compute the truncated division result in output32.
  UnsignedDivideWithConstant(masm, ins);

  // We now have the truncated division value. To check whether the division
  // resulted in an integer, we multiply the obtained value by d and check the
  // remainder of the division.
  if (!mir->isTruncated()) {
    vixl::UseScratchRegisterScope temps(&masm.asVIXL());
    ARMRegister temp32 = temps.AcquireW();
    Register temp = temp32.asUnsized();

    masm.Mov(temp32, d);
    masm.Msub(temp32, output32, temp32, lhs32);

    // Bailout if (lhs - output * d != 0).
    bailoutTest32(Assembler::NonZero, temp, temp, ins->snapshot());
  }
}

template <typename LDivOrMod>
static void Divide64WithConstant(MacroAssembler& masm, LDivOrMod* ins) {
  ARMRegister lhs64 = toXRegister(ins->numerator());
  ARMRegister output64 = toXRegister(ins->output());
  int64_t d = ins->denominator();

  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  ARMRegister const64 = temps.AcquireX();

  // The absolute value of the denominator isn't a power of 2.
  MOZ_ASSERT(!std::has_single_bit(mozilla::Abs(d)));

  auto* mir = ins->mir();

  // We will first divide by Abs(d), and negate the answer if d is negative.
  // If desired, this can be avoided by generalizing computeDivisionConstants.
  auto rmc = ReciprocalMulConstants::computeSignedDivisionConstants(d);

  // We first compute (M * n) >> 64, where M = rmc.multiplier.
  masm.Mov(const64, uint64_t(rmc.multiplier));
  masm.Smulh(output64, lhs64, const64);
  if (rmc.multiplier > Int128(INT64_MAX)) {
    MOZ_ASSERT(rmc.multiplier < (Int128(1) << 64));

    // We actually computed output = ((int64_t(M) * n) >> 64) instead. Since
    // (M * n) >> 64 is the same as (output + n), we can correct for the
    // overflow. (output + n) can't overflow, as n and output have opposite
    // signs because int64_t(M) is negative.
    masm.Add(output64, output64, lhs64);
  }

  // (M * n) >> (64 + shift) is the truncated division answer if n is
  // non-negative, as proved in the comments of computeDivisionConstants. We
  // must add 1 later if n is negative to get the right answer in all cases.
  masm.Asr(output64, output64, rmc.shiftAmount);

  // We'll subtract -1 instead of adding 1, because (n < 0 ? -1 : 0) can be
  // computed with just a sign-extending shift of 63 bits.
  if (mir->canBeNegativeDividend()) {
    masm.Sub(output64, output64, Operand(lhs64, vixl::ASR, 63));
  }

  // After this, output64 contains the correct truncated division result.
  if (d < 0) {
    masm.Neg(output64, output64);
  }
}

void CodeGenerator::visitDivConstantI64(LDivConstantI64* ins) {
  int64_t d = ins->denominator();
  if (d == 0) {
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->mir()->trapSiteDesc());
    return;
  }

  // Compute the truncated division result.
  Divide64WithConstant(masm, ins);
}

template <typename LUDivOrUMod>
static void UnsignedDivide64WithConstant(MacroAssembler& masm,
                                         LUDivOrUMod* ins) {
  ARMRegister lhs64 = toXRegister(ins->numerator());
  ARMRegister output64 = toXRegister(ins->output());
  uint64_t d = ins->denominator();

  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  ARMRegister const64 = temps.AcquireX();

  // The denominator isn't a power of 2 (see LDivPowTwoI).
  MOZ_ASSERT(!std::has_single_bit(d));

  auto rmc = ReciprocalMulConstants::computeUnsignedDivisionConstants(d);

  // We first compute (M * n) >> 64, where M = rmc.multiplier.
  masm.Mov(const64, uint64_t(rmc.multiplier));
  masm.Umulh(output64, lhs64, const64);
  if (rmc.multiplier > Int128(UINT64_MAX)) {
    // M >= 2^64 and shift == 0 is impossible, as d >= 2 implies that
    // ((M * n) >> (64 + shift)) >= n > floor(n/d) whenever n >= d,
    // contradicting the proof of correctness in computeDivisionConstants.
    MOZ_ASSERT(rmc.shiftAmount > 0);
    MOZ_ASSERT(rmc.multiplier < (Int128(1) << 65));

    // We actually computed output = ((uint64_t(M) * n) >> 64) instead. Since
    // (M * n) >> (64 + shift) is the same as (output + n) >> shift, we can
    // correct for the overflow. This case is a bit trickier than the signed
    // case, though, as the (output + n) addition itself can overflow; however,
    // note that
    // (output + n) >> shift == (((n - output) >> 1) + output) >> (shift - 1),
    // which is overflow-free. See Hacker's Delight, section 10-8 for details.
    masm.Sub(const64, lhs64, output64);
    masm.Add(output64, output64, Operand(const64, vixl::LSR, 1));
    masm.Lsr(output64, output64, rmc.shiftAmount - 1);
  } else {
    masm.Lsr(output64, output64, rmc.shiftAmount);
  }
}

void CodeGenerator::visitUDivConstantI64(LUDivConstantI64* ins) {
  uint64_t d = ins->denominator();
  if (d == 0) {
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->mir()->trapSiteDesc());
    return;
  }

  // Compute the truncated division result.
  UnsignedDivide64WithConstant(masm, ins);
}

void CodeGenerator::visitModI(LModI* ins) {
  Register lhs = ToRegister(ins->lhs());
  Register rhs = ToRegister(ins->rhs());
  ARMRegister lhs32 = toWRegister(ins->lhs());
  ARMRegister rhs32 = toWRegister(ins->rhs());
  ARMRegister output32 = toWRegister(ins->output());

  Label done;
  MMod* mir = ins->mir();

  // Prevent divide by zero.
  if (mir->canBeDivideByZero()) {
    if (mir->trapOnError()) {
      TrapIfDivideByZero(masm, ins, rhs32);
    } else if (mir->isTruncated()) {
      // Truncated division by zero yields integer zero.
      masm.Mov(output32, wzr);
      masm.Cbz(rhs32, &done);
    } else {
      // Non-truncated division by zero produces a non-integer.
      MOZ_ASSERT(mir->fallible());
      bailoutTest32(Assembler::Zero, rhs, rhs, ins->snapshot());
    }
  }

  // Signed division.
  masm.Sdiv(output32, lhs32, rhs32);

  // Compute the remainder: output = lhs - (output * rhs).
  masm.Msub(output32, output32, rhs32, lhs32);

  if (mir->canBeNegativeDividend() && !mir->isTruncated()) {
    // If output == 0 and lhs < 0, then the result should be double -0.0.
    // Note that this guard handles lhs == INT_MIN and rhs == -1:
    //   output = INT_MIN - (INT_MIN / -1) * -1
    //          = INT_MIN - INT_MIN
    //          = 0
    masm.Cbnz(output32, &done);
    bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
  }

  if (done.used()) {
    masm.bind(&done);
  }
}

void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
  Register lhs = ToRegister(ins->input());
  ARMRegister lhsw = toWRegister(ins->input());
  ARMRegister outw = toWRegister(ins->output());

  int32_t shift = ins->shift();
  bool canBeNegative =
      !ins->mir()->isUnsigned() && ins->mir()->canBeNegativeDividend();

  if (shift == 0) {
    if (canBeNegative && !ins->mir()->isTruncated()) {
      bailoutTest32(Assembler::Signed, lhs, lhs, ins->snapshot());
    }
    masm.Mov(outw, wzr);
    return;
  }

  Label negative;
  if (canBeNegative) {
    // Switch based on sign of the lhs.
    // Positive numbers are just a bitmask.
    masm.branchTest32(Assembler::Signed, lhs, lhs, &negative);
  }

  masm.And(outw, lhsw, Operand((uint32_t(1) << shift) - 1));

  if (canBeNegative) {
    Label done;
    masm.jump(&done);

    // Negative numbers need a negate, bitmask, negate.
    masm.bind(&negative);
    masm.Neg(outw, Operand(lhsw));
    masm.And(outw, outw, Operand((uint32_t(1) << shift) - 1));

    // Since a % b has the same sign as a, and a is negative in this branch,
    // an answer of 0 means the correct result is actually -0. Bail out.
    if (!ins->mir()->isTruncated()) {
      masm.Negs(outw, Operand(outw));
      bailoutIf(Assembler::Zero, ins->snapshot());
    } else {
      masm.Neg(outw, Operand(outw));
    }

    masm.bind(&done);
  }
}

void CodeGenerator::visitModPowTwoI64(LModPowTwoI64* ins) {
  Register lhs = ToRegister(ins->input());
  ARMRegister lhs64 = toXRegister(ins->input());
  ARMRegister out64 = toXRegister(ins->output());

  int32_t shift = ins->shift();
  bool canBeNegative =
      !ins->mir()->isUnsigned() && ins->mir()->canBeNegativeDividend();

  if (shift == 0) {
    masm.Mov(out64, xzr);
    return;
  }

  auto clearHighBits = [&](ARMRegister reg) {
    switch (shift) {
      case 32:
        masm.Mov(out64.W(), reg.W());
        break;
      default:
        masm.And(out64, reg, Operand((uint64_t(1) << shift) - 1));
        break;
    }
  };

  Label negative;
  if (canBeNegative) {
    // Switch based on sign of the lhs.
    // Positive numbers are just a bitmask.
    masm.branchTestPtr(Assembler::Signed, lhs, lhs, &negative);
  }

  clearHighBits(lhs64);

  if (canBeNegative) {
    Label done;
    masm.jump(&done);

    // Negative numbers need a negate, bitmask, negate.
    masm.bind(&negative);
    masm.Neg(out64, Operand(lhs64));
    clearHighBits(out64);
    masm.Neg(out64, Operand(out64));

    masm.bind(&done);
  }
}

void CodeGenerator::visitModConstantI(LModConstantI* ins) {
  Register lhs = ToRegister(ins->numerator());
  ARMRegister lhs32 = toWRegister(ins->numerator());
  ARMRegister output32 = toWRegister(ins->output());

  MMod* mir = ins->mir();
  int32_t d = ins->denominator();
  if (d == 0) {
    if (mir->trapOnError()) {
      masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->trapSiteDesc());
    } else if (mir->isTruncated()) {
      masm.Mov(output32, wzr);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailout(ins->snapshot());
    }
    return;
  }

  // Compute the truncated division result in output32.
  DivideWithConstant(masm, ins);

  // Compute the remainder: output = lhs - (output * rhs).
  {
    vixl::UseScratchRegisterScope temps(&masm.asVIXL());
    ARMRegister rhs32 = temps.AcquireW();
    masm.Mov(rhs32, d);
    masm.Msub(output32, output32, rhs32, lhs32);
  }

  if (mir->canBeNegativeDividend() && !mir->isTruncated()) {
    // If output == 0 and lhs < 0, then the result should be double -0.0.
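    // Worked example: lhs == -10 and d == 5 leave a remainder of 0, but in
    // JavaScript (-10 % 5) is -0, not +0, so an Int32 result would be wrong.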
    Label done;
    masm.Cbnz(output32, &done);
    bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
    masm.bind(&done);
  }
}

void CodeGenerator::visitUModConstant(LUModConstant* ins) {
  Register output = ToRegister(ins->output());
  ARMRegister lhs32 = toWRegister(ins->numerator());
  ARMRegister output32 = toWRegister(ins->output());

  MMod* mir = ins->mir();
  uint32_t d = ins->denominator();
  if (d == 0) {
    if (ins->mir()->trapOnError()) {
      masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->trapSiteDesc());
    } else if (mir->isTruncated()) {
      masm.Mov(output32, wzr);
    } else {
      MOZ_ASSERT(mir->fallible());
      bailout(ins->snapshot());
    }
    return;
  }

  // Compute the truncated division result in output32.
  UnsignedDivideWithConstant(masm, ins);

  // Compute the remainder: output = lhs - (output * rhs).
  {
    vixl::UseScratchRegisterScope temps(&masm.asVIXL());
    ARMRegister rhs32 = temps.AcquireW();
    masm.Mov(rhs32, d);
    masm.Msub(output32, output32, rhs32, lhs32);
  }

  // Bail if not truncated and the remainder is in the range [2^31, 2^32).
  if (!ins->mir()->isTruncated()) {
    bailoutTest32(Assembler::Signed, output, output, ins->snapshot());
  }
}

void CodeGenerator::visitModConstantI64(LModConstantI64* ins) {
  ARMRegister lhs64 = toXRegister(ins->numerator());
  ARMRegister output64 = toXRegister(ins->output());
  int64_t d = ins->denominator();

  if (d == 0) {
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->mir()->trapSiteDesc());
    return;
  }

  // Compute the truncated division result in output64.
  Divide64WithConstant(masm, ins);

  // Compute the remainder: output = lhs - (output * rhs).
  {
    vixl::UseScratchRegisterScope temps(&masm.asVIXL());
    ARMRegister rhs64 = temps.AcquireX();
    masm.Mov(rhs64, d);
    masm.Msub(output64, output64, rhs64, lhs64);
  }
}

void CodeGenerator::visitUModConstantI64(LUModConstantI64* ins) {
  ARMRegister lhs64 = toXRegister(ins->numerator());
  ARMRegister output64 = toXRegister(ins->output());
  uint64_t d = ins->denominator();

  if (d == 0) {
    masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->mir()->trapSiteDesc());
    return;
  }

  // Compute the truncated division result in output64.
  UnsignedDivide64WithConstant(masm, ins);

  // Compute the remainder: output = lhs - (output * rhs).
  {
    vixl::UseScratchRegisterScope temps(&masm.asVIXL());
    ARMRegister rhs64 = temps.AcquireX();
    masm.Mov(rhs64, d);
    masm.Msub(output64, output64, rhs64, lhs64);
  }
}

void CodeGeneratorARM64::emitBigIntPtrDiv(LBigIntPtrDiv* ins,
                                          Register dividend, Register divisor,
                                          Register output) {
  // Callers handle division by zero and integer overflow.
  const ARMRegister dividend64(dividend, 64);
  const ARMRegister divisor64(divisor, 64);
  const ARMRegister output64(output, 64);

  masm.Sdiv(/* result= */ output64, dividend64, divisor64);
}

void CodeGeneratorARM64::emitBigIntPtrMod(LBigIntPtrMod* ins,
                                          Register dividend, Register divisor,
                                          Register output) {
  // Callers handle division by zero and integer overflow.
  const ARMRegister dividend64(dividend, 64);
  const ARMRegister divisor64(divisor, 64);
  const ARMRegister output64(output, 64);

  // Signed division.
  masm.Sdiv(output64, dividend64, divisor64);

  // Compute the remainder: output = dividend - (output * divisor).
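  // AArch64 MSUB computes Ra - (Rn * Rm), so Msub(out, n, m, a) folds the
  // multiply and subtract into a single instruction, leaving a - n * m in
  // out.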
  masm.Msub(/* result= */ output64, output64, divisor64, dividend64);
}

void CodeGenerator::visitBitNotI(LBitNotI* ins) {
  const LAllocation* input = ins->input();
  const LDefinition* output = ins->output();
  masm.Mvn(toWRegister(output), toWOperand(input));
}

void CodeGenerator::visitBitNotI64(LBitNotI64* ins) {
  Register64 input = ToRegister64(ins->input());
  Register64 output = ToOutRegister64(ins);
  masm.Mvn(vixl::Register(output.reg, 64), vixl::Register(input.reg, 64));
}

void CodeGenerator::visitBitOpI(LBitOpI* ins) {
  const ARMRegister lhs = toWRegister(ins->lhs());
  const Operand rhs = toWOperand(ins->rhs());
  const ARMRegister dest = toWRegister(ins->output());

  switch (ins->bitop()) {
    case JSOp::BitOr:
      masm.Orr(dest, lhs, rhs);
      break;
    case JSOp::BitXor:
      masm.Eor(dest, lhs, rhs);
      break;
    case JSOp::BitAnd:
      masm.And(dest, lhs, rhs);
      break;
    default:
      MOZ_CRASH("unexpected binary opcode");
  }
}

void CodeGenerator::visitShiftI(LShiftI* ins) {
  const ARMRegister lhs = toWRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  const ARMRegister dest = toWRegister(ins->output());

  if (rhs->isConstant()) {
    int32_t shift = ToInt32(rhs) & 0x1F;
    switch (ins->bitop()) {
      case JSOp::Lsh:
        if (shift) {
          masm.Lsl(dest, lhs, shift);
        } else {
          masm.Mov(dest, lhs);
        }
        break;
      case JSOp::Rsh:
        if (shift) {
          masm.Asr(dest, lhs, shift);
        } else {
          masm.Mov(dest, lhs);
        }
        break;
      case JSOp::Ursh:
        if (shift) {
          masm.Lsr(dest, lhs, shift);
        } else if (ins->mir()->toUrsh()->fallible()) {
          // x >>> 0 can overflow.
          masm.Ands(dest, lhs, Operand(0xFFFFFFFF));
          bailoutIf(Assembler::Signed, ins->snapshot());
        } else {
          masm.Mov(dest, lhs);
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  } else {
    const ARMRegister rhsreg = toWRegister(rhs);
    switch (ins->bitop()) {
      case JSOp::Lsh:
        masm.Lsl(dest, lhs, rhsreg);
        break;
      case JSOp::Rsh:
        masm.Asr(dest, lhs, rhsreg);
        break;
      case JSOp::Ursh:
        masm.Lsr(dest, lhs, rhsreg);
        if (ins->mir()->toUrsh()->fallible()) {
          // x >>> 0 can overflow.
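          // Worked example: in JS, (-1 >>> 0) === 4294967295, which does not
          // fit in an int32, so a fallible Ursh bails out whenever the
          // unsigned result has the sign bit set.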
          masm.Cmp(dest, Operand(0));
          bailoutIf(Assembler::LessThan, ins->snapshot());
        }
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  }
}

void CodeGenerator::visitShiftIntPtr(LShiftIntPtr* ins) {
  ARMRegister lhs = toXRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  ARMRegister dest = toXRegister(ins->output());

  if (rhs->isConstant()) {
    int32_t shift = ToIntPtr(rhs) & 0x3F;
    if (shift == 0) {
      masm.Mov(dest, lhs);
    } else {
      switch (ins->bitop()) {
        case JSOp::Lsh:
          masm.Lsl(dest, lhs, shift);
          break;
        case JSOp::Rsh:
          masm.Asr(dest, lhs, shift);
          break;
        case JSOp::Ursh:
          masm.Lsr(dest, lhs, shift);
          break;
        default:
          MOZ_CRASH("Unexpected shift op");
      }
    }
  } else {
    ARMRegister rhsreg = toXRegister(rhs);
    switch (ins->bitop()) {
      case JSOp::Lsh:
        masm.Lsl(dest, lhs, rhsreg);
        break;
      case JSOp::Rsh:
        masm.Asr(dest, lhs, rhsreg);
        break;
      case JSOp::Ursh:
        masm.Lsr(dest, lhs, rhsreg);
        break;
      default:
        MOZ_CRASH("Unexpected shift op");
    }
  }
}

void CodeGenerator::visitUrshD(LUrshD* ins) {
  ARMRegister lhs = toWRegister(ins->lhs());
  const LAllocation* rhs = ins->rhs();
  FloatRegister out = ToFloatRegister(ins->output());

  ARMRegister temp = toWRegister(ins->temp0());

  if (rhs->isConstant()) {
    int32_t shift = ToInt32(rhs) & 0x1F;
    if (shift) {
      masm.Lsr(temp, lhs, shift);
      masm.convertUInt32ToDouble(temp.asUnsized(), out);
    } else {
      masm.convertUInt32ToDouble(lhs.asUnsized(), out);
    }
  } else {
    masm.Lsr(temp, lhs, toWRegister(rhs));
    masm.convertUInt32ToDouble(temp.asUnsized(), out);
  }
}

void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  FloatRegister output = ToFloatRegister(ins->output());
  ScratchDoubleScope scratch(masm);

  Label done, sqrt;

  if (!ins->mir()->operandIsNeverNegativeInfinity()) {
    // Branch if not -Infinity.
    masm.loadConstantDouble(NegativeInfinity<double>(), scratch);

    Assembler::DoubleCondition cond = Assembler::DoubleNotEqualOrUnordered;
    if (ins->mir()->operandIsNeverNaN()) {
      cond = Assembler::DoubleNotEqual;
    }
    masm.branchDouble(cond, input, scratch, &sqrt);

    // Math.pow(-Infinity, 0.5) == Infinity.
    masm.Fneg(ARMFPRegister(output, 64), ARMFPRegister(scratch, 64));
    masm.jump(&done);

    masm.bind(&sqrt);
  }

  if (!ins->mir()->operandIsNeverNegativeZero()) {
    // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
    // Adding 0 converts any -0 to 0.
    masm.zeroDouble(scratch);
    masm.addDouble(input, scratch);
    masm.sqrtDouble(scratch, output);
  } else {
    masm.sqrtDouble(input, output);
  }

  masm.bind(&done);
}

MoveOperand CodeGeneratorARM64::toMoveOperand(const LAllocation a) const {
  if (a.isGeneralReg()) {
    return MoveOperand(ToRegister(a));
  }
  if (a.isFloatReg()) {
    return MoveOperand(ToFloatRegister(a));
  }
  MoveOperand::Kind kind = a.isStackArea()
                               ? MoveOperand::Kind::EffectiveAddress
                               : MoveOperand::Kind::Memory;
  return MoveOperand(ToAddress(a), kind);
}

class js::jit::OutOfLineTableSwitch
    : public OutOfLineCodeBase<CodeGeneratorARM64> {
  MTableSwitch* mir_;
  CodeLabel jumpLabel_;

  void accept(CodeGeneratorARM64* codegen) override {
    codegen->visitOutOfLineTableSwitch(this);
  }

 public:
  explicit OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}

  MTableSwitch* mir() const { return mir_; }

  CodeLabel* jumpLabel() { return &jumpLabel_; }
};

void CodeGeneratorARM64::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool) {
  MTableSwitch* mir = ool->mir();

  // Prevent nop and pool sequences from appearing in the jump table.
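  // A pool or nop between entries would break the invariant, relied on by the
  // computed branch in emitTableSwitchDispatch, that entry i lives at
  // (table base + i * sizeof(void*)).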
  AutoForbidPoolsAndNops afp(
      &masm, (mir->numCases() + 1) * (sizeof(void*) / vixl::kInstructionSize));

  masm.haltingAlign(sizeof(void*));
  masm.bind(ool->jumpLabel());
  masm.addCodeLabel(*ool->jumpLabel());

  for (size_t i = 0; i < mir->numCases(); i++) {
    LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
    Label* caseheader = caseblock->label();
    uint32_t caseoffset = caseheader->offset();

    // The entries of the jump table need to be absolute addresses,
    // and thus must be patched after codegen is finished.
    CodeLabel cl;
    masm.writeCodePointer(&cl);
    cl.target()->bind(caseoffset);
    masm.addCodeLabel(cl);
  }
}

void CodeGeneratorARM64::emitTableSwitchDispatch(MTableSwitch* mir,
                                                 Register index,
                                                 Register base) {
  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

  // Let the lowest table entry be indexed at 0.
  if (mir->low() != 0) {
    masm.sub32(Imm32(mir->low()), index);
  }

  // Jump to the default case if input is out of range.
  int32_t cases = mir->numCases();
  masm.branch32(Assembler::AboveOrEqual, index, Imm32(cases), defaultcase);

  // Because the target code has not yet been generated, we cannot know the
  // instruction offsets for use as jump targets. Therefore we construct
  // an OutOfLineTableSwitch that winds up holding the jump table.
  //
  // Because the jump table is generated as part of out-of-line code,
  // it is generated after all the regular codegen, so the jump targets
  // are guaranteed to exist when generating the jump table.
  OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
  addOutOfLineCode(ool, mir);

  // Use the index to get the address of the jump target from the table.
  masm.mov(ool->jumpLabel(), base);
  BaseIndex pointer(base, index, ScalePointer);

  // Load the target from the jump table and branch to it.
  masm.branchToComputedAddress(pointer);
}

void CodeGenerator::visitMathD(LMathD* math) {
  ARMFPRegister lhs(ToFloatRegister(math->lhs()), 64);
  ARMFPRegister rhs(ToFloatRegister(math->rhs()), 64);
  ARMFPRegister output(ToFloatRegister(math->output()), 64);

  switch (math->jsop()) {
    case JSOp::Add:
      masm.Fadd(output, lhs, rhs);
      break;
    case JSOp::Sub:
      masm.Fsub(output, lhs, rhs);
      break;
    case JSOp::Mul:
      masm.Fmul(output, lhs, rhs);
      break;
    case JSOp::Div:
      masm.Fdiv(output, lhs, rhs);
      break;
    default:
      MOZ_CRASH("unexpected opcode");
  }
}

void CodeGenerator::visitMathF(LMathF* math) {
  ARMFPRegister lhs(ToFloatRegister(math->lhs()), 32);
  ARMFPRegister rhs(ToFloatRegister(math->rhs()), 32);
  ARMFPRegister output(ToFloatRegister(math->output()), 32);

  switch (math->jsop()) {
    case JSOp::Add:
      masm.Fadd(output, lhs, rhs);
      break;
    case JSOp::Sub:
      masm.Fsub(output, lhs, rhs);
      break;
    case JSOp::Mul:
      masm.Fmul(output, lhs, rhs);
      break;
    case JSOp::Div:
      masm.Fdiv(output, lhs, rhs);
      break;
    default:
      MOZ_CRASH("unexpected opcode");
  }
}

void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
  FloatRegister input = ToFloatRegister(ins->input());
  Register output = ToRegister(ins->output());

  // Directly call Fjcvtzs if available to avoid generating unused OOL code in
  // emitTruncateDouble.
  if (masm.hasFjcvtzs()) {
    masm.Fjcvtzs(ARMRegister(output, 32), ARMFPRegister(input, 64));
  } else {
    emitTruncateDouble(input, output, ins->mir());
  }
}

void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
    LWasmBuiltinTruncateDToInt32* lir) {
  FloatRegister input = ToFloatRegister(lir->input());
  Register output = ToRegister(lir->output());

  // Directly call Fjcvtzs if available to avoid generating unused OOL code in
  // emitTruncateDouble.
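  // FJCVTZS (ARMv8.3 FEAT_JSCVT) converts a double to a 32-bit integer with
  // exactly the wrapping semantics of ECMAScript's ToInt32, which is why no
  // out-of-line fixup path is needed when the CPU supports it.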
  if (masm.hasFjcvtzs()) {
    masm.Fjcvtzs(ARMRegister(output, 32), ARMFPRegister(input, 64));
  } else {
    emitTruncateDouble(input, output, lir->mir());
  }
}

void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
  masm.truncateFloat32ModUint32(ToFloatRegister(ins->input()),
                                ToRegister(ins->output()));
}

void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
    LWasmBuiltinTruncateFToInt32* lir) {
  MOZ_ASSERT(lir->instance()->isBogus(), "instance not used for arm64");
  masm.truncateFloat32ModUint32(ToFloatRegister(lir->input()),
                                ToRegister(lir->output()));
}

void CodeGenerator::visitBox(LBox* box) {
  const LAllocation* in = box->payload();
  ValueOperand result = ToOutValue(box);

  masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
}

void CodeGenerator::visitUnbox(LUnbox* unbox) {
  MUnbox* mir = unbox->mir();
  Register result = ToRegister(unbox->output());

  if (mir->fallible()) {
    ValueOperand value = ToValue(unbox->input());
    Label bail;
    switch (mir->type()) {
      case MIRType::Int32:
        masm.fallibleUnboxInt32(value, result, &bail);
        break;
      case MIRType::Boolean:
        masm.fallibleUnboxBoolean(value, result, &bail);
        break;
      case MIRType::Object:
        masm.fallibleUnboxObject(value, result, &bail);
        break;
      case MIRType::String:
        masm.fallibleUnboxString(value, result, &bail);
        break;
      case MIRType::Symbol:
        masm.fallibleUnboxSymbol(value, result, &bail);
        break;
      case MIRType::BigInt:
        masm.fallibleUnboxBigInt(value, result, &bail);
        break;
      default:
        MOZ_CRASH("Given MIRType cannot be unboxed.");
    }
    bailoutFrom(&bail, unbox->snapshot());
    return;
  }

  // Infallible unbox.
  ValueOperand input = ToValue(unbox->input());

#ifdef DEBUG
  // Assert the types match.
  JSValueTag tag = MIRTypeToTag(mir->type());
  Label ok;
  {
    ScratchTagScope scratch(masm, input);
    masm.splitTagForTest(input, scratch);
    masm.cmpTag(scratch, ImmTag(tag));
  }
  masm.B(&ok, Assembler::Condition::Equal);
  masm.assumeUnreachable("Infallible unbox type mismatch");
  masm.bind(&ok);
#endif

  switch (mir->type()) {
    case MIRType::Int32:
      masm.unboxInt32(input, result);
      break;
    case MIRType::Boolean:
      masm.unboxBoolean(input, result);
      break;
    case MIRType::Object:
      masm.unboxObject(input, result);
      break;
    case MIRType::String:
      masm.unboxString(input, result);
      break;
    case MIRType::Symbol:
      masm.unboxSymbol(input, result);
      break;
    case MIRType::BigInt:
      masm.unboxBigInt(input, result);
      break;
    default:
      MOZ_CRASH("Given MIRType cannot be unboxed.");
  }
}

void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
  const LAllocation* opd = test->input();
  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.Fcmp(ARMFPRegister(ToFloatRegister(opd), 64), 0.0);

  // If the compare set the Z (zero) flag, then the result is definitely
  // false.
  jumpToBlock(ifFalse, Assembler::Zero);

  // Overflow means one of the operands was NaN, which is also false.
  jumpToBlock(ifFalse, Assembler::Overflow);
  jumpToBlock(ifTrue);
}

void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
  const LAllocation* opd = test->input();
  MBasicBlock* ifTrue = test->ifTrue();
  MBasicBlock* ifFalse = test->ifFalse();

  masm.Fcmp(ARMFPRegister(ToFloatRegister(opd), 32), 0.0);

  // If the compare set the Z (zero) flag, then the result is definitely
  // false.
  jumpToBlock(ifFalse, Assembler::Zero);

  // Overflow means one of the operands was NaN, which is also false.
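  // An unordered compare (either operand NaN) sets NZCV to 0011, so a set V
  // (Overflow) flag identifies the NaN case; see the NZCV table before
  // visitNotD below.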
  jumpToBlock(ifFalse, Assembler::Overflow);
  jumpToBlock(ifTrue);
}

void CodeGenerator::visitCompareD(LCompareD* comp) {
  const FloatRegister left = ToFloatRegister(comp->left());
  const FloatRegister right = ToFloatRegister(comp->right());
  ARMRegister output = toWRegister(comp->output());
  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());

  masm.compareDouble(left, right);
  masm.cset(output, Assembler::ConditionFromDoubleCondition(cond));
}

void CodeGenerator::visitCompareF(LCompareF* comp) {
  const FloatRegister left = ToFloatRegister(comp->left());
  const FloatRegister right = ToFloatRegister(comp->right());
  ARMRegister output = toWRegister(comp->output());
  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());

  masm.compareFloat(left, right);
  masm.cset(output, Assembler::ConditionFromDoubleCondition(cond));
}

void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
  const FloatRegister left = ToFloatRegister(comp->left());
  const FloatRegister right = ToFloatRegister(comp->right());
  Assembler::DoubleCondition doubleCond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  Assembler::Condition cond =
      Assembler::ConditionFromDoubleCondition(doubleCond);

  masm.compareDouble(left, right);
  emitBranch(cond, comp->ifTrue(), comp->ifFalse());
}

void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
  const FloatRegister left = ToFloatRegister(comp->left());
  const FloatRegister right = ToFloatRegister(comp->right());
  Assembler::DoubleCondition doubleCond =
      JSOpToDoubleCondition(comp->cmpMir()->jsop());
  Assembler::Condition cond =
      Assembler::ConditionFromDoubleCondition(doubleCond);

  masm.compareFloat(left, right);
  emitBranch(cond, comp->ifTrue(), comp->ifFalse());
}

void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
  masm.convertUInt32ToDouble(ToRegister(lir->input()),
                             ToFloatRegister(lir->output()));
}

void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
  masm.convertUInt32ToFloat32(ToRegister(lir->input()),
                              ToFloatRegister(lir->output()));
}

// NZCV
// NAN -> 0011
// ==  -> 0110
// <   -> 1000
// >   -> 0010
void CodeGenerator::visitNotD(LNotD* ins) {
  ARMFPRegister input(ToFloatRegister(ins->input()), 64);
  ARMRegister output = toWRegister(ins->output());

  // Set output to 1 if input compares equal to 0.0, else 0.
  masm.Fcmp(input, 0.0);
  masm.Cset(output, Assembler::Equal);

  // Comparison with NaN sets V in the NZCV register.
  // If the input was NaN, output must now be zero, so it can be incremented.
  // The instruction is read: "output = if NoOverflow then output else 0+1".
  masm.Csinc(output, output, ZeroRegister32, Assembler::NoOverflow);
}

void CodeGenerator::visitNotF(LNotF* ins) {
  ARMFPRegister input(ToFloatRegister(ins->input()), 32);
  ARMRegister output = toWRegister(ins->output());

  // Set output to 1 if input compares equal to 0.0, else 0.
  masm.Fcmp(input, 0.0);
  masm.Cset(output, Assembler::Equal);

  // Comparison with NaN sets V in the NZCV register.
  // If the input was NaN, output must now be zero, so it can be incremented.
  // The instruction is read: "output = if NoOverflow then output else 0+1".
  masm.Csinc(output, output, ZeroRegister32, Assembler::NoOverflow);
}

void CodeGeneratorARM64::generateInvalidateEpilogue() {
  // Ensure that there is enough space in the buffer for the OsiPoint patching
  // to occur. Otherwise, we could overwrite the invalidation epilogue.
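  // Every arm64 instruction is 4 bytes, so with an 8-byte void* this loop
  // emits two nops.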
  for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
    masm.nop();
  }

  masm.bind(&invalidate_);

  // Push the return address of the point that we bailed out of onto the
  // stack.
  masm.push(lr);

  // Push the Ion script onto the stack (when we determine what that pointer
  // is).
  invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));

  // Jump to the invalidator which will replace the current frame.
  TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
  masm.jump(thunk);
}

template <typename U>
Register getBase(U* mir) {
  switch (mir->base()) {
    case U::Heap:
      return HeapReg;
  }
  return InvalidReg;
}

void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
  const MAsmJSLoadHeap* mir = ins->mir();
  MOZ_ASSERT(!mir->hasMemoryBase());

  const LAllocation* ptr = ins->ptr();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();

  Register ptrReg = ToRegister(ptr);
  Scalar::Type accessType = mir->accessType();
  bool isFloat =
      accessType == Scalar::Float32 || accessType == Scalar::Float64;
  Label done;

  if (mir->needsBoundsCheck()) {
    Label boundsCheckPassed;
    Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
    masm.wasmBoundsCheck32(Assembler::Below, ptrReg, boundsCheckLimitReg,
                           &boundsCheckPassed);
    // Return a default value in case of a bounds-check failure.
    if (isFloat) {
      if (accessType == Scalar::Float32) {
        masm.loadConstantFloat32(GenericNaN(), ToFloatRegister(ins->output()));
      } else {
        masm.loadConstantDouble(GenericNaN(), ToFloatRegister(ins->output()));
      }
    } else {
      masm.Mov(ARMRegister(ToRegister(ins->output()), 64), 0);
    }
    masm.jump(&done);
    masm.bind(&boundsCheckPassed);
  }

  MemOperand addr(ARMRegister(HeapReg, 64), ARMRegister(ptrReg, 64));
  switch (accessType) {
    case Scalar::Int8:
      masm.Ldrb(toWRegister(ins->output()), addr);
      masm.Sxtb(toWRegister(ins->output()), toWRegister(ins->output()));
      break;
    case Scalar::Uint8:
      masm.Ldrb(toWRegister(ins->output()), addr);
      break;
    case Scalar::Int16:
      masm.Ldrh(toWRegister(ins->output()), addr);
      masm.Sxth(toWRegister(ins->output()), toWRegister(ins->output()));
      break;
    case Scalar::Uint16:
      masm.Ldrh(toWRegister(ins->output()), addr);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
      masm.Ldr(toWRegister(ins->output()), addr);
      break;
    case Scalar::Float64:
      masm.Ldr(ARMFPRegister(ToFloatRegister(ins->output()), 64), addr);
      break;
    case Scalar::Float32:
      masm.Ldr(ARMFPRegister(ToFloatRegister(ins->output()), 32), addr);
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  if (done.used()) {
    masm.bind(&done);
  }
}

void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
  const MAsmJSStoreHeap* mir = ins->mir();
  MOZ_ASSERT(!mir->hasMemoryBase());

  const LAllocation* ptr = ins->ptr();
  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();

  Register ptrReg = ToRegister(ptr);

  Label done;
  if (mir->needsBoundsCheck()) {
    Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
    masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
                           boundsCheckLimitReg, &done);
  }

  MemOperand addr(ARMRegister(HeapReg, 64), ARMRegister(ptrReg, 64));
  switch (mir->accessType()) {
    case Scalar::Int8:
    case Scalar::Uint8:
      masm.Strb(toWRegister(ins->value()), addr);
      break;
    case Scalar::Int16:
    case Scalar::Uint16:
      masm.Strh(toWRegister(ins->value()), addr);
      break;
    case Scalar::Int32:
    case Scalar::Uint32:
      masm.Str(toWRegister(ins->value()), addr);
      break;
    case Scalar::Float64:
      masm.Str(ARMFPRegister(ToFloatRegister(ins->value()), 64), addr);
      break;
    case Scalar::Float32:
      masm.Str(ARMFPRegister(ToFloatRegister(ins->value()), 32), addr);
      break;
    default:
      MOZ_CRASH("unexpected array type");
array type"); } if (done.used()) { masm.bind(&done); } } void CodeGenerator::visitWasmCompareExchangeHeap( LWasmCompareExchangeHeap* ins) { MWasmCompareExchangeHeap* mir = ins->mir(); Register memoryBase = ToRegister(ins->memoryBase()); Register ptr = ToRegister(ins->ptr()); Register oldval = ToRegister(ins->oldValue()); Register newval = ToRegister(ins->newValue()); Register out = ToRegister(ins->output()); BaseIndex srcAddr(memoryBase, ptr, TimesOne, mir->access().offset32()); if (mir->access().type() == Scalar::Int64) { masm.wasmCompareExchange64(mir->access(), srcAddr, Register64(oldval), Register64(newval), Register64(out)); } else { masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval, out); } } void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) { MWasmAtomicExchangeHeap* mir = ins->mir(); Register memoryBase = ToRegister(ins->memoryBase()); Register ptr = ToRegister(ins->ptr()); Register oldval = ToRegister(ins->value()); Register out = ToRegister(ins->output()); BaseIndex srcAddr(memoryBase, ptr, TimesOne, mir->access().offset32()); if (mir->access().type() == Scalar::Int64) { masm.wasmAtomicExchange64(mir->access(), srcAddr, Register64(oldval), Register64(out)); } else { masm.wasmAtomicExchange(mir->access(), srcAddr, oldval, out); } } void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) { MWasmAtomicBinopHeap* mir = ins->mir(); MOZ_ASSERT(mir->hasUses()); Register memoryBase = ToRegister(ins->memoryBase()); Register ptr = ToRegister(ins->ptr()); Register value = ToRegister(ins->value()); Register flagTemp = ToRegister(ins->temp0()); Register out = ToRegister(ins->output()); BaseIndex srcAddr(memoryBase, ptr, TimesOne, mir->access().offset32()); AtomicOp op = mir->operation(); if (mir->access().type() == Scalar::Int64) { masm.wasmAtomicFetchOp64(mir->access(), op, Register64(value), srcAddr, Register64(flagTemp), Register64(out)); } else { masm.wasmAtomicFetchOp(mir->access(), op, value, srcAddr, flagTemp, out); } } void CodeGenerator::visitWasmAtomicBinopHeapForEffect( LWasmAtomicBinopHeapForEffect* ins) { MWasmAtomicBinopHeap* mir = ins->mir(); MOZ_ASSERT(!mir->hasUses()); Register memoryBase = ToRegister(ins->memoryBase()); Register ptr = ToRegister(ins->ptr()); Register value = ToRegister(ins->value()); Register flagTemp = ToRegister(ins->temp0()); BaseIndex srcAddr(memoryBase, ptr, TimesOne, mir->access().offset32()); AtomicOp op = mir->operation(); if (mir->access().type() == Scalar::Int64) { masm.wasmAtomicEffectOp64(mir->access(), op, Register64(value), srcAddr, Register64(flagTemp)); } else { masm.wasmAtomicEffectOp(mir->access(), op, value, srcAddr, flagTemp); } } void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) { const MWasmStackArg* mir = ins->mir(); Address dst(masm.getStackPointer(), mir->spOffset()); if (ins->arg()->isConstant()) { masm.storePtr(ImmWord(ToInt32(ins->arg())), dst); } else if (ins->arg()->isGeneralReg()) { masm.storePtr(ToRegister(ins->arg()), dst); } else { switch (mir->input()->type()) { case MIRType::Double: masm.storeDouble(ToFloatRegister(ins->arg()), dst); return; case MIRType::Float32: masm.storeFloat32(ToFloatRegister(ins->arg()), dst); return; #ifdef ENABLE_WASM_SIMD case MIRType::Simd128: masm.storeUnalignedSimd128(ToFloatRegister(ins->arg()), dst); return; #endif default: break; } MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE( "unexpected mir type in WasmStackArg"); } } void CodeGenerator::visitUDiv(LUDiv* ins) { MDiv* mir = ins->mir(); Register lhs = ToRegister(ins->lhs()); 
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  ARMRegister lhs32 = ARMRegister(lhs, 32);
  ARMRegister rhs32 = ARMRegister(rhs, 32);
  ARMRegister output32 = ARMRegister(output, 32);

  // Prevent divide by zero.
  if (mir->canBeDivideByZero()) {
    if (mir->trapOnError()) {
      TrapIfDivideByZero(masm, ins, rhs32);
    } else if (mir->canTruncateInfinities()) {
      // UDIV returns zero for division by zero, exactly what we want for
      // truncated division. Remainder computation expects a non-zero divisor,
      // so we must also be allowed to truncate the remainder.
      MOZ_ASSERT(mir->canTruncateRemainder(),
                 "remainder computation expects a non-zero divisor");
    } else {
      MOZ_ASSERT(mir->fallible());
      bailoutTest32(Assembler::Zero, rhs, rhs, ins->snapshot());
    }
  }

  // Unsigned division.
  masm.Udiv(output32, lhs32, rhs32);

  // If the remainder is > 0, bailout since this must be a double.
  if (!mir->canTruncateRemainder()) {
    vixl::UseScratchRegisterScope temps(&masm.asVIXL());
    ARMRegister remainder32 = temps.AcquireW();
    Register remainder = remainder32.asUnsized();

    // Compute the remainder: remainder = lhs - (output * rhs).
    masm.Msub(remainder32, output32, rhs32, lhs32);

    bailoutTest32(Assembler::NonZero, remainder, remainder, ins->snapshot());
  }

  // Unsigned div can return a value that's not a signed int32.
  // If our users aren't expecting that, bail.
  if (!mir->isTruncated()) {
    bailoutTest32(Assembler::Signed, output, output, ins->snapshot());
  }
}

void CodeGenerator::visitUMod(LUMod* ins) {
  Register rhs = ToRegister(ins->rhs());
  Register output = ToRegister(ins->output());
  ARMRegister lhs32 = toWRegister(ins->lhs());
  ARMRegister rhs32 = toWRegister(ins->rhs());
  ARMRegister output32 = toWRegister(ins->output());

  Label done;
  MMod* mir = ins->mir();

  if (mir->canBeDivideByZero()) {
    if (mir->trapOnError()) {
      TrapIfDivideByZero(masm, ins, rhs32);
    } else if (mir->isTruncated()) {
      // Truncated division by zero yields integer zero.
      masm.Mov(output32, wzr);
      masm.Cbz(rhs32, &done);
    } else {
      // Non-truncated division by zero produces a non-integer.
      bailoutTest32(Assembler::Zero, rhs, rhs, ins->snapshot());
    }
  }

  // Unsigned division.
  masm.Udiv(output32, lhs32, rhs32);

  // Compute the remainder: output = lhs - (output * rhs).
  masm.Msub(output32, output32, rhs32, lhs32);

  if (!mir->isTruncated()) {
    // Bail if the output would be negative.
    //
    // LUMod inputs may be Uint32, so care is taken to ensure the result
    // is not unexpectedly signed.
    bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
  }

  if (done.used()) {
    masm.bind(&done);
  }
}

void CodeGenerator::visitEffectiveAddress3(LEffectiveAddress3* ins) {
  const MEffectiveAddress3* mir = ins->mir();

  const ARMRegister base = toWRegister(ins->base());
  const ARMRegister index = toWRegister(ins->index());
  const ARMRegister output = toWRegister(ins->output());

  if (mir->scale() == Scale::TimesOne) {
    masm.Add(output, base, Operand(index));
  } else {
    masm.Add(output, base, Operand(index, vixl::LSL, mir->scale()));
  }
  if (mir->displacement() != 0) {
    masm.Add(output, output, Operand(mir->displacement()));
  }
}

void CodeGenerator::visitEffectiveAddress2(LEffectiveAddress2* ins) {
  const MEffectiveAddress2* mir = ins->mir();

  const ARMRegister index = toWRegister(ins->index());
  const ARMRegister output = toWRegister(ins->output());

  // Ensured because the LIR's `index` input is not an AtStart variant.
  // If this ever fails, we'll need to generate a slower sequence, the same
  // as ::visitEffectiveAddress3, but with `base` being `wzr`.
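  // The assertion matters because the displacement is materialized into the
  // output register first: if `output` aliased `index`, the Mov would clobber
  // the index before the shifted Add could consume it.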
MOZ_RELEASE_ASSERT(output.code() != index.code()); masm.Mov(output, mir->displacement()); masm.Add(output, output, Operand(index, vixl::LSL, mir->scale())); } void CodeGenerator::visitNegI(LNegI* ins) { const ARMRegister input = toWRegister(ins->input()); const ARMRegister output = toWRegister(ins->output()); masm.Neg(output, input); } void CodeGenerator::visitNegI64(LNegI64* ins) { const ARMRegister input = toXRegister(ins->input()); const ARMRegister output = toXRegister(ins->output()); masm.Neg(output, input); } void CodeGenerator::visitNegD(LNegD* ins) { const ARMFPRegister input(ToFloatRegister(ins->input()), 64); const ARMFPRegister output(ToFloatRegister(ins->output()), 64); masm.Fneg(output, input); } void CodeGenerator::visitNegF(LNegF* ins) { const ARMFPRegister input(ToFloatRegister(ins->input()), 32); const ARMFPRegister output(ToFloatRegister(ins->output()), 32); masm.Fneg(output, input); } void CodeGenerator::visitCompareExchangeTypedArrayElement( LCompareExchangeTypedArrayElement* lir) { Register elements = ToRegister(lir->elements()); AnyRegister output = ToAnyRegister(lir->output()); Register temp = ToTempRegisterOrInvalid(lir->temp0()); Register oldval = ToRegister(lir->oldval()); Register newval = ToRegister(lir->newval()); Scalar::Type arrayType = lir->mir()->arrayType(); auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType); dest.match([&](const auto& dest) { masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval, newval, temp, output); }); } void CodeGenerator::visitAtomicExchangeTypedArrayElement( LAtomicExchangeTypedArrayElement* lir) { Register elements = ToRegister(lir->elements()); AnyRegister output = ToAnyRegister(lir->output()); Register temp = ToTempRegisterOrInvalid(lir->temp0()); Register value = ToRegister(lir->value()); Scalar::Type arrayType = lir->mir()->arrayType(); auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType); dest.match([&](const auto& dest) { masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value, temp, output); }); } void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) { Register elements = ToRegister(lir->elements()); Register64 out = ToOutRegister64(lir); Scalar::Type storageType = lir->mir()->storageType(); auto source = ToAddressOrBaseIndex(elements, lir->index(), storageType); // NOTE: the generated code must match the assembly code in gen_load in // GenerateAtomicOperations.py auto sync = Synchronization::Load(); masm.memoryBarrierBefore(sync); source.match([&](const auto& source) { masm.load64(source, out); }); masm.memoryBarrierAfter(sync); } void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) { Register elements = ToRegister(lir->elements()); Register64 value = ToRegister64(lir->value()); Scalar::Type writeType = lir->mir()->writeType(); auto dest = ToAddressOrBaseIndex(elements, lir->index(), writeType); // NOTE: the generated code must match the assembly code in gen_store in // GenerateAtomicOperations.py auto sync = Synchronization::Store(); masm.memoryBarrierBefore(sync); dest.match([&](const auto& dest) { masm.store64(value, dest); }); masm.memoryBarrierAfter(sync); } void CodeGenerator::visitCompareExchangeTypedArrayElement64( LCompareExchangeTypedArrayElement64* lir) { Register elements = ToRegister(lir->elements()); Register64 oldval = ToRegister64(lir->oldval()); Register64 newval = ToRegister64(lir->newval()); Register64 out = ToOutRegister64(lir); Scalar::Type arrayType = lir->mir()->arrayType(); auto dest = ToAddressOrBaseIndex(elements, 
lir->index(), arrayType); dest.match([&](const auto& dest) { masm.compareExchange64(Synchronization::Full(), dest, oldval, newval, out); }); } void CodeGenerator::visitAtomicExchangeTypedArrayElement64( LAtomicExchangeTypedArrayElement64* lir) { Register elements = ToRegister(lir->elements()); Register64 value = ToRegister64(lir->value()); Register64 out = ToOutRegister64(lir); Scalar::Type arrayType = lir->mir()->arrayType(); auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType); dest.match([&](const auto& dest) { masm.atomicExchange64(Synchronization::Full(), dest, value, out); }); } void CodeGenerator::visitAtomicTypedArrayElementBinop64( LAtomicTypedArrayElementBinop64* lir) { MOZ_ASSERT(!lir->mir()->isForEffect()); Register elements = ToRegister(lir->elements()); Register64 value = ToRegister64(lir->value()); Register64 temp = ToRegister64(lir->temp0()); Register64 out = ToOutRegister64(lir); Scalar::Type arrayType = lir->mir()->arrayType(); AtomicOp atomicOp = lir->mir()->operation(); auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType); dest.match([&](const auto& dest) { masm.atomicFetchOp64(Synchronization::Full(), atomicOp, value, dest, temp, out); }); } void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64( LAtomicTypedArrayElementBinopForEffect64* lir) { MOZ_ASSERT(lir->mir()->isForEffect()); Register elements = ToRegister(lir->elements()); Register64 value = ToRegister64(lir->value()); Register64 temp = ToRegister64(lir->temp0()); Scalar::Type arrayType = lir->mir()->arrayType(); AtomicOp atomicOp = lir->mir()->operation(); auto dest = ToAddressOrBaseIndex(elements, lir->index(), arrayType); dest.match([&](const auto& dest) { masm.atomicEffectOp64(Synchronization::Full(), atomicOp, value, dest, temp); }); } void CodeGenerator::visitAddI64(LAddI64* lir) { ARMRegister dest = toXRegister(lir->output()); ARMRegister lhs = toXRegister(lir->lhs()); Operand rhs = toXOperand(lir->rhs()); masm.Add(dest, lhs, rhs); } void CodeGenerator::visitMulI64(LMulI64* lir) { LInt64Allocation lhs = lir->lhs(); LInt64Allocation rhs = lir->rhs(); Register64 output = ToOutRegister64(lir); if (IsConstant(rhs)) { int64_t constant = ToInt64(rhs); // Ad-hoc strength reduction, cf the x64 code as well as the 32-bit code // higher up in this file. Bug 1712298 will lift this code to the MIR // constant folding pass, or to lowering. // // This is for wasm integers only, so no input guards or overflow checking // are needed. switch (constant) { case -1: masm.Neg(ARMRegister(output.reg, 64), ARMRegister(ToRegister64(lhs).reg, 64)); break; case 0: masm.Mov(ARMRegister(output.reg, 64), xzr); break; case 1: if (ToRegister64(lhs) != output) { masm.move64(ToRegister64(lhs), output); } break; case 2: masm.Add(ARMRegister(output.reg, 64), ARMRegister(ToRegister64(lhs).reg, 64), ARMRegister(ToRegister64(lhs).reg, 64)); break; default: // Use shift if constant is nonnegative power of 2. 
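      // Worked example of the check below (illustrative only): for
      // constant == 8, FloorLog2(8) == 3 and (int64_t(1) << 3) == 8, so a
      // single Lsl by 3 suffices; for constant == 6, FloorLog2(6) == 2 but
      // (int64_t(1) << 2) != 6, so we fall through to the generic mul64.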
if (constant > 0) { int32_t shift = mozilla::FloorLog2(uint64_t(constant)); if (int64_t(1) << shift == constant) { masm.Lsl(ARMRegister(output.reg, 64), ARMRegister(ToRegister64(lhs).reg, 64), shift); break; } } masm.mul64(Imm64(constant), ToRegister64(lhs), output); break; } } else { masm.mul64(ToRegister64(lhs), ToRegister64(rhs), output); } } void CodeGenerator::visitSubI64(LSubI64* lir) { ARMRegister dest = toXRegister(lir->output()); ARMRegister lhs = toXRegister(lir->lhs()); Operand rhs = toXOperand(lir->rhs()); masm.Sub(dest, lhs, rhs); } void CodeGenerator::visitBitOpI64(LBitOpI64* lir) { ARMRegister dest = toXRegister(lir->output()); ARMRegister lhs = toXRegister(lir->lhs()); Operand rhs = toXOperand(lir->rhs()); switch (lir->bitop()) { case JSOp::BitOr: masm.Orr(dest, lhs, rhs); break; case JSOp::BitXor: masm.Eor(dest, lhs, rhs); break; case JSOp::BitAnd: masm.And(dest, lhs, rhs); break; default: MOZ_CRASH("unexpected binary opcode"); } } void CodeGenerator::visitShiftI64(LShiftI64* lir) { ARMRegister lhs(ToRegister64(lir->lhs()).reg, 64); const LAllocation* rhsAlloc = lir->rhs(); ARMRegister dest(ToOutRegister64(lir).reg, 64); if (rhsAlloc->isConstant()) { int32_t shift = int32_t(rhsAlloc->toConstant()->toInt64() & 0x3F); if (shift == 0) { masm.Mov(dest, lhs); } else { switch (lir->bitop()) { case JSOp::Lsh: masm.Lsl(dest, lhs, shift); break; case JSOp::Rsh: masm.Asr(dest, lhs, shift); break; case JSOp::Ursh: masm.Lsr(dest, lhs, shift); break; default: MOZ_CRASH("Unexpected shift op"); } } } else { ARMRegister rhs(ToRegister(rhsAlloc), 64); switch (lir->bitop()) { case JSOp::Lsh: masm.Lsl(dest, lhs, rhs); break; case JSOp::Rsh: masm.Asr(dest, lhs, rhs); break; case JSOp::Ursh: masm.Lsr(dest, lhs, rhs); break; default: MOZ_CRASH("Unexpected shift op"); } } } void CodeGenerator::visitAddIntPtr(LAddIntPtr* ins) { ARMRegister lhs = toXRegister(ins->lhs()); Operand rhs = toXOperand(ins->rhs()); ARMRegister dest = toXRegister(ins->output()); masm.Add(dest, lhs, rhs); } void CodeGenerator::visitSubIntPtr(LSubIntPtr* ins) { ARMRegister lhs = toXRegister(ins->lhs()); Operand rhs = toXOperand(ins->rhs()); ARMRegister dest = toXRegister(ins->output()); masm.Sub(dest, lhs, rhs); } void CodeGenerator::visitMulIntPtr(LMulIntPtr* ins) { ARMRegister lhs = toXRegister(ins->lhs()); const LAllocation* rhs = ins->rhs(); ARMRegister dest = toXRegister(ins->output()); if (rhs->isConstant()) { intptr_t constant = ToIntPtr(rhs); switch (constant) { case -1: masm.Neg(dest, lhs); return; case 0: masm.Mov(dest, xzr); return; case 1: if (!dest.Is(lhs)) { masm.Mov(dest, lhs); } return; case 2: masm.Add(dest, lhs, lhs); return; } // Use shift if constant is a power of 2. if (constant > 0 && std::has_single_bit(uintptr_t(constant))) { uint32_t shift = mozilla::FloorLog2(uintptr_t(constant)); masm.Lsl(dest, lhs, shift); return; } vixl::UseScratchRegisterScope temps(&masm.asVIXL()); vixl::Register scratch = temps.AcquireX(); masm.Mov(scratch, constant); masm.Mul(dest, lhs, scratch); } else { masm.Mul(dest, lhs, toXRegister(rhs)); } } // If we have a constant base ptr, try to add the offset to it, to generate // better code when the full address is known. The addition may overflow past // 32 bits because the front end does nothing special if the base is a large // constant and base+offset overflows; sidestep this by performing the addition // anyway, overflowing to 64-bit. 
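// Illustrative example of the overflow behavior described above (numbers not
// from the original source): with a 32-bit constant base 0xFFFFFFF0 and an
// access offset of 0x20, the sum is computed as a 64-bit value,
// 0x1'0000'0010, rather than wrapping to 0x10 as a 32-bit addition would.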
static Maybe<uint64_t> IsAbsoluteAddress(const LAllocation* ptr,
                                         const wasm::MemoryAccessDesc& access) {
  if (ptr->isConstantValue()) {
    const MConstant* c = ptr->toConstant();
    uint64_t base_address = c->type() == MIRType::Int32
                                ? uint64_t(uint32_t(c->toInt32()))
                                : uint64_t(c->toInt64());
    uint64_t offset = access.offset32();
    return Some(base_address + offset);
  }
  return Nothing();
}

void CodeGenerator::visitWasmLoad(LWasmLoad* lir) {
  const MWasmLoad* mir = lir->mir();

  if (Maybe<uint64_t> absAddr = IsAbsoluteAddress(lir->ptr(), mir->access())) {
    masm.wasmLoadAbsolute(mir->access(), ToRegister(lir->memoryBase()),
                          absAddr.value(), ToAnyRegister(lir->output()),
                          Register64::Invalid());
    return;
  }

  // ptr is a GPR and is either a 32-bit value zero-extended to 64-bit, or a
  // true 64-bit value.
  masm.wasmLoad(mir->access(), ToRegister(lir->memoryBase()),
                ToRegister(lir->ptr()), ToAnyRegister(lir->output()));
}

void CodeGenerator::visitWasmStore(LWasmStore* lir) {
  const MWasmStore* mir = lir->mir();

  if (Maybe<uint64_t> absAddr = IsAbsoluteAddress(lir->ptr(), mir->access())) {
    masm.wasmStoreAbsolute(mir->access(), ToAnyRegister(lir->value()),
                           Register64::Invalid(),
                           ToRegister(lir->memoryBase()), absAddr.value());
    return;
  }

  masm.wasmStore(mir->access(), ToAnyRegister(lir->value()),
                 ToRegister(lir->memoryBase()), ToRegister(lir->ptr()));
}

void CodeGenerator::visitWasmSelect(LWasmSelect* lir) {
  MIRType mirType = lir->mir()->type();
  Register condReg = ToRegister(lir->condExpr());

  masm.test32(condReg, condReg);

  switch (mirType) {
    case MIRType::Int32:
    case MIRType::WasmAnyRef:
    case MIRType::WasmArrayData: {
      Register outReg = ToRegister(lir->output());
      Register trueReg = ToRegister(lir->trueExpr());
      Register falseReg = ToRegister(lir->falseExpr());

      if (mirType == MIRType::Int32) {
        masm.Csel(ARMRegister(outReg, 32), ARMRegister(trueReg, 32),
                  ARMRegister(falseReg, 32), Assembler::NonZero);
      } else {
        masm.Csel(ARMRegister(outReg, 64), ARMRegister(trueReg, 64),
                  ARMRegister(falseReg, 64), Assembler::NonZero);
      }
      break;
    }

    case MIRType::Float32:
    case MIRType::Double:
    case MIRType::Simd128: {
      FloatRegister outReg = ToFloatRegister(lir->output());
      FloatRegister trueReg = ToFloatRegister(lir->trueExpr());
      FloatRegister falseReg = ToFloatRegister(lir->falseExpr());

      switch (mirType) {
        case MIRType::Float32:
          masm.Fcsel(ARMFPRegister(outReg, 32), ARMFPRegister(trueReg, 32),
                     ARMFPRegister(falseReg, 32), Assembler::NonZero);
          break;
        case MIRType::Double:
          masm.Fcsel(ARMFPRegister(outReg, 64), ARMFPRegister(trueReg, 64),
                     ARMFPRegister(falseReg, 64), Assembler::NonZero);
          break;
#ifdef ENABLE_WASM_SIMD
        case MIRType::Simd128: {
          MOZ_ASSERT(outReg == trueReg);
          Label done;
          masm.j(Assembler::NonZero, &done);
          masm.moveSimd128(falseReg, outReg);
          masm.bind(&done);
          break;
        }
#endif
        default:
          MOZ_CRASH();
      }
      break;
    }

    default: {
      MOZ_CRASH("unhandled type in visitWasmSelect!");
    }
  }
}

// We expect to handle the cases: compare is {{U,}Int32, {U,}Int64, {Float32,
// Double}}, and select is {{U,}Int32, {U,}Int64, {Float32, Double}},
// independently.
void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
  MCompare::CompareType compTy = ins->compareType();

  // Set flag.
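  // (Informational) The comparison below sets NZCV once, and the Csel/Fcsel
  // emitted further down consumes it directly, so no boolean result is ever
  // materialized in a register. For example, an i32 signed-less-than feeding
  // a select lowers to roughly
  //
  //   cmp  w0, w1
  //   csel w2, w3, w4, lt
  //
  // (register names here are illustrative, not taken from this function).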
if (compTy == MCompare::Compare_Int32 || compTy == MCompare::Compare_UInt32) { Register lhs = ToRegister(ins->leftExpr()); if (ins->rightExpr()->isConstant()) { masm.cmp32(lhs, Imm32(ins->rightExpr()->toConstant()->toInt32())); } else { masm.cmp32(lhs, ToRegister(ins->rightExpr())); } } else if (compTy == MCompare::Compare_Int64 || compTy == MCompare::Compare_UInt64) { Register lhs = ToRegister(ins->leftExpr()); if (ins->rightExpr()->isConstant()) { masm.cmpPtr(lhs, Imm64(ins->rightExpr()->toConstant()->toInt64())); } else { masm.cmpPtr(lhs, ToRegister(ins->rightExpr())); } } else if (compTy == MCompare::Compare_Float32) { masm.compareFloat(ToFloatRegister(ins->leftExpr()), ToFloatRegister(ins->rightExpr())); } else if (compTy == MCompare::Compare_Double) { masm.compareDouble(ToFloatRegister(ins->leftExpr()), ToFloatRegister(ins->rightExpr())); } else { // Ref types not supported yet; v128 is not yet observed to be worth // optimizing. MOZ_CRASH("CodeGenerator::visitWasmCompareAndSelect: unexpected type (1)"); } // Act on flag. Assembler::Condition cond; if (compTy == MCompare::Compare_Float32 || compTy == MCompare::Compare_Double) { cond = Assembler::ConditionFromDoubleCondition( JSOpToDoubleCondition(ins->jsop())); } else { cond = JSOpToCondition(compTy, ins->jsop()); } MIRType insTy = ins->mir()->type(); if (insTy == MIRType::Int32 || insTy == MIRType::Int64) { Register destReg = ToRegister(ins->output()); Register trueReg = ToRegister(ins->ifTrueExpr()); Register falseReg = ToRegister(ins->ifFalseExpr()); size_t size = insTy == MIRType::Int32 ? 32 : 64; masm.Csel(ARMRegister(destReg, size), ARMRegister(trueReg, size), ARMRegister(falseReg, size), cond); } else if (insTy == MIRType::Float32 || insTy == MIRType::Double) { FloatRegister destReg = ToFloatRegister(ins->output()); FloatRegister trueReg = ToFloatRegister(ins->ifTrueExpr()); FloatRegister falseReg = ToFloatRegister(ins->ifFalseExpr()); size_t size = MIRTypeToSize(insTy) * 8; masm.Fcsel(ARMFPRegister(destReg, size), ARMFPRegister(trueReg, size), ARMFPRegister(falseReg, size), cond); } else { // See above. 
    MOZ_CRASH("CodeGenerator::visitWasmCompareAndSelect: unexpected type (2)");
  }
}

void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) {
  const MWasmLoad* mir = lir->mir();

  if (Maybe<uint64_t> absAddr = IsAbsoluteAddress(lir->ptr(), mir->access())) {
    masm.wasmLoadAbsolute(mir->access(), ToRegister(lir->memoryBase()),
                          absAddr.value(), AnyRegister(), ToOutRegister64(lir));
    return;
  }

  masm.wasmLoadI64(mir->access(), ToRegister(lir->memoryBase()),
                   ToRegister(lir->ptr()), ToOutRegister64(lir));
}

void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
  const MWasmStore* mir = lir->mir();

  if (Maybe<uint64_t> absAddr = IsAbsoluteAddress(lir->ptr(), mir->access())) {
    masm.wasmStoreAbsolute(mir->access(), AnyRegister(),
                           ToRegister64(lir->value()),
                           ToRegister(lir->memoryBase()), absAddr.value());
    return;
  }

  masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()),
                    ToRegister(lir->memoryBase()), ToRegister(lir->ptr()));
}

void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
  MWasmAddOffset* mir = lir->mir();
  Register base = ToRegister(lir->base());
  Register out = ToRegister(lir->output());

  masm.Adds(ARMRegister(out, 32), ARMRegister(base, 32),
            Operand(mir->offset()));

  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
  });
  addOutOfLineCode(ool, mir);
  masm.j(Assembler::CarrySet, ool->entry());
}

void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) {
  MWasmAddOffset* mir = lir->mir();
  Register64 base = ToRegister64(lir->base());
  Register64 out = ToOutRegister64(lir);

  masm.Adds(ARMRegister(out.reg, 64), ARMRegister(base.reg, 64),
            Operand(mir->offset()));

  auto* ool = new (alloc()) LambdaOutOfLineCode([=, this](OutOfLineCode& ool) {
    masm.wasmTrap(wasm::Trap::OutOfBounds, mir->trapSiteDesc());
  });
  addOutOfLineCode(ool, mir);
  masm.j(Assembler::CarrySet, ool->entry());
}

void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
  MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);

  Register condReg = ToRegister(lir->condExpr());
  Register64 trueReg = ToRegister64(lir->trueExpr());
  Register64 falseReg = ToRegister64(lir->falseExpr());
  Register64 outReg = ToOutRegister64(lir);

  masm.test32(condReg, condReg);
  masm.Csel(ARMRegister(outReg.reg, 64), ARMRegister(trueReg.reg, 64),
            ARMRegister(falseReg.reg, 64), Assembler::NonZero);
}

void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* ins) {
  Register64 input = ToRegister64(ins->input());
  Register64 output = ToOutRegister64(ins);
  switch (ins->mir()->mode()) {
    case MSignExtendInt64::Byte:
      masm.move8To64SignExtend(input.reg, output);
      break;
    case MSignExtendInt64::Half:
      masm.move16To64SignExtend(input.reg, output);
      break;
    case MSignExtendInt64::Word:
      masm.move32To64SignExtend(input.reg, output);
      break;
  }
}

void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
  const MWasmStackArg* mir = ins->mir();
  Address dst(masm.getStackPointer(), mir->spOffset());
  if (IsConstant(ins->arg())) {
    masm.store64(Imm64(ToInt64(ins->arg())), dst);
  } else {
    masm.store64(ToRegister64(ins->arg()), dst);
  }
}

void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
  LInt64Allocation input = lir->input();
  Register output = ToRegister(lir->output());

  if (lir->mir()->bottomHalf()) {
    if (input.value().isMemory()) {
      masm.load32(ToAddress(input), output);
    } else {
      masm.move64To32(ToRegister64(input), output);
    }
  } else {
    MOZ_CRASH("Not implemented.");
  }
}

void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
  Register input = ToRegister(lir->input());
  Register64 output =
ToOutRegister64(lir); if (lir->mir()->isUnsigned()) { masm.move32To64ZeroExtend(input, output); } else { masm.move32To64SignExtend(input, output); } } void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index* lir) { // Generates no code on this platform because the input is assumed to have // canonical form. Register output = ToRegister(lir->output()); MOZ_ASSERT(ToRegister(lir->input()) == output); masm.debugAssertCanonicalInt32(output); } void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) { // Generates no code on this platform because the input is assumed to have // canonical form. Register output = ToRegister(lir->output()); MOZ_ASSERT(ToRegister(lir->input()) == output); masm.debugAssertCanonicalInt32(output); } void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) { auto input = ToFloatRegister(lir->input()); auto output = ToRegister(lir->output()); MWasmTruncateToInt32* mir = lir->mir(); MIRType fromType = mir->input()->type(); MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32); auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output); addOutOfLineCode(ool, mir); Label* oolEntry = ool->entry(); if (mir->isUnsigned()) { if (fromType == MIRType::Double) { masm.wasmTruncateDoubleToUInt32(input, output, mir->isSaturating(), oolEntry); } else if (fromType == MIRType::Float32) { masm.wasmTruncateFloat32ToUInt32(input, output, mir->isSaturating(), oolEntry); } else { MOZ_CRASH("unexpected type"); } masm.bind(ool->rejoin()); return; } if (fromType == MIRType::Double) { masm.wasmTruncateDoubleToInt32(input, output, mir->isSaturating(), oolEntry); } else if (fromType == MIRType::Float32) { masm.wasmTruncateFloat32ToInt32(input, output, mir->isSaturating(), oolEntry); } else { MOZ_CRASH("unexpected type"); } masm.bind(ool->rejoin()); } void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) { FloatRegister input = ToFloatRegister(lir->input()); Register64 output = ToOutRegister64(lir); MWasmTruncateToInt64* mir = lir->mir(); MIRType fromType = mir->input()->type(); MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32); auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output); addOutOfLineCode(ool, mir); Label* oolEntry = ool->entry(); Label* oolRejoin = ool->rejoin(); bool isSaturating = mir->isSaturating(); if (fromType == MIRType::Double) { if (mir->isUnsigned()) { masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, oolEntry, oolRejoin, InvalidFloatReg); } else { masm.wasmTruncateDoubleToInt64(input, output, isSaturating, oolEntry, oolRejoin, InvalidFloatReg); } } else { if (mir->isUnsigned()) { masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating, oolEntry, oolRejoin, InvalidFloatReg); } else { masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, oolEntry, oolRejoin, InvalidFloatReg); } } } void CodeGeneratorARM64::visitOutOfLineWasmTruncateCheck( OutOfLineWasmTruncateCheck* ool) { FloatRegister input = ool->input(); Register output = ool->output(); Register64 output64 = ool->output64(); MIRType fromType = ool->fromType(); MIRType toType = ool->toType(); Label* oolRejoin = ool->rejoin(); TruncFlags flags = ool->flags(); const wasm::TrapSiteDesc& trapSiteDesc = ool->trapSiteDesc(); if (fromType == MIRType::Float32) { if (toType == MIRType::Int32) { masm.oolWasmTruncateCheckF32ToI32(input, output, flags, trapSiteDesc, oolRejoin); } else if (toType == MIRType::Int64) { masm.oolWasmTruncateCheckF32ToI64(input, output64, flags, trapSiteDesc, 
oolRejoin); } else { MOZ_CRASH("unexpected type"); } } else if (fromType == MIRType::Double) { if (toType == MIRType::Int32) { masm.oolWasmTruncateCheckF64ToI32(input, output, flags, trapSiteDesc, oolRejoin); } else if (toType == MIRType::Int64) { masm.oolWasmTruncateCheckF64ToI64(input, output64, flags, trapSiteDesc, oolRejoin); } else { MOZ_CRASH("unexpected type"); } } else { MOZ_CRASH("unexpected type"); } } void CodeGenerator::visitAtomicTypedArrayElementBinop( LAtomicTypedArrayElementBinop* lir) { MOZ_ASSERT(!lir->mir()->isForEffect()); AnyRegister output = ToAnyRegister(lir->output()); Register elements = ToRegister(lir->elements()); Register flagTemp = ToRegister(lir->temp0()); Register outTemp = ToTempRegisterOrInvalid(lir->temp1()); Register value = ToRegister(lir->value()); Scalar::Type arrayType = lir->mir()->arrayType(); auto mem = ToAddressOrBaseIndex(elements, lir->index(), arrayType); mem.match([&](const auto& mem) { masm.atomicFetchOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value, mem, flagTemp, outTemp, output); }); } void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect( LAtomicTypedArrayElementBinopForEffect* lir) { MOZ_ASSERT(lir->mir()->isForEffect()); Register elements = ToRegister(lir->elements()); Register flagTemp = ToRegister(lir->temp0()); Register value = ToRegister(lir->value()); Scalar::Type arrayType = lir->mir()->arrayType(); auto mem = ToAddressOrBaseIndex(elements, lir->index(), arrayType); mem.match([&](const auto& mem) { masm.atomicEffectOpJS(arrayType, Synchronization::Full(), lir->mir()->operation(), value, mem, flagTemp); }); } void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) { Register64 input = ToRegister64(lir->input()); FloatRegister output = ToFloatRegister(lir->output()); MIRType outputType = lir->mir()->type(); MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32); if (outputType == MIRType::Double) { if (lir->mir()->isUnsigned()) { masm.convertUInt64ToDouble(input, output, Register::Invalid()); } else { masm.convertInt64ToDouble(input, output); } } else { if (lir->mir()->isUnsigned()) { masm.convertUInt64ToFloat32(input, output, Register::Invalid()); } else { masm.convertInt64ToFloat32(input, output); } } } void CodeGenerator::visitDivI64(LDivI64* lir) { Register lhs = ToRegister(lir->lhs()); Register rhs = ToRegister(lir->rhs()); ARMRegister lhs64 = toXRegister(lir->lhs()); ARMRegister rhs64 = toXRegister(lir->rhs()); ARMRegister output64 = toXRegister(lir->output()); MDiv* mir = lir->mir(); // Handle divide by zero. TrapIfDivideByZero(masm, lir, rhs64); // Handle an integer overflow exception from INT64_MIN / -1. if (mir->canBeNegativeOverflow()) { Label noOverflow; masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &noOverflow); masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &noOverflow); masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->trapSiteDesc()); masm.bind(&noOverflow); } masm.Sdiv(output64, lhs64, rhs64); } void CodeGenerator::visitModI64(LModI64* lir) { ARMRegister lhs64 = toXRegister(lir->lhs()); ARMRegister rhs64 = toXRegister(lir->rhs()); ARMRegister output64 = toXRegister(lir->output()); // Handle divide by zero. TrapIfDivideByZero(masm, lir, rhs64); masm.Sdiv(output64, lhs64, rhs64); // Compute the remainder: output = lhs - (output * rhs). 
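  // (Informational) AArch64's MSUB computes Ra - Rn * Rm, so the instruction
  // below yields lhs - (lhs / rhs) * rhs, i.e. the remainder, whose sign
  // follows the dividend because Sdiv truncates toward zero.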
masm.Msub(output64, output64, rhs64, lhs64); } void CodeGenerator::visitUDivI64(LUDivI64* lir) { ARMRegister lhs64 = toXRegister(lir->lhs()); ARMRegister rhs64 = toXRegister(lir->rhs()); ARMRegister output64 = toXRegister(lir->output()); // Handle divide by zero. TrapIfDivideByZero(masm, lir, rhs64); masm.Udiv(output64, lhs64, rhs64); } void CodeGenerator::visitUModI64(LUModI64* lir) { ARMRegister lhs64 = toXRegister(lir->lhs()); ARMRegister rhs64 = toXRegister(lir->rhs()); ARMRegister output64 = toXRegister(lir->output()); // Handle divide by zero. TrapIfDivideByZero(masm, lir, rhs64); masm.Udiv(output64, lhs64, rhs64); // Compute the remainder: output = lhs - (output * rhs). masm.Msub(output64, output64, rhs64, lhs64); } void CodeGenerator::visitSimd128(LSimd128* ins) { #ifdef ENABLE_WASM_SIMD const LDefinition* out = ins->output(); masm.loadConstantSimd128(ins->simd128(), ToFloatRegister(out)); #else MOZ_CRASH("No SIMD"); #endif } void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) { #ifdef ENABLE_WASM_SIMD switch (ins->simdOp()) { case wasm::SimdOp::V128Bitselect: { FloatRegister lhs = ToFloatRegister(ins->v0()); FloatRegister rhs = ToFloatRegister(ins->v1()); FloatRegister controlDest = ToFloatRegister(ins->v2()); masm.bitwiseSelectSimd128(lhs, rhs, controlDest); break; } case wasm::SimdOp::F32x4RelaxedMadd: masm.fmaFloat32x4(ToFloatRegister(ins->v0()), ToFloatRegister(ins->v1()), ToFloatRegister(ins->v2())); break; case wasm::SimdOp::F32x4RelaxedNmadd: masm.fnmaFloat32x4(ToFloatRegister(ins->v0()), ToFloatRegister(ins->v1()), ToFloatRegister(ins->v2())); break; case wasm::SimdOp::F64x2RelaxedMadd: masm.fmaFloat64x2(ToFloatRegister(ins->v0()), ToFloatRegister(ins->v1()), ToFloatRegister(ins->v2())); break; case wasm::SimdOp::F64x2RelaxedNmadd: masm.fnmaFloat64x2(ToFloatRegister(ins->v0()), ToFloatRegister(ins->v1()), ToFloatRegister(ins->v2())); break; case wasm::SimdOp::I8x16RelaxedLaneSelect: case wasm::SimdOp::I16x8RelaxedLaneSelect: case wasm::SimdOp::I32x4RelaxedLaneSelect: case wasm::SimdOp::I64x2RelaxedLaneSelect: { FloatRegister lhs = ToFloatRegister(ins->v0()); FloatRegister rhs = ToFloatRegister(ins->v1()); FloatRegister maskDest = ToFloatRegister(ins->v2()); masm.laneSelectSimd128(maskDest, lhs, rhs, maskDest); break; } case wasm::SimdOp::I32x4RelaxedDotI8x16I7x16AddS: masm.dotInt8x16Int7x16ThenAdd( ToFloatRegister(ins->v0()), ToFloatRegister(ins->v1()), ToFloatRegister(ins->v2()), ToFloatRegister(ins->temp0())); break; default: MOZ_CRASH("NYI"); } #else MOZ_CRASH("No SIMD"); #endif } void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) { #ifdef ENABLE_WASM_SIMD FloatRegister lhs = ToFloatRegister(ins->lhs()); FloatRegister rhs = ToFloatRegister(ins->rhs()); FloatRegister dest = ToFloatRegister(ins->output()); switch (ins->simdOp()) { case wasm::SimdOp::V128And: masm.bitwiseAndSimd128(lhs, rhs, dest); break; case wasm::SimdOp::V128Or: masm.bitwiseOrSimd128(lhs, rhs, dest); break; case wasm::SimdOp::V128Xor: masm.bitwiseXorSimd128(lhs, rhs, dest); break; case wasm::SimdOp::V128AndNot: masm.bitwiseAndNotSimd128(lhs, rhs, dest); break; case wasm::SimdOp::I8x16AvgrU: masm.unsignedAverageInt8x16(lhs, rhs, dest); break; case wasm::SimdOp::I16x8AvgrU: masm.unsignedAverageInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I8x16Add: masm.addInt8x16(lhs, rhs, dest); break; case wasm::SimdOp::I8x16AddSatS: masm.addSatInt8x16(lhs, rhs, dest); break; case wasm::SimdOp::I8x16AddSatU: masm.unsignedAddSatInt8x16(lhs, rhs, dest); break; case 
wasm::SimdOp::I8x16Sub: masm.subInt8x16(lhs, rhs, dest); break; case wasm::SimdOp::I8x16SubSatS: masm.subSatInt8x16(lhs, rhs, dest); break; case wasm::SimdOp::I8x16SubSatU: masm.unsignedSubSatInt8x16(lhs, rhs, dest); break; case wasm::SimdOp::I8x16MinS: masm.minInt8x16(lhs, rhs, dest); break; case wasm::SimdOp::I8x16MinU: masm.unsignedMinInt8x16(lhs, rhs, dest); break; case wasm::SimdOp::I8x16MaxS: masm.maxInt8x16(lhs, rhs, dest); break; case wasm::SimdOp::I8x16MaxU: masm.unsignedMaxInt8x16(lhs, rhs, dest); break; case wasm::SimdOp::I16x8Add: masm.addInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I16x8AddSatS: masm.addSatInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I16x8AddSatU: masm.unsignedAddSatInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I16x8Sub: masm.subInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I16x8SubSatS: masm.subSatInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I16x8SubSatU: masm.unsignedSubSatInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I16x8Mul: masm.mulInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I16x8MinS: masm.minInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I16x8MinU: masm.unsignedMinInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I16x8MaxS: masm.maxInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I16x8MaxU: masm.unsignedMaxInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I32x4Add: masm.addInt32x4(lhs, rhs, dest); break; case wasm::SimdOp::I32x4Sub: masm.subInt32x4(lhs, rhs, dest); break; case wasm::SimdOp::I32x4Mul: masm.mulInt32x4(lhs, rhs, dest); break; case wasm::SimdOp::I32x4MinS: masm.minInt32x4(lhs, rhs, dest); break; case wasm::SimdOp::I32x4MinU: masm.unsignedMinInt32x4(lhs, rhs, dest); break; case wasm::SimdOp::I32x4MaxS: masm.maxInt32x4(lhs, rhs, dest); break; case wasm::SimdOp::I32x4MaxU: masm.unsignedMaxInt32x4(lhs, rhs, dest); break; case wasm::SimdOp::I64x2Add: masm.addInt64x2(lhs, rhs, dest); break; case wasm::SimdOp::I64x2Sub: masm.subInt64x2(lhs, rhs, dest); break; case wasm::SimdOp::I64x2Mul: { auto temp1 = ToFloatRegister(ins->temp0()); auto temp2 = ToFloatRegister(ins->temp1()); masm.mulInt64x2(lhs, rhs, dest, temp1, temp2); break; } case wasm::SimdOp::F32x4Add: masm.addFloat32x4(lhs, rhs, dest); break; case wasm::SimdOp::F32x4Sub: masm.subFloat32x4(lhs, rhs, dest); break; case wasm::SimdOp::F32x4Mul: masm.mulFloat32x4(lhs, rhs, dest); break; case wasm::SimdOp::F32x4Div: masm.divFloat32x4(lhs, rhs, dest); break; case wasm::SimdOp::F32x4Min: masm.minFloat32x4(lhs, rhs, dest); break; case wasm::SimdOp::F32x4Max: masm.maxFloat32x4(lhs, rhs, dest); break; case wasm::SimdOp::F64x2Add: masm.addFloat64x2(lhs, rhs, dest); break; case wasm::SimdOp::F64x2Sub: masm.subFloat64x2(lhs, rhs, dest); break; case wasm::SimdOp::F64x2Mul: masm.mulFloat64x2(lhs, rhs, dest); break; case wasm::SimdOp::F64x2Div: masm.divFloat64x2(lhs, rhs, dest); break; case wasm::SimdOp::F64x2Min: masm.minFloat64x2(lhs, rhs, dest); break; case wasm::SimdOp::F64x2Max: masm.maxFloat64x2(lhs, rhs, dest); break; case wasm::SimdOp::I8x16Swizzle: masm.swizzleInt8x16(lhs, rhs, dest); break; case wasm::SimdOp::I8x16RelaxedSwizzle: masm.swizzleInt8x16Relaxed(lhs, rhs, dest); break; case wasm::SimdOp::I8x16NarrowI16x8S: masm.narrowInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I8x16NarrowI16x8U: masm.unsignedNarrowInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I16x8NarrowI32x4S: masm.narrowInt32x4(lhs, rhs, dest); break; case wasm::SimdOp::I16x8NarrowI32x4U: masm.unsignedNarrowInt32x4(lhs, rhs, dest); break; case wasm::SimdOp::I8x16Eq: 
masm.compareInt8x16(Assembler::Equal, lhs, rhs, dest); break; case wasm::SimdOp::I8x16Ne: masm.compareInt8x16(Assembler::NotEqual, lhs, rhs, dest); break; case wasm::SimdOp::I8x16LtS: masm.compareInt8x16(Assembler::LessThan, lhs, rhs, dest); break; case wasm::SimdOp::I8x16GtS: masm.compareInt8x16(Assembler::GreaterThan, lhs, rhs, dest); break; case wasm::SimdOp::I8x16LeS: masm.compareInt8x16(Assembler::LessThanOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::I8x16GeS: masm.compareInt8x16(Assembler::GreaterThanOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::I8x16LtU: masm.compareInt8x16(Assembler::Below, lhs, rhs, dest); break; case wasm::SimdOp::I8x16GtU: masm.compareInt8x16(Assembler::Above, lhs, rhs, dest); break; case wasm::SimdOp::I8x16LeU: masm.compareInt8x16(Assembler::BelowOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::I8x16GeU: masm.compareInt8x16(Assembler::AboveOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::I16x8Eq: masm.compareInt16x8(Assembler::Equal, lhs, rhs, dest); break; case wasm::SimdOp::I16x8Ne: masm.compareInt16x8(Assembler::NotEqual, lhs, rhs, dest); break; case wasm::SimdOp::I16x8LtS: masm.compareInt16x8(Assembler::LessThan, lhs, rhs, dest); break; case wasm::SimdOp::I16x8GtS: masm.compareInt16x8(Assembler::GreaterThan, lhs, rhs, dest); break; case wasm::SimdOp::I16x8LeS: masm.compareInt16x8(Assembler::LessThanOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::I16x8GeS: masm.compareInt16x8(Assembler::GreaterThanOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::I16x8LtU: masm.compareInt16x8(Assembler::Below, lhs, rhs, dest); break; case wasm::SimdOp::I16x8GtU: masm.compareInt16x8(Assembler::Above, lhs, rhs, dest); break; case wasm::SimdOp::I16x8LeU: masm.compareInt16x8(Assembler::BelowOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::I16x8GeU: masm.compareInt16x8(Assembler::AboveOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::I32x4Eq: masm.compareInt32x4(Assembler::Equal, lhs, rhs, dest); break; case wasm::SimdOp::I32x4Ne: masm.compareInt32x4(Assembler::NotEqual, lhs, rhs, dest); break; case wasm::SimdOp::I32x4LtS: masm.compareInt32x4(Assembler::LessThan, lhs, rhs, dest); break; case wasm::SimdOp::I32x4GtS: masm.compareInt32x4(Assembler::GreaterThan, lhs, rhs, dest); break; case wasm::SimdOp::I32x4LeS: masm.compareInt32x4(Assembler::LessThanOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::I32x4GeS: masm.compareInt32x4(Assembler::GreaterThanOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::I32x4LtU: masm.compareInt32x4(Assembler::Below, lhs, rhs, dest); break; case wasm::SimdOp::I32x4GtU: masm.compareInt32x4(Assembler::Above, lhs, rhs, dest); break; case wasm::SimdOp::I32x4LeU: masm.compareInt32x4(Assembler::BelowOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::I32x4GeU: masm.compareInt32x4(Assembler::AboveOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::I64x2Eq: masm.compareInt64x2(Assembler::Equal, lhs, rhs, dest); break; case wasm::SimdOp::I64x2LtS: masm.compareInt64x2(Assembler::LessThan, lhs, rhs, dest); break; case wasm::SimdOp::I64x2GtS: masm.compareInt64x2(Assembler::GreaterThan, lhs, rhs, dest); break; case wasm::SimdOp::I64x2LeS: masm.compareInt64x2(Assembler::LessThanOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::I64x2GeS: masm.compareInt64x2(Assembler::GreaterThanOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::I64x2Ne: masm.compareInt64x2(Assembler::NotEqual, lhs, rhs, dest); break; case wasm::SimdOp::F32x4Eq: masm.compareFloat32x4(Assembler::Equal, lhs, rhs, dest); break; case wasm::SimdOp::F32x4Ne: 
masm.compareFloat32x4(Assembler::NotEqual, lhs, rhs, dest); break; case wasm::SimdOp::F32x4Lt: masm.compareFloat32x4(Assembler::LessThan, lhs, rhs, dest); break; case wasm::SimdOp::F32x4Gt: masm.compareFloat32x4(Assembler::GreaterThan, lhs, rhs, dest); break; case wasm::SimdOp::F32x4Le: masm.compareFloat32x4(Assembler::LessThanOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::F32x4Ge: masm.compareFloat32x4(Assembler::GreaterThanOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::F64x2Eq: masm.compareFloat64x2(Assembler::Equal, lhs, rhs, dest); break; case wasm::SimdOp::F64x2Ne: masm.compareFloat64x2(Assembler::NotEqual, lhs, rhs, dest); break; case wasm::SimdOp::F64x2Lt: masm.compareFloat64x2(Assembler::LessThan, lhs, rhs, dest); break; case wasm::SimdOp::F64x2Gt: masm.compareFloat64x2(Assembler::GreaterThan, lhs, rhs, dest); break; case wasm::SimdOp::F64x2Le: masm.compareFloat64x2(Assembler::LessThanOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::F64x2Ge: masm.compareFloat64x2(Assembler::GreaterThanOrEqual, lhs, rhs, dest); break; case wasm::SimdOp::F32x4PMax: masm.pseudoMaxFloat32x4(lhs, rhs, dest); break; case wasm::SimdOp::F32x4PMin: masm.pseudoMinFloat32x4(lhs, rhs, dest); break; case wasm::SimdOp::F64x2PMax: masm.pseudoMaxFloat64x2(lhs, rhs, dest); break; case wasm::SimdOp::F64x2PMin: masm.pseudoMinFloat64x2(lhs, rhs, dest); break; case wasm::SimdOp::I32x4DotI16x8S: masm.widenDotInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I16x8ExtmulLowI8x16S: masm.extMulLowInt8x16(lhs, rhs, dest); break; case wasm::SimdOp::I16x8ExtmulHighI8x16S: masm.extMulHighInt8x16(lhs, rhs, dest); break; case wasm::SimdOp::I16x8ExtmulLowI8x16U: masm.unsignedExtMulLowInt8x16(lhs, rhs, dest); break; case wasm::SimdOp::I16x8ExtmulHighI8x16U: masm.unsignedExtMulHighInt8x16(lhs, rhs, dest); break; case wasm::SimdOp::I32x4ExtmulLowI16x8S: masm.extMulLowInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I32x4ExtmulHighI16x8S: masm.extMulHighInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I32x4ExtmulLowI16x8U: masm.unsignedExtMulLowInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I32x4ExtmulHighI16x8U: masm.unsignedExtMulHighInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::I64x2ExtmulLowI32x4S: masm.extMulLowInt32x4(lhs, rhs, dest); break; case wasm::SimdOp::I64x2ExtmulHighI32x4S: masm.extMulHighInt32x4(lhs, rhs, dest); break; case wasm::SimdOp::I64x2ExtmulLowI32x4U: masm.unsignedExtMulLowInt32x4(lhs, rhs, dest); break; case wasm::SimdOp::I64x2ExtmulHighI32x4U: masm.unsignedExtMulHighInt32x4(lhs, rhs, dest); break; case wasm::SimdOp::I16x8Q15MulrSatS: masm.q15MulrSatInt16x8(lhs, rhs, dest); break; case wasm::SimdOp::F32x4RelaxedMin: masm.minFloat32x4Relaxed(lhs, rhs, dest); break; case wasm::SimdOp::F32x4RelaxedMax: masm.maxFloat32x4Relaxed(lhs, rhs, dest); break; case wasm::SimdOp::F64x2RelaxedMin: masm.minFloat64x2Relaxed(lhs, rhs, dest); break; case wasm::SimdOp::F64x2RelaxedMax: masm.maxFloat64x2Relaxed(lhs, rhs, dest); break; case wasm::SimdOp::I16x8RelaxedQ15MulrS: masm.q15MulrInt16x8Relaxed(lhs, rhs, dest); break; case wasm::SimdOp::I16x8RelaxedDotI8x16I7x16S: masm.dotInt8x16Int7x16(lhs, rhs, dest); break; default: MOZ_CRASH("Binary SimdOp not implemented"); } #else MOZ_CRASH("No SIMD"); #endif } void CodeGenerator::visitWasmBinarySimd128WithConstant( LWasmBinarySimd128WithConstant* ins) { MOZ_CRASH("No SIMD"); } void CodeGenerator::visitWasmVariableShiftSimd128( LWasmVariableShiftSimd128* ins) { #ifdef ENABLE_WASM_SIMD FloatRegister lhs = ToFloatRegister(ins->lhs()); Register rhs = 
ToRegister(ins->rhs());
  FloatRegister dest = ToFloatRegister(ins->output());
  switch (ins->mir()->simdOp()) {
    case wasm::SimdOp::I8x16Shl:
      masm.leftShiftInt8x16(lhs, rhs, dest);
      break;
    case wasm::SimdOp::I8x16ShrS:
      masm.rightShiftInt8x16(lhs, rhs, dest);
      break;
    case wasm::SimdOp::I8x16ShrU:
      masm.unsignedRightShiftInt8x16(lhs, rhs, dest);
      break;
    case wasm::SimdOp::I16x8Shl:
      masm.leftShiftInt16x8(lhs, rhs, dest);
      break;
    case wasm::SimdOp::I16x8ShrS:
      masm.rightShiftInt16x8(lhs, rhs, dest);
      break;
    case wasm::SimdOp::I16x8ShrU:
      masm.unsignedRightShiftInt16x8(lhs, rhs, dest);
      break;
    case wasm::SimdOp::I32x4Shl:
      masm.leftShiftInt32x4(lhs, rhs, dest);
      break;
    case wasm::SimdOp::I32x4ShrS:
      masm.rightShiftInt32x4(lhs, rhs, dest);
      break;
    case wasm::SimdOp::I32x4ShrU:
      masm.unsignedRightShiftInt32x4(lhs, rhs, dest);
      break;
    case wasm::SimdOp::I64x2Shl:
      masm.leftShiftInt64x2(lhs, rhs, dest);
      break;
    case wasm::SimdOp::I64x2ShrS:
      masm.rightShiftInt64x2(lhs, rhs, dest);
      break;
    case wasm::SimdOp::I64x2ShrU:
      masm.unsignedRightShiftInt64x2(lhs, rhs, dest);
      break;
    default:
      MOZ_CRASH("Shift SimdOp not implemented");
  }
#else
  MOZ_CRASH("No SIMD");
#endif
}

void CodeGenerator::visitWasmConstantShiftSimd128(
    LWasmConstantShiftSimd128* ins) {
#ifdef ENABLE_WASM_SIMD
  FloatRegister src = ToFloatRegister(ins->src());
  FloatRegister dest = ToFloatRegister(ins->output());
  int32_t shift = ins->shift();

  if (shift == 0) {
    if (src != dest) {
      masm.moveSimd128(src, dest);
    }
    return;
  }

  switch (ins->mir()->simdOp()) {
    case wasm::SimdOp::I8x16Shl:
      masm.leftShiftInt8x16(Imm32(shift), src, dest);
      break;
    case wasm::SimdOp::I8x16ShrS:
      masm.rightShiftInt8x16(Imm32(shift), src, dest);
      break;
    case wasm::SimdOp::I8x16ShrU:
      masm.unsignedRightShiftInt8x16(Imm32(shift), src, dest);
      break;
    case wasm::SimdOp::I16x8Shl:
      masm.leftShiftInt16x8(Imm32(shift), src, dest);
      break;
    case wasm::SimdOp::I16x8ShrS:
      masm.rightShiftInt16x8(Imm32(shift), src, dest);
      break;
    case wasm::SimdOp::I16x8ShrU:
      masm.unsignedRightShiftInt16x8(Imm32(shift), src, dest);
      break;
    case wasm::SimdOp::I32x4Shl:
      masm.leftShiftInt32x4(Imm32(shift), src, dest);
      break;
    case wasm::SimdOp::I32x4ShrS:
      masm.rightShiftInt32x4(Imm32(shift), src, dest);
      break;
    case wasm::SimdOp::I32x4ShrU:
      masm.unsignedRightShiftInt32x4(Imm32(shift), src, dest);
      break;
    case wasm::SimdOp::I64x2Shl:
      masm.leftShiftInt64x2(Imm32(shift), src, dest);
      break;
    case wasm::SimdOp::I64x2ShrS:
      masm.rightShiftInt64x2(Imm32(shift), src, dest);
      break;
    case wasm::SimdOp::I64x2ShrU:
      masm.unsignedRightShiftInt64x2(Imm32(shift), src, dest);
      break;
    default:
      MOZ_CRASH("Shift SimdOp not implemented");
  }
#else
  MOZ_CRASH("No SIMD");
#endif
}

void CodeGenerator::visitWasmSignReplicationSimd128(
    LWasmSignReplicationSimd128* ins) {
  MOZ_CRASH("No SIMD");
}

void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
#ifdef ENABLE_WASM_SIMD
  FloatRegister lhs = ToFloatRegister(ins->lhs());
  FloatRegister rhs = ToFloatRegister(ins->rhs());
  FloatRegister dest = ToFloatRegister(ins->output());
  SimdConstant control = ins->control();
  switch (ins->op()) {
    case SimdShuffleOp::BLEND_8x16: {
      masm.blendInt8x16(reinterpret_cast<const uint8_t*>(control.asInt8x16()),
                        lhs, rhs, dest);
      break;
    }
    case SimdShuffleOp::BLEND_16x8: {
      masm.blendInt16x8(reinterpret_cast<const uint16_t*>(control.asInt16x8()),
                        lhs, rhs, dest);
      break;
    }
    case SimdShuffleOp::CONCAT_RIGHT_SHIFT_8x16: {
      int8_t count = 16 - control.asInt8x16()[0];
      MOZ_ASSERT(count > 0, "Should have been a MOVE operation");
      masm.concatAndRightShiftSimd128(lhs, rhs, dest, count);
      break;
    }
    case SimdShuffleOp::INTERLEAVE_HIGH_8x16: {
      masm.interleaveHighInt8x16(lhs, rhs, dest);
      break;
    }
    case SimdShuffleOp::INTERLEAVE_HIGH_16x8: {
      masm.interleaveHighInt16x8(lhs, rhs, dest);
      break;
    }
    case SimdShuffleOp::INTERLEAVE_HIGH_32x4: {
      masm.interleaveHighInt32x4(lhs, rhs, dest);
      break;
    }
    case SimdShuffleOp::INTERLEAVE_HIGH_64x2: {
      masm.interleaveHighInt64x2(lhs, rhs, dest);
      break;
    }
    case SimdShuffleOp::INTERLEAVE_LOW_8x16: {
      masm.interleaveLowInt8x16(lhs, rhs, dest);
      break;
    }
    case SimdShuffleOp::INTERLEAVE_LOW_16x8: {
      masm.interleaveLowInt16x8(lhs, rhs, dest);
      break;
    }
    case SimdShuffleOp::INTERLEAVE_LOW_32x4: {
      masm.interleaveLowInt32x4(lhs, rhs, dest);
      break;
    }
    case SimdShuffleOp::INTERLEAVE_LOW_64x2: {
      masm.interleaveLowInt64x2(lhs, rhs, dest);
      break;
    }
    case SimdShuffleOp::SHUFFLE_BLEND_8x16: {
      masm.shuffleInt8x16(
          reinterpret_cast<const uint8_t*>(control.asInt8x16()), lhs, rhs,
          dest);
      break;
    }
    default: {
      MOZ_CRASH("Unsupported SIMD shuffle operation");
    }
  }
#else
  MOZ_CRASH("No SIMD");
#endif
}

void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
#ifdef ENABLE_WASM_SIMD
  FloatRegister src = ToFloatRegister(ins->src());
  FloatRegister dest = ToFloatRegister(ins->output());
  SimdConstant control = ins->control();
  switch (ins->op()) {
    case SimdPermuteOp::BROADCAST_8x16: {
      const SimdConstant::I8x16& mask = control.asInt8x16();
      int8_t source = mask[0];
      masm.splatX16(source, src, dest);
      break;
    }
    case SimdPermuteOp::BROADCAST_16x8: {
      const SimdConstant::I16x8& mask = control.asInt16x8();
      int16_t source = mask[0];
      masm.splatX8(source, src, dest);
      break;
    }
    case SimdPermuteOp::MOVE: {
      masm.moveSimd128(src, dest);
      break;
    }
    case SimdPermuteOp::PERMUTE_8x16: {
      const SimdConstant::I8x16& mask = control.asInt8x16();
#  ifdef DEBUG
      mozilla::DebugOnly<int> i;
      for (i = 0; i < 16 && mask[i] == i; i++) {
      }
      MOZ_ASSERT(i < 16, "Should have been a MOVE operation");
#  endif
      masm.permuteInt8x16(reinterpret_cast<const uint8_t*>(mask), src, dest);
      break;
    }
    case SimdPermuteOp::PERMUTE_16x8: {
      const SimdConstant::I16x8& mask = control.asInt16x8();
#  ifdef DEBUG
      mozilla::DebugOnly<int> i;
      for (i = 0; i < 8 && mask[i] == i; i++) {
      }
      MOZ_ASSERT(i < 8, "Should have been a MOVE operation");
#  endif
      masm.permuteInt16x8(reinterpret_cast<const uint16_t*>(mask), src, dest);
      break;
    }
    case SimdPermuteOp::PERMUTE_32x4: {
      const SimdConstant::I32x4& mask = control.asInt32x4();
#  ifdef DEBUG
      mozilla::DebugOnly<int> i;
      for (i = 0; i < 4 && mask[i] == i; i++) {
      }
      MOZ_ASSERT(i < 4, "Should have been a MOVE operation");
#  endif
      masm.permuteInt32x4(reinterpret_cast<const uint32_t*>(mask), src, dest);
      break;
    }
    case SimdPermuteOp::ROTATE_RIGHT_8x16: {
      int8_t count = control.asInt8x16()[0];
      MOZ_ASSERT(count > 0, "Should have been a MOVE operation");
      masm.rotateRightSimd128(src, dest, count);
      break;
    }
    case SimdPermuteOp::SHIFT_LEFT_8x16: {
      int8_t count = control.asInt8x16()[0];
      MOZ_ASSERT(count > 0, "Should have been a MOVE operation");
      masm.leftShiftSimd128(Imm32(count), src, dest);
      break;
    }
    case SimdPermuteOp::SHIFT_RIGHT_8x16: {
      int8_t count = control.asInt8x16()[0];
      MOZ_ASSERT(count > 0, "Should have been a MOVE operation");
      masm.rightShiftSimd128(Imm32(count), src, dest);
      break;
    }
    case SimdPermuteOp::ZERO_EXTEND_8x16_TO_16x8:
      masm.zeroExtend8x16To16x8(src, dest);
      break;
    case SimdPermuteOp::ZERO_EXTEND_8x16_TO_32x4:
      masm.zeroExtend8x16To32x4(src, dest);
      break;
    case SimdPermuteOp::ZERO_EXTEND_8x16_TO_64x2:
      masm.zeroExtend8x16To64x2(src, dest);
      break;
    case SimdPermuteOp::ZERO_EXTEND_16x8_TO_32x4:
      masm.zeroExtend16x8To32x4(src, dest);
      break;
    case SimdPermuteOp::ZERO_EXTEND_16x8_TO_64x2:
masm.zeroExtend16x8To64x2(src, dest); break; case SimdPermuteOp::ZERO_EXTEND_32x4_TO_64x2: masm.zeroExtend32x4To64x2(src, dest); break; case SimdPermuteOp::REVERSE_16x8: masm.reverseInt16x8(src, dest); break; case SimdPermuteOp::REVERSE_32x4: masm.reverseInt32x4(src, dest); break; case SimdPermuteOp::REVERSE_64x2: masm.reverseInt64x2(src, dest); break; default: { MOZ_CRASH("Unsupported SIMD permutation operation"); } } #else MOZ_CRASH("No SIMD"); #endif } void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) { #ifdef ENABLE_WASM_SIMD MOZ_ASSERT(ToFloatRegister(ins->lhs()) == ToFloatRegister(ins->output())); FloatRegister lhsDest = ToFloatRegister(ins->lhs()); const LAllocation* rhs = ins->rhs(); uint32_t laneIndex = ins->mir()->laneIndex(); switch (ins->mir()->simdOp()) { case wasm::SimdOp::I8x16ReplaceLane: masm.replaceLaneInt8x16(laneIndex, ToRegister(rhs), lhsDest); break; case wasm::SimdOp::I16x8ReplaceLane: masm.replaceLaneInt16x8(laneIndex, ToRegister(rhs), lhsDest); break; case wasm::SimdOp::I32x4ReplaceLane: masm.replaceLaneInt32x4(laneIndex, ToRegister(rhs), lhsDest); break; case wasm::SimdOp::F32x4ReplaceLane: masm.replaceLaneFloat32x4(laneIndex, ToFloatRegister(rhs), lhsDest); break; case wasm::SimdOp::F64x2ReplaceLane: masm.replaceLaneFloat64x2(laneIndex, ToFloatRegister(rhs), lhsDest); break; default: MOZ_CRASH("ReplaceLane SimdOp not implemented"); } #else MOZ_CRASH("No SIMD"); #endif } void CodeGenerator::visitWasmReplaceInt64LaneSimd128( LWasmReplaceInt64LaneSimd128* ins) { #ifdef ENABLE_WASM_SIMD MOZ_RELEASE_ASSERT(ins->mir()->simdOp() == wasm::SimdOp::I64x2ReplaceLane); MOZ_ASSERT(ToFloatRegister(ins->lhs()) == ToFloatRegister(ins->output())); masm.replaceLaneInt64x2(ins->mir()->laneIndex(), ToRegister64(ins->rhs()), ToFloatRegister(ins->lhs())); #else MOZ_CRASH("No SIMD"); #endif } void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) { #ifdef ENABLE_WASM_SIMD FloatRegister dest = ToFloatRegister(ins->output()); switch (ins->mir()->simdOp()) { case wasm::SimdOp::I8x16Splat: masm.splatX16(ToRegister(ins->src()), dest); break; case wasm::SimdOp::I16x8Splat: masm.splatX8(ToRegister(ins->src()), dest); break; case wasm::SimdOp::I32x4Splat: masm.splatX4(ToRegister(ins->src()), dest); break; case wasm::SimdOp::F32x4Splat: masm.splatX4(ToFloatRegister(ins->src()), dest); break; case wasm::SimdOp::F64x2Splat: masm.splatX2(ToFloatRegister(ins->src()), dest); break; default: MOZ_CRASH("ScalarToSimd128 SimdOp not implemented"); } #else MOZ_CRASH("No SIMD"); #endif } void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) { #ifdef ENABLE_WASM_SIMD Register64 src = ToRegister64(ins->src()); FloatRegister dest = ToFloatRegister(ins->output()); switch (ins->mir()->simdOp()) { case wasm::SimdOp::I64x2Splat: masm.splatX2(src, dest); break; case wasm::SimdOp::V128Load8x8S: masm.moveGPR64ToDouble(src, dest); masm.widenLowInt8x16(dest, dest); break; case wasm::SimdOp::V128Load8x8U: masm.moveGPR64ToDouble(src, dest); masm.unsignedWidenLowInt8x16(dest, dest); break; case wasm::SimdOp::V128Load16x4S: masm.moveGPR64ToDouble(src, dest); masm.widenLowInt16x8(dest, dest); break; case wasm::SimdOp::V128Load16x4U: masm.moveGPR64ToDouble(src, dest); masm.unsignedWidenLowInt16x8(dest, dest); break; case wasm::SimdOp::V128Load32x2S: masm.moveGPR64ToDouble(src, dest); masm.widenLowInt32x4(dest, dest); break; case wasm::SimdOp::V128Load32x2U: masm.moveGPR64ToDouble(src, dest); masm.unsignedWidenLowInt32x4(dest, dest); break; default: 
MOZ_CRASH("Int64ToSimd128 SimdOp not implemented"); } #else MOZ_CRASH("No SIMD"); #endif } void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) { #ifdef ENABLE_WASM_SIMD FloatRegister src = ToFloatRegister(ins->src()); FloatRegister dest = ToFloatRegister(ins->output()); switch (ins->mir()->simdOp()) { case wasm::SimdOp::I8x16Neg: masm.negInt8x16(src, dest); break; case wasm::SimdOp::I16x8Neg: masm.negInt16x8(src, dest); break; case wasm::SimdOp::I16x8ExtendLowI8x16S: masm.widenLowInt8x16(src, dest); break; case wasm::SimdOp::I16x8ExtendHighI8x16S: masm.widenHighInt8x16(src, dest); break; case wasm::SimdOp::I16x8ExtendLowI8x16U: masm.unsignedWidenLowInt8x16(src, dest); break; case wasm::SimdOp::I16x8ExtendHighI8x16U: masm.unsignedWidenHighInt8x16(src, dest); break; case wasm::SimdOp::I32x4Neg: masm.negInt32x4(src, dest); break; case wasm::SimdOp::I32x4ExtendLowI16x8S: masm.widenLowInt16x8(src, dest); break; case wasm::SimdOp::I32x4ExtendHighI16x8S: masm.widenHighInt16x8(src, dest); break; case wasm::SimdOp::I32x4ExtendLowI16x8U: masm.unsignedWidenLowInt16x8(src, dest); break; case wasm::SimdOp::I32x4ExtendHighI16x8U: masm.unsignedWidenHighInt16x8(src, dest); break; case wasm::SimdOp::I32x4TruncSatF32x4S: masm.truncSatFloat32x4ToInt32x4(src, dest); break; case wasm::SimdOp::I32x4TruncSatF32x4U: masm.unsignedTruncSatFloat32x4ToInt32x4(src, dest); break; case wasm::SimdOp::I64x2Neg: masm.negInt64x2(src, dest); break; case wasm::SimdOp::I64x2ExtendLowI32x4S: masm.widenLowInt32x4(src, dest); break; case wasm::SimdOp::I64x2ExtendHighI32x4S: masm.widenHighInt32x4(src, dest); break; case wasm::SimdOp::I64x2ExtendLowI32x4U: masm.unsignedWidenLowInt32x4(src, dest); break; case wasm::SimdOp::I64x2ExtendHighI32x4U: masm.unsignedWidenHighInt32x4(src, dest); break; case wasm::SimdOp::F32x4Abs: masm.absFloat32x4(src, dest); break; case wasm::SimdOp::F32x4Neg: masm.negFloat32x4(src, dest); break; case wasm::SimdOp::F32x4Sqrt: masm.sqrtFloat32x4(src, dest); break; case wasm::SimdOp::F32x4ConvertI32x4S: masm.convertInt32x4ToFloat32x4(src, dest); break; case wasm::SimdOp::F32x4ConvertI32x4U: masm.unsignedConvertInt32x4ToFloat32x4(src, dest); break; case wasm::SimdOp::F64x2Abs: masm.absFloat64x2(src, dest); break; case wasm::SimdOp::F64x2Neg: masm.negFloat64x2(src, dest); break; case wasm::SimdOp::F64x2Sqrt: masm.sqrtFloat64x2(src, dest); break; case wasm::SimdOp::V128Not: masm.bitwiseNotSimd128(src, dest); break; case wasm::SimdOp::I8x16Abs: masm.absInt8x16(src, dest); break; case wasm::SimdOp::I16x8Abs: masm.absInt16x8(src, dest); break; case wasm::SimdOp::I32x4Abs: masm.absInt32x4(src, dest); break; case wasm::SimdOp::I64x2Abs: masm.absInt64x2(src, dest); break; case wasm::SimdOp::F32x4Ceil: masm.ceilFloat32x4(src, dest); break; case wasm::SimdOp::F32x4Floor: masm.floorFloat32x4(src, dest); break; case wasm::SimdOp::F32x4Trunc: masm.truncFloat32x4(src, dest); break; case wasm::SimdOp::F32x4Nearest: masm.nearestFloat32x4(src, dest); break; case wasm::SimdOp::F64x2Ceil: masm.ceilFloat64x2(src, dest); break; case wasm::SimdOp::F64x2Floor: masm.floorFloat64x2(src, dest); break; case wasm::SimdOp::F64x2Trunc: masm.truncFloat64x2(src, dest); break; case wasm::SimdOp::F64x2Nearest: masm.nearestFloat64x2(src, dest); break; case wasm::SimdOp::F32x4DemoteF64x2Zero: masm.convertFloat64x2ToFloat32x4(src, dest); break; case wasm::SimdOp::F64x2PromoteLowF32x4: masm.convertFloat32x4ToFloat64x2(src, dest); break; case wasm::SimdOp::F64x2ConvertLowI32x4S: masm.convertInt32x4ToFloat64x2(src, dest); break; case 
wasm::SimdOp::F64x2ConvertLowI32x4U: masm.unsignedConvertInt32x4ToFloat64x2(src, dest); break; case wasm::SimdOp::I32x4TruncSatF64x2SZero: masm.truncSatFloat64x2ToInt32x4(src, dest, ToFloatRegister(ins->temp0())); break; case wasm::SimdOp::I32x4TruncSatF64x2UZero: masm.unsignedTruncSatFloat64x2ToInt32x4(src, dest, ToFloatRegister(ins->temp0())); break; case wasm::SimdOp::I16x8ExtaddPairwiseI8x16S: masm.extAddPairwiseInt8x16(src, dest); break; case wasm::SimdOp::I16x8ExtaddPairwiseI8x16U: masm.unsignedExtAddPairwiseInt8x16(src, dest); break; case wasm::SimdOp::I32x4ExtaddPairwiseI16x8S: masm.extAddPairwiseInt16x8(src, dest); break; case wasm::SimdOp::I32x4ExtaddPairwiseI16x8U: masm.unsignedExtAddPairwiseInt16x8(src, dest); break; case wasm::SimdOp::I8x16Popcnt: masm.popcntInt8x16(src, dest); break; case wasm::SimdOp::I32x4RelaxedTruncF32x4S: masm.truncFloat32x4ToInt32x4Relaxed(src, dest); break; case wasm::SimdOp::I32x4RelaxedTruncF32x4U: masm.unsignedTruncFloat32x4ToInt32x4Relaxed(src, dest); break; case wasm::SimdOp::I32x4RelaxedTruncF64x2SZero: masm.truncFloat64x2ToInt32x4Relaxed(src, dest); break; case wasm::SimdOp::I32x4RelaxedTruncF64x2UZero: masm.unsignedTruncFloat64x2ToInt32x4Relaxed(src, dest); break; default: MOZ_CRASH("Unary SimdOp not implemented"); } #else MOZ_CRASH("No SIMD"); #endif } void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) { #ifdef ENABLE_WASM_SIMD FloatRegister src = ToFloatRegister(ins->src()); const LDefinition* dest = ins->output(); uint32_t imm = ins->mir()->imm(); FloatRegister temp = ToTempFloatRegisterOrInvalid(ins->temp0()); switch (ins->mir()->simdOp()) { case wasm::SimdOp::V128AnyTrue: masm.anyTrueSimd128(src, ToRegister(dest)); break; case wasm::SimdOp::I8x16AllTrue: masm.allTrueInt8x16(src, ToRegister(dest)); break; case wasm::SimdOp::I16x8AllTrue: masm.allTrueInt16x8(src, ToRegister(dest)); break; case wasm::SimdOp::I32x4AllTrue: masm.allTrueInt32x4(src, ToRegister(dest)); break; case wasm::SimdOp::I64x2AllTrue: masm.allTrueInt64x2(src, ToRegister(dest)); break; case wasm::SimdOp::I8x16Bitmask: masm.bitmaskInt8x16(src, ToRegister(dest), temp); break; case wasm::SimdOp::I16x8Bitmask: masm.bitmaskInt16x8(src, ToRegister(dest), temp); break; case wasm::SimdOp::I32x4Bitmask: masm.bitmaskInt32x4(src, ToRegister(dest), temp); break; case wasm::SimdOp::I64x2Bitmask: masm.bitmaskInt64x2(src, ToRegister(dest), temp); break; case wasm::SimdOp::I8x16ExtractLaneS: masm.extractLaneInt8x16(imm, src, ToRegister(dest)); break; case wasm::SimdOp::I8x16ExtractLaneU: masm.unsignedExtractLaneInt8x16(imm, src, ToRegister(dest)); break; case wasm::SimdOp::I16x8ExtractLaneS: masm.extractLaneInt16x8(imm, src, ToRegister(dest)); break; case wasm::SimdOp::I16x8ExtractLaneU: masm.unsignedExtractLaneInt16x8(imm, src, ToRegister(dest)); break; case wasm::SimdOp::I32x4ExtractLane: masm.extractLaneInt32x4(imm, src, ToRegister(dest)); break; case wasm::SimdOp::F32x4ExtractLane: masm.extractLaneFloat32x4(imm, src, ToFloatRegister(dest)); break; case wasm::SimdOp::F64x2ExtractLane: masm.extractLaneFloat64x2(imm, src, ToFloatRegister(dest)); break; default: MOZ_CRASH("Reduce SimdOp not implemented"); } #else MOZ_CRASH("No SIMD"); #endif } void CodeGenerator::visitWasmReduceAndBranchSimd128( LWasmReduceAndBranchSimd128* ins) { #ifdef ENABLE_WASM_SIMD FloatRegister src = ToFloatRegister(ins->src()); ScratchSimd128Scope scratch(masm); vixl::UseScratchRegisterScope temps(&masm.asVIXL()); const Register test = temps.AcquireX().asUnsized(); switch (ins->simdOp()) { case 
void CodeGenerator::visitWasmReduceAndBranchSimd128(
    LWasmReduceAndBranchSimd128* ins) {
#ifdef ENABLE_WASM_SIMD
  FloatRegister src = ToFloatRegister(ins->src());
  ScratchSimd128Scope scratch(masm);
  vixl::UseScratchRegisterScope temps(&masm.asVIXL());
  const Register test = temps.AcquireX().asUnsized();

  switch (ins->simdOp()) {
    case wasm::SimdOp::V128AnyTrue:
      // Reduce to a scalar that is nonzero iff any bit of the input is set.
      // A pairwise add of the two 64-bit halves would be unsafe here: two
      // nonzero halves can sum to zero mod 2^64. The pairwise unsigned max
      // cannot produce that false negative.
      masm.Umaxp(Simd4S(scratch), Simd4S(src), Simd4S(src));
      masm.Umov(ARMRegister(test, 64), Simd1D(scratch), 0);
      masm.branch64(Assembler::Equal, Register64(test), Imm64(0),
                    getJumpLabelForBranch(ins->ifFalse()));
      jumpToBlock(ins->ifTrue());
      break;
    case wasm::SimdOp::I8x16AllTrue:
    case wasm::SimdOp::I16x8AllTrue:
    case wasm::SimdOp::I32x4AllTrue:
    case wasm::SimdOp::I64x2AllTrue: {
      // Compare all lanes to zero; the resulting mask is nonzero iff some
      // lane of the input was zero, i.e. iff allTrue is false.
      switch (ins->simdOp()) {
        case wasm::SimdOp::I8x16AllTrue:
          masm.Cmeq(Simd16B(scratch), Simd16B(src), 0);
          break;
        case wasm::SimdOp::I16x8AllTrue:
          masm.Cmeq(Simd8H(scratch), Simd8H(src), 0);
          break;
        case wasm::SimdOp::I32x4AllTrue:
          masm.Cmeq(Simd4S(scratch), Simd4S(src), 0);
          break;
        case wasm::SimdOp::I64x2AllTrue:
          masm.Cmeq(Simd2D(scratch), Simd2D(src), 0);
          break;
        default:
          MOZ_CRASH();
      }
      masm.Addp(Simd1D(scratch), Simd2D(scratch));
      masm.Umov(ARMRegister(test, 64), Simd1D(scratch), 0);
      masm.branch64(Assembler::NotEqual, Register64(test), Imm64(0),
                    getJumpLabelForBranch(ins->ifFalse()));
      jumpToBlock(ins->ifTrue());
      break;
    }
    default:
      MOZ_CRASH("Reduce-and-branch SimdOp not implemented");
  }
#else
  MOZ_CRASH("No SIMD");
#endif
}

void CodeGenerator::visitWasmReduceSimd128ToInt64(
    LWasmReduceSimd128ToInt64* ins) {
#ifdef ENABLE_WASM_SIMD
  FloatRegister src = ToFloatRegister(ins->src());
  Register64 dest = ToOutRegister64(ins);
  uint32_t imm = ins->mir()->imm();
  switch (ins->mir()->simdOp()) {
    case wasm::SimdOp::I64x2ExtractLane:
      masm.extractLaneInt64x2(imm, src, dest);
      break;
    default:
      MOZ_CRASH("Reduce SimdOp not implemented");
  }
#else
  MOZ_CRASH("No SIMD");
#endif
}

static inline wasm::MemoryAccessDesc DeriveMemoryAccessDesc(
    const wasm::MemoryAccessDesc& access, Scalar::Type type) {
  return wasm::MemoryAccessDesc(access.memoryIndex(), type, access.align(),
                                access.offset32(), access.trapDesc(),
                                access.isHugeMemory());
}

void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
#ifdef ENABLE_WASM_SIMD
  // Forward loading to wasmLoad, and use replaceLane after that.
  const MWasmLoadLaneSimd128* mir = ins->mir();
  Register memoryBase = ToRegister(ins->memoryBase());
  Register temp = ToRegister(ins->temp0());
  FloatRegister src = ToFloatRegister(ins->src());
  FloatRegister dest = ToFloatRegister(ins->output());

  // replaceLane takes an lhsDest argument.
  masm.moveSimd128(src, dest);
  switch (mir->laneSize()) {
    case 1: {
      masm.wasmLoad(DeriveMemoryAccessDesc(mir->access(), Scalar::Int8),
                    memoryBase, ToRegister(ins->ptr()), AnyRegister(temp));
      masm.replaceLaneInt8x16(mir->laneIndex(), temp, dest);
      break;
    }
    case 2: {
      masm.wasmLoad(DeriveMemoryAccessDesc(mir->access(), Scalar::Int16),
                    memoryBase, ToRegister(ins->ptr()), AnyRegister(temp));
      masm.replaceLaneInt16x8(mir->laneIndex(), temp, dest);
      break;
    }
    case 4: {
      masm.wasmLoad(DeriveMemoryAccessDesc(mir->access(), Scalar::Int32),
                    memoryBase, ToRegister(ins->ptr()), AnyRegister(temp));
      masm.replaceLaneInt32x4(mir->laneIndex(), temp, dest);
      break;
    }
    case 8: {
      masm.wasmLoadI64(DeriveMemoryAccessDesc(mir->access(), Scalar::Int64),
                       memoryBase, ToRegister(ins->ptr()), Register64(temp));
      masm.replaceLaneInt64x2(mir->laneIndex(), Register64(temp), dest);
      break;
    }
    default:
      MOZ_CRASH("Unsupported load lane size");
  }
#else
  MOZ_CRASH("No SIMD");
#endif
}
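// Editorial note: the functions above and below implement wasm's
// v128.loadN_lane / v128.storeN_lane instructions. Roughly, in wat
// (assuming lane index 2 and a v128 local $vec):
//
//   (v128.load32_lane 2 (local.get $addr) (local.get $vec))
//
// loads 32 bits from memory and replaces lane 2 of $vec, which the code
// above realizes as a scalar wasmLoad into a GPR temp followed by
// replaceLaneInt32x4; the store path below mirrors this with
// extractLaneInt32x4 plus a scalar wasmStore.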
void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
#ifdef ENABLE_WASM_SIMD
  // Extract the lane with extractLane, then forward the scalar to wasmStore.
  const MWasmStoreLaneSimd128* mir = ins->mir();
  Register memoryBase = ToRegister(ins->memoryBase());
  Register temp = ToRegister(ins->temp0());
  FloatRegister src = ToFloatRegister(ins->src());
  switch (mir->laneSize()) {
    case 1: {
      masm.extractLaneInt8x16(mir->laneIndex(), src, temp);
      masm.wasmStore(DeriveMemoryAccessDesc(mir->access(), Scalar::Int8),
                     AnyRegister(temp), memoryBase, ToRegister(ins->ptr()));
      break;
    }
    case 2: {
      masm.extractLaneInt16x8(mir->laneIndex(), src, temp);
      masm.wasmStore(DeriveMemoryAccessDesc(mir->access(), Scalar::Int16),
                     AnyRegister(temp), memoryBase, ToRegister(ins->ptr()));
      break;
    }
    case 4: {
      masm.extractLaneInt32x4(mir->laneIndex(), src, temp);
      masm.wasmStore(DeriveMemoryAccessDesc(mir->access(), Scalar::Int32),
                     AnyRegister(temp), memoryBase, ToRegister(ins->ptr()));
      break;
    }
    case 8: {
      masm.extractLaneInt64x2(mir->laneIndex(), src, Register64(temp));
      masm.wasmStoreI64(DeriveMemoryAccessDesc(mir->access(), Scalar::Int64),
                        Register64(temp), memoryBase, ToRegister(ins->ptr()));
      break;
    }
    default:
      MOZ_CRASH("Unsupported store lane size");
  }
#else
  MOZ_CRASH("No SIMD");
#endif
}

void CodeGenerator::visitWasmMulI64WideHI64(LWasmMulI64WideHI64* lir) {
  Register lhs = ToRegister(lir->lhs());
  Register rhs = ToRegister(lir->rhs());
  Register output = ToRegister(lir->output());

  // This holds because neither operand use is at-start, so the register
  // allocator cannot assign the output register to either input.
  MOZ_ASSERT(output != lhs && output != rhs);

  masm.wasmMulI64WideHI64(lhs, rhs, output, lir->isSigned());
}
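// Editorial note: wasmMulI64WideHI64 computes the high 64 bits of the
// infinitely-precise 128-bit product of two 64-bit values. A scalar
// reference (illustration only; MulWideHigh is not a helper in this file):
//
//   uint64_t MulWideHigh(uint64_t a, uint64_t b, bool isSigned) {
//     if (isSigned) {
//       return uint64_t(
//           (static_cast<__int128>(int64_t(a)) * int64_t(b)) >> 64);
//     }
//     return uint64_t((static_cast<unsigned __int128>(a) * b) >> 64);
//   }
//
// On ARM64 this presumably lowers to a single SMULH (signed) or UMULH
// (unsigned) instruction.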