Bug 1804104: Share more StoreDenseElementHole code r=jandem
The two code paths were already almost identical. The only differences were that the Ion code used setupAlignedABICall and had a small optimization to reuse the condition flags from an earlier test (on architectures with condition flags).

Differential Revision: https://phabricator.services.mozilla.com/D241531
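As a minimal sketch drawn from the hunks below (not standalone, runnable code): both the CacheIR and Ion out-of-line paths now call a single MacroAssembler helper instead of open-coding the grow-and-bump-initializedLength sequence. The declaration added to MacroAssembler.h is:

  void prepareOOBStoreElement(Register object, Register index,
                              Register elements, Register spectreTemp,
                              Label* failure, LiveRegisterSet volatileLiveRegs);

and a typical call site (here the Ion one, where the failure label feeds a bailout) looks like:

  Label bail;
  masm.prepareOOBStoreElement(obj, index, elements, temp, &bail,
                              liveVolatileRegs(lir));
  bailoutFrom(&bail, lir->snapshot());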
@@ -6984,48 +6984,9 @@ bool CacheIRCompiler::emitStoreDenseElementHole(ObjOperandId objId,
  masm.spectreBoundsCheck32(index, initLength, spectreTemp, &outOfBounds);
  masm.jump(&inBounds);

  // If we're out-of-bounds, only handle the index == initLength case.
  masm.bind(&outOfBounds);
  masm.branch32(Assembler::NotEqual, initLength, index, failure->label());

  // If index < capacity, we can add a dense element inline. If not we
  // need to allocate more elements.
  Label allocElement, addNewElement;
  Address capacity(scratch, ObjectElements::offsetOfCapacity());
  masm.spectreBoundsCheck32(index, capacity, spectreTemp, &allocElement);
  masm.jump(&addNewElement);

  masm.bind(&allocElement);

  LiveRegisterSet save = liveVolatileRegs();
  save.takeUnchecked(scratch);
  masm.PushRegsInMask(save);

  using Fn = bool (*)(JSContext* cx, NativeObject* obj);
  masm.setupUnalignedABICall(scratch);
  masm.loadJSContext(scratch);
  masm.passABIArg(scratch);
  masm.passABIArg(obj);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(scratch);

  masm.PopRegsInMask(save);
  masm.branchIfFalseBool(scratch, failure->label());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

  masm.bind(&addNewElement);

  // Increment initLength.
  masm.add32(Imm32(1), initLength);

  // If length is now <= index, increment length too.
  Label skipIncrementLength;
  Address length(scratch, ObjectElements::offsetOfLength());
  masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
  masm.add32(Imm32(1), length);
  masm.bind(&skipIncrementLength);
  masm.prepareOOBStoreElement(obj, index, scratch, spectreTemp,
                              failure->label(), liveVolatileRegs());

  // Skip EmitPreBarrier as the memory is uninitialized.
  masm.jump(&storeSkipPreBarrier);

@@ -14956,95 +14956,17 @@ void CodeGenerator::visitStoreHoleValueElement(LStoreHoleValueElement* lir) {
  masm.storeValue(MagicValue(JS_ELEMENTS_HOLE), element);
}

void CodeGenerator::emitStoreElementHoleOOL(LInstruction* lir) {
  Register object, elements, index;
  mozilla::Maybe<ConstantOrRegister> value;
  Register temp;

  if (lir->isStoreElementHoleV()) {
    LStoreElementHoleV* store = lir->toStoreElementHoleV();
    object = ToRegister(store->object());
    elements = ToRegister(store->elements());
    index = ToRegister(store->index());
    value.emplace(TypedOrValueRegister(ToValue(store->value())));
    temp = ToRegister(store->temp0());
  } else {
    LStoreElementHoleT* store = lir->toStoreElementHoleT();
    object = ToRegister(store->object());
    elements = ToRegister(store->elements());
    index = ToRegister(store->index());
    if (store->value()->isConstant()) {
      value.emplace(
          ConstantOrRegister(store->value()->toConstant()->toJSValue()));
    } else {
      MIRType valueType = store->mir()->value()->type();
      value.emplace(
          TypedOrValueRegister(valueType, ToAnyRegister(store->value())));
    }
    temp = ToRegister(store->temp0());
  }

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());

  // We're out-of-bounds. We only handle the index == initlength case.
  // If index > initializedLength, bail out. Note that this relies on the
  // condition flags sticking from the incoming branch.
  // Also note: this branch does not need Spectre mitigations, doing that for
  // the capacity check below is sufficient.
  Label allocElement, addNewElement;
#if defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64) || \
    defined(JS_CODEGEN_RISCV64)
  // Had to reimplement for MIPS because there are no flags.
  bailoutCmp32(Assembler::NotEqual, initLength, index, lir->snapshot());
#else
  bailoutIf(Assembler::NotEqual, lir->snapshot());
#endif

  // If index < capacity, we can add a dense element inline. If not, we need
  // to allocate more elements first.
  masm.spectreBoundsCheck32(
      index, Address(elements, ObjectElements::offsetOfCapacity()), temp,
      &allocElement);
  masm.jump(&addNewElement);

  masm.bind(&allocElement);

  // Save all live volatile registers, except |temp|.
  LiveRegisterSet liveRegs = liveVolatileRegs(lir);
  liveRegs.takeUnchecked(temp);
  masm.PushRegsInMask(liveRegs);

  masm.setupAlignedABICall();
  masm.loadJSContext(temp);
  masm.passABIArg(temp);
  masm.passABIArg(object);

  using Fn = bool (*)(JSContext*, NativeObject*);
  masm.callWithABI<Fn, NativeObject::addDenseElementPure>();
  masm.storeCallPointerResult(temp);

  masm.PopRegsInMask(liveRegs);
  bailoutIfFalseBool(temp, lir->snapshot());

  // Load the reallocated elements pointer.
  masm.loadPtr(Address(object, NativeObject::offsetOfElements()), elements);

  masm.bind(&addNewElement);

  // Increment initLength
  masm.add32(Imm32(1), initLength);

  // If length is now <= index, increment length too.
  Label skipIncrementLength;
  Address length(elements, ObjectElements::offsetOfLength());
  masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
  masm.add32(Imm32(1), length);
  masm.bind(&skipIncrementLength);
}

void CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT* lir) {
  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());

  auto* ool = new (alloc()) LambdaOutOfLineCode([=](OutOfLineCode& ool) {
    emitStoreElementHoleOOL(lir);
    Label bail;
    masm.prepareOOBStoreElement(obj, index, elements, temp, &bail,
                                liveVolatileRegs(lir));
    bailoutFrom(&bail, lir->snapshot());

    // Jump to the inline path where we will store the value.
    // We rejoin after the prebarrier, because the memory is uninitialized.

@@ -15052,11 +14974,6 @@ void CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT* lir) {
  });
  addOutOfLineCode(ool, lir->mir());

  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  Register temp = ToRegister(lir->temp0());

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

@@ -15075,8 +14992,17 @@ void CodeGenerator::visitStoreElementHoleT(LStoreElementHoleT* lir) {
}

void CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV* lir) {
  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  ValueOperand value = ToValue(lir->value());
  Register temp = ToRegister(lir->temp0());

  auto* ool = new (alloc()) LambdaOutOfLineCode([=](OutOfLineCode& ool) {
    emitStoreElementHoleOOL(lir);
    Label bail;
    masm.prepareOOBStoreElement(obj, index, elements, temp, &bail,
                                liveVolatileRegs(lir));
    bailoutFrom(&bail, lir->snapshot());

    // Jump to the inline path where we will store the value.
    // We rejoin after the prebarrier, because the memory is uninitialized.

@@ -15084,12 +15010,6 @@ void CodeGenerator::visitStoreElementHoleV(LStoreElementHoleV* lir) {
  });
  addOutOfLineCode(ool, lir->mir());

  Register obj = ToRegister(lir->object());
  Register elements = ToRegister(lir->elements());
  Register index = ToRegister(lir->index());
  ValueOperand value = ToValue(lir->value());
  Register temp = ToRegister(lir->temp0());

  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  masm.spectreBoundsCheck32(index, initLength, temp, ool->entry());

@@ -117,7 +117,6 @@ class CodeGenerator final : public CodeGeneratorSpecific {
  void visitOutOfLineCallVM(
      OutOfLineCallVM<Fn, fn, ArgSeq, StoreOutputTo>* ool);

  void emitStoreElementHoleOOL(LInstruction* lir);
  void emitIsCallableOOL(Register object, Register output);

  void emitResumableWasmTrapOOL(LInstruction* lir, size_t framePushed,

@@ -9300,6 +9300,57 @@ void MacroAssembler::registerIterator(Register enumeratorsList, Register iter,
  storePtr(iter, Address(enumeratorsList, NativeIterator::offsetOfPrev()));
}

void MacroAssembler::prepareOOBStoreElement(Register object, Register index,
                                            Register elements,
                                            Register spectreTemp,
                                            Label* failure,
                                            LiveRegisterSet volatileLiveRegs) {
  Address length(elements, ObjectElements::offsetOfLength());
  Address initLength(elements, ObjectElements::offsetOfInitializedLength());
  Address capacity(elements, ObjectElements::offsetOfCapacity());

  // We only handle the index == initLength case.
  branch32(Assembler::NotEqual, initLength, index, failure);

  // If index < capacity, we can add a dense element inline. If not, we
  // need to allocate more elements.
  Label allocElement, addNewElement;
  spectreBoundsCheck32(index, capacity, spectreTemp, &allocElement);
  jump(&addNewElement);

  bind(&allocElement);

  volatileLiveRegs.takeUnchecked(elements);
  volatileLiveRegs.takeUnchecked(spectreTemp);
  PushRegsInMask(volatileLiveRegs);

  // Use `elements` as a scratch register because we're about to reallocate it.
  using Fn = bool (*)(JSContext* cx, NativeObject* obj);
  setupUnalignedABICall(elements);
  loadJSContext(elements);
  passABIArg(elements);
  passABIArg(object);
  callWithABI<Fn, NativeObject::addDenseElementPure>();
  storeCallPointerResult(elements);

  PopRegsInMask(volatileLiveRegs);
  branchIfFalseBool(elements, failure);

  // Load the reallocated elements pointer.
  loadPtr(Address(object, NativeObject::offsetOfElements()), elements);

  bind(&addNewElement);

  // Increment initLength.
  add32(Imm32(1), initLength);

  // If length is now <= index, increment length too.
  Label skipIncrementLength;
  branch32(Assembler::Above, length, index, &skipIncrementLength);
  add32(Imm32(1), length);
  bind(&skipIncrementLength);
}

void MacroAssembler::toHashableNonGCThing(ValueOperand value,
                                          ValueOperand result,
                                          FloatRegister tempFloat) {

@@ -5404,6 +5404,10 @@ class MacroAssembler : public MacroAssemblerSpecific {
                        Register temp3);
  void registerIterator(Register enumeratorsList, Register iter, Register temp);

  void prepareOOBStoreElement(Register object, Register index,
                              Register elements, Register spectreTemp,
                              Label* failure, LiveRegisterSet volatileLiveRegs);

  void toHashableNonGCThing(ValueOperand value, ValueOperand result,
                            FloatRegister tempFloat);