Skip to content

Commit f0acede

Browse files
jupvfranco authored and Commit Bot committed
Remove weak-list of optimized JS functions.
This CL removes the weak-list of JS functions from the context and all the code that iterares over it. This list was being used mainly during deoptimization (for code unlinking) and during garbage collection. Removing it will improve performance of programs that create many closures and trigger many scavenge GC cycles. No extra work is required during garbage collection. However, given that we no longer unlink code from JS functions during deoptimization, we leave it as it is, and on its next activation we check whether the mark_for_deoptimization bit of that code is set, and if it is, than we unlink it and jump to lazy compiled code. This check happens in the prologue of every code object. We needed to change/remove the cctests that used to check something on this list. Working in x64, ia32, arm64, arm, mips64 and mips. Bug: v8:6637 Change-Id: Ica99a12fd0351ae985e9a287918bf28caf6d2e24 TBR: [email protected] Reviewed-on: https://chromium-review.googlesource.com/647596 Commit-Queue: Jaroslav Sevcik <[email protected]> Reviewed-by: Benedikt Meurer <[email protected]> Reviewed-by: Jaroslav Sevcik <[email protected]> Cr-Commit-Position: refs/heads/master@{#47808}
1 parent 9f2641d commit f0acede

28 files changed

+396
-586
lines changed

src/builtins/arm/builtins-arm.cc

Lines changed: 12 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -972,33 +972,12 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
972972
static void ReplaceClosureCodeWithOptimizedCode(
973973
MacroAssembler* masm, Register optimized_code, Register closure,
974974
Register scratch1, Register scratch2, Register scratch3) {
975-
Register native_context = scratch1;
976-
977975
// Store code entry in the closure.
978976
__ str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
979977
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
980978
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
981979
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
982980
OMIT_SMI_CHECK);
983-
984-
// Link the closure into the optimized function list.
985-
__ ldr(native_context, NativeContextMemOperand());
986-
__ ldr(scratch2,
987-
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
988-
__ str(scratch2,
989-
FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
990-
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
991-
scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
992-
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
993-
const int function_list_offset =
994-
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
995-
__ str(closure,
996-
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
997-
// Save closure before the write barrier.
998-
__ mov(scratch2, closure);
999-
__ RecordWriteContextSlot(native_context, function_list_offset, closure,
1000-
scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
1001-
__ mov(closure, scratch2);
1002981
}
1003982

1004983
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
@@ -1567,6 +1546,18 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
15671546
GenerateTailCallToSharedCode(masm);
15681547
}
15691548

1549+
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
1550+
// Set the code slot inside the JSFunction to the trampoline to the
1551+
// interpreter entry.
1552+
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1553+
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
1554+
__ str(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
1555+
__ RecordWriteField(r1, JSFunction::kCodeOffset, r2, r4, kLRHasNotBeenSaved,
1556+
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
1557+
// Jump to compile lazy.
1558+
Generate_CompileLazy(masm);
1559+
}
1560+
15701561
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
15711562
// ----------- S t a t e -------------
15721563
// -- r0 : argument count (preserved for callee)

src/builtins/arm64/builtins-arm64.cc

Lines changed: 12 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -986,31 +986,12 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
986986
static void ReplaceClosureCodeWithOptimizedCode(
987987
MacroAssembler* masm, Register optimized_code, Register closure,
988988
Register scratch1, Register scratch2, Register scratch3) {
989-
Register native_context = scratch1;
990-
991989
// Store code entry in the closure.
992990
__ Str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
993991
__ Mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
994992
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
995993
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
996994
OMIT_SMI_CHECK);
997-
998-
// Link the closure into the optimized function list.
999-
__ Ldr(native_context, NativeContextMemOperand());
1000-
__ Ldr(scratch2,
1001-
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
1002-
__ Str(scratch2,
1003-
FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
1004-
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
1005-
scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
1006-
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
1007-
const int function_list_offset =
1008-
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
1009-
__ Str(closure,
1010-
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
1011-
__ Mov(scratch2, closure);
1012-
__ RecordWriteContextSlot(native_context, function_list_offset, scratch2,
1013-
scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
1014995
}
1015996

1016997
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
@@ -1584,6 +1565,18 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
15841565
GenerateTailCallToSharedCode(masm);
15851566
}
15861567

1568+
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
1569+
// Set the code slot inside the JSFunction to the trampoline to the
1570+
// interpreter entry.
1571+
__ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
1572+
__ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
1573+
__ Str(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
1574+
__ RecordWriteField(x1, JSFunction::kCodeOffset, x2, x5, kLRHasNotBeenSaved,
1575+
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
1576+
// Jump to compile lazy.
1577+
Generate_CompileLazy(masm);
1578+
}
1579+
15871580
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
15881581
// ----------- S t a t e -------------
15891582
// -- x0 : argument count (preserved for callee)

src/builtins/builtins-definitions.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -121,6 +121,7 @@ namespace internal {
121121
\
122122
/* Code life-cycle */ \
123123
ASM(CompileLazy) \
124+
ASM(CompileLazyDeoptimizedCode) \
124125
ASM(CheckOptimizationMarker) \
125126
TFJ(DeserializeLazy, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
126127
ASM(InstantiateAsmJs) \

src/builtins/ia32/builtins-ia32.cc

Lines changed: 12 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -615,31 +615,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
615615
static void ReplaceClosureCodeWithOptimizedCode(
616616
MacroAssembler* masm, Register optimized_code, Register closure,
617617
Register scratch1, Register scratch2, Register scratch3) {
618-
Register native_context = scratch1;
619618

620619
// Store the optimized code in the closure.
621620
__ mov(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code);
622621
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
623622
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
624623
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
625-
626-
// Link the closure into the optimized function list.
627-
__ mov(native_context, NativeContextOperand());
628-
__ mov(scratch3,
629-
ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
630-
__ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), scratch3);
631-
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch3,
632-
scratch2, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
633-
OMIT_SMI_CHECK);
634-
const int function_list_offset =
635-
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
636-
__ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
637-
closure);
638-
// Save closure before the write barrier.
639-
__ mov(scratch3, closure);
640-
__ RecordWriteContextSlot(native_context, function_list_offset, closure,
641-
scratch2, kDontSaveFPRegs);
642-
__ mov(closure, scratch3);
643624
}
644625

645626
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -1348,6 +1329,18 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
13481329
GenerateTailCallToSharedCode(masm);
13491330
}
13501331

1332+
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
1333+
// Set the code slot inside the JSFunction to the trampoline to the
1334+
// interpreter entry.
1335+
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
1336+
__ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
1337+
__ mov(FieldOperand(edi, JSFunction::kCodeOffset), ecx);
1338+
__ RecordWriteField(edi, JSFunction::kCodeOffset, ecx, ebx, kDontSaveFPRegs,
1339+
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
1340+
// Jump to compile lazy.
1341+
Generate_CompileLazy(masm);
1342+
}
1343+
13511344
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
13521345
// ----------- S t a t e -------------
13531346
// -- eax : argument count (preserved for callee)

src/builtins/mips/builtins-mips.cc

Lines changed: 12 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -954,33 +954,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
954954
static void ReplaceClosureCodeWithOptimizedCode(
955955
MacroAssembler* masm, Register optimized_code, Register closure,
956956
Register scratch1, Register scratch2, Register scratch3) {
957-
Register native_context = scratch1;
958-
959957
// Store code entry in the closure.
960958
__ sw(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
961959
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
962960
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
963961
kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
964962
OMIT_SMI_CHECK);
965-
966-
// Link the closure into the optimized function list.
967-
__ lw(native_context, NativeContextMemOperand());
968-
__ lw(scratch2,
969-
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
970-
__ sw(scratch2,
971-
FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
972-
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
973-
scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs,
974-
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
975-
const int function_list_offset =
976-
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
977-
__ sw(closure,
978-
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
979-
// Save closure before the write barrier.
980-
__ mov(scratch2, closure);
981-
__ RecordWriteContextSlot(native_context, function_list_offset, closure,
982-
scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs);
983-
__ mov(closure, scratch2);
984963
}
985964

986965
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
@@ -1554,6 +1533,18 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
15541533
GenerateTailCallToSharedCode(masm);
15551534
}
15561535

1536+
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
1537+
// Set the code slot inside the JSFunction to the trampoline to the
1538+
// interpreter entry.
1539+
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
1540+
__ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
1541+
__ sw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
1542+
__ RecordWriteField(a1, JSFunction::kCodeOffset, a2, t0, kRAHasNotBeenSaved,
1543+
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
1544+
// Jump to compile lazy.
1545+
Generate_CompileLazy(masm);
1546+
}
1547+
15571548
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
15581549
// ----------- S t a t e -------------
15591550
// -- a0 : argument count (preserved for callee)

src/builtins/mips64/builtins-mips64.cc

Lines changed: 12 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -957,33 +957,12 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
957957
static void ReplaceClosureCodeWithOptimizedCode(
958958
MacroAssembler* masm, Register optimized_code, Register closure,
959959
Register scratch1, Register scratch2, Register scratch3) {
960-
Register native_context = scratch1;
961-
962960
// Store code entry in the closure.
963961
__ Sd(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
964962
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
965963
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
966964
kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
967965
OMIT_SMI_CHECK);
968-
969-
// Link the closure into the optimized function list.
970-
__ Ld(native_context, NativeContextMemOperand());
971-
__ Ld(scratch2,
972-
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
973-
__ Sd(scratch2,
974-
FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
975-
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
976-
scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs,
977-
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
978-
const int function_list_offset =
979-
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
980-
__ Sd(closure,
981-
ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
982-
// Save closure before the write barrier.
983-
__ mov(scratch2, closure);
984-
__ RecordWriteContextSlot(native_context, function_list_offset, closure,
985-
scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs);
986-
__ mov(closure, scratch2);
987966
}
988967

989968
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
@@ -1558,6 +1537,18 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
15581537
GenerateTailCallToSharedCode(masm);
15591538
}
15601539

1540+
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
1541+
// Set the code slot inside the JSFunction to the trampoline to the
1542+
// interpreter entry.
1543+
__ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
1544+
__ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
1545+
__ Sd(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
1546+
__ RecordWriteField(a1, JSFunction::kCodeOffset, a2, a4, kRAHasNotBeenSaved,
1547+
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
1548+
// Jump to compile lazy.
1549+
Generate_CompileLazy(masm);
1550+
}
1551+
15611552
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
15621553
// ----------- S t a t e -------------
15631554
// -- a0 : argument count (preserved for callee)

src/builtins/x64/builtins-x64.cc

Lines changed: 17 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -693,34 +693,17 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
693693
__ jmp(&stepping_prepared);
694694
}
695695

696+
// TODO(juliana): if we remove the code below then we don't need all
697+
// the parameters.
696698
static void ReplaceClosureCodeWithOptimizedCode(
697699
MacroAssembler* masm, Register optimized_code, Register closure,
698700
Register scratch1, Register scratch2, Register scratch3) {
699-
Register native_context = scratch1;
700701

701702
// Store the optimized code in the closure.
702703
__ movp(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code);
703704
__ movp(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
704705
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
705706
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
706-
707-
// Link the closure into the optimized function list.
708-
__ movp(native_context, NativeContextOperand());
709-
__ movp(scratch3,
710-
ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
711-
__ movp(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), scratch3);
712-
__ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch3,
713-
scratch2, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
714-
OMIT_SMI_CHECK);
715-
const int function_list_offset =
716-
Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
717-
__ movp(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
718-
closure);
719-
// Save closure before the write barrier.
720-
__ movp(scratch3, closure);
721-
__ RecordWriteContextSlot(native_context, function_list_offset, closure,
722-
scratch2, kDontSaveFPRegs);
723-
__ movp(closure, scratch3);
724707
}
725708

726709
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -1325,6 +1308,21 @@ void Builtins::Generate_CheckOptimizationMarker(MacroAssembler* masm) {
13251308
GenerateTailCallToSharedCode(masm);
13261309
}
13271310

1311+
// TODO(jupvfranco): investigate whether there is any case where the CompileLazy
1312+
// builtin does not set the code field in the JS function. If there isn't then
1313+
// we do not need this builtin and can jump directly to CompileLazy.
1314+
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
1315+
// Set the code slot inside the JSFunction to the trampoline to the
1316+
// interpreter entry.
1317+
__ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
1318+
__ movq(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
1319+
__ movq(FieldOperand(rdi, JSFunction::kCodeOffset), rcx);
1320+
__ RecordWriteField(rdi, JSFunction::kCodeOffset, rcx, r15, kDontSaveFPRegs,
1321+
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
1322+
// Jump to compile lazy.
1323+
Generate_CompileLazy(masm);
1324+
}
1325+
13281326
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
13291327
// ----------- S t a t e -------------
13301328
// -- rax : argument count (preserved for callee)

src/compiler/arm/code-generator-arm.cc

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -712,6 +712,26 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
712712
first_unused_stack_slot);
713713
}
714714

715+
// Check if the code object is marked for deoptimization. If it is, then it
716+
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
717+
// to:
718+
// 1. load the address of the current instruction;
719+
// 2. read from memory the word that contains that bit, which can be found in
720+
// the first set of flags ({kKindSpecificFlags1Offset});
721+
// 3. test kMarkedForDeoptimizationBit in those flags; and
722+
// 4. if it is not zero then it jumps to the builtin.
723+
void CodeGenerator::BailoutIfDeoptimized() {
724+
int pc_offset = __ pc_offset();
725+
int offset =
726+
Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc_offset + 8);
727+
// We can use the register pc - 8 for the address of the current instruction.
728+
__ ldr(ip, MemOperand(pc, offset));
729+
__ tst(ip, Operand(1 << Code::kMarkedForDeoptimizationBit));
730+
Handle<Code> code = isolate()->builtins()->builtin_handle(
731+
Builtins::kCompileLazyDeoptimizedCode);
732+
__ Jump(code, RelocInfo::CODE_TARGET, ne);
733+
}
734+
715735
// Assembles an instruction after register allocation, producing machine code.
716736
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
717737
Instruction* instr) {

src/compiler/arm64/code-generator-arm64.cc

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -711,6 +711,28 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
711711
first_unused_stack_slot);
712712
}
713713

714+
// Check if the code object is marked for deoptimization. If it is, then it
715+
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
716+
// to:
717+
// 1. load the address of the current instruction;
718+
// 2. read from memory the word that contains that bit, which can be found in
719+
// the first set of flags ({kKindSpecificFlags1Offset});
720+
// 3. test kMarkedForDeoptimizationBit in those flags; and
721+
// 4. if it is not zero then it jumps to the builtin.
722+
void CodeGenerator::BailoutIfDeoptimized() {
723+
Label current;
724+
// The Adr instruction gets the address of the current instruction.
725+
__ Adr(x2, &current);
726+
__ Bind(&current);
727+
int pc = __ pc_offset();
728+
int offset = Code::kKindSpecificFlags1Offset - (Code::kHeaderSize + pc);
729+
__ Ldr(x2, MemOperand(x2, offset));
730+
__ Tst(x2, Immediate(1 << Code::kMarkedForDeoptimizationBit));
731+
Handle<Code> code = isolate()->builtins()->builtin_handle(
732+
Builtins::kCompileLazyDeoptimizedCode);
733+
__ Jump(code, RelocInfo::CODE_TARGET, ne);
734+
}
735+
714736
// Assembles an instruction after register allocation, producing machine code.
715737
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
716738
Instruction* instr) {

0 commit comments

Comments
 (0)