[GR-55222] Support Lazy Deoptimization
PullRequest: graal/19048
Eric Wu authored and peter-hofer committed Feb 7, 2025
2 parents 75344f7 + 2805efd commit b50044e
Showing 36 changed files with 867 additions and 254 deletions.
@@ -847,7 +847,7 @@ private static void walkStack(IsolateThread thread, JavaStackWalk walk, ObjectRe
VMError.guarantee(!JavaFrames.isUnknownFrame(frame), "GC must not encounter unknown frames");

/* We are during a GC, so tethering of the CodeInfo is not necessary. */
-DeoptimizedFrame deoptFrame = Deoptimizer.checkDeoptimized(frame);
+DeoptimizedFrame deoptFrame = Deoptimizer.checkEagerDeoptimized(frame);
if (deoptFrame == null) {
Pointer sp = frame.getSP();
CodeInfo codeInfo = CodeInfoAccess.unsafeConvert(frame.getIPCodeInfo());
@@ -76,26 +76,28 @@ public boolean visitCode(CodeInfo codeInfo) {
Object tether = UntetheredCodeInfoAccess.getTetherUnsafe(codeInfo);
if (tether != null && !isReachable(tether)) {
int state = CodeInfoAccess.getState(codeInfo);
-if (state == CodeInfo.STATE_INVALIDATED) {
+if (state == CodeInfo.STATE_REMOVED_FROM_CODE_CACHE) {
/*
- * The tether object is not reachable and the CodeInfo was already invalidated, so
- * we only need to visit references that will be accessed before the unmanaged
- * memory is freed during this garbage collection.
+ * The tether object is not reachable and the CodeInfo was already removed from the
+ * code cache, so we only need to visit references that will be accessed before the
+ * unmanaged memory is freed during this garbage collection.
*/
RuntimeCodeInfoAccess.walkObjectFields(codeInfo, greyToBlackObjectVisitor);
-CodeInfoAccess.setState(codeInfo, CodeInfo.STATE_UNREACHABLE);
+CodeInfoAccess.setState(codeInfo, CodeInfo.STATE_PENDING_FREE);
return true;
}

/*
- * We don't want to keep heap objects unnecessarily alive, so invalidate and free the
- * CodeInfo if it has weak references to otherwise unreachable objects. However, we need
- * to make sure that all the objects that are accessed during the invalidation remain
- * reachable. Those objects can only be collected in a subsequent garbage collection.
+ * We don't want to keep heap objects unnecessarily alive. So, we check if the CodeInfo
+ * has weak references to otherwise unreachable objects. If so, we remove the CodeInfo
+ * from the code cache and free the CodeInfo during the current safepoint (see
+ * RuntimeCodeCacheCleaner). However, we need to make sure that all the objects that are
+ * accessed while doing so remain reachable. Those objects can only be collected in a
+ * subsequent garbage collection.
*/
if (state == CodeInfo.STATE_NON_ENTRANT || invalidateCodeThatReferencesUnreachableObjects && state == CodeInfo.STATE_CODE_CONSTANTS_LIVE && hasWeakReferenceToUnreachableObject(codeInfo)) {
RuntimeCodeInfoAccess.walkObjectFields(codeInfo, greyToBlackObjectVisitor);
-CodeInfoAccess.setState(codeInfo, CodeInfo.STATE_READY_FOR_INVALIDATION);
+CodeInfoAccess.setState(codeInfo, CodeInfo.STATE_PENDING_REMOVAL_FROM_CODE_CACHE);
return true;
}
}
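
Note: to make the flattened hunk easier to follow, here is a condensed sketch of the resulting visitCode() logic with the renamed states (a paraphrase, not the actual RuntimeCodeCacheWalker source; the enclosing class and field declarations are elided):

    // Sketch: GC decision for a CodeInfo whose tether object is unreachable.
    int state = CodeInfoAccess.getState(codeInfo);
    if (state == CodeInfo.STATE_REMOVED_FROM_CODE_CACHE) {
        // Unmanaged memory is freed during this GC: visit only the references
        // accessed until then, then mark the CodeInfo for freeing.
        RuntimeCodeInfoAccess.walkObjectFields(codeInfo, greyToBlackObjectVisitor);
        CodeInfoAccess.setState(codeInfo, CodeInfo.STATE_PENDING_FREE);
    } else if (state == CodeInfo.STATE_NON_ENTRANT
            || (invalidateCodeThatReferencesUnreachableObjects
                    && state == CodeInfo.STATE_CODE_CONSTANTS_LIVE
                    && hasWeakReferenceToUnreachableObject(codeInfo))) {
        // Schedule removal from the code cache within this safepoint
        // (performed by RuntimeCodeCacheCleaner, per the comment above).
        RuntimeCodeInfoAccess.walkObjectFields(codeInfo, greyToBlackObjectVisitor);
        CodeInfoAccess.setState(codeInfo, CodeInfo.STATE_PENDING_REMOVAL_FROM_CODE_CACHE);
    }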
@@ -469,19 +469,27 @@ public SubstrateLIRGenerationResult(CompilationIdentifier compilationId, LIR lir
super(compilationId, lir, frameMapBuilder, registerAllocationConfig, callingConvention);
this.method = method;

-if (method.hasCalleeSavedRegisters()) {
+/*
+ * Besides methods with callee-saved registers, we also reserve additional stack space
+ * for the lazy deopt stub. This is necessary because the lazy deopt stub might read
+ * callee-saved register values in the callee of the function to be deoptimized, so
+ * that stack space must not be overwritten by the lazy deopt stub.
+ */
+if (method.hasCalleeSavedRegisters() || method.getDeoptStubType() == Deoptimizer.StubType.LazyEntryStub) {
AArch64CalleeSavedRegisters calleeSavedRegisters = AArch64CalleeSavedRegisters.singleton();
FrameMap frameMap = ((FrameMapBuilderTool) frameMapBuilder).getFrameMap();
int registerSaveAreaSizeInBytes = calleeSavedRegisters.getSaveAreaSize();
StackSlot calleeSaveArea = frameMap.allocateStackMemory(registerSaveAreaSizeInBytes, frameMap.getTarget().wordSize);

-/*
- * The offset of the callee save area must be fixed early during image generation.
- * It is accessed when compiling methods that have a call with callee-saved calling
- * convention. Here we verify that offset computed earlier is the same as the offset
- * actually reserved.
- */
-calleeSavedRegisters.verifySaveAreaOffsetInFrame(calleeSaveArea.getRawOffset());
+if (method.hasCalleeSavedRegisters()) {
+    /*
+     * The offset of the callee save area must be fixed early during image
+     * generation. It is accessed when compiling methods that have a call with
+     * callee-saved calling convention. Here we verify that the offset computed
+     * earlier is the same as the offset actually reserved.
+     */
+    calleeSavedRegisters.verifySaveAreaOffsetInFrame(calleeSaveArea.getRawOffset());
+}
}

if (method.canDeoptimize() || method.isDeoptTarget()) {
@@ -951,8 +959,8 @@ public void returned(CompilationResultBuilder crb) {
}

/**
- * Generates the prolog of a {@link com.oracle.svm.core.deopt.Deoptimizer.StubType#EntryStub}
- * method.
+ * Generates the prolog of a
+ * {@link com.oracle.svm.core.deopt.Deoptimizer.StubType#EagerEntryStub} method.
*/
protected static class DeoptEntryStubContext extends SubstrateAArch64FrameContext {
protected final CallingConvention callingConvention;
@@ -1241,7 +1249,7 @@ public CompilationResultBuilder newCompilationResultBuilder(LIRGenerationResult
}

protected FrameContext createFrameContext(SharedMethod method, Deoptimizer.StubType stubType, CallingConvention callingConvention) {
-if (stubType == Deoptimizer.StubType.EntryStub) {
+if (stubType == Deoptimizer.StubType.EagerEntryStub || stubType == Deoptimizer.StubType.LazyEntryStub) {
return new DeoptEntryStubContext(method, callingConvention);
} else if (stubType == Deoptimizer.StubType.ExitStub) {
return new DeoptExitStubContext(method, callingConvention);
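
Reviewer note: the new reservation rule above is the subtle part of this change. A hedged sketch of the situation it guards against, based only on the comment in this diff (layout simplified; the exact stub mechanics are an assumption):

    /*
     * A is a runtime-compiled method marked for lazy deoptimization; B is its callee.
     *
     *   higher addresses
     *   | frame of A                                          |
     *   | return address into A, patched to the lazy stub     |
     *   | callee-save area of B (holds A's register values)   |
     *   | rest of B's frame                                   |
     *   lower addresses
     *
     * When B returns, control reaches the lazy deopt stub instead of A. The
     * stub's frame occupies the space where B's frame was, so without
     * reserving a matching save area the stub could overwrite the
     * callee-saved register values that the deoptimizer still needs to read.
     */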
@@ -289,8 +289,9 @@ public CallingConvention getCallingConvention(Type t, JavaType returnType, JavaT
int currentFP = 0;

/*
- * We have to reserve a slot between return address and outgoing parameters for the deopt
- * frame handle. Exception: calls to native methods.
+ * We have to reserve a slot between return address and outgoing parameters for the
+ * deoptimized frame (eager deoptimization), or the original return address (lazy
+ * deoptimization). Exception: calls to native methods.
*/
int currentStackOffset = (type.nativeABI() ? nativeParamsStackOffset : target.wordSize);

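A quick sketch of what the reserved word holds in each mode, matching the currentStackOffset computation above (offsets illustrative):

    /*
     *   | return address            |
     *   | reserved word             |  eager: handle to the DeoptimizedFrame
     *   |                           |  lazy:  the original return address
     *   | outgoing parameters ...   |
     *
     * With the native ABI no word is reserved; currentStackOffset then starts
     * at nativeParamsStackOffset instead of target.wordSize.
     */
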
@@ -555,19 +555,27 @@ public SubstrateLIRGenerationResult(CompilationIdentifier compilationId, LIR lir
super(compilationId, lir, frameMapBuilder, registerAllocationConfig, callingConvention);
this.method = method;

-if (method.hasCalleeSavedRegisters()) {
+/*
+ * Besides methods with callee-saved registers, we also reserve additional stack space
+ * for the lazy deopt stub. This is necessary because the lazy deopt stub might read
+ * callee-saved register values in the callee of the function to be deoptimized, so
+ * that stack space must not be overwritten by the lazy deopt stub.
+ */
+if (method.hasCalleeSavedRegisters() || method.getDeoptStubType() == Deoptimizer.StubType.LazyEntryStub) {
AMD64CalleeSavedRegisters calleeSavedRegisters = AMD64CalleeSavedRegisters.singleton();
FrameMap frameMap = ((FrameMapBuilderTool) frameMapBuilder).getFrameMap();
int registerSaveAreaSizeInBytes = calleeSavedRegisters.getSaveAreaSize();
StackSlot calleeSaveArea = frameMap.allocateStackMemory(registerSaveAreaSizeInBytes, frameMap.getTarget().wordSize);

-/*
- * The offset of the callee save area must be fixed early during image generation.
- * It is accessed when compiling methods that have a call with callee-saved calling
- * convention. Here we verify that offset computed earlier is the same as the offset
- * actually reserved.
- */
-calleeSavedRegisters.verifySaveAreaOffsetInFrame(calleeSaveArea.getRawOffset());
+if (method.hasCalleeSavedRegisters()) {
+    /*
+     * The offset of the callee save area must be fixed early during image
+     * generation. It is accessed when compiling methods that have a call with
+     * callee-saved calling convention. Here we verify that the offset computed
+     * earlier is the same as the offset actually reserved.
+     */
+    calleeSavedRegisters.verifySaveAreaOffsetInFrame(calleeSaveArea.getRawOffset());
+}
}

if (method.canDeoptimize() || method.isDeoptTarget()) {
@@ -1327,8 +1335,8 @@ public void returned(CompilationResultBuilder crb) {
}

/**
- * Generates the prolog of a {@link com.oracle.svm.core.deopt.Deoptimizer.StubType#EntryStub}
- * method.
+ * Generates the prolog of a
+ * {@link com.oracle.svm.core.deopt.Deoptimizer.StubType#EagerEntryStub} method.
*/
protected static class DeoptEntryStubContext extends SubstrateAMD64FrameContext {
protected DeoptEntryStubContext(SharedMethod method, CallingConvention callingConvention) {
@@ -1367,7 +1375,7 @@ public void enter(CompilationResultBuilder tasm) {
* method.
*
* Note no special handling is necessary for CFI as this will be a direct call from the
- * {@link com.oracle.svm.core.deopt.Deoptimizer.StubType#EntryStub}.
+ * {@link com.oracle.svm.core.deopt.Deoptimizer.StubType#EagerEntryStub}.
*/
protected static class DeoptExitStubContext extends SubstrateAMD64FrameContext {
protected DeoptExitStubContext(SharedMethod method, CallingConvention callingConvention) {
@@ -1811,7 +1819,7 @@ protected AMD64MacroAssembler createAssembler(OptionValues options) {
}

protected FrameContext createFrameContext(SharedMethod method, Deoptimizer.StubType stubType, CallingConvention callingConvention) {
-if (stubType == Deoptimizer.StubType.EntryStub) {
+if (stubType == Deoptimizer.StubType.EagerEntryStub || stubType == Deoptimizer.StubType.LazyEntryStub) {
return new DeoptEntryStubContext(method, callingConvention);
} else if (stubType == Deoptimizer.StubType.ExitStub) {
return new DeoptExitStubContext(method, callingConvention);
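
With both backends now dispatching on two entry-stub kinds, a condensed view of the stub kinds this commit distinguishes may help (the enum shape is an assumption; only the constant names appear in the diff):

    // Hypothetical sketch; the real Deoptimizer.StubType may have more members.
    enum StubType {
        EagerEntryStub,  // renamed from EntryStub: entry for eager deoptimization
        LazyEntryStub,   // new: entry for lazy deoptimization
        ExitStub         // unchanged; reached by a direct call from the entry stub
    }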
@@ -264,8 +264,9 @@ public CallingConvention getCallingConvention(Type t, JavaType returnType, JavaT
boolean isEntryPoint = type.nativeABI() && !type.outgoing;

/*
- * We have to reserve a slot between return address and outgoing parameters for the deopt
- * frame handle. Exception: calls to native methods.
+ * We have to reserve a slot between return address and outgoing parameters for the
+ * deoptimized frame (eager deoptimization), or the original return address (lazy
+ * deoptimization). Exception: calls to native methods.
*/
int currentStackOffset = type.nativeABI() ? nativeParamsStackOffset : target.wordSize;

@@ -230,8 +230,9 @@ public CallingConvention getCallingConvention(Type t, JavaType returnType, JavaT
int currentFP = 0;

/*
- * We have to reserve a slot between return address and outgoing parameters for the deopt
- * frame handle. Exception: calls to native methods.
+ * We have to reserve a slot between return address and outgoing parameters for the
+ * deoptimized frame (eager deoptimization), or the original return address (lazy
+ * deoptimization). Exception: calls to native methods.
*/
int currentStackOffset = (type.nativeABI() ? nativeParamsStackOffset : target.wordSize);

@@ -695,7 +695,12 @@ public int maxInvocationCount() {
@Override
@RestrictHeapAccess(access = RestrictHeapAccess.Access.NO_ALLOCATION, reason = "Must not allocate while printing diagnostics.")
public void printDiagnostics(Log log, ErrorContext context, int maxDiagnosticLevel, int invocationCount) {
log.string("DeoptStubPointer address: ").zhex(DeoptimizationSupport.getDeoptStubPointer()).newline().newline();
log.string("EagerDeoptStub address: ").zhex(DeoptimizationSupport.getEagerDeoptStubPointer()).newline();
if (Deoptimizer.Options.LazyDeoptimization.getValue()) {
log.string("LazyDeoptStubPrimitiveReturn address: ").zhex(DeoptimizationSupport.getLazyDeoptStubPrimitiveReturnPointer()).newline();
log.string("LazyDeoptStubObjectReturn address: ").zhex(DeoptimizationSupport.getLazyDeoptStubObjectReturnPointer()).newline();
}
log.newline();
}
}
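
For reference, the diagnostic output shape this produces when lazy deoptimization is enabled (addresses are placeholders, not real values):

    // EagerDeoptStub address: 0x...
    // LazyDeoptStubPrimitiveReturn address: 0x...
    // LazyDeoptStubObjectReturn address: 0x...

The two lazy stub variants are split by return-value kind, presumably so that an object return value can be treated as a GC-visible reference while a primitive one is passed through untyped.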

@@ -22,7 +22,7 @@
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
-package com.oracle.svm.core.jvmti.headers;
+package com.oracle.svm.core.c;

import org.graalvm.nativeimage.c.struct.CPointerTo;
import org.graalvm.word.PointerBase;
@@ -66,36 +66,37 @@ public interface CodeInfo extends UntetheredCodeInfo {
/**
* This state is only a temporary state when the VM is at a safepoint. It indicates that no
* activations are remaining and that the code is no longer needed (code is non-entrant) or no
- * longer wanted (code has references to otherwise unreachable objects). The GC will invalidate
- * and free this {@link CodeInfo} object during the current safepoint. It is crucial that the GC
- * still visits all heap references that may be accessed while invalidating and freeing the
- * {@link CodeInfo} object (i.e., all object fields).
+ * longer wanted (code has references to otherwise unreachable objects). The GC will remove this
+ * {@link CodeInfo} object from the code cache and free it during the current safepoint. It is
+ * crucial that the GC still visits all heap references that may be accessed while removing and
+ * freeing the {@link CodeInfo} object (i.e., all object fields).
*/
@DuplicatedInNativeCode //
-int STATE_READY_FOR_INVALIDATION = STATE_NON_ENTRANT + 1;
+int STATE_PENDING_REMOVAL_FROM_CODE_CACHE = STATE_NON_ENTRANT + 1;

/**
- * Indicates that this {@link CodeInfo} object was invalidated. The data will be freed by the GC
- * once the tether object becomes unreachable. Until then, the GC must continue visiting all
- * heap references, including code constants that are directly embedded into the machine code.
+ * Indicates that this {@link CodeInfo} object was removed from the code cache. The data will be
+ * freed by the GC once the tether object becomes unreachable. Until then, the GC must continue
+ * visiting all heap references, including code constants that are directly embedded into the
+ * machine code.
*/
@DuplicatedInNativeCode //
-int STATE_INVALIDATED = STATE_READY_FOR_INVALIDATION + 1;
+int STATE_REMOVED_FROM_CODE_CACHE = STATE_PENDING_REMOVAL_FROM_CODE_CACHE + 1;

/**
* This state is only a temporary state when the VM is at a safepoint. It indicates that a
- * previously invalidated {@link CodeInfo} object is no longer reachable from the GC point of
- * view. The GC will free the {@link CodeInfo} object during the current safepoint. It is
- * crucial that the GC still visits all heap references that may be accessed while freeing the
- * {@link CodeInfo} object (i.e., all object fields).
+ * {@link CodeInfo} object which has already been removed from the code cache is no longer
+ * reachable from the GC point of view. The GC will free the {@link CodeInfo} object during the
+ * current safepoint. It is crucial that the GC still visits all heap references that may be
+ * accessed while freeing the {@link CodeInfo} object (i.e., all object fields).
*/
@DuplicatedInNativeCode //
-int STATE_UNREACHABLE = STATE_INVALIDATED + 1;
+int STATE_PENDING_FREE = STATE_REMOVED_FROM_CODE_CACHE + 1;

/**
* Indicates that the {@link CodeInfo} object was already freed. This state should never be
* seen.
*/
@DuplicatedInNativeCode //
-int STATE_FREED = STATE_UNREACHABLE + 1;
+int STATE_FREED = STATE_PENDING_FREE + 1;
}
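
Putting the renamed constants together, the lifecycle they encode (a simplified sketch based on the javadoc above; only GC-driven safepoint transitions are shown):

    // STATE_CODE_CONSTANTS_LIVE
    //   -> STATE_NON_ENTRANT                      (invalidated; activations may remain)
    //   -> STATE_PENDING_REMOVAL_FROM_CODE_CACHE  (temporary, within one safepoint)
    //   -> STATE_REMOVED_FROM_CODE_CACHE          (waiting for the tether to become unreachable)
    //   -> STATE_PENDING_FREE                     (temporary, within one safepoint)
    //   -> STATE_FREED                            (should never be observed)
    //
    // A CodeInfo that only has weak references to otherwise unreachable objects
    // can skip STATE_NON_ENTRANT and go directly to
    // STATE_PENDING_REMOVAL_FROM_CODE_CACHE (see RuntimeCodeCacheWalker above).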
@@ -155,12 +155,12 @@ public static String stateToString(int codeInfoState) {
return "code constants live";
case CodeInfo.STATE_NON_ENTRANT:
return "non-entrant";
-case CodeInfo.STATE_READY_FOR_INVALIDATION:
-    return "ready for invalidation";
-case CodeInfo.STATE_INVALIDATED:
-    return "invalidated";
-case CodeInfo.STATE_UNREACHABLE:
-    return "unreachable";
+case CodeInfo.STATE_PENDING_REMOVAL_FROM_CODE_CACHE:
+    return "pending removal from code cache";
+case CodeInfo.STATE_REMOVED_FROM_CODE_CACHE:
+    return "removed from code cache";
+case CodeInfo.STATE_PENDING_FREE:
+    return "pending free";
case CodeInfo.STATE_FREED:
return "invalid (freed)";
default: