Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
89 changes: 74 additions & 15 deletions src/hotspot/share/gc/g1/g1CollectedHeap.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
#include "code/icBuffer.hpp"
#include "compiler/oopMap.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BatchedTask.hpp"
Expand Down Expand Up @@ -492,6 +493,13 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(uint node_index, size_t word_
gclocker_retry_count += 1;
}

// Has the gc overhead limit been reached in the meantime? If so, this mutator
// should receive null even when unsuccessfully scheduling a collection as well
// for global consistency.
if (gc_overhead_limit_exceeded()) {
return nullptr;
}

// We can reach here if we were unsuccessful in scheduling a
// collection (because another thread beat us to it) or if we were
// stalled due to the GC locker. In either can we should retry the
Expand Down Expand Up @@ -760,7 +768,12 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
GCLocker::stall_until_clear();
gclocker_retry_count += 1;
}

// Has the gc overhead limit been reached in the meantime? If so, this mutator
// should receive null even when unsuccessfully scheduling a collection as well
// for global consistency.
if (gc_overhead_limit_exceeded()) {
return nullptr;
}

// We can reach here if we were unsuccessful in scheduling a
// collection (because another thread beat us to it) or if we were
Expand Down Expand Up @@ -981,27 +994,64 @@ void G1CollectedHeap::resize_heap_if_necessary() {
}
}

// Re-evaluates the GC overhead state after a garbage collection: advances the
// consecutive-over-limit counter when both the GC time and the free-space
// limits are violated at the same time, otherwise resets it to zero.
void G1CollectedHeap::update_gc_overhead_counter() {
assert(SafepointSynchronize::is_at_safepoint(), "precondition");

// Feature disabled: the counter stays at zero, so
// gc_overhead_limit_exceeded() never reports the limit as exceeded.
if (!UseGCOverheadLimit) {
return;
}

// Over the time limit if the long-term pause time ratio, expressed in
// percent, reaches GCTimeLimit.
bool gc_time_over_limit = (_policy->analytics()->long_term_pause_time_ratio() * 100) >= GCTimeLimit;
// Free space is measured in whole free-or-available regions relative to the
// maximum heap capacity.
double free_space_percent = percent_of(num_free_or_available_regions() * HeapRegion::GrainBytes, max_capacity());
bool free_space_below_limit = free_space_percent < GCHeapFreeLimit;

log_debug(gc)("GC Overhead Limit: GC Time %f Free Space %f Counter %zu",
(_policy->analytics()->long_term_pause_time_ratio() * 100),
free_space_percent,
_gc_overhead_counter);

// Count only collections where both limits are violated simultaneously; a
// single collection that satisfies either limit resets the streak.
if (gc_time_over_limit && free_space_below_limit) {
_gc_overhead_counter++;
} else {
_gc_overhead_counter = 0;
}
}

// Returns whether the GC overhead limits have been exceeded for at least
// GCOverheadLimitThreshold consecutive garbage collections, as tracked by
// update_gc_overhead_counter(). Mutators receiving null because of this
// should expect an OutOfMemoryError.
bool G1CollectedHeap::gc_overhead_limit_exceeded() {
return _gc_overhead_counter >= GCOverheadLimitThreshold;
}

HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
bool do_gc,
bool maximal_compaction,
bool expect_null_mutator_alloc_region,
bool* gc_succeeded) {
*gc_succeeded = true;
// Let's attempt the allocation first.
HeapWord* result =
attempt_allocation_at_safepoint(word_size,
expect_null_mutator_alloc_region);
if (result != nullptr) {
return result;
}
// Skip allocation if GC overhead limit has been exceeded to let the mutator run
// into an OOME. It can either exit "gracefully" or try to free up memory asap.
// For the latter situation, keep running GCs. If the mutator frees up enough
// memory quickly enough, the overhead(s) will go below the threshold(s) again
// and the VM may continue running.
// If we did not continue garbage collections, the (gc overhead) limit may decrease
// enough by itself to not count as exceeding the limit any more, in the worst
// case bouncing back-and-forth all the time.
if (!gc_overhead_limit_exceeded()) {
// Let's attempt the allocation first.
HeapWord* result =
attempt_allocation_at_safepoint(word_size,
expect_null_mutator_alloc_region);
if (result != nullptr) {
return result;
}

// In a G1 heap, we're supposed to keep allocation from failing by
// incremental pauses. Therefore, at least for now, we'll favor
// expansion over collection. (This might change in the future if we can
// do something smarter than full collection to satisfy a failed alloc.)
result = expand_and_allocate(word_size);
if (result != nullptr) {
return result;
// In a G1 heap, we're supposed to keep allocation from failing by
// incremental pauses. Therefore, at least for now, we'll favor
// expansion over collection. (This might change in the future if we can
// do something smarter than full collection to satisfy a failed alloc.)
result = expand_and_allocate(word_size);
if (result != nullptr) {
return result;
}
}

if (do_gc) {
Expand All @@ -1025,6 +1075,10 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
bool* succeeded) {
assert_at_safepoint_on_vm_thread();

// Update GC overhead limits after the initial garbage collection leading to this
// allocation attempt.
update_gc_overhead_counter();

// Attempts to allocate followed by Full GC.
HeapWord* result =
satisfy_failed_allocation_helper(word_size,
Expand Down Expand Up @@ -1062,6 +1116,10 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
assert(!soft_ref_policy()->should_clear_all_soft_refs(),
"Flag should have been handled and cleared prior to this point");

if (gc_overhead_limit_exceeded()) {
log_info(gc)("GC Overhead Limit exceeded too often (%zu).", GCOverheadLimitThreshold);
}

// What else? We might try synchronous finalization later. If the total
// space available is large enough for the allocation, then a more
// complete compaction phase than we've tried so far might be
Expand Down Expand Up @@ -1230,6 +1288,7 @@ class HumongousRegionSetChecker : public HeapRegionSetChecker {

G1CollectedHeap::G1CollectedHeap() :
CollectedHeap(),
_gc_overhead_counter(0),
_service_thread(nullptr),
_periodic_gc_task(nullptr),
_free_arena_memory_task(nullptr),
Expand Down
11 changes: 11 additions & 0 deletions src/hotspot/share/gc/g1/g1CollectedHeap.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -169,6 +169,17 @@ class G1CollectedHeap : public CollectedHeap {
friend class G1CheckRegionAttrTableClosure;

private:
// GC Overhead Limit functionality related members.
//
// The goal is to return null for allocations prematurely (before actually going
// OOME) when both GC CPU usage is high (>= GCTimeLimit) and little free memory
// is available (< GCHeapFreeLimit), so that applications can either exit
// gracefully or try to keep running by easing off memory pressure.
uintx _gc_overhead_counter; // The number of consecutive garbage collections we were over the limits.

void update_gc_overhead_counter();
bool gc_overhead_limit_exceeded();

G1ServiceThread* _service_thread;
G1ServiceTask* _periodic_gc_task;
G1MonotonicArenaFreeMemoryTask* _free_arena_memory_task;
Expand Down
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/shared/gc_globals.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -477,7 +477,7 @@
"Estimate of footprint other than Java Heap") \
range(0, max_uintx) \
\
product(bool, UseGCOverheadLimit, true, \
product(bool, UseGCOverheadLimit, falseInDebug, \
"Use policy to limit of proportion of time spent in GC " \
"before an OutOfMemory error is thrown") \
\
Expand Down
97 changes: 97 additions & 0 deletions test/hotspot/jtreg/gc/TestUseGCOverheadLimit.java
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

package gc;

/*
* @test id=Parallel
* @requires vm.gc.Parallel & false
* @requires !vm.debug
* @summary Verifies that the UseGCOverheadLimit functionality works in Parallel GC.
* @library /test/lib
* @run driver gc.TestUseGCOverheadLimit Parallel
*/

/*
* @test id=G1
* @requires vm.gc.G1
* @requires !vm.debug
* @summary Verifies that the UseGCOverheadLimit functionality works in G1 GC.
* @library /test/lib
* @run driver gc.TestUseGCOverheadLimit G1
*/

import java.util.Arrays;
import java.util.stream.Stream;

import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;

public class TestUseGCOverheadLimit {
    // Threshold of consecutive over-limit GCs before the VM gives up. Passed
    // explicitly via -XX:GCOverheadLimitThreshold below so the expected log
    // message does not silently depend on the flag's default value.
    private static final int OVERHEAD_LIMIT_THRESHOLD = 5;

    public static void main(String args[]) throws Exception {
        // Collector-specific tuning: squeeze the heap so that the free-space
        // limit (GCHeapFreeLimit) is actually reached by the allocation loop.
        String[] parallelArgs = {
            "-XX:+UseParallelGC",
            "-XX:NewSize=122m",
            "-XX:SurvivorRatio=99",
            "-XX:GCHeapFreeLimit=10"
        };
        String[] g1Args = {
            "-XX:+UseG1GC",
            "-XX:GCHeapFreeLimit=5"
        };

        String[] selectedArgs = args[0].equals("G1") ? g1Args : parallelArgs;

        final String[] commonArgs = {
            "-XX:ParallelGCThreads=1", // Make GCs take longer.
            "-XX:+UseGCOverheadLimit",
            "-XX:GCOverheadLimitThreshold=" + OVERHEAD_LIMIT_THRESHOLD,
            "-Xlog:gc=debug",
            "-XX:GCTimeLimit=90", // Ease the CPU requirement a little.
            "-Xmx128m",
            Allocating.class.getName()
        };

        String[] vmArgs = Stream.concat(Arrays.stream(selectedArgs), Arrays.stream(commonArgs)).toArray(String[]::new);
        OutputAnalyzer output = ProcessTools.executeLimitedTestJava(vmArgs);
        // The child VM must fail (OutOfMemoryError due to exceeding the limit).
        output.shouldNotHaveExitValue(0);

        System.out.println(output.getStdout());

        output.stdoutShouldContain("GC Overhead Limit exceeded too often (" + OVERHEAD_LIMIT_THRESHOLD + ").");
    }

    static class Allocating {
        public static void main(String[] args) {
            Object[] cache = new Object[1024 * 1024 * 2];

            // Allocate random objects, keeping around data, causing garbage
            // collections. The live set eventually exceeds the 128m heap,
            // keeping the heap nearly full while GCs dominate the run time.
            for (int i = 0; i < 1024 * 1024 * 30; i++) {
                Object[] obj = new Object[10];
                cache[i % cache.length] = obj;
            }

            // Keep `cache` reachable until the end of the program.
            System.out.println(cache);
        }
    }
}