@@ -39,10 +39,27 @@ type pageID uintptr
39
39
// base address for all 0-byte allocations
40
40
var zerobase uintptr
41
41
42
+ // Determine whether to initiate a GC.
43
+ // Currently the primitive heuristic we use will start a new
44
+ // concurrent GC when approximately half the available space
45
+ // made available by the last GC cycle has been used.
46
+ // If the GC is already working no need to trigger another one.
47
+ // This should establish a feedback loop where if the GC does not
48
+ // have sufficient time to complete then more memory will be
49
+ // requested from the OS increasing heap size thus allow future
50
+ // GCs more time to complete.
51
+ // memstat.heap_alloc and memstat.next_gc reads have benign races
52
+ // A false negative simple does not start a GC, a false positive
53
+ // will start a GC needlessly. Neither have correctness issues.
54
+ func shouldtriggergc () bool {
55
+ return memstats .heap_alloc + memstats .heap_alloc * 3 / 4 >= memstats .next_gc && atomicloaduint (& bggc .working ) == 0
56
+ }
57
+
42
58
// Allocate an object of size bytes.
43
59
// Small objects are allocated from the per-P cache's free lists.
44
60
// Large objects (> 32 kB) are allocated straight from the heap.
45
61
func mallocgc (size uintptr , typ * _type , flags uint32 ) unsafe.Pointer {
62
+ shouldhelpgc := false
46
63
if size == 0 {
47
64
return unsafe .Pointer (& zerobase )
48
65
}
@@ -144,6 +161,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
144
161
systemstack (func () {
145
162
mCache_Refill (c , tinySizeClass )
146
163
})
164
+ shouldhelpgc = true
147
165
s = c .alloc [tinySizeClass ]
148
166
v = s .freelist
149
167
}
@@ -174,6 +192,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
174
192
systemstack (func () {
175
193
mCache_Refill (c , int32 (sizeclass ))
176
194
})
195
+ shouldhelpgc = true
177
196
s = c .alloc [sizeclass ]
178
197
v = s .freelist
179
198
}
@@ -191,6 +210,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
191
210
c .local_cachealloc += intptr (size )
192
211
} else {
193
212
var s * mspan
213
+ shouldhelpgc = true
194
214
systemstack (func () {
195
215
s = largeAlloc (size , uint32 (flags ))
196
216
})
@@ -345,8 +365,15 @@ marked:
345
365
}
346
366
}
347
367
348
- if memstats . heap_alloc >= memstats . next_gc / 2 {
368
+ if shouldtriggergc () {
349
369
gogc (0 )
370
+ } else if shouldhelpgc && atomicloaduint (& bggc .working ) == 1 {
371
+ // bggc.lock not taken since race on bggc.working is benign.
372
+ // At worst we don't call gchelpwork.
373
+ // Delay the gchelpwork until the epilogue so that it doesn't
374
+ // interfere with the inner working of malloc such as
375
+ // mcache refills that might happen while doing the gchelpwork
376
+ systemstack (gchelpwork )
350
377
}
351
378
352
379
return x
@@ -466,14 +493,25 @@ func gogc(force int32) {
466
493
releasem (mp )
467
494
mp = nil
468
495
469
- semacquire (& worldsema , false )
470
-
471
- if force == 0 && memstats .heap_alloc < memstats .next_gc {
472
- // typically threads which lost the race to grab
473
- // worldsema exit here when gc is done.
474
- semrelease (& worldsema )
475
- return
496
+ if force == 0 {
497
+ lock (& bggc .lock )
498
+ if ! bggc .started {
499
+ bggc .working = 1
500
+ bggc .started = true
501
+ go backgroundgc ()
502
+ } else if bggc .working == 0 {
503
+ bggc .working = 1
504
+ ready (bggc .g )
505
+ }
506
+ unlock (& bggc .lock )
507
+ } else {
508
+ gcwork (force )
476
509
}
510
+ }
511
+
512
+ func gcwork (force int32 ) {
513
+
514
+ semacquire (& worldsema , false )
477
515
478
516
// Pick up the remaining unswept/not being swept spans concurrently
479
517
for gosweepone () != ^ uintptr (0 ) {
@@ -482,14 +520,17 @@ func gogc(force int32) {
482
520
483
521
// Ok, we're doing it! Stop everybody else
484
522
485
- startTime := nanotime ()
486
- mp = acquirem ()
523
+ mp := acquirem ()
487
524
mp .gcing = 1
488
525
releasem (mp )
489
526
gctimer .count ++
490
527
if force == 0 {
491
528
gctimer .cycle .sweepterm = nanotime ()
492
529
}
530
+ // Pick up the remaining unswept/not being swept spans before we STW
531
+ for gosweepone () != ^ uintptr (0 ) {
532
+ sweep .nbgsweep ++
533
+ }
493
534
systemstack (stoptheworld )
494
535
systemstack (finishsweep_m ) // finish sweep before we start concurrent scan.
495
536
if force == 0 { // Do as much work concurrently as possible
@@ -500,7 +541,7 @@ func gogc(force int32) {
500
541
systemstack (gcscan_m )
501
542
gctimer .cycle .installmarkwb = nanotime ()
502
543
systemstack (stoptheworld )
503
- gcinstallmarkwb ( )
544
+ systemstack ( gcinstallmarkwb )
504
545
systemstack (starttheworld )
505
546
gctimer .cycle .mark = nanotime ()
506
547
systemstack (gcmark_m )
@@ -509,6 +550,7 @@ func gogc(force int32) {
509
550
systemstack (gcinstalloffwb_m )
510
551
}
511
552
553
+ startTime := nanotime ()
512
554
if mp != acquirem () {
513
555
throw ("gogc: rescheduled" )
514
556
}
@@ -527,6 +569,7 @@ func gogc(force int32) {
527
569
eagersweep := force >= 2
528
570
for i := 0 ; i < n ; i ++ {
529
571
if i > 0 {
572
+ // refresh start time if doing a second GC
530
573
startTime = nanotime ()
531
574
}
532
575
// switch to g0, call gc, then switch back
@@ -579,8 +622,8 @@ func GCcheckmarkdisable() {
579
622
// gctimes records the time in nanoseconds of each phase of the concurrent GC.
580
623
type gctimes struct {
581
624
sweepterm int64 // stw
582
- scan int64 // stw
583
- installmarkwb int64
625
+ scan int64
626
+ installmarkwb int64 // stw
584
627
mark int64
585
628
markterm int64 // stw
586
629
sweep int64
@@ -601,7 +644,7 @@ type gcchronograph struct {
601
644
602
645
var gctimer gcchronograph
603
646
604
- // GCstarttimes initializes the gc timess . All previous timess are lost.
647
+ // GCstarttimes initializes the gc times . All previous times are lost.
605
648
func GCstarttimes (verbose int64 ) {
606
649
gctimer = gcchronograph {verbose : verbose }
607
650
}
@@ -655,6 +698,11 @@ func calctimes() gctimes {
655
698
// the information from the most recent Concurrent GC cycle. Calls from the
656
699
// application to runtime.GC() are ignored.
657
700
func GCprinttimes () {
701
+ if gctimer .verbose == 0 {
702
+ println ("GC timers not enabled" )
703
+ return
704
+ }
705
+
658
706
// Explicitly put times on the heap so printPhase can use it.
659
707
times := new (gctimes )
660
708
* times = calctimes ()
0 commit comments