Ruby 3.2.1p31 (2023-02-08 revision 31819e82c88c6f8ecfaeb162519bfa26a14b21fd)
gc.c
1/**********************************************************************
2
3 gc.c -
4
5 $Author$
6 created at: Tue Oct 5 09:44:46 JST 1993
7
8 Copyright (C) 1993-2007 Yukihiro Matsumoto
9 Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10 Copyright (C) 2000 Information-technology Promotion Agency, Japan
11
12**********************************************************************/
13
14#define rb_data_object_alloc rb_data_object_alloc
15#define rb_data_typed_object_alloc rb_data_typed_object_alloc
16
17#include "ruby/internal/config.h"
18#ifdef _WIN32
19# include "ruby/ruby.h"
20#endif
21
22#include <signal.h>
23
24#define sighandler_t ruby_sighandler_t
25
26#ifndef _WIN32
27#include <unistd.h>
28#include <sys/mman.h>
29#endif
30
31#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
32# include "wasm/setjmp.h"
33# include "wasm/machine.h"
34#else
35# include <setjmp.h>
36#endif
37#include <stdarg.h>
38#include <stdio.h>
39
40/* MALLOC_HEADERS_BEGIN */
41#ifndef HAVE_MALLOC_USABLE_SIZE
42# ifdef _WIN32
43# define HAVE_MALLOC_USABLE_SIZE
44# define malloc_usable_size(a) _msize(a)
45# elif defined HAVE_MALLOC_SIZE
46# define HAVE_MALLOC_USABLE_SIZE
47# define malloc_usable_size(a) malloc_size(a)
48# endif
49#endif
50
51#ifdef HAVE_MALLOC_USABLE_SIZE
52# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
53/* Alternative malloc header is included in ruby/missing.h */
54# elif defined(HAVE_MALLOC_H)
55# include <malloc.h>
56# elif defined(HAVE_MALLOC_NP_H)
57# include <malloc_np.h>
58# elif defined(HAVE_MALLOC_MALLOC_H)
59# include <malloc/malloc.h>
60# endif
61#endif
62
63#if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
64/* LIST_HEAD conflicts with sys/queue.h on macOS */
65# include <sys/user.h>
66#endif
67/* MALLOC_HEADERS_END */
68
69#ifdef HAVE_SYS_TIME_H
70# include <sys/time.h>
71#endif
72
73#ifdef HAVE_SYS_RESOURCE_H
74# include <sys/resource.h>
75#endif
76
77#if defined _WIN32 || defined __CYGWIN__
78# include <windows.h>
79#elif defined(HAVE_POSIX_MEMALIGN)
80#elif defined(HAVE_MEMALIGN)
81# include <malloc.h>
82#endif
83
84#include <sys/types.h>
85
86#ifdef __EMSCRIPTEN__
87#include <emscripten.h>
88#endif
89
90#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
91# include <mach/task.h>
92# include <mach/mach_init.h>
93# include <mach/mach_port.h>
94#endif
95#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */
96
97#include "constant.h"
98#include "debug_counter.h"
99#include "eval_intern.h"
100#include "gc.h"
101#include "id_table.h"
102#include "internal.h"
103#include "internal/class.h"
104#include "internal/complex.h"
105#include "internal/cont.h"
106#include "internal/error.h"
107#include "internal/eval.h"
108#include "internal/gc.h"
109#include "internal/hash.h"
110#include "internal/imemo.h"
111#include "internal/io.h"
112#include "internal/numeric.h"
113#include "internal/object.h"
114#include "internal/proc.h"
115#include "internal/rational.h"
116#include "internal/sanitizers.h"
117#include "internal/struct.h"
118#include "internal/symbol.h"
119#include "internal/thread.h"
120#include "internal/variable.h"
121#include "internal/warnings.h"
122#include "mjit.h"
123#include "probes.h"
124#include "regint.h"
125#include "ruby/debug.h"
126#include "ruby/io.h"
127#include "ruby/re.h"
128#include "ruby/st.h"
129#include "ruby/thread.h"
130#include "ruby/util.h"
131#include "ruby_assert.h"
132#include "ruby_atomic.h"
133#include "symbol.h"
134#include "transient_heap.h"
135#include "vm_core.h"
136#include "vm_sync.h"
137#include "vm_callinfo.h"
138#include "ractor_core.h"
139
140#include "builtin.h"
141#include "shape.h"
142
143#define rb_setjmp(env) RUBY_SETJMP(env)
144#define rb_jmp_buf rb_jmpbuf_t
145#undef rb_data_object_wrap
146
147#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
148#define MAP_ANONYMOUS MAP_ANON
149#endif
150
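/* Overflow-checked size_t addition: .left is set when x + y wraps around,
 * .right holds the (possibly wrapped) sum.  Reuses struct
 * rbimpl_size_mul_overflow_tag so it composes with rbimpl_size_mul_overflow()
 * in the helpers below. */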
151static inline struct rbimpl_size_mul_overflow_tag
152size_add_overflow(size_t x, size_t y)
153{
154 size_t z;
155 bool p;
156#if 0
157
158#elif __has_builtin(__builtin_add_overflow)
159 p = __builtin_add_overflow(x, y, &z);
160
161#elif defined(DSIZE_T)
162 RB_GNUC_EXTENSION DSIZE_T dx = x;
163 RB_GNUC_EXTENSION DSIZE_T dy = y;
164 RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;
165 p = dz > SIZE_MAX;
166 z = (size_t)dz;
167
168#else
169 z = x + y;
170 p = z < y;
171
172#endif
173 return (struct rbimpl_size_mul_overflow_tag) { p, z, };
174}
175
176static inline struct rbimpl_size_mul_overflow_tag
177size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
178{
179 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
180 struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
181 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
182}
183
184static inline struct rbimpl_size_mul_overflow_tag
185size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
186{
187 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
188 struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
189 struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
190 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
191}
192
193PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
194
195static inline size_t
196size_mul_or_raise(size_t x, size_t y, VALUE exc)
197{
198 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
199 if (LIKELY(!t.left)) {
200 return t.right;
201 }
202 else if (rb_during_gc()) {
203 rb_memerror(); /* or...? */
204 }
205 else {
206 gc_raise(
207 exc,
208 "integer overflow: %"PRIuSIZE
209 " * %"PRIuSIZE
210 " > %"PRIuSIZE,
211 x, y, (size_t)SIZE_MAX);
212 }
213}
214
215size_t
216rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
217{
218 return size_mul_or_raise(x, y, exc);
219}
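/* Typical use: computing an allocation size that must not silently wrap, e.g.
 *
 *     size_t size = size_mul_or_raise(n, sizeof(struct heap_page *), rb_eRuntimeError);
 *     ptr = malloc(size);
 *
 * as heap_pages_expand_sorted_to() does below.  On overflow this raises exc,
 * or calls rb_memerror() when already inside GC. */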
220
221static inline size_t
222size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
223{
224 struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
225 if (LIKELY(!t.left)) {
226 return t.right;
227 }
228 else if (rb_during_gc()) {
229 rb_memerror(); /* or...? */
230 }
231 else {
232 gc_raise(
233 exc,
234 "integer overflow: %"PRIuSIZE
235 " * %"PRIuSIZE
236 " + %"PRIuSIZE
237 " > %"PRIuSIZE,
238 x, y, z, (size_t)SIZE_MAX);
239 }
240}
241
242size_t
243rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
244{
245 return size_mul_add_or_raise(x, y, z, exc);
246}
247
248static inline size_t
249size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
250{
251 struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
252 if (LIKELY(!t.left)) {
253 return t.right;
254 }
255 else if (rb_during_gc()) {
256 rb_memerror(); /* or...? */
257 }
258 else {
259 gc_raise(
260 exc,
261 "integer overflow: %"PRIdSIZE
262 " * %"PRIdSIZE
263 " + %"PRIdSIZE
264 " * %"PRIdSIZE
265 " > %"PRIdSIZE,
266 x, y, z, w, (size_t)SIZE_MAX);
267 }
268}
269
270#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
271/* trick the compiler into thinking an external signal handler uses this */
272volatile VALUE rb_gc_guarded_val;
273volatile VALUE *
274rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
275{
276 rb_gc_guarded_val = val;
277
278 return ptr;
279}
280#endif
281
282#ifndef GC_HEAP_INIT_SLOTS
283#define GC_HEAP_INIT_SLOTS 10000
284#endif
285#ifndef GC_HEAP_FREE_SLOTS
286#define GC_HEAP_FREE_SLOTS 4096
287#endif
288#ifndef GC_HEAP_GROWTH_FACTOR
289#define GC_HEAP_GROWTH_FACTOR 1.8
290#endif
291#ifndef GC_HEAP_GROWTH_MAX_SLOTS
292#define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 disables the cap */
293#endif
294#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
295#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
296#endif
297
298#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
299#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
300#endif
301#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
302#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
303#endif
304#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
305#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
306#endif
307
308#ifndef GC_MALLOC_LIMIT_MIN
309#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
310#endif
311#ifndef GC_MALLOC_LIMIT_MAX
312#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
313#endif
314#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
315#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
316#endif
317
318#ifndef GC_OLDMALLOC_LIMIT_MIN
319#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
320#endif
321#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
322#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
323#endif
324#ifndef GC_OLDMALLOC_LIMIT_MAX
325#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
326#endif
327
328#ifndef PRINT_MEASURE_LINE
329#define PRINT_MEASURE_LINE 0
330#endif
331#ifndef PRINT_ENTER_EXIT_TICK
332#define PRINT_ENTER_EXIT_TICK 0
333#endif
334#ifndef PRINT_ROOT_TICKS
335#define PRINT_ROOT_TICKS 0
336#endif
337
338#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
339#define TICK_TYPE 1
340
341typedef struct {
342 size_t heap_init_slots;
343 size_t heap_free_slots;
344 double growth_factor;
345 size_t growth_max_slots;
346
347 double heap_free_slots_min_ratio;
348 double heap_free_slots_goal_ratio;
349 double heap_free_slots_max_ratio;
350 double oldobject_limit_factor;
351
352 size_t malloc_limit_min;
353 size_t malloc_limit_max;
354 double malloc_limit_growth_factor;
355
356 size_t oldmalloc_limit_min;
357 size_t oldmalloc_limit_max;
358 double oldmalloc_limit_growth_factor;
359
360 VALUE gc_stress;
361} ruby_gc_params_t;
362
363static ruby_gc_params_t gc_params = {
364 GC_HEAP_INIT_SLOTS,
365 GC_HEAP_FREE_SLOTS,
366 GC_HEAP_GROWTH_FACTOR,
367 GC_HEAP_GROWTH_MAX_SLOTS,
368
369 GC_HEAP_FREE_SLOTS_MIN_RATIO,
370 GC_HEAP_FREE_SLOTS_GOAL_RATIO,
371 GC_HEAP_FREE_SLOTS_MAX_RATIO,
372 GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
373
374 GC_MALLOC_LIMIT_MIN,
375 GC_MALLOC_LIMIT_MAX,
376 GC_MALLOC_LIMIT_GROWTH_FACTOR,
377
378 GC_OLDMALLOC_LIMIT_MIN,
379 GC_OLDMALLOC_LIMIT_MAX,
380 GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
381
382 FALSE,
383};
384
385/* GC_DEBUG:
386 * enable to embed GC debugging information.
387 */
388#ifndef GC_DEBUG
389#define GC_DEBUG 0
390#endif
391
392/* RGENGC_DEBUG:
393 * 1: basic information
394 * 2: remember set operation
395 * 3: mark
396 * 4:
397 * 5: sweep
398 */
399#ifndef RGENGC_DEBUG
400#ifdef RUBY_DEVEL
401#define RGENGC_DEBUG -1
402#else
403#define RGENGC_DEBUG 0
404#endif
405#endif
406#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
407# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
408#elif defined(HAVE_VA_ARGS_MACRO)
409# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
410#else
411# define RGENGC_DEBUG_ENABLED(level) 0
412#endif
413int ruby_rgengc_debug;
414
415/* RGENGC_CHECK_MODE
416 * 0: disable all assertions
417 * 1: enable assertions (to debug RGenGC)
418 * 2: enable internal consistency check at each GC (for debugging)
419 * 3: enable internal consistency check at each GC steps (for debugging)
420 * 4: enable liveness check
421 * 5: show all references
422 */
423#ifndef RGENGC_CHECK_MODE
424#define RGENGC_CHECK_MODE 0
425#endif
426
427// Note: using RUBY_ASSERT_WHEN() would expand a macro in expr (info by nobu).
428#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
429
430/* RGENGC_OLD_NEWOBJ_CHECK
431 * 0: disable this check
432 * >0: make an OLD object on new object creation.
433 *
434 * Makes one OLD object per RGENGC_OLD_NEWOBJ_CHECK WB-protected object creations.
435 */
436#ifndef RGENGC_OLD_NEWOBJ_CHECK
437#define RGENGC_OLD_NEWOBJ_CHECK 0
438#endif
439
440/* RGENGC_PROFILE
441 * 0: disable RGenGC profiling
442 * 1: enable profiling for basic information
443 * 2: enable profiling for each type
444 */
445#ifndef RGENGC_PROFILE
446#define RGENGC_PROFILE 0
447#endif
448
449/* RGENGC_ESTIMATE_OLDMALLOC
450 * Enable/disable estimation of how much malloc'ed memory old objects hold on to.
451 * If the estimate exceeds the threshold, a full GC is invoked.
452 * 0: disable estimation.
453 * 1: enable estimation.
454 */
455#ifndef RGENGC_ESTIMATE_OLDMALLOC
456#define RGENGC_ESTIMATE_OLDMALLOC 1
457#endif
458
459/* RGENGC_FORCE_MAJOR_GC
460 * Force major/full GC if this macro is not 0.
461 */
462#ifndef RGENGC_FORCE_MAJOR_GC
463#define RGENGC_FORCE_MAJOR_GC 0
464#endif
465
466#ifndef GC_PROFILE_MORE_DETAIL
467#define GC_PROFILE_MORE_DETAIL 0
468#endif
469#ifndef GC_PROFILE_DETAIL_MEMORY
470#define GC_PROFILE_DETAIL_MEMORY 0
471#endif
472#ifndef GC_ENABLE_INCREMENTAL_MARK
473#define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
474#endif
475#ifndef GC_ENABLE_LAZY_SWEEP
476#define GC_ENABLE_LAZY_SWEEP 1
477#endif
478#ifndef CALC_EXACT_MALLOC_SIZE
479#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
480#endif
481#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
482#ifndef MALLOC_ALLOCATED_SIZE
483#define MALLOC_ALLOCATED_SIZE 0
484#endif
485#else
486#define MALLOC_ALLOCATED_SIZE 0
487#endif
488#ifndef MALLOC_ALLOCATED_SIZE_CHECK
489#define MALLOC_ALLOCATED_SIZE_CHECK 0
490#endif
491
492#ifndef GC_DEBUG_STRESS_TO_CLASS
493#define GC_DEBUG_STRESS_TO_CLASS 0
494#endif
495
496#ifndef RGENGC_OBJ_INFO
497#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
498#endif
499
500typedef enum {
501 GPR_FLAG_NONE = 0x000,
502 /* major reason */
503 GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
504 GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
505 GPR_FLAG_MAJOR_BY_SHADY = 0x004,
506 GPR_FLAG_MAJOR_BY_FORCE = 0x008,
507#if RGENGC_ESTIMATE_OLDMALLOC
508 GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
509#endif
510 GPR_FLAG_MAJOR_MASK = 0x0ff,
511
512 /* gc reason */
513 GPR_FLAG_NEWOBJ = 0x100,
514 GPR_FLAG_MALLOC = 0x200,
515 GPR_FLAG_METHOD = 0x400,
516 GPR_FLAG_CAPI = 0x800,
517 GPR_FLAG_STRESS = 0x1000,
518
519 /* others */
520 GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
521 GPR_FLAG_HAVE_FINALIZE = 0x4000,
522 GPR_FLAG_IMMEDIATE_MARK = 0x8000,
523 GPR_FLAG_FULL_MARK = 0x10000,
524 GPR_FLAG_COMPACT = 0x20000,
525
526 GPR_DEFAULT_REASON =
527 (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
528 GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
529} gc_profile_record_flag;
530
531typedef struct gc_profile_record {
532 unsigned int flags;
533
534 double gc_time;
535 double gc_invoke_time;
536
537 size_t heap_total_objects;
538 size_t heap_use_size;
539 size_t heap_total_size;
540 size_t moved_objects;
541
542#if GC_PROFILE_MORE_DETAIL
543 double gc_mark_time;
544 double gc_sweep_time;
545
546 size_t heap_use_pages;
547 size_t heap_live_objects;
548 size_t heap_free_objects;
549
550 size_t allocate_increase;
551 size_t allocate_limit;
552
553 double prepare_time;
554 size_t removing_objects;
555 size_t empty_objects;
556#if GC_PROFILE_DETAIL_MEMORY
557 long maxrss;
558 long minflt;
559 long majflt;
560#endif
561#endif
562#if MALLOC_ALLOCATED_SIZE
563 size_t allocated_size;
564#endif
565
566#if RGENGC_PROFILE > 0
567 size_t old_objects;
568 size_t remembered_normal_objects;
569 size_t remembered_shady_objects;
570#endif
571} gc_profile_record;
572
573struct RMoved {
574 VALUE flags;
575 VALUE dummy;
576 VALUE destination;
577 shape_id_t original_shape_id;
578};
579
580#define RMOVED(obj) ((struct RMoved *)(obj))
581
582typedef struct RVALUE {
583 union {
584 struct {
585 VALUE flags; /* always 0 for freed obj */
586 struct RVALUE *next;
587 } free;
588 struct RMoved moved;
589 struct RBasic basic;
590 struct RObject object;
591 struct RClass klass;
592 struct RFloat flonum;
593 struct RString string;
594 struct RArray array;
595 struct RRegexp regexp;
596 struct RHash hash;
597 struct RData data;
598 struct RTypedData typeddata;
599 struct RStruct rstruct;
600 struct RBignum bignum;
601 struct RFile file;
602 struct RMatch match;
603 struct RRational rational;
604 struct RComplex complex;
605 struct RSymbol symbol;
606 union {
607 rb_cref_t cref;
608 struct vm_svar svar;
609 struct vm_throw_data throw_data;
610 struct vm_ifunc ifunc;
611 struct MEMO memo;
612 struct rb_method_entry_struct ment;
613 const rb_iseq_t iseq;
614 rb_env_t env;
615 struct rb_imemo_tmpbuf_struct alloc;
616 rb_ast_t ast;
617 } imemo;
618 struct {
619 struct RBasic basic;
620 VALUE v1;
621 VALUE v2;
622 VALUE v3;
623 } values;
624 } as;
625
626 /* Start of RVALUE_OVERHEAD.
627 * Do not directly read these members from the RVALUE as they're located
628 * at the end of the slot (which may differ in size depending on the size
629 * pool). */
630#if RACTOR_CHECK_MODE
631 uint32_t _ractor_belonging_id;
632#endif
633#if GC_DEBUG
634 const char *file;
635 int line;
636#endif
637} RVALUE;
638
639#if RACTOR_CHECK_MODE
640# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, _ractor_belonging_id))
641#elif GC_DEBUG
642# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, file))
643#else
644# define RVALUE_OVERHEAD 0
645#endif
646
647STATIC_ASSERT(sizeof_rvalue, sizeof(RVALUE) == (SIZEOF_VALUE * 5) + RVALUE_OVERHEAD);
648STATIC_ASSERT(alignof_rvalue, RUBY_ALIGNOF(RVALUE) == SIZEOF_VALUE);
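/* For reference: on a typical 64-bit build with RVALUE_OVERHEAD == 0 this makes
 * the base slot 5 * 8 == 40 bytes; the size pools set up in rb_objspace_alloc()
 * use power-of-two multiples of BASE_SLOT_SIZE as their slot sizes. */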
649
650typedef uintptr_t bits_t;
651enum {
652 BITS_SIZE = sizeof(bits_t),
653 BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
654};
655#define popcount_bits rb_popcount_intptr
656
657struct heap_page_header {
658 struct heap_page *page;
659};
660
661struct heap_page_body {
662 struct heap_page_header header;
663 /* char gap[]; */
664 /* RVALUE values[]; */
665};
666
667struct gc_list {
668 VALUE *varptr;
669 struct gc_list *next;
670};
671
672#define STACK_CHUNK_SIZE 500
673
674typedef struct stack_chunk {
675 VALUE data[STACK_CHUNK_SIZE];
676 struct stack_chunk *next;
677} stack_chunk_t;
678
679typedef struct mark_stack {
680 stack_chunk_t *chunk;
681 stack_chunk_t *cache;
682 int index;
683 int limit;
684 size_t cache_size;
685 size_t unused_cache_size;
686} mark_stack_t;
687
688#define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap)
689#define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap)
690
691typedef struct rb_heap_struct {
692 struct heap_page *free_pages;
693 struct ccan_list_head pages;
694 struct heap_page *sweeping_page; /* iterator for .pages */
695 struct heap_page *compact_cursor;
696 uintptr_t compact_cursor_index;
697#if GC_ENABLE_INCREMENTAL_MARK
698 struct heap_page *pooled_pages;
699#endif
700 size_t total_pages; /* total page count in a heap */
701 size_t total_slots; /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
702} rb_heap_t;
703
704typedef struct rb_size_pool_struct {
705 short slot_size;
706
707 size_t allocatable_pages;
708
709 /* Basic statistics */
710 size_t total_allocated_pages;
711 size_t total_freed_pages;
712 size_t force_major_gc_count;
713
714#if USE_RVARGC
715 /* Sweeping statistics */
716 size_t freed_slots;
717 size_t empty_slots;
718#endif
719
720 rb_heap_t eden_heap;
721 rb_heap_t tomb_heap;
722} rb_size_pool_t;
723
724enum gc_mode {
725 gc_mode_none,
726 gc_mode_marking,
727 gc_mode_sweeping,
728 gc_mode_compacting,
729};
730
731typedef struct rb_objspace {
732 struct {
733 size_t limit;
734 size_t increase;
735#if MALLOC_ALLOCATED_SIZE
736 size_t allocated_size;
737 size_t allocations;
738#endif
739
740 } malloc_params;
741
742 struct {
743 unsigned int mode : 2;
744 unsigned int immediate_sweep : 1;
745 unsigned int dont_gc : 1;
746 unsigned int dont_incremental : 1;
747 unsigned int during_gc : 1;
748 unsigned int during_compacting : 1;
749 unsigned int gc_stressful: 1;
750 unsigned int has_hook: 1;
751 unsigned int during_minor_gc : 1;
752#if GC_ENABLE_INCREMENTAL_MARK
753 unsigned int during_incremental_marking : 1;
754#endif
755 unsigned int measure_gc : 1;
756 } flags;
757
758 rb_event_flag_t hook_events;
759 size_t total_allocated_objects;
760 VALUE next_object_id;
761
762 rb_size_pool_t size_pools[SIZE_POOL_COUNT];
763
764 struct {
765 rb_atomic_t finalizing;
766 } atomic_flags;
767
767
768 mark_stack_t mark_stack;
769 size_t marked_slots;
770
771 struct {
772 struct heap_page **sorted;
773 size_t allocated_pages;
774 size_t allocatable_pages;
775 size_t sorted_length;
776 uintptr_t range[2];
777 size_t freeable_pages;
778
779 /* final */
780 size_t final_slots;
781 VALUE deferred_final;
782 } heap_pages;
783
784 st_table *finalizer_table;
785
786 struct {
787 int run;
788 unsigned int latest_gc_info;
789 gc_profile_record *records;
790 gc_profile_record *current_record;
791 size_t next_index;
792 size_t size;
793
794#if GC_PROFILE_MORE_DETAIL
795 double prepare_time;
796#endif
797 double invoke_time;
798
799 size_t minor_gc_count;
800 size_t major_gc_count;
801 size_t compact_count;
802 size_t read_barrier_faults;
803#if RGENGC_PROFILE > 0
804 size_t total_generated_normal_object_count;
805 size_t total_generated_shady_object_count;
806 size_t total_shade_operation_count;
807 size_t total_promoted_count;
808 size_t total_remembered_normal_object_count;
809 size_t total_remembered_shady_object_count;
810
811#if RGENGC_PROFILE >= 2
812 size_t generated_normal_object_count_types[RUBY_T_MASK];
813 size_t generated_shady_object_count_types[RUBY_T_MASK];
814 size_t shade_operation_count_types[RUBY_T_MASK];
815 size_t promoted_types[RUBY_T_MASK];
816 size_t remembered_normal_object_count_types[RUBY_T_MASK];
817 size_t remembered_shady_object_count_types[RUBY_T_MASK];
818#endif
819#endif /* RGENGC_PROFILE */
820
821 /* temporary profiling space */
822 double gc_sweep_start_time;
823 size_t total_allocated_objects_at_gc_start;
824 size_t heap_used_at_gc_start;
825
826 /* basic statistics */
827 size_t count;
828 size_t total_freed_objects;
829 uint64_t total_time_ns;
830 struct timespec start_time;
831 } profile;
832 struct gc_list *global_list;
833
834 VALUE gc_stress_mode;
835
836 struct {
837 VALUE parent_object;
838 int need_major_gc;
839 size_t last_major_gc;
840 size_t uncollectible_wb_unprotected_objects;
841 size_t uncollectible_wb_unprotected_objects_limit;
842 size_t old_objects;
843 size_t old_objects_limit;
844
845#if RGENGC_ESTIMATE_OLDMALLOC
846 size_t oldmalloc_increase;
847 size_t oldmalloc_increase_limit;
848#endif
849
850#if RGENGC_CHECK_MODE >= 2
851 struct st_table *allrefs_table;
852 size_t error_count;
853#endif
854 } rgengc;
855
856 struct {
857 size_t considered_count_table[T_MASK];
858 size_t moved_count_table[T_MASK];
859 size_t moved_up_count_table[T_MASK];
860 size_t moved_down_count_table[T_MASK];
861 size_t total_moved;
862 } rcompactor;
863
864#if GC_ENABLE_INCREMENTAL_MARK
865 struct {
866 size_t pooled_slots;
867 size_t step_slots;
868 } rincgc;
869#endif
870
871 st_table *id_to_obj_tbl;
872 st_table *obj_to_id_tbl;
873
874#if GC_DEBUG_STRESS_TO_CLASS
875 VALUE stress_to_class;
876#endif
877} rb_objspace_t;
878
879
880#ifndef HEAP_PAGE_ALIGN_LOG
881/* default tiny heap size: 64KiB */
882#define HEAP_PAGE_ALIGN_LOG 16
883#endif
884
885#define BASE_SLOT_SIZE sizeof(RVALUE)
886
887#define CEILDIV(i, mod) roomof(i, mod)
888enum {
889 HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
890 HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
891 HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
892 HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)) / BASE_SLOT_SIZE),
893 HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, BASE_SLOT_SIZE), BITS_BITLENGTH),
894 HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
895};
896#define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
897#define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
898
899#if GC_ENABLE_INCREMENTAL_MARK && !defined(INCREMENTAL_MARK_STEP_ALLOCATIONS)
900# define INCREMENTAL_MARK_STEP_ALLOCATIONS 500
901#endif
902
903#undef INIT_HEAP_PAGE_ALLOC_USE_MMAP
904/* Must define either HEAP_PAGE_ALLOC_USE_MMAP or
905 * INIT_HEAP_PAGE_ALLOC_USE_MMAP. */
906
907#ifndef HAVE_MMAP
908/* Of course we can't use mmap if it is not available. */
909static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
910
911#elif defined(__wasm__)
912/* wasmtime does not have proper support for mmap.
913 * See https://github.com/bytecodealliance/wasmtime/blob/main/docs/WASI-rationale.md#why-no-mmap-and-friends
914 */
915static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
916
917#elif HAVE_CONST_PAGE_SIZE
918/* If we have the PAGE_SIZE and it is a constant, then we can directly use it. */
919static const bool HEAP_PAGE_ALLOC_USE_MMAP = (PAGE_SIZE <= HEAP_PAGE_SIZE);
920
921#elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
922/* If we can use the maximum page size. */
923static const bool HEAP_PAGE_ALLOC_USE_MMAP = true;
924
925#elif defined(PAGE_SIZE)
926/* If the PAGE_SIZE macro can be used dynamically. */
927# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (PAGE_SIZE <= HEAP_PAGE_SIZE)
928
929#elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
930/* If we can use sysconf to determine the page size. */
931# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE)
932
933#else
934/* Otherwise we can't determine the system page size, so don't use mmap. */
935static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
936#endif
937
938#ifdef INIT_HEAP_PAGE_ALLOC_USE_MMAP
939/* We can determine the system page size at runtime. */
940# define HEAP_PAGE_ALLOC_USE_MMAP (heap_page_alloc_use_mmap != false)
941
942static bool heap_page_alloc_use_mmap;
943#endif
944
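/* Per-page bookkeeping: slot counts, sweep/remember flags, the owning size
 * pool, an intrusive freelist of dead slots, and one bit per slot in each of
 * the GC bitmaps below. */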
945struct heap_page {
946 short slot_size;
947 short total_slots;
948 short free_slots;
949 short final_slots;
950 struct {
951 unsigned int before_sweep : 1;
952 unsigned int has_remembered_objects : 1;
953 unsigned int has_uncollectible_shady_objects : 1;
954 unsigned int in_tomb : 1;
955 } flags;
956
957 rb_size_pool_t *size_pool;
958
959 struct heap_page *free_next;
960 uintptr_t start;
961 RVALUE *freelist;
962 struct ccan_list_node page_node;
963
964 bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
965 /* the following three bitmaps are cleared at the beginning of full GC */
966 bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
967 bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
968 bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
969
970 /* If set, the object is not movable */
971 bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
972};
973
974/*
975 * When asan is enabled, this will prohibit writing to the freelist until it is unlocked
976 */
977static void
978asan_lock_freelist(struct heap_page *page)
979{
980 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
981}
982
983/*
984 * When asan is enabled, this will enable the ability to write to the freelist
985 */
986static void
987asan_unlock_freelist(struct heap_page *page)
988{
989 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
990}
991
992#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
993#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
994#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
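/* These lookups rely on every page body being allocated on a HEAP_PAGE_ALIGN
 * boundary: clearing the low HEAP_PAGE_ALIGN_LOG bits of any slot address
 * yields the page body, whose header points back at its struct heap_page. */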
995
996#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK) / BASE_SLOT_SIZE)
997#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
998#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
999#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
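/* Worked example (assuming a 64-bit build with RVALUE_OVERHEAD == 0, so
 * BASE_SLOT_SIZE == 40 and BITS_BITLENGTH == 64): an object located 130 * 40
 * bytes into its page has NUM_IN_PAGE == 130, so its bit lives in bitmap word
 * BITMAP_INDEX == 130 / 64 == 2 at bit BITMAP_OFFSET == 130 % 64 == 2. */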
1000
1001/* Bitmap Operations */
1002#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
1003#define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
1004#define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
1005
1006/* getting bitmap */
1007#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
1008#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
1009#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
1010#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
1011#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
1012
1013#define GC_SWEEP_PAGES_FREEABLE_PER_STEP 3
1014
1015/* Aliases */
1016#define rb_objspace (*rb_objspace_of(GET_VM()))
1017#define rb_objspace_of(vm) ((vm)->objspace)
1018
1019#define ruby_initial_gc_stress gc_params.gc_stress
1020
1021VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
1022
1023#define malloc_limit objspace->malloc_params.limit
1024#define malloc_increase objspace->malloc_params.increase
1025#define malloc_allocated_size objspace->malloc_params.allocated_size
1026#define heap_pages_sorted objspace->heap_pages.sorted
1027#define heap_allocated_pages objspace->heap_pages.allocated_pages
1028#define heap_pages_sorted_length objspace->heap_pages.sorted_length
1029#define heap_pages_lomem objspace->heap_pages.range[0]
1030#define heap_pages_himem objspace->heap_pages.range[1]
1031#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
1032#define heap_pages_final_slots objspace->heap_pages.final_slots
1033#define heap_pages_deferred_final objspace->heap_pages.deferred_final
1034#define size_pools objspace->size_pools
1035#define during_gc objspace->flags.during_gc
1036#define finalizing objspace->atomic_flags.finalizing
1037#define finalizer_table objspace->finalizer_table
1038#define global_list objspace->global_list
1039#define ruby_gc_stressful objspace->flags.gc_stressful
1040#define ruby_gc_stress_mode objspace->gc_stress_mode
1041#if GC_DEBUG_STRESS_TO_CLASS
1042#define stress_to_class objspace->stress_to_class
1043#else
1044#define stress_to_class 0
1045#endif
1046
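/* Flip the "#if 0" below to 1 to log every dont_gc_on/off/set transition with
 * its call site (file:line) to stderr while debugging GC-disable bookkeeping. */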
1047#if 0
1048#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
1049#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
1050#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)(b), __FILE__, __LINE__), (int)(b), objspace->flags.dont_gc = (b))
1051#define dont_gc_val() (objspace->flags.dont_gc)
1052#else
1053#define dont_gc_on() (objspace->flags.dont_gc = 1)
1054#define dont_gc_off() (objspace->flags.dont_gc = 0)
1055#define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
1056#define dont_gc_val() (objspace->flags.dont_gc)
1057#endif
1058
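/* Under RGENGC_CHECK_MODE, assert that a value read back from the 2-bit
 * flags.mode field is a known enum gc_mode; otherwise pass it through. */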
1059static inline enum gc_mode
1060gc_mode_verify(enum gc_mode mode)
1061{
1062#if RGENGC_CHECK_MODE > 0
1063 switch (mode) {
1064 case gc_mode_none:
1065 case gc_mode_marking:
1066 case gc_mode_sweeping:
1067 case gc_mode_compacting:
1068 break;
1069 default:
1070 rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
1071 }
1072#endif
1073 return mode;
1074}
1075
1076static inline bool
1077has_sweeping_pages(rb_objspace_t *objspace)
1078{
1079 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1080 if (SIZE_POOL_EDEN_HEAP(&size_pools[i])->sweeping_page) {
1081 return TRUE;
1082 }
1083 }
1084 return FALSE;
1085}
1086
1087static inline size_t
1088heap_eden_total_pages(rb_objspace_t *objspace)
1089{
1090 size_t count = 0;
1091 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1092 count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_pages;
1093 }
1094 return count;
1095}
1096
1097static inline size_t
1098heap_eden_total_slots(rb_objspace_t *objspace)
1099{
1100 size_t count = 0;
1101 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1102 count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_slots;
1103 }
1104 return count;
1105}
1106
1107static inline size_t
1108heap_tomb_total_pages(rb_objspace_t *objspace)
1109{
1110 size_t count = 0;
1111 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1112 count += SIZE_POOL_TOMB_HEAP(&size_pools[i])->total_pages;
1113 }
1114 return count;
1115}
1116
1117static inline size_t
1118heap_allocatable_pages(rb_objspace_t *objspace)
1119{
1120 size_t count = 0;
1121 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1122 count += size_pools[i].allocatable_pages;
1123 }
1124 return count;
1125}
1126
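/* Slots that could still be created without growing the heap: each
 * allocatable page of a pool whose slot_size is k * BASE_SLOT_SIZE contributes
 * roughly HEAP_PAGE_OBJ_LIMIT / k slots. */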
1127static inline size_t
1128heap_allocatable_slots(rb_objspace_t *objspace)
1129{
1130 size_t count = 0;
1131 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1132 rb_size_pool_t *size_pool = &size_pools[i];
1133 int slot_size_multiple = size_pool->slot_size / BASE_SLOT_SIZE;
1134 count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;
1135 }
1136 return count;
1137}
1138
1139static inline size_t
1140total_allocated_pages(rb_objspace_t *objspace)
1141{
1142 size_t count = 0;
1143 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1144 rb_size_pool_t *size_pool = &size_pools[i];
1145 count += size_pool->total_allocated_pages;
1146 }
1147 return count;
1148}
1149
1150static inline size_t
1151total_freed_pages(rb_objspace_t *objspace)
1152{
1153 size_t count = 0;
1154 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1155 rb_size_pool_t *size_pool = &size_pools[i];
1156 count += size_pool->total_freed_pages;
1157 }
1158 return count;
1159}
1160
1161#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
1162#define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
1163
1164#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
1165#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
1166#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
1167#if GC_ENABLE_INCREMENTAL_MARK
1168#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
1169#else
1170#define is_incremental_marking(objspace) FALSE
1171#endif
1172#if GC_ENABLE_INCREMENTAL_MARK
1173#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
1174#else
1175#define will_be_incremental_marking(objspace) FALSE
1176#endif
1177#if GC_ENABLE_INCREMENTAL_MARK
1178#define GC_INCREMENTAL_SWEEP_SLOT_COUNT 2048
1179#endif
1180#define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
1181
1182#if SIZEOF_LONG == SIZEOF_VOIDP
1183# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
1184# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
1185#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1186# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
1187# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
1188 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
1189#else
1190# error not supported
1191#endif
1192
1193#define RANY(o) ((RVALUE*)(o))
1194
1195struct RZombie {
1196 struct RBasic basic;
1197 VALUE next;
1198 void (*dfree)(void *);
1199 void *data;
1200};
1201
1202#define RZOMBIE(o) ((struct RZombie *)(o))
1203
1204#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
1205
1206#if RUBY_MARK_FREE_DEBUG
1207int ruby_gc_debug_indent = 0;
1208#endif
1210int ruby_disable_gc = 0;
1211int ruby_enable_autocompact = 0;
1212
1213void rb_iseq_mark(const rb_iseq_t *iseq);
1214void rb_iseq_update_references(rb_iseq_t *iseq);
1215void rb_iseq_free(const rb_iseq_t *iseq);
1216size_t rb_iseq_memsize(const rb_iseq_t *iseq);
1217void rb_vm_update_references(void *ptr);
1218
1219void rb_gcdebug_print_obj_condition(VALUE obj);
1220
1221static VALUE define_final0(VALUE obj, VALUE block);
1222
1223NORETURN(static void *gc_vraise(void *ptr));
1224NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
1225NORETURN(static void negative_size_allocation_error(const char *));
1226
1227static void init_mark_stack(mark_stack_t *stack);
1228
1229static int ready_to_gc(rb_objspace_t *objspace);
1230
1231static int garbage_collect(rb_objspace_t *, unsigned int reason);
1232
1233static int gc_start(rb_objspace_t *objspace, unsigned int reason);
1234static void gc_rest(rb_objspace_t *objspace);
1235
1236enum gc_enter_event {
1237 gc_enter_event_start,
1238 gc_enter_event_mark_continue,
1239 gc_enter_event_sweep_continue,
1240 gc_enter_event_rest,
1241 gc_enter_event_finalizer,
1242 gc_enter_event_rb_memerror,
1243};
1244
1245static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1246static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1247
1248static void gc_marks(rb_objspace_t *objspace, int full_mark);
1249static void gc_marks_start(rb_objspace_t *objspace, int full);
1250static void gc_marks_finish(rb_objspace_t *objspace);
1251static void gc_marks_rest(rb_objspace_t *objspace);
1252static void gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
1253
1254static void gc_sweep(rb_objspace_t *objspace);
1255static void gc_sweep_start(rb_objspace_t *objspace);
1256#if USE_RVARGC
1257static void gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool);
1258#endif
1259static void gc_sweep_finish(rb_objspace_t *objspace);
1260static int gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
1261static void gc_sweep_rest(rb_objspace_t *objspace);
1262static void gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap);
1263
1264static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
1265static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
1266static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
1267static void gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr);
1268NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr));
1269static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
1270
1271static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
1272static int gc_mark_stacked_objects_all(rb_objspace_t *);
1273static void gc_grey(rb_objspace_t *objspace, VALUE ptr);
1274
1275static inline int gc_mark_set(rb_objspace_t *objspace, VALUE obj);
1276NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));
1277
1278static void push_mark_stack(mark_stack_t *, VALUE);
1279static int pop_mark_stack(mark_stack_t *, VALUE *);
1280static size_t mark_stack_size(mark_stack_t *stack);
1281static void shrink_stack_chunk_cache(mark_stack_t *stack);
1282
1283static size_t obj_memsize_of(VALUE obj, int use_all_types);
1284static void gc_verify_internal_consistency(rb_objspace_t *objspace);
1285static int gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj);
1286static int gc_verify_heap_pages(rb_objspace_t *objspace);
1287
1288static void gc_stress_set(rb_objspace_t *objspace, VALUE flag);
1289static VALUE gc_disable_no_rest(rb_objspace_t *);
1290
1291static double getrusage_time(void);
1292static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
1293static inline void gc_prof_timer_start(rb_objspace_t *);
1294static inline void gc_prof_timer_stop(rb_objspace_t *);
1295static inline void gc_prof_mark_timer_start(rb_objspace_t *);
1296static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
1297static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
1298static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1299static inline void gc_prof_set_malloc_info(rb_objspace_t *);
1300static inline void gc_prof_set_heap_info(rb_objspace_t *);
1301
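/* If _thing refers to a T_MOVED cell (its object was compacted away),
 * overwrite _thing with the forwarding destination recorded in struct RMoved. */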
1302#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
1303 if (gc_object_moved_p((_objspace), (VALUE)(_thing))) { \
1304 *(_type *)&(_thing) = (_type)RMOVED(_thing)->destination; \
1305 } \
1306} while (0)
1307
1308#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
1309
1310#define gc_prof_record(objspace) (objspace)->profile.current_record
1311#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1312
1313#ifdef HAVE_VA_ARGS_MACRO
1314# define gc_report(level, objspace, ...) \
1315 if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1316#else
1317# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
1318#endif
1319PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
1320static const char *obj_info(VALUE obj);
1321static const char *obj_type_name(VALUE obj);
1322
1323/*
1324 * 1 - TSC (H/W Time Stamp Counter)
1325 * 2 - getrusage
1326 */
1327#ifndef TICK_TYPE
1328#define TICK_TYPE 1
1329#endif
1330
1331#if USE_TICK_T
1332
1333#if TICK_TYPE == 1
1334/* the following code is only for internal tuning. */
1335
1336/* Source code to use RDTSC is quoted and modified from
1337 * https://www.mcs.anl.gov/~kazutomo/rdtsc.html
1338 * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
1339 */
1340
1341#if defined(__GNUC__) && defined(__i386__)
1342typedef unsigned long long tick_t;
1343#define PRItick "llu"
1344static inline tick_t
1345tick(void)
1346{
1347 unsigned long long int x;
1348 __asm__ __volatile__ ("rdtsc" : "=A" (x));
1349 return x;
1350}
1351
1352#elif defined(__GNUC__) && defined(__x86_64__)
1353typedef unsigned long long tick_t;
1354#define PRItick "llu"
1355
1356static __inline__ tick_t
1357tick(void)
1358{
1359 unsigned long hi, lo;
1360 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
1361 return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
1362}
1363
1364#elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
1365typedef unsigned long long tick_t;
1366#define PRItick "llu"
1367
1368static __inline__ tick_t
1369tick(void)
1370{
1371 unsigned long long val = __builtin_ppc_get_timebase();
1372 return val;
1373}
1374
1375/* Implementation for macOS PPC by @nobu
1376 * See: https://github.com/ruby/ruby/pull/5975#discussion_r890045558
1377 */
1378#elif defined(__POWERPC__) && defined(__APPLE__)
1379typedef unsigned long long tick_t;
1380#define PRItick "llu"
1381
1382static __inline__ tick_t
1383tick(void)
1384{
1385 unsigned long int upper, lower, tmp;
1386 # define mftbu(r) __asm__ volatile("mftbu %0" : "=r"(r))
1387 # define mftb(r) __asm__ volatile("mftb %0" : "=r"(r))
1388 do {
1389 mftbu(upper);
1390 mftb(lower);
1391 mftbu(tmp);
1392 } while (tmp != upper);
1393 return ((tick_t)upper << 32) | lower;
1394}
1395
1396#elif defined(__aarch64__) && defined(__GNUC__)
1397typedef unsigned long tick_t;
1398#define PRItick "lu"
1399
1400static __inline__ tick_t
1401tick(void)
1402{
1403 unsigned long val;
1404 __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
1405 return val;
1406}
1407
1408
1409#elif defined(_WIN32) && defined(_MSC_VER)
1410#include <intrin.h>
1411typedef unsigned __int64 tick_t;
1412#define PRItick "llu"
1413
1414static inline tick_t
1415tick(void)
1416{
1417 return __rdtsc();
1418}
1419
1420#else /* use clock */
1421typedef clock_t tick_t;
1422#define PRItick "llu"
1423
1424static inline tick_t
1425tick(void)
1426{
1427 return clock();
1428}
1429#endif /* TSC */
1430
1431#elif TICK_TYPE == 2
1432typedef double tick_t;
1433#define PRItick "4.9f"
1434
1435static inline tick_t
1436tick(void)
1437{
1438 return getrusage_time();
1439}
1440#else /* TICK_TYPE */
1441#error "choose tick type"
1442#endif /* TICK_TYPE */
1443
1444#define MEASURE_LINE(expr) do { \
1445 volatile tick_t start_time = tick(); \
1446 volatile tick_t end_time; \
1447 expr; \
1448 end_time = tick(); \
1449 fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
1450} while (0)
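/* For instance, MEASURE_LINE(gc_sweep_step(objspace, size_pool, heap)) would
 * print a line "0<TAB><elapsed ticks><TAB><the expression text>" to stderr. */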
1451
1452#else /* USE_TICK_T */
1453#define MEASURE_LINE(expr) expr
1454#endif /* USE_TICK_T */
1455
1456static inline void *
1457asan_unpoison_object_temporary(VALUE obj)
1458{
1459 void *ptr = asan_poisoned_object_p(obj);
1460 asan_unpoison_object(obj, false);
1461 return ptr;
1462}
1463
1464static inline void *
1465asan_poison_object_restore(VALUE obj, void *ptr)
1466{
1467 if (ptr) {
1468 asan_poison_object(obj);
1469 }
1470 return NULL;
1471}
1472
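/* Run the attached statement/block with obj temporarily unpoisoned; the
 * single-iteration for loop re-poisons obj on exit iff it was poisoned
 * before. */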
1473#define asan_unpoisoning_object(obj) \
1474 for (void *poisoned = asan_unpoison_object_temporary(obj), \
1475 *unpoisoning = &poisoned; /* flag to loop just once */ \
1476 unpoisoning; \
1477 unpoisoning = asan_poison_object_restore(obj, poisoned))
1478
1479#define FL_CHECK2(name, x, pred) \
1480 ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
1481 (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
1482#define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
1483#define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
1484#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
1485
1486#define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1487#define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1488#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
1489
1490#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1491#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1492#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1493
1494#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1495#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1496#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1497
1498#define RVALUE_OLD_AGE 3
1499#define RVALUE_AGE_SHIFT 5 /* FL_PROMOTED0 bit */
1500
1501static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj);
1502static int rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj);
1503static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
1504static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
1505static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
1506
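/* An object's age (0..RVALUE_OLD_AGE) lives in the FL_PROMOTED0/FL_PROMOTED1
 * flag bits (shifted by RVALUE_AGE_SHIFT).  Once it reaches RVALUE_OLD_AGE the
 * object is promoted to "old" and becomes uncollectible by minor GC. */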
1507static inline int
1508RVALUE_FLAGS_AGE(VALUE flags)
1509{
1510 return (int)((flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> RVALUE_AGE_SHIFT);
1511}
1512
1513static int
1514check_rvalue_consistency_force(const VALUE obj, int terminate)
1515{
1516 int err = 0;
1517 rb_objspace_t *objspace = &rb_objspace;
1518
1519 RB_VM_LOCK_ENTER_NO_BARRIER();
1520 {
1521 if (SPECIAL_CONST_P(obj)) {
1522 fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
1523 err++;
1524 }
1525 else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1526 /* check if it is in tomb_pages */
1527 struct heap_page *page = NULL;
1528 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1529 rb_size_pool_t *size_pool = &size_pools[i];
1530 ccan_list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
1531 if (page->start <= (uintptr_t)obj &&
1532 (uintptr_t)obj < (page->start + (page->total_slots * size_pool->slot_size))) {
1533 fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
1534 (void *)obj, (void *)page);
1535 err++;
1536 goto skip;
1537 }
1538 }
1539 }
1540 bp();
1541 fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
1542 err++;
1543 skip:
1544 ;
1545 }
1546 else {
1547 const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1548 const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1549 const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
1550 const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0, remembered_bit = marking_bit;
1551 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1552
1553 if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
1554 fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
1555 err++;
1556 }
1557 if (BUILTIN_TYPE(obj) == T_NONE) {
1558 fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
1559 err++;
1560 }
1561 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
1562 fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
1563 err++;
1564 }
1565
1566 obj_memsize_of((VALUE)obj, FALSE);
1567
1568 /* check generation
1569 *
1570 * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
1571 */
1572 if (age > 0 && wb_unprotected_bit) {
1573 fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
1574 err++;
1575 }
1576
1577 if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1578 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
1579 err++;
1580 }
1581
1582 if (!is_full_marking(objspace)) {
1583 if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1584 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
1585 obj_info(obj), age);
1586 err++;
1587 }
1588 if (remembered_bit && age != RVALUE_OLD_AGE) {
1589 fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
1590 obj_info(obj), age);
1591 err++;
1592 }
1593 }
1594
1595 /*
1596 * check coloring
1597 *
1598 * marking:false marking:true
1599 * marked:false white *invalid*
1600 * marked:true black grey
1601 */
1602 if (is_incremental_marking(objspace) && marking_bit) {
1603 if (!is_marking(objspace) && !mark_bit) {
1604 fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
1605 err++;
1606 }
1607 }
1608 }
1609 }
1610 RB_VM_LOCK_LEAVE_NO_BARRIER();
1611
1612 if (err > 0 && terminate) {
1613 rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
1614 }
1615 return err;
1616}
1617
1618#if RGENGC_CHECK_MODE == 0
1619static inline VALUE
1620check_rvalue_consistency(const VALUE obj)
1621{
1622 return obj;
1623}
1624#else
1625static VALUE
1626check_rvalue_consistency(const VALUE obj)
1627{
1628 check_rvalue_consistency_force(obj, TRUE);
1629 return obj;
1630}
1631#endif
1632
1633static inline int
1634gc_object_moved_p(rb_objspace_t * objspace, VALUE obj)
1635{
1636 if (RB_SPECIAL_CONST_P(obj)) {
1637 return FALSE;
1638 }
1639 else {
1640 void *poisoned = asan_unpoison_object_temporary(obj);
1641
1642 int ret = BUILTIN_TYPE(obj) == T_MOVED;
1643 /* Re-poison slot if it's not the one we want */
1644 if (poisoned) {
1645 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
1646 asan_poison_object(obj);
1647 }
1648 return ret;
1649 }
1650}
1651
1652static inline int
1653RVALUE_MARKED(VALUE obj)
1654{
1655 check_rvalue_consistency(obj);
1656 return RVALUE_MARK_BITMAP(obj) != 0;
1657}
1658
1659static inline int
1660RVALUE_PINNED(VALUE obj)
1661{
1662 check_rvalue_consistency(obj);
1663 return RVALUE_PIN_BITMAP(obj) != 0;
1664}
1665
1666static inline int
1667RVALUE_WB_UNPROTECTED(VALUE obj)
1668{
1669 check_rvalue_consistency(obj);
1670 return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1671}
1672
1673static inline int
1674RVALUE_MARKING(VALUE obj)
1675{
1676 check_rvalue_consistency(obj);
1677 return RVALUE_MARKING_BITMAP(obj) != 0;
1678}
1679
1680static inline int
1681RVALUE_REMEMBERED(VALUE obj)
1682{
1683 check_rvalue_consistency(obj);
1684 return RVALUE_MARKING_BITMAP(obj) != 0;
1685}
1686
1687static inline int
1688RVALUE_UNCOLLECTIBLE(VALUE obj)
1689{
1690 check_rvalue_consistency(obj);
1691 return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1692}
1693
1694static inline int
1695RVALUE_OLD_P_RAW(VALUE obj)
1696{
1697 const VALUE promoted = FL_PROMOTED0 | FL_PROMOTED1;
1698 return (RBASIC(obj)->flags & promoted) == promoted;
1699}
1700
1701static inline int
1702RVALUE_OLD_P(VALUE obj)
1703{
1704 check_rvalue_consistency(obj);
1705 return RVALUE_OLD_P_RAW(obj);
1706}
1707
1708#if RGENGC_CHECK_MODE || GC_DEBUG
1709static inline int
1710RVALUE_AGE(VALUE obj)
1711{
1712 check_rvalue_consistency(obj);
1713 return RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1714}
1715#endif
1716
1717static inline void
1718RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1719{
1720 MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1721 objspace->rgengc.old_objects++;
1722 rb_transient_heap_promote(obj);
1723
1724#if RGENGC_PROFILE >= 2
1725 objspace->profile.total_promoted_count++;
1726 objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;
1727#endif
1728}
1729
1730static inline void
1731RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
1732{
1733 RB_DEBUG_COUNTER_INC(obj_promote);
1734 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
1735}
1736
1737static inline VALUE
1738RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
1739{
1740 flags &= ~(FL_PROMOTED0 | FL_PROMOTED1);
1741 flags |= (age << RVALUE_AGE_SHIFT);
1742 return flags;
1743}
1744
1745/* set age to age+1 */
1746static inline void
1747RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
1748{
1749 VALUE flags = RBASIC(obj)->flags;
1750 int age = RVALUE_FLAGS_AGE(flags);
1751
1752 if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
1753 rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
1754 }
1755
1756 age++;
1757 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);
1758
1759 if (age == RVALUE_OLD_AGE) {
1760 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1761 }
1762 check_rvalue_consistency(obj);
1763}
1764
1765/* set age to RVALUE_OLD_AGE */
1766static inline void
1767RVALUE_AGE_SET_OLD(rb_objspace_t *objspace, VALUE obj)
1768{
1769 check_rvalue_consistency(obj);
1770 GC_ASSERT(!RVALUE_OLD_P(obj));
1771
1772 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE);
1773 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1774
1775 check_rvalue_consistency(obj);
1776}
1777
1778/* set age to RVALUE_OLD_AGE - 1 */
1779static inline void
1780RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
1781{
1782 check_rvalue_consistency(obj);
1783 GC_ASSERT(!RVALUE_OLD_P(obj));
1784
1785 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE - 1);
1786
1787 check_rvalue_consistency(obj);
1788}
1789
1790static inline void
1791RVALUE_DEMOTE_RAW(rb_objspace_t *objspace, VALUE obj)
1792{
1793 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1794 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
1795}
1796
1797static inline void
1798RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
1799{
1800 check_rvalue_consistency(obj);
1801 GC_ASSERT(RVALUE_OLD_P(obj));
1802
1803 if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
1804 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
1805 }
1806
1807 RVALUE_DEMOTE_RAW(objspace, obj);
1808
1809 if (RVALUE_MARKED(obj)) {
1810 objspace->rgengc.old_objects--;
1811 }
1812
1813 check_rvalue_consistency(obj);
1814}
1815
1816static inline void
1817RVALUE_AGE_RESET_RAW(VALUE obj)
1818{
1819 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1820}
1821
1822static inline void
1823RVALUE_AGE_RESET(VALUE obj)
1824{
1825 check_rvalue_consistency(obj);
1826 GC_ASSERT(!RVALUE_OLD_P(obj));
1827
1828 RVALUE_AGE_RESET_RAW(obj);
1829 check_rvalue_consistency(obj);
1830}
1831
1832static inline int
1833RVALUE_BLACK_P(VALUE obj)
1834{
1835 return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
1836}
1837
1838#if 0
1839static inline int
1840RVALUE_GREY_P(VALUE obj)
1841{
1842 return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
1843}
1844#endif
1845
1846static inline int
1847RVALUE_WHITE_P(VALUE obj)
1848{
1849 return RVALUE_MARKED(obj) == FALSE;
1850}
1851
1852/*
1853 --------------------------- ObjectSpace -----------------------------
1854*/
1855
1856static inline void *
1857calloc1(size_t n)
1858{
1859 return calloc(1, n);
1860}
1861
1862rb_objspace_t *
1863rb_objspace_alloc(void)
1864{
1865 rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
1866 objspace->flags.measure_gc = 1;
1867 malloc_limit = gc_params.malloc_limit_min;
1868
1869 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1870 rb_size_pool_t *size_pool = &size_pools[i];
1871
1872 size_pool->slot_size = (1 << i) * BASE_SLOT_SIZE;
1873
1874 ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
1875 ccan_list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
1876 }
1877
1878 dont_gc_on();
1879
1880 return objspace;
1881}
1882
1883static void free_stack_chunks(mark_stack_t *);
1884static void mark_stack_free_cache(mark_stack_t *);
1885static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
1886
1887void
1888rb_objspace_free(rb_objspace_t *objspace)
1889{
1890 if (is_lazy_sweeping(objspace))
1891 rb_bug("lazy sweeping underway when freeing object space");
1892
1893 if (objspace->profile.records) {
1894 free(objspace->profile.records);
1895 objspace->profile.records = 0;
1896 }
1897
1898 if (global_list) {
1899 struct gc_list *list, *next;
1900 for (list = global_list; list; list = next) {
1901 next = list->next;
1902 xfree(list);
1903 }
1904 }
1905 if (heap_pages_sorted) {
1906 size_t i;
1907 for (i = 0; i < heap_allocated_pages; ++i) {
1908 heap_page_free(objspace, heap_pages_sorted[i]);
1909 }
1910 free(heap_pages_sorted);
1911 heap_allocated_pages = 0;
1912 heap_pages_sorted_length = 0;
1913 heap_pages_lomem = 0;
1914 heap_pages_himem = 0;
1915
1916 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1917 rb_size_pool_t *size_pool = &size_pools[i];
1918 SIZE_POOL_EDEN_HEAP(size_pool)->total_pages = 0;
1919 SIZE_POOL_EDEN_HEAP(size_pool)->total_slots = 0;
1920 }
1921 }
1922 st_free_table(objspace->id_to_obj_tbl);
1923 st_free_table(objspace->obj_to_id_tbl);
1924
1925 free_stack_chunks(&objspace->mark_stack);
1926 mark_stack_free_cache(&objspace->mark_stack);
1927
1928 free(objspace);
1929}
1930
1931static void
1932heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
1933{
1934 struct heap_page **sorted;
1935 size_t size = size_mul_or_raise(next_length, sizeof(struct heap_page *), rb_eRuntimeError);
1936
1937 gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %"PRIdSIZE", size: %"PRIdSIZE"\n",
1938 next_length, size);
1939
1940 if (heap_pages_sorted_length > 0) {
1941 sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
1942 if (sorted) heap_pages_sorted = sorted;
1943 }
1944 else {
1945 sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
1946 }
1947
1948 if (sorted == 0) {
1949 rb_memerror();
1950 }
1951
1952 heap_pages_sorted_length = next_length;
1953}
1954
1955static void
1956heap_pages_expand_sorted(rb_objspace_t *objspace)
1957{
1958 /* usually, heap_allocatable_pages + heap_eden->total_pages == heap_pages_sorted_length,
1959 * because heap_allocatable_pages already includes heap_tomb->total_pages (tomb pages are recycled first).
1960 * However, when the tomb pages have no empty slots, brand-new pages must be created instead,
1961 * so the sorted array also reserves room for those extra allocatable pages (heap_tomb->total_pages).
1962 */
1963 size_t next_length = heap_allocatable_pages(objspace);
1964 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1965 rb_size_pool_t *size_pool = &size_pools[i];
1966 next_length += SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
1967 next_length += SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
1968 }
1969
1970 if (next_length > heap_pages_sorted_length) {
1971 heap_pages_expand_sorted_to(objspace, next_length);
1972 }
1973
1974 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
1975 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
1976}
1977
1978static void
1979size_pool_allocatable_pages_set(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t s)
1980{
1981 size_pool->allocatable_pages = s;
1982 heap_pages_expand_sorted(objspace);
1983}
1984
1985static inline void
1986heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1987{
1988 ASSERT_vm_locking();
1989
1990 RVALUE *p = (RVALUE *)obj;
1991
1992 asan_unpoison_object(obj, false);
1993
1994 asan_unlock_freelist(page);
1995
1996 p->as.free.flags = 0;
1997 p->as.free.next = page->freelist;
1998 page->freelist = p;
1999 asan_lock_freelist(page);
2000
2001 if (RGENGC_CHECK_MODE &&
2002 /* obj should belong to page */
2003 !(page->start <= (uintptr_t)obj &&
2004 (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
2005 obj % BASE_SLOT_SIZE == 0)) {
2006 rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
2007 }
2008
2009 asan_poison_object(obj);
2010 gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
2011}
2012
2013static inline void
2014heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
2015{
2016 asan_unlock_freelist(page);
2017 GC_ASSERT(page->free_slots != 0);
2018 GC_ASSERT(page->freelist != NULL);
2019
2020 page->free_next = heap->free_pages;
2021 heap->free_pages = page;
2022
2023 RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);
2024
2025 asan_lock_freelist(page);
2026}
2027
2028#if GC_ENABLE_INCREMENTAL_MARK
2029static inline void
2030heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
2031{
2032 asan_unlock_freelist(page);
2033 GC_ASSERT(page->free_slots != 0);
2034 GC_ASSERT(page->freelist != NULL);
2035
2036 page->free_next = heap->pooled_pages;
2037 heap->pooled_pages = page;
2038 objspace->rincgc.pooled_slots += page->free_slots;
2039
2040 asan_lock_freelist(page);
2041}
2042#endif
2043
2044static void
2045heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
2046{
2047 ccan_list_del(&page->page_node);
2048 heap->total_pages--;
2049 heap->total_slots -= page->total_slots;
2050}
2051
2052static void rb_aligned_free(void *ptr, size_t size);
2053
2054static void
2055heap_page_body_free(struct heap_page_body *page_body)
2056{
2057 GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
2058
2059 if (HEAP_PAGE_ALLOC_USE_MMAP) {
2060#ifdef HAVE_MMAP
2061 GC_ASSERT(HEAP_PAGE_SIZE % sysconf(_SC_PAGE_SIZE) == 0);
2062 if (munmap(page_body, HEAP_PAGE_SIZE)) {
2063 rb_bug("heap_page_body_free: munmap failed");
2064 }
2065#endif
2066 }
2067 else {
2068 rb_aligned_free(page_body, HEAP_PAGE_SIZE);
2069 }
2070}
2071
2072static void
2073heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
2074{
2075 heap_allocated_pages--;
2076 page->size_pool->total_freed_pages++;
2077 heap_page_body_free(GET_PAGE_BODY(page->start));
2078 free(page);
2079}
2080
2081static void
2082heap_pages_free_unused_pages(rb_objspace_t *objspace)
2083{
2084 size_t i, j;
2085
2086 bool has_pages_in_tomb_heap = FALSE;
2087 for (i = 0; i < SIZE_POOL_COUNT; i++) {
2088 if (!ccan_list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
2089 has_pages_in_tomb_heap = TRUE;
2090 break;
2091 }
2092 }
2093
2094 if (has_pages_in_tomb_heap) {
2095 for (i = j = 0; j < heap_allocated_pages; i++) {
2096 struct heap_page *page = heap_pages_sorted[i];
2097
2098 if (page->flags.in_tomb && page->free_slots == page->total_slots) {
2099 heap_unlink_page(objspace, SIZE_POOL_TOMB_HEAP(page->size_pool), page);
2100 heap_page_free(objspace, page);
2101 }
2102 else {
2103 if (i != j) {
2104 heap_pages_sorted[j] = page;
2105 }
2106 j++;
2107 }
2108 }
2109
2110 struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
2111 uintptr_t himem = (uintptr_t)hipage->start + (hipage->total_slots * hipage->slot_size);
2112 GC_ASSERT(himem <= heap_pages_himem);
2113 heap_pages_himem = himem;
2114
2115 GC_ASSERT(j == heap_allocated_pages);
2116 }
2117}
2118
2119static struct heap_page_body *
2120heap_page_body_allocate(void)
2121{
2122 struct heap_page_body *page_body;
2123
2124 if (HEAP_PAGE_ALLOC_USE_MMAP) {
2125#ifdef HAVE_MMAP
2126 GC_ASSERT(HEAP_PAGE_ALIGN % sysconf(_SC_PAGE_SIZE) == 0);
2127
2128 char *ptr = mmap(NULL, HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE,
2129 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
2130 if (ptr == MAP_FAILED) {
2131 return NULL;
2132 }
2133
2134 char *aligned = ptr + HEAP_PAGE_ALIGN;
2135 aligned -= ((VALUE)aligned & (HEAP_PAGE_ALIGN - 1));
2136 GC_ASSERT(aligned > ptr);
2137 GC_ASSERT(aligned <= ptr + HEAP_PAGE_ALIGN);
2138
2139 size_t start_out_of_range_size = aligned - ptr;
2140 GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
2141 if (start_out_of_range_size > 0) {
2142 if (munmap(ptr, start_out_of_range_size)) {
2143 rb_bug("heap_page_body_allocate: munmap failed for start");
2144 }
2145 }
2146
2147 size_t end_out_of_range_size = HEAP_PAGE_ALIGN - start_out_of_range_size;
2148 GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
2149 if (end_out_of_range_size > 0) {
2150 if (munmap(aligned + HEAP_PAGE_SIZE, end_out_of_range_size)) {
2151 rb_bug("heap_page_body_allocate: munmap failed for end");
2152 }
2153 }
2154
2155 page_body = (struct heap_page_body *)aligned;
2156#endif
2157 }
2158 else {
2159 page_body = rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
2160 }
2161
2162 GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
2163
2164 return page_body;
2165}
2166
2167static struct heap_page *
2168heap_page_allocate(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2169{
2170 uintptr_t start, end, p;
2171 struct heap_page *page;
2172 uintptr_t hi, lo, mid;
2173 size_t stride = size_pool->slot_size;
2174 unsigned int limit = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)))/(int)stride;
2175
2176 /* assign heap_page body (contains heap_page_header and RVALUEs) */
2177 struct heap_page_body *page_body = heap_page_body_allocate();
2178 if (page_body == 0) {
2179 rb_memerror();
2180 }
2181
2182 /* assign heap_page entry */
2183 page = calloc1(sizeof(struct heap_page));
2184 if (page == 0) {
2185 heap_page_body_free(page_body);
2186 rb_memerror();
2187 }
2188
2189 /* adjust obj_limit (object number available in this page) */
2190 start = (uintptr_t)((VALUE)page_body + sizeof(struct heap_page_header));
2191
2192 if (start % BASE_SLOT_SIZE != 0) {
2193 int delta = BASE_SLOT_SIZE - (start % BASE_SLOT_SIZE);
2194 start = start + delta;
2195 GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
2196
2197 /* Find a num in page that is evenly divisible by `stride`.
2198 * This is to ensure that objects are aligned with bit planes.
2199 * In other words, ensure there are an even number of objects
2200 * per bit plane. */
2201 if (NUM_IN_PAGE(start) == 1) {
2202 start += stride - BASE_SLOT_SIZE;
2203 }
2204
2205 GC_ASSERT(NUM_IN_PAGE(start) * BASE_SLOT_SIZE % stride == 0);
2206
2207 limit = (HEAP_PAGE_SIZE - (int)(start - (uintptr_t)page_body))/(int)stride;
2208 }
2209 end = start + (limit * (int)stride);
2210
2211 /* setup heap_pages_sorted */
2212 lo = 0;
2213 hi = (uintptr_t)heap_allocated_pages;
2214 while (lo < hi) {
2215 struct heap_page *mid_page;
2216
2217 mid = (lo + hi) / 2;
2218 mid_page = heap_pages_sorted[mid];
2219 if ((uintptr_t)mid_page->start < start) {
2220 lo = mid + 1;
2221 }
2222 else if ((uintptr_t)mid_page->start > start) {
2223 hi = mid;
2224 }
2225 else {
2226 rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
2227 }
2228 }
2229
2230 if (hi < (uintptr_t)heap_allocated_pages) {
2231 MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_allocated_pages - hi);
2232 }
2233
2234 heap_pages_sorted[hi] = page;
2235
2236 heap_allocated_pages++;
2237
2238 GC_ASSERT(heap_eden_total_pages(objspace) + heap_allocatable_pages(objspace) <= heap_pages_sorted_length);
2239 GC_ASSERT(heap_eden_total_pages(objspace) + heap_tomb_total_pages(objspace) == heap_allocated_pages - 1);
2240 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2241
2242 size_pool->total_allocated_pages++;
2243
2244 if (heap_allocated_pages > heap_pages_sorted_length) {
2245 rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
2246 heap_allocated_pages, heap_pages_sorted_length);
2247 }
2248
2249 if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
2250 if (heap_pages_himem < end) heap_pages_himem = end;
2251
2252 page->start = start;
2253 page->total_slots = limit;
2254 page->slot_size = size_pool->slot_size;
2255 page->size_pool = size_pool;
2256 page_body->header.page = page;
2257
2258 for (p = start; p != end; p += stride) {
2259 gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
2260 heap_page_add_freeobj(objspace, page, (VALUE)p);
2261 }
2262 page->free_slots = limit;
2263
2264 asan_lock_freelist(page);
2265 return page;
2266}
2267
2268static struct heap_page *
2269heap_page_resurrect(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2270{
2271 struct heap_page *page = 0, *next;
2272
2273 ccan_list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
2274 asan_unlock_freelist(page);
2275 if (page->freelist != NULL) {
2276 heap_unlink_page(objspace, &size_pool->tomb_heap, page);
2277 asan_lock_freelist(page);
2278 return page;
2279 }
2280 }
2281
2282 return NULL;
2283}
2284
2285static struct heap_page *
2286heap_page_create(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
2287{
2288 struct heap_page *page;
2289 const char *method = "recycle";
2290
2291 size_pool->allocatable_pages--;
2292
2293 page = heap_page_resurrect(objspace, size_pool);
2294
2295 if (page == NULL) {
2296 page = heap_page_allocate(objspace, size_pool);
2297 method = "allocate";
2298 }
2299 if (0) fprintf(stderr, "heap_page_create: %s - %p, "
2300 "heap_allocated_pages: %"PRIdSIZE", "
2301 "heap_allocated_pages: %"PRIdSIZE", "
2302 "tomb->total_pages: %"PRIdSIZE"\n",
2303 method, (void *)page, heap_pages_sorted_length, heap_allocated_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
2304 return page;
2305}
2306
2307static void
2308heap_add_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct heap_page *page)
2309{
2310 /* Adding to eden heap during incremental sweeping is forbidden */
2311 GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
2312 page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
2313 ccan_list_add_tail(&heap->pages, &page->page_node);
2314 heap->total_pages++;
2315 heap->total_slots += page->total_slots;
2316}
2317
2318static void
2319heap_assign_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2320{
2321 struct heap_page *page = heap_page_create(objspace, size_pool);
2322 heap_add_page(objspace, size_pool, heap, page);
2323 heap_add_freepage(heap, page);
2324}
2325
2326static void
2327heap_add_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, size_t add)
2328{
2329 size_t i;
2330
2331 size_pool_allocatable_pages_set(objspace, size_pool, add);
2332
2333 for (i = 0; i < add; i++) {
2334 heap_assign_page(objspace, size_pool, heap);
2335 }
2336
2337 GC_ASSERT(size_pool->allocatable_pages == 0);
2338}
2339
2340static size_t
2341heap_extend_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, size_t free_slots, size_t total_slots, size_t used)
2342{
2343 double goal_ratio = gc_params.heap_free_slots_goal_ratio;
2344 size_t next_used;
2345
2346 if (goal_ratio == 0.0) {
2347 next_used = (size_t)(used * gc_params.growth_factor);
2348 }
2349 else if (total_slots == 0) {
2350 int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
2351 next_used = (gc_params.heap_init_slots * multiple) / HEAP_PAGE_OBJ_LIMIT;
2352 }
2353 else {
2354 /* Find `f' where free_slots = f * total_slots * goal_ratio
2355 * => f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots)
2356 */
2357 double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
2358
2359 if (f > gc_params.growth_factor) f = gc_params.growth_factor;
2360 if (f < 1.0) f = 1.1;
2361
2362 next_used = (size_t)(f * used);
2363
2364 if (0) {
2365 fprintf(stderr,
2366 "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
2367 " G(%1.2f), f(%1.2f),"
2368 " used(%8"PRIuSIZE") => next_used(%8"PRIuSIZE")\n",
2369 free_slots, total_slots, free_slots/(double)total_slots,
2370 goal_ratio, f, used, next_used);
2371 }
2372 }
2373
2374 if (gc_params.growth_max_slots > 0) {
2375 size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
2376 if (next_used > max_used) next_used = max_used;
2377 }
2378
2379 size_t extend_page_count = next_used - used;
2380 /* Extend by at least 1 page. */
2381 if (extend_page_count == 0) extend_page_count = 1;
2382
2383 return extend_page_count;
2384}
2385
2386static int
2387heap_increment(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2388{
2389 if (size_pool->allocatable_pages > 0) {
2390 gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
2391 "heap_pages_inc: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
2392 heap_pages_sorted_length, size_pool->allocatable_pages, heap->total_pages);
2393
2394 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
2395 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2396
2397 heap_assign_page(objspace, size_pool, heap);
2398 return TRUE;
2399 }
2400 return FALSE;
2401}
2402
2403static void
2404gc_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2405{
2406 /* Continue marking if in incremental marking. */
2407 if (heap->free_pages == NULL && is_incremental_marking(objspace)) {
2408 gc_marks_continue(objspace, size_pool, heap);
2409 }
2410
2411 /* Continue sweeping if in lazy sweeping or the previous incremental
2412 * marking finished and did not yield a free page. */
2413 if (heap->free_pages == NULL && is_lazy_sweeping(objspace)) {
2414 gc_sweep_continue(objspace, size_pool, heap);
2415 }
2416}
2417
2418static void
2419heap_prepare(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2420{
2421 GC_ASSERT(heap->free_pages == NULL);
2422
2423 /* Continue incremental marking or lazy sweeping, if in any of those steps. */
2424 gc_continue(objspace, size_pool, heap);
2425
2426 /* If we still don't have a free page and are not allowed to create a new page,
2427 * we should start a new GC cycle. */
2428 if (heap->free_pages == NULL &&
2429 (will_be_incremental_marking(objspace) ||
2430 (heap_increment(objspace, size_pool, heap) == FALSE))) {
2431 if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2432 rb_memerror();
2433 }
2434 else {
2435 /* Do steps of incremental marking or lazy sweeping if the GC run permits. */
2436 gc_continue(objspace, size_pool, heap);
2437
2438 /* If we're not incremental marking (e.g. a minor GC) or finished
2439 * sweeping and still don't have a free page, then
2440 * gc_sweep_finish_size_pool should allow us to create a new page. */
2441 if (heap->free_pages == NULL && !heap_increment(objspace, size_pool, heap)) {
2442 if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE) {
2443 rb_bug("cannot create a new page after GC");
2444 }
2445 else { // Major GC is required, which will allow us to create new page
2446 if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2447 rb_memerror();
2448 }
2449 else {
2450 /* Do steps of incremental marking or lazy sweeping. */
2451 gc_continue(objspace, size_pool, heap);
2452
2453 if (heap->free_pages == NULL &&
2454 !heap_increment(objspace, size_pool, heap)) {
2455 rb_bug("cannot create a new page after major GC");
2456 }
2457 }
2458 }
2459 }
2460 }
2461 }
2462
2463 GC_ASSERT(heap->free_pages != NULL);
2464}
2465
2466void
2467rb_objspace_set_event_hook(const rb_event_flag_t event)
2468{
2469 rb_objspace_t *objspace = &rb_objspace;
2470 objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
2471 objspace->flags.has_hook = (objspace->hook_events != 0);
2472}
2473
2474static void
2475gc_event_hook_body(rb_execution_context_t *ec, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
2476{
2477 const VALUE *pc = ec->cfp->pc;
2478 if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
2479 /* increment PC because source line is calculated with PC-1 */
2480 ec->cfp->pc++;
2481 }
2482 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
2483 ec->cfp->pc = pc;
2484}
2485
2486#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
2487#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
2488
2489#define gc_event_hook_prep(objspace, event, data, prep) do { \
2490 if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
2491 prep; \
2492 gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
2493 } \
2494} while (0)
2495
2496#define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
2497
2498static inline VALUE
2499newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, VALUE obj)
2500{
2501#if !__has_feature(memory_sanitizer)
2502 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
2503 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2504#endif
2505 RVALUE *p = RANY(obj);
2506 p->as.basic.flags = flags;
2507 *((VALUE *)&p->as.basic.klass) = klass;
2508
2509#if RACTOR_CHECK_MODE
2510 rb_ractor_setup_belonging(obj);
2511#endif
2512
2513#if RGENGC_CHECK_MODE
2514 p->as.values.v1 = p->as.values.v2 = p->as.values.v3 = 0;
2515
2516 RB_VM_LOCK_ENTER_NO_BARRIER();
2517 {
2518 check_rvalue_consistency(obj);
2519
2520 GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
2521 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
2522 GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
2523 GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
2524
2525 if (flags & FL_PROMOTED1) {
2526 if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
2527 }
2528 else {
2529 if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
2530 }
2531 if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
2532 }
2533 RB_VM_LOCK_LEAVE_NO_BARRIER();
2534#endif
2535
2536 if (UNLIKELY(wb_protected == FALSE)) {
2537 ASSERT_vm_locking();
2538 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2539 }
2540
2541 // TODO: make it atomic, or ractor local
2542 objspace->total_allocated_objects++;
2543
2544#if RGENGC_PROFILE
2545 if (wb_protected) {
2546 objspace->profile.total_generated_normal_object_count++;
2547#if RGENGC_PROFILE >= 2
2548 objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
2549#endif
2550 }
2551 else {
2552 objspace->profile.total_generated_shady_object_count++;
2553#if RGENGC_PROFILE >= 2
2554 objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
2555#endif
2556 }
2557#endif
2558
2559#if GC_DEBUG
2560 RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
2561 GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
2562#endif
2563
2564 gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
2565
2566#if RGENGC_OLD_NEWOBJ_CHECK > 0
2567 {
2568 static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2569
2570 if (!is_incremental_marking(objspace) &&
2571 flags & FL_WB_PROTECTED && /* do not promote WB unprotected objects */
2572 ! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
2573 if (--newobj_cnt == 0) {
2574 newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2575
2576 gc_mark_set(objspace, obj);
2577 RVALUE_AGE_SET_OLD(objspace, obj);
2578
2579 rb_gc_writebarrier_remember(obj);
2580 }
2581 }
2582 }
2583#endif
2584 // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
2585 return obj;
2586}
2587
2588size_t
2589rb_gc_obj_slot_size(VALUE obj)
2590{
2591 return GET_HEAP_PAGE(obj)->slot_size - RVALUE_OVERHEAD;
2592}
2593
2594static inline size_t
2595size_pool_slot_size(unsigned char pool_id)
2596{
2597 GC_ASSERT(pool_id < SIZE_POOL_COUNT);
2598
2599 size_t slot_size = (1 << pool_id) * BASE_SLOT_SIZE;
2600
2601#if RGENGC_CHECK_MODE
2602 rb_objspace_t *objspace = &rb_objspace;
2603 GC_ASSERT(size_pools[pool_id].slot_size == (short)slot_size);
2604#endif
2605
2606 slot_size -= RVALUE_OVERHEAD;
2607
2608 return slot_size;
2609}
2610
2611size_t
2612rb_size_pool_slot_size(unsigned char pool_id)
2613{
2614 return size_pool_slot_size(pool_id);
2615}
2616
2617bool
2618rb_gc_size_allocatable_p(size_t size)
2619{
2620 return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
2621}
2622
2623static inline VALUE
2624ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache,
2625 size_t size_pool_idx)
2626{
2627 rb_ractor_newobj_size_pool_cache_t *size_pool_cache = &cache->size_pool_caches[size_pool_idx];
2628 RVALUE *p = size_pool_cache->freelist;
2629
2630#if GC_ENABLE_INCREMENTAL_MARK
2631 if (is_incremental_marking(objspace)) {
2632 // Not allowed to allocate without running an incremental marking step
2633 if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
2634 return Qfalse;
2635 }
2636
2637 if (p) {
2638 cache->incremental_mark_step_allocated_slots++;
2639 }
2640 }
2641#endif
2642
2643 if (p) {
2644 VALUE obj = (VALUE)p;
2645 MAYBE_UNUSED(const size_t) stride = size_pool_slot_size(size_pool_idx);
2646 size_pool_cache->freelist = p->as.free.next;
2647#if USE_RVARGC
2648 asan_unpoison_memory_region(p, stride, true);
2649#else
2650 asan_unpoison_object(obj, true);
2651#endif
2652#if RGENGC_CHECK_MODE
2653 GC_ASSERT(rb_gc_obj_slot_size(obj) == stride);
2654 // zero clear
2655 MEMZERO((char *)obj, char, stride);
2656#endif
2657 return obj;
2658 }
2659 else {
2660 return Qfalse;
2661 }
2662}
2663
2664static struct heap_page *
2665heap_next_free_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
2666{
2667 ASSERT_vm_locking();
2668
2669 struct heap_page *page;
2670
2671 if (heap->free_pages == NULL) {
2672 heap_prepare(objspace, size_pool, heap);
2673 }
2674
2675 page = heap->free_pages;
2676 heap->free_pages = page->free_next;
2677
2678 GC_ASSERT(page->free_slots != 0);
2679 RUBY_DEBUG_LOG("page:%p freelist:%p cnt:%d", (void *)page, (void *)page->freelist, page->free_slots);
2680
2681 asan_unlock_freelist(page);
2682
2683 return page;
2684}
2685
2686static inline void
2687ractor_cache_set_page(rb_ractor_newobj_cache_t *cache, size_t size_pool_idx,
2688 struct heap_page *page)
2689{
2690 gc_report(3, &rb_objspace, "ractor_set_cache: Using page %p\n", (void *)GET_PAGE_BODY(page->start));
2691
2692 rb_ractor_newobj_size_pool_cache_t *size_pool_cache = &cache->size_pool_caches[size_pool_idx];
2693
2694 GC_ASSERT(size_pool_cache->freelist == NULL);
2695 GC_ASSERT(page->free_slots != 0);
2696 GC_ASSERT(page->freelist != NULL);
2697
2698 size_pool_cache->using_page = page;
2699 size_pool_cache->freelist = page->freelist;
2700 page->free_slots = 0;
2701 page->freelist = NULL;
2702
2703 asan_unpoison_object((VALUE)size_pool_cache->freelist, false);
2704 GC_ASSERT(RB_TYPE_P((VALUE)size_pool_cache->freelist, T_NONE));
2705 asan_poison_object((VALUE)size_pool_cache->freelist);
2706}
2707
2708static inline VALUE
2709newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
2710{
2711 RVALUE *p = (RVALUE *)obj;
2712 p->as.values.v1 = v1;
2713 p->as.values.v2 = v2;
2714 p->as.values.v3 = v3;
2715 return obj;
2716}
2717
2718static inline size_t
2719size_pool_idx_for_size(size_t size)
2720{
2721#if USE_RVARGC
2722 size += RVALUE_OVERHEAD;
2723
2724 size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);
2725
2726 /* size_pool_idx is ceil(log2(slot_count)) */
2727 size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
2728
2729 if (size_pool_idx >= SIZE_POOL_COUNT) {
2730 rb_bug("size_pool_idx_for_size: allocation size too large");
2731 }
2732
2733#if RGENGC_CHECK_MODE
2734 rb_objspace_t *objspace = &rb_objspace;
2735 GC_ASSERT(size <= (size_t)size_pools[size_pool_idx].slot_size);
2736 if (size_pool_idx > 0) GC_ASSERT(size > (size_t)size_pools[size_pool_idx - 1].slot_size);
2737#endif
2738
2739 return size_pool_idx;
2740#else
2741 GC_ASSERT(size <= sizeof(RVALUE));
2742 return 0;
2743#endif
2744}
2745
2746static VALUE
2747newobj_alloc(rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx, bool vm_locked)
2748{
2749 rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
2750 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
2751 rb_ractor_newobj_cache_t *cache = &cr->newobj_cache;
2752
2753 VALUE obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2754
2755 if (UNLIKELY(obj == Qfalse)) {
2756 unsigned int lev;
2757 bool unlock_vm = false;
2758
2759 if (!vm_locked) {
2760 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2761 vm_locked = true;
2762 unlock_vm = true;
2763 }
2764
2765 {
2766 ASSERT_vm_locking();
2767
2768#if GC_ENABLE_INCREMENTAL_MARK
2769 if (is_incremental_marking(objspace)) {
2770 gc_marks_continue(objspace, size_pool, heap);
2771 cache->incremental_mark_step_allocated_slots = 0;
2772
2773 // Retry allocation after resetting incremental_mark_step_allocated_slots
2774 obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2775 }
2776#endif
2777
2778 if (obj == Qfalse) {
2779 // Get next free page (possibly running GC)
2780 struct heap_page *page = heap_next_free_page(objspace, size_pool, heap);
2781 ractor_cache_set_page(cache, size_pool_idx, page);
2782
2783 // Retry allocation after moving to new page
2784 obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
2785
2786 GC_ASSERT(obj != Qfalse);
2787 }
2788 }
2789
2790 if (unlock_vm) {
2791 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2792 }
2793 }
2794
2795 return obj;
2796}
2797
2798ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx));
2799
2800static inline VALUE
2801newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx)
2802{
2803 VALUE obj;
2804 unsigned int lev;
2805
2806 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2807 {
2808 if (UNLIKELY(during_gc || ruby_gc_stressful)) {
2809 if (during_gc) {
2810 dont_gc_on();
2811 during_gc = 0;
2812 rb_bug("object allocation during garbage collection phase");
2813 }
2814
2815 if (ruby_gc_stressful) {
2816 if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2817 rb_memerror();
2818 }
2819 }
2820 }
2821
2822 obj = newobj_alloc(objspace, cr, size_pool_idx, true);
2823#if SHAPE_IN_BASIC_FLAGS
2824 flags |= (VALUE)(size_pool_idx) << SHAPE_FLAG_SHIFT;
2825#endif
2826 newobj_init(klass, flags, wb_protected, objspace, obj);
2827
2828 gc_event_hook_prep(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj, newobj_fill(obj, 0, 0, 0));
2829 }
2830 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2831
2832 return obj;
2833}
2834
2835NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
2836 rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx));
2837NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
2838 rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx));
2839
2840static VALUE
2841newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2842{
2843 return newobj_slowpath(klass, flags, objspace, cr, TRUE, size_pool_idx);
2844}
2845
2846static VALUE
2847newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2848{
2849 return newobj_slowpath(klass, flags, objspace, cr, FALSE, size_pool_idx);
2850}
2851
2852static inline VALUE
2853newobj_of0(VALUE klass, VALUE flags, int wb_protected, rb_ractor_t *cr, size_t alloc_size)
2854{
2855 VALUE obj;
2856 rb_objspace_t *objspace = &rb_objspace;
2857
2858 RB_DEBUG_COUNTER_INC(obj_newobj);
2859 (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2860
2861#if GC_DEBUG_STRESS_TO_CLASS
2862 if (UNLIKELY(stress_to_class)) {
2863 long i, cnt = RARRAY_LEN(stress_to_class);
2864 for (i = 0; i < cnt; ++i) {
2865 if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
2866 }
2867 }
2868#endif
2869
2870 size_t size_pool_idx = size_pool_idx_for_size(alloc_size);
2871
2872 if (!UNLIKELY(during_gc ||
2873 ruby_gc_stressful ||
2874 gc_event_hook_available_p(objspace)) &&
2875 wb_protected) {
2876 obj = newobj_alloc(objspace, cr, size_pool_idx, false);
2877#if SHAPE_IN_BASIC_FLAGS
2878 flags |= (VALUE)size_pool_idx << SHAPE_FLAG_SHIFT;
2879#endif
2880 newobj_init(klass, flags, wb_protected, objspace, obj);
2881 }
2882 else {
2883 RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2884
2885 obj = wb_protected ?
2886 newobj_slowpath_wb_protected(klass, flags, objspace, cr, size_pool_idx) :
2887 newobj_slowpath_wb_unprotected(klass, flags, objspace, cr, size_pool_idx);
2888 }
2889
2890 return obj;
2891}
2892
2893static inline VALUE
2894newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
2895{
2896 VALUE obj = newobj_of0(klass, flags, wb_protected, GET_RACTOR(), alloc_size);
2897 return newobj_fill(obj, v1, v2, v3);
2898}
2899
2900static inline VALUE
2901newobj_of_cr(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
2902{
2903 VALUE obj = newobj_of0(klass, flags, wb_protected, cr, alloc_size);
2904 return newobj_fill(obj, v1, v2, v3);
2905}
2906
2907VALUE
2908rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
2909{
2910 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2911 return newobj_of(klass, flags, 0, 0, 0, FALSE, size);
2912}
2913
2914VALUE
2915rb_wb_protected_newobj_of(VALUE klass, VALUE flags, size_t size)
2916{
2917 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2918 return newobj_of(klass, flags, 0, 0, 0, TRUE, size);
2919}
2920
2921VALUE
2922rb_ec_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
2923{
2924 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2925 return newobj_of_cr(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
2926}
2927
2928/* for compatibility */
2929
2930VALUE
2931rb_newobj(void)
2932{
2933 return newobj_of(0, T_NONE, 0, 0, 0, FALSE, RVALUE_SIZE);
2934}
2935
2936static size_t
2937rb_obj_embedded_size(uint32_t numiv)
2938{
2939 return offsetof(struct RObject, as.ary) + (sizeof(VALUE) * numiv);
2940}
2941
2942static VALUE
2943rb_class_instance_allocate_internal(VALUE klass, VALUE flags, bool wb_protected)
2944{
2945 GC_ASSERT((flags & RUBY_T_MASK) == T_OBJECT);
2946 GC_ASSERT(flags & ROBJECT_EMBED);
2947
2948 size_t size;
2949#if USE_RVARGC
2950 uint32_t index_tbl_num_entries = RCLASS_EXT(klass)->max_iv_count;
2951
2952 size = rb_obj_embedded_size(index_tbl_num_entries);
2953 if (!rb_gc_size_allocatable_p(size)) {
2954 size = sizeof(struct RObject);
2955 }
2956#else
2957 size = sizeof(struct RObject);
2958#endif
2959
2960 VALUE obj = newobj_of(klass, flags, 0, 0, 0, wb_protected, size);
2961 RUBY_ASSERT(rb_shape_get_shape(obj)->type == SHAPE_ROOT ||
2962 rb_shape_get_shape(obj)->type == SHAPE_INITIAL_CAPACITY);
2963
2964 // Set the shape to the specific T_OBJECT shape which is always
2965 // SIZE_POOL_COUNT away from the root shape.
2966 ROBJECT_SET_SHAPE_ID(obj, ROBJECT_SHAPE_ID(obj) + SIZE_POOL_COUNT);
2967
2968#if RUBY_DEBUG
2969 RUBY_ASSERT(!rb_shape_obj_too_complex(obj));
2970 VALUE *ptr = ROBJECT_IVPTR(obj);
2971 for (size_t i = 0; i < ROBJECT_IV_CAPACITY(obj); i++) {
2972 ptr[i] = Qundef;
2973 }
2974#endif
2975
2976 return obj;
2977}
2978
2979VALUE
2980rb_newobj_of(VALUE klass, VALUE flags)
2981{
2982 if ((flags & RUBY_T_MASK) == T_OBJECT) {
2983 return rb_class_instance_allocate_internal(klass, (flags | ROBJECT_EMBED) & ~FL_WB_PROTECTED, flags & FL_WB_PROTECTED);
2984 }
2985 else {
2986 return newobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED, RVALUE_SIZE);
2987 }
2988}
2989
2990#define UNEXPECTED_NODE(func) \
2991 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
2992 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
2993
2994const char *
2995rb_imemo_name(enum imemo_type type)
2996{
2997 // put no default case to get a warning if an imemo type is missing
2998 switch (type) {
2999#define IMEMO_NAME(x) case imemo_##x: return #x;
3000 IMEMO_NAME(env);
3001 IMEMO_NAME(cref);
3002 IMEMO_NAME(svar);
3003 IMEMO_NAME(throw_data);
3004 IMEMO_NAME(ifunc);
3005 IMEMO_NAME(memo);
3006 IMEMO_NAME(ment);
3007 IMEMO_NAME(iseq);
3008 IMEMO_NAME(tmpbuf);
3009 IMEMO_NAME(ast);
3010 IMEMO_NAME(parser_strterm);
3011 IMEMO_NAME(callinfo);
3012 IMEMO_NAME(callcache);
3013 IMEMO_NAME(constcache);
3014#undef IMEMO_NAME
3015 }
3016 return "unknown";
3017}
3018
3019#undef rb_imemo_new
3020
3021VALUE
3022rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
3023{
3024 size_t size = RVALUE_SIZE;
3025 VALUE flags = T_IMEMO | (type << FL_USHIFT);
3026 return newobj_of(v0, flags, v1, v2, v3, TRUE, size);
3027}
3028
3029static VALUE
3030rb_imemo_tmpbuf_new(VALUE v1, VALUE v2, VALUE v3, VALUE v0)
3031{
3032 size_t size = sizeof(struct rb_imemo_tmpbuf_struct);
3033 VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
3034 return newobj_of(v0, flags, v1, v2, v3, FALSE, size);
3035}
3036
3037static VALUE
3038rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
3039{
3040 return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
3041}
3042
3043 rb_imemo_tmpbuf_t *
3044 rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
3045{
3046 return (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new((VALUE)buf, (VALUE)old_heap, (VALUE)cnt, 0);
3047}
3048
3049static size_t
3050imemo_memsize(VALUE obj)
3051{
3052 size_t size = 0;
3053 switch (imemo_type(obj)) {
3054 case imemo_ment:
3055 size += sizeof(RANY(obj)->as.imemo.ment.def);
3056 break;
3057 case imemo_iseq:
3058 size += rb_iseq_memsize((rb_iseq_t *)obj);
3059 break;
3060 case imemo_env:
3061 size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
3062 break;
3063 case imemo_tmpbuf:
3064 size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
3065 break;
3066 case imemo_ast:
3067 size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
3068 break;
3069 case imemo_cref:
3070 case imemo_svar:
3071 case imemo_throw_data:
3072 case imemo_ifunc:
3073 case imemo_memo:
3074 case imemo_parser_strterm:
3075 break;
3076 default:
3077 /* unreachable */
3078 break;
3079 }
3080 return size;
3081}
3082
3083#if IMEMO_DEBUG
3084VALUE
3085rb_imemo_new_debug(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0, const char *file, int line)
3086{
3087 VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
3088 fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
3089 return memo;
3090}
3091#endif
3092
3093MJIT_FUNC_EXPORTED VALUE
3094rb_class_allocate_instance(VALUE klass)
3095{
3096 return rb_class_instance_allocate_internal(klass, T_OBJECT | ROBJECT_EMBED, RGENGC_WB_PROTECTED_OBJECT);
3097}
3098
3099static inline void
3100rb_data_object_check(VALUE klass)
3101{
3102 if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
3103 rb_undef_alloc_func(klass);
3104 rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
3105 }
3106}
3107
3108VALUE
3109rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
3110{
3112 if (klass) rb_data_object_check(klass);
3113 return newobj_of(klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, FALSE, sizeof(struct RTypedData));
3114}
3115
3116VALUE
3117rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
3118{
3119 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
3120 DATA_PTR(obj) = xcalloc(1, size);
3121 return obj;
3122}
3123
3124VALUE
3125rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
3126{
3127 RBIMPL_NONNULL_ARG(type);
3128 if (klass) rb_data_object_check(klass);
3129 return newobj_of(klass, T_DATA, (VALUE)type, (VALUE)1, (VALUE)datap, type->flags & RUBY_FL_WB_PROTECTED, sizeof(struct RTypedData));
3130}
3131
3132VALUE
3133rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
3134{
3135 VALUE obj = rb_data_typed_object_wrap(klass, 0, type);
3136 DATA_PTR(obj) = xcalloc(1, size);
3137 return obj;
3138}
3139
3140size_t
3141rb_objspace_data_type_memsize(VALUE obj)
3142{
3143 if (RTYPEDDATA_P(obj)) {
3144 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
3145 const void *ptr = RTYPEDDATA_DATA(obj);
3146 if (ptr && type->function.dsize) {
3147 return type->function.dsize(ptr);
3148 }
3149 }
3150 return 0;
3151}
3152
3153const char *
3154rb_objspace_data_type_name(VALUE obj)
3155{
3156 if (RTYPEDDATA_P(obj)) {
3157 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
3158 }
3159 else {
3160 return 0;
3161 }
3162}
3163
3164static int
3165ptr_in_page_body_p(const void *ptr, const void *memb)
3166{
3167 struct heap_page *page = *(struct heap_page **)memb;
3168 uintptr_t p_body = (uintptr_t)GET_PAGE_BODY(page->start);
3169
3170 if ((uintptr_t)ptr >= p_body) {
3171 return (uintptr_t)ptr < (p_body + HEAP_PAGE_SIZE) ? 0 : 1;
3172 }
3173 else {
3174 return -1;
3175 }
3176}
3177
3178PUREFUNC(static inline struct heap_page * heap_page_for_ptr(rb_objspace_t *objspace, uintptr_t ptr);)
3179static inline struct heap_page *
3180heap_page_for_ptr(rb_objspace_t *objspace, uintptr_t ptr)
3181{
3182 struct heap_page **res;
3183
3184 if (ptr < (uintptr_t)heap_pages_lomem ||
3185 ptr > (uintptr_t)heap_pages_himem) {
3186 return NULL;
3187 }
3188
3189 res = bsearch((void *)ptr, heap_pages_sorted,
3190 (size_t)heap_allocated_pages, sizeof(struct heap_page *),
3191 ptr_in_page_body_p);
3192
3193 if (res) {
3194 return *res;
3195 }
3196 else {
3197 return NULL;
3198 }
3199}
3200
3201PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
3202static inline int
3203is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
3204{
3205 register uintptr_t p = (uintptr_t)ptr;
3206 register struct heap_page *page;
3207
3208 RB_DEBUG_COUNTER_INC(gc_isptr_trial);
3209
3210 if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
3211 RB_DEBUG_COUNTER_INC(gc_isptr_range);
3212
3213 if (p % BASE_SLOT_SIZE != 0) return FALSE;
3214 RB_DEBUG_COUNTER_INC(gc_isptr_align);
3215
3216 page = heap_page_for_ptr(objspace, (uintptr_t)ptr);
3217 if (page) {
3218 RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
3219 if (page->flags.in_tomb) {
3220 return FALSE;
3221 }
3222 else {
3223 if (p < page->start) return FALSE;
3224 if (p >= page->start + (page->total_slots * page->slot_size)) return FALSE;
3225 if ((NUM_IN_PAGE(p) * BASE_SLOT_SIZE) % page->slot_size != 0) return FALSE;
3226
3227 return TRUE;
3228 }
3229 }
3230 return FALSE;
3231}
3232
3233static enum rb_id_table_iterator_result
3234free_const_entry_i(VALUE value, void *data)
3235{
3236 rb_const_entry_t *ce = (rb_const_entry_t *)value;
3237 xfree(ce);
3238 return ID_TABLE_CONTINUE;
3239}
3240
3241void
3242rb_free_const_table(struct rb_id_table *tbl)
3243{
3244 rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
3245 rb_id_table_free(tbl);
3246}
3247
3248 // alive: if false, the target pointers may already have been freed.
3249 // Checking that requires the objspace parameter.
3250static void
3251vm_ccs_free(struct rb_class_cc_entries *ccs, int alive, rb_objspace_t *objspace, VALUE klass)
3252{
3253 if (ccs->entries) {
3254 for (int i=0; i<ccs->len; i++) {
3255 const struct rb_callcache *cc = ccs->entries[i].cc;
3256 if (!alive) {
3257 void *ptr = asan_unpoison_object_temporary((VALUE)cc);
3258 // ccs can be free'ed.
3259 if (is_pointer_to_heap(objspace, (void *)cc) &&
3260 IMEMO_TYPE_P(cc, imemo_callcache) &&
3261 cc->klass == klass) {
3262 // OK. maybe target cc.
3263 }
3264 else {
3265 if (ptr) {
3266 asan_poison_object((VALUE)cc);
3267 }
3268 continue;
3269 }
3270 if (ptr) {
3271 asan_poison_object((VALUE)cc);
3272 }
3273 }
3274 vm_cc_invalidate(cc);
3275 }
3276 ruby_xfree(ccs->entries);
3277 }
3278 ruby_xfree(ccs);
3279}
3280
3281void
3282rb_vm_ccs_free(struct rb_class_cc_entries *ccs)
3283{
3284 RB_DEBUG_COUNTER_INC(ccs_free);
3285 vm_ccs_free(ccs, TRUE, NULL, Qundef);
3286}
3287
3288 struct cc_tbl_i_data {
3289 rb_objspace_t *objspace;
3290 VALUE klass;
3291 bool alive;
3292};
3293
3294static enum rb_id_table_iterator_result
3295cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
3296{
3297 struct cc_tbl_i_data *data = data_ptr;
3298 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
3299 VM_ASSERT(vm_ccs_p(ccs));
3300 VM_ASSERT(id == ccs->cme->called_id);
3301
3302 if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
3303 rb_vm_ccs_free(ccs);
3304 return ID_TABLE_DELETE;
3305 }
3306 else {
3307 gc_mark(data->objspace, (VALUE)ccs->cme);
3308
3309 for (int i=0; i<ccs->len; i++) {
3310 VM_ASSERT(data->klass == ccs->entries[i].cc->klass);
3311 VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
3312
3313 gc_mark(data->objspace, (VALUE)ccs->entries[i].ci);
3314 gc_mark(data->objspace, (VALUE)ccs->entries[i].cc);
3315 }
3316 return ID_TABLE_CONTINUE;
3317 }
3318}
3319
3320static void
3321cc_table_mark(rb_objspace_t *objspace, VALUE klass)
3322{
3323 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
3324 if (cc_tbl) {
3325 struct cc_tbl_i_data data = {
3326 .objspace = objspace,
3327 .klass = klass,
3328 };
3329 rb_id_table_foreach(cc_tbl, cc_table_mark_i, &data);
3330 }
3331}
3332
3333static enum rb_id_table_iterator_result
3334cc_table_free_i(VALUE ccs_ptr, void *data_ptr)
3335{
3336 struct cc_tbl_i_data *data = data_ptr;
3337 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
3338 VM_ASSERT(vm_ccs_p(ccs));
3339 vm_ccs_free(ccs, data->alive, data->objspace, data->klass);
3340 return ID_TABLE_CONTINUE;
3341}
3342
3343static void
3344cc_table_free(rb_objspace_t *objspace, VALUE klass, bool alive)
3345{
3346 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
3347
3348 if (cc_tbl) {
3349 struct cc_tbl_i_data data = {
3350 .objspace = objspace,
3351 .klass = klass,
3352 .alive = alive,
3353 };
3354 rb_id_table_foreach_values(cc_tbl, cc_table_free_i, &data);
3355 rb_id_table_free(cc_tbl);
3356 }
3357}
3358
3359static enum rb_id_table_iterator_result
3360cvar_table_free_i(VALUE value, void * ctx)
3361{
3362 xfree((void *) value);
3363 return ID_TABLE_CONTINUE;
3364}
3365
3366void
3367rb_cc_table_free(VALUE klass)
3368{
3369 cc_table_free(&rb_objspace, klass, TRUE);
3370}
3371
3372static inline void
3373make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
3374{
3375 struct RZombie *zombie = RZOMBIE(obj);
3376 zombie->basic.flags = T_ZOMBIE | (zombie->basic.flags & FL_SEEN_OBJ_ID);
3377 zombie->dfree = dfree;
3378 zombie->data = data;
3379 VALUE prev, next = heap_pages_deferred_final;
3380 do {
3381 zombie->next = prev = next;
3382 next = RUBY_ATOMIC_VALUE_CAS(heap_pages_deferred_final, prev, obj);
3383 } while (next != prev);
3384
3385 struct heap_page *page = GET_HEAP_PAGE(obj);
3386 page->final_slots++;
3387 heap_pages_final_slots++;
3388}
3389
3390static inline void
3391make_io_zombie(rb_objspace_t *objspace, VALUE obj)
3392{
3393 rb_io_t *fptr = RANY(obj)->as.file.fptr;
3394 make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
3395}
3396
3397static void
3398obj_free_object_id(rb_objspace_t *objspace, VALUE obj)
3399{
3400 ASSERT_vm_locking();
3401 st_data_t o = (st_data_t)obj, id;
3402
3403 GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
3404 FL_UNSET(obj, FL_SEEN_OBJ_ID);
3405
3406 if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
3407 GC_ASSERT(id);
3408 st_delete(objspace->id_to_obj_tbl, &id, NULL);
3409 }
3410 else {
3411 rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
3412 }
3413}
3414
3415static int
3416obj_free(rb_objspace_t *objspace, VALUE obj)
3417{
3418 RB_DEBUG_COUNTER_INC(obj_free);
3419 // RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, obj_type_name(obj));
3420
3421 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);
3422
3423 switch (BUILTIN_TYPE(obj)) {
3424 case T_NIL:
3425 case T_FIXNUM:
3426 case T_TRUE:
3427 case T_FALSE:
3428 rb_bug("obj_free() called for broken object");
3429 break;
3430 default:
3431 break;
3432 }
3433
3434 if (FL_TEST(obj, FL_EXIVAR)) {
3435 rb_free_generic_ivar((VALUE)obj);
3436 FL_UNSET(obj, FL_EXIVAR);
3437 }
3438
3439 if (FL_TEST(obj, FL_SEEN_OBJ_ID) && !FL_TEST(obj, FL_FINALIZE)) {
3440 obj_free_object_id(objspace, obj);
3441 }
3442
3443 if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
3444
3445#if RGENGC_CHECK_MODE
3446#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
3447 CHECK(RVALUE_WB_UNPROTECTED);
3448 CHECK(RVALUE_MARKED);
3449 CHECK(RVALUE_MARKING);
3450 CHECK(RVALUE_UNCOLLECTIBLE);
3451#undef CHECK
3452#endif
3453
3454 switch (BUILTIN_TYPE(obj)) {
3455 case T_OBJECT:
3456 if (rb_shape_obj_too_complex(obj)) {
3457 RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
3458 rb_id_table_free(ROBJECT_IV_HASH(obj));
3459 }
3460 else if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
3461 RB_DEBUG_COUNTER_INC(obj_obj_embed);
3462 }
3463 else if (ROBJ_TRANSIENT_P(obj)) {
3464 RB_DEBUG_COUNTER_INC(obj_obj_transient);
3465 }
3466 else {
3467 xfree(RANY(obj)->as.object.as.heap.ivptr);
3468 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
3469 }
3470 break;
3471 case T_MODULE:
3472 case T_CLASS:
3473 rb_id_table_free(RCLASS_M_TBL(obj));
3474 cc_table_free(objspace, obj, FALSE);
3475 if (RCLASS_IVPTR(obj)) {
3476 xfree(RCLASS_IVPTR(obj));
3477 }
3478 if (RCLASS_CONST_TBL(obj)) {
3479 rb_free_const_table(RCLASS_CONST_TBL(obj));
3480 }
3481 if (RCLASS_CVC_TBL(obj)) {
3482 rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
3483 rb_id_table_free(RCLASS_CVC_TBL(obj));
3484 }
3485 rb_class_remove_subclass_head(obj);
3486 rb_class_remove_from_module_subclasses(obj);
3487 rb_class_remove_from_super_subclasses(obj);
3488 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
3489 xfree(RCLASS_SUPERCLASSES(obj));
3490 }
3491
3492#if SIZE_POOL_COUNT == 1
3493 if (RCLASS_EXT(obj))
3494 xfree(RCLASS_EXT(obj));
3495#endif
3496
3497 (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
3498 (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
3499 break;
3500 case T_STRING:
3501 rb_str_free(obj);
3502 break;
3503 case T_ARRAY:
3504 rb_ary_free(obj);
3505 break;
3506 case T_HASH:
3507#if USE_DEBUG_COUNTER
3508 switch (RHASH_SIZE(obj)) {
3509 case 0:
3510 RB_DEBUG_COUNTER_INC(obj_hash_empty);
3511 break;
3512 case 1:
3513 RB_DEBUG_COUNTER_INC(obj_hash_1);
3514 break;
3515 case 2:
3516 RB_DEBUG_COUNTER_INC(obj_hash_2);
3517 break;
3518 case 3:
3519 RB_DEBUG_COUNTER_INC(obj_hash_3);
3520 break;
3521 case 4:
3522 RB_DEBUG_COUNTER_INC(obj_hash_4);
3523 break;
3524 case 5:
3525 case 6:
3526 case 7:
3527 case 8:
3528 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
3529 break;
3530 default:
3531 GC_ASSERT(RHASH_SIZE(obj) > 8);
3532 RB_DEBUG_COUNTER_INC(obj_hash_g8);
3533 }
3534
3535 if (RHASH_AR_TABLE_P(obj)) {
3536 if (RHASH_AR_TABLE(obj) == NULL) {
3537 RB_DEBUG_COUNTER_INC(obj_hash_null);
3538 }
3539 else {
3540 RB_DEBUG_COUNTER_INC(obj_hash_ar);
3541 }
3542 }
3543 else {
3544 RB_DEBUG_COUNTER_INC(obj_hash_st);
3545 }
3546#endif
3547 if (/* RHASH_AR_TABLE_P(obj) */ !FL_TEST_RAW(obj, RHASH_ST_TABLE_FLAG)) {
3548 struct ar_table_struct *tab = RHASH(obj)->as.ar;
3549
3550 if (tab) {
3551 if (RHASH_TRANSIENT_P(obj)) {
3552 RB_DEBUG_COUNTER_INC(obj_hash_transient);
3553 }
3554 else {
3555 ruby_xfree(tab);
3556 }
3557 }
3558 }
3559 else {
3560 GC_ASSERT(RHASH_ST_TABLE_P(obj));
3561 st_free_table(RHASH(obj)->as.st);
3562 }
3563 break;
3564 case T_REGEXP:
3565 if (RANY(obj)->as.regexp.ptr) {
3566 onig_free(RANY(obj)->as.regexp.ptr);
3567 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
3568 }
3569 break;
3570 case T_DATA:
3571 if (DATA_PTR(obj)) {
3572 int free_immediately = FALSE;
3573 void (*dfree)(void *);
3574 void *data = DATA_PTR(obj);
3575
3576 if (RTYPEDDATA_P(obj)) {
3577 free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
3578 dfree = RANY(obj)->as.typeddata.type->function.dfree;
3579 if (0 && free_immediately == 0) {
3580 /* to expose non-free-immediate T_DATA */
3581 fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
3582 }
3583 }
3584 else {
3585 dfree = RANY(obj)->as.data.dfree;
3586 }
3587
3588 if (dfree) {
3589 if (dfree == RUBY_DEFAULT_FREE) {
3590 xfree(data);
3591 RB_DEBUG_COUNTER_INC(obj_data_xfree);
3592 }
3593 else if (free_immediately) {
3594 (*dfree)(data);
3595 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
3596 }
3597 else {
3598 make_zombie(objspace, obj, dfree, data);
3599 RB_DEBUG_COUNTER_INC(obj_data_zombie);
3600 return FALSE;
3601 }
3602 }
3603 else {
3604 RB_DEBUG_COUNTER_INC(obj_data_empty);
3605 }
3606 }
3607 break;
3608 case T_MATCH:
3609 if (RANY(obj)->as.match.rmatch) {
3610 struct rmatch *rm = RANY(obj)->as.match.rmatch;
3611#if USE_DEBUG_COUNTER
3612 if (rm->regs.num_regs >= 8) {
3613 RB_DEBUG_COUNTER_INC(obj_match_ge8);
3614 }
3615 else if (rm->regs.num_regs >= 4) {
3616 RB_DEBUG_COUNTER_INC(obj_match_ge4);
3617 }
3618 else if (rm->regs.num_regs >= 1) {
3619 RB_DEBUG_COUNTER_INC(obj_match_under4);
3620 }
3621#endif
3622 onig_region_free(&rm->regs, 0);
3623 if (rm->char_offset)
3624 xfree(rm->char_offset);
3625 xfree(rm);
3626
3627 RB_DEBUG_COUNTER_INC(obj_match_ptr);
3628 }
3629 break;
3630 case T_FILE:
3631 if (RANY(obj)->as.file.fptr) {
3632 make_io_zombie(objspace, obj);
3633 RB_DEBUG_COUNTER_INC(obj_file_ptr);
3634 return FALSE;
3635 }
3636 break;
3637 case T_RATIONAL:
3638 RB_DEBUG_COUNTER_INC(obj_rational);
3639 break;
3640 case T_COMPLEX:
3641 RB_DEBUG_COUNTER_INC(obj_complex);
3642 break;
3643 case T_MOVED:
3644 break;
3645 case T_ICLASS:
3646 /* Basically, T_ICLASS shares its table with the module */
3647 if (RICLASS_OWNS_M_TBL_P(obj)) {
3648 /* Method table is not shared for origin iclasses of classes */
3649 rb_id_table_free(RCLASS_M_TBL(obj));
3650 }
3651 if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
3652 rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
3653 }
3654 rb_class_remove_subclass_head(obj);
3655 cc_table_free(objspace, obj, FALSE);
3656 rb_class_remove_from_module_subclasses(obj);
3657 rb_class_remove_from_super_subclasses(obj);
3658#if !USE_RVARGC
3659 xfree(RCLASS_EXT(obj));
3660#endif
3661
3662 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
3663 break;
3664
3665 case T_FLOAT:
3666 RB_DEBUG_COUNTER_INC(obj_float);
3667 break;
3668
3669 case T_BIGNUM:
3670 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
3671 xfree(BIGNUM_DIGITS(obj));
3672 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
3673 }
3674 else {
3675 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
3676 }
3677 break;
3678
3679 case T_NODE:
3680 UNEXPECTED_NODE(obj_free);
3681 break;
3682
3683 case T_STRUCT:
3684 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
3685 RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
3686 RB_DEBUG_COUNTER_INC(obj_struct_embed);
3687 }
3688 else if (RSTRUCT_TRANSIENT_P(obj)) {
3689 RB_DEBUG_COUNTER_INC(obj_struct_transient);
3690 }
3691 else {
3692 xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
3693 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
3694 }
3695 break;
3696
3697 case T_SYMBOL:
3698 {
3699 rb_gc_free_dsymbol(obj);
3700 RB_DEBUG_COUNTER_INC(obj_symbol);
3701 }
3702 break;
3703
3704 case T_IMEMO:
3705 switch (imemo_type(obj)) {
3706 case imemo_ment:
3707 rb_free_method_entry(&RANY(obj)->as.imemo.ment);
3708 RB_DEBUG_COUNTER_INC(obj_imemo_ment);
3709 break;
3710 case imemo_iseq:
3711 rb_iseq_free(&RANY(obj)->as.imemo.iseq);
3712 RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
3713 break;
3714 case imemo_env:
3715 GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
3716 xfree((VALUE *)RANY(obj)->as.imemo.env.env);
3717 RB_DEBUG_COUNTER_INC(obj_imemo_env);
3718 break;
3719 case imemo_tmpbuf:
3720 xfree(RANY(obj)->as.imemo.alloc.ptr);
3721 RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
3722 break;
3723 case imemo_ast:
3724 rb_ast_free(&RANY(obj)->as.imemo.ast);
3725 RB_DEBUG_COUNTER_INC(obj_imemo_ast);
3726 break;
3727 case imemo_cref:
3728 RB_DEBUG_COUNTER_INC(obj_imemo_cref);
3729 break;
3730 case imemo_svar:
3731 RB_DEBUG_COUNTER_INC(obj_imemo_svar);
3732 break;
3733 case imemo_throw_data:
3734 RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
3735 break;
3736 case imemo_ifunc:
3737 RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
3738 break;
3739 case imemo_memo:
3740 RB_DEBUG_COUNTER_INC(obj_imemo_memo);
3741 break;
3742 case imemo_parser_strterm:
3743 RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
3744 break;
3745 case imemo_callinfo:
3746 RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
3747 break;
3748 case imemo_callcache:
3749 RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
3750 break;
3751 case imemo_constcache:
3752 RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
3753 break;
3754 }
3755 return TRUE;
3756
3757 default:
3758 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
3759 BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
3760 }
3761
3762 if (FL_TEST(obj, FL_FINALIZE)) {
3763 make_zombie(objspace, obj, 0, 0);
3764 return FALSE;
3765 }
3766 else {
3767 return TRUE;
3768 }
3769}
3770
3771
3772#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
3773#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
3774
3775static int
3776object_id_cmp(st_data_t x, st_data_t y)
3777{
3778 if (RB_BIGNUM_TYPE_P(x)) {
3779 return !rb_big_eql(x, y);
3780 }
3781 else {
3782 return x != y;
3783 }
3784}
3785
3786static st_index_t
3787object_id_hash(st_data_t n)
3788{
3789 if (RB_BIGNUM_TYPE_P(n)) {
3790 return FIX2LONG(rb_big_hash(n));
3791 }
3792 else {
3793 return st_numhash(n);
3794 }
3795}
3796static const struct st_hash_type object_id_hash_type = {
3797 object_id_cmp,
3798 object_id_hash,
3799};
3800
3801void
3802Init_heap(void)
3803{
3804 rb_objspace_t *objspace = &rb_objspace;
3805
3806#if defined(INIT_HEAP_PAGE_ALLOC_USE_MMAP)
3807 /* Need to determine if we can use mmap at runtime. */
3808 heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
3809#endif
3810
3811 objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
3812 objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
3813 objspace->obj_to_id_tbl = st_init_numtable();
3814
3815#if RGENGC_ESTIMATE_OLDMALLOC
3816 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
3817#endif
3818
3819 heap_add_pages(objspace, &size_pools[0], SIZE_POOL_EDEN_HEAP(&size_pools[0]), gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);
3820
3821 /* Give other size pools allocatable pages. */
3822 for (int i = 1; i < SIZE_POOL_COUNT; i++) {
3823 rb_size_pool_t *size_pool = &size_pools[i];
3824 int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
3825 size_pool->allocatable_pages = gc_params.heap_init_slots * multiple / HEAP_PAGE_OBJ_LIMIT;
3826 }
3827 heap_pages_expand_sorted(objspace);
3828
3829 init_mark_stack(&objspace->mark_stack);
3830
3831 objspace->profile.invoke_time = getrusage_time();
3832 finalizer_table = st_init_numtable();
3833}
3834
3835void
3836Init_gc_stress(void)
3837{
3838 rb_objspace_t *objspace = &rb_objspace;
3839
3840 gc_stress_set(objspace, ruby_initial_gc_stress);
3841}
3842
3843typedef int each_obj_callback(void *, void *, size_t, void *);
3844
3845static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
3846static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
3847
3848struct each_obj_data {
3849 rb_objspace_t *objspace;
3850 bool reenable_incremental;
3851
3852 each_obj_callback *callback;
3853 void *data;
3854
3855 struct heap_page **pages[SIZE_POOL_COUNT];
3856 size_t pages_counts[SIZE_POOL_COUNT];
3857};
3858
3859static VALUE
3860objspace_each_objects_ensure(VALUE arg)
3861{
3862 struct each_obj_data *data = (struct each_obj_data *)arg;
3863 rb_objspace_t *objspace = data->objspace;
3864
3865 /* Reenable incremental GC */
3866 if (data->reenable_incremental) {
3867 objspace->flags.dont_incremental = FALSE;
3868 }
3869
3870 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3871 struct heap_page **pages = data->pages[i];
3872 /* pages could be NULL if an error was raised during setup (e.g.
3873 * malloc failed due to out of memory). */
3874 if (pages) {
3875 free(pages);
3876 }
3877 }
3878
3879 return Qnil;
3880}
3881
3882static VALUE
3883objspace_each_objects_try(VALUE arg)
3884{
3885 struct each_obj_data *data = (struct each_obj_data *)arg;
3886 rb_objspace_t *objspace = data->objspace;
3887
3888 /* Copy pages from all size_pools to their respective buffers. */
3889 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3890 rb_size_pool_t *size_pool = &size_pools[i];
3891 size_t size = size_mul_or_raise(SIZE_POOL_EDEN_HEAP(size_pool)->total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
3892
3893 struct heap_page **pages = malloc(size);
3894 if (!pages) rb_memerror();
3895
3896 /* Set up pages buffer by iterating over all pages in the current eden
3897 * heap. This will be a snapshot of the state of the heap before we
3898 * call the callback over each page that exists in this buffer. Thus it
3899 * is safe for the callback to allocate objects without risking an
3900 * infinite loop. */
3901 struct heap_page *page = 0;
3902 size_t pages_count = 0;
3903 ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
3904 pages[pages_count] = page;
3905 pages_count++;
3906 }
3907 data->pages[i] = pages;
3908 data->pages_counts[i] = pages_count;
3909 GC_ASSERT(pages_count == SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
3910 }
3911
3912 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
3913 rb_size_pool_t *size_pool = &size_pools[i];
3914 size_t pages_count = data->pages_counts[i];
3915 struct heap_page **pages = data->pages[i];
3916
3917 struct heap_page *page = ccan_list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
3918 for (size_t i = 0; i < pages_count; i++) {
3919 /* If we have reached the end of the linked list then there are no
3920 * more pages, so break. */
3921 if (page == NULL) break;
3922
3923 /* If this page does not match the one in the buffer, then move to
3924 * the next page in the buffer. */
3925 if (pages[i] != page) continue;
3926
3927 uintptr_t pstart = (uintptr_t)page->start;
3928 uintptr_t pend = pstart + (page->total_slots * size_pool->slot_size);
3929
3930 if (!__asan_region_is_poisoned((void *)pstart, pend - pstart) &&
3931 (*data->callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {
3932 break;
3933 }
3934
3935 page = ccan_list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
3936 }
3937 }
3938
3939 return Qnil;
3940}
3941
3942/*
3943 * rb_objspace_each_objects() is special C API to walk through
3944 * Ruby object space. This C API is difficult to use safely;
3945 * you should not use it unless you have read the source of this
3946 * function and understand exactly what it does.
3947 *
3948 * 'callback' will be called several times (once per heap page in the
3949 * current implementation) with:
3950 * vstart: a pointer to the first living object of the heap page.
3951 * vend: a pointer just past the valid heap page area.
3952 * stride: the distance to the next VALUE.
3953 *
3954 * If callback() returns non-zero, the iteration will be stopped.
3955 *
3956 * This is a sample callback that iterates over live objects:
3957 *
3958 *   int
3959 *   sample_callback(void *vstart, void *vend, size_t stride, void *data) {
3960 *       VALUE v = (VALUE)vstart;
3961 *       for (; v != (VALUE)vend; v += stride) {
3962 *           if (RBASIC(v)->flags) { // liveness check
3963 *               // do something with live object 'v'
3964 *           }
3965 *       }
3966 *       return 0; // continue the iteration
3967 *   }
3968 * Note: 'vstart' is not the top of the heap page; it points at the
3969 * first living object, so that at least one live object is referenced
3970 * and GC problems are avoided. This means you cannot walk every
3971 * slot of an object page, including slots holding freed objects.
3972 *
3973 * Note: In this implementation 'stride' is the slot size of the page
3974 * being walked, and different pages may use different strides.
3975 * Always advance by 'stride' rather than by a constant such as
3976 * sizeof(RVALUE).
3977 */
3978void
3979rb_objspace_each_objects(each_obj_callback *callback, void *data)
3980{
3981 objspace_each_objects(&rb_objspace, callback, data, TRUE);
3982}
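/* A minimal sketch of a caller; `string_counter` and `count_strings` are
 * illustrative names, not part of this file:
 *
 *   static int
 *   string_counter(void *vstart, void *vend, size_t stride, void *data)
 *   {
 *       size_t *count = (size_t *)data;
 *       for (VALUE v = (VALUE)vstart; v != (VALUE)vend; v += stride) {
 *           if (RBASIC(v)->flags && BUILTIN_TYPE(v) == T_STRING) (*count)++;
 *       }
 *       return 0; // keep iterating
 *   }
 *
 *   static size_t
 *   count_strings(void)
 *   {
 *       size_t count = 0;
 *       rb_objspace_each_objects(string_counter, &count);
 *       return count;
 *   }
 */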
3983
3984static void
3985objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
3986{
3987 /* Disable incremental GC */
3988 bool reenable_incremental = FALSE;
3989 if (protected) {
3990 reenable_incremental = !objspace->flags.dont_incremental;
3991
3992 gc_rest(objspace);
3993 objspace->flags.dont_incremental = TRUE;
3994 }
3995
3996 struct each_obj_data each_obj_data = {
3997 .objspace = objspace,
3998 .reenable_incremental = reenable_incremental,
3999
4000 .callback = callback,
4001 .data = data,
4002
4003 .pages = {NULL},
4004 .pages_counts = {0},
4005 };
4006 rb_ensure(objspace_each_objects_try, (VALUE)&each_obj_data,
4007 objspace_each_objects_ensure, (VALUE)&each_obj_data);
4008}
4009
4010void
4011rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
4012{
4013 objspace_each_objects(&rb_objspace, callback, data, FALSE);
4014}
4015
4016struct os_each_struct {
4017 size_t num;
4018 VALUE of;
4019};
4020
4021static int
4022internal_object_p(VALUE obj)
4023{
4024 RVALUE *p = (RVALUE *)obj;
4025 void *ptr = asan_unpoison_object_temporary(obj);
4026 bool used_p = p->as.basic.flags;
4027
4028 if (used_p) {
4029 switch (BUILTIN_TYPE(obj)) {
4030 case T_NODE:
4031 UNEXPECTED_NODE(internal_object_p);
4032 break;
4033 case T_NONE:
4034 case T_MOVED:
4035 case T_IMEMO:
4036 case T_ICLASS:
4037 case T_ZOMBIE:
4038 break;
4039 case T_CLASS:
4040 if (!p->as.basic.klass) break;
4041 if (FL_TEST(obj, FL_SINGLETON)) {
4042 return rb_singleton_class_internal_p(obj);
4043 }
4044 return 0;
4045 default:
4046 if (!p->as.basic.klass) break;
4047 return 0;
4048 }
4049 }
4050 if (ptr || ! used_p) {
4051 asan_poison_object(obj);
4052 }
4053 return 1;
4054}
4055
4056int
4057rb_objspace_internal_object_p(VALUE obj)
4058{
4059 return internal_object_p(obj);
4060}
4061
4062static int
4063os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
4064{
4065 struct os_each_struct *oes = (struct os_each_struct *)data;
4066
4067 VALUE v = (VALUE)vstart;
4068 for (; v != (VALUE)vend; v += stride) {
4069 if (!internal_object_p(v)) {
4070 if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
4071 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
4072 rb_yield(v);
4073 oes->num++;
4074 }
4075 }
4076 }
4077 }
4078
4079 return 0;
4080}
4081
4082static VALUE
4083os_obj_of(VALUE of)
4084{
4085 struct os_each_struct oes;
4086
4087 oes.num = 0;
4088 oes.of = of;
4089 rb_objspace_each_objects(os_obj_of_i, &oes);
4090 return SIZET2NUM(oes.num);
4091}
4092
4093/*
4094 * call-seq:
4095 * ObjectSpace.each_object([module]) {|obj| ... } -> integer
4096 * ObjectSpace.each_object([module]) -> an_enumerator
4097 *
4098 * Calls the block once for each living, nonimmediate object in this
4099 * Ruby process. If <i>module</i> is specified, calls the block
4100 * for only those classes or modules that match (or are a subclass of)
4101 * <i>module</i>. Returns the number of objects found. Immediate
4102 * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
4103 * <code>true</code>, <code>false</code>, and <code>nil</code>) are
4104 * never returned. In the example below, #each_object returns both
4105 * the numbers we defined and several constants defined in the Math
4106 * module.
4107 *
4108 * If no block is given, an enumerator is returned instead.
4109 *
4110 * a = 102.7
4111 * b = 95 # Won't be returned
4112 * c = 12345678987654321
4113 * count = ObjectSpace.each_object(Numeric) {|x| p x }
4114 * puts "Total count: #{count}"
4115 *
4116 * <em>produces:</em>
4117 *
4118 * 12345678987654321
4119 * 102.7
4120 * 2.71828182845905
4121 * 3.14159265358979
4122 * 2.22044604925031e-16
4123 * 1.7976931348623157e+308
4124 * 2.2250738585072e-308
4125 * Total count: 7
4126 *
4127 */
4128
4129static VALUE
4130os_each_obj(int argc, VALUE *argv, VALUE os)
4131{
4132 VALUE of;
4133
4134 of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
4135 RETURN_ENUMERATOR(os, 1, &of);
4136 return os_obj_of(of);
4137}
4138
4139/*
4140 * call-seq:
4141 * ObjectSpace.undefine_finalizer(obj)
4142 *
4143 * Removes all finalizers for <i>obj</i>.
4144 *
4145 */
4146
4147static VALUE
4148undefine_final(VALUE os, VALUE obj)
4149{
4150 return rb_undefine_finalizer(obj);
4151}
4152
4153VALUE
4154rb_undefine_finalizer(VALUE obj)
4155{
4156 rb_objspace_t *objspace = &rb_objspace;
4157 st_data_t data = obj;
4158 rb_check_frozen(obj);
4159 st_delete(finalizer_table, &data, 0);
4160 FL_UNSET(obj, FL_FINALIZE);
4161 return obj;
4162}
4163
4164static void
4165should_be_callable(VALUE block)
4166{
4167 if (!rb_obj_respond_to(block, idCall, TRUE)) {
4168 rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
4169 rb_obj_class(block));
4170 }
4171}
4172
4173static void
4174should_be_finalizable(VALUE obj)
4175{
4176 if (!FL_ABLE(obj)) {
4177 rb_raise(rb_eArgError, "cannot define finalizer for %s",
4178 rb_obj_classname(obj));
4179 }
4180 rb_check_frozen(obj);
4181}
4182
4183/*
4184 * call-seq:
4185 * ObjectSpace.define_finalizer(obj, aProc=proc())
4186 *
4187 * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
4188 * has been destroyed. The object ID of <i>obj</i> will be passed
4189 * as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
4190 * method, make sure it can be called with a single argument.
4191 *
4192 * The return value is an array <code>[0, aProc]</code>.
4193 *
4194 * The two recommended patterns are to either create the finalizer proc
4195 * in a non-instance method where it can safely capture the needed state,
4196 * or to use a custom callable object that stores the needed state
4197 * explicitly as instance variables.
4198 *
4199 * class Foo
4200 * def initialize(data_needed_for_finalization)
4201 * ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
4202 * end
4203 *
4204 * def self.create_finalizer(data_needed_for_finalization)
4205 * proc {
4206 * puts "finalizing #{data_needed_for_finalization}"
4207 * }
4208 * end
4209 * end
4210 *
4211 * class Bar
4212 * class Remover
4213 * def initialize(data_needed_for_finalization)
4214 * @data_needed_for_finalization = data_needed_for_finalization
4215 * end
4216 *
4217 * def call(id)
4218 * puts "finalizing #{@data_needed_for_finalization}"
4219 * end
4220 * end
4221 *
4222 * def initialize(data_needed_for_finalization)
4223 * ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
4224 * end
4225 * end
4226 *
4227 * Note that if your finalizer references the object to be
4228 * finalized it will never be run on GC, although it will still be
4229 * run at exit. You will get a warning if you capture the object
4230 * to be finalized as the receiver of the finalizer.
4231 *
4232 * class CapturesSelf
4233 * def initialize(name)
4234 * ObjectSpace.define_finalizer(self, proc {
4235 * # this finalizer will only be run on exit
4236 * puts "finalizing #{name}"
4237 * })
4238 * end
4239 * end
4240 *
4241 * Also note that finalization can be unpredictable and is never guaranteed
4242 * to be run except on exit.
4243 */
4244
4245static VALUE
4246define_final(int argc, VALUE *argv, VALUE os)
4247{
4248 VALUE obj, block;
4249
4250 rb_scan_args(argc, argv, "11", &obj, &block);
4251 should_be_finalizable(obj);
4252 if (argc == 1) {
4253 block = rb_block_proc();
4254 }
4255 else {
4256 should_be_callable(block);
4257 }
4258
4259 if (rb_callable_receiver(block) == obj) {
4260 rb_warn("finalizer references object to be finalized");
4261 }
4262
4263 return define_final0(obj, block);
4264}
4265
4266static VALUE
4267define_final0(VALUE obj, VALUE block)
4268{
4269 rb_objspace_t *objspace = &rb_objspace;
4270 VALUE table;
4271 st_data_t data;
4272
4273 RBASIC(obj)->flags |= FL_FINALIZE;
4274
4275 if (st_lookup(finalizer_table, obj, &data)) {
4276 table = (VALUE)data;
4277
4278 /* avoid duplicate block, table is usually small */
4279 {
4280 long len = RARRAY_LEN(table);
4281 long i;
4282
4283 for (i = 0; i < len; i++) {
4284 VALUE recv = RARRAY_AREF(table, i);
4285 if (rb_equal(recv, block)) {
4286 block = recv;
4287 goto end;
4288 }
4289 }
4290 }
4291
4292 rb_ary_push(table, block);
4293 }
4294 else {
4295 table = rb_ary_new3(1, block);
4296 RBASIC_CLEAR_CLASS(table);
4297 st_add_direct(finalizer_table, obj, table);
4298 }
4299 end:
4300 block = rb_ary_new3(2, INT2FIX(0), block);
4301 OBJ_FREEZE(block);
4302 return block;
4303}
4304
4305VALUE
4306rb_define_finalizer(VALUE obj, VALUE block)
4307{
4308 should_be_finalizable(obj);
4309 should_be_callable(block);
4310 return define_final0(obj, block);
4311}
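/* A minimal sketch of how C code could use the function above; `finalizer`
 * is assumed to be any VALUE that responds to #call (for example a proc
 * passed in from Ruby code):
 *
 *   static VALUE
 *   attach_finalizer(VALUE obj, VALUE finalizer)
 *   {
 *       return rb_define_finalizer(obj, finalizer);
 *   }
 *
 * As with ObjectSpace.define_finalizer, the callable receives the object id
 * of `obj` and must not capture `obj` itself, or the finalizer will not run
 * before process exit. */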
4312
4313void
4314rb_gc_copy_finalizer(VALUE dest, VALUE obj)
4315{
4316 rb_objspace_t *objspace = &rb_objspace;
4317 VALUE table;
4318 st_data_t data;
4319
4320 if (!FL_TEST(obj, FL_FINALIZE)) return;
4321 if (st_lookup(finalizer_table, obj, &data)) {
4322 table = (VALUE)data;
4323 st_insert(finalizer_table, dest, table);
4324 }
4325 FL_SET(dest, FL_FINALIZE);
4326}
4327
4328static VALUE
4329run_single_final(VALUE cmd, VALUE objid)
4330{
4331 return rb_check_funcall(cmd, idCall, 1, &objid);
4332}
4333
4334static void
4335warn_exception_in_finalizer(rb_execution_context_t *ec, VALUE final)
4336{
4337 if (!UNDEF_P(final) && !NIL_P(ruby_verbose)) {
4338 VALUE errinfo = ec->errinfo;
4339 rb_warn("Exception in finalizer %+"PRIsVALUE, final);
4340 rb_ec_error_print(ec, errinfo);
4341 }
4342}
4343
4344static void
4345run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
4346{
4347 long i;
4348 enum ruby_tag_type state;
4349 volatile struct {
4350 VALUE errinfo;
4351 VALUE objid;
4352 VALUE final;
4353 rb_control_frame_t *cfp;
4354 long finished;
4355 } saved;
4356 rb_execution_context_t * volatile ec = GET_EC();
4357#define RESTORE_FINALIZER() (\
4358 ec->cfp = saved.cfp, \
4359 ec->errinfo = saved.errinfo)
4360
4361 saved.errinfo = ec->errinfo;
4362 saved.objid = rb_obj_id(obj);
4363 saved.cfp = ec->cfp;
4364 saved.finished = 0;
4365 saved.final = Qundef;
4366
4367 EC_PUSH_TAG(ec);
4368 state = EC_EXEC_TAG();
4369 if (state != TAG_NONE) {
4370 ++saved.finished; /* skip failed finalizer */
4371 warn_exception_in_finalizer(ec, ATOMIC_VALUE_EXCHANGE(saved.final, Qundef));
4372 }
4373 for (i = saved.finished;
4374 RESTORE_FINALIZER(), i<RARRAY_LEN(table);
4375 saved.finished = ++i) {
4376 run_single_final(saved.final = RARRAY_AREF(table, i), saved.objid);
4377 }
4378 EC_POP_TAG();
4379#undef RESTORE_FINALIZER
4380}
4381
4382static void
4383run_final(rb_objspace_t *objspace, VALUE zombie)
4384{
4385 st_data_t key, table;
4386
4387 if (RZOMBIE(zombie)->dfree) {
4388 RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
4389 }
4390
4391 key = (st_data_t)zombie;
4392 if (st_delete(finalizer_table, &key, &table)) {
4393 run_finalizer(objspace, zombie, (VALUE)table);
4394 }
4395}
4396
4397static void
4398finalize_list(rb_objspace_t *objspace, VALUE zombie)
4399{
4400 while (zombie) {
4401 VALUE next_zombie;
4402 struct heap_page *page;
4403 asan_unpoison_object(zombie, false);
4404 next_zombie = RZOMBIE(zombie)->next;
4405 page = GET_HEAP_PAGE(zombie);
4406
4407 run_final(objspace, zombie);
4408
4409 RB_VM_LOCK_ENTER();
4410 {
4411 GC_ASSERT(BUILTIN_TYPE(zombie) == T_ZOMBIE);
4412 if (FL_TEST(zombie, FL_SEEN_OBJ_ID)) {
4413 obj_free_object_id(objspace, zombie);
4414 }
4415
4416 GC_ASSERT(heap_pages_final_slots > 0);
4417 GC_ASSERT(page->final_slots > 0);
4418
4419 heap_pages_final_slots--;
4420 page->final_slots--;
4421 page->free_slots++;
4422 heap_page_add_freeobj(objspace, page, zombie);
4423 objspace->profile.total_freed_objects++;
4424 }
4425 RB_VM_LOCK_LEAVE();
4426
4427 zombie = next_zombie;
4428 }
4429}
4430
4431static void
4432finalize_deferred_heap_pages(rb_objspace_t *objspace)
4433{
4434 VALUE zombie;
4435 while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
4436 finalize_list(objspace, zombie);
4437 }
4438}
4439
4440static void
4441finalize_deferred(rb_objspace_t *objspace)
4442{
4443 rb_execution_context_t *ec = GET_EC();
4444 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
4445 finalize_deferred_heap_pages(objspace);
4446 ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
4447}
4448
4449static void
4450gc_finalize_deferred(void *dmy)
4451{
4452 rb_objspace_t *objspace = dmy;
4453 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4454
4455 finalize_deferred(objspace);
4456 ATOMIC_SET(finalizing, 0);
4457}
4458
4459static void
4460gc_finalize_deferred_register(rb_objspace_t *objspace)
4461{
4462 if (rb_postponed_job_register_one(0, gc_finalize_deferred, objspace) == 0) {
4463 rb_bug("gc_finalize_deferred_register: can't register finalizer.");
4464 }
4465}
4466
4467struct force_finalize_list {
4468 VALUE obj;
4469 VALUE table;
4470 struct force_finalize_list *next;
4471};
4472
4473static int
4474force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
4475{
4476 struct force_finalize_list **prev = (struct force_finalize_list **)arg;
4477 struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
4478 curr->obj = key;
4479 curr->table = val;
4480 curr->next = *prev;
4481 *prev = curr;
4482 return ST_CONTINUE;
4483}
4484
4485bool rb_obj_is_main_ractor(VALUE gv);
4486
4487void
4488rb_objspace_call_finalizer(rb_objspace_t *objspace)
4489{
4490 size_t i;
4491
4492#if RGENGC_CHECK_MODE >= 2
4493 gc_verify_internal_consistency(objspace);
4494#endif
4495 gc_rest(objspace);
4496
4497 if (ATOMIC_EXCHANGE(finalizing, 1)) return;
4498
4499 /* run finalizers */
4500 finalize_deferred(objspace);
4501 GC_ASSERT(heap_pages_deferred_final == 0);
4502
4503 gc_rest(objspace);
4504 /* prohibit incremental GC */
4505 objspace->flags.dont_incremental = 1;
4506
4507 /* force to run finalizer */
4508 while (finalizer_table->num_entries) {
4509 struct force_finalize_list *list = 0;
4510 st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
4511 while (list) {
4512 struct force_finalize_list *curr = list;
4513 st_data_t obj = (st_data_t)curr->obj;
4514 run_finalizer(objspace, curr->obj, curr->table);
4515 st_delete(finalizer_table, &obj, 0);
4516 list = curr->next;
4517 xfree(curr);
4518 }
4519 }
4520
4521 /* prohibit GC because forced T_DATA finalizers can break object graph consistency */
4522 dont_gc_on();
4523
4524 /* running data/file finalizers is part of garbage collection */
4525 unsigned int lock_lev;
4526 gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
4527
4528 /* run data/file object's finalizers */
4529 for (i = 0; i < heap_allocated_pages; i++) {
4530 struct heap_page *page = heap_pages_sorted[i];
4531 short stride = page->slot_size;
4532
4533 uintptr_t p = (uintptr_t)page->start;
4534 uintptr_t pend = p + page->total_slots * stride;
4535 for (; p < pend; p += stride) {
4536 VALUE vp = (VALUE)p;
4537 void *poisoned = asan_unpoison_object_temporary(vp);
4538 switch (BUILTIN_TYPE(vp)) {
4539 case T_DATA:
4540 if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
4541 if (rb_obj_is_thread(vp)) break;
4542 if (rb_obj_is_mutex(vp)) break;
4543 if (rb_obj_is_fiber(vp)) break;
4544 if (rb_obj_is_main_ractor(vp)) break;
4545 if (RTYPEDDATA_P(vp)) {
4546 RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
4547 }
4548 RANY(p)->as.free.flags = 0;
4549 if (RANY(p)->as.data.dfree == RUBY_DEFAULT_FREE) {
4550 xfree(DATA_PTR(p));
4551 }
4552 else if (RANY(p)->as.data.dfree) {
4553 make_zombie(objspace, vp, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
4554 }
4555 break;
4556 case T_FILE:
4557 if (RANY(p)->as.file.fptr) {
4558 make_io_zombie(objspace, vp);
4559 }
4560 break;
4561 default:
4562 break;
4563 }
4564 if (poisoned) {
4565 GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
4566 asan_poison_object(vp);
4567 }
4568 }
4569 }
4570
4571 gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);
4572
4573 finalize_deferred_heap_pages(objspace);
4574
4575 st_free_table(finalizer_table);
4576 finalizer_table = 0;
4577 ATOMIC_SET(finalizing, 0);
4578}
4579
4580static inline int
4581is_swept_object(rb_objspace_t *objspace, VALUE ptr)
4582{
4583 struct heap_page *page = GET_HEAP_PAGE(ptr);
4584 return page->flags.before_sweep ? FALSE : TRUE;
4585}
4586
4587/* garbage objects will be collected soon. */
4588static inline int
4589is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
4590{
4591 if (!is_lazy_sweeping(objspace) ||
4592 is_swept_object(objspace, ptr) ||
4593 MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {
4594
4595 return FALSE;
4596 }
4597 else {
4598 return TRUE;
4599 }
4600}
4601
4602static inline int
4603is_live_object(rb_objspace_t *objspace, VALUE ptr)
4604{
4605 switch (BUILTIN_TYPE(ptr)) {
4606 case T_NONE:
4607 case T_MOVED:
4608 case T_ZOMBIE:
4609 return FALSE;
4610 default:
4611 break;
4612 }
4613
4614 if (!is_garbage_object(objspace, ptr)) {
4615 return TRUE;
4616 }
4617 else {
4618 return FALSE;
4619 }
4620}
4621
4622static inline int
4623is_markable_object(rb_objspace_t *objspace, VALUE obj)
4624{
4625 if (rb_special_const_p(obj)) return FALSE; /* special const is not markable */
4626 check_rvalue_consistency(obj);
4627 return TRUE;
4628}
4629
4630int
4631rb_objspace_markable_object_p(VALUE obj)
4632{
4633 rb_objspace_t *objspace = &rb_objspace;
4634 return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
4635}
4636
4637int
4638rb_objspace_garbage_object_p(VALUE obj)
4639{
4640 rb_objspace_t *objspace = &rb_objspace;
4641 return is_garbage_object(objspace, obj);
4642}
4643
4644static VALUE
4645id2ref_obj_tbl(rb_objspace_t *objspace, VALUE objid)
4646{
4647 VALUE orig;
4648 if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {
4649 return orig;
4650 }
4651 else {
4652 return Qundef;
4653 }
4654}
4655
4656/*
4657 * call-seq:
4658 * ObjectSpace._id2ref(object_id) -> an_object
4659 *
4660 * Converts an object id to a reference to the object. May not be
4661 * called on an object id passed as a parameter to a finalizer.
4662 *
4663 * s = "I am a string" #=> "I am a string"
4664 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
4665 * r == s #=> true
4666 *
4667 * In multi-ractor mode, if the object is not shareable, it raises
4668 * RangeError.
4669 */
4670
4671static VALUE
4672id2ref(VALUE objid)
4673{
4674#if SIZEOF_LONG == SIZEOF_VOIDP
4675#define NUM2PTR(x) NUM2ULONG(x)
4676#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
4677#define NUM2PTR(x) NUM2ULL(x)
4678#endif
4679 rb_objspace_t *objspace = &rb_objspace;
4680 VALUE ptr;
4681 VALUE orig;
4682 void *p0;
4683
4684 objid = rb_to_int(objid);
4685 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
4686 ptr = NUM2PTR(objid);
4687 if (ptr == Qtrue) return Qtrue;
4688 if (ptr == Qfalse) return Qfalse;
4689 if (NIL_P(ptr)) return Qnil;
4690 if (FIXNUM_P(ptr)) return (VALUE)ptr;
4691 if (FLONUM_P(ptr)) return (VALUE)ptr;
4692
4693 ptr = obj_id_to_ref(objid);
4694 if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
4695 ID symid = ptr / sizeof(RVALUE);
4696 p0 = (void *)ptr;
4697 if (!rb_static_id_valid_p(symid))
4698 rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
4699 return ID2SYM(symid);
4700 }
4701 }
4702
4703 if (!UNDEF_P(orig = id2ref_obj_tbl(objspace, objid)) &&
4704 is_live_object(objspace, orig)) {
4705
4706 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(orig)) {
4707 return orig;
4708 }
4709 else {
4710 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));
4711 }
4712 }
4713
4714 if (rb_int_ge(objid, objspace->next_object_id)) {
4715 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
4716 }
4717 else {
4718 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_int2str(objid, 10));
4719 }
4720}
4721
4722/* :nodoc: */
4723static VALUE
4724os_id2ref(VALUE os, VALUE objid)
4725{
4726 return id2ref(objid);
4727}
4728
4729static VALUE
4730rb_find_object_id(VALUE obj, VALUE (*get_heap_object_id)(VALUE))
4731{
4732 if (STATIC_SYM_P(obj)) {
4733 return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
4734 }
4735 else if (FLONUM_P(obj)) {
4736#if SIZEOF_LONG == SIZEOF_VOIDP
4737 return LONG2NUM((SIGNED_VALUE)obj);
4738#else
4739 return LL2NUM((SIGNED_VALUE)obj);
4740#endif
4741 }
4742 else if (SPECIAL_CONST_P(obj)) {
4743 return LONG2NUM((SIGNED_VALUE)obj);
4744 }
4745
4746 return get_heap_object_id(obj);
4747}
4748
4749static VALUE
4750cached_object_id(VALUE obj)
4751{
4752 VALUE id;
4753 rb_objspace_t *objspace = &rb_objspace;
4754
4755 RB_VM_LOCK_ENTER();
4756 if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
4757 GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
4758 }
4759 else {
4760 GC_ASSERT(!FL_TEST(obj, FL_SEEN_OBJ_ID));
4761
4762 id = objspace->next_object_id;
4763 objspace->next_object_id = rb_int_plus(id, INT2FIX(OBJ_ID_INCREMENT));
4764
4765 VALUE already_disabled = rb_gc_disable_no_rest();
4766 st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
4767 st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
4768 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
4769 FL_SET(obj, FL_SEEN_OBJ_ID);
4770 }
4771 RB_VM_LOCK_LEAVE();
4772
4773 return id;
4774}
4775
4776static VALUE
4777nonspecial_obj_id_(VALUE obj)
4778{
4779 return nonspecial_obj_id(obj);
4780}
4781
4782
4783VALUE
4784rb_memory_id(VALUE obj)
4785{
4786 return rb_find_object_id(obj, nonspecial_obj_id_);
4787}
4788
4789/*
4790 * Document-method: __id__
4791 * Document-method: object_id
4792 *
4793 * call-seq:
4794 * obj.__id__ -> integer
4795 * obj.object_id -> integer
4796 *
4797 * Returns an integer identifier for +obj+.
4798 *
4799 * The same number will be returned on all calls to +object_id+ for a given
4800 * object, and no two active objects will share an id.
4801 *
4802 * Note that some objects of builtin classes are reused for optimization.
4803 * This is the case for immediate values and frozen string literals.
4804 *
4805 * BasicObject implements +__id__+, Kernel implements +object_id+.
4806 *
4807 * Immediate values are not passed by reference but are passed by value:
4808 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
4809 *
4810 * Object.new.object_id == Object.new.object_id # => false
4811 * (21 * 2).object_id == (21 * 2).object_id # => true
4812 * "hello".object_id == "hello".object_id # => false
4813 * "hi".freeze.object_id == "hi".freeze.object_id # => true
4814 */
4815
4816VALUE
4817rb_obj_id(VALUE obj)
4818{
4819 /*
4820 * 32-bit VALUE space
4821 * MSB ------------------------ LSB
4822 * false 00000000000000000000000000000000
4823 * true 00000000000000000000000000000010
4824 * nil 00000000000000000000000000000100
4825 * undef 00000000000000000000000000000110
4826 * symbol ssssssssssssssssssssssss00001110
4827 * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
4828 * fixnum fffffffffffffffffffffffffffffff1
4829 *
4830 * object_id space
4831 * LSB
4832 * false 00000000000000000000000000000000
4833 * true 00000000000000000000000000000010
4834 * nil 00000000000000000000000000000100
4835 * undef 00000000000000000000000000000110
4836 * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
4837 * object oooooooooooooooooooooooooooooo0 o...o % A = 0
4838 * fixnum fffffffffffffffffffffffffffffff1 bignum if required
4839 *
4840 * where A = sizeof(RVALUE)/4
4841 *
4842 * sizeof(RVALUE) is
4843 * 20 if 32-bit, double is 4-byte aligned
4844 * 24 if 32-bit, double is 8-byte aligned
4845 * 40 if 64-bit
4846 */
4847
4848 return rb_find_object_id(obj, cached_object_id);
4849}
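/* A worked example, assuming a 64-bit build where sizeof(RVALUE) == 40 and
 * therefore A == 10: the counter starts at OBJ_ID_INITIAL (40), so heap
 * objects receive ids 40, 60, 80, ... (OBJ_ID_INCREMENT == 20 apart, all
 * divisible by A); Fixnums keep their odd 2n+1 encoding; and a static
 * symbol's id is built as SYM2ID(sym) * sizeof(RVALUE) + (4 << 2) before
 * FIXNUM tagging, which is exactly what id2ref() tests with
 * ptr % sizeof(RVALUE) == (4 << 2). */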
4850
4851static enum rb_id_table_iterator_result
4852cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
4853{
4854 size_t *total_size = data_ptr;
4855 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
4856 *total_size += sizeof(*ccs);
4857 *total_size += sizeof(ccs->entries[0]) * ccs->capa;
4858 return ID_TABLE_CONTINUE;
4859}
4860
4861static size_t
4862cc_table_memsize(struct rb_id_table *cc_table)
4863{
4864 size_t total = rb_id_table_memsize(cc_table);
4865 rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
4866 return total;
4867}
4868
4869static size_t
4870obj_memsize_of(VALUE obj, int use_all_types)
4871{
4872 size_t size = 0;
4873
4874 if (SPECIAL_CONST_P(obj)) {
4875 return 0;
4876 }
4877
4878 if (FL_TEST(obj, FL_EXIVAR)) {
4879 size += rb_generic_ivar_memsize(obj);
4880 }
4881
4882 switch (BUILTIN_TYPE(obj)) {
4883 case T_OBJECT:
4884 if (rb_shape_obj_too_complex(obj)) {
4885 size += rb_id_table_memsize(ROBJECT_IV_HASH(obj));
4886 }
4887 else if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
4888 size += ROBJECT_IV_CAPACITY(obj) * sizeof(VALUE);
4889 }
4890 break;
4891 case T_MODULE:
4892 case T_CLASS:
4893 if (RCLASS_EXT(obj)) {
4894 if (RCLASS_M_TBL(obj)) {
4895 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4896 }
4897 // class IV sizes are allocated as powers of two
4898 size += SIZEOF_VALUE << bit_length(RCLASS_IV_COUNT(obj));
4899 if (RCLASS_CVC_TBL(obj)) {
4900 size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
4901 }
4902 if (RCLASS_EXT(obj)->const_tbl) {
4903 size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
4904 }
4905 if (RCLASS_CC_TBL(obj)) {
4906 size += cc_table_memsize(RCLASS_CC_TBL(obj));
4907 }
4908 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
4909 size += (RCLASS_SUPERCLASS_DEPTH(obj) + 1) * sizeof(VALUE);
4910 }
4911#if SIZE_POOL_COUNT == 1
4912 size += sizeof(rb_classext_t);
4913#endif
4914 }
4915 break;
4916 case T_ICLASS:
4917 if (RICLASS_OWNS_M_TBL_P(obj)) {
4918 if (RCLASS_M_TBL(obj)) {
4919 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4920 }
4921 }
4922 if (RCLASS_EXT(obj) && RCLASS_CC_TBL(obj)) {
4923 size += cc_table_memsize(RCLASS_CC_TBL(obj));
4924 }
4925 break;
4926 case T_STRING:
4927 size += rb_str_memsize(obj);
4928 break;
4929 case T_ARRAY:
4930 size += rb_ary_memsize(obj);
4931 break;
4932 case T_HASH:
4933 if (RHASH_AR_TABLE_P(obj)) {
4934 if (RHASH_AR_TABLE(obj) != NULL) {
4935 size_t rb_hash_ar_table_size(void);
4936 size += rb_hash_ar_table_size();
4937 }
4938 }
4939 else {
4940 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
4941 size += st_memsize(RHASH_ST_TABLE(obj));
4942 }
4943 break;
4944 case T_REGEXP:
4945 if (RREGEXP_PTR(obj)) {
4946 size += onig_memsize(RREGEXP_PTR(obj));
4947 }
4948 break;
4949 case T_DATA:
4950 if (use_all_types) size += rb_objspace_data_type_memsize(obj);
4951 break;
4952 case T_MATCH:
4953 if (RMATCH(obj)->rmatch) {
4954 struct rmatch *rm = RMATCH(obj)->rmatch;
4955 size += onig_region_memsize(&rm->regs);
4956 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
4957 size += sizeof(struct rmatch);
4958 }
4959 break;
4960 case T_FILE:
4961 if (RFILE(obj)->fptr) {
4962 size += rb_io_memsize(RFILE(obj)->fptr);
4963 }
4964 break;
4965 case T_RATIONAL:
4966 case T_COMPLEX:
4967 break;
4968 case T_IMEMO:
4969 size += imemo_memsize(obj);
4970 break;
4971
4972 case T_FLOAT:
4973 case T_SYMBOL:
4974 break;
4975
4976 case T_BIGNUM:
4977 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
4978 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
4979 }
4980 break;
4981
4982 case T_NODE:
4983 UNEXPECTED_NODE(obj_memsize_of);
4984 break;
4985
4986 case T_STRUCT:
4987 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
4988 RSTRUCT(obj)->as.heap.ptr) {
4989 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
4990 }
4991 break;
4992
4993 case T_ZOMBIE:
4994 case T_MOVED:
4995 break;
4996
4997 default:
4998 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
4999 BUILTIN_TYPE(obj), (void*)obj);
5000 }
5001
5002 return size + rb_gc_obj_slot_size(obj);
5003}
5004
5005size_t
5006rb_obj_memsize_of(VALUE obj)
5007{
5008 return obj_memsize_of(obj, TRUE);
5009}
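/* This backs ObjectSpace.memsize_of in the "objspace" extension.  The result
 * is only an estimate: it covers the slot itself (rb_gc_obj_slot_size()) plus
 * the out-of-line memory accounted for above, not every allocation reachable
 * from the object. */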
5010
5011static int
5012set_zero(st_data_t key, st_data_t val, st_data_t arg)
5013{
5014 VALUE k = (VALUE)key;
5015 VALUE hash = (VALUE)arg;
5016 rb_hash_aset(hash, k, INT2FIX(0));
5017 return ST_CONTINUE;
5018}
5019
5020static VALUE
5021type_sym(size_t type)
5022{
5023 switch (type) {
5024#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
5025 COUNT_TYPE(T_NONE);
5026 COUNT_TYPE(T_OBJECT);
5027 COUNT_TYPE(T_CLASS);
5028 COUNT_TYPE(T_MODULE);
5029 COUNT_TYPE(T_FLOAT);
5030 COUNT_TYPE(T_STRING);
5031 COUNT_TYPE(T_REGEXP);
5032 COUNT_TYPE(T_ARRAY);
5033 COUNT_TYPE(T_HASH);
5034 COUNT_TYPE(T_STRUCT);
5035 COUNT_TYPE(T_BIGNUM);
5036 COUNT_TYPE(T_FILE);
5037 COUNT_TYPE(T_DATA);
5038 COUNT_TYPE(T_MATCH);
5039 COUNT_TYPE(T_COMPLEX);
5040 COUNT_TYPE(T_RATIONAL);
5041 COUNT_TYPE(T_NIL);
5042 COUNT_TYPE(T_TRUE);
5043 COUNT_TYPE(T_FALSE);
5044 COUNT_TYPE(T_SYMBOL);
5045 COUNT_TYPE(T_FIXNUM);
5046 COUNT_TYPE(T_IMEMO);
5047 COUNT_TYPE(T_UNDEF);
5048 COUNT_TYPE(T_NODE);
5049 COUNT_TYPE(T_ICLASS);
5050 COUNT_TYPE(T_ZOMBIE);
5051 COUNT_TYPE(T_MOVED);
5052#undef COUNT_TYPE
5053 default: return SIZET2NUM(type); break;
5054 }
5055}
5056
5057/*
5058 * call-seq:
5059 * ObjectSpace.count_objects([result_hash]) -> hash
5060 *
5061 * Counts all objects grouped by type.
5062 *
5063 * It returns a hash, such as:
5064 * {
5065 * :TOTAL=>10000,
5066 * :FREE=>3011,
5067 * :T_OBJECT=>6,
5068 * :T_CLASS=>404,
5069 * # ...
5070 * }
5071 *
5072 * The contents of the returned hash are implementation specific
5073 * and may change in the future.
5074 *
5075 * The keys starting with +:T_+ mean live objects.
5076 * For example, +:T_ARRAY+ is the number of arrays.
5077 * +:FREE+ means object slots which are not currently used.
5078 * +:TOTAL+ means the sum of the above.
5079 *
5080 * If the optional argument +result_hash+ is given,
5081 * it is overwritten and returned. This is intended to avoid probe effect.
5082 *
5083 * h = {}
5084 * ObjectSpace.count_objects(h)
5085 * puts h
5086 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
5087 *
5088 * This method is only expected to work on C Ruby.
5089 *
5090 */
5091
5092static VALUE
5093count_objects(int argc, VALUE *argv, VALUE os)
5094{
5095 rb_objspace_t *objspace = &rb_objspace;
5096 size_t counts[T_MASK+1];
5097 size_t freed = 0;
5098 size_t total = 0;
5099 size_t i;
5100 VALUE hash = Qnil;
5101
5102 if (rb_check_arity(argc, 0, 1) == 1) {
5103 hash = argv[0];
5104 if (!RB_TYPE_P(hash, T_HASH))
5105 rb_raise(rb_eTypeError, "non-hash given");
5106 }
5107
5108 for (i = 0; i <= T_MASK; i++) {
5109 counts[i] = 0;
5110 }
5111
5112 for (i = 0; i < heap_allocated_pages; i++) {
5113 struct heap_page *page = heap_pages_sorted[i];
5114 short stride = page->slot_size;
5115
5116 uintptr_t p = (uintptr_t)page->start;
5117 uintptr_t pend = p + page->total_slots * stride;
5118 for (;p < pend; p += stride) {
5119 VALUE vp = (VALUE)p;
5120 GC_ASSERT((NUM_IN_PAGE(vp) * BASE_SLOT_SIZE) % page->slot_size == 0);
5121
5122 void *poisoned = asan_unpoison_object_temporary(vp);
5123 if (RANY(p)->as.basic.flags) {
5124 counts[BUILTIN_TYPE(vp)]++;
5125 }
5126 else {
5127 freed++;
5128 }
5129 if (poisoned) {
5130 GC_ASSERT(BUILTIN_TYPE(vp) == T_NONE);
5131 asan_poison_object(vp);
5132 }
5133 }
5134 total += page->total_slots;
5135 }
5136
5137 if (NIL_P(hash)) {
5138 hash = rb_hash_new();
5139 }
5140 else if (!RHASH_EMPTY_P(hash)) {
5141 rb_hash_stlike_foreach(hash, set_zero, hash);
5142 }
5143 rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
5144 rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
5145
5146 for (i = 0; i <= T_MASK; i++) {
5147 VALUE type = type_sym(i);
5148 if (counts[i])
5149 rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
5150 }
5151
5152 return hash;
5153}
5154
5155/*
5156 ------------------------ Garbage Collection ------------------------
5157*/
5158
5159/* Sweeping */
5160
5161static size_t
5162objspace_available_slots(rb_objspace_t *objspace)
5163{
5164 size_t total_slots = 0;
5165 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5166 rb_size_pool_t *size_pool = &size_pools[i];
5167 total_slots += SIZE_POOL_EDEN_HEAP(size_pool)->total_slots;
5168 total_slots += SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5169 }
5170 return total_slots;
5171}
5172
5173static size_t
5174objspace_live_slots(rb_objspace_t *objspace)
5175{
5176 return (objspace->total_allocated_objects - objspace->profile.total_freed_objects) - heap_pages_final_slots;
5177}
5178
5179static size_t
5180objspace_free_slots(rb_objspace_t *objspace)
5181{
5182 return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
5183}
5184
5185static void
5186gc_setup_mark_bits(struct heap_page *page)
5187{
5188 /* copy oldgen bitmap to mark bitmap */
5189 memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
5190}
5191
5192static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
5193static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t src_slot_size, size_t slot_size);
5194
5195#if defined(_WIN32)
5196enum {HEAP_PAGE_LOCK = PAGE_NOACCESS, HEAP_PAGE_UNLOCK = PAGE_READWRITE};
5197
5198static BOOL
5199protect_page_body(struct heap_page_body *body, DWORD protect)
5200{
5201 DWORD old_protect;
5202 return VirtualProtect(body, HEAP_PAGE_SIZE, protect, &old_protect) != 0;
5203}
5204#else
5205enum {HEAP_PAGE_LOCK = PROT_NONE, HEAP_PAGE_UNLOCK = PROT_READ | PROT_WRITE};
5206#define protect_page_body(body, protect) !mprotect((body), HEAP_PAGE_SIZE, (protect))
5207#endif
5208
5209static void
5210lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
5211{
5212 if (!protect_page_body(body, HEAP_PAGE_LOCK)) {
5213 rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
5214 }
5215 else {
5216 gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
5217 }
5218}
5219
5220static void
5221unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
5222{
5223 if (!protect_page_body(body, HEAP_PAGE_UNLOCK)) {
5224 rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
5225 }
5226 else {
5227 gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
5228 }
5229}
5230
5231static bool
5232try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *free_page, VALUE src)
5233{
5234 GC_ASSERT(gc_is_moveable_obj(objspace, src));
5235
5236 struct heap_page *src_page = GET_HEAP_PAGE(src);
5237 if (!free_page) {
5238 return false;
5239 }
5240
5241 /* We should return true if either src is successfully moved, or src is
5242 * unmoveable. A false return will cause the sweeping cursor to be
5243 * incremented to the next page, and src will attempt to move again */
5244 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(src), src));
5245
5246 asan_unlock_freelist(free_page);
5247 VALUE dest = (VALUE)free_page->freelist;
5248 asan_lock_freelist(free_page);
5249 asan_unpoison_object(dest, false);
5250 if (!dest) {
5251 /* if we can't get something from the freelist then the page must be
5252 * full */
5253 return false;
5254 }
5255 free_page->freelist = RANY(dest)->as.free.next;
5256
5257 GC_ASSERT(RB_BUILTIN_TYPE(dest) == T_NONE);
5258
5259 if (src_page->slot_size > free_page->slot_size) {
5260 objspace->rcompactor.moved_down_count_table[BUILTIN_TYPE(src)]++;
5261 }
5262 else if (free_page->slot_size > src_page->slot_size) {
5263 objspace->rcompactor.moved_up_count_table[BUILTIN_TYPE(src)]++;
5264 }
5265 objspace->rcompactor.moved_count_table[BUILTIN_TYPE(src)]++;
5266 objspace->rcompactor.total_moved++;
5267
5268 gc_move(objspace, src, dest, src_page->slot_size, free_page->slot_size);
5269 gc_pin(objspace, src);
5270 free_page->free_slots--;
5271
5272 return true;
5273}
5274
5275static void
5276gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
5277{
5278 struct heap_page *cursor = heap->compact_cursor;
5279
5280 while (cursor) {
5281 unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
5282 cursor = ccan_list_next(&heap->pages, cursor, page_node);
5283 }
5284}
5285
5286static void gc_update_references(rb_objspace_t * objspace);
5287static void invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page);
5288
5289#ifndef GC_CAN_COMPILE_COMPACTION
5290#if defined(__wasi__) /* WebAssembly doesn't support signals */
5291# define GC_CAN_COMPILE_COMPACTION 0
5292#else
5293# define GC_CAN_COMPILE_COMPACTION 1
5294#endif
5295#endif
5296
5297#if defined(__MINGW32__) || defined(_WIN32)
5298# define GC_COMPACTION_SUPPORTED 1
5299#else
5300/* On platforms other than MinGW/Windows, the mprotect read barrier needs
5301 * heap pages allocated with mmap; without mmap we must disable compaction. */
5302# define GC_COMPACTION_SUPPORTED (GC_CAN_COMPILE_COMPACTION && HEAP_PAGE_ALLOC_USE_MMAP)
5303#endif
5304
5305#if GC_CAN_COMPILE_COMPACTION
5306static void
5307read_barrier_handler(uintptr_t original_address)
5308{
5309 VALUE obj;
5310 rb_objspace_t * objspace = &rb_objspace;
5311
5312 /* Calculate address aligned to slots. */
5313 uintptr_t address = original_address - (original_address % BASE_SLOT_SIZE);
5314
5315 obj = (VALUE)address;
5316
5317 struct heap_page_body *page_body = GET_PAGE_BODY(obj);
5318
5319 /* If the page_body is NULL, then mprotect cannot handle it and will crash
5320 * with "Cannot allocate memory". */
5321 if (page_body == NULL) {
5322 rb_bug("read_barrier_handler: segmentation fault at %p", (void *)original_address);
5323 }
5324
5325 RB_VM_LOCK_ENTER();
5326 {
5327 unlock_page_body(objspace, page_body);
5328
5329 objspace->profile.read_barrier_faults++;
5330
5331 invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
5332 }
5333 RB_VM_LOCK_LEAVE();
5334}
5335#endif
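/* In rough terms the compaction read barrier works as follows: while pages
 * are being compacted their bodies are protected (mprotect/VirtualProtect),
 * so a stray access to a moved slot faults; the handlers installed below
 * funnel the faulting address into read_barrier_handler(), which unprotects
 * the page and invalidates the T_MOVED objects on it so the mutator only
 * ever observes valid objects. */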
5336
5337#if !GC_CAN_COMPILE_COMPACTION
5338static void
5339uninstall_handlers(void)
5340{
5341 /* no-op */
5342}
5343
5344static void
5345install_handlers(void)
5346{
5347 /* no-op */
5348}
5349#elif defined(_WIN32)
5350static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
5351typedef void (*signal_handler)(int);
5352static signal_handler old_sigsegv_handler;
5353
5354static LONG WINAPI
5355read_barrier_signal(EXCEPTION_POINTERS * info)
5356{
5357 /* EXCEPTION_ACCESS_VIOLATION is what's raised by access to protected pages */
5358 if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
5359 /* > The second array element specifies the virtual address of the inaccessible data.
5360 * https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
5361 *
5362 * Use this address to invalidate the page */
5363 read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
5364 return EXCEPTION_CONTINUE_EXECUTION;
5365 }
5366 else {
5367 return EXCEPTION_CONTINUE_SEARCH;
5368 }
5369}
5370
5371static void
5372uninstall_handlers(void)
5373{
5374 signal(SIGSEGV, old_sigsegv_handler);
5375 SetUnhandledExceptionFilter(old_handler);
5376}
5377
5378static void
5379install_handlers(void)
5380{
5381 /* Remove SEGV handler so that the Unhandled Exception Filter handles it */
5382 old_sigsegv_handler = signal(SIGSEGV, NULL);
5383 /* Unhandled Exception Filter has access to the violation address similar
5384 * to si_addr from sigaction */
5385 old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
5386}
5387#else
5388static struct sigaction old_sigbus_handler;
5389static struct sigaction old_sigsegv_handler;
5390
5391#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5392static exception_mask_t old_exception_masks[32];
5393static mach_port_t old_exception_ports[32];
5394static exception_behavior_t old_exception_behaviors[32];
5395static thread_state_flavor_t old_exception_flavors[32];
5396static mach_msg_type_number_t old_exception_count;
5397
5398static void
5399disable_mach_bad_access_exc(void)
5400{
5401 old_exception_count = sizeof(old_exception_masks) / sizeof(old_exception_masks[0]);
5402 task_swap_exception_ports(
5403 mach_task_self(), EXC_MASK_BAD_ACCESS,
5404 MACH_PORT_NULL, EXCEPTION_DEFAULT, 0,
5405 old_exception_masks, &old_exception_count,
5406 old_exception_ports, old_exception_behaviors, old_exception_flavors
5407 );
5408}
5409
5410static void
5411restore_mach_bad_access_exc(void)
5412{
5413 for (mach_msg_type_number_t i = 0; i < old_exception_count; i++) {
5414 task_set_exception_ports(
5415 mach_task_self(),
5416 old_exception_masks[i], old_exception_ports[i],
5417 old_exception_behaviors[i], old_exception_flavors[i]
5418 );
5419 }
5420}
5421#endif
5422
5423static void
5424read_barrier_signal(int sig, siginfo_t * info, void * data)
5425{
5426 // setup SEGV/BUS handlers for errors
5427 struct sigaction prev_sigbus, prev_sigsegv;
5428 sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
5429 sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
5430
5431 // enable SIGBUS/SEGV
5432 sigset_t set, prev_set;
5433 sigemptyset(&set);
5434 sigaddset(&set, SIGBUS);
5435 sigaddset(&set, SIGSEGV);
5436 sigprocmask(SIG_UNBLOCK, &set, &prev_set);
5437#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5438 disable_mach_bad_access_exc();
5439#endif
5440 // run handler
5441 read_barrier_handler((uintptr_t)info->si_addr);
5442
5443 // reset SEGV/BUS handlers
5444#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5445 restore_mach_bad_access_exc();
5446#endif
5447 sigaction(SIGBUS, &prev_sigbus, NULL);
5448 sigaction(SIGSEGV, &prev_sigsegv, NULL);
5449 sigprocmask(SIG_SETMASK, &prev_set, NULL);
5450}
5451
5452static void
5453uninstall_handlers(void)
5454{
5455#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5456 restore_mach_bad_access_exc();
5457#endif
5458 sigaction(SIGBUS, &old_sigbus_handler, NULL);
5459 sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
5460}
5461
5462static void
5463install_handlers(void)
5464{
5465 struct sigaction action;
5466 memset(&action, 0, sizeof(struct sigaction));
5467 sigemptyset(&action.sa_mask);
5468 action.sa_sigaction = read_barrier_signal;
5469 action.sa_flags = SA_SIGINFO | SA_ONSTACK;
5470
5471 sigaction(SIGBUS, &action, &old_sigbus_handler);
5472 sigaction(SIGSEGV, &action, &old_sigsegv_handler);
5473#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
5474 disable_mach_bad_access_exc();
5475#endif
5476}
5477#endif
5478
5479static void
5480revert_stack_objects(VALUE stack_obj, void *ctx)
5481{
5482 rb_objspace_t * objspace = (rb_objspace_t*)ctx;
5483
5484 if (BUILTIN_TYPE(stack_obj) == T_MOVED) {
5485 /* For now we'll revert the whole page if the object made it to the
5486 * stack. I think we can change this to move just the one object
5487 * back though */
5488 invalidate_moved_page(objspace, GET_HEAP_PAGE(stack_obj));
5489 }
5490}
5491
5492static void
5493revert_machine_stack_references(rb_objspace_t *objspace, VALUE v)
5494{
5495 if (is_pointer_to_heap(objspace, (void *)v)) {
5496 if (BUILTIN_TYPE(v) == T_MOVED) {
5497 /* For now we'll revert the whole page if the object made it to the
5498 * stack. I think we can change this to move just the one object
5499 * back though */
5500 invalidate_moved_page(objspace, GET_HEAP_PAGE(v));
5501 }
5502 }
5503}
5504
5505static void each_machine_stack_value(const rb_execution_context_t *ec, void (*cb)(rb_objspace_t *, VALUE));
5506
5507static void
5508check_stack_for_moved(rb_objspace_t *objspace)
5509{
5510 rb_execution_context_t *ec = GET_EC();
5511 rb_vm_t *vm = rb_ec_vm_ptr(ec);
5512 rb_vm_each_stack_value(vm, revert_stack_objects, (void*)objspace);
5513 each_machine_stack_value(ec, revert_machine_stack_references);
5514}
5515
5516static void gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode);
5517
5518static void
5519gc_compact_finish(rb_objspace_t *objspace)
5520{
5521 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5522 rb_size_pool_t *size_pool = &size_pools[i];
5523 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5524 gc_unprotect_pages(objspace, heap);
5525 }
5526
5527 uninstall_handlers();
5528
5529 /* The mutator is allowed to run during incremental sweeping. T_MOVED
5530 * objects can get pushed on the stack and when the compaction process
5531 * finishes up, it may remove the read barrier before anything has a
5532 * chance to read from the T_MOVED address. To fix this, we scan the stack
5533 * then revert any moved objects that made it to the stack. */
5534 check_stack_for_moved(objspace);
5535
5536 gc_update_references(objspace);
5537 objspace->profile.compact_count++;
5538
5539 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5540 rb_size_pool_t *size_pool = &size_pools[i];
5541 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5542 heap->compact_cursor = NULL;
5543 heap->free_pages = NULL;
5544 heap->compact_cursor_index = 0;
5545 }
5546
5547 if (gc_prof_enabled(objspace)) {
5548 gc_profile_record *record = gc_prof_record(objspace);
5549 record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
5550 }
5551 objspace->flags.during_compacting = FALSE;
5552}
5553
5554struct gc_sweep_context {
5555 struct heap_page *page;
5556 int final_slots;
5557 int freed_slots;
5558 int empty_slots;
5559};
5560
5561static inline void
5562gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct gc_sweep_context *ctx)
5563{
5564 struct heap_page * sweep_page = ctx->page;
5565 short slot_size = sweep_page->slot_size;
5566 short slot_bits = slot_size / BASE_SLOT_SIZE;
5567 GC_ASSERT(slot_bits > 0);
5568
5569 do {
5570 VALUE vp = (VALUE)p;
5571 GC_ASSERT(vp % BASE_SLOT_SIZE == 0);
5572
5573 asan_unpoison_object(vp, false);
5574 if (bitset & 1) {
5575 switch (BUILTIN_TYPE(vp)) {
5576 default: /* majority case */
5577 gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
5578#if RGENGC_CHECK_MODE
5579 if (!is_full_marking(objspace)) {
5580 if (RVALUE_OLD_P(vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
5581 if (rgengc_remembered_sweep(objspace, vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
5582 }
5583#endif
5584 if (obj_free(objspace, vp)) {
5585 // always add free slots back to the swept page's freelist,
5586 // so that if we're compacting, we can re-use the slots
5587 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, BASE_SLOT_SIZE);
5588 heap_page_add_freeobj(objspace, sweep_page, vp);
5589 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5590 ctx->freed_slots++;
5591 }
5592 else {
5593 ctx->final_slots++;
5594 }
5595 break;
5596
5597 case T_MOVED:
5598 if (objspace->flags.during_compacting) {
5599 /* The sweep cursor shouldn't have made it to any
5600 * T_MOVED slots while the compact flag is enabled.
5601 * The sweep cursor and compact cursor move in
5602 * opposite directions, and when they meet references will
5603 * get updated and "during_compacting" should get disabled */
5604 rb_bug("T_MOVED shouldn't be seen until compaction is finished\n");
5605 }
5606 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
5607 ctx->empty_slots++;
5608 heap_page_add_freeobj(objspace, sweep_page, vp);
5609 break;
5610 case T_ZOMBIE:
5611 /* already counted */
5612 break;
5613 case T_NONE:
5614 ctx->empty_slots++; /* already freed */
5615 break;
5616 }
5617 }
5618 p += slot_size;
5619 bitset >>= slot_bits;
5620 } while (bitset);
5621}
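/* For illustration, on a 64-bit build (BITS_BITLENGTH == 64): in a size pool
 * whose slot_size is 2 * BASE_SLOT_SIZE, slot_bits is 2 and one bitmap plane
 * covers 32 slots.  Each iteration above inspects the low bit for the current
 * slot, then shifts the inverted mark bits right by slot_bits and advances p
 * by slot_size, stopping once no unmarked candidates remain in the plane. */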
5622
5623static inline void
5624gc_sweep_page(rb_objspace_t *objspace, rb_heap_t *heap, struct gc_sweep_context *ctx)
5625{
5626 struct heap_page *sweep_page = ctx->page;
5627 GC_ASSERT(SIZE_POOL_EDEN_HEAP(sweep_page->size_pool) == heap);
5628
5629 uintptr_t p;
5630 bits_t *bits, bitset;
5631
5632 gc_report(2, objspace, "page_sweep: start.\n");
5633
5634#if RGENGC_CHECK_MODE
5635 if (!objspace->flags.immediate_sweep) {
5636 GC_ASSERT(sweep_page->flags.before_sweep == TRUE);
5637 }
5638#endif
5639 sweep_page->flags.before_sweep = FALSE;
5640 sweep_page->free_slots = 0;
5641
5642 p = (uintptr_t)sweep_page->start;
5643 bits = sweep_page->mark_bits;
5644
5645 int page_rvalue_count = sweep_page->total_slots * (sweep_page->slot_size / BASE_SLOT_SIZE);
5646 int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
5647 if (out_of_range_bits != 0) { // sizeof(RVALUE) == 64
5648 bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
5649 }
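    /* For example, if NUM_IN_PAGE(p) + page_rvalue_count were 300 with
     * BITS_BITLENGTH == 64, out_of_range_bits would be 300 % 64 == 44 and the
     * top 20 bits of the final plane get forced on in the mark bitmap, so the
     * inverted bitset below never treats the nonexistent trailing slots as
     * sweepable. */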
5650
5651 /* The last bitmap plane may not be used if the last plane does not
5652 * have enough space for the slot_size. In that case, the last plane must
5653 * be skipped since none of the bits will be set. */
5654 int bitmap_plane_count = CEILDIV(NUM_IN_PAGE(p) + page_rvalue_count, BITS_BITLENGTH);
5655 GC_ASSERT(bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT - 1 ||
5656 bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT);
5657
5658 // Skip out of range slots at the head of the page
5659 bitset = ~bits[0];
5660 bitset >>= NUM_IN_PAGE(p);
5661 if (bitset) {
5662 gc_sweep_plane(objspace, heap, p, bitset, ctx);
5663 }
5664 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5665
5666 for (int i = 1; i < bitmap_plane_count; i++) {
5667 bitset = ~bits[i];
5668 if (bitset) {
5669 gc_sweep_plane(objspace, heap, p, bitset, ctx);
5670 }
5671 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5672 }
5673
5674 if (!heap->compact_cursor) {
5675 gc_setup_mark_bits(sweep_page);
5676 }
5677
5678#if GC_PROFILE_MORE_DETAIL
5679 if (gc_prof_enabled(objspace)) {
5680 gc_profile_record *record = gc_prof_record(objspace);
5681 record->removing_objects += ctx->final_slots + ctx->freed_slots;
5682 record->empty_objects += ctx->empty_slots;
5683 }
5684#endif
5685 if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
5686 rb_gc_count(),
5687 sweep_page->total_slots,
5688 ctx->freed_slots, ctx->empty_slots, ctx->final_slots);
5689
5690 sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
5691 objspace->profile.total_freed_objects += ctx->freed_slots;
5692
5693 if (heap_pages_deferred_final && !finalizing) {
5694 rb_thread_t *th = GET_THREAD();
5695 if (th) {
5696 gc_finalize_deferred_register(objspace);
5697 }
5698 }
5699
5700#if RGENGC_CHECK_MODE
5701 short freelist_len = 0;
5702 asan_unlock_freelist(sweep_page);
5703 RVALUE *ptr = sweep_page->freelist;
5704 while (ptr) {
5705 freelist_len++;
5706 ptr = ptr->as.free.next;
5707 }
5708 asan_lock_freelist(sweep_page);
5709 if (freelist_len != sweep_page->free_slots) {
5710 rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
5711 }
5712#endif
5713
5714 gc_report(2, objspace, "page_sweep: end.\n");
5715}
5716
5717#if !USE_RVARGC
5718/* allocate additional minimum page to work */
5719static void
5720gc_heap_prepare_minimum_pages(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
5721{
5722 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5723 if (!heap->free_pages && heap_increment(objspace, size_pool, heap) == FALSE) {
5724 /* there is no free after page_sweep() */
5725 size_pool_allocatable_pages_set(objspace, size_pool, 1);
5726 if (!heap_increment(objspace, size_pool, heap)) { /* can't allocate additional free objects */
5727 rb_memerror();
5728 }
5729 }
5730 }
5731}
5732#endif
5733
5734static const char *
5735gc_mode_name(enum gc_mode mode)
5736{
5737 switch (mode) {
5738 case gc_mode_none: return "none";
5739 case gc_mode_marking: return "marking";
5740 case gc_mode_sweeping: return "sweeping";
5741 case gc_mode_compacting: return "compacting";
5742 default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
5743 }
5744}
5745
5746static void
5747gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
5748{
5749#if RGENGC_CHECK_MODE
5750 enum gc_mode prev_mode = gc_mode(objspace);
5751 switch (prev_mode) {
5752 case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
5753 case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
5754 case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none || mode == gc_mode_compacting); break;
5755 case gc_mode_compacting: GC_ASSERT(mode == gc_mode_none); break;
5756 }
5757#endif
5758 if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
5759 gc_mode_set(objspace, mode);
5760}
5761
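/* Append `freelist` to the tail of `page`'s existing freelist. Links are
 * unpoisoned one at a time while walking and re-poisoned afterwards so the
 * freelist stays protected under ASan. */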
5762static void
5763heap_page_freelist_append(struct heap_page *page, RVALUE *freelist)
5764{
5765 if (freelist) {
5766 asan_unlock_freelist(page);
5767 if (page->freelist) {
5768 RVALUE *p = page->freelist;
5769 asan_unpoison_object((VALUE)p, false);
5770 while (p->as.free.next) {
5771 RVALUE *prev = p;
5772 p = p->as.free.next;
5773 asan_poison_object((VALUE)prev);
5774 asan_unpoison_object((VALUE)p, false);
5775 }
5776 p->as.free.next = freelist;
5777 asan_poison_object((VALUE)p);
5778 }
5779 else {
5780 page->freelist = freelist;
5781 }
5782 asan_lock_freelist(page);
5783 }
5784}
5785
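/* Reset a heap's per-sweep state: sweeping restarts from the first page, and
 * for lazy (non-immediate) sweep every page is flagged as not yet swept. */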
5786static void
5787gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
5788{
5789 heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node);
5790 heap->free_pages = NULL;
5791#if GC_ENABLE_INCREMENTAL_MARK
5792 heap->pooled_pages = NULL;
5793#endif
5794 if (!objspace->flags.immediate_sweep) {
5795 struct heap_page *page = NULL;
5796
5797 ccan_list_for_each(&heap->pages, page, page_node) {
5798 page->flags.before_sweep = TRUE;
5799 }
5800 }
5801}
5802
5803#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
5804__attribute__((noinline))
5805#endif
5806static void
5807gc_sweep_start(rb_objspace_t *objspace)
5808{
5809 gc_mode_transition(objspace, gc_mode_sweeping);
5810
5811#if GC_ENABLE_INCREMENTAL_MARK
5812 objspace->rincgc.pooled_slots = 0;
5813#endif
5814
5815 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5816 rb_size_pool_t *size_pool = &size_pools[i];
5817 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5818
5819 gc_sweep_start_heap(objspace, heap);
5820
5821#if USE_RVARGC
5822 /* We should call gc_sweep_finish_size_pool for size pools with no pages. */
5823 if (heap->sweeping_page == NULL) {
5824 GC_ASSERT(heap->total_pages == 0);
5825 GC_ASSERT(heap->total_slots == 0);
5826 gc_sweep_finish_size_pool(objspace, size_pool);
5827 }
5828#endif
5829 }
5830
5831 rb_ractor_t *r = NULL;
5832 ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
5833 rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
5834 }
5835}
5836
5837#if USE_RVARGC
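/* Called when a size pool's eden heap has no more pages to sweep. Resurrects
 * tomb pages while too few slots were reclaimed, keeps small pools allocatable,
 * and decides whether to grow the heap or request a major GC (NOFREE). */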
5838static void
5839gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
5840{
5841 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5842 size_t total_slots = heap->total_slots + SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5843 size_t total_pages = heap->total_pages + SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5844 size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;
5845
5846 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
5847
5848 /* If we don't have enough slots and we have pages on the tomb heap, move
5849 * pages from the tomb heap to the eden heap. This may prevent page
5850 * creation thrashing (frequently allocating and deallocating pages) and
5851 * GC thrashing (running GC more frequently than required). */
5852 struct heap_page *resurrected_page;
5853 while ((swept_slots < min_free_slots || swept_slots < gc_params.heap_init_slots) &&
5854 (resurrected_page = heap_page_resurrect(objspace, size_pool))) {
5855 swept_slots += resurrected_page->free_slots;
5856
5857 heap_add_page(objspace, size_pool, heap, resurrected_page);
5858 heap_add_freepage(heap, resurrected_page);
5859 }
5860
5861 /* Some size pools may have very few pages (or even no pages). These size pools
5862 * should still have allocatable pages. */
5863 if (min_free_slots < gc_params.heap_init_slots && swept_slots < gc_params.heap_init_slots) {
5864 int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
5865 size_t extra_slots = gc_params.heap_init_slots - swept_slots;
5866 size_t extend_page_count = CEILDIV(extra_slots * multiple, HEAP_PAGE_OBJ_LIMIT);
5867 if (extend_page_count > size_pool->allocatable_pages) {
5868 size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
5869 }
5870 }
5871
5872 if (swept_slots < min_free_slots) {
5873 bool grow_heap = is_full_marking(objspace);
5874
5875 if (!is_full_marking(objspace)) {
5876 /* The heap is a growth heap if it freed more slots than it had empty
5877 * slots and used up all of its allocatable pages. */
5878 bool is_growth_heap = (size_pool->empty_slots == 0 ||
5879 size_pool->freed_slots > size_pool->empty_slots) &&
5880 size_pool->allocatable_pages == 0;
5881
5882 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
5883 grow_heap = TRUE;
5884 }
5885 else if (is_growth_heap) { /* Only growth heaps are allowed to start a major GC. */
5886 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
5887 size_pool->force_major_gc_count++;
5888 }
5889 }
5890
5891 if (grow_heap) {
5892 size_t extend_page_count = heap_extend_pages(objspace, size_pool, swept_slots, total_slots, total_pages);
5893
5894 if (extend_page_count > size_pool->allocatable_pages) {
5895 size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
5896 }
5897 }
5898 }
5899}
5900#endif
5901
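/* Wrap up the sweep phase: free unused pages, let each size pool claim its
 * tomb pages as allocatable, splice pooled pages back onto the free list when
 * no incremental marking follows, then return to gc_mode_none. */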
5902static void
5903gc_sweep_finish(rb_objspace_t *objspace)
5904{
5905 gc_report(1, objspace, "gc_sweep_finish\n");
5906
5907 gc_prof_set_heap_info(objspace);
5908 heap_pages_free_unused_pages(objspace);
5909
5910 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
5911 rb_size_pool_t *size_pool = &size_pools[i];
5912
5913 /* if heap_pages has unused pages, then assign them to increment */
5914 size_t tomb_pages = SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5915 if (size_pool->allocatable_pages < tomb_pages) {
5916 size_pool->allocatable_pages = tomb_pages;
5917 }
5918
5919#if USE_RVARGC
5920 size_pool->freed_slots = 0;
5921 size_pool->empty_slots = 0;
5922
5923#if GC_ENABLE_INCREMENTAL_MARK
5924 if (!will_be_incremental_marking(objspace)) {
5925 rb_heap_t *eden_heap = SIZE_POOL_EDEN_HEAP(size_pool);
5926 struct heap_page *end_page = eden_heap->free_pages;
5927 if (end_page) {
5928 while (end_page->free_next) end_page = end_page->free_next;
5929 end_page->free_next = eden_heap->pooled_pages;
5930 }
5931 else {
5932 eden_heap->free_pages = eden_heap->pooled_pages;
5933 }
5934 eden_heap->pooled_pages = NULL;
5935 objspace->rincgc.pooled_slots = 0;
5936 }
5937#endif
5938#endif
5939 }
5940 heap_pages_expand_sorted(objspace);
5941
5942 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_SWEEP, 0);
5943 gc_mode_transition(objspace, gc_mode_none);
5944}
5945
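/* Sweep pages of `heap` until it gains a page with free slots (lazy sweep) or
 * runs out of pages to sweep. Completely empty pages are moved to the tomb
 * heap, limited by GC_SWEEP_PAGES_FREEABLE_PER_STEP. Returns TRUE when the
 * heap has free pages afterwards. */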
5946static int
5947gc_sweep_step(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
5948{
5949 struct heap_page *sweep_page = heap->sweeping_page;
5950 int unlink_limit = GC_SWEEP_PAGES_FREEABLE_PER_STEP;
5951
5952#if GC_ENABLE_INCREMENTAL_MARK
5953 int swept_slots = 0;
5954#if USE_RVARGC
5955 bool need_pool = TRUE;
5956#else
5957 int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
5958#endif
5959
5960 gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
5961#else
5962 gc_report(2, objspace, "gc_sweep_step\n");
5963#endif
5964
5965 if (sweep_page == NULL) return FALSE;
5966
5967#if GC_ENABLE_LAZY_SWEEP
5968 gc_prof_sweep_timer_start(objspace);
5969#endif
5970
5971 do {
5972 RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);
5973
5974 struct gc_sweep_context ctx = {
5975 .page = sweep_page,
5976 .final_slots = 0,
5977 .freed_slots = 0,
5978 .empty_slots = 0,
5979 };
5980 gc_sweep_page(objspace, heap, &ctx);
5981 int free_slots = ctx.freed_slots + ctx.empty_slots;
5982
5983 heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
5984
5985 if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
5986 heap_pages_freeable_pages > 0 &&
5987 unlink_limit > 0) {
5988 heap_pages_freeable_pages--;
5989 unlink_limit--;
5990 /* there are no living objects -> move this page to tomb heap */
5991 heap_unlink_page(objspace, heap, sweep_page);
5992 heap_add_page(objspace, size_pool, SIZE_POOL_TOMB_HEAP(size_pool), sweep_page);
5993 }
5994 else if (free_slots > 0) {
5995#if USE_RVARGC
5996 size_pool->freed_slots += ctx.freed_slots;
5997 size_pool->empty_slots += ctx.empty_slots;
5998#endif
5999
6000#if GC_ENABLE_INCREMENTAL_MARK
6001 if (need_pool) {
6002 heap_add_poolpage(objspace, heap, sweep_page);
6003 need_pool = FALSE;
6004 }
6005 else {
6006 heap_add_freepage(heap, sweep_page);
6007 swept_slots += free_slots;
6008 if (swept_slots > GC_INCREMENTAL_SWEEP_SLOT_COUNT) {
6009 break;
6010 }
6011 }
6012#else
6013 heap_add_freepage(heap, sweep_page);
6014 break;
6015#endif
6016 }
6017 else {
6018 sweep_page->free_next = NULL;
6019 }
6020 } while ((sweep_page = heap->sweeping_page));
6021
6022 if (!heap->sweeping_page) {
6023#if USE_RVARGC
6024 gc_sweep_finish_size_pool(objspace, size_pool);
6025#endif
6026
6027 if (!has_sweeping_pages(objspace)) {
6028 gc_sweep_finish(objspace);
6029 }
6030 }
6031
6032#if GC_ENABLE_LAZY_SWEEP
6033 gc_prof_sweep_timer_stop(objspace);
6034#endif
6035
6036 return heap->free_pages != NULL;
6037}
6038
6039static void
6040gc_sweep_rest(rb_objspace_t *objspace)
6041{
6042 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6043 rb_size_pool_t *size_pool = &size_pools[i];
6044
6045 while (SIZE_POOL_EDEN_HEAP(size_pool)->sweeping_page) {
6046 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6047 }
6048 }
6049}
6050
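/* Continue lazy sweeping from the allocation path. Takes one sweep step per
 * size pool; if the pool that requested a slot still has none, either grow it
 * by a page or finish sweeping entirely. */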
6051static void
6052gc_sweep_continue(rb_objspace_t *objspace, rb_size_pool_t *sweep_size_pool, rb_heap_t *heap)
6053{
6054 GC_ASSERT(dont_gc_val() == FALSE);
6055 if (!GC_ENABLE_LAZY_SWEEP) return;
6056
6057 unsigned int lock_lev;
6058 gc_enter(objspace, gc_enter_event_sweep_continue, &lock_lev);
6059
6060 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6061 rb_size_pool_t *size_pool = &size_pools[i];
6062 if (!gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool))) {
6063#if USE_RVARGC
6064 /* sweep_size_pool requires a free slot but sweeping did not yield any. */
6065 if (size_pool == sweep_size_pool) {
6066 if (size_pool->allocatable_pages > 0) {
6067 heap_increment(objspace, size_pool, heap);
6068 }
6069 else {
6070 /* Not allowed to create a new page so finish sweeping. */
6071 gc_sweep_rest(objspace);
6072 break;
6073 }
6074 }
6075#endif
6076 }
6077 }
6078
6079 gc_exit(objspace, gc_enter_event_sweep_continue, &lock_lev);
6080}
6081
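/* Invalidate compaction for one bitmap plane: each set bit marks a T_MOVED
 * forwarding slot, whose object is copied back from its new location, and the
 * vacated destination slot is returned to its page's freelist. */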
6082static void
6083invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_t p, bits_t bitset)
6084{
6085 if (bitset) {
6086 do {
6087 if (bitset & 1) {
6088 VALUE forwarding_object = (VALUE)p;
6089 VALUE object;
6090
6091 if (BUILTIN_TYPE(forwarding_object) == T_MOVED) {
6092 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object));
6093 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
6094
6095 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
6096
6097 object = rb_gc_location(forwarding_object);
6098
6099 shape_id_t original_shape_id = 0;
6100 if (RB_TYPE_P(object, T_OBJECT)) {
6101 original_shape_id = RMOVED(forwarding_object)->original_shape_id;
6102 }
6103
6104 gc_move(objspace, object, forwarding_object, GET_HEAP_PAGE(object)->slot_size, page->slot_size);
6105 /* forwarding_object is now our actual object, and "object"
6106 * is the free slot for the original page */
6107
6108 if (original_shape_id) {
6109 ROBJECT_SET_SHAPE_ID(forwarding_object, original_shape_id);
6110 }
6111
6112 struct heap_page *orig_page = GET_HEAP_PAGE(object);
6113 orig_page->free_slots++;
6114 heap_page_add_freeobj(objspace, orig_page, object);
6115
6116 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
6117 GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_MOVED);
6118 GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_NONE);
6119 }
6120 }
6121 p += BASE_SLOT_SIZE;
6122 bitset >>= 1;
6123 } while (bitset);
6124 }
6125}
6126
6127static void
6128invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page)
6129{
6130 int i;
6131 bits_t *mark_bits, *pin_bits;
6132 bits_t bitset;
6133
6134 mark_bits = page->mark_bits;
6135 pin_bits = page->pinned_bits;
6136
6137 uintptr_t p = page->start;
6138
6139 // Skip out of range slots at the head of the page
6140 bitset = pin_bits[0] & ~mark_bits[0];
6141 bitset >>= NUM_IN_PAGE(p);
6142 invalidate_moved_plane(objspace, page, p, bitset);
6143 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
6144
6145 for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
6146 /* Moved objects are pinned but never marked. We reuse the pin bits
6147 * to indicate there is a moved object in this slot. */
6148 bitset = pin_bits[i] & ~mark_bits[i];
6149
6150 invalidate_moved_plane(objspace, page, p, bitset);
6151 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
6152 }
6153}
6154
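/* Enter compaction: mark every eden page as before-sweep, point each heap's
 * compact cursor at its last page, reset the per-type move statistics, and
 * install the read-barrier handlers for pages that will hold T_MOVED slots. */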
6155static void
6156gc_compact_start(rb_objspace_t *objspace)
6157{
6158 struct heap_page *page = NULL;
6159 gc_mode_transition(objspace, gc_mode_compacting);
6160
6161 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6162 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
6163 ccan_list_for_each(&heap->pages, page, page_node) {
6164 page->flags.before_sweep = TRUE;
6165 }
6166
6167 heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node);
6168 heap->compact_cursor_index = 0;
6169 }
6170
6171 if (gc_prof_enabled(objspace)) {
6172 gc_profile_record *record = gc_prof_record(objspace);
6173 record->moved_objects = objspace->rcompactor.total_moved;
6174 }
6175
6176 memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
6177 memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
6178 memset(objspace->rcompactor.moved_up_count_table, 0, T_MASK * sizeof(size_t));
6179 memset(objspace->rcompactor.moved_down_count_table, 0, T_MASK * sizeof(size_t));
6180
6181 /* Set up read barrier for pages containing MOVED objects */
6182 install_handlers();
6183}
6184
6185static void gc_sweep_compact(rb_objspace_t *objspace);
6186
6187static void
6188gc_sweep(rb_objspace_t *objspace)
6189{
6190 const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
6191
6192 gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
6193
6194 gc_sweep_start(objspace);
6195 if (objspace->flags.during_compacting) {
6196 gc_sweep_compact(objspace);
6197 }
6198
6199 if (immediate_sweep) {
6200#if !GC_ENABLE_LAZY_SWEEP
6201 gc_prof_sweep_timer_start(objspace);
6202#endif
6203 gc_sweep_rest(objspace);
6204#if !GC_ENABLE_LAZY_SWEEP
6205 gc_prof_sweep_timer_stop(objspace);
6206#endif
6207 }
6208 else {
6209
6210 /* Sweep every size pool. */
6211 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
6212 rb_size_pool_t *size_pool = &size_pools[i];
6213 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6214 }
6215 }
6216
6217#if !USE_RVARGC
6218 rb_size_pool_t *size_pool = &size_pools[0];
6219 gc_heap_prepare_minimum_pages(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
6220#endif
6221}
6222
6223/* Marking - Marking stack */
6224
6225static stack_chunk_t *
6226stack_chunk_alloc(void)
6227{
6228 stack_chunk_t *res;
6229
6230 res = malloc(sizeof(stack_chunk_t));
6231 if (!res)
6232 rb_memerror();
6233
6234 return res;
6235}
6236
6237static inline int
6238is_mark_stack_empty(mark_stack_t *stack)
6239{
6240 return stack->chunk == NULL;
6241}
6242
6243static size_t
6244mark_stack_size(mark_stack_t *stack)
6245{
6246 size_t size = stack->index;
6247 stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
6248
6249 while (chunk) {
6250 size += stack->limit;
6251 chunk = chunk->next;
6252 }
6253 return size;
6254}
6255
6256static void
6257add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
6258{
6259 chunk->next = stack->cache;
6260 stack->cache = chunk;
6261 stack->cache_size++;
6262}
6263
6264static void
6265shrink_stack_chunk_cache(mark_stack_t *stack)
6266{
6267 stack_chunk_t *chunk;
6268
6269 if (stack->unused_cache_size > (stack->cache_size/2)) {
6270 chunk = stack->cache;
6271 stack->cache = stack->cache->next;
6272 stack->cache_size--;
6273 free(chunk);
6274 }
6275 stack->unused_cache_size = stack->cache_size;
6276}
6277
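/* Grow the mark stack by one chunk, reusing a cached chunk when available. */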
6278static void
6279push_mark_stack_chunk(mark_stack_t *stack)
6280{
6281 stack_chunk_t *next;
6282
6283 GC_ASSERT(stack->index == stack->limit);
6284
6285 if (stack->cache_size > 0) {
6286 next = stack->cache;
6287 stack->cache = stack->cache->next;
6288 stack->cache_size--;
6289 if (stack->unused_cache_size > stack->cache_size)
6290 stack->unused_cache_size = stack->cache_size;
6291 }
6292 else {
6293 next = stack_chunk_alloc();
6294 }
6295 next->next = stack->chunk;
6296 stack->chunk = next;
6297 stack->index = 0;
6298}
6299
6300static void
6301pop_mark_stack_chunk(mark_stack_t *stack)
6302{
6303 stack_chunk_t *prev;
6304
6305 prev = stack->chunk->next;
6306 GC_ASSERT(stack->index == 0);
6307 add_stack_chunk_cache(stack, stack->chunk);
6308 stack->chunk = prev;
6309 stack->index = stack->limit;
6310}
6311
6312static void
6313mark_stack_chunk_list_free(stack_chunk_t *chunk)
6314{
6315 stack_chunk_t *next = NULL;
6316
6317 while (chunk != NULL) {
6318 next = chunk->next;
6319 free(chunk);
6320 chunk = next;
6321 }
6322}
6323
6324static void
6325free_stack_chunks(mark_stack_t *stack)
6326{
6327 mark_stack_chunk_list_free(stack->chunk);
6328}
6329
6330static void
6331mark_stack_free_cache(mark_stack_t *stack)
6332{
6333 mark_stack_chunk_list_free(stack->cache);
6334 stack->cache_size = 0;
6335 stack->unused_cache_size = 0;
6336}
6337
6338static void
6339push_mark_stack(mark_stack_t *stack, VALUE data)
6340{
6341 VALUE obj = data;
6342 switch (BUILTIN_TYPE(obj)) {
6343 case T_OBJECT:
6344 case T_CLASS:
6345 case T_MODULE:
6346 case T_FLOAT:
6347 case T_STRING:
6348 case T_REGEXP:
6349 case T_ARRAY:
6350 case T_HASH:
6351 case T_STRUCT:
6352 case T_BIGNUM:
6353 case T_FILE:
6354 case T_DATA:
6355 case T_MATCH:
6356 case T_COMPLEX:
6357 case T_RATIONAL:
6358 case T_TRUE:
6359 case T_FALSE:
6360 case T_SYMBOL:
6361 case T_IMEMO:
6362 case T_ICLASS:
6363 if (stack->index == stack->limit) {
6364 push_mark_stack_chunk(stack);
6365 }
6366 stack->chunk->data[stack->index++] = data;
6367 return;
6368
6369 case T_NONE:
6370 case T_NIL:
6371 case T_FIXNUM:
6372 case T_MOVED:
6373 case T_ZOMBIE:
6374 case T_UNDEF:
6375 case T_MASK:
6376 rb_bug("push_mark_stack() called for broken object");
6377 break;
6378
6379 case T_NODE:
6380 UNEXPECTED_NODE(push_mark_stack);
6381 break;
6382 }
6383
6384 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
6385 BUILTIN_TYPE(obj), (void *)data,
6386 is_pointer_to_heap(&rb_objspace, (void *)data) ? "corrupted object" : "non object");
6387}
6388
6389static int
6390pop_mark_stack(mark_stack_t *stack, VALUE *data)
6391{
6392 if (is_mark_stack_empty(stack)) {
6393 return FALSE;
6394 }
6395 if (stack->index == 1) {
6396 *data = stack->chunk->data[--stack->index];
6397 pop_mark_stack_chunk(stack);
6398 }
6399 else {
6400 *data = stack->chunk->data[--stack->index];
6401 }
6402 return TRUE;
6403}
6404
6405static void
6406init_mark_stack(mark_stack_t *stack)
6407{
6408 int i;
6409
6410 MEMZERO(stack, mark_stack_t, 1);
6411 stack->index = stack->limit = STACK_CHUNK_SIZE;
6412
6413 for (i=0; i < 4; i++) {
6414 add_stack_chunk_cache(stack, stack_chunk_alloc());
6415 }
6416 stack->unused_cache_size = stack->cache_size;
6417}
6418
6419/* Marking */
6420
6421#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
6422
6423#define STACK_START (ec->machine.stack_start)
6424#define STACK_END (ec->machine.stack_end)
6425#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
6426
6427#if STACK_GROW_DIRECTION < 0
6428# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
6429#elif STACK_GROW_DIRECTION > 0
6430# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
6431#else
6432# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
6433 : (size_t)(STACK_END - STACK_START + 1))
6434#endif
6435#if !STACK_GROW_DIRECTION
6436int ruby_stack_grow_direction;
6437int
6438ruby_get_stack_grow_direction(volatile VALUE *addr)
6439{
6440 VALUE *end;
6441 SET_MACHINE_STACK_END(&end);
6442
6443 if (end > addr) return ruby_stack_grow_direction = 1;
6444 return ruby_stack_grow_direction = -1;
6445}
6446#endif
6447
6448size_t
6449ruby_stack_length(VALUE **p)
6450{
6451 rb_execution_context_t *ec = GET_EC();
6452 SET_STACK_END;
6453 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
6454 return STACK_LENGTH;
6455}
6456
6457#define PREVENT_STACK_OVERFLOW 1
6458#ifndef PREVENT_STACK_OVERFLOW
6459#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
6460# define PREVENT_STACK_OVERFLOW 1
6461#else
6462# define PREVENT_STACK_OVERFLOW 0
6463#endif
6464#endif
6465#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
6466static int
6467stack_check(rb_execution_context_t *ec, int water_mark)
6468{
6469 SET_STACK_END;
6470
6471 size_t length = STACK_LENGTH;
6472 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
6473
6474 return length > maximum_length;
6475}
6476#else
6477#define stack_check(ec, water_mark) FALSE
6478#endif
6479
6480#define STACKFRAME_FOR_CALL_CFUNC 2048
6481
6482MJIT_FUNC_EXPORTED int
6483rb_ec_stack_check(rb_execution_context_t *ec)
6484{
6485 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
6486}
6487
6488int
6489ruby_stack_check(void)
6490{
6491 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
6492}
6493
6494ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(rb_objspace_t *objspace, register const VALUE *x, register long n, void (*cb)(rb_objspace_t *, VALUE)));
6495static void
6496each_location(rb_objspace_t *objspace, register const VALUE *x, register long n, void (*cb)(rb_objspace_t *, VALUE))
6497{
6498 VALUE v;
6499 while (n--) {
6500 v = *x;
6501 cb(objspace, v);
6502 x++;
6503 }
6504}
6505
6506static void
6507gc_mark_locations(rb_objspace_t *objspace, const VALUE *start, const VALUE *end, void (*cb)(rb_objspace_t *, VALUE))
6508{
6509 long n;
6510
6511 if (end <= start) return;
6512 n = end - start;
6513 each_location(objspace, start, n, cb);
6514}
6515
6516void
6517rb_gc_mark_locations(const VALUE *start, const VALUE *end)
6518{
6519 gc_mark_locations(&rb_objspace, start, end, gc_mark_maybe);
6520}
6521
6522static void
6523gc_mark_values(rb_objspace_t *objspace, long n, const VALUE *values)
6524{
6525 long i;
6526
6527 for (i=0; i<n; i++) {
6528 gc_mark(objspace, values[i]);
6529 }
6530}
6531
6532void
6533rb_gc_mark_values(long n, const VALUE *values)
6534{
6535 long i;
6536 rb_objspace_t *objspace = &rb_objspace;
6537
6538 for (i=0; i<n; i++) {
6539 gc_mark_and_pin(objspace, values[i]);
6540 }
6541}
6542
6543static void
6544gc_mark_stack_values(rb_objspace_t *objspace, long n, const VALUE *values)
6545{
6546 long i;
6547
6548 for (i=0; i<n; i++) {
6549 if (is_markable_object(objspace, values[i])) {
6550 gc_mark_and_pin(objspace, values[i]);
6551 }
6552 }
6553}
6554
6555void
6556rb_gc_mark_vm_stack_values(long n, const VALUE *values)
6557{
6558 rb_objspace_t *objspace = &rb_objspace;
6559 gc_mark_stack_values(objspace, n, values);
6560}
6561
6562static int
6563mark_value(st_data_t key, st_data_t value, st_data_t data)
6564{
6565 rb_objspace_t *objspace = (rb_objspace_t *)data;
6566 gc_mark(objspace, (VALUE)value);
6567 return ST_CONTINUE;
6568}
6569
6570static int
6571mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
6572{
6573 rb_objspace_t *objspace = (rb_objspace_t *)data;
6574 gc_mark_and_pin(objspace, (VALUE)value);
6575 return ST_CONTINUE;
6576}
6577
6578static void
6579mark_tbl_no_pin(rb_objspace_t *objspace, st_table *tbl)
6580{
6581 if (!tbl || tbl->num_entries == 0) return;
6582 st_foreach(tbl, mark_value, (st_data_t)objspace);
6583}
6584
6585static void
6586mark_tbl(rb_objspace_t *objspace, st_table *tbl)
6587{
6588 if (!tbl || tbl->num_entries == 0) return;
6589 st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
6590}
6591
6592static int
6593mark_key(st_data_t key, st_data_t value, st_data_t data)
6594{
6595 rb_objspace_t *objspace = (rb_objspace_t *)data;
6596 gc_mark_and_pin(objspace, (VALUE)key);
6597 return ST_CONTINUE;
6598}
6599
6600static void
6601mark_set(rb_objspace_t *objspace, st_table *tbl)
6602{
6603 if (!tbl) return;
6604 st_foreach(tbl, mark_key, (st_data_t)objspace);
6605}
6606
6607static int
6608pin_value(st_data_t key, st_data_t value, st_data_t data)
6609{
6610 rb_objspace_t *objspace = (rb_objspace_t *)data;
6611 gc_mark_and_pin(objspace, (VALUE)value);
6612 return ST_CONTINUE;
6613}
6614
6615static void
6616mark_finalizer_tbl(rb_objspace_t *objspace, st_table *tbl)
6617{
6618 if (!tbl) return;
6619 st_foreach(tbl, pin_value, (st_data_t)objspace);
6620}
6621
6622void
6623rb_mark_set(st_table *tbl)
6624{
6625 mark_set(&rb_objspace, tbl);
6626}
6627
6628static int
6629mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
6630{
6631 rb_objspace_t *objspace = (rb_objspace_t *)data;
6632
6633 gc_mark(objspace, (VALUE)key);
6634 gc_mark(objspace, (VALUE)value);
6635 return ST_CONTINUE;
6636}
6637
6638static int
6639pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
6640{
6641 rb_objspace_t *objspace = (rb_objspace_t *)data;
6642
6643 gc_mark_and_pin(objspace, (VALUE)key);
6644 gc_mark_and_pin(objspace, (VALUE)value);
6645 return ST_CONTINUE;
6646}
6647
6648static int
6649pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
6650{
6651 rb_objspace_t *objspace = (rb_objspace_t *)data;
6652
6653 gc_mark_and_pin(objspace, (VALUE)key);
6654 gc_mark(objspace, (VALUE)value);
6655 return ST_CONTINUE;
6656}
6657
6658static void
6659mark_hash(rb_objspace_t *objspace, VALUE hash)
6660{
6661 if (rb_hash_compare_by_id_p(hash)) {
6662 rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
6663 }
6664 else {
6665 rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
6666 }
6667
6668 if (RHASH_AR_TABLE_P(hash)) {
6669 if (LIKELY(during_gc) && RHASH_TRANSIENT_P(hash)) {
6670 rb_transient_heap_mark(hash, RHASH_AR_TABLE(hash));
6671 }
6672 }
6673 else {
6674 VM_ASSERT(!RHASH_TRANSIENT_P(hash));
6675 }
6676 gc_mark(objspace, RHASH(hash)->ifnone);
6677}
6678
6679static void
6680mark_st(rb_objspace_t *objspace, st_table *tbl)
6681{
6682 if (!tbl) return;
6683 st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
6684}
6685
6686void
6687rb_mark_hash(st_table *tbl)
6688{
6689 mark_st(&rb_objspace, tbl);
6690}
6691
6692static void
6693mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
6694{
6695 const rb_method_definition_t *def = me->def;
6696
6697 gc_mark(objspace, me->owner);
6698 gc_mark(objspace, me->defined_class);
6699
6700 if (def) {
6701 switch (def->type) {
6702 case VM_METHOD_TYPE_ISEQ:
6703 if (def->body.iseq.iseqptr) gc_mark(objspace, (VALUE)def->body.iseq.iseqptr);
6704 gc_mark(objspace, (VALUE)def->body.iseq.cref);
6705
6706 if (def->iseq_overload && me->defined_class) {
6707 // it can be a key of "overloaded_cme" table
6708 // so it should be pinned.
6709 gc_mark_and_pin(objspace, (VALUE)me);
6710 }
6711 break;
6712 case VM_METHOD_TYPE_ATTRSET:
6713 case VM_METHOD_TYPE_IVAR:
6714 gc_mark(objspace, def->body.attr.location);
6715 break;
6716 case VM_METHOD_TYPE_BMETHOD:
6717 gc_mark(objspace, def->body.bmethod.proc);
6718 if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
6719 break;
6720 case VM_METHOD_TYPE_ALIAS:
6721 gc_mark(objspace, (VALUE)def->body.alias.original_me);
6722 return;
6723 case VM_METHOD_TYPE_REFINED:
6724 gc_mark(objspace, (VALUE)def->body.refined.orig_me);
6725 gc_mark(objspace, (VALUE)def->body.refined.owner);
6726 break;
6727 case VM_METHOD_TYPE_CFUNC:
6728 case VM_METHOD_TYPE_ZSUPER:
6729 case VM_METHOD_TYPE_MISSING:
6730 case VM_METHOD_TYPE_OPTIMIZED:
6731 case VM_METHOD_TYPE_UNDEF:
6732 case VM_METHOD_TYPE_NOTIMPLEMENTED:
6733 break;
6734 }
6735 }
6736}
6737
6738static enum rb_id_table_iterator_result
6739mark_method_entry_i(VALUE me, void *data)
6740{
6741 rb_objspace_t *objspace = (rb_objspace_t *)data;
6742
6743 gc_mark(objspace, me);
6744 return ID_TABLE_CONTINUE;
6745}
6746
6747static void
6748mark_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
6749{
6750 if (tbl) {
6751 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
6752 }
6753}
6754
6755static enum rb_id_table_iterator_result
6756mark_const_entry_i(VALUE value, void *data)
6757{
6758 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
6759 rb_objspace_t *objspace = data;
6760
6761 gc_mark(objspace, ce->value);
6762 gc_mark(objspace, ce->file);
6763 return ID_TABLE_CONTINUE;
6764}
6765
6766static void
6767mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
6768{
6769 if (!tbl) return;
6770 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
6771}
6772
6773#if STACK_GROW_DIRECTION < 0
6774#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
6775#elif STACK_GROW_DIRECTION > 0
6776#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
6777#else
6778#define GET_STACK_BOUNDS(start, end, appendix) \
6779 ((STACK_END < STACK_START) ? \
6780 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
6781#endif
6782
6783static void each_stack_location(rb_objspace_t *objspace, const rb_execution_context_t *ec,
6784 const VALUE *stack_start, const VALUE *stack_end, void (*cb)(rb_objspace_t *, VALUE));
6785
6786#if defined(__wasm__)
6787
6788
6789static VALUE *rb_stack_range_tmp[2];
6790
6791static void
6792rb_mark_locations(void *begin, void *end)
6793{
6794 rb_stack_range_tmp[0] = begin;
6795 rb_stack_range_tmp[1] = end;
6796}
6797
6798# if defined(__EMSCRIPTEN__)
6799
6800static void
6801mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
6802{
6803 emscripten_scan_stack(rb_mark_locations);
6804 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6805
6806 emscripten_scan_registers(rb_mark_locations);
6807 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6808}
6809# else // use Asyncify version
6810
6811static void
6812mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
6813{
6814 VALUE *stack_start, *stack_end;
6815 SET_STACK_END;
6816 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6817 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6818
6819 rb_wasm_scan_locals(rb_mark_locations);
6820 each_stack_location(objspace, ec, rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe);
6821}
6822
6823# endif
6824
6825#else // !defined(__wasm__)
6826
6827static void
6828mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
6829{
6830 union {
6831 rb_jmp_buf j;
6832 VALUE v[sizeof(rb_jmp_buf) / (sizeof(VALUE))];
6833 } save_regs_gc_mark;
6834 VALUE *stack_start, *stack_end;
6835
6836 FLUSH_REGISTER_WINDOWS;
6837 memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
6838 /* This assumes that all registers are saved into the jmp_buf (and stack) */
6839 rb_setjmp(save_regs_gc_mark.j);
6840
6841 /* SET_STACK_END must be called in this function because
6842 * the stack frame of this function may contain
6843 * callee save registers and they should be marked. */
6844 SET_STACK_END;
6845 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6846
6847 each_location(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v), gc_mark_maybe);
6848
6849 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6850}
6851#endif
6852
6853static void
6854each_machine_stack_value(const rb_execution_context_t *ec, void (*cb)(rb_objspace_t *, VALUE))
6855{
6856 rb_objspace_t *objspace = &rb_objspace;
6857 VALUE *stack_start, *stack_end;
6858
6859 GET_STACK_BOUNDS(stack_start, stack_end, 0);
6860 each_stack_location(objspace, ec, stack_start, stack_end, cb);
6861}
6862
6863void
6864rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
6865{
6866 each_machine_stack_value(ec, gc_mark_maybe);
6867}
6868
6869static void
6870each_stack_location(rb_objspace_t *objspace, const rb_execution_context_t *ec,
6871 const VALUE *stack_start, const VALUE *stack_end, void (*cb)(rb_objspace_t *, VALUE))
6872{
6873
6874 gc_mark_locations(objspace, stack_start, stack_end, cb);
6875
6876#if defined(__mc68000__)
6877 gc_mark_locations(objspace,
6878 (VALUE*)((char*)stack_start + 2),
6879 (VALUE*)((char*)stack_end - 2), cb);
6880#endif
6881}
6882
6883void
6884rb_mark_tbl(st_table *tbl)
6885{
6886 mark_tbl(&rb_objspace, tbl);
6887}
6888
6889void
6890rb_mark_tbl_no_pin(st_table *tbl)
6891{
6892 mark_tbl_no_pin(&rb_objspace, tbl);
6893}
6894
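/* Conservatively mark a value that may or may not be an object reference.
 * Non-heap pointers are ignored; live-looking slots are marked and pinned,
 * since a stack reference must not be moved by compaction. */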
6895static void
6896gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
6897{
6898 (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
6899
6900 if (is_pointer_to_heap(objspace, (void *)obj)) {
6901 void *ptr = asan_unpoison_object_temporary(obj);
6902
6903 /* Garbage can live on the stack, so do not mark or pin */
6904 switch (BUILTIN_TYPE(obj)) {
6905 case T_ZOMBIE:
6906 case T_NONE:
6907 break;
6908 default:
6909 gc_mark_and_pin(objspace, obj);
6910 break;
6911 }
6912
6913 if (ptr) {
6914 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
6915 asan_poison_object(obj);
6916 }
6917 }
6918}
6919
6920void
6921rb_gc_mark_maybe(VALUE obj)
6922{
6923 gc_mark_maybe(&rb_objspace, obj);
6924}
6925
6926static inline int
6927gc_mark_set(rb_objspace_t *objspace, VALUE obj)
6928{
6929 ASSERT_vm_locking();
6930 if (RVALUE_MARKED(obj)) return 0;
6931 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
6932 return 1;
6933}
6934
6935static int
6936gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
6937{
6938 struct heap_page *page = GET_HEAP_PAGE(obj);
6939 bits_t *uncollectible_bits = &page->uncollectible_bits[0];
6940
6941 if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
6942 page->flags.has_uncollectible_shady_objects = TRUE;
6943 MARK_IN_BITMAP(uncollectible_bits, obj);
6944 objspace->rgengc.uncollectible_wb_unprotected_objects++;
6945
6946#if RGENGC_PROFILE > 0
6947 objspace->profile.total_remembered_shady_object_count++;
6948#if RGENGC_PROFILE >= 2
6949 objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
6950#endif
6951#endif
6952 return TRUE;
6953 }
6954 else {
6955 return FALSE;
6956 }
6957}
6958
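/* Generational bookkeeping for the edge parent -> obj. When the parent is
 * old, a WB-unprotected child is remembered; a young child is promoted to old
 * (and greyed or remembered) if already marked, otherwise it becomes a
 * promotion candidate. */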
6959static void
6960rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
6961{
6962 const VALUE old_parent = objspace->rgengc.parent_object;
6963
6964 if (old_parent) { /* parent object is old */
6965 if (RVALUE_WB_UNPROTECTED(obj)) {
6966 if (gc_remember_unprotected(objspace, obj)) {
6967 gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6968 }
6969 }
6970 else {
6971 if (!RVALUE_OLD_P(obj)) {
6972 if (RVALUE_MARKED(obj)) {
6973 /* An object pointed from an OLD object should be OLD. */
6974 gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6975 RVALUE_AGE_SET_OLD(objspace, obj);
6976 if (is_incremental_marking(objspace)) {
6977 if (!RVALUE_MARKING(obj)) {
6978 gc_grey(objspace, obj);
6979 }
6980 }
6981 else {
6982 rgengc_remember(objspace, obj);
6983 }
6984 }
6985 else {
6986 gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6987 RVALUE_AGE_SET_CANDIDATE(objspace, obj);
6988 }
6989 }
6990 }
6991 }
6992
6993 GC_ASSERT(old_parent == objspace->rgengc.parent_object);
6994}
6995
6996static void
6997gc_grey(rb_objspace_t *objspace, VALUE obj)
6998{
6999#if RGENGC_CHECK_MODE
7000 if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
7001 if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
7002#endif
7003
7004#if GC_ENABLE_INCREMENTAL_MARK
7005 if (is_incremental_marking(objspace)) {
7006 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7007 }
7008#endif
7009
7010 push_mark_stack(&objspace->mark_stack, obj);
7011}
7012
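/* Age an object as it is marked: WB-protected objects gain one age step until
 * they become old, and during a full mark already-old objects are flagged
 * uncollectible on their page. The object is counted in marked_slots. */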
7013static void
7014gc_aging(rb_objspace_t *objspace, VALUE obj)
7015{
7016 struct heap_page *page = GET_HEAP_PAGE(obj);
7017
7018 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
7019 check_rvalue_consistency(obj);
7020
7021 if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
7022 if (!RVALUE_OLD_P(obj)) {
7023 gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
7024 RVALUE_AGE_INC(objspace, obj);
7025 }
7026 else if (is_full_marking(objspace)) {
7027 GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
7028 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
7029 }
7030 }
7031 check_rvalue_consistency(obj);
7032
7033 objspace->marked_slots++;
7034}
7035
7036NOINLINE(static void gc_mark_ptr(rb_objspace_t *objspace, VALUE obj));
7037static void reachable_objects_from_callback(VALUE obj);
7038
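/* Core marking routine. During GC it performs the generational checks, sets
 * the mark bit (returning early if already marked), ages the object and greys
 * it onto the mark stack; outside GC it feeds the object to the
 * reachable-objects callback instead. */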
7039static void
7040gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
7041{
7042 if (LIKELY(during_gc)) {
7043 rgengc_check_relation(objspace, obj);
7044 if (!gc_mark_set(objspace, obj)) return; /* already marked */
7045
7046 if (0) { // for debugging GC marking misses
7047 if (objspace->rgengc.parent_object) {
7048 RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
7049 (void *)obj, obj_type_name(obj),
7050 (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
7051 }
7052 else {
7053 RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
7054 }
7055 }
7056
7057 if (UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
7058 rp(obj);
7059 rb_bug("try to mark T_NONE object"); /* check here will help debugging */
7060 }
7061 gc_aging(objspace, obj);
7062 gc_grey(objspace, obj);
7063 }
7064 else {
7065 reachable_objects_from_callback(obj);
7066 }
7067}
7068
7069static inline void
7070gc_pin(rb_objspace_t *objspace, VALUE obj)
7071{
7072 GC_ASSERT(is_markable_object(objspace, obj));
7073 if (UNLIKELY(objspace->flags.during_compacting)) {
7074 if (LIKELY(during_gc)) {
7075 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
7076 }
7077 }
7078}
7079
7080static inline void
7081gc_mark_and_pin(rb_objspace_t *objspace, VALUE obj)
7082{
7083 if (!is_markable_object(objspace, obj)) return;
7084 gc_pin(objspace, obj);
7085 gc_mark_ptr(objspace, obj);
7086}
7087
7088static inline void
7089gc_mark(rb_objspace_t *objspace, VALUE obj)
7090{
7091 if (!is_markable_object(objspace, obj)) return;
7092 gc_mark_ptr(objspace, obj);
7093}
7094
7095void
7096rb_gc_mark_movable(VALUE ptr)
7097{
7098 gc_mark(&rb_objspace, ptr);
7099}
7100
7101void
7102rb_gc_mark(VALUE ptr)
7103{
7104 gc_mark_and_pin(&rb_objspace, ptr);
7105}
7106
7107/* CAUTION: THIS FUNCTION IS VALID *ONLY BEFORE* SWEEPING.
7108 * This function is only for GC_END_MARK timing.
7109 */
7110
7111int
7112rb_objspace_marked_object_p(VALUE obj)
7113{
7114 return RVALUE_MARKED(obj) ? TRUE : FALSE;
7115}
7116
7117static inline void
7118gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
7119{
7120 if (RVALUE_OLD_P(obj)) {
7121 objspace->rgengc.parent_object = obj;
7122 }
7123 else {
7124 objspace->rgengc.parent_object = Qfalse;
7125 }
7126}
7127
7128static void
7129gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
7130{
7131 switch (imemo_type(obj)) {
7132 case imemo_env:
7133 {
7134 const rb_env_t *env = (const rb_env_t *)obj;
7135
7136 if (LIKELY(env->ep)) {
7137 // env->ep can be NULL just after newobj().
7138 GC_ASSERT(env->ep[VM_ENV_DATA_INDEX_ENV] == obj);
7139 GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
7140 gc_mark_values(objspace, (long)env->env_size, env->env);
7141 VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
7142 gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
7143 gc_mark(objspace, (VALUE)env->iseq);
7144 }
7145 }
7146 return;
7147 case imemo_cref:
7148 gc_mark(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
7149 gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
7150 gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
7151 return;
7152 case imemo_svar:
7153 gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
7154 gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
7155 gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
7156 gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
7157 return;
7158 case imemo_throw_data:
7159 gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
7160 return;
7161 case imemo_ifunc:
7162 gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
7163 return;
7164 case imemo_memo:
7165 gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
7166 gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
7167 gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
7168 return;
7169 case imemo_ment:
7170 mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
7171 return;
7172 case imemo_iseq:
7173 rb_iseq_mark((rb_iseq_t *)obj);
7174 return;
7175 case imemo_tmpbuf:
7176 {
7177 const rb_imemo_tmpbuf_t *m = &RANY(obj)->as.imemo.alloc;
7178 do {
7179 rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
7180 } while ((m = m->next) != NULL);
7181 }
7182 return;
7183 case imemo_ast:
7184 rb_ast_mark(&RANY(obj)->as.imemo.ast);
7185 return;
7186 case imemo_parser_strterm:
7187 rb_strterm_mark(obj);
7188 return;
7189 case imemo_callinfo:
7190 return;
7191 case imemo_callcache:
7192 {
7193 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
7194 // should not mark klass here
7195 gc_mark(objspace, (VALUE)vm_cc_cme(cc));
7196 }
7197 return;
7198 case imemo_constcache:
7199 {
7200 const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
7201 gc_mark(objspace, ice->value);
7202 }
7203 return;
7204#if VM_CHECK_MODE > 0
7205 default:
7206 VM_UNREACHABLE(gc_mark_imemo);
7207#endif
7208 }
7209}
7210
7211static void
7212gc_mark_children(rb_objspace_t *objspace, VALUE obj)
7213{
7214 register RVALUE *any = RANY(obj);
7215 gc_mark_set_parent(objspace, obj);
7216
7217 if (FL_TEST(obj, FL_EXIVAR)) {
7218 rb_mark_generic_ivar(obj);
7219 }
7220
7221 switch (BUILTIN_TYPE(obj)) {
7222 case T_FLOAT:
7223 case T_BIGNUM:
7224 case T_SYMBOL:
7225 /* Not immediates, but they have no references and no singleton
7226 * class */
7227 return;
7228
7229 case T_NIL:
7230 case T_FIXNUM:
7231 rb_bug("rb_gc_mark() called for broken object");
7232 break;
7233
7234 case T_NODE:
7235 UNEXPECTED_NODE(rb_gc_mark);
7236 break;
7237
7238 case T_IMEMO:
7239 gc_mark_imemo(objspace, obj);
7240 return;
7241
7242 default:
7243 break;
7244 }
7245
7246 gc_mark(objspace, any->as.basic.klass);
7247
7248 switch (BUILTIN_TYPE(obj)) {
7249 case T_CLASS:
7250 case T_MODULE:
7251 if (RCLASS_SUPER(obj)) {
7252 gc_mark(objspace, RCLASS_SUPER(obj));
7253 }
7254 if (!RCLASS_EXT(obj)) break;
7255
7256 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
7257 cc_table_mark(objspace, obj);
7258 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
7259 gc_mark(objspace, RCLASS_IVPTR(obj)[i]);
7260 }
7261 mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
7262 break;
7263
7264 case T_ICLASS:
7265 if (RICLASS_OWNS_M_TBL_P(obj)) {
7266 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
7267 }
7268 if (RCLASS_SUPER(obj)) {
7269 gc_mark(objspace, RCLASS_SUPER(obj));
7270 }
7271 if (!RCLASS_EXT(obj)) break;
7272
7273 if (RCLASS_INCLUDER(obj)) {
7274 gc_mark(objspace, RCLASS_INCLUDER(obj));
7275 }
7276 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
7277 cc_table_mark(objspace, obj);
7278 break;
7279
7280 case T_ARRAY:
7281 if (ARY_SHARED_P(obj)) {
7282 VALUE root = ARY_SHARED_ROOT(obj);
7283 gc_mark(objspace, root);
7284 }
7285 else {
7286 long i, len = RARRAY_LEN(obj);
7287 const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(obj);
7288 for (i=0; i < len; i++) {
7289 gc_mark(objspace, ptr[i]);
7290 }
7291
7292 if (LIKELY(during_gc)) {
7293 if (!ARY_EMBED_P(obj) && RARRAY_TRANSIENT_P(obj)) {
7294 rb_transient_heap_mark(obj, ptr);
7295 }
7296 }
7297 }
7298 break;
7299
7300 case T_HASH:
7301 mark_hash(objspace, obj);
7302 break;
7303
7304 case T_STRING:
7305 if (STR_SHARED_P(obj)) {
7306 gc_mark(objspace, any->as.string.as.heap.aux.shared);
7307 }
7308 break;
7309
7310 case T_DATA:
7311 {
7312 void *const ptr = DATA_PTR(obj);
7313 if (ptr) {
7314 RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
7315 any->as.typeddata.type->function.dmark :
7316 any->as.data.dmark;
7317 if (mark_func) (*mark_func)(ptr);
7318 }
7319 }
7320 break;
7321
7322 case T_OBJECT:
7323 {
7324 rb_shape_t *shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));
7325 if (rb_shape_obj_too_complex(obj)) {
7326 mark_m_tbl(objspace, ROBJECT_IV_HASH(obj));
7327 }
7328 else {
7329 const VALUE * const ptr = ROBJECT_IVPTR(obj);
7330
7331 uint32_t i, len = ROBJECT_IV_COUNT(obj);
7332 for (i = 0; i < len; i++) {
7333 gc_mark(objspace, ptr[i]);
7334 }
7335
7336 if (LIKELY(during_gc) &&
7337 ROBJ_TRANSIENT_P(obj)) {
7338 rb_transient_heap_mark(obj, ptr);
7339 }
7340 }
7341 if (shape) {
7342 VALUE klass = RBASIC_CLASS(obj);
7343
7344 // Increment max_iv_count if applicable, used to determine size pool allocation
7345 uint32_t num_of_ivs = shape->next_iv_index;
7346 if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
7347 RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
7348 }
7349 }
7350 }
7351 break;
7352
7353 case T_FILE:
7354 if (any->as.file.fptr) {
7355 gc_mark(objspace, any->as.file.fptr->self);
7356 gc_mark(objspace, any->as.file.fptr->pathv);
7357 gc_mark(objspace, any->as.file.fptr->tied_io_for_writing);
7358 gc_mark(objspace, any->as.file.fptr->writeconv_asciicompat);
7359 gc_mark(objspace, any->as.file.fptr->writeconv_pre_ecopts);
7360 gc_mark(objspace, any->as.file.fptr->encs.ecopts);
7361 gc_mark(objspace, any->as.file.fptr->write_lock);
7362 gc_mark(objspace, any->as.file.fptr->timeout);
7363 }
7364 break;
7365
7366 case T_REGEXP:
7367 gc_mark(objspace, any->as.regexp.src);
7368 break;
7369
7370 case T_MATCH:
7371 gc_mark(objspace, any->as.match.regexp);
7372 if (any->as.match.str) {
7373 gc_mark(objspace, any->as.match.str);
7374 }
7375 break;
7376
7377 case T_RATIONAL:
7378 gc_mark(objspace, any->as.rational.num);
7379 gc_mark(objspace, any->as.rational.den);
7380 break;
7381
7382 case T_COMPLEX:
7383 gc_mark(objspace, any->as.complex.real);
7384 gc_mark(objspace, any->as.complex.imag);
7385 break;
7386
7387 case T_STRUCT:
7388 {
7389 long i;
7390 const long len = RSTRUCT_LEN(obj);
7391 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
7392
7393 for (i=0; i<len; i++) {
7394 gc_mark(objspace, ptr[i]);
7395 }
7396
7397 if (LIKELY(during_gc) &&
7398 RSTRUCT_TRANSIENT_P(obj)) {
7399 rb_transient_heap_mark(obj, ptr);
7400 }
7401 }
7402 break;
7403
7404 default:
7405#if GC_DEBUG
7406 rb_gcdebug_print_obj_condition((VALUE)obj);
7407#endif
7408 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
7409 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
7410 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
7411 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
7412 BUILTIN_TYPE(obj), (void *)any,
7413 is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
7414 }
7415}
7416
7417/**
7418 * incremental: 0 -> not incremental (do all)
7419 * incremental: n -> mark at most `n' objects
7420 */
7421static inline int
7422gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
7423{
7424 mark_stack_t *mstack = &objspace->mark_stack;
7425 VALUE obj;
7426#if GC_ENABLE_INCREMENTAL_MARK
7427 size_t marked_slots_at_the_beginning = objspace->marked_slots;
7428 size_t popped_count = 0;
7429#endif
7430
7431 while (pop_mark_stack(mstack, &obj)) {
7432 if (UNDEF_P(obj)) continue; /* skip */
7433
7434 if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
7435 rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
7436 }
7437 gc_mark_children(objspace, obj);
7438
7439#if GC_ENABLE_INCREMENTAL_MARK
7440 if (incremental) {
7441 if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
7442 rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
7443 }
7444 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7445 popped_count++;
7446
7447 if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
7448 break;
7449 }
7450 }
7451 else {
7452 /* just ignore marking bits */
7453 }
7454#endif
7455 }
7456
7457 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
7458
7459 if (is_mark_stack_empty(mstack)) {
7460 shrink_stack_chunk_cache(mstack);
7461 return TRUE;
7462 }
7463 else {
7464 return FALSE;
7465 }
7466}
7467
7468static int
7469gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
7470{
7471 return gc_mark_stacked_objects(objspace, TRUE, count);
7472}
7473
7474static int
7475gc_mark_stacked_objects_all(rb_objspace_t *objspace)
7476{
7477 return gc_mark_stacked_objects(objspace, FALSE, 0);
7478}
7479
7480#if PRINT_ROOT_TICKS
7481#define MAX_TICKS 0x100
7482static tick_t mark_ticks[MAX_TICKS];
7483static const char *mark_ticks_categories[MAX_TICKS];
7484
7485static void
7486show_mark_ticks(void)
7487{
7488 int i;
7489 fprintf(stderr, "mark ticks result:\n");
7490 for (i=0; i<MAX_TICKS; i++) {
7491 const char *category = mark_ticks_categories[i];
7492 if (category) {
7493 fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
7494 }
7495 else {
7496 break;
7497 }
7498 }
7499}
7500
7501#endif /* PRINT_ROOT_TICKS */
7502
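/* Mark the GC roots: VM data, the finalizer table, the machine context,
 * protected global variables, end procs, the global variable table, and the
 * object-id tables. *categoryp is updated at each checkpoint for profiling. */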
7503static void
7504gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
7505{
7506 struct gc_list *list;
7507 rb_execution_context_t *ec = GET_EC();
7508 rb_vm_t *vm = rb_ec_vm_ptr(ec);
7509
7510#if PRINT_ROOT_TICKS
7511 tick_t start_tick = tick();
7512 int tick_count = 0;
7513 const char *prev_category = 0;
7514
7515 if (mark_ticks_categories[0] == 0) {
7516 atexit(show_mark_ticks);
7517 }
7518#endif
7519
7520 if (categoryp) *categoryp = "xxx";
7521
7522 objspace->rgengc.parent_object = Qfalse;
7523
7524#if PRINT_ROOT_TICKS
7525#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
7526 if (prev_category) { \
7527 tick_t t = tick(); \
7528 mark_ticks[tick_count] = t - start_tick; \
7529 mark_ticks_categories[tick_count] = prev_category; \
7530 tick_count++; \
7531 } \
7532 prev_category = category; \
7533 start_tick = tick(); \
7534} while (0)
7535#else /* PRINT_ROOT_TICKS */
7536#define MARK_CHECKPOINT_PRINT_TICK(category)
7537#endif
7538
7539#define MARK_CHECKPOINT(category) do { \
7540 if (categoryp) *categoryp = category; \
7541 MARK_CHECKPOINT_PRINT_TICK(category); \
7542} while (0)
7543
7544 MARK_CHECKPOINT("vm");
7545 SET_STACK_END;
7546 rb_vm_mark(vm);
7547 if (vm->self) gc_mark(objspace, vm->self);
7548
7549 MARK_CHECKPOINT("finalizers");
7550 mark_finalizer_tbl(objspace, finalizer_table);
7551
7552 MARK_CHECKPOINT("machine_context");
7553 mark_current_machine_context(objspace, ec);
7554
7555 /* mark protected global variables */
7556 MARK_CHECKPOINT("global_list");
7557 for (list = global_list; list; list = list->next) {
7558 gc_mark_maybe(objspace, *list->varptr);
7559 }
7560
7561 MARK_CHECKPOINT("end_proc");
7562 rb_mark_end_proc();
7563
7564 MARK_CHECKPOINT("global_tbl");
7565 rb_gc_mark_global_tbl();
7566
7567 MARK_CHECKPOINT("object_id");
7568 rb_gc_mark(objspace->next_object_id);
7569 mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl); /* Only mark ids */
7570
7571 if (stress_to_class) rb_gc_mark(stress_to_class);
7572
7573 MARK_CHECKPOINT("finish");
7574#undef MARK_CHECKPOINT
7575}
7576
7577#if RGENGC_CHECK_MODE >= 4
7578
7579#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
7580#define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
7581#define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
7582
7583struct reflist {
7584 VALUE *list;
7585 int pos;
7586 int size;
7587};
7588
7589static struct reflist *
7590reflist_create(VALUE obj)
7591{
7592 struct reflist *refs = xmalloc(sizeof(struct reflist));
7593 refs->size = 1;
7594 refs->list = ALLOC_N(VALUE, refs->size);
7595 refs->list[0] = obj;
7596 refs->pos = 1;
7597 return refs;
7598}
7599
7600static void
7601reflist_destruct(struct reflist *refs)
7602{
7603 xfree(refs->list);
7604 xfree(refs);
7605}
7606
7607static void
7608reflist_add(struct reflist *refs, VALUE obj)
7609{
7610 if (refs->pos == refs->size) {
7611 refs->size *= 2;
7612 SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
7613 }
7614
7615 refs->list[refs->pos++] = obj;
7616}
7617
7618static void
7619reflist_dump(struct reflist *refs)
7620{
7621 int i;
7622 for (i=0; i<refs->pos; i++) {
7623 VALUE obj = refs->list[i];
7624 if (IS_ROOTSIG(obj)) { /* root */
7625 fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
7626 }
7627 else {
7628 fprintf(stderr, "<%s>", obj_info(obj));
7629 }
7630 if (i+1 < refs->pos) fprintf(stderr, ", ");
7631 }
7632}
7633
7634static int
7635reflist_referred_from_machine_context(struct reflist *refs)
7636{
7637 int i;
7638 for (i=0; i<refs->pos; i++) {
7639 VALUE obj = refs->list[i];
7640 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
7641 }
7642 return 0;
7643}
7644
7645struct allrefs {
7646 rb_objspace_t *objspace;
7647 /* a -> obj1
7648 * b -> obj1
7649 * c -> obj1
7650 * c -> obj2
7651 * d -> obj3
7652 * #=> {obj1 => [a, b, c], obj2 => [c, d]}
7653 */
7654 struct st_table *references;
7655 const char *category;
7656 VALUE root_obj;
7657 mark_stack_t mark_stack;
7658};
7659
7660static int
7661allrefs_add(struct allrefs *data, VALUE obj)
7662{
7663 struct reflist *refs;
7664 st_data_t r;
7665
7666 if (st_lookup(data->references, obj, &r)) {
7667 refs = (struct reflist *)r;
7668 reflist_add(refs, data->root_obj);
7669 return 0;
7670 }
7671 else {
7672 refs = reflist_create(data->root_obj);
7673 st_insert(data->references, obj, (st_data_t)refs);
7674 return 1;
7675 }
7676}
7677
7678static void
7679allrefs_i(VALUE obj, void *ptr)
7680{
7681 struct allrefs *data = (struct allrefs *)ptr;
7682
7683 if (allrefs_add(data, obj)) {
7684 push_mark_stack(&data->mark_stack, obj);
7685 }
7686}
7687
7688static void
7689allrefs_roots_i(VALUE obj, void *ptr)
7690{
7691 struct allrefs *data = (struct allrefs *)ptr;
7692 if (strlen(data->category) == 0) rb_bug("!!!");
7693 data->root_obj = MAKE_ROOTSIG(data->category);
7694
7695 if (allrefs_add(data, obj)) {
7696 push_mark_stack(&data->mark_stack, obj);
7697 }
7698}
7699#define PUSH_MARK_FUNC_DATA(v) do { \
7700 struct gc_mark_func_data_struct *prev_mark_func_data = GET_RACTOR()->mfd; \
7701 GET_RACTOR()->mfd = (v);
7702
7703#define POP_MARK_FUNC_DATA() GET_RACTOR()->mfd = prev_mark_func_data;} while (0)
7704
7705static st_table *
7706objspace_allrefs(rb_objspace_t *objspace)
7707{
7708 struct allrefs data;
7709 struct gc_mark_func_data_struct mfd;
7710 VALUE obj;
7711 int prev_dont_gc = dont_gc_val();
7712 dont_gc_on();
7713
7714 data.objspace = objspace;
7715 data.references = st_init_numtable();
7716 init_mark_stack(&data.mark_stack);
7717
7718 mfd.mark_func = allrefs_roots_i;
7719 mfd.data = &data;
7720
7721 /* traverse root objects */
7722 PUSH_MARK_FUNC_DATA(&mfd);
7723 GET_RACTOR()->mfd = &mfd;
7724 gc_mark_roots(objspace, &data.category);
7725 POP_MARK_FUNC_DATA();
7726
7727 /* traverse rest objects reachable from root objects */
7728 while (pop_mark_stack(&data.mark_stack, &obj)) {
7729 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
7730 }
7731 free_stack_chunks(&data.mark_stack);
7732
7733 dont_gc_set(prev_dont_gc);
7734 return data.references;
7735}
7736
7737static int
7738objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
7739{
7740 struct reflist *refs = (struct reflist *)value;
7741 reflist_destruct(refs);
7742 return ST_CONTINUE;
7743}
7744
7745static void
7746objspace_allrefs_destruct(struct st_table *refs)
7747{
7748 st_foreach(refs, objspace_allrefs_destruct_i, 0);
7749 st_free_table(refs);
7750}
7751
7752#if RGENGC_CHECK_MODE >= 5
7753static int
7754allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
7755{
7756 VALUE obj = (VALUE)k;
7757 struct reflist *refs = (struct reflist *)v;
7758 fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
7759 reflist_dump(refs);
7760 fprintf(stderr, "\n");
7761 return ST_CONTINUE;
7762}
7763
7764static void
7765allrefs_dump(rb_objspace_t *objspace)
7766{
7767 VALUE size = objspace->rgengc.allrefs_table->num_entries;
7768 fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
7769 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
7770}
7771#endif
7772
7773static int
7774gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
7775{
7776 VALUE obj = k;
7777 struct reflist *refs = (struct reflist *)v;
7778 rb_objspace_t *objspace = (rb_objspace_t *)ptr;
7779
7780 /* object should be marked or oldgen */
7781 if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
7782 fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
7783 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
7784 reflist_dump(refs);
7785
7786 if (reflist_referred_from_machine_context(refs)) {
7787 fprintf(stderr, " (marked from machine stack).\n");
7788 /* marked from machine context can be false positive */
7789 }
7790 else {
7791 objspace->rgengc.error_count++;
7792 fprintf(stderr, "\n");
7793 }
7794 }
7795 return ST_CONTINUE;
7796}
7797
7798static void
7799gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
7800{
7801 size_t saved_malloc_increase = objspace->malloc_params.increase;
7802#if RGENGC_ESTIMATE_OLDMALLOC
7803 size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
7804#endif
7805 VALUE already_disabled = rb_objspace_gc_disable(objspace);
7806
7807 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
7808
7809 if (checker_func) {
7810 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
7811 }
7812
7813 if (objspace->rgengc.error_count > 0) {
7814#if RGENGC_CHECK_MODE >= 5
7815 allrefs_dump(objspace);
7816#endif
7817 if (checker_name) rb_bug("%s: GC has problem.", checker_name);
7818 }
7819
7820 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
7821 objspace->rgengc.allrefs_table = 0;
7822
7823 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
7824 objspace->malloc_params.increase = saved_malloc_increase;
7825#if RGENGC_ESTIMATE_OLDMALLOC
7826 objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
7827#endif
7828}
7829#endif /* RGENGC_CHECK_MODE >= 4 */
7830
7831 struct verify_internal_consistency_struct {
7832     rb_objspace_t *objspace;
7833 int err_count;
7834 size_t live_object_count;
7835 size_t zombie_object_count;
7836
7837 VALUE parent;
7838 size_t old_object_count;
7839 size_t remembered_shady_count;
7840};
7841
7842static void
7843check_generation_i(const VALUE child, void *ptr)
7844{
7845     struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
7846     const VALUE parent = data->parent;
7847
7848 if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));
7849
7850 if (!RVALUE_OLD_P(child)) {
7851 if (!RVALUE_REMEMBERED(parent) &&
7852 !RVALUE_REMEMBERED(child) &&
7853 !RVALUE_UNCOLLECTIBLE(child)) {
7854 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
7855 data->err_count++;
7856 }
7857 }
7858}
7859
7860static void
7861check_color_i(const VALUE child, void *ptr)
7862{
7863     struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
7864     const VALUE parent = data->parent;
7865
7866 if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
7867 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
7868 obj_info(parent), obj_info(child));
7869 data->err_count++;
7870 }
7871}
7872
7873static void
7874check_children_i(const VALUE child, void *ptr)
7875{
7877 if (check_rvalue_consistency_force(child, FALSE) != 0) {
7878 fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
7879 obj_info(child), obj_info(data->parent));
7880 rb_print_backtrace(); /* C backtrace will help to debug */
7881
7882 data->err_count++;
7883 }
7884}
7885
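/* Per-page body of GC.verify_internal_consistency: walks each slot, counts
 * live/zombie objects, validates children, and checks the generational
 * (old -> unremembered young) and incremental (black -> white) invariants. */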
7886static int
7887verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
7888                               struct verify_internal_consistency_struct *data)
7889 {
7890 VALUE obj;
7891 rb_objspace_t *objspace = data->objspace;
7892
7893 for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
7894 void *poisoned = asan_unpoison_object_temporary(obj);
7895
7896 if (is_live_object(objspace, obj)) {
7897 /* count objects */
7898 data->live_object_count++;
7899 data->parent = obj;
7900
7901 /* Normally, we don't expect T_MOVED objects to be in the heap.
7902              * But they can stay alive on the stack. */
7903 if (!gc_object_moved_p(objspace, obj)) {
7904 /* moved slots don't have children */
7905 rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
7906 }
7907
7908 /* check health of children */
7909 if (RVALUE_OLD_P(obj)) data->old_object_count++;
7910 if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
7911
7912 if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
7913 /* reachable objects from an oldgen object should be old or (young with remember) */
7914 data->parent = obj;
7915 rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
7916 }
7917
7918 if (is_incremental_marking(objspace)) {
7919 if (RVALUE_BLACK_P(obj)) {
7920 /* reachable objects from black objects should be black or grey objects */
7921 data->parent = obj;
7922 rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
7923 }
7924 }
7925 }
7926 else {
7927 if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
7928 GC_ASSERT((RBASIC(obj)->flags & ~FL_SEEN_OBJ_ID) == T_ZOMBIE);
7929 data->zombie_object_count++;
7930 }
7931 }
7932 if (poisoned) {
7933 GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
7934 asan_poison_object(obj);
7935 }
7936 }
7937
7938 return 0;
7939}
7940
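/* Cross-check a single heap page: its has_remembered_objects /
 * has_uncollectible_shady_objects flags and final_slots counter must agree
 * with what the per-slot bitmaps actually contain.  Returns the number of
 * remembered old objects found on the page. */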
7941static int
7942gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
7943{
7944 unsigned int has_remembered_shady = FALSE;
7945 unsigned int has_remembered_old = FALSE;
7946 int remembered_old_objects = 0;
7947 int free_objects = 0;
7948 int zombie_objects = 0;
7949
7950 short slot_size = page->slot_size;
7951 uintptr_t start = (uintptr_t)page->start;
7952 uintptr_t end = start + page->total_slots * slot_size;
7953
7954 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
7955 VALUE val = (VALUE)ptr;
7956 void *poisoned = asan_unpoison_object_temporary(val);
7957 enum ruby_value_type type = BUILTIN_TYPE(val);
7958
7959 if (type == T_NONE) free_objects++;
7960 if (type == T_ZOMBIE) zombie_objects++;
7961 if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
7962 has_remembered_shady = TRUE;
7963 }
7964 if (RVALUE_PAGE_MARKING(page, val)) {
7965 has_remembered_old = TRUE;
7966 remembered_old_objects++;
7967 }
7968
7969 if (poisoned) {
7970 GC_ASSERT(BUILTIN_TYPE(val) == T_NONE);
7971 asan_poison_object(val);
7972 }
7973 }
7974
7975 if (!is_incremental_marking(objspace) &&
7976 page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
7977
7978 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
7979 VALUE val = (VALUE)ptr;
7980 if (RVALUE_PAGE_MARKING(page, val)) {
7981 fprintf(stderr, "marking -> %s\n", obj_info(val));
7982 }
7983 }
7984 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
7985 (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
7986 }
7987
7988 if (page->flags.has_uncollectible_shady_objects == FALSE && has_remembered_shady == TRUE) {
7989         rb_bug("page %p's has_uncollectible_shady_objects should be false, but there are remembered shady objects. %s",
7990 (void *)page, obj ? obj_info(obj) : "");
7991 }
7992
7993 if (0) {
7994         /* free_slots may not be equal to free_objects */
7995 if (page->free_slots != free_objects) {
7996 rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, page->free_slots, free_objects);
7997 }
7998 }
7999 if (page->final_slots != zombie_objects) {
8000 rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, page->final_slots, zombie_objects);
8001 }
8002
8003 return remembered_old_objects;
8004}
8005
8006static int
8007gc_verify_heap_pages_(rb_objspace_t *objspace, struct ccan_list_head *head)
8008{
8009 int remembered_old_objects = 0;
8010 struct heap_page *page = 0;
8011
8012 ccan_list_for_each(head, page, page_node) {
8013 asan_unlock_freelist(page);
8014 RVALUE *p = page->freelist;
8015 while (p) {
8016 VALUE vp = (VALUE)p;
8017 VALUE prev = vp;
8018 asan_unpoison_object(vp, false);
8019 if (BUILTIN_TYPE(vp) != T_NONE) {
8020 fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info(vp));
8021 }
8022 p = p->as.free.next;
8023 asan_poison_object(prev);
8024 }
8025 asan_lock_freelist(page);
8026
8027 if (page->flags.has_remembered_objects == FALSE) {
8028 remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
8029 }
8030 }
8031
8032 return remembered_old_objects;
8033}
8034
8035static int
8036gc_verify_heap_pages(rb_objspace_t *objspace)
8037{
8038 int remembered_old_objects = 0;
8039 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8040 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages));
8041 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages));
8042 }
8043 return remembered_old_objects;
8044}
8045
8046/*
8047 * call-seq:
8048 * GC.verify_internal_consistency -> nil
8049 *
8050 * Verify internal consistency.
8051 *
8052 * This method is implementation specific.
8053  * Currently, this method checks generational consistency
8054  * if RGenGC is supported.
8055 */
8056static VALUE
8057gc_verify_internal_consistency_m(VALUE dummy)
8058{
8059 gc_verify_internal_consistency(&rb_objspace);
8060 return Qnil;
8061}
8062
8063static void
8064gc_verify_internal_consistency_(rb_objspace_t *objspace)
8065{
8066 struct verify_internal_consistency_struct data = {0};
8067
8068 data.objspace = objspace;
8069 gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
8070
8071 /* check relations */
8072 for (size_t i = 0; i < heap_allocated_pages; i++) {
8073 struct heap_page *page = heap_pages_sorted[i];
8074 short slot_size = page->slot_size;
8075
8076 uintptr_t start = (uintptr_t)page->start;
8077 uintptr_t end = start + page->total_slots * slot_size;
8078
8079 verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
8080 }
8081
8082 if (data.err_count != 0) {
8083#if RGENGC_CHECK_MODE >= 5
8084 objspace->rgengc.error_count = data.err_count;
8085 gc_marks_check(objspace, NULL, NULL);
8086 allrefs_dump(objspace);
8087#endif
8088 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
8089 }
8090
8091 /* check heap_page status */
8092 gc_verify_heap_pages(objspace);
8093
8094 /* check counters */
8095
8096 if (!is_lazy_sweeping(objspace) &&
8097 !finalizing &&
8098 ruby_single_main_ractor != NULL) {
8099 if (objspace_live_slots(objspace) != data.live_object_count) {
8100 fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", "
8101 "objspace->profile.total_freed_objects: %"PRIdSIZE"\n",
8102 heap_pages_final_slots, objspace->profile.total_freed_objects);
8103 rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
8104 objspace_live_slots(objspace), data.live_object_count);
8105 }
8106 }
8107
8108 if (!is_marking(objspace)) {
8109 if (objspace->rgengc.old_objects != data.old_object_count) {
8110 rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
8111 objspace->rgengc.old_objects, data.old_object_count);
8112 }
8113 if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
8114 rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
8115 objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
8116 }
8117 }
8118
8119 if (!finalizing) {
8120 size_t list_count = 0;
8121
8122 {
8123 VALUE z = heap_pages_deferred_final;
8124 while (z) {
8125 list_count++;
8126 z = RZOMBIE(z)->next;
8127 }
8128 }
8129
8130 if (heap_pages_final_slots != data.zombie_object_count ||
8131 heap_pages_final_slots != list_count) {
8132
8133 rb_bug("inconsistent finalizing object count:\n"
8134 " expect %"PRIuSIZE"\n"
8135 " but %"PRIuSIZE" zombies\n"
8136 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
8137 heap_pages_final_slots,
8138 data.zombie_object_count,
8139 list_count);
8140 }
8141 }
8142
8143 gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
8144}
8145
8146static void
8147gc_verify_internal_consistency(rb_objspace_t *objspace)
8148{
8149 RB_VM_LOCK_ENTER();
8150 {
8151 rb_vm_barrier(); // stop other ractors
8152
8153 unsigned int prev_during_gc = during_gc;
8154 during_gc = FALSE; // stop gc here
8155 {
8156 gc_verify_internal_consistency_(objspace);
8157 }
8158 during_gc = prev_during_gc;
8159 }
8160 RB_VM_LOCK_LEAVE();
8161}
8162
8163void
8164rb_gc_verify_internal_consistency(void)
8165{
8166 gc_verify_internal_consistency(&rb_objspace);
8167}
8168
8169static VALUE
8170gc_verify_transient_heap_internal_consistency(VALUE dmy)
8171{
8172 rb_transient_heap_verify();
8173 return Qnil;
8174}
8175
8176#if GC_ENABLE_INCREMENTAL_MARK
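/* Return any pages still parked on the pooled list (reserved for incremental
 * marking steps) to the tail of the free page list so they can serve
 * allocations again. */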
8177static void
8178heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
8179{
8180 if (heap->pooled_pages) {
8181 if (heap->free_pages) {
8182 struct heap_page *free_pages_tail = heap->free_pages;
8183 while (free_pages_tail->free_next) {
8184 free_pages_tail = free_pages_tail->free_next;
8185 }
8186 free_pages_tail->free_next = heap->pooled_pages;
8187 }
8188 else {
8189 heap->free_pages = heap->pooled_pages;
8190 }
8191
8192 heap->pooled_pages = NULL;
8193 }
8194}
8195#endif
8196
8197/* marks */
8198
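/* Set up a marking phase.  A full (major) mark clears all mark/remember
 * bitmaps, resets the old-object counters and sizes the incremental mark
 * steps; a minor mark only re-marks objects recorded in the remember set
 * and the uncollectible WB-unprotected set. */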
8199static void
8200gc_marks_start(rb_objspace_t *objspace, int full_mark)
8201{
8202 /* start marking */
8203 gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
8204 gc_mode_transition(objspace, gc_mode_marking);
8205
8206 if (full_mark) {
8207#if GC_ENABLE_INCREMENTAL_MARK
8208 size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
8209 objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;
8210
8211 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
8212                    "objspace->rincgc.pooled_slots: %"PRIdSIZE", "
8213 "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
8214 objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
8215#endif
8216 objspace->flags.during_minor_gc = FALSE;
8217 if (ruby_enable_autocompact) {
8218 objspace->flags.during_compacting |= TRUE;
8219 }
8220 objspace->profile.major_gc_count++;
8221 objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
8222 objspace->rgengc.old_objects = 0;
8223 objspace->rgengc.last_major_gc = objspace->profile.count;
8224 objspace->marked_slots = 0;
8225
8226 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8227 rb_size_pool_t *size_pool = &size_pools[i];
8228 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8229 rgengc_mark_and_rememberset_clear(objspace, heap);
8230 heap_move_pooled_pages_to_free_pages(heap);
8231 }
8232 }
8233 else {
8234 objspace->flags.during_minor_gc = TRUE;
8235 objspace->marked_slots =
8236 objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
8237 objspace->profile.minor_gc_count++;
8238
8239 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8240 rgengc_rememberset_mark(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
8241 }
8242 }
8243
8244 gc_mark_roots(objspace, NULL);
8245
8246 gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
8247 full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
8248}
8249
8250#if GC_ENABLE_INCREMENTAL_MARK
8251static inline void
8252gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits)
8253{
8254 if (bits) {
8255 do {
8256 if (bits & 1) {
8257 gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
8258 GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
8259 GC_ASSERT(RVALUE_MARKED((VALUE)p));
8260 gc_mark_children(objspace, (VALUE)p);
8261 }
8262 p += BASE_SLOT_SIZE;
8263 bits >>= 1;
8264 } while (bits);
8265 }
8266}
8267
8268static void
8269gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap)
8270{
8271 struct heap_page *page = 0;
8272
8273 ccan_list_for_each(&heap->pages, page, page_node) {
8274 bits_t *mark_bits = page->mark_bits;
8275 bits_t *wbun_bits = page->wb_unprotected_bits;
8276 uintptr_t p = page->start;
8277 size_t j;
8278
8279 bits_t bits = mark_bits[0] & wbun_bits[0];
8280 bits >>= NUM_IN_PAGE(p);
8281 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
8282 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8283
8284 for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8285 bits_t bits = mark_bits[j] & wbun_bits[j];
8286
8287 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
8288 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8289 }
8290 }
8291
8292 gc_mark_stacked_objects_all(objspace);
8293}
8294#endif
8295
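/* Wrap up marking: drain whatever remains of an incremental mark, rescan the
 * children of marked WB-unprotected objects, recompute the old-object limits,
 * decide how many pages may be freed by the following sweep, and flag whether
 * the next GC needs to be a major one. */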
8296static void
8297gc_marks_finish(rb_objspace_t *objspace)
8298{
8299#if GC_ENABLE_INCREMENTAL_MARK
8300 /* finish incremental GC */
8301 if (is_incremental_marking(objspace)) {
8302 if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
8303 rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
8304 mark_stack_size(&objspace->mark_stack));
8305 }
8306
8307 gc_mark_roots(objspace, 0);
8308 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == false);
8309
8310#if RGENGC_CHECK_MODE >= 2
8311 if (gc_verify_heap_pages(objspace) != 0) {
8312 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
8313 }
8314#endif
8315
8316 objspace->flags.during_incremental_marking = FALSE;
8317 /* check children of all marked wb-unprotected objects */
8318 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8319 gc_marks_wb_unprotected_objects(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
8320 }
8321 }
8322#endif /* GC_ENABLE_INCREMENTAL_MARK */
8323
8324#if RGENGC_CHECK_MODE >= 2
8325 gc_verify_internal_consistency(objspace);
8326#endif
8327
8328 if (is_full_marking(objspace)) {
8329 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
8330 const double r = gc_params.oldobject_limit_factor;
8331 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
8332 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
8333 }
8334
8335#if RGENGC_CHECK_MODE >= 4
8336 during_gc = FALSE;
8337 gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
8338 during_gc = TRUE;
8339#endif
8340
8341 {
8342 /* decide full GC is needed or not */
8343 size_t total_slots = heap_allocatable_slots(objspace) + heap_eden_total_slots(objspace);
8344 size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
8345 size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
8346 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
8347 int full_marking = is_full_marking(objspace);
8348 const int r_cnt = GET_VM()->ractor.cnt;
8349         const int r_mul = r_cnt > 8 ? 8 : r_cnt; // up to 8
8350
8351 GC_ASSERT(heap_eden_total_slots(objspace) >= objspace->marked_slots);
8352
8353 /* setup free-able page counts */
8354 if (max_free_slots < gc_params.heap_init_slots * r_mul) {
8355 max_free_slots = gc_params.heap_init_slots * r_mul;
8356 }
8357
8358 if (sweep_slots > max_free_slots) {
8359 heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
8360 }
8361 else {
8362 heap_pages_freeable_pages = 0;
8363 }
8364
8365 /* check free_min */
8366 if (min_free_slots < gc_params.heap_free_slots * r_mul) {
8367 min_free_slots = gc_params.heap_free_slots * r_mul;
8368 }
8369
8370 if (sweep_slots < min_free_slots) {
8371 if (!full_marking) {
8372 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
8373 full_marking = TRUE;
8374 /* do not update last_major_gc, because full marking is not done. */
8375 /* goto increment; */
8376 }
8377 else {
8378                 gc_report(1, objspace, "gc_marks_finish: next is full GC!!\n");
8379 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
8380 }
8381 }
8382
8383#if !USE_RVARGC
8384 if (full_marking) {
8385 /* increment: */
8386 gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
8387 rb_size_pool_t *size_pool = &size_pools[0];
8388 size_pool_allocatable_pages_set(objspace, size_pool, heap_extend_pages(objspace, size_pool, sweep_slots, total_slots, heap_allocated_pages + heap_allocatable_pages(objspace)));
8389
8390 heap_increment(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
8391 }
8392#endif
8393 }
8394
8395 if (full_marking) {
8396 /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
8397 const double r = gc_params.oldobject_limit_factor;
8398 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
8399 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
8400 }
8401
8402 if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
8403 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
8404 }
8405 if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
8406 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
8407 }
8408 if (RGENGC_FORCE_MAJOR_GC) {
8409 objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
8410 }
8411
8412 gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
8413 "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
8414 "sweep %"PRIdSIZE" slots, increment: %"PRIdSIZE", next GC: %s)\n",
8415 objspace->marked_slots, objspace->rgengc.old_objects, heap_eden_total_slots(objspace), sweep_slots, heap_allocatable_pages(objspace),
8416 objspace->rgengc.need_major_gc ? "major" : "minor");
8417 }
8418
8419 rb_transient_heap_finish_marking();
8420 rb_ractor_finish_marking();
8421
8422 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);
8423}
8424
8425#if GC_ENABLE_INCREMENTAL_MARK
8426static void
8427gc_marks_step(rb_objspace_t *objspace, size_t slots)
8428{
8429 GC_ASSERT(is_marking(objspace));
8430
8431 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
8432 gc_marks_finish(objspace);
8433 gc_sweep(objspace);
8434 }
8435 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE"\n", objspace->marked_slots);
8436}
8437#endif
8438
8439static bool
8440gc_compact_heap_cursors_met_p(rb_heap_t *heap)
8441{
8442 return heap->sweeping_page == heap->compact_cursor;
8443}
8444
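/* Choose the size pool an object should be compacted into: arrays, strings
 * and (not-too-complex) objects are routed to the pool that fits their
 * current payload where such a pool exists; all other types stay in their
 * source pool. */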
8445static rb_size_pool_t *
8446gc_compact_destination_pool(rb_objspace_t *objspace, rb_size_pool_t *src_pool, VALUE src)
8447{
8448 size_t obj_size;
8449 size_t idx = 0;
8450
8451 switch (BUILTIN_TYPE(src)) {
8452 case T_ARRAY:
8453 obj_size = rb_ary_size_as_embedded(src);
8454 break;
8455
8456 case T_OBJECT:
8457 if (rb_shape_obj_too_complex(src)) {
8458 return &size_pools[0];
8459 }
8460 else {
8461 obj_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(src));
8462 }
8463 break;
8464
8465 case T_STRING:
8466 obj_size = rb_str_size_as_embedded(src);
8467 break;
8468
8469 default:
8470 return src_pool;
8471 }
8472
8473 if (rb_gc_size_allocatable_p(obj_size)){
8474 idx = size_pool_idx_for_size(obj_size);
8475 }
8476 return &size_pools[idx];
8477}
8478
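/* Try to move `src` into the destination heap chosen by
 * gc_compact_destination_pool().  If no free slot is available, the
 * destination heap is swept page by page ahead of its compact cursor until
 * one appears.  T_OBJECT instances moving to a different pool get their shape
 * re-rooted under that pool's initial shape.  Returns false when the cursors
 * of this heap have met and compaction of the current page must stop. */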
8479static bool
8480gc_compact_move(rb_objspace_t *objspace, rb_heap_t *heap, rb_size_pool_t *size_pool, VALUE src)
8481{
8482 GC_ASSERT(BUILTIN_TYPE(src) != T_MOVED);
8483 GC_ASSERT(gc_is_moveable_obj(objspace, src));
8484
8485 rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, size_pool, src);
8486 rb_heap_t *dheap = SIZE_POOL_EDEN_HEAP(dest_pool);
8487 rb_shape_t *new_shape = NULL;
8488 rb_shape_t *orig_shape = NULL;
8489
8490 if (gc_compact_heap_cursors_met_p(dheap)) {
8491 return dheap != heap;
8492 }
8493
8494 if (RB_TYPE_P(src, T_OBJECT)) {
8495 orig_shape = rb_shape_get_shape(src);
8496 if (dheap != heap && !rb_shape_obj_too_complex(src)) {
8497 rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)((dest_pool - size_pools) + SIZE_POOL_COUNT));
8498 new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape);
8499
8500 if (!new_shape) {
8501 dest_pool = size_pool;
8502 dheap = heap;
8503 }
8504 }
8505 }
8506
8507 while (!try_move(objspace, dheap, dheap->free_pages, src)) {
8508 struct gc_sweep_context ctx = {
8509 .page = dheap->sweeping_page,
8510 .final_slots = 0,
8511 .freed_slots = 0,
8512 .empty_slots = 0,
8513 };
8514
8515 /* The page of src could be partially compacted, so it may contain
8516 * T_MOVED. Sweeping a page may read objects on this page, so we
8517 * need to lock the page. */
8518 lock_page_body(objspace, GET_PAGE_BODY(src));
8519 gc_sweep_page(objspace, dheap, &ctx);
8520 unlock_page_body(objspace, GET_PAGE_BODY(src));
8521
8522 if (dheap->sweeping_page->free_slots > 0) {
8523 heap_add_freepage(dheap, dheap->sweeping_page);
8524 };
8525
8526 dheap->sweeping_page = ccan_list_next(&dheap->pages, dheap->sweeping_page, page_node);
8527 if (gc_compact_heap_cursors_met_p(dheap)) {
8528 return dheap != heap;
8529 }
8530 }
8531
8532 if (orig_shape) {
8533 if (new_shape) {
8534 VALUE dest = rb_gc_location(src);
8535 rb_shape_set_shape(dest, new_shape);
8536 }
8537 RMOVED(src)->original_shape_id = rb_shape_id(orig_shape);
8538 }
8539
8540 return true;
8541}
8542
8543static bool
8544gc_compact_plane(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct heap_page *page)
8545{
8546 short slot_size = page->slot_size;
8547 short slot_bits = slot_size / BASE_SLOT_SIZE;
8548 GC_ASSERT(slot_bits > 0);
8549
8550 do {
8551 VALUE vp = (VALUE)p;
8552 GC_ASSERT(vp % sizeof(RVALUE) == 0);
8553
8554 if (bitset & 1) {
8555 objspace->rcompactor.considered_count_table[BUILTIN_TYPE(vp)]++;
8556
8557 if (gc_is_moveable_obj(objspace, vp)) {
8558 if (!gc_compact_move(objspace, heap, size_pool, vp)) {
8559                     // the cursors met; bubble up
8560 return false;
8561 }
8562 }
8563 }
8564 p += slot_size;
8565 bitset >>= slot_bits;
8566 } while (bitset);
8567
8568 return true;
8569}
8570
8571 // Iterate over all the objects in the page, moving them to where they want to go
8572static bool
8573gc_compact_page(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap, struct heap_page *page)
8574{
8575 GC_ASSERT(page == heap->compact_cursor);
8576
8577 bits_t *mark_bits, *pin_bits;
8578 bits_t bitset;
8579 uintptr_t p = page->start;
8580
8581 mark_bits = page->mark_bits;
8582 pin_bits = page->pinned_bits;
8583
8584 // objects that can be moved are marked and not pinned
8585 bitset = (mark_bits[0] & ~pin_bits[0]);
8586 bitset >>= NUM_IN_PAGE(p);
8587 if (bitset) {
8588 if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
8589 return false;
8590 }
8591 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8592
8593 for (int j = 1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
8594 bitset = (mark_bits[j] & ~pin_bits[j]);
8595 if (bitset) {
8596 if (!gc_compact_plane(objspace, size_pool, heap, (uintptr_t)p, bitset, page))
8597 return false;
8598 }
8599 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8600 }
8601
8602 return true;
8603}
8604
8605static bool
8606gc_compact_all_compacted_p(rb_objspace_t *objspace)
8607{
8608 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8609 rb_size_pool_t *size_pool = &size_pools[i];
8610 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8611
8612 if (heap->total_pages > 0 &&
8613 !gc_compact_heap_cursors_met_p(heap)) {
8614 return false;
8615 }
8616 }
8617
8618 return true;
8619}
8620
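/* Two-finger compaction across all size pools: repeatedly move movable
 * objects off the page at each heap's compact cursor until the cursor meets
 * the sweep cursor, locking each fully processed page body behind it. */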
8621static void
8622gc_sweep_compact(rb_objspace_t *objspace)
8623{
8624 gc_compact_start(objspace);
8625#if RGENGC_CHECK_MODE >= 2
8626 gc_verify_internal_consistency(objspace);
8627#endif
8628
8629 while (!gc_compact_all_compacted_p(objspace)) {
8630 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8631 rb_size_pool_t *size_pool = &size_pools[i];
8632 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
8633
8634 if (gc_compact_heap_cursors_met_p(heap)) {
8635 continue;
8636 }
8637
8638 struct heap_page *start_page = heap->compact_cursor;
8639
8640 if (!gc_compact_page(objspace, size_pool, heap, start_page)) {
8641 lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
8642
8643 continue;
8644 }
8645
8646 // If we get here, we've finished moving all objects on the compact_cursor page
8647 // So we can lock it and move the cursor on to the next one.
8648 lock_page_body(objspace, GET_PAGE_BODY(start_page->start));
8649 heap->compact_cursor = ccan_list_prev(&heap->pages, heap->compact_cursor, page_node);
8650 }
8651 }
8652
8653 gc_compact_finish(objspace);
8654
8655#if RGENGC_CHECK_MODE >= 2
8656 gc_verify_internal_consistency(objspace);
8657#endif
8658}
8659
8660static void
8661gc_marks_rest(rb_objspace_t *objspace)
8662{
8663 gc_report(1, objspace, "gc_marks_rest\n");
8664
8665#if GC_ENABLE_INCREMENTAL_MARK
8666 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
8667 SIZE_POOL_EDEN_HEAP(&size_pools[i])->pooled_pages = NULL;
8668 }
8669#endif
8670
8671 if (is_incremental_marking(objspace)) {
8672 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
8673 }
8674 else {
8675 gc_mark_stacked_objects_all(objspace);
8676 }
8677
8678 gc_marks_finish(objspace);
8679
8680 /* move to sweep */
8681 gc_sweep(objspace);
8682}
8683
8684static void
8685gc_marks_continue(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
8686{
8687 GC_ASSERT(dont_gc_val() == FALSE);
8688#if GC_ENABLE_INCREMENTAL_MARK
8689
8690 unsigned int lock_lev;
8691 gc_enter(objspace, gc_enter_event_mark_continue, &lock_lev);
8692
8693 if (heap->free_pages) {
8694 gc_report(2, objspace, "gc_marks_continue: has pooled pages");
8695 gc_marks_step(objspace, objspace->rincgc.step_slots);
8696 }
8697 else {
8698 gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
8699 mark_stack_size(&objspace->mark_stack));
8700 gc_marks_rest(objspace);
8701 }
8702
8703 gc_exit(objspace, gc_enter_event_mark_continue, &lock_lev);
8704#endif
8705}
8706
8707static void
8708gc_marks(rb_objspace_t *objspace, int full_mark)
8709{
8710 gc_prof_mark_timer_start(objspace);
8711
8712 /* setup marking */
8713
8714 gc_marks_start(objspace, full_mark);
8715 if (!is_incremental_marking(objspace)) {
8716 gc_marks_rest(objspace);
8717 }
8718
8719#if RGENGC_PROFILE > 0
8720 if (gc_prof_record(objspace)) {
8721 gc_profile_record *record = gc_prof_record(objspace);
8722 record->old_objects = objspace->rgengc.old_objects;
8723 }
8724#endif
8725 gc_prof_mark_timer_stop(objspace);
8726}
8727
8728/* RGENGC */
8729
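/* Debug logging backend for gc_report().  The single-character prefix encodes
 * the collector state: '+' full mark, '-' minor mark (while in GC),
 * 'S' lazy sweeping, 'M' incremental marking, ' ' otherwise. */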
8730static void
8731gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
8732{
8733 if (level <= RGENGC_DEBUG) {
8734 char buf[1024];
8735 FILE *out = stderr;
8736 va_list args;
8737 const char *status = " ";
8738
8739 if (during_gc) {
8740 status = is_full_marking(objspace) ? "+" : "-";
8741 }
8742 else {
8743 if (is_lazy_sweeping(objspace)) {
8744 status = "S";
8745 }
8746 if (is_incremental_marking(objspace)) {
8747 status = "M";
8748 }
8749 }
8750
8751 va_start(args, fmt);
8752 vsnprintf(buf, 1024, fmt, args);
8753 va_end(args);
8754
8755 fprintf(out, "%s|", status);
8756 fputs(buf, out);
8757 }
8758}
8759
8760/* bit operations */
8761
8762static int
8763rgengc_remembersetbits_get(rb_objspace_t *objspace, VALUE obj)
8764{
8765 return RVALUE_REMEMBERED(obj);
8766}
8767
8768static int
8769rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
8770{
8771 struct heap_page *page = GET_HEAP_PAGE(obj);
8772 bits_t *bits = &page->marking_bits[0];
8773
8774 GC_ASSERT(!is_incremental_marking(objspace));
8775
8776 if (MARKED_IN_BITMAP(bits, obj)) {
8777 return FALSE;
8778 }
8779 else {
8780 page->flags.has_remembered_objects = TRUE;
8781 MARK_IN_BITMAP(bits, obj);
8782 return TRUE;
8783 }
8784}
8785
8786/* wb, etc */
8787
8788/* return FALSE if already remembered */
8789static int
8790rgengc_remember(rb_objspace_t *objspace, VALUE obj)
8791{
8792 gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
8793 rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
8794
8795 check_rvalue_consistency(obj);
8796
8797 if (RGENGC_CHECK_MODE) {
8798 if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
8799 }
8800
8801#if RGENGC_PROFILE > 0
8802 if (!rgengc_remembered(objspace, obj)) {
8803 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
8804 objspace->profile.total_remembered_normal_object_count++;
8805#if RGENGC_PROFILE >= 2
8806 objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
8807#endif
8808 }
8809 }
8810#endif /* RGENGC_PROFILE > 0 */
8811
8812 return rgengc_remembersetbits_set(objspace, obj);
8813}
8814
8815static int
8816rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj)
8817{
8818 int result = rgengc_remembersetbits_get(objspace, obj);
8819 check_rvalue_consistency(obj);
8820 return result;
8821}
8822
8823static int
8824rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
8825{
8826 gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
8827 return rgengc_remembered_sweep(objspace, obj);
8828}
8829
8830#ifndef PROFILE_REMEMBERSET_MARK
8831#define PROFILE_REMEMBERSET_MARK 0
8832#endif
8833
8834static inline void
8835rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
8836{
8837 if (bitset) {
8838 do {
8839 if (bitset & 1) {
8840 VALUE obj = (VALUE)p;
8841 gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
8842 GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
8843 GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
8844
8845 gc_mark_children(objspace, obj);
8846 }
8847 p += BASE_SLOT_SIZE;
8848 bitset >>= 1;
8849 } while (bitset);
8850 }
8851}
8852
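/* Minor-GC marking of the remembered set: for every page that holds
 * remembered or uncollectible shady objects, mark the children of each object
 * whose marking bit or (uncollectible & wb-unprotected) bits are set, then
 * clear the page's marking bits for the next cycle. */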
8853static void
8854rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
8855{
8856 size_t j;
8857 struct heap_page *page = 0;
8858#if PROFILE_REMEMBERSET_MARK
8859 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
8860#endif
8861 gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
8862
8863 ccan_list_for_each(&heap->pages, page, page_node) {
8864 if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
8865 uintptr_t p = page->start;
8866 bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
8867 bits_t *marking_bits = page->marking_bits;
8868 bits_t *uncollectible_bits = page->uncollectible_bits;
8869 bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
8870#if PROFILE_REMEMBERSET_MARK
8871 if (page->flags.has_remembered_objects && page->flags.has_uncollectible_shady_objects) has_both++;
8872 else if (page->flags.has_remembered_objects) has_old++;
8873 else if (page->flags.has_uncollectible_shady_objects) has_shady++;
8874#endif
8875 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8876 bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
8877 marking_bits[j] = 0;
8878 }
8879 page->flags.has_remembered_objects = FALSE;
8880
8881 bitset = bits[0];
8882 bitset >>= NUM_IN_PAGE(p);
8883 rgengc_rememberset_mark_plane(objspace, p, bitset);
8884 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
8885
8886 for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
8887 bitset = bits[j];
8888 rgengc_rememberset_mark_plane(objspace, p, bitset);
8889 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
8890 }
8891 }
8892#if PROFILE_REMEMBERSET_MARK
8893 else {
8894 skip++;
8895 }
8896#endif
8897 }
8898
8899#if PROFILE_REMEMBERSET_MARK
8900 fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
8901#endif
8902 gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
8903}
8904
8905static void
8906rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
8907{
8908 struct heap_page *page = 0;
8909
8910 ccan_list_for_each(&heap->pages, page, page_node) {
8911 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8912 memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8913 memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8914 memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8915 page->flags.has_uncollectible_shady_objects = FALSE;
8916 page->flags.has_remembered_objects = FALSE;
8917 }
8918}
8919
8920/* RGENGC: APIs */
8921
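/* Generational write barrier (non-incremental path): when an old object `a`
 * is made to point at a young object `b`, remember `a` so that the next
 * minor GC re-scans its children. */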
8922NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));
8923
8924static void
8925gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
8926{
8927 if (RGENGC_CHECK_MODE) {
8928 if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
8929 if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
8930 if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
8931 }
8932
8933#if 1
8934 /* mark `a' and remember (default behavior) */
8935 if (!rgengc_remembered(objspace, a)) {
8936 RB_VM_LOCK_ENTER_NO_BARRIER();
8937 {
8938 rgengc_remember(objspace, a);
8939 }
8940 RB_VM_LOCK_LEAVE_NO_BARRIER();
8941 gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
8942 }
8943#else
8944 /* mark `b' and remember */
8945 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(b), b);
8946 if (RVALUE_WB_UNPROTECTED(b)) {
8947 gc_remember_unprotected(objspace, b);
8948 }
8949 else {
8950 RVALUE_AGE_SET_OLD(objspace, b);
8951 rgengc_remember(objspace, b);
8952 }
8953
8954 gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
8955#endif
8956
8957 check_rvalue_consistency(a);
8958 check_rvalue_consistency(b);
8959}
8960
8961#if GC_ENABLE_INCREMENTAL_MARK
8962static void
8963gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
8964{
8965 gc_mark_set_parent(objspace, parent);
8966 rgengc_check_relation(objspace, obj);
8967 if (gc_mark_set(objspace, obj) == FALSE) return;
8968 gc_aging(objspace, obj);
8969 gc_grey(objspace, obj);
8970}
8971
8972NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));
8973
8974static void
8975gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
8976{
8977 gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
8978
8979 if (RVALUE_BLACK_P(a)) {
8980 if (RVALUE_WHITE_P(b)) {
8981 if (!RVALUE_WB_UNPROTECTED(a)) {
8982 gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
8983 gc_mark_from(objspace, b, a);
8984 }
8985 }
8986 else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
8987 if (!RVALUE_WB_UNPROTECTED(b)) {
8988 gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
8989 RVALUE_AGE_SET_OLD(objspace, b);
8990
8991 if (RVALUE_BLACK_P(b)) {
8992 gc_grey(objspace, b);
8993 }
8994 }
8995 else {
8996 gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
8997 gc_remember_unprotected(objspace, b);
8998 }
8999 }
9000
9001 if (UNLIKELY(objspace->flags.during_compacting)) {
9002 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
9003 }
9004 }
9005}
9006#else
9007#define gc_writebarrier_incremental(a, b, objspace)
9008#endif
9009
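/* Public write barrier.  Outside incremental marking, only old -> young edges
 * matter and are handled by the generational barrier; during incremental
 * marking the slow path is taken under the VM lock, retrying if marking
 * finished while we waited for the lock. */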
9010void
9011rb_gc_writebarrier(VALUE a, VALUE b)
9012{
9013 rb_objspace_t *objspace = &rb_objspace;
9014
9015 if (RGENGC_CHECK_MODE) {
9016 if (SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const: %"PRIxVALUE, a);
9017 if (SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const: %"PRIxVALUE, b);
9018 }
9019
9020 retry:
9021 if (!is_incremental_marking(objspace)) {
9022 if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
9023 // do nothing
9024 }
9025 else {
9026 gc_writebarrier_generational(a, b, objspace);
9027 }
9028 }
9029 else {
9030 bool retry = false;
9031 /* slow path */
9032 RB_VM_LOCK_ENTER_NO_BARRIER();
9033 {
9034 if (is_incremental_marking(objspace)) {
9035 gc_writebarrier_incremental(a, b, objspace);
9036 }
9037 else {
9038 retry = true;
9039 }
9040 }
9041 RB_VM_LOCK_LEAVE_NO_BARRIER();
9042
9043 if (retry) goto retry;
9044 }
9045 return;
9046}
9047
9048void
9049rb_gc_writebarrier_unprotect(VALUE obj)
9050{
9051 if (RVALUE_WB_UNPROTECTED(obj)) {
9052 return;
9053 }
9054 else {
9055 rb_objspace_t *objspace = &rb_objspace;
9056
9057 gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
9058 rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
9059
9060 RB_VM_LOCK_ENTER_NO_BARRIER();
9061 {
9062 if (RVALUE_OLD_P(obj)) {
9063 gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
9064 RVALUE_DEMOTE(objspace, obj);
9065 gc_mark_set(objspace, obj);
9066 gc_remember_unprotected(objspace, obj);
9067
9068#if RGENGC_PROFILE
9069 objspace->profile.total_shade_operation_count++;
9070#if RGENGC_PROFILE >= 2
9071 objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
9072#endif /* RGENGC_PROFILE >= 2 */
9073#endif /* RGENGC_PROFILE */
9074 }
9075 else {
9076 RVALUE_AGE_RESET(obj);
9077 }
9078
9079 RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
9080 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
9081 }
9082 RB_VM_LOCK_LEAVE_NO_BARRIER();
9083 }
9084}
9085
9086/*
9087 * remember `obj' if needed.
9088 */
9089MJIT_FUNC_EXPORTED void
9090rb_gc_writebarrier_remember(VALUE obj)
9091{
9092 rb_objspace_t *objspace = &rb_objspace;
9093
9094 gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
9095
9096 if (is_incremental_marking(objspace)) {
9097 if (RVALUE_BLACK_P(obj)) {
9098 gc_grey(objspace, obj);
9099 }
9100 }
9101 else {
9102 if (RVALUE_OLD_P(obj)) {
9103 rgengc_remember(objspace, obj);
9104 }
9105 }
9106}
9107
9108static st_table *rgengc_unprotect_logging_table;
9109
9110static int
9111rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
9112{
9113 fprintf(stderr, "%s\t%"PRIuVALUE"\n", (char *)key, (VALUE)val);
9114 return ST_CONTINUE;
9115}
9116
9117static void
9118rgengc_unprotect_logging_exit_func(void)
9119{
9120 st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
9121}
9122
9123void
9124rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
9125{
9126 VALUE obj = (VALUE)objptr;
9127
9128 if (rgengc_unprotect_logging_table == 0) {
9129 rgengc_unprotect_logging_table = st_init_strtable();
9130 atexit(rgengc_unprotect_logging_exit_func);
9131 }
9132
9133 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
9134 char buff[0x100];
9135 st_data_t cnt = 1;
9136 char *ptr = buff;
9137
9138 snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_info(obj), filename, line);
9139
9140 if (st_lookup(rgengc_unprotect_logging_table, (st_data_t)ptr, &cnt)) {
9141 cnt++;
9142 }
9143 else {
9144 ptr = (strdup)(buff);
9145 if (!ptr) rb_memerror();
9146 }
9147 st_insert(rgengc_unprotect_logging_table, (st_data_t)ptr, cnt);
9148 }
9149}
9150
9151void
9152rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
9153{
9154 rb_objspace_t *objspace = &rb_objspace;
9155
9156 if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
9157 if (!RVALUE_OLD_P(dest)) {
9158 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
9159 RVALUE_AGE_RESET_RAW(dest);
9160 }
9161 else {
9162 RVALUE_DEMOTE(objspace, dest);
9163 }
9164 }
9165
9166 check_rvalue_consistency(dest);
9167}
9168
9169/* RGENGC analysis information */
9170
9171VALUE
9172rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
9173{
9174 return RBOOL(!RVALUE_WB_UNPROTECTED(obj));
9175}
9176
9177VALUE
9178rb_obj_rgengc_promoted_p(VALUE obj)
9179{
9180 return RBOOL(OBJ_PROMOTED(obj));
9181}
9182
9183size_t
9184rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
9185{
9186 size_t n = 0;
9187 static ID ID_marked;
9188 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
9189
9190 if (!ID_marked) {
9191#define I(s) ID_##s = rb_intern(#s);
9192 I(marked);
9193 I(wb_protected);
9194 I(old);
9195 I(marking);
9196 I(uncollectible);
9197 I(pinned);
9198#undef I
9199 }
9200
9201 if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
9202 if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
9203 if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
9204 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
9205 if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
9206 if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
9207 return n;
9208}
9209
9210/* GC */
9211
9212void
9213rb_gc_ractor_newobj_cache_clear(rb_ractor_newobj_cache_t *newobj_cache)
9214{
9215#if GC_ENABLE_INCREMENTAL_MARK
9216 newobj_cache->incremental_mark_step_allocated_slots = 0;
9217#endif
9218
9219 for (size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) {
9220 rb_ractor_newobj_size_pool_cache_t *cache = &newobj_cache->size_pool_caches[size_pool_idx];
9221
9222 struct heap_page *page = cache->using_page;
9223 RVALUE *freelist = cache->freelist;
9224 RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
9225
9226 heap_page_freelist_append(page, freelist);
9227
9228 cache->using_page = NULL;
9229 cache->freelist = NULL;
9230 }
9231}
9232
9233void
9234rb_gc_force_recycle(VALUE obj)
9235{
9236 /* no-op */
9237}
9238
9239#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
9240#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
9241#endif
9242
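/* Keep `obj` alive for the lifetime of the VM by appending it to one of the
 * hidden bucket arrays hanging off GET_VM()->mark_object_ary, which is
 * itself marked as part of the VM's roots. */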
9243void
9244rb_gc_register_mark_object(VALUE obj)
9245{
9246 if (!is_pointer_to_heap(&rb_objspace, (void *)obj))
9247 return;
9248
9249 RB_VM_LOCK_ENTER();
9250 {
9251 VALUE ary_ary = GET_VM()->mark_object_ary;
9252 VALUE ary = rb_ary_last(0, 0, ary_ary);
9253
9254 if (NIL_P(ary) || RARRAY_LEN(ary) >= MARK_OBJECT_ARY_BUCKET_SIZE) {
9255 ary = rb_ary_hidden_new(MARK_OBJECT_ARY_BUCKET_SIZE);
9256 rb_ary_push(ary_ary, ary);
9257 }
9258
9259 rb_ary_push(ary, obj);
9260 }
9261 RB_VM_LOCK_LEAVE();
9262}
9263
9264void
9265rb_gc_register_address(VALUE *addr)
9266{
9267 rb_objspace_t *objspace = &rb_objspace;
9268 struct gc_list *tmp;
9269
9270 tmp = ALLOC(struct gc_list);
9271 tmp->next = global_list;
9272 tmp->varptr = addr;
9273 global_list = tmp;
9274}
9275
9276void
9277rb_gc_unregister_address(VALUE *addr)
9278{
9279 rb_objspace_t *objspace = &rb_objspace;
9280 struct gc_list *tmp = global_list;
9281
9282 if (tmp->varptr == addr) {
9283 global_list = tmp->next;
9284 xfree(tmp);
9285 return;
9286 }
9287 while (tmp->next) {
9288 if (tmp->next->varptr == addr) {
9289 struct gc_list *t = tmp->next;
9290
9291 tmp->next = tmp->next->next;
9292 xfree(t);
9293 break;
9294 }
9295 tmp = tmp->next;
9296 }
9297}
9298
9299void
9300 rb_global_variable(VALUE *var)
9301 {
9302 rb_gc_register_address(var);
9303}
9304
9305#define GC_NOTIFY 0
9306
9307enum {
9308 gc_stress_no_major,
9309 gc_stress_no_immediate_sweep,
9310 gc_stress_full_mark_after_malloc,
9311 gc_stress_max
9312};
9313
9314#define gc_stress_full_mark_after_malloc_p() \
9315 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
9316
9317static void
9318heap_ready_to_gc(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap)
9319{
9320 if (!heap->free_pages) {
9321 if (!heap_increment(objspace, size_pool, heap)) {
9322 size_pool_allocatable_pages_set(objspace, size_pool, 1);
9323 heap_increment(objspace, size_pool, heap);
9324 }
9325 }
9326}
9327
9328static int
9329ready_to_gc(rb_objspace_t *objspace)
9330{
9331 if (dont_gc_val() || during_gc || ruby_disable_gc) {
9332 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
9333 rb_size_pool_t *size_pool = &size_pools[i];
9334 heap_ready_to_gc(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
9335 }
9336 return FALSE;
9337 }
9338 else {
9339 return TRUE;
9340 }
9341}
9342
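/* Adapt malloc-triggered GC thresholds after a collection: grow malloc_limit
 * when the observed allocation increase exceeded it, otherwise let it decay
 * slowly (by 2% per GC), and similarly adjust the oldmalloc limit used to
 * schedule major GCs when RGENGC_ESTIMATE_OLDMALLOC is enabled. */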
9343static void
9344gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
9345{
9346 gc_prof_set_malloc_info(objspace);
9347 {
9348 size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
9349 size_t old_limit = malloc_limit;
9350
9351 if (inc > malloc_limit) {
9352 malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
9353 if (malloc_limit > gc_params.malloc_limit_max) {
9354 malloc_limit = gc_params.malloc_limit_max;
9355 }
9356 }
9357 else {
9358 malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
9359 if (malloc_limit < gc_params.malloc_limit_min) {
9360 malloc_limit = gc_params.malloc_limit_min;
9361 }
9362 }
9363
9364 if (0) {
9365 if (old_limit != malloc_limit) {
9366 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
9367 rb_gc_count(), old_limit, malloc_limit);
9368 }
9369 else {
9370 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
9371 rb_gc_count(), malloc_limit);
9372 }
9373 }
9374 }
9375
9376 /* reset oldmalloc info */
9377#if RGENGC_ESTIMATE_OLDMALLOC
9378 if (!full_mark) {
9379 if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
9380 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
9381 objspace->rgengc.oldmalloc_increase_limit =
9382 (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
9383
9384 if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
9385 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
9386 }
9387 }
9388
9389 if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
9390 rb_gc_count(),
9391 objspace->rgengc.need_major_gc,
9392 objspace->rgengc.oldmalloc_increase,
9393 objspace->rgengc.oldmalloc_increase_limit,
9394 gc_params.oldmalloc_limit_max);
9395 }
9396 else {
9397 /* major GC */
9398 objspace->rgengc.oldmalloc_increase = 0;
9399
9400 if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
9401 objspace->rgengc.oldmalloc_increase_limit =
9402 (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
9403 if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
9404 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
9405 }
9406 }
9407 }
9408#endif
9409}
9410
9411static int
9412garbage_collect(rb_objspace_t *objspace, unsigned int reason)
9413{
9414 int ret;
9415
9416 RB_VM_LOCK_ENTER();
9417 {
9418#if GC_PROFILE_MORE_DETAIL
9419 objspace->profile.prepare_time = getrusage_time();
9420#endif
9421
9422 gc_rest(objspace);
9423
9424#if GC_PROFILE_MORE_DETAIL
9425 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
9426#endif
9427
9428 ret = gc_start(objspace, reason);
9429 }
9430 RB_VM_LOCK_LEAVE();
9431
9432 return ret;
9433}
9434
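/* Core GC entry point.  Decides whether this cycle is a major or minor mark,
 * whether it runs incrementally, and whether it compacts; honours GC.stress
 * settings; then runs the marking phase (sweeping is kicked off from
 * gc_marks_rest / gc_marks_step). */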
9435static int
9436gc_start(rb_objspace_t *objspace, unsigned int reason)
9437{
9438 unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
9439#if GC_ENABLE_INCREMENTAL_MARK
9440 unsigned int immediate_mark = reason & GPR_FLAG_IMMEDIATE_MARK;
9441#endif
9442
9443 /* reason may be clobbered, later, so keep set immediate_sweep here */
9444 objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);
9445
9446 /* Explicitly enable compaction (GC.compact) */
9447 if (do_full_mark && ruby_enable_autocompact) {
9448 objspace->flags.during_compacting = TRUE;
9449 }
9450 else {
9451 objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
9452 }
9453
9454 if (!heap_allocated_pages) return FALSE; /* heap is not ready */
9455 if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
9456
9457 GC_ASSERT(gc_mode(objspace) == gc_mode_none);
9458 GC_ASSERT(!is_lazy_sweeping(objspace));
9459 GC_ASSERT(!is_incremental_marking(objspace));
9460
9461 unsigned int lock_lev;
9462 gc_enter(objspace, gc_enter_event_start, &lock_lev);
9463
9464#if RGENGC_CHECK_MODE >= 2
9465 gc_verify_internal_consistency(objspace);
9466#endif
9467
9468 if (ruby_gc_stressful) {
9469 int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
9470
9471 if ((flag & (1<<gc_stress_no_major)) == 0) {
9472 do_full_mark = TRUE;
9473 }
9474
9475 objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
9476 }
9477 else {
9478 if (objspace->rgengc.need_major_gc) {
9479 reason |= objspace->rgengc.need_major_gc;
9480 do_full_mark = TRUE;
9481 }
9482 else if (RGENGC_FORCE_MAJOR_GC) {
9483 reason = GPR_FLAG_MAJOR_BY_FORCE;
9484 do_full_mark = TRUE;
9485 }
9486
9487 objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
9488 }
9489
9490 if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
9491 reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
9492 }
9493
9494#if GC_ENABLE_INCREMENTAL_MARK
9495 if (!GC_ENABLE_INCREMENTAL_MARK || objspace->flags.dont_incremental || immediate_mark) {
9496 objspace->flags.during_incremental_marking = FALSE;
9497 }
9498 else {
9499 objspace->flags.during_incremental_marking = do_full_mark;
9500 }
9501#endif
9502
9503 if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
9504 objspace->flags.immediate_sweep = TRUE;
9505 }
9506
9507 if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
9508
9509 gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
9510 reason,
9511 do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
9512
9513#if USE_DEBUG_COUNTER
9514 RB_DEBUG_COUNTER_INC(gc_count);
9515
9516 if (reason & GPR_FLAG_MAJOR_MASK) {
9517 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
9518 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
9519 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
9520 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
9521#if RGENGC_ESTIMATE_OLDMALLOC
9522 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
9523#endif
9524 }
9525 else {
9526 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
9527 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
9528 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
9529 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
9530 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
9531 }
9532#endif
9533
9534 objspace->profile.count++;
9535 objspace->profile.latest_gc_info = reason;
9536 objspace->profile.total_allocated_objects_at_gc_start = objspace->total_allocated_objects;
9537 objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
9538 gc_prof_setup_new_record(objspace, reason);
9539 gc_reset_malloc_info(objspace, do_full_mark);
9540 rb_transient_heap_start_marking(do_full_mark);
9541
9542 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
9543 GC_ASSERT(during_gc);
9544
9545 gc_prof_timer_start(objspace);
9546 {
9547 gc_marks(objspace, do_full_mark);
9548 }
9549 gc_prof_timer_stop(objspace);
9550
9551 gc_exit(objspace, gc_enter_event_start, &lock_lev);
9552 return TRUE;
9553}
9554
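/* Finish any outstanding incremental marking and lazy sweeping so the heap
 * reaches a quiescent state before a new GC or an operation that requires
 * one. */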
9555static void
9556gc_rest(rb_objspace_t *objspace)
9557{
9558 int marking = is_incremental_marking(objspace);
9559 int sweeping = is_lazy_sweeping(objspace);
9560
9561 if (marking || sweeping) {
9562 unsigned int lock_lev;
9563 gc_enter(objspace, gc_enter_event_rest, &lock_lev);
9564
9565 if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
9566
9567 if (is_incremental_marking(objspace)) {
9568 gc_marks_rest(objspace);
9569 }
9570 if (is_lazy_sweeping(objspace)) {
9571 gc_sweep_rest(objspace);
9572 }
9573 gc_exit(objspace, gc_enter_event_rest, &lock_lev);
9574 }
9575}
9576
9577 struct objspace_and_reason {
9578     rb_objspace_t *objspace;
9579 unsigned int reason;
9580};
9581
9582static void
9583gc_current_status_fill(rb_objspace_t *objspace, char *buff)
9584{
9585 int i = 0;
9586 if (is_marking(objspace)) {
9587 buff[i++] = 'M';
9588 if (is_full_marking(objspace)) buff[i++] = 'F';
9589#if GC_ENABLE_INCREMENTAL_MARK
9590 if (is_incremental_marking(objspace)) buff[i++] = 'I';
9591#endif
9592 }
9593 else if (is_sweeping(objspace)) {
9594 buff[i++] = 'S';
9595 if (is_lazy_sweeping(objspace)) buff[i++] = 'L';
9596 }
9597 else {
9598 buff[i++] = 'N';
9599 }
9600 buff[i] = '\0';
9601}
9602
9603static const char *
9604gc_current_status(rb_objspace_t *objspace)
9605{
9606 static char buff[0x10];
9607 gc_current_status_fill(objspace, buff);
9608 return buff;
9609}
9610
9611#if PRINT_ENTER_EXIT_TICK
9612
9613static tick_t last_exit_tick;
9614static tick_t enter_tick;
9615static int enter_count = 0;
9616static char last_gc_status[0x10];
9617
9618static inline void
9619gc_record(rb_objspace_t *objspace, int direction, const char *event)
9620{
9621 if (direction == 0) { /* enter */
9622 enter_count++;
9623 enter_tick = tick();
9624 gc_current_status_fill(objspace, last_gc_status);
9625 }
9626 else { /* exit */
9627 tick_t exit_tick = tick();
9628 char current_gc_status[0x10];
9629 gc_current_status_fill(objspace, current_gc_status);
9630#if 1
9631 /* [last mutator time] [gc time] [event] */
9632 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9633 enter_tick - last_exit_tick,
9634 exit_tick - enter_tick,
9635 event,
9636 last_gc_status, current_gc_status,
9637 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9638 last_exit_tick = exit_tick;
9639#else
9640 /* [enter_tick] [gc time] [event] */
9641 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9642 enter_tick,
9643 exit_tick - enter_tick,
9644 event,
9645 last_gc_status, current_gc_status,
9646 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9647#endif
9648 }
9649}
9650#else /* PRINT_ENTER_EXIT_TICK */
9651static inline void
9652gc_record(rb_objspace_t *objspace, int direction, const char *event)
9653{
9654 /* null */
9655}
9656#endif /* PRINT_ENTER_EXIT_TICK */
9657
9658static const char *
9659gc_enter_event_cstr(enum gc_enter_event event)
9660{
9661 switch (event) {
9662 case gc_enter_event_start: return "start";
9663 case gc_enter_event_mark_continue: return "mark_continue";
9664 case gc_enter_event_sweep_continue: return "sweep_continue";
9665 case gc_enter_event_rest: return "rest";
9666 case gc_enter_event_finalizer: return "finalizer";
9667 case gc_enter_event_rb_memerror: return "rb_memerror";
9668 }
9669 return NULL;
9670}
9671
9672static void
9673gc_enter_count(enum gc_enter_event event)
9674{
9675 switch (event) {
9676 case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start); break;
9677 case gc_enter_event_mark_continue: RB_DEBUG_COUNTER_INC(gc_enter_mark_continue); break;
9678 case gc_enter_event_sweep_continue: RB_DEBUG_COUNTER_INC(gc_enter_sweep_continue); break;
9679 case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
9680 case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
9681 case gc_enter_event_rb_memerror: /* nothing */ break;
9682 }
9683}
9684
9685#ifndef MEASURE_GC
9686#define MEASURE_GC (objspace->flags.measure_gc)
9687#endif
9688
9689static bool
9690gc_enter_event_measure_p(rb_objspace_t *objspace, enum gc_enter_event event)
9691{
9692 if (!MEASURE_GC) return false;
9693
9694 switch (event) {
9695 case gc_enter_event_start:
9696 case gc_enter_event_mark_continue:
9697 case gc_enter_event_sweep_continue:
9698 case gc_enter_event_rest:
9699 return true;
9700
9701 default:
9702 // case gc_enter_event_finalizer:
9703 // case gc_enter_event_rb_memerror:
9704 return false;
9705 }
9706}
9707
9708static bool current_process_time(struct timespec *ts);
9709
9710static void
9711gc_enter_clock(rb_objspace_t *objspace, enum gc_enter_event event)
9712{
9713 if (gc_enter_event_measure_p(objspace, event)) {
9714 if (!current_process_time(&objspace->profile.start_time)) {
9715 objspace->profile.start_time.tv_sec = 0;
9716 objspace->profile.start_time.tv_nsec = 0;
9717 }
9718 }
9719}
9720
9721static void
9722gc_exit_clock(rb_objspace_t *objspace, enum gc_enter_event event)
9723{
9724 if (gc_enter_event_measure_p(objspace, event)) {
9725 struct timespec end_time;
9726
9727 if ((objspace->profile.start_time.tv_sec > 0 ||
9728 objspace->profile.start_time.tv_nsec > 0) &&
9729 current_process_time(&end_time)) {
9730
9731 if (end_time.tv_sec < objspace->profile.start_time.tv_sec) {
9732 return; // ignore
9733 }
9734 else {
9735 uint64_t ns =
9736 (uint64_t)(end_time.tv_sec - objspace->profile.start_time.tv_sec) * (1000 * 1000 * 1000) +
9737 (end_time.tv_nsec - objspace->profile.start_time.tv_nsec);
9738 objspace->profile.total_time_ns += ns;
9739 }
9740 }
9741 }
9742}
9743
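/* gc_enter()/gc_exit() bracket every GC phase. They acquire the VM lock,
 * stop the other Ractors with rb_vm_barrier() for events that mark, start
 * and stop the profiling clock, and toggle the during_gc flag. */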
9744static inline void
9745gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9746{
9747 RB_VM_LOCK_ENTER_LEV(lock_lev);
9748
9749 gc_enter_clock(objspace, event);
9750
9751 switch (event) {
9752 case gc_enter_event_rest:
9753 if (!is_marking(objspace)) break;
9754 // fall through
9755 case gc_enter_event_start:
9756 case gc_enter_event_mark_continue:
9757 // stop other ractors
9758 rb_vm_barrier();
9759 break;
9760 default:
9761 break;
9762 }
9763
9764 gc_enter_count(event);
9765 if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
9766 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
9767
9768 during_gc = TRUE;
9769 RUBY_DEBUG_LOG("%s (%s)",gc_enter_event_cstr(event), gc_current_status(objspace));
9770 gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9771 gc_record(objspace, 0, gc_enter_event_cstr(event));
9772 gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
9773}
9774
9775static inline void
9776gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9777{
9778 GC_ASSERT(during_gc != 0);
9779
9780    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passed? */
9781 gc_record(objspace, 1, gc_enter_event_cstr(event));
9782 RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
9783 gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9784 during_gc = FALSE;
9785
9786 gc_exit_clock(objspace, event);
9787 RB_VM_LOCK_LEAVE_LEV(lock_lev);
9788
9789#if RGENGC_CHECK_MODE >= 2
9790 if (event == gc_enter_event_sweep_continue && gc_mode(objspace) == gc_mode_none) {
9791 GC_ASSERT(!during_gc);
9792 // sweep finished
9793 gc_verify_internal_consistency(objspace);
9794 }
9795#endif
9796}
9797
9798static void *
9799gc_with_gvl(void *ptr)
9800{
9801 struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
9802 return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
9803}
9804
9805static int
9806garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
9807{
9808 if (dont_gc_val()) return TRUE;
9809 if (ruby_thread_has_gvl_p()) {
9810 return garbage_collect(objspace, reason);
9811 }
9812 else {
9813 if (ruby_native_thread_p()) {
9814 struct objspace_and_reason oar;
9815 oar.objspace = objspace;
9816 oar.reason = reason;
9817 return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
9818 }
9819 else {
9820 /* no ruby thread */
9821 fprintf(stderr, "[FATAL] failed to allocate memory\n");
9822 exit(EXIT_FAILURE);
9823 }
9824 }
9825}
9826
9827static VALUE
9828gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
9829{
9830 rb_objspace_t *objspace = &rb_objspace;
9831 unsigned int reason = (GPR_FLAG_FULL_MARK |
9832 GPR_FLAG_IMMEDIATE_MARK |
9833 GPR_FLAG_IMMEDIATE_SWEEP |
9834 GPR_FLAG_METHOD);
9835
9836 /* For now, compact implies full mark / sweep, so ignore other flags */
9837 if (RTEST(compact)) {
9838 GC_ASSERT(GC_COMPACTION_SUPPORTED);
9839
9840 reason |= GPR_FLAG_COMPACT;
9841 }
9842 else {
9843 if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
9844 if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
9845 if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
9846 }
9847
9848 garbage_collect(objspace, reason);
9849 gc_finalize_deferred(objspace);
9850
9851 return Qnil;
9852}
9853
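/* Returns TRUE if the compactor is allowed to move `obj`. Empty slots,
 * zombies, already-moved slots, dynamic symbols that have been assigned an
 * ID, and objects registered as keys in the finalizer table are never
 * moved. */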
9854static int
9855gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
9856{
9857 GC_ASSERT(!SPECIAL_CONST_P(obj));
9858
9859 switch (BUILTIN_TYPE(obj)) {
9860 case T_NONE:
9861 case T_NIL:
9862 case T_MOVED:
9863 case T_ZOMBIE:
9864 return FALSE;
9865 case T_SYMBOL:
9866 if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
9867 return FALSE;
9868 }
9869 /* fall through */
9870 case T_STRING:
9871 case T_OBJECT:
9872 case T_FLOAT:
9873 case T_IMEMO:
9874 case T_ARRAY:
9875 case T_BIGNUM:
9876 case T_ICLASS:
9877 case T_MODULE:
9878 case T_REGEXP:
9879 case T_DATA:
9880 case T_MATCH:
9881 case T_STRUCT:
9882 case T_HASH:
9883 case T_FILE:
9884 case T_COMPLEX:
9885 case T_RATIONAL:
9886 case T_NODE:
9887 case T_CLASS:
9888 if (FL_TEST(obj, FL_FINALIZE)) {
9889 /* The finalizer table is a numtable. It looks up objects by address.
9890 * We can't mark the keys in the finalizer table because that would
9891 * prevent the objects from being collected. This check prevents
9892 * objects that are keys in the finalizer table from being moved
9893 * without directly pinning them. */
9894 if (st_is_member(finalizer_table, obj)) {
9895 return FALSE;
9896 }
9897 }
9898 GC_ASSERT(RVALUE_MARKED(obj));
9899 GC_ASSERT(!RVALUE_PINNED(obj));
9900
9901 return TRUE;
9902
9903 default:
9904 rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
9905 break;
9906 }
9907
9908 return FALSE;
9909}
9910
9911/* Used in places that could malloc, which can cause the GC to run. We need to
9912 * temporarily disable the GC to allow the malloc to happen. */
9913#define COULD_MALLOC_REGION_START() \
9914 GC_ASSERT(during_gc); \
9915 VALUE _already_disabled = rb_gc_disable_no_rest(); \
9916 during_gc = false;
9917
9918#define COULD_MALLOC_REGION_END() \
9919 during_gc = true; \
9920 if (_already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
9921
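/* Moves the object in slot `scan` into the free slot `free`, carrying its
 * mark, write-barrier, uncollectible and marking bits along with it, and
 * leaves a T_MOVED forwarding object pointing at the new location in the
 * old slot. */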
9922static VALUE
9923gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t src_slot_size, size_t slot_size)
9924{
9925 int marked;
9926 int wb_unprotected;
9927 int uncollectible;
9928 int marking;
9929 RVALUE *dest = (RVALUE *)free;
9930 RVALUE *src = (RVALUE *)scan;
9931
9932 gc_report(4, objspace, "Moving object: %p -> %p\n", (void*)scan, (void *)free);
9933
9934 GC_ASSERT(BUILTIN_TYPE(scan) != T_NONE);
9935 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(free), free));
9936
9937 /* Save off bits for current object. */
9938 marked = rb_objspace_marked_object_p((VALUE)src);
9939 wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
9940 uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
9941 marking = RVALUE_MARKING((VALUE)src);
9942
9943 /* Clear bits for eventual T_MOVED */
9944 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
9945 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
9946 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
9947 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)src), (VALUE)src);
9948
9949 if (FL_TEST((VALUE)src, FL_EXIVAR)) {
9950 /* Resizing the st table could cause a malloc */
9951 COULD_MALLOC_REGION_START();
9952 {
9953 rb_mv_generic_ivar((VALUE)src, (VALUE)dest);
9954 }
9955 COULD_MALLOC_REGION_END();
9956 }
9957
9958 st_data_t srcid = (st_data_t)src, id;
9959
9960 /* If the source object's object_id has been seen, we need to update
9961 * the object to object id mapping. */
9962 if (st_lookup(objspace->obj_to_id_tbl, srcid, &id)) {
9963 gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
9964 /* Resizing the st table could cause a malloc */
9965 COULD_MALLOC_REGION_START();
9966 {
9967 st_delete(objspace->obj_to_id_tbl, &srcid, 0);
9968 st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
9969 }
9970 COULD_MALLOC_REGION_END();
9971 }
9972
9973 /* Move the object */
9974 memcpy(dest, src, MIN(src_slot_size, slot_size));
9975
9976 if (RVALUE_OVERHEAD > 0) {
9977 void *dest_overhead = (void *)(((uintptr_t)dest) + slot_size - RVALUE_OVERHEAD);
9978 void *src_overhead = (void *)(((uintptr_t)src) + src_slot_size - RVALUE_OVERHEAD);
9979
9980 memcpy(dest_overhead, src_overhead, RVALUE_OVERHEAD);
9981 }
9982
9983 memset(src, 0, src_slot_size);
9984
9985 /* Set bits for object in new location */
9986 if (marking) {
9987 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
9988 }
9989 else {
9990 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
9991 }
9992
9993 if (marked) {
9994 MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
9995 }
9996 else {
9997 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
9998 }
9999
10000 if (wb_unprotected) {
10001 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
10002 }
10003 else {
10004 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
10005 }
10006
10007 if (uncollectible) {
10008 MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
10009 }
10010 else {
10011 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
10012 }
10013
10014 /* Assign forwarding address */
10015 src->as.moved.flags = T_MOVED;
10016 src->as.moved.dummy = Qundef;
10017 src->as.moved.destination = (VALUE)dest;
10018 GC_ASSERT(BUILTIN_TYPE((VALUE)dest) != T_NONE);
10019
10020 return (VALUE)src;
10021}
10022
10023#if GC_CAN_COMPILE_COMPACTION
10024static int
10025compare_free_slots(const void *left, const void *right, void *dummy)
10026{
10027 struct heap_page *left_page;
10028 struct heap_page *right_page;
10029
10030 left_page = *(struct heap_page * const *)left;
10031 right_page = *(struct heap_page * const *)right;
10032
10033 return left_page->free_slots - right_page->free_slots;
10034}
10035
10036static void
10037gc_sort_heap_by_empty_slots(rb_objspace_t *objspace)
10038{
10039 for (int j = 0; j < SIZE_POOL_COUNT; j++) {
10040 rb_size_pool_t *size_pool = &size_pools[j];
10041
10042 size_t total_pages = SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
10043 size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
10044 struct heap_page *page = 0, **page_list = malloc(size);
10045 size_t i = 0;
10046
10047 SIZE_POOL_EDEN_HEAP(size_pool)->free_pages = NULL;
10048 ccan_list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
10049 page_list[i++] = page;
10050 GC_ASSERT(page);
10051 }
10052
10053 GC_ASSERT((size_t)i == total_pages);
10054
10055 /* Sort the heap so "filled pages" are first. `heap_add_page` adds to the
10056 * head of the list, so empty pages will end up at the start of the heap */
10057 ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_free_slots, NULL);
10058
10059 /* Reset the eden heap */
10060 ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
10061
10062 for (i = 0; i < total_pages; i++) {
10063 ccan_list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
10064 if (page_list[i]->free_slots != 0) {
10065 heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
10066 }
10067 }
10068
10069 free(page_list);
10070 }
10071}
10072#endif
10073
10074static void
10075gc_ref_update_array(rb_objspace_t * objspace, VALUE v)
10076{
10077 if (ARY_SHARED_P(v)) {
10078#if USE_RVARGC
10079 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
10080#endif
10081
10082 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
10083
10084#if USE_RVARGC
10085 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
10086 // If the root is embedded and its location has changed
10087 if (ARY_EMBED_P(new_root) && new_root != old_root) {
10088 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
10089 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
10090 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
10091 }
10092#endif
10093 }
10094 else {
10095 long len = RARRAY_LEN(v);
10096
10097 if (len > 0) {
10098            VALUE *ptr = (VALUE *)RARRAY_CONST_PTR_TRANSIENT(v);
10099            for (long i = 0; i < len; i++) {
10100 UPDATE_IF_MOVED(objspace, ptr[i]);
10101 }
10102 }
10103
10104#if USE_RVARGC
10105 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
10106 if (rb_ary_embeddable_p(v)) {
10107 rb_ary_make_embedded(v);
10108 }
10109 }
10110#endif
10111 }
10112}
10113
10114static void update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl);
10115
10116static void
10117gc_ref_update_object(rb_objspace_t *objspace, VALUE v)
10118{
10119 VALUE *ptr = ROBJECT_IVPTR(v);
10120
10121 if (rb_shape_obj_too_complex(v)) {
10122 update_m_tbl(objspace, ROBJECT_IV_HASH(v));
10123 return;
10124 }
10125
10126#if USE_RVARGC
10127 size_t slot_size = rb_gc_obj_slot_size(v);
10128 size_t embed_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(v));
10129 if (slot_size >= embed_size && !RB_FL_TEST_RAW(v, ROBJECT_EMBED)) {
10130 // Object can be re-embedded
10131 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_IV_COUNT(v));
10132 RB_FL_SET_RAW(v, ROBJECT_EMBED);
10133 if (ROBJ_TRANSIENT_P(v)) {
10134 ROBJ_TRANSIENT_UNSET(v);
10135 }
10136 else {
10137 xfree(ptr);
10138 }
10139 ptr = ROBJECT(v)->as.ary;
10140 }
10141#endif
10142
10143 for (uint32_t i = 0; i < ROBJECT_IV_COUNT(v); i++) {
10144 UPDATE_IF_MOVED(objspace, ptr[i]);
10145 }
10146}
10147
10148static int
10149hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
10150{
10151 rb_objspace_t *objspace = (rb_objspace_t *)argp;
10152
10153 if (gc_object_moved_p(objspace, (VALUE)*key)) {
10154 *key = rb_gc_location((VALUE)*key);
10155 }
10156
10157 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10158 *value = rb_gc_location((VALUE)*value);
10159 }
10160
10161 return ST_CONTINUE;
10162}
10163
10164static int
10165hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
10166{
10167 rb_objspace_t *objspace;
10168
10169 objspace = (rb_objspace_t *)argp;
10170
10171 if (gc_object_moved_p(objspace, (VALUE)key)) {
10172 return ST_REPLACE;
10173 }
10174
10175 if (gc_object_moved_p(objspace, (VALUE)value)) {
10176 return ST_REPLACE;
10177 }
10178 return ST_CONTINUE;
10179}
10180
10181static int
10182hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
10183{
10184 rb_objspace_t *objspace = (rb_objspace_t *)argp;
10185
10186 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10187 *value = rb_gc_location((VALUE)*value);
10188 }
10189
10190 return ST_CONTINUE;
10191}
10192
10193static int
10194hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
10195{
10196 rb_objspace_t *objspace;
10197
10198 objspace = (rb_objspace_t *)argp;
10199
10200 if (gc_object_moved_p(objspace, (VALUE)value)) {
10201 return ST_REPLACE;
10202 }
10203 return ST_CONTINUE;
10204}
10205
10206static void
10207gc_update_tbl_refs(rb_objspace_t * objspace, st_table *tbl)
10208{
10209 if (!tbl || tbl->num_entries == 0) return;
10210
10211 if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
10212 rb_raise(rb_eRuntimeError, "hash modified during iteration");
10213 }
10214}
10215
10216static void
10217gc_update_table_refs(rb_objspace_t * objspace, st_table *tbl)
10218{
10219 if (!tbl || tbl->num_entries == 0) return;
10220
10221 if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
10222 rb_raise(rb_eRuntimeError, "hash modified during iteration");
10223 }
10224}
10225
10226/* Update MOVED references in an st_table */
10227void
10228rb_gc_update_tbl_refs(st_table *ptr)
10229{
10230 rb_objspace_t *objspace = &rb_objspace;
10231 gc_update_table_refs(objspace, ptr);
10232}
10233
10234static void
10235gc_ref_update_hash(rb_objspace_t * objspace, VALUE v)
10236{
10237 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
10238}
10239
10240static void
10241gc_ref_update_method_entry(rb_objspace_t *objspace, rb_method_entry_t *me)
10242{
10243 rb_method_definition_t *def = me->def;
10244
10245 UPDATE_IF_MOVED(objspace, me->owner);
10246 UPDATE_IF_MOVED(objspace, me->defined_class);
10247
10248 if (def) {
10249 switch (def->type) {
10250 case VM_METHOD_TYPE_ISEQ:
10251 if (def->body.iseq.iseqptr) {
10252 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, def->body.iseq.iseqptr);
10253 }
10254 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, def->body.iseq.cref);
10255 break;
10256 case VM_METHOD_TYPE_ATTRSET:
10257 case VM_METHOD_TYPE_IVAR:
10258 UPDATE_IF_MOVED(objspace, def->body.attr.location);
10259 break;
10260 case VM_METHOD_TYPE_BMETHOD:
10261 UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
10262 break;
10263 case VM_METHOD_TYPE_ALIAS:
10264 TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.alias.original_me);
10265 return;
10266 case VM_METHOD_TYPE_REFINED:
10267 TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.refined.orig_me);
10268 UPDATE_IF_MOVED(objspace, def->body.refined.owner);
10269 break;
10270 case VM_METHOD_TYPE_CFUNC:
10271 case VM_METHOD_TYPE_ZSUPER:
10272 case VM_METHOD_TYPE_MISSING:
10273 case VM_METHOD_TYPE_OPTIMIZED:
10274 case VM_METHOD_TYPE_UNDEF:
10275 case VM_METHOD_TYPE_NOTIMPLEMENTED:
10276 break;
10277 }
10278 }
10279}
10280
10281static void
10282gc_update_values(rb_objspace_t *objspace, long n, VALUE *values)
10283{
10284 long i;
10285
10286 for (i=0; i<n; i++) {
10287 UPDATE_IF_MOVED(objspace, values[i]);
10288 }
10289}
10290
10291static void
10292gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
10293{
10294 switch (imemo_type(obj)) {
10295 case imemo_env:
10296 {
10297 rb_env_t *env = (rb_env_t *)obj;
10298 if (LIKELY(env->ep)) {
10299                // env->ep can still be NULL just after newobj().
10300 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
10301 UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
10302 gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
10303 }
10304 }
10305 break;
10306 case imemo_cref:
10307 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
10308 TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
10309 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
10310 break;
10311 case imemo_svar:
10312 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
10313 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
10314 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
10315 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
10316 break;
10317 case imemo_throw_data:
10318 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
10319 break;
10320 case imemo_ifunc:
10321 break;
10322 case imemo_memo:
10323 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
10324 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
10325 break;
10326 case imemo_ment:
10327 gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
10328 break;
10329 case imemo_iseq:
10330 rb_iseq_update_references((rb_iseq_t *)obj);
10331 break;
10332 case imemo_ast:
10333 rb_ast_update_references((rb_ast_t *)obj);
10334 break;
10335 case imemo_callcache:
10336 {
10337 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
10338 if (cc->klass) {
10339 UPDATE_IF_MOVED(objspace, cc->klass);
10340 if (!is_live_object(objspace, cc->klass)) {
10341 *((VALUE *)(&cc->klass)) = (VALUE)0;
10342 }
10343 }
10344
10345 if (cc->cme_) {
10346 TYPED_UPDATE_IF_MOVED(objspace, struct rb_callable_method_entry_struct *, cc->cme_);
10347 if (!is_live_object(objspace, (VALUE)cc->cme_)) {
10348 *((struct rb_callable_method_entry_struct **)(&cc->cme_)) = (struct rb_callable_method_entry_struct *)0;
10349 }
10350 }
10351 }
10352 break;
10353 case imemo_constcache:
10354 {
10355            const struct iseq_inline_constant_cache_entry *ice = (struct iseq_inline_constant_cache_entry *)obj;
10356            UPDATE_IF_MOVED(objspace, ice->value);
10357 }
10358 break;
10359 case imemo_parser_strterm:
10360 case imemo_tmpbuf:
10361 case imemo_callinfo:
10362 break;
10363 default:
10364 rb_bug("not reachable %d", imemo_type(obj));
10365 break;
10366 }
10367}
10368
10369static enum rb_id_table_iterator_result
10370check_id_table_move(VALUE value, void *data)
10371{
10372 rb_objspace_t *objspace = (rb_objspace_t *)data;
10373
10374 if (gc_object_moved_p(objspace, (VALUE)value)) {
10375 return ID_TABLE_REPLACE;
10376 }
10377
10378 return ID_TABLE_CONTINUE;
10379}
10380
10381/* Returns the new location of an object, if it moved. Otherwise returns
10382 * the existing location. */
10383VALUE
10384rb_gc_location(VALUE value)
10385{
10386
10387 VALUE destination;
10388
10389 if (!SPECIAL_CONST_P(value)) {
10390 void *poisoned = asan_unpoison_object_temporary(value);
10391
10392 if (BUILTIN_TYPE(value) == T_MOVED) {
10393 destination = (VALUE)RMOVED(value)->destination;
10394 GC_ASSERT(BUILTIN_TYPE(destination) != T_NONE);
10395 }
10396 else {
10397 destination = value;
10398 }
10399
10400 /* Re-poison slot if it's not the one we want */
10401 if (poisoned) {
10402 GC_ASSERT(BUILTIN_TYPE(value) == T_NONE);
10403 asan_poison_object(value);
10404 }
10405 }
10406 else {
10407 destination = value;
10408 }
10409
10410 return destination;
10411}
10412
10413static enum rb_id_table_iterator_result
10414update_id_table(VALUE *value, void *data, int existing)
10415{
10416 rb_objspace_t *objspace = (rb_objspace_t *)data;
10417
10418 if (gc_object_moved_p(objspace, (VALUE)*value)) {
10419 *value = rb_gc_location((VALUE)*value);
10420 }
10421
10422 return ID_TABLE_CONTINUE;
10423}
10424
10425static void
10426update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
10427{
10428 if (tbl) {
10429 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
10430 }
10431}
10432
10433static enum rb_id_table_iterator_result
10434update_cc_tbl_i(VALUE ccs_ptr, void *data)
10435{
10436 rb_objspace_t *objspace = (rb_objspace_t *)data;
10437 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
10438 VM_ASSERT(vm_ccs_p(ccs));
10439
10440 if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
10441 ccs->cme = (const rb_callable_method_entry_t *)rb_gc_location((VALUE)ccs->cme);
10442 }
10443
10444 for (int i=0; i<ccs->len; i++) {
10445 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
10446 ccs->entries[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)ccs->entries[i].ci);
10447 }
10448 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
10449 ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
10450 }
10451 }
10452
10453 // do not replace
10454 return ID_TABLE_CONTINUE;
10455}
10456
10457static void
10458update_cc_tbl(rb_objspace_t *objspace, VALUE klass)
10459{
10460 struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
10461 if (tbl) {
10462 rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
10463 }
10464}
10465
10466static enum rb_id_table_iterator_result
10467update_cvc_tbl_i(VALUE cvc_entry, void *data)
10468{
10469 struct rb_cvar_class_tbl_entry *entry;
10470
10471 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
10472
10473 entry->class_value = rb_gc_location(entry->class_value);
10474
10475 return ID_TABLE_CONTINUE;
10476}
10477
10478static void
10479update_cvc_tbl(rb_objspace_t *objspace, VALUE klass)
10480{
10481 struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
10482 if (tbl) {
10483 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
10484 }
10485}
10486
10487static enum rb_id_table_iterator_result
10488update_const_table(VALUE value, void *data)
10489{
10490 rb_const_entry_t *ce = (rb_const_entry_t *)value;
10491 rb_objspace_t * objspace = (rb_objspace_t *)data;
10492
10493 if (gc_object_moved_p(objspace, ce->value)) {
10494 ce->value = rb_gc_location(ce->value);
10495 }
10496
10497 if (gc_object_moved_p(objspace, ce->file)) {
10498 ce->file = rb_gc_location(ce->file);
10499 }
10500
10501 return ID_TABLE_CONTINUE;
10502}
10503
10504static void
10505update_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
10506{
10507 if (!tbl) return;
10508 rb_id_table_foreach_values(tbl, update_const_table, objspace);
10509}
10510
10511static void
10512update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
10513{
10514 while (entry) {
10515 UPDATE_IF_MOVED(objspace, entry->klass);
10516 entry = entry->next;
10517 }
10518}
10519
10520static void
10521update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
10522{
10523 UPDATE_IF_MOVED(objspace, ext->origin_);
10524 UPDATE_IF_MOVED(objspace, ext->includer);
10525 UPDATE_IF_MOVED(objspace, ext->refined_class);
10526 update_subclass_entries(objspace, ext->subclasses);
10527}
10528
10529static void
10530update_superclasses(rb_objspace_t *objspace, VALUE obj)
10531{
10532 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
10533 for (size_t i = 0; i < RCLASS_SUPERCLASS_DEPTH(obj) + 1; i++) {
10534 UPDATE_IF_MOVED(objspace, RCLASS_SUPERCLASSES(obj)[i]);
10535 }
10536 }
10537}
10538
10539static void
10540gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
10541{
10542 RVALUE *any = RANY(obj);
10543
10544 gc_report(4, objspace, "update-refs: %p ->\n", (void *)obj);
10545
10546 switch (BUILTIN_TYPE(obj)) {
10547 case T_CLASS:
10548 case T_MODULE:
10549 if (RCLASS_SUPER((VALUE)obj)) {
10550 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
10551 }
10552 if (!RCLASS_EXT(obj)) break;
10553 update_m_tbl(objspace, RCLASS_M_TBL(obj));
10554 update_cc_tbl(objspace, obj);
10555 update_cvc_tbl(objspace, obj);
10556 update_superclasses(objspace, obj);
10557
10558 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
10559 UPDATE_IF_MOVED(objspace, RCLASS_IVPTR(obj)[i]);
10560 }
10561
10562 update_class_ext(objspace, RCLASS_EXT(obj));
10563 update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
10564 break;
10565
10566 case T_ICLASS:
10567 if (FL_TEST(obj, RICLASS_IS_ORIGIN) &&
10568 !FL_TEST(obj, RICLASS_ORIGIN_SHARED_MTBL)) {
10569 update_m_tbl(objspace, RCLASS_M_TBL(obj));
10570 }
10571 if (RCLASS_SUPER((VALUE)obj)) {
10572 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
10573 }
10574 if (!RCLASS_EXT(obj)) break;
10575 update_class_ext(objspace, RCLASS_EXT(obj));
10576 update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
10577 update_cc_tbl(objspace, obj);
10578 break;
10579
10580 case T_IMEMO:
10581 gc_ref_update_imemo(objspace, obj);
10582 return;
10583
10584 case T_NIL:
10585 case T_FIXNUM:
10586 case T_NODE:
10587 case T_MOVED:
10588 case T_NONE:
10589 /* These can't move */
10590 return;
10591
10592 case T_ARRAY:
10593 gc_ref_update_array(objspace, obj);
10594 break;
10595
10596 case T_HASH:
10597 gc_ref_update_hash(objspace, obj);
10598 UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
10599 break;
10600
10601 case T_STRING:
10602 {
10603#if USE_RVARGC
10604#endif
10605
10606 if (STR_SHARED_P(obj)) {
10607#if USE_RVARGC
10608 VALUE old_root = any->as.string.as.heap.aux.shared;
10609#endif
10610 UPDATE_IF_MOVED(objspace, any->as.string.as.heap.aux.shared);
10611#if USE_RVARGC
10612 VALUE new_root = any->as.string.as.heap.aux.shared;
10613 rb_str_update_shared_ary(obj, old_root, new_root);
10614#endif
10615 }
10616
10617#if USE_RVARGC
10618            /* If, after the move, the string is not embedded and can fit in the
10619             * slot it's been placed in, then re-embed it. */
10620 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
10621 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
10622 rb_str_make_embedded(obj);
10623 }
10624 }
10625#endif
10626
10627 break;
10628 }
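      /* T_DATA compaction support is opt-in: a typed data object may supply
       * a dcompact callback in its rb_data_type_t, and that callback is
       * expected to forward every VALUE it holds through rb_gc_location(). */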
10629 case T_DATA:
10630 /* Call the compaction callback, if it exists */
10631 {
10632 void *const ptr = DATA_PTR(obj);
10633 if (ptr) {
10634 if (RTYPEDDATA_P(obj)) {
10635 RUBY_DATA_FUNC compact_func = any->as.typeddata.type->function.dcompact;
10636 if (compact_func) (*compact_func)(ptr);
10637 }
10638 }
10639 }
10640 break;
10641
10642 case T_OBJECT:
10643 gc_ref_update_object(objspace, obj);
10644 break;
10645
10646 case T_FILE:
10647 if (any->as.file.fptr) {
10648 UPDATE_IF_MOVED(objspace, any->as.file.fptr->self);
10649 UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
10650 UPDATE_IF_MOVED(objspace, any->as.file.fptr->tied_io_for_writing);
10651 UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_asciicompat);
10652 UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_pre_ecopts);
10653 UPDATE_IF_MOVED(objspace, any->as.file.fptr->encs.ecopts);
10654 UPDATE_IF_MOVED(objspace, any->as.file.fptr->write_lock);
10655 }
10656 break;
10657 case T_REGEXP:
10658 UPDATE_IF_MOVED(objspace, any->as.regexp.src);
10659 break;
10660
10661 case T_SYMBOL:
10662 if (DYNAMIC_SYM_P((VALUE)any)) {
10663 UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
10664 }
10665 break;
10666
10667 case T_FLOAT:
10668 case T_BIGNUM:
10669 break;
10670
10671 case T_MATCH:
10672 UPDATE_IF_MOVED(objspace, any->as.match.regexp);
10673
10674 if (any->as.match.str) {
10675 UPDATE_IF_MOVED(objspace, any->as.match.str);
10676 }
10677 break;
10678
10679 case T_RATIONAL:
10680 UPDATE_IF_MOVED(objspace, any->as.rational.num);
10681 UPDATE_IF_MOVED(objspace, any->as.rational.den);
10682 break;
10683
10684 case T_COMPLEX:
10685 UPDATE_IF_MOVED(objspace, any->as.complex.real);
10686 UPDATE_IF_MOVED(objspace, any->as.complex.imag);
10687
10688 break;
10689
10690 case T_STRUCT:
10691 {
10692 long i, len = RSTRUCT_LEN(obj);
10693 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
10694
10695 for (i = 0; i < len; i++) {
10696 UPDATE_IF_MOVED(objspace, ptr[i]);
10697 }
10698 }
10699 break;
10700 default:
10701#if GC_DEBUG
10702 rb_gcdebug_print_obj_condition((VALUE)obj);
10703 rb_obj_info_dump(obj);
10704 rb_bug("unreachable");
10705#endif
10706 break;
10707
10708 }
10709
10710 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
10711
10712 gc_report(4, objspace, "update-refs: %p <-\n", (void *)obj);
10713}
10714
10715static int
10716gc_ref_update(void *vstart, void *vend, size_t stride, rb_objspace_t * objspace, struct heap_page *page)
10717{
10718 VALUE v = (VALUE)vstart;
10719 asan_unlock_freelist(page);
10720 asan_lock_freelist(page);
10721 page->flags.has_uncollectible_shady_objects = FALSE;
10722 page->flags.has_remembered_objects = FALSE;
10723
10724 /* For each object on the page */
10725 for (; v != (VALUE)vend; v += stride) {
10726 void *poisoned = asan_unpoison_object_temporary(v);
10727
10728 switch (BUILTIN_TYPE(v)) {
10729 case T_NONE:
10730 case T_MOVED:
10731 case T_ZOMBIE:
10732 break;
10733 default:
10734 if (RVALUE_WB_UNPROTECTED(v)) {
10735 page->flags.has_uncollectible_shady_objects = TRUE;
10736 }
10737 if (RVALUE_PAGE_MARKING(page, v)) {
10738 page->flags.has_remembered_objects = TRUE;
10739 }
10740 if (page->flags.before_sweep) {
10741 if (RVALUE_MARKED(v)) {
10742 gc_update_object_references(objspace, v);
10743 }
10744 }
10745 else {
10746 gc_update_object_references(objspace, v);
10747 }
10748 }
10749
10750 if (poisoned) {
10751 asan_poison_object(v);
10752 }
10753 }
10754
10755 return 0;
10756}
10757
10758extern rb_symbols_t ruby_global_symbols;
10759#define global_symbols ruby_global_symbols
10760
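/* Second pass of compaction: walks every page of every size pool and
 * rewrites references to T_MOVED objects with their new addresses, then
 * updates the VM, the transient heap, the global symbol tables, the
 * object-id tables and the finalizer table. */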
10761static void
10762gc_update_references(rb_objspace_t *objspace)
10763{
10764 rb_execution_context_t *ec = GET_EC();
10765 rb_vm_t *vm = rb_ec_vm_ptr(ec);
10766
10767 struct heap_page *page = NULL;
10768
10769 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10770 bool should_set_mark_bits = TRUE;
10771 rb_size_pool_t *size_pool = &size_pools[i];
10772 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10773
10774 ccan_list_for_each(&heap->pages, page, page_node) {
10775 uintptr_t start = (uintptr_t)page->start;
10776 uintptr_t end = start + (page->total_slots * size_pool->slot_size);
10777
10778 gc_ref_update((void *)start, (void *)end, size_pool->slot_size, objspace, page);
10779 if (page == heap->sweeping_page) {
10780 should_set_mark_bits = FALSE;
10781 }
10782 if (should_set_mark_bits) {
10783 gc_setup_mark_bits(page);
10784 }
10785 }
10786 }
10787 rb_vm_update_references(vm);
10788 rb_transient_heap_update_references();
10789 rb_gc_update_global_tbl();
10790 global_symbols.ids = rb_gc_location(global_symbols.ids);
10791 global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
10792 gc_update_tbl_refs(objspace, objspace->obj_to_id_tbl);
10793 gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
10794 gc_update_table_refs(objspace, global_symbols.str_sym);
10795 gc_update_table_refs(objspace, finalizer_table);
10796}
10797
10798#if GC_CAN_COMPILE_COMPACTION
10799/*
10800 * call-seq:
10801 * GC.latest_compact_info -> hash
10802 *
10803 * Returns information about objects moved in the most recent \GC compaction.
10804 *
10805 * The returned hash has four keys: :considered, :moved, :moved_up and
10806 * :moved_down. The :considered hash lists the number of objects that were
10807 * considered for movement by the compactor, and the :moved hash lists the
10808 * number of objects that were actually moved. Some objects can't be moved
10809 * (maybe they were pinned), so these numbers can be used to calculate compaction efficiency.
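 *
 * For example (a rough sketch; the exact counts vary from run to run):
 *
 *     GC.compact
 *     info = GC.latest_compact_info
 *     info[:moved].values.sum  # total number of objects moved, all types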
10810 */
10811static VALUE
10812gc_compact_stats(VALUE self)
10813{
10814 size_t i;
10815 rb_objspace_t *objspace = &rb_objspace;
10816 VALUE h = rb_hash_new();
10817 VALUE considered = rb_hash_new();
10818 VALUE moved = rb_hash_new();
10819 VALUE moved_up = rb_hash_new();
10820 VALUE moved_down = rb_hash_new();
10821
10822 for (i=0; i<T_MASK; i++) {
10823 if (objspace->rcompactor.considered_count_table[i]) {
10824 rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
10825 }
10826
10827 if (objspace->rcompactor.moved_count_table[i]) {
10828 rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
10829 }
10830
10831 if (objspace->rcompactor.moved_up_count_table[i]) {
10832 rb_hash_aset(moved_up, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_up_count_table[i]));
10833 }
10834
10835 if (objspace->rcompactor.moved_down_count_table[i]) {
10836 rb_hash_aset(moved_down, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_down_count_table[i]));
10837 }
10838 }
10839
10840 rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
10841 rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);
10842 rb_hash_aset(h, ID2SYM(rb_intern("moved_up")), moved_up);
10843 rb_hash_aset(h, ID2SYM(rb_intern("moved_down")), moved_down);
10844
10845 return h;
10846}
10847#else
10848# define gc_compact_stats rb_f_notimplement
10849#endif
10850
10851#if GC_CAN_COMPILE_COMPACTION
10852static void
10853root_obj_check_moved_i(const char *category, VALUE obj, void *data)
10854{
10855 if (gc_object_moved_p(&rb_objspace, obj)) {
10856 rb_bug("ROOT %s points to MOVED: %p -> %s\n", category, (void *)obj, obj_info(rb_gc_location(obj)));
10857 }
10858}
10859
10860static void
10861reachable_object_check_moved_i(VALUE ref, void *data)
10862{
10863 VALUE parent = (VALUE)data;
10864 if (gc_object_moved_p(&rb_objspace, ref)) {
10865 rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
10866 }
10867}
10868
10869static int
10870heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
10871{
10872 VALUE v = (VALUE)vstart;
10873 for (; v != (VALUE)vend; v += stride) {
10874 if (gc_object_moved_p(&rb_objspace, v)) {
10875            /* A moved object is still on the heap; something may still hold a reference to it. */
10876 }
10877 else {
10878 void *poisoned = asan_unpoison_object_temporary(v);
10879
10880 switch (BUILTIN_TYPE(v)) {
10881 case T_NONE:
10882 case T_ZOMBIE:
10883 break;
10884 default:
10885 if (!rb_objspace_garbage_object_p(v)) {
10886 rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
10887 }
10888 }
10889
10890 if (poisoned) {
10891 GC_ASSERT(BUILTIN_TYPE(v) == T_NONE);
10892 asan_poison_object(v);
10893 }
10894 }
10895 }
10896
10897 return 0;
10898}
10899
10900/*
10901 * call-seq:
10902 * GC.compact
10903 *
10904 * This function compacts objects together in Ruby's heap. It eliminates
10905 * unused space (or fragmentation) in the heap by moving objects into that
10906 * unused space. This function returns a hash which contains statistics about
10907 * which objects were moved. See <tt>GC.latest_compact_info</tt> for details
10908 * about compaction statistics.
10909 *
10910 * This method is implementation specific and not expected to be implemented
10911 * in any implementation besides MRI.
10912 *
10913 * To test whether \GC compaction is supported, use the idiom:
10914 *
10915 * GC.respond_to?(:compact)
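 *
 * A guarded call, for instance, might look like:
 *
 *     GC.compact if GC.respond_to?(:compact)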
10916 */
10917static VALUE
10918gc_compact(VALUE self)
10919{
10920 /* Run GC with compaction enabled */
10921 gc_start_internal(NULL, self, Qtrue, Qtrue, Qtrue, Qtrue);
10922
10923 return gc_compact_stats(self);
10924}
10925#else
10926# define gc_compact rb_f_notimplement
10927#endif
10928
10929#if GC_CAN_COMPILE_COMPACTION
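/* Backs GC.verify_compaction_references: runs a major GC, optionally grows
 * each heap (expand_heap) and sorts pages so objects are moved toward empty
 * pages (toward_empty), compacts, and then verifies that nothing still
 * references a T_MOVED slot. */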
10930static VALUE
10931gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE expand_heap, VALUE toward_empty)
10932{
10933 rb_objspace_t *objspace = &rb_objspace;
10934
10935 /* Clear the heap. */
10936 gc_start_internal(NULL, self, Qtrue, Qtrue, Qtrue, Qfalse);
10937 size_t growth_slots = gc_params.heap_init_slots;
10938
10939 if (RTEST(double_heap)) {
10940 rb_warn("double_heap is deprecated, please use expand_heap instead");
10941 }
10942
10943 RB_VM_LOCK_ENTER();
10944 {
10945 gc_rest(objspace);
10946
10947 /* if both double_heap and expand_heap are set, expand_heap takes precedence */
10948 if (RTEST(double_heap) || RTEST(expand_heap)) {
10949 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10950 rb_size_pool_t *size_pool = &size_pools[i];
10951 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10952
10953 if (RTEST(expand_heap)) {
10954 size_t required_pages = growth_slots / size_pool->slot_size;
10955 heap_add_pages(objspace, size_pool, heap, MAX(required_pages, heap->total_pages));
10956 }
10957 else {
10958 heap_add_pages(objspace, size_pool, heap, heap->total_pages);
10959 }
10960 }
10961 }
10962
10963 if (RTEST(toward_empty)) {
10964 gc_sort_heap_by_empty_slots(objspace);
10965 }
10966 }
10967 RB_VM_LOCK_LEAVE();
10968
10969 gc_start_internal(NULL, self, Qtrue, Qtrue, Qtrue, Qtrue);
10970
10971 objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
10972 objspace_each_objects(objspace, heap_check_moved_i, NULL, TRUE);
10973
10974 return gc_compact_stats(self);
10975}
10976#else
10977# define gc_verify_compaction_references (rb_builtin_arity3_function_type)rb_f_notimplement
10978#endif
10979
10980VALUE
10981rb_gc_start(void)
10982{
10983 rb_gc();
10984 return Qnil;
10985}
10986
10987void
10988rb_gc(void)
10989{
10990 rb_objspace_t *objspace = &rb_objspace;
10991 unsigned int reason = GPR_DEFAULT_REASON;
10992 garbage_collect(objspace, reason);
10993}
10994
10995int
10996rb_during_gc(void)
10997{
10998 rb_objspace_t *objspace = &rb_objspace;
10999 return during_gc;
11000}
11001
11002#if RGENGC_PROFILE >= 2
11003
11004static const char *type_name(int type, VALUE obj);
11005
11006static void
11007gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
11008{
11009 VALUE result = rb_hash_new_with_size(T_MASK);
11010 int i;
11011 for (i=0; i<T_MASK; i++) {
11012 const char *type = type_name(i, 0);
11013 rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
11014 }
11015 rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
11016}
11017#endif
11018
11019size_t
11020rb_gc_count(void)
11021{
11022 return rb_objspace.profile.count;
11023}
11024
11025static VALUE
11026gc_count(rb_execution_context_t *ec, VALUE self)
11027{
11028 return SIZET2NUM(rb_gc_count());
11029}
11030
11031static VALUE
11032gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
11033{
11034 static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state, sym_need_major_by;
11035 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
11036#if RGENGC_ESTIMATE_OLDMALLOC
11037 static VALUE sym_oldmalloc;
11038#endif
11039 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
11040 static VALUE sym_none, sym_marking, sym_sweeping;
11041 VALUE hash = Qnil, key = Qnil;
11042 VALUE major_by, need_major_by;
11043 unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
11044
11045 if (SYMBOL_P(hash_or_key)) {
11046 key = hash_or_key;
11047 }
11048 else if (RB_TYPE_P(hash_or_key, T_HASH)) {
11049 hash = hash_or_key;
11050 }
11051 else {
11052 rb_raise(rb_eTypeError, "non-hash or symbol given");
11053 }
11054
11055 if (NIL_P(sym_major_by)) {
11056#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
11057 S(major_by);
11058 S(gc_by);
11059 S(immediate_sweep);
11060 S(have_finalizer);
11061 S(state);
11062 S(need_major_by);
11063
11064 S(stress);
11065 S(nofree);
11066 S(oldgen);
11067 S(shady);
11068 S(force);
11069#if RGENGC_ESTIMATE_OLDMALLOC
11070 S(oldmalloc);
11071#endif
11072 S(newobj);
11073 S(malloc);
11074 S(method);
11075 S(capi);
11076
11077 S(none);
11078 S(marking);
11079 S(sweeping);
11080#undef S
11081 }
11082
11083#define SET(name, attr) \
11084 if (key == sym_##name) \
11085 return (attr); \
11086 else if (hash != Qnil) \
11087 rb_hash_aset(hash, sym_##name, (attr));
11088
11089 major_by =
11090 (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
11091 (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
11092 (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
11093 (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
11094#if RGENGC_ESTIMATE_OLDMALLOC
11095 (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
11096#endif
11097 Qnil;
11098 SET(major_by, major_by);
11099
11100 if (orig_flags == 0) { /* set need_major_by only if flags not set explicitly */
11101 unsigned int need_major_flags = objspace->rgengc.need_major_gc;
11102 need_major_by =
11103 (need_major_flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
11104 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
11105 (need_major_flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
11106 (need_major_flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
11107#if RGENGC_ESTIMATE_OLDMALLOC
11108 (need_major_flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
11109#endif
11110 Qnil;
11111 SET(need_major_by, need_major_by);
11112 }
11113
11114 SET(gc_by,
11115 (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
11116 (flags & GPR_FLAG_MALLOC) ? sym_malloc :
11117 (flags & GPR_FLAG_METHOD) ? sym_method :
11118 (flags & GPR_FLAG_CAPI) ? sym_capi :
11119 (flags & GPR_FLAG_STRESS) ? sym_stress :
11120 Qnil
11121 );
11122
11123 SET(have_finalizer, RBOOL(flags & GPR_FLAG_HAVE_FINALIZE));
11124 SET(immediate_sweep, RBOOL(flags & GPR_FLAG_IMMEDIATE_SWEEP));
11125
11126 if (orig_flags == 0) {
11127 SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
11128 gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
11129 }
11130#undef SET
11131
11132 if (!NIL_P(key)) {/* matched key should return above */
11133 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
11134 }
11135
11136 return hash;
11137}
11138
11139VALUE
11140rb_gc_latest_gc_info(VALUE key)
11141{
11142 rb_objspace_t *objspace = &rb_objspace;
11143 return gc_info_decode(objspace, key, 0);
11144}
11145
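/* Backs the Ruby-level GC.latest_gc_info. A usage sketch (the values shown
 * are illustrative and depend on the most recent GC):
 *
 *   GC.latest_gc_info          #=> {:major_by=>nil, :gc_by=>:newobj, ...}
 *   GC.latest_gc_info(:gc_by)  #=> :newobj
 */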
11146static VALUE
11147gc_latest_gc_info(rb_execution_context_t *ec, VALUE self, VALUE arg)
11148{
11149 rb_objspace_t *objspace = &rb_objspace;
11150
11151 if (NIL_P(arg)) {
11152 arg = rb_hash_new();
11153 }
11154 else if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
11155 rb_raise(rb_eTypeError, "non-hash or symbol given");
11156 }
11157
11158 return gc_info_decode(objspace, arg, 0);
11159}
11160
11161enum gc_stat_sym {
11162 gc_stat_sym_count,
11163 gc_stat_sym_time,
11164 gc_stat_sym_heap_allocated_pages,
11165 gc_stat_sym_heap_sorted_length,
11166 gc_stat_sym_heap_allocatable_pages,
11167 gc_stat_sym_heap_available_slots,
11168 gc_stat_sym_heap_live_slots,
11169 gc_stat_sym_heap_free_slots,
11170 gc_stat_sym_heap_final_slots,
11171 gc_stat_sym_heap_marked_slots,
11172 gc_stat_sym_heap_eden_pages,
11173 gc_stat_sym_heap_tomb_pages,
11174 gc_stat_sym_total_allocated_pages,
11175 gc_stat_sym_total_freed_pages,
11176 gc_stat_sym_total_allocated_objects,
11177 gc_stat_sym_total_freed_objects,
11178 gc_stat_sym_malloc_increase_bytes,
11179 gc_stat_sym_malloc_increase_bytes_limit,
11180 gc_stat_sym_minor_gc_count,
11181 gc_stat_sym_major_gc_count,
11182 gc_stat_sym_compact_count,
11183 gc_stat_sym_read_barrier_faults,
11184 gc_stat_sym_total_moved_objects,
11185 gc_stat_sym_remembered_wb_unprotected_objects,
11186 gc_stat_sym_remembered_wb_unprotected_objects_limit,
11187 gc_stat_sym_old_objects,
11188 gc_stat_sym_old_objects_limit,
11189#if RGENGC_ESTIMATE_OLDMALLOC
11190 gc_stat_sym_oldmalloc_increase_bytes,
11191 gc_stat_sym_oldmalloc_increase_bytes_limit,
11192#endif
11193#if RGENGC_PROFILE
11194 gc_stat_sym_total_generated_normal_object_count,
11195 gc_stat_sym_total_generated_shady_object_count,
11196 gc_stat_sym_total_shade_operation_count,
11197 gc_stat_sym_total_promoted_count,
11198 gc_stat_sym_total_remembered_normal_object_count,
11199 gc_stat_sym_total_remembered_shady_object_count,
11200#endif
11201 gc_stat_sym_last
11202};
11203
11204static VALUE gc_stat_symbols[gc_stat_sym_last];
11205
11206static void
11207setup_gc_stat_symbols(void)
11208{
11209 if (gc_stat_symbols[0] == 0) {
11210#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
11211 S(count);
11212 S(time);
11213 S(heap_allocated_pages);
11214 S(heap_sorted_length);
11215 S(heap_allocatable_pages);
11216 S(heap_available_slots);
11217 S(heap_live_slots);
11218 S(heap_free_slots);
11219 S(heap_final_slots);
11220 S(heap_marked_slots);
11221 S(heap_eden_pages);
11222 S(heap_tomb_pages);
11223 S(total_allocated_pages);
11224 S(total_freed_pages);
11225 S(total_allocated_objects);
11226 S(total_freed_objects);
11227 S(malloc_increase_bytes);
11228 S(malloc_increase_bytes_limit);
11229 S(minor_gc_count);
11230 S(major_gc_count);
11231 S(compact_count);
11232 S(read_barrier_faults);
11233 S(total_moved_objects);
11234 S(remembered_wb_unprotected_objects);
11235 S(remembered_wb_unprotected_objects_limit);
11236 S(old_objects);
11237 S(old_objects_limit);
11238#if RGENGC_ESTIMATE_OLDMALLOC
11239 S(oldmalloc_increase_bytes);
11240 S(oldmalloc_increase_bytes_limit);
11241#endif
11242#if RGENGC_PROFILE
11243 S(total_generated_normal_object_count);
11244 S(total_generated_shady_object_count);
11245 S(total_shade_operation_count);
11246 S(total_promoted_count);
11247 S(total_remembered_normal_object_count);
11248 S(total_remembered_shady_object_count);
11249#endif /* RGENGC_PROFILE */
11250#undef S
11251 }
11252}
11253
11254static size_t
11255gc_stat_internal(VALUE hash_or_sym)
11256{
11257 rb_objspace_t *objspace = &rb_objspace;
11258 VALUE hash = Qnil, key = Qnil;
11259
11260 setup_gc_stat_symbols();
11261
11262 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
11263 hash = hash_or_sym;
11264 }
11265 else if (SYMBOL_P(hash_or_sym)) {
11266 key = hash_or_sym;
11267 }
11268 else {
11269 rb_raise(rb_eTypeError, "non-hash or symbol argument");
11270 }
11271
11272#define SET(name, attr) \
11273 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
11274 return attr; \
11275 else if (hash != Qnil) \
11276 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
11277
11278 SET(count, objspace->profile.count);
11279 SET(time, (size_t) (objspace->profile.total_time_ns / (1000 * 1000) /* ns -> ms */)); // TODO: UINT64T2NUM
11280
11281 /* implementation dependent counters */
11282 SET(heap_allocated_pages, heap_allocated_pages);
11283 SET(heap_sorted_length, heap_pages_sorted_length);
11284 SET(heap_allocatable_pages, heap_allocatable_pages(objspace));
11285 SET(heap_available_slots, objspace_available_slots(objspace));
11286 SET(heap_live_slots, objspace_live_slots(objspace));
11287 SET(heap_free_slots, objspace_free_slots(objspace));
11288 SET(heap_final_slots, heap_pages_final_slots);
11289 SET(heap_marked_slots, objspace->marked_slots);
11290 SET(heap_eden_pages, heap_eden_total_pages(objspace));
11291 SET(heap_tomb_pages, heap_tomb_total_pages(objspace));
11292 SET(total_allocated_pages, total_allocated_pages(objspace));
11293 SET(total_freed_pages, total_freed_pages(objspace));
11294 SET(total_allocated_objects, objspace->total_allocated_objects);
11295 SET(total_freed_objects, objspace->profile.total_freed_objects);
11296 SET(malloc_increase_bytes, malloc_increase);
11297 SET(malloc_increase_bytes_limit, malloc_limit);
11298 SET(minor_gc_count, objspace->profile.minor_gc_count);
11299 SET(major_gc_count, objspace->profile.major_gc_count);
11300 SET(compact_count, objspace->profile.compact_count);
11301 SET(read_barrier_faults, objspace->profile.read_barrier_faults);
11302 SET(total_moved_objects, objspace->rcompactor.total_moved);
11303 SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
11304 SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
11305 SET(old_objects, objspace->rgengc.old_objects);
11306 SET(old_objects_limit, objspace->rgengc.old_objects_limit);
11307#if RGENGC_ESTIMATE_OLDMALLOC
11308 SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
11309 SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
11310#endif
11311
11312#if RGENGC_PROFILE
11313 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
11314 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
11315 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
11316 SET(total_promoted_count, objspace->profile.total_promoted_count);
11317 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
11318 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
11319#endif /* RGENGC_PROFILE */
11320#undef SET
11321
11322 if (!NIL_P(key)) { /* matched key should return above */
11323 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
11324 }
11325
11326#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
11327 if (hash != Qnil) {
11328 gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
11329 gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
11330 gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
11331 gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
11332 gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
11333 gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
11334 }
11335#endif
11336
11337 return 0;
11338}
11339
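/* Backs the Ruby-level GC.stat. A usage sketch (the numbers are illustrative
 * and vary between processes):
 *
 *   GC.stat(:count)                  #=> 4
 *   GC.stat[:heap_allocated_pages]   #=> 24
 */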
11340static VALUE
11341gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
11342{
11343 if (NIL_P(arg)) {
11344 arg = rb_hash_new();
11345 }
11346 else if (SYMBOL_P(arg)) {
11347 size_t value = gc_stat_internal(arg);
11348 return SIZET2NUM(value);
11349 }
11350 else if (RB_TYPE_P(arg, T_HASH)) {
11351 // ok
11352 }
11353 else {
11354 rb_raise(rb_eTypeError, "non-hash or symbol given");
11355 }
11356
11357 gc_stat_internal(arg);
11358 return arg;
11359}
11360
11361size_t
11362rb_gc_stat(VALUE key)
11363{
11364 if (SYMBOL_P(key)) {
11365 size_t value = gc_stat_internal(key);
11366 return value;
11367 }
11368 else {
11369 gc_stat_internal(key);
11370 return 0;
11371 }
11372}
11373
11374
11375enum gc_stat_heap_sym {
11376 gc_stat_heap_sym_slot_size,
11377 gc_stat_heap_sym_heap_allocatable_pages,
11378 gc_stat_heap_sym_heap_eden_pages,
11379 gc_stat_heap_sym_heap_eden_slots,
11380 gc_stat_heap_sym_heap_tomb_pages,
11381 gc_stat_heap_sym_heap_tomb_slots,
11382 gc_stat_heap_sym_total_allocated_pages,
11383 gc_stat_heap_sym_total_freed_pages,
11384 gc_stat_heap_sym_force_major_gc_count,
11385 gc_stat_heap_sym_last
11386};
11387
11388static VALUE gc_stat_heap_symbols[gc_stat_heap_sym_last];
11389
11390static void
11391setup_gc_stat_heap_symbols(void)
11392{
11393 if (gc_stat_heap_symbols[0] == 0) {
11394#define S(s) gc_stat_heap_symbols[gc_stat_heap_sym_##s] = ID2SYM(rb_intern_const(#s))
11395 S(slot_size);
11396 S(heap_allocatable_pages);
11397 S(heap_eden_pages);
11398 S(heap_eden_slots);
11399 S(heap_tomb_pages);
11400 S(heap_tomb_slots);
11401 S(total_allocated_pages);
11402 S(total_freed_pages);
11403 S(force_major_gc_count);
11404#undef S
11405 }
11406}
11407
11408static size_t
11409gc_stat_heap_internal(int size_pool_idx, VALUE hash_or_sym)
11410{
11411 rb_objspace_t *objspace = &rb_objspace;
11412 VALUE hash = Qnil, key = Qnil;
11413
11414 setup_gc_stat_heap_symbols();
11415
11416 if (RB_TYPE_P(hash_or_sym, T_HASH)) {
11417 hash = hash_or_sym;
11418 }
11419 else if (SYMBOL_P(hash_or_sym)) {
11420 key = hash_or_sym;
11421 }
11422 else {
11423 rb_raise(rb_eTypeError, "non-hash or symbol argument");
11424 }
11425
11426 if (size_pool_idx < 0 || size_pool_idx >= SIZE_POOL_COUNT) {
11427 rb_raise(rb_eArgError, "size pool index out of range");
11428 }
11429
11430 rb_size_pool_t *size_pool = &size_pools[size_pool_idx];
11431
11432#define SET(name, attr) \
11433 if (key == gc_stat_heap_symbols[gc_stat_heap_sym_##name]) \
11434 return attr; \
11435 else if (hash != Qnil) \
11436 rb_hash_aset(hash, gc_stat_heap_symbols[gc_stat_heap_sym_##name], SIZET2NUM(attr));
11437
11438 SET(slot_size, size_pool->slot_size);
11439 SET(heap_allocatable_pages, size_pool->allocatable_pages);
11440 SET(heap_eden_pages, SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
11441 SET(heap_eden_slots, SIZE_POOL_EDEN_HEAP(size_pool)->total_slots);
11442 SET(heap_tomb_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
11443 SET(heap_tomb_slots, SIZE_POOL_TOMB_HEAP(size_pool)->total_slots);
11444 SET(total_allocated_pages, size_pool->total_allocated_pages);
11445 SET(total_freed_pages, size_pool->total_freed_pages);
11446 SET(force_major_gc_count, size_pool->force_major_gc_count);
11447#undef SET
11448
11449 if (!NIL_P(key)) { /* matched key should return above */
11450 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
11451 }
11452
11453 return 0;
11454}
11455
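/* Backs the Ruby-level GC.stat_heap. A usage sketch (slot sizes and counts
 * are illustrative and platform dependent):
 *
 *   GC.stat_heap(0, :slot_size)  #=> 40
 *   GC.stat_heap                 #=> {0=>{:slot_size=>40, ...}, 1=>{...}, ...}
 */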
11456static VALUE
11457gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
11458{
11459 if (NIL_P(heap_name)) {
11460 if (NIL_P(arg)) {
11461 arg = rb_hash_new();
11462 }
11463 else if (RB_TYPE_P(arg, T_HASH)) {
11464 // ok
11465 }
11466 else {
11467 rb_raise(rb_eTypeError, "non-hash given");
11468 }
11469
11470 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11471 VALUE hash = rb_hash_aref(arg, INT2FIX(i));
11472 if (NIL_P(hash)) {
11473 hash = rb_hash_new();
11474 rb_hash_aset(arg, INT2FIX(i), hash);
11475 }
11476 gc_stat_heap_internal(i, hash);
11477 }
11478 }
11479 else if (FIXNUM_P(heap_name)) {
11480 int size_pool_idx = FIX2INT(heap_name);
11481
11482 if (NIL_P(arg)) {
11483 arg = rb_hash_new();
11484 }
11485 else if (SYMBOL_P(arg)) {
11486 size_t value = gc_stat_heap_internal(size_pool_idx, arg);
11487 return SIZET2NUM(value);
11488 }
11489 else if (RB_TYPE_P(arg, T_HASH)) {
11490 // ok
11491 }
11492 else {
11493 rb_raise(rb_eTypeError, "non-hash or symbol given");
11494 }
11495
11496 gc_stat_heap_internal(size_pool_idx, arg);
11497 }
11498 else {
11499 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
11500 }
11501
11502 return arg;
11503}
11504
11505static VALUE
11506gc_stress_get(rb_execution_context_t *ec, VALUE self)
11507{
11508 rb_objspace_t *objspace = &rb_objspace;
11509 return ruby_gc_stress_mode;
11510}
11511
11512static void
11513gc_stress_set(rb_objspace_t *objspace, VALUE flag)
11514{
11515 objspace->flags.gc_stressful = RTEST(flag);
11516 objspace->gc_stress_mode = flag;
11517}
11518
11519static VALUE
11520gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
11521{
11522 rb_objspace_t *objspace = &rb_objspace;
11523 gc_stress_set(objspace, flag);
11524 return flag;
11525}
11526
11527VALUE
11528rb_gc_enable(void)
11529{
11530 rb_objspace_t *objspace = &rb_objspace;
11531 return rb_objspace_gc_enable(objspace);
11532}
11533
11534VALUE
11535rb_objspace_gc_enable(rb_objspace_t *objspace)
11536{
11537 int old = dont_gc_val();
11538
11539 dont_gc_off();
11540 return RBOOL(old);
11541}
11542
11543static VALUE
11544gc_enable(rb_execution_context_t *ec, VALUE _)
11545{
11546 return rb_gc_enable();
11547}
11548
11549VALUE
11550rb_gc_disable_no_rest(void)
11551{
11552 rb_objspace_t *objspace = &rb_objspace;
11553 return gc_disable_no_rest(objspace);
11554}
11555
11556static VALUE
11557gc_disable_no_rest(rb_objspace_t *objspace)
11558{
11559 int old = dont_gc_val();
11560 dont_gc_on();
11561 return RBOOL(old);
11562}
11563
11564VALUE
11565rb_gc_disable(void)
11566{
11567 rb_objspace_t *objspace = &rb_objspace;
11568 return rb_objspace_gc_disable(objspace);
11569}
11570
11571VALUE
11572rb_objspace_gc_disable(rb_objspace_t *objspace)
11573{
11574 gc_rest(objspace);
11575 return gc_disable_no_rest(objspace);
11576}
11577
11578static VALUE
11579gc_disable(rb_execution_context_t *ec, VALUE _)
11580{
11581 return rb_gc_disable();
11582}
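/*
 * Illustrative sketch (not part of gc.c): rb_gc_disable()/rb_gc_enable()
 * bracket a section that must not be interrupted by GC. Both return the
 * previous "disabled" state, so the caller can restore whatever was in
 * effect before.
 */
static void
example_without_gc(void (*critical_section)(void))
{
    VALUE was_disabled = rb_gc_disable();   /* also finishes any lazy sweep */
    critical_section();
    if (!RTEST(was_disabled)) rb_gc_enable();
}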
11583
11584#if GC_CAN_COMPILE_COMPACTION
11585/*
11586 * call-seq:
11587 * GC.auto_compact = flag
11588 *
11589 * Updates automatic compaction mode.
11590 *
11591 * When enabled, the compactor will execute on every major collection.
11592 *
11593 * Enabling compaction will degrade performance on major collections.
11594 */
11595static VALUE
11596gc_set_auto_compact(VALUE _, VALUE v)
11597{
11598 GC_ASSERT(GC_COMPACTION_SUPPORTED);
11599
11600 ruby_enable_autocompact = RTEST(v);
11601 return v;
11602}
11603#else
11604# define gc_set_auto_compact rb_f_notimplement
11605#endif
11606
11607#if GC_CAN_COMPILE_COMPACTION
11608/*
11609 * call-seq:
11610 * GC.auto_compact -> true or false
11611 *
11612 * Returns whether or not automatic compaction has been enabled.
11613 */
11614static VALUE
11615gc_get_auto_compact(VALUE _)
11616{
11617 return RBOOL(ruby_enable_autocompact);
11618}
11619#else
11620# define gc_get_auto_compact rb_f_notimplement
11621#endif
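/*
 * Usage note (annotation, not part of gc.c): the two functions above back
 * the Ruby-level methods documented in their call-seq lines, e.g.
 *   GC.auto_compact = true   # compact on every major GC from now on
 *   GC.auto_compact          # => true
 * On builds without compaction support both methods raise
 * NotImplementedError via rb_f_notimplement.
 */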
11622
11623static int
11624get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
11625{
11626 const char *ptr = getenv(name);
11627 ssize_t val;
11628
11629 if (ptr != NULL && *ptr) {
11630 size_t unit = 0;
11631 char *end;
11632#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
11633 val = strtoll(ptr, &end, 0);
11634#else
11635 val = strtol(ptr, &end, 0);
11636#endif
11637 switch (*end) {
11638 case 'k': case 'K':
11639 unit = 1024;
11640 ++end;
11641 break;
11642 case 'm': case 'M':
11643 unit = 1024*1024;
11644 ++end;
11645 break;
11646 case 'g': case 'G':
11647 unit = 1024*1024*1024;
11648 ++end;
11649 break;
11650 }
11651 while (*end && isspace((unsigned char)*end)) end++;
11652 if (*end) {
11653 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
11654 return 0;
11655 }
11656 if (unit > 0) {
11657 if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
11658 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
11659 return 0;
11660 }
11661 val *= unit;
11662 }
11663 if (val > 0 && (size_t)val > lower_bound) {
11664 if (RTEST(ruby_verbose)) {
11665 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
11666 }
11667 *default_value = (size_t)val;
11668 return 1;
11669 }
11670 else {
11671 if (RTEST(ruby_verbose)) {
11672 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
11673 name, val, *default_value, lower_bound);
11674 }
11675 return 0;
11676 }
11677 }
11678 return 0;
11679}
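/*
 * Worked example (annotation, not part of gc.c): RUBY_GC_MALLOC_LIMIT=16m is
 * parsed above as val = 16 with unit = 1024*1024, i.e. 16777216 bytes. The
 * result is applied only when it is strictly greater than lower_bound;
 * otherwise it is ignored, with a notice on stderr when ruby_verbose
 * ($VERBOSE) is set.
 */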
11680
11681static int
11682get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
11683{
11684 const char *ptr = getenv(name);
11685 double val;
11686
11687 if (ptr != NULL && *ptr) {
11688 char *end;
11689 val = strtod(ptr, &end);
11690 if (!*ptr || *end) {
11691 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
11692 return 0;
11693 }
11694
11695 if (accept_zero && val == 0.0) {
11696 goto accept;
11697 }
11698 else if (val <= lower_bound) {
11699 if (RTEST(ruby_verbose)) {
11700 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
11701 name, val, *default_value, lower_bound);
11702 }
11703 }
11704 else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
11705 val > upper_bound) {
11706 if (RTEST(ruby_verbose)) {
11707 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
11708 name, val, *default_value, upper_bound);
11709 }
11710 }
11711 else {
11712 goto accept;
11713 }
11714 }
11715 return 0;
11716
11717 accept:
11718 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
11719 *default_value = val;
11720 return 1;
11721}
11722
11723static void
11724gc_set_initial_pages(rb_objspace_t *objspace)
11725{
11726 gc_rest(objspace);
11727
11728 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
11729 rb_size_pool_t *size_pool = &size_pools[i];
11730
11731 if (gc_params.heap_init_slots > size_pool->eden_heap.total_slots) {
11732 size_t slots = gc_params.heap_init_slots - size_pool->eden_heap.total_slots;
11733 int multiple = size_pool->slot_size / BASE_SLOT_SIZE;
11734 size_pool->allocatable_pages = slots * multiple / HEAP_PAGE_OBJ_LIMIT;
11735 }
11736 else {
11737 /* We already have more slots than heap_init_slots allows, so
11738 * prevent creating more pages. */
11739 size_pool->allocatable_pages = 0;
11740 }
11741 }
11742 heap_pages_expand_sorted(objspace);
11743}
11744
11745/*
11746 * GC tuning environment variables
11747 *
11748 * * RUBY_GC_HEAP_INIT_SLOTS
11749 * - Initial allocation slots.
11750 * * RUBY_GC_HEAP_FREE_SLOTS
 11751 * - Prepare at least this number of slots after GC.
11752 * - Allocate slots if there are not enough slots.
11753 * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
11754 * - Allocate slots by this factor.
11755 * - (next slots number) = (current slots number) * (this factor)
11756 * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
11757 * - Allocation rate is limited to this number of slots.
11758 * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
11759 * - Allocate additional pages when the number of free slots is
11760 * lower than the value (total_slots * (this ratio)).
11761 * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
11762 * - Allocate slots to satisfy this formula:
11763 * free_slots = total_slots * goal_ratio
11764 * - In other words, prepare (total_slots * goal_ratio) free slots.
11765 * - if this value is 0.0, then use RUBY_GC_HEAP_GROWTH_FACTOR directly.
11766 * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
 11767 * - Allow pages to be freed when the number of free slots is
11768 * greater than the value (total_slots * (this ratio)).
11769 * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
11770 * - Do full GC when the number of old objects is more than R * N
11771 * where R is this factor and
11772 * N is the number of old objects just after last full GC.
11773 *
11774 * * obsolete
11775 * * RUBY_FREE_MIN -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
11776 * * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
11777 *
11778 * * RUBY_GC_MALLOC_LIMIT
11779 * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
11780 * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
11781 *
11782 * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
11783 * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
11784 * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
11785 */
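/*
 * Example (annotation, not part of gc.c; the values are illustrative only):
 * starting a process with
 *   RUBY_GC_HEAP_INIT_SLOTS=600000 RUBY_GC_HEAP_GROWTH_FACTOR=1.25 ruby app.rb
 * pre-sizes the eden heaps and slows subsequent heap growth. Both settings
 * are read exactly once at boot, by ruby_gc_set_params() below.
 */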
11786
11787void
11788ruby_gc_set_params(void)
11789{
11790 rb_objspace_t *objspace = &rb_objspace;
11791 /* RUBY_GC_HEAP_FREE_SLOTS */
11792 if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
11793 /* ok */
11794 }
11795
11796 /* RUBY_GC_HEAP_INIT_SLOTS */
11797 if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
11798 gc_set_initial_pages(objspace);
11799 }
11800
11801 get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
11802 get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
11803 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
11804 0.0, 1.0, FALSE);
11805 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
11806 gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
11807 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
11808 gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
11809 get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
11810
11811 if (get_envparam_size("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0)) {
11812 malloc_limit = gc_params.malloc_limit_min;
11813 }
11814 get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
11815 if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
11816 gc_params.malloc_limit_max = SIZE_MAX;
11817 }
11818 get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
11819
11820#if RGENGC_ESTIMATE_OLDMALLOC
11821 if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
11822 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
11823 }
11824 get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
11825 get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
11826#endif
11827}
11828
11829static void
11830reachable_objects_from_callback(VALUE obj)
11831{
11832 rb_ractor_t *cr = GET_RACTOR();
11833 cr->mfd->mark_func(obj, cr->mfd->data);
11834}
11835
11836void
11837rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
11838{
11839 rb_objspace_t *objspace = &rb_objspace;
11840
11841 RB_VM_LOCK_ENTER();
11842 {
11843 if (during_gc) rb_bug("rb_objspace_reachable_objects_from() is not supported while during_gc == true");
11844
11845 if (is_markable_object(objspace, obj)) {
11846 rb_ractor_t *cr = GET_RACTOR();
11847 struct gc_mark_func_data_struct mfd = {
11848 .mark_func = func,
11849 .data = data,
11850 }, *prev_mfd = cr->mfd;
11851
11852 cr->mfd = &mfd;
11853 gc_mark_children(objspace, obj);
11854 cr->mfd = prev_mfd;
11855 }
11856 }
11857 RB_VM_LOCK_LEAVE();
11858}
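/*
 * Illustrative sketch (not part of gc.c): the supplied callback receives each
 * object directly reachable from obj. Counting those references from a C
 * extension might look like this:
 */
static void
example_count_reachable_i(VALUE child, void *data)
{
    (*(size_t *)data)++;    /* child is one directly reachable object */
}
/* size_t n = 0; rb_objspace_reachable_objects_from(obj, example_count_reachable_i, &n); */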
11859
11860struct root_objects_data {
 11861 const char *category;
11862 void (*func)(const char *category, VALUE, void *);
11863 void *data;
11864};
11865
11866static void
11867root_objects_from(VALUE obj, void *ptr)
11868{
11869 const struct root_objects_data *data = (struct root_objects_data *)ptr;
11870 (*data->func)(data->category, obj, data->data);
11871}
11872
11873void
11874rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
11875{
11876 rb_objspace_t *objspace = &rb_objspace;
11877 objspace_reachable_objects_from_root(objspace, func, passing_data);
11878}
11879
11880static void
11881objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
11882{
11883 if (during_gc) rb_bug("objspace_reachable_objects_from_root() is not supported while during_gc == true");
11884
11885 rb_ractor_t *cr = GET_RACTOR();
11886 struct root_objects_data data = {
11887 .func = func,
11888 .data = passing_data,
11889 };
11890 struct gc_mark_func_data_struct mfd = {
11891 .mark_func = root_objects_from,
11892 .data = &data,
11893 }, *prev_mfd = cr->mfd;
11894
11895 cr->mfd = &mfd;
11896 gc_mark_roots(objspace, &data.category);
11897 cr->mfd = prev_mfd;
11898}
11899
11900/*
11901 ------------------------ Extended allocator ------------------------
11902*/
11903
11904struct gc_raise_tag {
 11905 VALUE exc;
11906 const char *fmt;
11907 va_list *ap;
11908};
11909
11910static void *
11911gc_vraise(void *ptr)
11912{
11913 struct gc_raise_tag *argv = ptr;
11914 rb_vraise(argv->exc, argv->fmt, *argv->ap);
11915 UNREACHABLE_RETURN(NULL);
11916}
11917
11918static void
11919gc_raise(VALUE exc, const char *fmt, ...)
11920{
11921 va_list ap;
11922 va_start(ap, fmt);
11923 struct gc_raise_tag argv = {
11924 exc, fmt, &ap,
11925 };
11926
11927 if (ruby_thread_has_gvl_p()) {
 11928 gc_vraise(&argv);
 11929 UNREACHABLE;
11930 }
11931 else if (ruby_native_thread_p()) {
 11932 rb_thread_call_with_gvl(gc_vraise, &argv);
 11933 UNREACHABLE;
11934 }
11935 else {
11936 /* Not in a ruby thread */
11937 fprintf(stderr, "%s", "[FATAL] ");
11938 vfprintf(stderr, fmt, ap);
11939 }
11940
11941 va_end(ap);
11942 abort();
11943}
11944
11945static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
11946
11947static void
11948negative_size_allocation_error(const char *msg)
11949{
11950 gc_raise(rb_eNoMemError, "%s", msg);
11951}
11952
11953static void *
11954ruby_memerror_body(void *dummy)
11955{
11956 rb_memerror();
11957 return 0;
11958}
11959
11960NORETURN(static void ruby_memerror(void));
11962static void
11963ruby_memerror(void)
11964{
11965 if (ruby_thread_has_gvl_p()) {
11966 rb_memerror();
11967 }
11968 else {
11969 if (ruby_native_thread_p()) {
11970 rb_thread_call_with_gvl(ruby_memerror_body, 0);
11971 }
11972 else {
11973 /* no ruby thread */
11974 fprintf(stderr, "[FATAL] failed to allocate memory\n");
11975 }
11976 }
11977 exit(EXIT_FAILURE);
11978}
11979
11980void
11981rb_memerror(void)
11982{
11983 rb_execution_context_t *ec = GET_EC();
11984 rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
11985 VALUE exc;
11986
11987 if (0) {
11988 // Print out pid, sleep, so you can attach debugger to see what went wrong:
11989 fprintf(stderr, "rb_memerror pid=%"PRI_PIDT_PREFIX"d\n", getpid());
11990 sleep(60);
11991 }
11992
11993 if (during_gc) {
11994 // TODO: OMG!! How to implement it?
11995 gc_exit(objspace, gc_enter_event_rb_memerror, NULL);
11996 }
11997
11998 exc = nomem_error;
11999 if (!exc ||
12000 rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
12001 fprintf(stderr, "[FATAL] failed to allocate memory\n");
12002 exit(EXIT_FAILURE);
12003 }
12004 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
12005 rb_ec_raised_clear(ec);
12006 }
12007 else {
12008 rb_ec_raised_set(ec, RAISED_NOMEMORY);
12009 exc = ruby_vm_special_exception_copy(exc);
12010 }
12011 ec->errinfo = exc;
12012 EC_JUMP_TAG(ec, TAG_RAISE);
12013}
12014
12015void *
12016rb_aligned_malloc(size_t alignment, size_t size)
12017{
12018 /* alignment must be a power of 2 */
12019 GC_ASSERT(((alignment - 1) & alignment) == 0);
12020 GC_ASSERT(alignment % sizeof(void*) == 0);
12021
12022 void *res;
12023
12024#if defined __MINGW32__
12025 res = __mingw_aligned_malloc(size, alignment);
12026#elif defined _WIN32
12027 void *_aligned_malloc(size_t, size_t);
12028 res = _aligned_malloc(size, alignment);
12029#elif defined(HAVE_POSIX_MEMALIGN)
12030 if (posix_memalign(&res, alignment, size) != 0) {
12031 return NULL;
12032 }
12033#elif defined(HAVE_MEMALIGN)
12034 res = memalign(alignment, size);
12035#else
12036 char* aligned;
12037 res = malloc(alignment + size + sizeof(void*));
12038 aligned = (char*)res + alignment + sizeof(void*);
12039 aligned -= ((VALUE)aligned & (alignment - 1));
12040 ((void**)aligned)[-1] = res;
12041 res = (void*)aligned;
12042#endif
12043
12044 GC_ASSERT((uintptr_t)res % alignment == 0);
12045
12046 return res;
12047}
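/*
 * Worked example for the fallback branch above (annotation, not part of
 * gc.c): on a 64-bit build with alignment = 0x4000 and malloc() returning
 * res = 0x1008, aligned starts at res + alignment + sizeof(void*) = 0x5010,
 * is rounded down to the previous multiple of the alignment (0x4000), and
 * the original malloc() pointer is stored in the word just below that
 * address (at 0x3ff8) so the fallback branch of rb_aligned_free() can
 * recover it.
 */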
12048
12049static void
12050rb_aligned_free(void *ptr, size_t size)
12051{
12052#if defined __MINGW32__
12053 __mingw_aligned_free(ptr);
12054#elif defined _WIN32
12055 _aligned_free(ptr);
12056#elif defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
12057 free(ptr);
12058#else
12059 free(((void**)ptr)[-1]);
12060#endif
12061}
12062
12063static inline size_t
12064objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
12065{
12066#ifdef HAVE_MALLOC_USABLE_SIZE
12067 return malloc_usable_size(ptr);
12068#else
12069 return hint;
12070#endif
12071}
12072
12073enum memop_type {
12074 MEMOP_TYPE_MALLOC = 0,
12075 MEMOP_TYPE_FREE,
12076 MEMOP_TYPE_REALLOC
12077};
12078
12079static inline void
12080atomic_sub_nounderflow(size_t *var, size_t sub)
12081{
12082 if (sub == 0) return;
12083
12084 while (1) {
12085 size_t val = *var;
12086 if (val < sub) sub = val;
12087 if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
12088 }
12089}
12090
12091static void
12092objspace_malloc_gc_stress(rb_objspace_t *objspace)
12093{
12094 if (ruby_gc_stressful && ruby_native_thread_p()) {
12095 unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
12096 GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
12097
12098 if (gc_stress_full_mark_after_malloc_p()) {
12099 reason |= GPR_FLAG_FULL_MARK;
12100 }
12101 garbage_collect_with_gvl(objspace, reason);
12102 }
12103}
12104
12105static inline bool
12106objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
12107{
12108 if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
12109 mem,
12110 type == MEMOP_TYPE_MALLOC ? "malloc" :
12111 type == MEMOP_TYPE_FREE ? "free " :
12112 type == MEMOP_TYPE_REALLOC ? "realloc": "error",
12113 new_size, old_size);
12114 return false;
12115}
12116
12117static bool
12118objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
12119{
12120 if (new_size > old_size) {
12121 ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
12122#if RGENGC_ESTIMATE_OLDMALLOC
12123 ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
12124#endif
12125 }
12126 else {
12127 atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
12128#if RGENGC_ESTIMATE_OLDMALLOC
12129 atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
12130#endif
12131 }
12132
12133 if (type == MEMOP_TYPE_MALLOC) {
12134 retry:
12135 if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
12136 if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
12137 gc_rest(objspace); /* gc_rest can reduce malloc_increase */
12138 goto retry;
12139 }
12140 garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
12141 }
12142 }
12143
12144#if MALLOC_ALLOCATED_SIZE
12145 if (new_size >= old_size) {
12146 ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
12147 }
12148 else {
12149 size_t dec_size = old_size - new_size;
12150 size_t allocated_size = objspace->malloc_params.allocated_size;
12151
12152#if MALLOC_ALLOCATED_SIZE_CHECK
12153 if (allocated_size < dec_size) {
12154 rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
12155 }
12156#endif
12157 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
12158 }
12159
12160 switch (type) {
12161 case MEMOP_TYPE_MALLOC:
12162 ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
12163 break;
12164 case MEMOP_TYPE_FREE:
12165 {
12166 size_t allocations = objspace->malloc_params.allocations;
12167 if (allocations > 0) {
12168 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
12169 }
12170#if MALLOC_ALLOCATED_SIZE_CHECK
12171 else {
12172 GC_ASSERT(objspace->malloc_params.allocations > 0);
12173 }
12174#endif
12175 }
12176 break;
12177 case MEMOP_TYPE_REALLOC: /* ignore */ break;
12178 }
12179#endif
12180 return true;
12181}
12182
12183#define objspace_malloc_increase(...) \
12184 for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
12185 !malloc_increase_done; \
12186 malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
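/*
 * Annotation (not part of gc.c): objspace_malloc_increase_report() always
 * returns false, so the statement or block attached to this macro runs
 * exactly once; the loop then terminates when
 * objspace_malloc_increase_body() performs the actual accounting and
 * returns true. This lets the macro be used both as a plain statement and
 * with a trailing block, as in objspace_xfree() below.
 */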
12187
12188struct malloc_obj_info { /* 4 words */
12189 size_t size;
12190#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12191 size_t gen;
12192 const char *file;
12193 size_t line;
12194#endif
12195};
12196
12197#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12198const char *ruby_malloc_info_file;
12199int ruby_malloc_info_line;
12200#endif
12201
12202static inline size_t
12203objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
12204{
12205 if (size == 0) size = 1;
12206
12207#if CALC_EXACT_MALLOC_SIZE
12208 size += sizeof(struct malloc_obj_info);
12209#endif
12210
12211 return size;
12212}
12213
12214static bool
12215malloc_during_gc_p(rb_objspace_t *objspace)
12216{
12217 /* malloc is not allowed during GC when we're not using multiple ractors
12218 * (since ractors can run while another thread is sweeping) and when we
12219 * have the GVL (since if we don't have the GVL, we'll try to acquire the
12220 * GVL which will block and ensure the other thread finishes GC). */
12221 return during_gc && !rb_multi_ractor_p() && ruby_thread_has_gvl_p();
12222}
12223
12224static inline void *
12225objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
12226{
12227 size = objspace_malloc_size(objspace, mem, size);
12228 objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
12229
12230#if CALC_EXACT_MALLOC_SIZE
12231 {
12232 struct malloc_obj_info *info = mem;
12233 info->size = size;
12234#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12235 info->gen = objspace->profile.count;
12236 info->file = ruby_malloc_info_file;
12237 info->line = info->file ? ruby_malloc_info_line : 0;
12238#endif
12239 mem = info + 1;
12240 }
12241#endif
12242
12243 return mem;
12244}
12245
12246#if defined(__GNUC__) && RUBY_DEBUG
12247#define RB_BUG_INSTEAD_OF_RB_MEMERROR 1
12248#endif
12249
12250#ifndef RB_BUG_INSTEAD_OF_RB_MEMERROR
12251# define RB_BUG_INSTEAD_OF_RB_MEMERROR 0
12252#endif
12253
12254#define GC_MEMERROR(...) \
12255 ((RB_BUG_INSTEAD_OF_RB_MEMERROR+0) ? rb_bug("" __VA_ARGS__) : rb_memerror())
12256
12257#define TRY_WITH_GC(siz, expr) do { \
12258 const gc_profile_record_flag gpr = \
12259 GPR_FLAG_FULL_MARK | \
12260 GPR_FLAG_IMMEDIATE_MARK | \
12261 GPR_FLAG_IMMEDIATE_SWEEP | \
12262 GPR_FLAG_MALLOC; \
12263 objspace_malloc_gc_stress(objspace); \
12264 \
12265 if (LIKELY((expr))) { \
12266 /* Success on 1st try */ \
12267 } \
12268 else if (!garbage_collect_with_gvl(objspace, gpr)) { \
12269 /* @shyouhei thinks this doesn't happen */ \
12270 GC_MEMERROR("TRY_WITH_GC: could not GC"); \
12271 } \
12272 else if ((expr)) { \
12273 /* Success on 2nd try */ \
12274 } \
12275 else { \
12276 GC_MEMERROR("TRY_WITH_GC: could not allocate:" \
12277 "%"PRIdSIZE" bytes for %s", \
12278 siz, # expr); \
12279 } \
12280 } while (0)
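/*
 * Annotation (not part of gc.c): TRY_WITH_GC(siz, expr) evaluates expr once;
 * if the allocation fails it forces a full, immediate garbage collection and
 * evaluates expr a second time. Failure of the GC itself or of the retry
 * ends in GC_MEMERROR(), i.e. NoMemoryError, or rb_bug() when
 * RB_BUG_INSTEAD_OF_RB_MEMERROR is in effect.
 */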
12281
12282/* these shouldn't be called directly.
12283 * objspace_* functions do not check allocation size.
12284 */
12285static void *
12286objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
12287{
12288 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12289 rb_warn("malloc during GC detected, this could cause crashes if it triggers another GC");
12290#if RGENGC_CHECK_MODE || RUBY_DEBUG
12291 rb_bug("Cannot malloc during GC");
12292#endif
12293 }
12294
12295 void *mem;
12296
12297 size = objspace_malloc_prepare(objspace, size);
12298 TRY_WITH_GC(size, mem = malloc(size));
12299 RB_DEBUG_COUNTER_INC(heap_xmalloc);
12300 return objspace_malloc_fixup(objspace, mem, size);
12301}
12302
12303static inline size_t
12304xmalloc2_size(const size_t count, const size_t elsize)
12305{
12306 return size_mul_or_raise(count, elsize, rb_eArgError);
12307}
12308
12309static void *
12310objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
12311{
12312 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12313 rb_warn("realloc during GC detected, this could cause crashes if it triggers another GC");
12314#if RGENGC_CHECK_MODE || RUBY_DEBUG
12315 rb_bug("Cannot realloc during GC");
12316#endif
12317 }
12318
12319 void *mem;
12320
12321 if (!ptr) return objspace_xmalloc0(objspace, new_size);
12322
12323 /*
12324 * The behavior of realloc(ptr, 0) is implementation defined.
 12325 * Therefore we don't use realloc(ptr, 0) for portability reasons.
12326 * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
12327 */
12328 if (new_size == 0) {
12329 if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
12330 /*
12331 * - OpenBSD's malloc(3) man page says that when 0 is passed, it
12332 * returns a non-NULL pointer to an access-protected memory page.
 12333 * The returned pointer cannot be read / written at all, but is
 12334 * still a valid argument to free().
12335 *
12336 * https://man.openbsd.org/malloc.3
12337 *
 12338 * - Linux's malloc(3) man page says that it _might_ return
12339 * a non-NULL pointer when its argument is 0. That return value
12340 * is safe (and is expected) to be passed to free().
12341 *
12342 * https://man7.org/linux/man-pages/man3/malloc.3.html
12343 *
 12344 * - As I read the implementation, jemalloc's malloc() returns a fully
 12345 * normal 16-byte memory region when its argument is 0.
 12346 *
 12347 * - As I read the implementation, musl libc's malloc() returns a
 12348 * fully normal 32-byte memory region when its argument is 0.
12349 *
12350 * - Other malloc implementations can also return non-NULL.
12351 */
12352 objspace_xfree(objspace, ptr, old_size);
12353 return mem;
12354 }
12355 else {
12356 /*
12357 * It is dangerous to return NULL here, because that could lead to
12358 * RCE. Fallback to 1 byte instead of zero.
12359 *
12360 * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11932
12361 */
12362 new_size = 1;
12363 }
12364 }
12365
12366#if CALC_EXACT_MALLOC_SIZE
12367 {
12368 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
12369 new_size += sizeof(struct malloc_obj_info);
12370 ptr = info;
12371 old_size = info->size;
12372 }
12373#endif
12374
12375 old_size = objspace_malloc_size(objspace, ptr, old_size);
12376 TRY_WITH_GC(new_size, mem = RB_GNUC_EXTENSION_BLOCK(realloc(ptr, new_size)));
12377 new_size = objspace_malloc_size(objspace, mem, new_size);
12378
12379#if CALC_EXACT_MALLOC_SIZE
12380 {
12381 struct malloc_obj_info *info = mem;
12382 info->size = new_size;
12383 mem = info + 1;
12384 }
12385#endif
12386
12387 objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
12388
12389 RB_DEBUG_COUNTER_INC(heap_xrealloc);
12390 return mem;
12391}
12392
12393#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
12394
12395#define MALLOC_INFO_GEN_SIZE 100
12396#define MALLOC_INFO_SIZE_SIZE 10
12397static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
12398static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
12399static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
12400static st_table *malloc_info_file_table;
12401
12402static int
12403mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
12404{
12405 const char *file = (void *)key;
12406 const size_t *data = (void *)val;
12407
12408 fprintf(stderr, "%s\t%"PRIdSIZE"\t%"PRIdSIZE"\n", file, data[0], data[1]);
12409
12410 return ST_CONTINUE;
12411}
12412
12413__attribute__((destructor))
12414void
12415rb_malloc_info_show_results(void)
12416{
12417 int i;
12418
12419 fprintf(stderr, "* malloc_info gen statistics\n");
12420 for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
12421 if (i == MALLOC_INFO_GEN_SIZE-1) {
12422 fprintf(stderr, "more\t%"PRIdSIZE"\t%"PRIdSIZE"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
12423 }
12424 else {
12425 fprintf(stderr, "%d\t%"PRIdSIZE"\t%"PRIdSIZE"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
12426 }
12427 }
12428
12429 fprintf(stderr, "* malloc_info size statistics\n");
12430 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
12431 int s = 16 << i;
12432 fprintf(stderr, "%d\t%"PRIdSIZE"\n", s, malloc_info_size[i]);
12433 }
12434 fprintf(stderr, "more\t%"PRIdSIZE"\n", malloc_info_size[i]);
12435
12436 if (malloc_info_file_table) {
12437 fprintf(stderr, "* malloc_info file statistics\n");
12438 st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
12439 }
12440}
12441#else
12442void
12443rb_malloc_info_show_results(void)
12444{
12445}
12446#endif
12447
12448static void
12449objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
12450{
12451 if (!ptr) {
12452 /*
12453 * ISO/IEC 9899 says "If ptr is a null pointer, no action occurs" since
 12454 * its first version. We had better follow suit.
12455 */
12456 return;
12457 }
12458#if CALC_EXACT_MALLOC_SIZE
12459 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
12460 ptr = info;
12461 old_size = info->size;
12462
12463#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12464 {
12465 int gen = (int)(objspace->profile.count - info->gen);
12466 int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
12467 int i;
12468
12469 malloc_info_gen_cnt[gen_index]++;
12470 malloc_info_gen_size[gen_index] += info->size;
12471
12472 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
12473 size_t s = 16 << i;
12474 if (info->size <= s) {
12475 malloc_info_size[i]++;
12476 goto found;
12477 }
12478 }
12479 malloc_info_size[i]++;
12480 found:;
12481
12482 {
12483 st_data_t key = (st_data_t)info->file, d;
12484 size_t *data;
12485
12486 if (malloc_info_file_table == NULL) {
12487 malloc_info_file_table = st_init_numtable_with_size(1024);
12488 }
12489 if (st_lookup(malloc_info_file_table, key, &d)) {
12490 /* hit */
12491 data = (size_t *)d;
12492 }
12493 else {
12494 data = malloc(xmalloc2_size(2, sizeof(size_t)));
12495 if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
12496 data[0] = data[1] = 0;
12497 st_insert(malloc_info_file_table, key, (st_data_t)data);
12498 }
12499 data[0] ++;
12500 data[1] += info->size;
12501 };
12502 if (0 && gen >= 2) { /* verbose output */
12503 if (info->file) {
12504 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d, pos: %s:%"PRIdSIZE"\n",
12505 info->size, gen, info->file, info->line);
12506 }
12507 else {
12508 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d\n",
12509 info->size, gen);
12510 }
12511 }
12512 }
12513#endif
12514#endif
12515 old_size = objspace_malloc_size(objspace, ptr, old_size);
12516
12517 objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
12518 free(ptr);
12519 ptr = NULL;
12520 RB_DEBUG_COUNTER_INC(heap_xfree);
12521 }
12522}
12523
12524static void *
12525ruby_xmalloc0(size_t size)
12526{
12527 return objspace_xmalloc0(&rb_objspace, size);
12528}
12529
12530void *
12531ruby_xmalloc_body(size_t size)
12532{
12533 if ((ssize_t)size < 0) {
12534 negative_size_allocation_error("too large allocation size");
12535 }
12536 return ruby_xmalloc0(size);
12537}
12538
12539void
12540ruby_malloc_size_overflow(size_t count, size_t elsize)
12541{
 12542 rb_raise(rb_eArgError,
 12543 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
12544 count, elsize);
12545}
12546
12547void *
12548ruby_xmalloc2_body(size_t n, size_t size)
12549{
12550 return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
12551}
12552
12553static void *
12554objspace_xcalloc(rb_objspace_t *objspace, size_t size)
12555{
12556 if (UNLIKELY(malloc_during_gc_p(objspace))) {
12557 rb_warn("calloc during GC detected, this could cause crashes if it triggers another GC");
12558#if RGENGC_CHECK_MODE || RUBY_DEBUG
12559 rb_bug("Cannot calloc during GC");
12560#endif
12561 }
12562
12563 void *mem;
12564
12565 size = objspace_malloc_prepare(objspace, size);
12566 TRY_WITH_GC(size, mem = calloc1(size));
12567 return objspace_malloc_fixup(objspace, mem, size);
12568}
12569
12570void *
12571ruby_xcalloc_body(size_t n, size_t size)
12572{
12573 return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
12574}
12575
12576#ifdef ruby_sized_xrealloc
12577#undef ruby_sized_xrealloc
12578#endif
12579void *
12580ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
12581{
12582 if ((ssize_t)new_size < 0) {
12583 negative_size_allocation_error("too large allocation size");
12584 }
12585
12586 return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
12587}
12588
12589void *
12590ruby_xrealloc_body(void *ptr, size_t new_size)
12591{
12592 return ruby_sized_xrealloc(ptr, new_size, 0);
12593}
12594
12595#ifdef ruby_sized_xrealloc2
12596#undef ruby_sized_xrealloc2
12597#endif
12598void *
12599ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
12600{
12601 size_t len = xmalloc2_size(n, size);
12602 return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
12603}
12604
12605void *
12606ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
12607{
12608 return ruby_sized_xrealloc2(ptr, n, size, 0);
12609}
12610
12611#ifdef ruby_sized_xfree
12612#undef ruby_sized_xfree
12613#endif
12614void
12615ruby_sized_xfree(void *x, size_t size)
12616{
12617 if (x) {
12618 objspace_xfree(&rb_objspace, x, size);
12619 }
12620}
12621
12622void
12623ruby_xfree(void *x)
12624{
12625 ruby_sized_xfree(x, 0);
12626}
12627
12628void *
12629rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
12630{
12631 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12632 return ruby_xmalloc(w);
12633}
12634
12635void *
12636rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
12637{
12638 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12639 return ruby_xcalloc(w, 1);
12640}
12641
12642void *
12643rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
12644{
12645 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
12646 return ruby_xrealloc((void *)p, w);
12647}
12648
12649void *
12650rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
12651{
12652 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
12653 return ruby_xmalloc(u);
12654}
12655
12656void *
12657rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
12658{
12659 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
12660 return ruby_xcalloc(u, 1);
12661}
12662
12663/* Mimic ruby_xmalloc, but without requiring rb_objspace.
12664 * Returns a pointer suitable for ruby_xfree.
12665 */
12666void *
12667ruby_mimmalloc(size_t size)
12668{
12669 void *mem;
12670#if CALC_EXACT_MALLOC_SIZE
12671 size += sizeof(struct malloc_obj_info);
12672#endif
12673 mem = malloc(size);
12674#if CALC_EXACT_MALLOC_SIZE
12675 if (!mem) {
12676 return NULL;
12677 }
12678 else
12679 /* set 0 for consistency of allocated_size/allocations */
12680 {
12681 struct malloc_obj_info *info = mem;
12682 info->size = 0;
12683#if USE_GC_MALLOC_OBJ_INFO_DETAILS
12684 info->gen = 0;
12685 info->file = NULL;
12686 info->line = 0;
12687#endif
12688 mem = info + 1;
12689 }
12690#endif
12691 return mem;
12692}
12693
12694void
12695ruby_mimfree(void *ptr)
12696{
12697#if CALC_EXACT_MALLOC_SIZE
12698 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
12699 ptr = info;
12700#endif
12701 free(ptr);
12702}
12703
12704void *
12705rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
12706{
12707 void *ptr;
12708 VALUE imemo;
12709 rb_imemo_tmpbuf_t *tmpbuf;
12710
 12711 /* Keep this order: allocate an empty imemo first, then xmalloc, to
 12712 * avoid a potential memory leak */
12713 imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
12714 *store = imemo;
12715 ptr = ruby_xmalloc0(size);
12716 tmpbuf = (rb_imemo_tmpbuf_t *)imemo;
12717 tmpbuf->ptr = ptr;
12718 tmpbuf->cnt = cnt;
12719 return ptr;
12720}
12721
12722void *
12723rb_alloc_tmp_buffer(volatile VALUE *store, long len)
12724{
12725 long cnt;
12726
12727 if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
12728 rb_raise(rb_eArgError, "negative buffer size (or size too big)");
12729 }
12730
12731 return rb_alloc_tmp_buffer_with_count(store, len, cnt);
12732}
12733
12734void
12735rb_free_tmp_buffer(volatile VALUE *store)
12736{
12737 rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t*)ATOMIC_VALUE_EXCHANGE(*store, 0);
12738 if (s) {
12739 void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
12740 s->cnt = 0;
12741 ruby_xfree(ptr);
12742 }
12743}
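/*
 * Illustrative sketch (not part of gc.c): extensions normally reach these
 * helpers through the ALLOCV_N()/ALLOCV_END() macros from ruby.h, which fall
 * back to rb_alloc_tmp_buffer_with_count() for large sizes and keep the
 * buffer alive via the `store' VALUE until it is released.
 */
static void
example_tmp_buffer(long n)
{
    VALUE store = 0;
    VALUE *buf = ALLOCV_N(VALUE, store, n);
    /* ... use buf[0 .. n-1] ... */
    ALLOCV_END(store);
}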
12744
12745#if MALLOC_ALLOCATED_SIZE
12746/*
12747 * call-seq:
12748 * GC.malloc_allocated_size -> Integer
12749 *
12750 * Returns the size of memory allocated by malloc().
12751 *
12752 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
12753 */
12754
12755static VALUE
12756gc_malloc_allocated_size(VALUE self)
12757{
12758 return UINT2NUM(rb_objspace.malloc_params.allocated_size);
12759}
12760
12761/*
12762 * call-seq:
12763 * GC.malloc_allocations -> Integer
12764 *
12765 * Returns the number of malloc() allocations.
12766 *
12767 * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
12768 */
12769
12770static VALUE
12771gc_malloc_allocations(VALUE self)
12772{
12773 return UINT2NUM(rb_objspace.malloc_params.allocations);
12774}
12775#endif
12776
12777void
12778rb_gc_adjust_memory_usage(ssize_t diff)
12779{
12780 rb_objspace_t *objspace = &rb_objspace;
12781 if (diff > 0) {
12782 objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
12783 }
12784 else if (diff < 0) {
12785 objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
12786 }
12787}
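/*
 * Illustrative sketch (not part of gc.c): an extension that acquires memory
 * outside of ruby_xmalloc (for example via mmap) can report it so the
 * malloc_limit accounting sees the pressure, and undo the report on release:
 *
 *   rb_gc_adjust_memory_usage((ssize_t)mapped_bytes);    // after acquiring
 *   rb_gc_adjust_memory_usage(-(ssize_t)mapped_bytes);   // before releasing
 */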
12788
12789/*
12790 ------------------------------ WeakMap ------------------------------
12791*/
12792
12793struct weakmap {
12794 st_table *obj2wmap; /* obj -> [ref,...] */
12795 st_table *wmap2obj; /* ref -> obj */
12796 VALUE final;
12797};
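/*
 * Annotation (not part of gc.c): `final' holds the lambda wrapping
 * wmap_finalize(), created in wmap_allocate() below. wmap_aset() attaches it
 * as a finalizer to both key and value, so when either side is collected the
 * corresponding entries are purged from obj2wmap and wmap2obj.
 */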
12798
12799#define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
12800
12801#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
12802static int
12803wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
12804{
12805 rb_objspace_t *objspace = (rb_objspace_t *)arg;
12806 VALUE obj = (VALUE)val;
12807 if (!is_live_object(objspace, obj)) return ST_DELETE;
12808 return ST_CONTINUE;
12809}
12810#endif
12811
12812static void
12813wmap_compact(void *ptr)
12814{
12815 struct weakmap *w = ptr;
12816 if (w->wmap2obj) rb_gc_update_tbl_refs(w->wmap2obj);
12817 if (w->obj2wmap) rb_gc_update_tbl_refs(w->obj2wmap);
12818 w->final = rb_gc_location(w->final);
12819}
12820
12821static void
12822wmap_mark(void *ptr)
12823{
12824 struct weakmap *w = ptr;
12825#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
12826 if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
12827#endif
12828 rb_gc_mark_movable(w->final);
12829}
12830
12831static int
12832wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
12833{
12834 VALUE *ptr = (VALUE *)val;
12835 ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
12836 return ST_CONTINUE;
12837}
12838
12839static void
12840wmap_free(void *ptr)
12841{
12842 struct weakmap *w = ptr;
12843 st_foreach(w->obj2wmap, wmap_free_map, 0);
12844 st_free_table(w->obj2wmap);
12845 st_free_table(w->wmap2obj);
12846 xfree(w);
12847}
12848
12849static int
12850wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
12851{
12852 VALUE *ptr = (VALUE *)val;
12853 *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
12854 return ST_CONTINUE;
12855}
12856
12857static size_t
12858wmap_memsize(const void *ptr)
12859{
12860 size_t size;
12861 const struct weakmap *w = ptr;
12862 size = sizeof(*w);
12863 size += st_memsize(w->obj2wmap);
12864 size += st_memsize(w->wmap2obj);
12865 st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
12866 return size;
12867}
12868
12869static const rb_data_type_t weakmap_type = {
12870 "weakmap",
12871 {
12872 wmap_mark,
12873 wmap_free,
12874 wmap_memsize,
12875 wmap_compact,
12876 },
12877 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
12878};
12879
12880static VALUE wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self));
12881
12882static VALUE
12883wmap_allocate(VALUE klass)
12884{
12885 struct weakmap *w;
12886 VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
12887 w->obj2wmap = rb_init_identtable();
12888 w->wmap2obj = rb_init_identtable();
12889 w->final = rb_func_lambda_new(wmap_finalize, obj, 1, 1);
12890 return obj;
12891}
12892
12893static int
12894wmap_live_p(rb_objspace_t *objspace, VALUE obj)
12895{
12896 if (SPECIAL_CONST_P(obj)) return TRUE;
12897 /* If is_pointer_to_heap returns false, the page could be in the tomb heap
12898 * or have already been freed. */
12899 if (!is_pointer_to_heap(objspace, (void *)obj)) return FALSE;
12900
12901 void *poisoned = asan_unpoison_object_temporary(obj);
12902
12903 enum ruby_value_type t = BUILTIN_TYPE(obj);
12904 int ret = (!(t == T_NONE || t >= T_FIXNUM || t == T_ICLASS) &&
12905 is_live_object(objspace, obj));
12906
12907 if (poisoned) {
12908 asan_poison_object(obj);
12909 }
12910
12911 return ret;
12912}
12913
12914static int
12915wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
12916{
12917 VALUE wmap, *ptr, size, i, j;
12918 if (!existing) return ST_STOP;
12919 wmap = (VALUE)arg, ptr = (VALUE *)*value;
12920 for (i = j = 1, size = ptr[0]; i <= size; ++i) {
12921 if (ptr[i] != wmap) {
12922 ptr[j++] = ptr[i];
12923 }
12924 }
12925 if (j == 1) {
12926 ruby_sized_xfree(ptr, i * sizeof(VALUE));
12927 return ST_DELETE;
12928 }
12929 if (j < i) {
12930 SIZED_REALLOC_N(ptr, VALUE, j + 1, i);
12931 ptr[0] = j;
12932 *value = (st_data_t)ptr;
12933 }
12934 return ST_CONTINUE;
12935}
12936
12937/* :nodoc: */
12938static VALUE
12939wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self))
12940{
12941 st_data_t orig, wmap, data;
12942 VALUE obj, *rids, i, size;
12943 struct weakmap *w;
12944
12945 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
12946 /* Get reference from object id. */
12947 if (UNDEF_P(obj = id2ref_obj_tbl(&rb_objspace, objid))) {
12948 rb_bug("wmap_finalize: objid is not found.");
12949 }
12950
12951 /* obj is original referenced object and/or weak reference. */
12952 orig = (st_data_t)obj;
12953 if (st_delete(w->obj2wmap, &orig, &data)) {
12954 rids = (VALUE *)data;
12955 size = *rids++;
12956 for (i = 0; i < size; ++i) {
12957 wmap = (st_data_t)rids[i];
12958 st_delete(w->wmap2obj, &wmap, NULL);
12959 }
12960 ruby_sized_xfree((VALUE *)data, (size + 1) * sizeof(VALUE));
12961 }
12962
12963 wmap = (st_data_t)obj;
12964 if (st_delete(w->wmap2obj, &wmap, &orig)) {
12965 wmap = (st_data_t)obj;
12966 st_update(w->obj2wmap, orig, wmap_final_func, wmap);
12967 }
12968 return self;
12969}
12970
12971struct wmap_iter_arg {
 12972 rb_objspace_t *objspace;
12973 VALUE value;
12974};
12975
12976static VALUE
12977wmap_inspect_append(rb_objspace_t *objspace, VALUE str, VALUE obj)
12978{
12979 if (SPECIAL_CONST_P(obj)) {
12980 return rb_str_append(str, rb_inspect(obj));
12981 }
12982 else if (wmap_live_p(objspace, obj)) {
12983 return rb_str_append(str, rb_any_to_s(obj));
12984 }
12985 else {
12986 return rb_str_catf(str, "#<collected:%p>", (void*)obj);
12987 }
12988}
12989
12990static int
12991wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
12992{
12993 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
12994 rb_objspace_t *objspace = argp->objspace;
12995 VALUE str = argp->value;
12996 VALUE k = (VALUE)key, v = (VALUE)val;
12997
12998 if (RSTRING_PTR(str)[0] == '#') {
12999 rb_str_cat2(str, ", ");
13000 }
13001 else {
13002 rb_str_cat2(str, ": ");
13003 RSTRING_PTR(str)[0] = '#';
13004 }
13005 wmap_inspect_append(objspace, str, k);
13006 rb_str_cat2(str, " => ");
13007 wmap_inspect_append(objspace, str, v);
13008
13009 return ST_CONTINUE;
13010}
13011
13012static VALUE
13013wmap_inspect(VALUE self)
13014{
13015 VALUE str;
13016 VALUE c = rb_class_name(CLASS_OF(self));
13017 struct weakmap *w;
13018 struct wmap_iter_arg args;
13019
13020 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13021 str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);
13022 if (w->wmap2obj) {
13023 args.objspace = &rb_objspace;
13024 args.value = str;
13025 st_foreach(w->wmap2obj, wmap_inspect_i, (st_data_t)&args);
13026 }
13027 RSTRING_PTR(str)[0] = '#';
13028 rb_str_cat2(str, ">");
13029 return str;
13030}
13031
13032static inline bool
13033wmap_live_entry_p(rb_objspace_t *objspace, st_data_t key, st_data_t val)
13034{
13035 return wmap_live_p(objspace, (VALUE)key) && wmap_live_p(objspace, (VALUE)val);
13036}
13037
13038static int
13039wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
13040{
13041 rb_objspace_t *objspace = (rb_objspace_t *)arg;
13042
13043 if (wmap_live_entry_p(objspace, key, val)) {
13044 rb_yield_values(2, (VALUE)key, (VALUE)val);
13045 return ST_CONTINUE;
13046 }
13047 else {
13048 return ST_DELETE;
13049 }
13050}
13051
13052/* Iterates over keys and objects in a weakly referenced object */
13053static VALUE
13054wmap_each(VALUE self)
13055{
13056 struct weakmap *w;
13057 rb_objspace_t *objspace = &rb_objspace;
13058
13059 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13060 st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
13061 return self;
13062}
13063
13064static int
13065wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
13066{
13067 rb_objspace_t *objspace = (rb_objspace_t *)arg;
13068
13069 if (wmap_live_entry_p(objspace, key, val)) {
13070 rb_yield((VALUE)key);
13071 return ST_CONTINUE;
13072 }
13073 else {
13074 return ST_DELETE;
13075 }
13076}
13077
13078/* Iterates over the keys in a weakly referenced object */
13079static VALUE
13080wmap_each_key(VALUE self)
13081{
13082 struct weakmap *w;
13083 rb_objspace_t *objspace = &rb_objspace;
13084
13085 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13086 st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
13087 return self;
13088}
13089
13090static int
13091wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
13092{
13093 rb_objspace_t *objspace = (rb_objspace_t *)arg;
13094
13095 if (wmap_live_entry_p(objspace, key, val)) {
13096 rb_yield((VALUE)val);
13097 return ST_CONTINUE;
13098 }
13099 else {
13100 return ST_DELETE;
13101 }
13102}
13103
13104/* Iterates over the values in a weakly referenced object */
13105static VALUE
13106wmap_each_value(VALUE self)
13107{
13108 struct weakmap *w;
13109 rb_objspace_t *objspace = &rb_objspace;
13110
13111 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13112 st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
13113 return self;
13114}
13115
13116static int
13117wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
13118{
13119 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
13120 rb_objspace_t *objspace = argp->objspace;
13121 VALUE ary = argp->value;
13122
13123 if (wmap_live_entry_p(objspace, key, val)) {
13124 rb_ary_push(ary, (VALUE)key);
13125 return ST_CONTINUE;
13126 }
13127 else {
13128 return ST_DELETE;
13129 }
13130}
13131
13132/* Returns an Array of the keys in a weakly referenced object */
13133static VALUE
13134wmap_keys(VALUE self)
13135{
13136 struct weakmap *w;
13137 struct wmap_iter_arg args;
13138
13139 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13140 args.objspace = &rb_objspace;
13141 args.value = rb_ary_new();
13142 st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
13143 return args.value;
13144}
13145
13146static int
13147wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
13148{
13149 struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
13150 rb_objspace_t *objspace = argp->objspace;
13151 VALUE ary = argp->value;
13152
13153 if (wmap_live_entry_p(objspace, key, val)) {
13154 rb_ary_push(ary, (VALUE)val);
13155 return ST_CONTINUE;
13156 }
13157 else {
13158 return ST_DELETE;
13159 }
13160}
13161
13162/* Returns an Array of the values in a weakly referenced object */
13163static VALUE
13164wmap_values(VALUE self)
13165{
13166 struct weakmap *w;
13167 struct wmap_iter_arg args;
13168
13169 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13170 args.objspace = &rb_objspace;
13171 args.value = rb_ary_new();
13172 st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
13173 return args.value;
13174}
13175
13176static int
13177wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
13178{
13179 VALUE size, *ptr, *optr;
13180 if (existing) {
13181 size = (ptr = optr = (VALUE *)*val)[0];
13182 ++size;
13183 SIZED_REALLOC_N(ptr, VALUE, size + 1, size);
13184 }
13185 else {
13186 optr = 0;
13187 size = 1;
13188 ptr = ruby_xmalloc0(2 * sizeof(VALUE));
13189 }
13190 ptr[0] = size;
13191 ptr[size] = (VALUE)arg;
13192 if (ptr == optr) return ST_STOP;
13193 *val = (st_data_t)ptr;
13194 return ST_CONTINUE;
13195}
13196
13197/* Creates a weak reference from the given key to the given value */
13198static VALUE
13199wmap_aset(VALUE self, VALUE key, VALUE value)
13200{
13201 struct weakmap *w;
13202
13203 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13204 if (FL_ABLE(value)) {
13205 define_final0(value, w->final);
13206 }
13207 if (FL_ABLE(key)) {
13208 define_final0(key, w->final);
13209 }
13210
13211 st_update(w->obj2wmap, (st_data_t)value, wmap_aset_update, key);
13212 st_insert(w->wmap2obj, (st_data_t)key, (st_data_t)value);
13213 return nonspecial_obj_id(value);
13214}
13215
13216/* Retrieves a weakly referenced object with the given key */
13217static VALUE
13218wmap_lookup(VALUE self, VALUE key)
13219{
13220 st_data_t data;
13221 VALUE obj;
13222 struct weakmap *w;
13223 rb_objspace_t *objspace = &rb_objspace;
13224 GC_ASSERT(wmap_live_p(objspace, key));
13225
13226 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13227 if (!st_lookup(w->wmap2obj, (st_data_t)key, &data)) return Qundef;
13228 obj = (VALUE)data;
13229 if (!wmap_live_p(objspace, obj)) return Qundef;
13230 return obj;
13231}
13232
13233/* Retrieves a weakly referenced object with the given key */
13234static VALUE
13235wmap_aref(VALUE self, VALUE key)
13236{
13237 VALUE obj = wmap_lookup(self, key);
13238 return !UNDEF_P(obj) ? obj : Qnil;
13239}
13240
13241/* Returns +true+ if +key+ is registered */
13242static VALUE
13243wmap_has_key(VALUE self, VALUE key)
13244{
13245 return RBOOL(!UNDEF_P(wmap_lookup(self, key)));
13246}
13247
13248/* Returns the number of referenced objects */
13249static VALUE
13250wmap_size(VALUE self)
13251{
13252 struct weakmap *w;
13253 st_index_t n;
13254
13255 TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
13256 n = w->wmap2obj->num_entries;
13257#if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
13258 return ULONG2NUM(n);
13259#else
13260 return ULL2NUM(n);
13261#endif
13262}
13263
13264/*
13265 ------------------------------ GC profiler ------------------------------
13266*/
13267
13268#define GC_PROFILE_RECORD_DEFAULT_SIZE 100
13269
13270static bool
13271current_process_time(struct timespec *ts)
13272{
13273#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
13274 {
13275 static int try_clock_gettime = 1;
13276 if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
13277 return true;
13278 }
13279 else {
13280 try_clock_gettime = 0;
13281 }
13282 }
13283#endif
13284
13285#ifdef RUSAGE_SELF
13286 {
13287 struct rusage usage;
13288 struct timeval time;
13289 if (getrusage(RUSAGE_SELF, &usage) == 0) {
13290 time = usage.ru_utime;
13291 ts->tv_sec = time.tv_sec;
13292 ts->tv_nsec = (int32_t)time.tv_usec * 1000;
13293 return true;
13294 }
13295 }
13296#endif
13297
13298#ifdef _WIN32
13299 {
13300 FILETIME creation_time, exit_time, kernel_time, user_time;
13301 ULARGE_INTEGER ui;
13302
13303 if (GetProcessTimes(GetCurrentProcess(),
13304 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
13305 memcpy(&ui, &user_time, sizeof(FILETIME));
13306#define PER100NSEC (uint64_t)(1000 * 1000 * 10)
13307 ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
13308 ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);
13309 return true;
13310 }
13311 }
13312#endif
13313
13314 return false;
13315}
13316
13317static double
13318getrusage_time(void)
13319{
13320 struct timespec ts;
13321 if (current_process_time(&ts)) {
13322 return ts.tv_sec + ts.tv_nsec * 1e-9;
13323 }
13324 else {
13325 return 0.0;
13326 }
13327}
13328
13329
13330static inline void
13331gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)
13332{
13333 if (objspace->profile.run) {
13334 size_t index = objspace->profile.next_index;
13335 gc_profile_record *record;
13336
13337 /* create new record */
13338 objspace->profile.next_index++;
13339
13340 if (!objspace->profile.records) {
13341 objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
13342 objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
13343 }
13344 if (index >= objspace->profile.size) {
13345 void *ptr;
13346 objspace->profile.size += 1000;
13347 ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
13348 if (!ptr) rb_memerror();
13349 objspace->profile.records = ptr;
13350 }
13351 if (!objspace->profile.records) {
13352 rb_bug("gc_profile malloc or realloc miss");
13353 }
13354 record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
13355 MEMZERO(record, gc_profile_record, 1);
13356
13357 /* setup before-GC parameter */
13358 record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
13359#if MALLOC_ALLOCATED_SIZE
13360 record->allocated_size = malloc_allocated_size;
13361#endif
13362#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
13363#ifdef RUSAGE_SELF
13364 {
13365 struct rusage usage;
13366 if (getrusage(RUSAGE_SELF, &usage) == 0) {
13367 record->maxrss = usage.ru_maxrss;
13368 record->minflt = usage.ru_minflt;
13369 record->majflt = usage.ru_majflt;
13370 }
13371 }
13372#endif
13373#endif
13374 }
13375}
13376
13377static inline void
13378gc_prof_timer_start(rb_objspace_t *objspace)
13379{
13380 if (gc_prof_enabled(objspace)) {
13381 gc_profile_record *record = gc_prof_record(objspace);
13382#if GC_PROFILE_MORE_DETAIL
13383 record->prepare_time = objspace->profile.prepare_time;
13384#endif
13385 record->gc_time = 0;
13386 record->gc_invoke_time = getrusage_time();
13387 }
13388}
13389
13390static double
13391elapsed_time_from(double time)
13392{
13393 double now = getrusage_time();
13394 if (now > time) {
13395 return now - time;
13396 }
13397 else {
13398 return 0;
13399 }
13400}
13401
13402static inline void
13403gc_prof_timer_stop(rb_objspace_t *objspace)
13404{
13405 if (gc_prof_enabled(objspace)) {
13406 gc_profile_record *record = gc_prof_record(objspace);
13407 record->gc_time = elapsed_time_from(record->gc_invoke_time);
13408 record->gc_invoke_time -= objspace->profile.invoke_time;
13409 }
13410}
13411
13412#define RUBY_DTRACE_GC_HOOK(name) \
13413 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
13414static inline void
13415gc_prof_mark_timer_start(rb_objspace_t *objspace)
13416{
13417 RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
13418#if GC_PROFILE_MORE_DETAIL
13419 if (gc_prof_enabled(objspace)) {
13420 gc_prof_record(objspace)->gc_mark_time = getrusage_time();
13421 }
13422#endif
13423}
13424
13425static inline void
13426gc_prof_mark_timer_stop(rb_objspace_t *objspace)
13427{
13428 RUBY_DTRACE_GC_HOOK(MARK_END);
13429#if GC_PROFILE_MORE_DETAIL
13430 if (gc_prof_enabled(objspace)) {
13431 gc_profile_record *record = gc_prof_record(objspace);
13432 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
13433 }
13434#endif
13435}
13436
13437static inline void
13438gc_prof_sweep_timer_start(rb_objspace_t *objspace)
13439{
13440 RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
13441 if (gc_prof_enabled(objspace)) {
13442 gc_profile_record *record = gc_prof_record(objspace);
13443
13444 if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
13445 objspace->profile.gc_sweep_start_time = getrusage_time();
13446 }
13447 }
13448}
13449
13450static inline void
13451gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
13452{
13453 RUBY_DTRACE_GC_HOOK(SWEEP_END);
13454
13455 if (gc_prof_enabled(objspace)) {
13456 double sweep_time;
13457 gc_profile_record *record = gc_prof_record(objspace);
13458
13459 if (record->gc_time > 0) {
13460 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
13461 /* need to accumulate GC time for lazy sweep after gc() */
13462 record->gc_time += sweep_time;
13463 }
13464 else if (GC_PROFILE_MORE_DETAIL) {
13465 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
13466 }
13467
13468#if GC_PROFILE_MORE_DETAIL
13469 record->gc_sweep_time += sweep_time;
13470 if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
13471#endif
13472 if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
13473 }
13474}
13475
13476static inline void
13477gc_prof_set_malloc_info(rb_objspace_t *objspace)
13478{
13479#if GC_PROFILE_MORE_DETAIL
13480 if (gc_prof_enabled(objspace)) {
13481 gc_profile_record *record = gc_prof_record(objspace);
13482 record->allocate_increase = malloc_increase;
13483 record->allocate_limit = malloc_limit;
13484 }
13485#endif
13486}
13487
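/*
 * gc_prof_set_heap_info() derives the heap statistics from counters taken at
 * GC start: live = objects allocated up to GC start minus objects freed so
 * far, total = pages in use * HEAP_PAGE_OBJ_LIMIT, and both are converted to
 * bytes by multiplying with sizeof(RVALUE).
 */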
13488static inline void
13489gc_prof_set_heap_info(rb_objspace_t *objspace)
13490{
13491 if (gc_prof_enabled(objspace)) {
13492 gc_profile_record *record = gc_prof_record(objspace);
13493 size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
13494 size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
13495
13496#if GC_PROFILE_MORE_DETAIL
13497 record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
13498 record->heap_live_objects = live;
13499 record->heap_free_objects = total - live;
13500#endif
13501
13502 record->heap_total_objects = total;
13503 record->heap_use_size = live * sizeof(RVALUE);
13504 record->heap_total_size = total * sizeof(RVALUE);
13505 }
13506}
13507
13508/*
13509 * call-seq:
13510 * GC::Profiler.clear -> nil
13511 *
13512 * Clears the \GC profiler data.
13513 *
13514 */
13515
13516static VALUE
13517gc_profile_clear(VALUE _)
13518{
13519 rb_objspace_t *objspace = &rb_objspace;
13520 void *p = objspace->profile.records;
13521 objspace->profile.records = NULL;
13522 objspace->profile.size = 0;
13523 objspace->profile.next_index = 0;
13524 objspace->profile.current_record = 0;
13525 if (p) {
13526 free(p);
13527 }
13528 return Qnil;
13529}
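/*
 * A minimal usage sketch: discarding the records gathered so far before
 * starting a fresh measurement.
 *
 *    GC::Profiler.enable
 *    GC.start
 *    GC::Profiler.clear
 *    GC::Profiler.raw_data   #=> []
 */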
13530
13531/*
13532 * call-seq:
13533 * GC::Profiler.raw_data -> [Hash, ...]
13534 *
13535 * Returns an Array of individual raw profile data Hashes ordered
13536 * from earliest to latest by +:GC_INVOKE_TIME+.
13537 *
13538 * For example:
13539 *
13540 * [
13541 * {
13542 * :GC_TIME=>1.3000000000000858e-05,
13543 * :GC_INVOKE_TIME=>0.010634999999999999,
13544 * :HEAP_USE_SIZE=>289640,
13545 * :HEAP_TOTAL_SIZE=>588960,
13546 * :HEAP_TOTAL_OBJECTS=>14724,
13547 * :GC_IS_MARKED=>false
13548 * },
13549 * # ...
13550 * ]
13551 *
13552 * The keys mean:
13553 *
13554 * +:GC_TIME+::
13555 * Time elapsed in seconds for this GC run
13556 * +:GC_INVOKE_TIME+::
13557 * Time elapsed in seconds from startup to when the GC was invoked
13558 * +:HEAP_USE_SIZE+::
13559 * Total bytes of heap used
13560 * +:HEAP_TOTAL_SIZE+::
13561 * Total size of heap in bytes
13562 * +:HEAP_TOTAL_OBJECTS+::
13563 * Total number of objects
13564 * +:GC_IS_MARKED+::
13565 * Returns +true+ if the GC is in mark phase
13566 *
13567 * If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
13568 * to the following hash keys:
13569 *
13570 * +:GC_MARK_TIME+::
13571 * +:GC_SWEEP_TIME+::
13572 * +:ALLOCATE_INCREASE+::
13573 * +:ALLOCATE_LIMIT+::
13574 * +:HEAP_USE_PAGES+::
13575 * +:HEAP_LIVE_OBJECTS+::
13576 * +:HEAP_FREE_OBJECTS+::
13577 * +:HAVE_FINALIZE+::
13578 *
13579 */
13580
13581static VALUE
13582gc_profile_record_get(VALUE _)
13583{
13584 VALUE prof;
13585 VALUE gc_profile = rb_ary_new();
13586 size_t i;
13587 rb_objspace_t *objspace = (&rb_objspace);
13588
13589 if (!objspace->profile.run) {
13590 return Qnil;
13591 }
13592
13593 for (i = 0; i < objspace->profile.next_index; i++) {
13594 gc_profile_record *record = &objspace->profile.records[i];
13595
13596 prof = rb_hash_new();
13597 rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(0, rb_hash_new(), record->flags));
13598 rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
13599 rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
13600 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
13601 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
13602 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
13603 rb_hash_aset(prof, ID2SYM(rb_intern("MOVED_OBJECTS")), SIZET2NUM(record->moved_objects));
13604 rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
13605#if GC_PROFILE_MORE_DETAIL
13606 rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
13607 rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
13608 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
13609 rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
13610 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
13611 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
13612 rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
13613
13614 rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
13615 rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
13616
13617 rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), RBOOL(record->flags & GPR_FLAG_HAVE_FINALIZE));
13618#endif
13619
13620#if RGENGC_PROFILE > 0
13621 rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
13622 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
13623 rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
13624#endif
13625 rb_ary_push(gc_profile, prof);
13626 }
13627
13628 return gc_profile;
13629}
13630
13631#if GC_PROFILE_MORE_DETAIL
13632#define MAJOR_REASON_MAX 0x10
13633
13634static char *
13635gc_profile_dump_major_reason(unsigned int flags, char *buff)
13636{
13637 unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
13638 int i = 0;
13639
13640 if (reason == GPR_FLAG_NONE) {
13641 buff[0] = '-';
13642 buff[1] = 0;
13643 }
13644 else {
13645#define C(x, s) \
13646 if (reason & GPR_FLAG_MAJOR_BY_##x) { \
13647 buff[i++] = #x[0]; \
13648 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
13649 buff[i] = 0; \
13650 }
13651 C(NOFREE, N);
13652 C(OLDGEN, O);
13653 C(SHADY, S);
13654#if RGENGC_ESTIMATE_OLDMALLOC
13655 C(OLDMALLOC, M);
13656#endif
13657#undef C
13658 }
13659 return buff;
13660}
13661#endif
13662
13663static void
13664gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
13665{
13666 rb_objspace_t *objspace = &rb_objspace;
13667 size_t count = objspace->profile.next_index;
13668#ifdef MAJOR_REASON_MAX
13669 char reason_str[MAJOR_REASON_MAX];
13670#endif
13671
13672 if (objspace->profile.run && count /* > 1 */) {
13673 size_t i;
13674 const gc_profile_record *record;
13675
13676 append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
13677 append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
13678
13679 for (i = 0; i < count; i++) {
13680 record = &objspace->profile.records[i];
13681 append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
13682 i+1, record->gc_invoke_time, record->heap_use_size,
13683 record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
13684 }
13685
13686#if GC_PROFILE_MORE_DETAIL
13687 const char *str = "\n\n" \
13688 "More detail.\n" \
13689 "Prepare Time = Previous GC's rest sweep time\n"
13690 "Index Flags Allocate Inc. Allocate Limit"
13691#if CALC_EXACT_MALLOC_SIZE
13692 " Allocated Size"
13693#endif
13694 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
13695#if RGENGC_PROFILE
13696 " OldgenObj RemNormObj RemShadObj"
13697#endif
13698#if GC_PROFILE_DETAIL_MEMORY
13699 " MaxRSS(KB) MinorFLT MajorFLT"
13700#endif
13701 "\n";
13702 append(out, rb_str_new_cstr(str));
13703
13704 for (i = 0; i < count; i++) {
13705 record = &objspace->profile.records[i];
13706 append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
13707#if CALC_EXACT_MALLOC_SIZE
13708 " %15"PRIuSIZE
13709#endif
13710 " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
13711#if RGENGC_PROFILE
13712 "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
13713#endif
13714#if GC_PROFILE_DETAIL_MEMORY
13715 "%11ld %8ld %8ld"
13716#endif
13717
13718 "\n",
13719 i+1,
13720 gc_profile_dump_major_reason(record->flags, reason_str),
13721 (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
13722 (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
13723 (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
13724 (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
13725 (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
13726 (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
13727 record->allocate_increase, record->allocate_limit,
13728#if CALC_EXACT_MALLOC_SIZE
13729 record->allocated_size,
13730#endif
13731 record->heap_use_pages,
13732 record->gc_mark_time*1000,
13733 record->gc_sweep_time*1000,
13734 record->prepare_time*1000,
13735
13736 record->heap_live_objects,
13737 record->heap_free_objects,
13738 record->removing_objects,
13739 record->empty_objects
13740#if RGENGC_PROFILE
13741 ,
13742 record->old_objects,
13743 record->remembered_normal_objects,
13744 record->remembered_shady_objects
13745#endif
13746#if GC_PROFILE_DETAIL_MEMORY
13747 ,
13748 record->maxrss / 1024,
13749 record->minflt,
13750 record->majflt
13751#endif
13752
13753 ));
13754 }
13755#endif
13756 }
13757}
13758
13759/*
13760 * call-seq:
13761 * GC::Profiler.result -> String
13762 *
13763 * Returns a profile data report such as:
13764 *
13765 * GC 1 invokes.
13766 * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC time(ms)
13767 * 1 0.012 159240 212940 10647 0.00000000000001530000
13768 */
13769
13770static VALUE
13771gc_profile_result(VALUE _)
13772{
13773 VALUE str = rb_str_buf_new(0);
13774 gc_profile_dump_on(str, rb_str_buf_append);
13775 return str;
13776}
13777
13778/*
13779 * call-seq:
13780 * GC::Profiler.report
13781 * GC::Profiler.report(io)
13782 *
13783 * Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
13784 *
13785 */
13786
13787static VALUE
13788gc_profile_report(int argc, VALUE *argv, VALUE self)
13789{
13790 VALUE out;
13791
13792 out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
13793 gc_profile_dump_on(out, rb_io_write);
13794
13795 return Qnil;
13796}
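/*
 * A minimal usage sketch ("gc.log" is only an illustrative file name):
 *
 *    GC::Profiler.report                 # writes the report to $stdout
 *    File.open("gc.log", "w") do |io|
 *      GC::Profiler.report(io)           # or to any writable IO
 *    end
 */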
13797
13798/*
13799 * call-seq:
13800 * GC::Profiler.total_time -> float
13801 *
13802 * The total time used for garbage collection in seconds
13803 */
13804
13805static VALUE
13806gc_profile_total_time(VALUE self)
13807{
13808 double time = 0;
13809 rb_objspace_t *objspace = &rb_objspace;
13810
13811 if (objspace->profile.run && objspace->profile.next_index > 0) {
13812 size_t i;
13813 size_t count = objspace->profile.next_index;
13814
13815 for (i = 0; i < count; i++) {
13816 time += objspace->profile.records[i].gc_time;
13817 }
13818 }
13819 return DBL2NUM(time);
13820}
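/*
 * A minimal usage sketch: total_time sums the GC time of every record
 * collected while the profiler was enabled (the value shown is illustrative
 * and varies per run).
 *
 *    GC::Profiler.enable
 *    3.times { GC.start }
 *    GC::Profiler.total_time   #=> 0.0012
 */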
13821
13822/*
13823 * call-seq:
13824 * GC::Profiler.enabled? -> true or false
13825 *
13826 * The current status of \GC profile mode.
13827 */
13828
13829static VALUE
13830gc_profile_enable_get(VALUE self)
13831{
13832 rb_objspace_t *objspace = &rb_objspace;
13833 return RBOOL(objspace->profile.run);
13834}
13835
13836/*
13837 * call-seq:
13838 * GC::Profiler.enable -> nil
13839 *
13840 * Starts the \GC profiler.
13841 *
13842 */
13843
13844static VALUE
13845gc_profile_enable(VALUE _)
13846{
13847 rb_objspace_t *objspace = &rb_objspace;
13848 objspace->profile.run = TRUE;
13849 objspace->profile.current_record = 0;
13850 return Qnil;
13851}
13852
13853/*
13854 * call-seq:
13855 * GC::Profiler.disable -> nil
13856 *
13857 * Stops the \GC profiler.
13858 *
13859 */
13860
13861static VALUE
13862gc_profile_disable(VALUE _)
13863{
13864 rb_objspace_t *objspace = &rb_objspace;
13865
13866 objspace->profile.run = FALSE;
13867 objspace->profile.current_record = 0;
13868 return Qnil;
13869}
13870
13871/*
13872 ------------------------------ DEBUG ------------------------------
13873*/
13874
13875static const char *
13876type_name(int type, VALUE obj)
13877{
13878 switch (type) {
13879#define TYPE_NAME(t) case (t): return #t;
13880 TYPE_NAME(T_NONE);
13881 TYPE_NAME(T_OBJECT);
13882 TYPE_NAME(T_CLASS);
13883 TYPE_NAME(T_MODULE);
13884 TYPE_NAME(T_FLOAT);
13885 TYPE_NAME(T_STRING);
13886 TYPE_NAME(T_REGEXP);
13887 TYPE_NAME(T_ARRAY);
13888 TYPE_NAME(T_HASH);
13889 TYPE_NAME(T_STRUCT);
13890 TYPE_NAME(T_BIGNUM);
13891 TYPE_NAME(T_FILE);
13892 TYPE_NAME(T_MATCH);
13893 TYPE_NAME(T_COMPLEX);
13894 TYPE_NAME(T_RATIONAL);
13895 TYPE_NAME(T_NIL);
13896 TYPE_NAME(T_TRUE);
13897 TYPE_NAME(T_FALSE);
13898 TYPE_NAME(T_SYMBOL);
13899 TYPE_NAME(T_FIXNUM);
13900 TYPE_NAME(T_UNDEF);
13901 TYPE_NAME(T_IMEMO);
13902 TYPE_NAME(T_ICLASS);
13903 TYPE_NAME(T_MOVED);
13904 TYPE_NAME(T_ZOMBIE);
13905 case T_DATA:
13906 if (obj && rb_objspace_data_type_name(obj)) {
13907 return rb_objspace_data_type_name(obj);
13908 }
13909 return "T_DATA";
13910#undef TYPE_NAME
13911 }
13912 return "unknown";
13913}
13914
13915static const char *
13916obj_type_name(VALUE obj)
13917{
13918 return type_name(TYPE(obj), obj);
13919}
13920
13921const char *
13922rb_method_type_name(rb_method_type_t type)
13923{
13924 switch (type) {
13925 case VM_METHOD_TYPE_ISEQ: return "iseq";
13926 case VM_METHOD_TYPE_ATTRSET: return "attrset";
13927 case VM_METHOD_TYPE_IVAR: return "ivar";
13928 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
13929 case VM_METHOD_TYPE_ALIAS: return "alias";
13930 case VM_METHOD_TYPE_REFINED: return "refined";
13931 case VM_METHOD_TYPE_CFUNC: return "cfunc";
13932 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
13933 case VM_METHOD_TYPE_MISSING: return "missing";
13934 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
13935 case VM_METHOD_TYPE_UNDEF: return "undef";
13936 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
13937 }
13938 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
13939}
13940
13941static void
13942rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
13943{
13944 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
13945 VALUE path = rb_iseq_path(iseq);
13946 int n = ISEQ_BODY(iseq)->location.first_lineno;
13947 snprintf(buff, buff_size, " %s@%s:%d",
13948 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
13949 RSTRING_PTR(path), n);
13950 }
13951}
13952
13953static int
13954str_len_no_raise(VALUE str)
13955{
13956 long len = RSTRING_LEN(str);
13957 if (len < 0) return 0;
13958 if (len > INT_MAX) return INT_MAX;
13959 return (int)len;
13960}
13961
13962#define BUFF_ARGS buff + pos, buff_size - pos
13963#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
13964#define APPEND_S(s) do { \
13965 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
13966 goto end; \
13967 } \
13968 else { \
13969 memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
13970 } \
13971 } while (0)
13972#define TF(c) ((c) != 0 ? "true" : "false")
13973#define C(c, s) ((c) != 0 ? (s) : " ")
13974
13975static size_t
13976rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
13977{
13978 size_t pos = 0;
13979
13980 if (SPECIAL_CONST_P(obj)) {
13981 APPEND_F("%s", obj_type_name(obj));
13982
13983 if (FIXNUM_P(obj)) {
13984 APPEND_F(" %ld", FIX2LONG(obj));
13985 }
13986 else if (SYMBOL_P(obj)) {
13987 APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
13988 }
13989 }
13990 else {
13991 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
13992
13993 if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
13994 APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
13995 (void *)obj, age,
13996 C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
13997 C(RVALUE_MARK_BITMAP(obj), "M"),
13998 C(RVALUE_PIN_BITMAP(obj), "P"),
13999 C(RVALUE_MARKING_BITMAP(obj), "R"),
14000 C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
14001 C(rb_objspace_garbage_object_p(obj), "G"),
14002 obj_type_name(obj));
14003 }
14004 else {
14005 /* fake */
14006 APPEND_F("%p [%dXXXX] %s",
14007 (void *)obj, age,
14008 obj_type_name(obj));
14009 }
14010
14011 if (internal_object_p(obj)) {
14012 /* ignore */
14013 }
14014 else if (RBASIC(obj)->klass == 0) {
14015 APPEND_S("(temporary internal)");
14016 }
14017 else if (RTEST(RBASIC(obj)->klass)) {
14018 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
14019 if (!NIL_P(class_path)) {
14020 APPEND_F("(%s)", RSTRING_PTR(class_path));
14021 }
14022 }
14023
14024#if GC_DEBUG
14025 APPEND_F("@%s:%d", RANY(obj)->file, RANY(obj)->line);
14026#endif
14027 }
14028 end:
14029
14030 return pos;
14031}
14032
14033static size_t
14034rb_raw_obj_info_builtin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
14035{
14036 if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
14037 const enum ruby_value_type type = BUILTIN_TYPE(obj);
14038
14039 switch (type) {
14040 case T_NODE:
14041 UNEXPECTED_NODE(rb_raw_obj_info);
14042 break;
14043 case T_ARRAY:
14044 if (ARY_SHARED_P(obj)) {
14045 APPEND_S("shared -> ");
14046 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
14047 }
14048 else if (ARY_EMBED_P(obj)) {
14049 APPEND_F("[%s%s] len: %ld (embed)",
14050 C(ARY_EMBED_P(obj), "E"),
14051 C(ARY_SHARED_P(obj), "S"),
14052 RARRAY_LEN(obj));
14053 }
14054 else {
14055 APPEND_F("[%s%s%s] len: %ld, capa:%ld ptr:%p",
14056 C(ARY_EMBED_P(obj), "E"),
14057 C(ARY_SHARED_P(obj), "S"),
14058 C(RARRAY_TRANSIENT_P(obj), "T"),
14059 RARRAY_LEN(obj),
14060 ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
14061 (void *)RARRAY_CONST_PTR_TRANSIENT(obj));
14062 }
14063 break;
14064 case T_STRING: {
14065 if (STR_SHARED_P(obj)) {
14066 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
14067 }
14068 else {
14069 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
14070
14071 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
14072 }
14073 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
14074 break;
14075 }
14076 case T_SYMBOL: {
14077 VALUE fstr = RSYMBOL(obj)->fstr;
14078 ID id = RSYMBOL(obj)->id;
14079 if (RB_TYPE_P(fstr, T_STRING)) {
14080 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
14081 }
14082 else {
14083 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
14084 }
14085 break;
14086 }
14087 case T_MOVED: {
14088 APPEND_F("-> %p", (void*)rb_gc_location(obj));
14089 break;
14090 }
14091 case T_HASH: {
14092 APPEND_F("[%c%c] %"PRIdSIZE,
14093 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
14094 RHASH_TRANSIENT_P(obj) ? 'T' : ' ',
14095 RHASH_SIZE(obj));
14096 break;
14097 }
14098 case T_CLASS:
14099 case T_MODULE:
14100 {
14101 VALUE class_path = rb_class_path_cached(obj);
14102 if (!NIL_P(class_path)) {
14103 APPEND_F("%s", RSTRING_PTR(class_path));
14104 }
14105 else {
14106 APPEND_S("(anon)");
14107 }
14108 break;
14109 }
14110 case T_ICLASS:
14111 {
14112 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
14113 if (!NIL_P(class_path)) {
14114 APPEND_F("src:%s", RSTRING_PTR(class_path));
14115 }
14116 break;
14117 }
14118 case T_OBJECT:
14119 {
14120 uint32_t len = ROBJECT_IV_CAPACITY(obj);
14121
14122 if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
14123 APPEND_F("(embed) len:%d", len);
14124 }
14125 else {
14126 VALUE *ptr = ROBJECT_IVPTR(obj);
14127 APPEND_F("len:%d ptr:%p", len, (void *)ptr);
14128 }
14129 }
14130 break;
14131 case T_DATA: {
14132 const struct rb_block *block;
14133 const rb_iseq_t *iseq;
14134 if (rb_obj_is_proc(obj) &&
14135 (block = vm_proc_block(obj)) != NULL &&
14136 (vm_block_type(block) == block_type_iseq) &&
14137 (iseq = vm_block_iseq(block)) != NULL) {
14138 rb_raw_iseq_info(BUFF_ARGS, iseq);
14139 }
14140 else if (rb_ractor_p(obj)) {
14141 rb_ractor_t *r = (void *)DATA_PTR(obj);
14142 if (r) {
14143 APPEND_F("r:%d", r->pub.id);
14144 }
14145 }
14146 else {
14147 const char * const type_name = rb_objspace_data_type_name(obj);
14148 if (type_name) {
14149 APPEND_F("%s", type_name);
14150 }
14151 }
14152 break;
14153 }
14154 case T_IMEMO: {
14155 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
14156
14157 switch (imemo_type(obj)) {
14158 case imemo_ment:
14159 {
14160 const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;
14161
14162 APPEND_F(":%s (%s%s%s%s) type:%s alias:%d owner:%p defined_class:%p",
14163 rb_id2name(me->called_id),
14164 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
14165 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
14166 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
14167 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
14168 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
14169 me->def ? rb_method_type_name(me->def->type) : "NULL",
14170 me->def ? me->def->alias_count : -1,
14171 (void *)me->owner, // obj_info(me->owner),
14172 (void *)me->defined_class); //obj_info(me->defined_class)));
14173
14174 if (me->def) {
14175 switch (me->def->type) {
14176 case VM_METHOD_TYPE_ISEQ:
14177 APPEND_S(" (iseq:");
14178 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
14179 APPEND_S(")");
14180 break;
14181 default:
14182 break;
14183 }
14184 }
14185
14186 break;
14187 }
14188 case imemo_iseq: {
14189 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
14190 rb_raw_iseq_info(BUFF_ARGS, iseq);
14191 break;
14192 }
14193 case imemo_callinfo:
14194 {
14195 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
14196 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
14197 rb_id2name(vm_ci_mid(ci)),
14198 vm_ci_flag(ci),
14199 vm_ci_argc(ci),
14200 vm_ci_kwarg(ci) ? "available" : "NULL");
14201 break;
14202 }
14203 case imemo_callcache:
14204 {
14205 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
14206 VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
14207 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
14208
14209 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
14210 NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
14211 cme ? rb_id2name(cme->called_id) : "<NULL>",
14212 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
14213 (void *)cme,
14214 (void *)vm_cc_call(cc));
14215 break;
14216 }
14217 default:
14218 break;
14219 }
14220 }
14221 default:
14222 break;
14223 }
14224 }
14225 end:
14226
14227 return pos;
14228}
14229
14230#undef TF
14231#undef C
14232
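/*
 * rb_raw_obj_info() renders a one-line description of obj into buff: the
 * common header (address, age, GC bit flags, type name, class) followed by
 * type-specific details for built-in types.  Output that does not fit into
 * buff_size is silently truncated.
 */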
14233const char *
14234rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
14235{
14236 asan_unpoisoning_object(obj) {
14237 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
14238 pos = rb_raw_obj_info_builtin_type(buff, buff_size, obj, pos);
14239 if (pos >= buff_size) {} // truncated
14240 }
14241
14242 return buff;
14243}
14244
14245#undef APPEND_S
14246#undef APPEND_F
14247#undef BUFF_ARGS
14248
14249#if RGENGC_OBJ_INFO
14250#define OBJ_INFO_BUFFERS_NUM 10
14251#define OBJ_INFO_BUFFERS_SIZE 0x100
14252static rb_atomic_t obj_info_buffers_index = 0;
14253static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
14254
14255/* Increments *var atomically and resets *var to 0 when maxval is
14256 * reached. Returns the wraparound old *var value (0...maxval). */
14257static rb_atomic_t
14258atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
14259{
14260 rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
14261 if (UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
14262 const rb_atomic_t newval = oldval + 1;
14263 RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
14264 oldval %= maxval;
14265 }
14266 return oldval;
14267}
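/*
 * For example, with maxval == 3 successive calls return 0, 1, 2, 0, 1, ...
 * The returned value is reduced modulo maxval at the wraparound point, so it
 * stays within 0...maxval even if the compare-and-swap that resets *var
 * loses a race with another thread.
 */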
14268
14269static const char *
14270obj_info(VALUE obj)
14271{
14272 rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
14273 char *const buff = obj_info_buffers[index];
14274 return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
14275}
14276#else
14277static const char *
14278obj_info(VALUE obj)
14279{
14280 return obj_type_name(obj);
14281}
14282#endif
14283
14284MJIT_FUNC_EXPORTED const char *
14285rb_obj_info(VALUE obj)
14286{
14287 return obj_info(obj);
14288}
14289
14290void
14291rb_obj_info_dump(VALUE obj)
14292{
14293 char buff[0x100];
14294 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
14295}
14296
14297MJIT_FUNC_EXPORTED void
14298rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
14299{
14300 char buff[0x100];
14301 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
14302}
14303
14304#if GC_DEBUG
14305
14306void
14307rb_gcdebug_print_obj_condition(VALUE obj)
14308{
14309 rb_objspace_t *objspace = &rb_objspace;
14310
14311 fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);
14312
14313 if (BUILTIN_TYPE(obj) == T_MOVED) {
14314 fprintf(stderr, "moved?: true\n");
14315 }
14316 else {
14317 fprintf(stderr, "moved?: false\n");
14318 }
14319 if (is_pointer_to_heap(objspace, (void *)obj)) {
14320 fprintf(stderr, "pointer to heap?: true\n");
14321 }
14322 else {
14323 fprintf(stderr, "pointer to heap?: false\n");
14324 return;
14325 }
14326
14327 fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
14328 fprintf(stderr, "pinned? : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
14329 fprintf(stderr, "age? : %d\n", RVALUE_AGE(obj));
14330 fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
14331 fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
14332 fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
14333
14334 if (is_lazy_sweeping(objspace)) {
14335 fprintf(stderr, "lazy sweeping?: true\n");
14336 fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
14337 }
14338 else {
14339 fprintf(stderr, "lazy sweeping?: false\n");
14340 }
14341}
14342
14343static VALUE
14344gcdebug_sentinel(RB_BLOCK_CALL_FUNC_ARGLIST(obj, name))
14345{
14346 fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
14347 return Qnil;
14348}
14349
14350void
14351rb_gcdebug_sentinel(VALUE obj, const char *name)
14352{
14353 rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
14354}
14355
14356#endif /* GC_DEBUG */
14357
14358#if GC_DEBUG_STRESS_TO_CLASS
14359/*
14360 * call-seq:
14361 * GC.add_stress_to_class(class[, ...])
14362 *
14363 * Raises NoMemoryError when allocating an instance of the given classes.
14364 *
14365 */
14366static VALUE
14367rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
14368{
14369 rb_objspace_t *objspace = &rb_objspace;
14370
14371 if (!stress_to_class) {
14372 stress_to_class = rb_ary_hidden_new(argc);
14373 }
14374 rb_ary_cat(stress_to_class, argv, argc);
14375 return self;
14376}
14377
14378/*
14379 * call-seq:
14380 * GC.remove_stress_to_class(class[, ...])
14381 *
14382 * No longer raises NoMemoryError when allocating an instance of the
14383 * given classes.
14384 *
14385 */
14386static VALUE
14387rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
14388{
14389 rb_objspace_t *objspace = &rb_objspace;
14390 int i;
14391
14392 if (stress_to_class) {
14393 for (i = 0; i < argc; ++i) {
14394 rb_ary_delete_same(stress_to_class, argv[i]);
14395 }
14396 if (RARRAY_LEN(stress_to_class) == 0) {
14397 stress_to_class = 0;
14398 }
14399 }
14400 return Qnil;
14401}
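/*
 * A minimal usage sketch (these singleton methods exist only when Ruby is
 * built with GC_DEBUG_STRESS_TO_CLASS):
 *
 *    GC.add_stress_to_class(String)     # allocating a String raises NoMemoryError
 *    GC.remove_stress_to_class(String)  # String allocation behaves normally again
 */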
14402#endif
14403
14404/*
14405 * Document-module: ObjectSpace
14406 *
14407 * The ObjectSpace module contains a number of routines
14408 * that interact with the garbage collection facility and allow you to
14409 * traverse all living objects with an iterator.
14410 *
14411 * ObjectSpace also provides support for object finalizers, procs that will be
14412 * called when a specific object is about to be destroyed by garbage
14413 * collection. See the documentation for
14414 * <code>ObjectSpace.define_finalizer</code> for important information on
14415 * how to use this method correctly.
14416 *
14417 * a = "A"
14418 * b = "B"
14419 *
14420 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
14421 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
14422 *
14423 * a = nil
14424 * b = nil
14425 *
14426 * _produces:_
14427 *
14428 * Finalizer two on 537763470
14429 * Finalizer one on 537763480
14430 */
14431
14432/*
14433 * Document-class: ObjectSpace::WeakMap
14434 *
14435 * An ObjectSpace::WeakMap object holds references to
14436 * any objects, but those objects can get garbage collected.
14437 *
14438 * This class is mostly used internally by WeakRef, please use
14439 * +lib/weakref.rb+ for the public interface.
14440 */
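/*
 * A minimal usage sketch for ObjectSpace::WeakMap (entries are retrievable
 * only while the referenced objects are still alive):
 *
 *    map = ObjectSpace::WeakMap.new
 *    key, value = Object.new, Object.new
 *    map[key] = value
 *    map[key]    #=> value, until +value+ is garbage collected
 *    map.size    #=> 1
 */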
14441
14442/* Document-class: GC::Profiler
14443 *
14444 * The GC profiler provides access to information on GC runs including time,
14445 * length and object space size.
14446 *
14447 * Example:
14448 *
14449 * GC::Profiler.enable
14450 *
14451 * require 'rdoc/rdoc'
14452 *
14453 * GC::Profiler.report
14454 *
14455 * GC::Profiler.disable
14456 *
14457 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
14458 */
14459
14460#include "gc.rbinc"
14461/*
14462 * call-seq:
14463 * GC.using_rvargc? -> true or false
14464 *
14465 * Returns true if using experimental feature Variable Width Allocation, false
14466 * otherwise.
14467 */
14468static VALUE
14469gc_using_rvargc_p(VALUE mod)
14470{
14471#if USE_RVARGC
14472 return Qtrue;
14473#else
14474 return Qfalse;
14475#endif
14476}
14477
14478void
14479Init_GC(void)
14480{
14481#undef rb_intern
14482 VALUE rb_mObjSpace;
14483 VALUE rb_mProfiler;
14484 VALUE gc_constants;
14485
14486 rb_mGC = rb_define_module("GC");
14487
14488 gc_constants = rb_hash_new();
14489 rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), RBOOL(GC_DEBUG));
14490 rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(BASE_SLOT_SIZE - RVALUE_OVERHEAD));
14491 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), SIZET2NUM(RVALUE_OVERHEAD));
14492 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
14493 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
14494 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
14495 rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));
14496 rb_hash_aset(gc_constants, ID2SYM(rb_intern("SIZE_POOL_COUNT")), LONG2FIX(SIZE_POOL_COUNT));
14497 rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(size_pool_slot_size(SIZE_POOL_COUNT - 1)));
14498 OBJ_FREEZE(gc_constants);
14499 /* internal constants */
14500 rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
14501
14502 rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
14503 rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
14504 rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
14505 rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
14506 rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
14507 rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
14508 rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
14509 rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
14510 rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
14511
14512 rb_mObjSpace = rb_define_module("ObjectSpace");
14513
14514 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
14515
14516 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
14517 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
14518
14519 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
14520
14521 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
14522
14523 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
14524 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
14525
14526 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
14527
14528 {
14529 VALUE rb_cWeakMap = rb_define_class_under(rb_mObjSpace, "WeakMap", rb_cObject);
14530 rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
14531 rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
14532 rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
14533 rb_define_method(rb_cWeakMap, "include?", wmap_has_key, 1);
14534 rb_define_method(rb_cWeakMap, "member?", wmap_has_key, 1);
14535 rb_define_method(rb_cWeakMap, "key?", wmap_has_key, 1);
14536 rb_define_method(rb_cWeakMap, "inspect", wmap_inspect, 0);
14537 rb_define_method(rb_cWeakMap, "each", wmap_each, 0);
14538 rb_define_method(rb_cWeakMap, "each_pair", wmap_each, 0);
14539 rb_define_method(rb_cWeakMap, "each_key", wmap_each_key, 0);
14540 rb_define_method(rb_cWeakMap, "each_value", wmap_each_value, 0);
14541 rb_define_method(rb_cWeakMap, "keys", wmap_keys, 0);
14542 rb_define_method(rb_cWeakMap, "values", wmap_values, 0);
14543 rb_define_method(rb_cWeakMap, "size", wmap_size, 0);
14544 rb_define_method(rb_cWeakMap, "length", wmap_size, 0);
14545 rb_include_module(rb_cWeakMap, rb_mEnumerable);
14546 }
14547
14548 /* internal methods */
14549 rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);
14550 rb_define_singleton_method(rb_mGC, "verify_transient_heap_internal_consistency", gc_verify_transient_heap_internal_consistency, 0);
14551#if MALLOC_ALLOCATED_SIZE
14552 rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
14553 rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
14554#endif
14555
14556 rb_define_singleton_method(rb_mGC, "using_rvargc?", gc_using_rvargc_p, 0);
14557
14558 if (GC_COMPACTION_SUPPORTED) {
14559 rb_define_singleton_method(rb_mGC, "compact", gc_compact, 0);
14560 rb_define_singleton_method(rb_mGC, "auto_compact", gc_get_auto_compact, 0);
14561 rb_define_singleton_method(rb_mGC, "auto_compact=", gc_set_auto_compact, 1);
14562 rb_define_singleton_method(rb_mGC, "latest_compact_info", gc_compact_stats, 0);
14563 }
14564 else {
14565 rb_define_singleton_method(rb_mGC, "compact", rb_f_notimplement, 0);
14566 rb_define_singleton_method(rb_mGC, "auto_compact", rb_f_notimplement, 0);
14567 rb_define_singleton_method(rb_mGC, "auto_compact=", rb_f_notimplement, 1);
14568 rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
14569 /* When !GC_COMPACTION_SUPPORTED, this method is not defined in gc.rb */
14570 rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
14571 }
14572
14573#if GC_DEBUG_STRESS_TO_CLASS
14574 rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
14575 rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
14576#endif
14577
14578 {
14579 VALUE opts;
14580 /* \GC build options */
14581 rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
14582#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
14583 OPT(GC_DEBUG);
14584 OPT(USE_RGENGC);
14585 OPT(RGENGC_DEBUG);
14586 OPT(RGENGC_CHECK_MODE);
14587 OPT(RGENGC_PROFILE);
14588 OPT(RGENGC_ESTIMATE_OLDMALLOC);
14589 OPT(GC_PROFILE_MORE_DETAIL);
14590 OPT(GC_ENABLE_LAZY_SWEEP);
14591 OPT(CALC_EXACT_MALLOC_SIZE);
14592 OPT(MALLOC_ALLOCATED_SIZE);
14593 OPT(MALLOC_ALLOCATED_SIZE_CHECK);
14594 OPT(GC_PROFILE_DETAIL_MEMORY);
14595 OPT(GC_COMPACTION_SUPPORTED);
14596#undef OPT
14597 OBJ_FREEZE(opts);
14598 }
14599}
14600
14601#ifdef ruby_xmalloc
14602#undef ruby_xmalloc
14603#endif
14604#ifdef ruby_xmalloc2
14605#undef ruby_xmalloc2
14606#endif
14607#ifdef ruby_xcalloc
14608#undef ruby_xcalloc
14609#endif
14610#ifdef ruby_xrealloc
14611#undef ruby_xrealloc
14612#endif
14613#ifdef ruby_xrealloc2
14614#undef ruby_xrealloc2
14615#endif
14616
14617void *
14618ruby_xmalloc(size_t size)
14619{
14620#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14621 ruby_malloc_info_file = __FILE__;
14622 ruby_malloc_info_line = __LINE__;
14623#endif
14624 return ruby_xmalloc_body(size);
14625}
14626
14627void *
14628ruby_xmalloc2(size_t n, size_t size)
14629{
14630#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14631 ruby_malloc_info_file = __FILE__;
14632 ruby_malloc_info_line = __LINE__;
14633#endif
14634 return ruby_xmalloc2_body(n, size);
14635}
14636
14637void *
14638ruby_xcalloc(size_t n, size_t size)
14639{
14640#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14641 ruby_malloc_info_file = __FILE__;
14642 ruby_malloc_info_line = __LINE__;
14643#endif
14644 return ruby_xcalloc_body(n, size);
14645}
14646
14647void *
14648ruby_xrealloc(void *ptr, size_t new_size)
14649{
14650#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14651 ruby_malloc_info_file = __FILE__;
14652 ruby_malloc_info_line = __LINE__;
14653#endif
14654 return ruby_xrealloc_body(ptr, new_size);
14655}
14656
14657void *
14658ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
14659{
14660#if USE_GC_MALLOC_OBJ_INFO_DETAILS
14661 ruby_malloc_info_file = __FILE__;
14662 ruby_malloc_info_line = __LINE__;
14663#endif
14664 return ruby_xrealloc2_body(ptr, n, new_size);
14665}