Ruby 2.7.2p137 (2020-10-01 revision 5445e0435260b449decf2ac16f9d09bae3cafe72)
gc.c
/**********************************************************************

  gc.c -

  $Author$
  created at: Tue Oct 5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000 Information-technology Promotion Agency, Japan

**********************************************************************/

#define rb_data_object_alloc rb_data_object_alloc
#define rb_data_typed_object_alloc rb_data_typed_object_alloc

#include "ruby/encoding.h"
#include "ruby/io.h"
#include "ruby/st.h"
#include "ruby/re.h"
#include "ruby/thread.h"
#include "ruby/util.h"
#include "ruby/debug.h"
#include "internal.h"
#include "eval_intern.h"
#include "vm_core.h"
#include "builtin.h"
#include "gc.h"
#include "constant.h"
#include "ruby_atomic.h"
#include "probes.h"
#include "id_table.h"
#include "symbol.h"
#include <stdio.h>
#include <stdarg.h>
#include <setjmp.h>
#include <sys/types.h>
#include "ruby_assert.h"
#include "debug_counter.h"
#include "transient_heap.h"
#include "mjit.h"

#undef rb_data_object_wrap

#ifndef HAVE_MALLOC_USABLE_SIZE
# ifdef _WIN32
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) malloc_size(a)
# endif
#endif
#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
#  include RUBY_ALTERNATIVE_MALLOC_HEADER
# elif HAVE_MALLOC_H
#  include <malloc.h>
# elif defined(HAVE_MALLOC_NP_H)
#  include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
#  include <malloc/malloc.h>
# endif
#endif

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif

#if defined _WIN32 || defined __CYGWIN__
#include <windows.h>
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
#include <malloc.h>
#endif

#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t

#if defined(_MSC_VER) && defined(_WIN64)
#include <intrin.h>
#pragma intrinsic(_umul128)
#endif

/* Expecting this struct to be eliminated by function inlining */
struct optional {
    bool left;
    size_t right;
};

static inline struct optional
size_mul_overflow(size_t x, size_t y)
{
    bool p;
    size_t z;
#if 0

#elif defined(HAVE_BUILTIN___BUILTIN_MUL_OVERFLOW)
    p = __builtin_mul_overflow(x, y, &z);

#elif defined(DSIZE_T)
    RB_GNUC_EXTENSION DSIZE_T dx = x;
    RB_GNUC_EXTENSION DSIZE_T dy = y;
    RB_GNUC_EXTENSION DSIZE_T dz = dx * dy;
    p = dz > SIZE_MAX;
    z = (size_t)dz;

#elif defined(_MSC_VER) && defined(_WIN64)
    unsigned __int64 dp;
    unsigned __int64 dz = _umul128(x, y, &dp);
    p = (bool)dp;
    z = (size_t)dz;

#else
    /* https://wiki.sei.cmu.edu/confluence/display/c/INT30-C.+Ensure+that+unsigned+integer+operations+do+not+wrap */
    p = (y != 0) && (x > SIZE_MAX / y);
    z = x * y;

#endif
    return (struct optional) { p, z, };
}

static inline struct optional
size_add_overflow(size_t x, size_t y)
{
    size_t z;
    bool p;
#if 0

#elif defined(HAVE_BUILTIN___BUILTIN_ADD_OVERFLOW)
    p = __builtin_add_overflow(x, y, &z);

#elif defined(DSIZE_T)
    RB_GNUC_EXTENSION DSIZE_T dx = x;
    RB_GNUC_EXTENSION DSIZE_T dy = y;
    RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;
    p = dz > SIZE_MAX;
    z = (size_t)dz;

#else
    z = x + y;
    p = z < y;

#endif
    return (struct optional) { p, z, };
}

static inline struct optional
size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
{
    struct optional t = size_mul_overflow(x, y);
    struct optional u = size_add_overflow(t.right, z);
    return (struct optional) { t.left || u.left, u.right };
}

static inline struct optional
size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    struct optional t = size_mul_overflow(x, y);
    struct optional u = size_mul_overflow(z, w);
    struct optional v = size_add_overflow(t.right, u.right);
    return (struct optional) { t.left || u.left || v.left, v.right };
}
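
/* Editor's illustration (not part of the original source): a minimal sketch
 * of how the overflow helpers above are meant to be used.  `left' is the
 * overflow flag and `right' the (possibly wrapped) result.  Compiled out
 * via #if 0, following this file's convention for disabled code. */
#if 0
static void
size_overflow_demo(void)
{
    struct optional ok  = size_mul_overflow(1000, 1000);      /* left == false, right == 1000000 */
    struct optional bad = size_mul_overflow(SIZE_MAX / 2, 3); /* left == true, right has wrapped */

    /* x * y + z guarded in one step, as the sizing helpers below do: */
    struct optional v = size_mul_add_overflow(16, 1024, 8);   /* 16 * 1024 + 8 == 16392 */

    if (!ok.left && !bad.left && !v.left) {
        /* no overflow anywhere; the .right values are safe to use */
    }
}
#endif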

PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);

static inline size_t
size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    struct optional t = size_mul_overflow(x, y);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, SIZE_MAX);
    }
}

size_t
rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    return size_mul_or_raise(x, y, exc);
}

static inline size_t
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    struct optional t = size_mul_add_overflow(x, y, z);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " + %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, z, SIZE_MAX);
    }
}

size_t
rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    return size_mul_add_or_raise(x, y, z, exc);
}

static inline size_t
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
{
    struct optional t = size_mul_add_mul_overflow(x, y, z, w);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIdSIZE
            " * %"PRIdSIZE
            " + %"PRIdSIZE
            " * %"PRIdSIZE
            " > %"PRIdSIZE,
            x, y, z, w, SIZE_MAX);
    }
}

#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
/* trick the compiler into thinking an external signal handler uses this */
volatile VALUE rb_gc_guarded_val;
volatile VALUE *
rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
{
    rb_gc_guarded_val = val;

    return ptr;
}
#endif

#ifndef GC_HEAP_INIT_SLOTS
#define GC_HEAP_INIT_SLOTS 10000
#endif
#ifndef GC_HEAP_FREE_SLOTS
#define GC_HEAP_FREE_SLOTS  4096
#endif
#ifndef GC_HEAP_GROWTH_FACTOR
#define GC_HEAP_GROWTH_FACTOR 1.8
#endif
#ifndef GC_HEAP_GROWTH_MAX_SLOTS
#define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 means no cap */
#endif
#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
#endif

#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
#define GC_HEAP_FREE_SLOTS_MIN_RATIO  0.20
#endif
#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
#endif
#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
#define GC_HEAP_FREE_SLOTS_MAX_RATIO  0.65
#endif

#ifndef GC_MALLOC_LIMIT_MIN
#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
#endif
#ifndef GC_MALLOC_LIMIT_MAX
#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
#endif
#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
#endif

#ifndef GC_OLDMALLOC_LIMIT_MIN
#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
#endif
#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
#endif
#ifndef GC_OLDMALLOC_LIMIT_MAX
#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
#endif

#ifndef PRINT_MEASURE_LINE
#define PRINT_MEASURE_LINE 0
#endif
#ifndef PRINT_ENTER_EXIT_TICK
#define PRINT_ENTER_EXIT_TICK 0
#endif
#ifndef PRINT_ROOT_TICKS
#define PRINT_ROOT_TICKS 0
#endif

#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
#define TICK_TYPE 1

typedef struct {
    size_t heap_init_slots;
    size_t heap_free_slots;
    double growth_factor;
    size_t growth_max_slots;

    double heap_free_slots_min_ratio;
    double heap_free_slots_goal_ratio;
    double heap_free_slots_max_ratio;
    double oldobject_limit_factor;

    size_t malloc_limit_min;
    size_t malloc_limit_max;
    double malloc_limit_growth_factor;

    size_t oldmalloc_limit_min;
    size_t oldmalloc_limit_max;
    double oldmalloc_limit_growth_factor;

    VALUE gc_stress;
} ruby_gc_params_t;

static ruby_gc_params_t gc_params = {
    GC_HEAP_INIT_SLOTS,
    GC_HEAP_FREE_SLOTS,
    GC_HEAP_GROWTH_FACTOR,
    GC_HEAP_GROWTH_MAX_SLOTS,

    GC_HEAP_FREE_SLOTS_MIN_RATIO,
    GC_HEAP_FREE_SLOTS_GOAL_RATIO,
    GC_HEAP_FREE_SLOTS_MAX_RATIO,
    GC_HEAP_OLDOBJECT_LIMIT_FACTOR,

    GC_MALLOC_LIMIT_MIN,
    GC_MALLOC_LIMIT_MAX,
    GC_MALLOC_LIMIT_GROWTH_FACTOR,

    GC_OLDMALLOC_LIMIT_MIN,
    GC_OLDMALLOC_LIMIT_MAX,
    GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,

    FALSE,
};
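
/* Editor's illustration (not part of the original source): each default
 * above can be overridden at boot through the matching RUBY_GC_* environment
 * variable (e.g. RUBY_GC_HEAP_GROWTH_FACTOR).  A sketch of one malloc-limit
 * growth step under the default parameters, compiled out via #if 0: */
#if 0
static void
gc_params_demo(void)
{
    size_t limit = gc_params.malloc_limit_min;                      /* 16MB */
    limit = (size_t)(limit * gc_params.malloc_limit_growth_factor); /* * 1.4 */
    if (gc_params.malloc_limit_max && limit > gc_params.malloc_limit_max) {
        limit = gc_params.malloc_limit_max;                         /* capped at 32MB */
    }
}
#endif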

/* GC_DEBUG:
 *  enable to embed GC debugging information.
 */
#ifndef GC_DEBUG
#define GC_DEBUG 0
#endif

#if USE_RGENGC
/* RGENGC_DEBUG:
 * 1: basic information
 * 2: remember set operation
 * 3: mark
 * 4:
 * 5: sweep
 */
#ifndef RGENGC_DEBUG
#ifdef RUBY_DEVEL
#define RGENGC_DEBUG -1
#else
#define RGENGC_DEBUG 0
#endif
#endif
#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
#else
# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
#endif
int ruby_rgengc_debug;

/* RGENGC_CHECK_MODE
 * 0: disable all assertions
 * 1: enable assertions (to debug RGenGC)
 * 2: enable internal consistency check at each GC (for debugging)
 * 3: enable internal consistency check at each GC step (for debugging)
 * 4: enable liveness check
 * 5: show all references
 */
#ifndef RGENGC_CHECK_MODE
#define RGENGC_CHECK_MODE 0
#endif

// Note: RUBY_ASSERT_WHEN() expands the macro in expr (info by nobu).
#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)

/* RGENGC_OLD_NEWOBJ_CHECK
 * 0:  disable all assertions
 * >0: make an OLD object on new object creation.
 *
 * Make one OLD object per RGENGC_OLD_NEWOBJ_CHECK WB-protected object creations.
 */
#ifndef RGENGC_OLD_NEWOBJ_CHECK
#define RGENGC_OLD_NEWOBJ_CHECK 0
#endif

/* RGENGC_PROFILE
 * 0: disable RGenGC profiling
 * 1: enable profiling for basic information
 * 2: enable profiling for each type
 */
#ifndef RGENGC_PROFILE
#define RGENGC_PROFILE 0
#endif

/* RGENGC_ESTIMATE_OLDMALLOC
 * Enable/disable estimation of how much malloc'ed memory is held by old
 * objects.  If the estimate exceeds its threshold, a full GC is invoked.
 * 0: disable estimation.
 * 1: enable estimation.
 */
#ifndef RGENGC_ESTIMATE_OLDMALLOC
#define RGENGC_ESTIMATE_OLDMALLOC 1
#endif

/* RGENGC_FORCE_MAJOR_GC
 * Force major/full GC if this macro is not 0.
 */
#ifndef RGENGC_FORCE_MAJOR_GC
#define RGENGC_FORCE_MAJOR_GC 0
#endif

#else /* USE_RGENGC */

#ifdef RGENGC_DEBUG
#undef RGENGC_DEBUG
#endif
#define RGENGC_DEBUG 0
#ifdef RGENGC_CHECK_MODE
#undef RGENGC_CHECK_MODE
#endif
#define RGENGC_CHECK_MODE 0
#define RGENGC_PROFILE 0
#define RGENGC_ESTIMATE_OLDMALLOC 0
#define RGENGC_FORCE_MAJOR_GC 0

#endif /* USE_RGENGC */

#ifndef GC_PROFILE_MORE_DETAIL
#define GC_PROFILE_MORE_DETAIL 0
#endif
#ifndef GC_PROFILE_DETAIL_MEMORY
#define GC_PROFILE_DETAIL_MEMORY 0
#endif
#ifndef GC_ENABLE_INCREMENTAL_MARK
#define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
#endif
#ifndef GC_ENABLE_LAZY_SWEEP
#define GC_ENABLE_LAZY_SWEEP 1
#endif
#ifndef CALC_EXACT_MALLOC_SIZE
#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
#endif
#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
#ifndef MALLOC_ALLOCATED_SIZE
#define MALLOC_ALLOCATED_SIZE 0
#endif
#else
#define MALLOC_ALLOCATED_SIZE 0
#endif
#ifndef MALLOC_ALLOCATED_SIZE_CHECK
#define MALLOC_ALLOCATED_SIZE_CHECK 0
#endif

#ifndef GC_DEBUG_STRESS_TO_CLASS
#define GC_DEBUG_STRESS_TO_CLASS 0
#endif

#ifndef RGENGC_OBJ_INFO
#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
#endif

typedef enum {
    GPR_FLAG_NONE               = 0x000,
    /* major reason */
    GPR_FLAG_MAJOR_BY_NOFREE    = 0x001,
    GPR_FLAG_MAJOR_BY_OLDGEN    = 0x002,
    GPR_FLAG_MAJOR_BY_SHADY     = 0x004,
    GPR_FLAG_MAJOR_BY_FORCE     = 0x008,
#if RGENGC_ESTIMATE_OLDMALLOC
    GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
#endif
    GPR_FLAG_MAJOR_MASK         = 0x0ff,

    /* gc reason */
    GPR_FLAG_NEWOBJ             = 0x100,
    GPR_FLAG_MALLOC             = 0x200,
    GPR_FLAG_METHOD             = 0x400,
    GPR_FLAG_CAPI               = 0x800,
    GPR_FLAG_STRESS            = 0x1000,

    /* others */
    GPR_FLAG_IMMEDIATE_SWEEP   = 0x2000,
    GPR_FLAG_HAVE_FINALIZE     = 0x4000,
    GPR_FLAG_IMMEDIATE_MARK    = 0x8000,
    GPR_FLAG_FULL_MARK        = 0x10000,
    GPR_FLAG_COMPACT          = 0x20000,

    GPR_DEFAULT_REASON =
        (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
         GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
} gc_profile_record_flag;

typedef struct gc_profile_record {
    int flags;

    double gc_time;
    double gc_invoke_time;

    size_t heap_total_objects;
    size_t heap_use_size;
    size_t heap_total_size;

#if GC_PROFILE_MORE_DETAIL
    double gc_mark_time;
    double gc_sweep_time;

    size_t heap_use_pages;
    size_t heap_live_objects;
    size_t heap_free_objects;

    size_t allocate_increase;
    size_t allocate_limit;

    double prepare_time;
    size_t removing_objects;
    size_t empty_objects;
#if GC_PROFILE_DETAIL_MEMORY
    long maxrss;
    long minflt;
    long majflt;
#endif
#endif
#if MALLOC_ALLOCATED_SIZE
    size_t allocated_size;
#endif

#if RGENGC_PROFILE > 0
    size_t old_objects;
    size_t remembered_normal_objects;
    size_t remembered_shady_objects;
#endif
} gc_profile_record;

#if defined(_MSC_VER) || defined(__CYGWIN__)
#pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
#endif

typedef struct RVALUE {
    union {
        struct {
            VALUE flags;        /* always 0 for freed obj */
            struct RVALUE *next;
        } free;
        struct RMoved  moved;
        struct RBasic  basic;
        struct RObject object;
        struct RClass  klass;
        struct RFloat  flonum;
        struct RString string;
        struct RArray  array;
        struct RRegexp regexp;
        struct RHash   hash;
        struct RData   data;
        struct RTypedData typeddata;
        struct RStruct rstruct;
        struct RBignum bignum;
        struct RFile   file;
        struct RMatch  match;
        struct RRational rational;
        struct RComplex complex;
        union {
            rb_cref_t cref;
            struct vm_svar svar;
            struct vm_throw_data throw_data;
            struct vm_ifunc ifunc;
            struct MEMO memo;
            struct rb_method_entry_struct ment;
            const rb_iseq_t iseq;
            rb_env_t env;
            struct rb_imemo_tmpbuf_struct alloc;
            rb_ast_t ast;
        } imemo;
        struct {
            struct RBasic basic;
            VALUE v1;
            VALUE v2;
            VALUE v3;
        } values;
    } as;
#if GC_DEBUG
    const char *file;
    int line;
#endif
} RVALUE;

#if defined(_MSC_VER) || defined(__CYGWIN__)
#pragma pack(pop)
#endif

typedef uintptr_t bits_t;
enum {
    BITS_SIZE = sizeof(bits_t),
    BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
};
#define popcount_bits rb_popcount_intptr

struct heap_page_header {
    struct heap_page *page;
};

struct heap_page_body {
    struct heap_page_header header;
    /* char gap[];      */
    /* RVALUE values[]; */
};

struct gc_list {
    VALUE *varptr;
    struct gc_list *next;
};

#define STACK_CHUNK_SIZE 500

typedef struct stack_chunk {
    VALUE data[STACK_CHUNK_SIZE];
    struct stack_chunk *next;
} stack_chunk_t;

typedef struct mark_stack {
    stack_chunk_t *chunk;
    stack_chunk_t *cache;
    int index;
    int limit;
    size_t cache_size;
    size_t unused_cache_size;
} mark_stack_t;

typedef struct rb_heap_struct {
    RVALUE *freelist;

    struct heap_page *free_pages;
    struct heap_page *using_page;
    struct list_head pages;
    struct heap_page *sweeping_page; /* iterator for .pages */
#if GC_ENABLE_INCREMENTAL_MARK
    struct heap_page *pooled_pages;
#endif
    size_t total_pages;      /* total page count in a heap */
    size_t total_slots;      /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
} rb_heap_t;

enum gc_mode {
    gc_mode_none,
    gc_mode_marking,
    gc_mode_sweeping
};

typedef struct rb_objspace {
    struct {
        size_t limit;
        size_t increase;
#if MALLOC_ALLOCATED_SIZE
        size_t allocated_size;
        size_t allocations;
#endif
    } malloc_params;

    struct {
        unsigned int mode : 2;
        unsigned int immediate_sweep : 1;
        unsigned int dont_gc : 1;
        unsigned int dont_incremental : 1;
        unsigned int during_gc : 1;
        unsigned int during_compacting : 1;
        unsigned int gc_stressful: 1;
        unsigned int has_hook: 1;
#if USE_RGENGC
        unsigned int during_minor_gc : 1;
#endif
#if GC_ENABLE_INCREMENTAL_MARK
        unsigned int during_incremental_marking : 1;
#endif
    } flags;

    rb_event_flag_t hook_events;
    size_t total_allocated_objects;
    VALUE next_object_id;

    rb_heap_t eden_heap;
    rb_heap_t tomb_heap; /* heap for zombies and ghosts */

    struct {
        rb_atomic_t finalizing;
    } atomic_flags;

    struct mark_func_data_struct {
        void *data;
        void (*mark_func)(VALUE v, void *data);
    } *mark_func_data;

    mark_stack_t mark_stack;
    size_t marked_slots;

    struct {
        struct heap_page **sorted;
        size_t allocated_pages;
        size_t allocatable_pages;
        size_t sorted_length;
        RVALUE *range[2];
        size_t freeable_pages;

        /* final */
        size_t final_slots;
        VALUE deferred_final;
    } heap_pages;

    st_table *finalizer_table;

    struct {
        int run;
        int latest_gc_info;
        gc_profile_record *records;
        gc_profile_record *current_record;
        size_t next_index;
        size_t size;

#if GC_PROFILE_MORE_DETAIL
        double prepare_time;
#endif
        double invoke_time;

#if USE_RGENGC
        size_t minor_gc_count;
        size_t major_gc_count;
        size_t compact_count;
#if RGENGC_PROFILE > 0
        size_t total_generated_normal_object_count;
        size_t total_generated_shady_object_count;
        size_t total_shade_operation_count;
        size_t total_promoted_count;
        size_t total_remembered_normal_object_count;
        size_t total_remembered_shady_object_count;

#if RGENGC_PROFILE >= 2
        size_t generated_normal_object_count_types[RUBY_T_MASK];
        size_t generated_shady_object_count_types[RUBY_T_MASK];
        size_t shade_operation_count_types[RUBY_T_MASK];
        size_t promoted_types[RUBY_T_MASK];
        size_t remembered_normal_object_count_types[RUBY_T_MASK];
        size_t remembered_shady_object_count_types[RUBY_T_MASK];
#endif
#endif /* RGENGC_PROFILE */
#endif /* USE_RGENGC */

        /* temporary profiling space */
        double gc_sweep_start_time;
        size_t total_allocated_objects_at_gc_start;
        size_t heap_used_at_gc_start;

        /* basic statistics */
        size_t count;
        size_t total_freed_objects;
        size_t total_allocated_pages;
        size_t total_freed_pages;
    } profile;
    struct gc_list *global_list;

    VALUE gc_stress_mode;

#if USE_RGENGC
    struct {
        VALUE parent_object;
        int need_major_gc;
        size_t last_major_gc;
        size_t uncollectible_wb_unprotected_objects;
        size_t uncollectible_wb_unprotected_objects_limit;
        size_t old_objects;
        size_t old_objects_limit;

#if RGENGC_ESTIMATE_OLDMALLOC
        size_t oldmalloc_increase;
        size_t oldmalloc_increase_limit;
#endif

#if RGENGC_CHECK_MODE >= 2
        struct st_table *allrefs_table;
        size_t error_count;
#endif
    } rgengc;

    struct {
        size_t considered_count_table[T_MASK];
        size_t moved_count_table[T_MASK];
    } rcompactor;

#if GC_ENABLE_INCREMENTAL_MARK
    struct {
        size_t pooled_slots;
        size_t step_slots;
    } rincgc;
#endif
#endif /* USE_RGENGC */

    st_table *id_to_obj_tbl;
    st_table *obj_to_id_tbl;

#if GC_DEBUG_STRESS_TO_CLASS
    VALUE stress_to_class;
#endif
} rb_objspace_t;

/* default tiny heap size: 16KB */
#define HEAP_PAGE_ALIGN_LOG 14
#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
enum {
    HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
    HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
    REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
    HEAP_PAGE_SIZE = (HEAP_PAGE_ALIGN - REQUIRED_SIZE_BY_MALLOC),
    HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
    HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, sizeof(struct RVALUE)), BITS_BITLENGTH),
    HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
    HEAP_PAGE_BITMAP_PLANES = USE_RGENGC ? 4 : 1 /* RGENGC: mark, unprotected, uncollectible, marking */
};

struct heap_page {
    short total_slots;
    short free_slots;
    short pinned_slots;
    short final_slots;
    struct {
        unsigned int before_sweep : 1;
        unsigned int has_remembered_objects : 1;
        unsigned int has_uncollectible_shady_objects : 1;
        unsigned int in_tomb : 1;
    } flags;

    struct heap_page *free_next;
    RVALUE *start;
    RVALUE *freelist;
    struct list_node page_node;

#if USE_RGENGC
    bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
#endif
    /* the following three bitmaps are cleared at the beginning of full GC */
    bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
#if USE_RGENGC
    bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
#endif

    /* If set, the object is not movable */
    bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
};

#define GET_PAGE_BODY(x)   ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
#define GET_HEAP_PAGE(x)   (GET_PAGE_HEADER(x)->page)

#define NUM_IN_PAGE(p)   (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
#define BITMAP_INDEX(p)  (NUM_IN_PAGE(p) / BITS_BITLENGTH )
#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
#define BITMAP_BIT(p)    ((bits_t)1 << BITMAP_OFFSET(p))

/* Bitmap Operations */
#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
#define MARK_IN_BITMAP(bits, p)   ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
#define CLEAR_IN_BITMAP(bits, p)  ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))

/* getting bitmap */
#define GET_HEAP_MARK_BITS(x)           (&GET_HEAP_PAGE(x)->mark_bits[0])
#define GET_HEAP_PINNED_BITS(x)         (&GET_HEAP_PAGE(x)->pinned_bits[0])
#if USE_RGENGC
#define GET_HEAP_UNCOLLECTIBLE_BITS(x)  (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
#define GET_HEAP_MARKING_BITS(x)        (&GET_HEAP_PAGE(x)->marking_bits[0])
#endif

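/* Editor's illustration (not part of the original source): how the macros
 * above decompose an object pointer.  A page body is HEAP_PAGE_ALIGN (16KB)
 * aligned, so masking the low bits of any slot address recovers its page;
 * the remaining offset selects a word and bit in the per-page bitmaps.
 * Compiled out via #if 0. */
#if 0
static void
bitmap_demo(const RVALUE *p)
{
    struct heap_page_body *body = GET_PAGE_BODY(p); /* aligned page body */
    size_t slot = NUM_IN_PAGE(p);   /* slot index within the page */
    size_t word = BITMAP_INDEX(p);  /* slot / BITS_BITLENGTH */
    bits_t bit  = BITMAP_BIT(p);    /* (bits_t)1 << (slot % BITS_BITLENGTH) */

    bits_t *marks = GET_HEAP_MARK_BITS(p);
    if (marks[word] & bit) {
        /* same test that MARKED_IN_BITMAP(marks, p) performs */
    }
    (void)body; (void)slot;
}
#endif
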
/* Aliases */
#define rb_objspace (*rb_objspace_of(GET_VM()))
#define rb_objspace_of(vm) ((vm)->objspace)

#define ruby_initial_gc_stress gc_params.gc_stress

VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;

#define malloc_limit              objspace->malloc_params.limit
#define malloc_increase           objspace->malloc_params.increase
#define malloc_allocated_size     objspace->malloc_params.allocated_size
#define heap_pages_sorted         objspace->heap_pages.sorted
#define heap_allocated_pages      objspace->heap_pages.allocated_pages
#define heap_pages_sorted_length  objspace->heap_pages.sorted_length
#define heap_pages_lomem          objspace->heap_pages.range[0]
#define heap_pages_himem          objspace->heap_pages.range[1]
#define heap_allocatable_pages    objspace->heap_pages.allocatable_pages
#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
#define heap_pages_final_slots    objspace->heap_pages.final_slots
#define heap_pages_deferred_final objspace->heap_pages.deferred_final
#define heap_eden                 (&objspace->eden_heap)
#define heap_tomb                 (&objspace->tomb_heap)
#define dont_gc                   objspace->flags.dont_gc
#define during_gc                 objspace->flags.during_gc
#define finalizing                objspace->atomic_flags.finalizing
#define finalizer_table           objspace->finalizer_table
#define global_list               objspace->global_list
#define ruby_gc_stressful         objspace->flags.gc_stressful
#define ruby_gc_stress_mode       objspace->gc_stress_mode
#if GC_DEBUG_STRESS_TO_CLASS
#define stress_to_class           objspace->stress_to_class
#else
#define stress_to_class           0
#endif

static inline enum gc_mode
gc_mode_verify(enum gc_mode mode)
{
#if RGENGC_CHECK_MODE > 0
    switch (mode) {
      case gc_mode_none:
      case gc_mode_marking:
      case gc_mode_sweeping:
        break;
      default:
        rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
    }
#endif
    return mode;
}

#define gc_mode(objspace)           gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
#define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))

#define is_marking(objspace)  (gc_mode(objspace) == gc_mode_marking)
#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
#if USE_RGENGC
#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
#else
#define is_full_marking(objspace) TRUE
#endif
#if GC_ENABLE_INCREMENTAL_MARK
#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
#else
#define is_incremental_marking(objspace) FALSE
#endif
#if GC_ENABLE_INCREMENTAL_MARK
#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
#else
#define will_be_incremental_marking(objspace) FALSE
#endif
#define has_sweeping_pages(heap) ((heap)->sweeping_page != 0)
#define is_lazy_sweeping(heap)   (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap))

#if SIZEOF_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
   ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
#else
# error not supported
#endif

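/* Editor's illustration (not part of the original source): on platforms
 * where a long is pointer-sized, an object's address doubles as its
 * (non-Fixnum-colliding) id by setting the Fixnum tag bit, and
 * obj_id_to_ref clears it again.  The address below is hypothetical.
 * Compiled out via #if 0. */
#if 0
static void
obj_id_demo(void)
{
    VALUE obj = (VALUE)0x7f0000010008;     /* hypothetical heap slot */
    VALUE oid = nonspecial_obj_id(obj);    /* address | FIXNUM_FLAG */
    GC_ASSERT(obj_id_to_ref(oid) == obj);  /* round-trips back to the ref */
}
#endif
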
#define RANY(o) ((RVALUE*)(o))

struct RZombie {
    struct RBasic basic;
    VALUE next;
    void (*dfree)(void *);
    void *data;
};

#define RZOMBIE(o) ((struct RZombie *)(o))

#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]

#if RUBY_MARK_FREE_DEBUG
int ruby_gc_debug_indent = 0;
#endif
VALUE rb_mGC;
int ruby_disable_gc = 0;

void rb_iseq_mark(const rb_iseq_t *iseq);
void rb_iseq_update_references(rb_iseq_t *iseq);
void rb_iseq_free(const rb_iseq_t *iseq);
size_t rb_iseq_memsize(const rb_iseq_t *iseq);
void rb_vm_update_references(void *ptr);

void rb_gcdebug_print_obj_condition(VALUE obj);

static VALUE define_final0(VALUE obj, VALUE block);

NORETURN(static void negative_size_allocation_error(const char *));

static void init_mark_stack(mark_stack_t *stack);

static int ready_to_gc(rb_objspace_t *objspace);

static int garbage_collect(rb_objspace_t *, int reason);

static int  gc_start(rb_objspace_t *objspace, int reason);
static void gc_rest(rb_objspace_t *objspace);
static inline void gc_enter(rb_objspace_t *objspace, const char *event);
static inline void gc_exit(rb_objspace_t *objspace, const char *event);

static void gc_marks(rb_objspace_t *objspace, int full_mark);
static void gc_marks_start(rb_objspace_t *objspace, int full);
static int  gc_marks_finish(rb_objspace_t *objspace);
static void gc_marks_rest(rb_objspace_t *objspace);
static void gc_marks_step(rb_objspace_t *objspace, int slots);
static void gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap);

static void gc_sweep(rb_objspace_t *objspace);
static void gc_sweep_start(rb_objspace_t *objspace);
static void gc_sweep_finish(rb_objspace_t *objspace);
static int  gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap);
static void gc_sweep_rest(rb_objspace_t *objspace);
static void gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap);

static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
static void gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr);
NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr));
static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);

static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
static int gc_mark_stacked_objects_all(rb_objspace_t *);
static void gc_grey(rb_objspace_t *objspace, VALUE ptr);

static inline int gc_mark_set(rb_objspace_t *objspace, VALUE obj);
NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));

static void   push_mark_stack(mark_stack_t *, VALUE);
static int    pop_mark_stack(mark_stack_t *, VALUE *);
static size_t mark_stack_size(mark_stack_t *stack);
static void   shrink_stack_chunk_cache(mark_stack_t *stack);

static size_t obj_memsize_of(VALUE obj, int use_all_types);
static void gc_verify_internal_consistency(rb_objspace_t *objspace);
static int gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj);
static int gc_verify_heap_pages(rb_objspace_t *objspace);

static void gc_stress_set(rb_objspace_t *objspace, VALUE flag);
static VALUE gc_disable_no_rest(rb_objspace_t *);

static double getrusage_time(void);
static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, int reason);
static inline void gc_prof_timer_start(rb_objspace_t *);
static inline void gc_prof_timer_stop(rb_objspace_t *);
static inline void gc_prof_mark_timer_start(rb_objspace_t *);
static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
static inline void gc_prof_set_malloc_info(rb_objspace_t *);
static inline void gc_prof_set_heap_info(rb_objspace_t *);

#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (gc_object_moved_p(_objspace, (VALUE)_thing)) { \
        *((_type *)(&_thing)) = (_type)RMOVED((_thing))->destination; \
    } \
} while (0)

#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)

#define gc_prof_record(objspace) (objspace)->profile.current_record
#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)

#ifdef HAVE_VA_ARGS_MACRO
# define gc_report(level, objspace, ...) \
    if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
#else
# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
#endif
PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
static const char *obj_info(VALUE obj);

#define PUSH_MARK_FUNC_DATA(v) do { \
    struct mark_func_data_struct *prev_mark_func_data = objspace->mark_func_data; \
    objspace->mark_func_data = (v);

#define POP_MARK_FUNC_DATA() objspace->mark_func_data = prev_mark_func_data;} while (0)

/*
 * 1 - TSC (H/W Time Stamp Counter)
 * 2 - getrusage
 */
#ifndef TICK_TYPE
#define TICK_TYPE 1
#endif

#if USE_TICK_T

#if TICK_TYPE == 1
/* the following code is only for internal tuning. */

/* Source code to use RDTSC is quoted and modified from
 * http://www.mcs.anl.gov/~kazutomo/rdtsc.html
 * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
 */

#if defined(__GNUC__) && defined(__i386__)
typedef unsigned long long tick_t;
#define PRItick "llu"
static inline tick_t
tick(void)
{
    unsigned long long int x;
    __asm__ __volatile__ ("rdtsc" : "=A" (x));
    return x;
}

#elif defined(__GNUC__) && defined(__x86_64__)
typedef unsigned long long tick_t;
#define PRItick "llu"

static __inline__ tick_t
tick(void)
{
    unsigned long hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
}

#elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
typedef unsigned long long tick_t;
#define PRItick "llu"

static __inline__ tick_t
tick(void)
{
    unsigned long long val = __builtin_ppc_get_timebase();
    return val;
}

#elif defined(_WIN32) && defined(_MSC_VER)
#include <intrin.h>
typedef unsigned __int64 tick_t;
#define PRItick "llu"

static inline tick_t
tick(void)
{
    return __rdtsc();
}

#else /* use clock */
typedef clock_t tick_t;
#define PRItick "llu"

static inline tick_t
tick(void)
{
    return clock();
}
#endif /* TSC */

#elif TICK_TYPE == 2
typedef double tick_t;
#define PRItick "4.9f"

static inline tick_t
tick(void)
{
    return getrusage_time();
}
#else /* TICK_TYPE */
#error "choose tick type"
#endif /* TICK_TYPE */

#define MEASURE_LINE(expr) do { \
    volatile tick_t start_time = tick(); \
    volatile tick_t end_time; \
    expr; \
    end_time = tick(); \
    fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
} while (0)

#else /* USE_TICK_T */
#define MEASURE_LINE(expr) expr
#endif /* USE_TICK_T */

#define FL_CHECK2(name, x, pred) \
    ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
     (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
#define FL_TEST2(x,f)  FL_CHECK2("FL_TEST2",  x, FL_TEST_RAW((x),(f)) != 0)
#define FL_SET2(x,f)   FL_CHECK2("FL_SET2",   x, RBASIC(x)->flags |= (f))
#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))

#define RVALUE_MARK_BITMAP(obj)       MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
#define RVALUE_PIN_BITMAP(obj)        MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))

#if USE_RGENGC
#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
#define RVALUE_UNCOLLECTIBLE_BITMAP(obj)  MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
#define RVALUE_MARKING_BITMAP(obj)        MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))

#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj)  MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
#define RVALUE_PAGE_MARKING(page, obj)        MARKED_IN_BITMAP((page)->marking_bits, (obj))

#define RVALUE_OLD_AGE   3
#define RVALUE_AGE_SHIFT 5 /* FL_PROMOTED0 bit */

static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj);
static int rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj);
static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);

static inline int
RVALUE_FLAGS_AGE(VALUE flags)
{
    return (int)((flags & (FL_PROMOTED0 | FL_PROMOTED1)) >> RVALUE_AGE_SHIFT);
}
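
/* Editor's illustration (not part of the original source): the age counter
 * lives in the two flag bits FL_PROMOTED0|FL_PROMOTED1 starting at bit
 * RVALUE_AGE_SHIFT, so it can count 0..RVALUE_OLD_AGE.  The setter
 * RVALUE_FLAGS_AGE_SET used here is defined further below.  Compiled out
 * via #if 0. */
#if 0
static void
age_bits_demo(void)
{
    VALUE flags = RVALUE_FLAGS_AGE_SET(0, 2);
    GC_ASSERT(RVALUE_FLAGS_AGE(flags) == 2);
    GC_ASSERT((flags & (FL_PROMOTED0 | FL_PROMOTED1)) == ((VALUE)2 << RVALUE_AGE_SHIFT));
}
#endif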

#endif /* USE_RGENGC */

static int
check_rvalue_consistency_force(const VALUE obj, int terminate)
{
    rb_objspace_t *objspace = &rb_objspace;
    int err = 0;

    if (SPECIAL_CONST_P(obj)) {
        fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
        err++;
    }
    else if (!is_pointer_to_heap(objspace, (void *)obj)) {
        /* check if it is in tomb_pages */
        struct heap_page *page = NULL;
        list_for_each(&heap_tomb->pages, page, page_node) {
            if (&page->start[0] <= (RVALUE *)obj &&
                (RVALUE *)obj < &page->start[page->total_slots]) {
                fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
                        (void *)obj, (void *)page);
                err++;
                goto skip;
            }
        }
        fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
        err++;
      skip:
        ;
    }
    else {
        const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
        const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
        const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
        const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0, remembered_bit = marking_bit;
        const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);

        if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
            fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
            err++;
        }
        if (BUILTIN_TYPE(obj) == T_NONE) {
            fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
            err++;
        }
        if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
            fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
            err++;
        }

        obj_memsize_of((VALUE)obj, FALSE);

        /* check generation
         *
         * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
         */
        if (age > 0 && wb_unprotected_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
            err++;
        }

        if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
            err++;
        }

        if (!is_full_marking(objspace)) {
            if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
                        obj_info(obj), age);
                err++;
            }
            if (remembered_bit && age != RVALUE_OLD_AGE) {
                fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
                        obj_info(obj), age);
                err++;
            }
        }

        /*
         * check coloring
         *
         *               marking:false  marking:true
         * marked:false  white          *invalid*
         * marked:true   black          grey
         */
        if (is_incremental_marking(objspace) && marking_bit) {
            if (!is_marking(objspace) && !mark_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
                err++;
            }
        }
    }

    if (err > 0 && terminate) {
        rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
    }

    return err;
}

#if RGENGC_CHECK_MODE == 0
static inline VALUE
check_rvalue_consistency(const VALUE obj)
{
    return obj;
}
#else
static VALUE
check_rvalue_consistency(const VALUE obj)
{
    check_rvalue_consistency_force(obj, TRUE);
    return obj;
}
#endif

static inline int
gc_object_moved_p(rb_objspace_t * objspace, VALUE obj)
{
    if (RB_SPECIAL_CONST_P(obj)) {
        return FALSE;
    }
    else {
        void *poisoned = asan_poisoned_object_p(obj);
        asan_unpoison_object(obj, false);

        int ret = BUILTIN_TYPE(obj) == T_MOVED;
        /* Re-poison slot if it's not the one we want */
        if (poisoned) {
            GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
            asan_poison_object(obj);
        }
        return ret;
    }
}

static inline int
RVALUE_MARKED(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_MARK_BITMAP(obj) != 0;
}

static inline int
RVALUE_PINNED(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_PIN_BITMAP(obj) != 0;
}

#if USE_RGENGC
static inline int
RVALUE_WB_UNPROTECTED(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
}

static inline int
RVALUE_MARKING(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_MARKING_BITMAP(obj) != 0;
}

static inline int
RVALUE_REMEMBERED(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_MARKING_BITMAP(obj) != 0;
}

static inline int
RVALUE_UNCOLLECTIBLE(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
}

static inline int
RVALUE_OLD_P_RAW(VALUE obj)
{
    const VALUE promoted = FL_PROMOTED0 | FL_PROMOTED1;
    return (RBASIC(obj)->flags & promoted) == promoted;
}

static inline int
RVALUE_OLD_P(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_OLD_P_RAW(obj);
}

#if RGENGC_CHECK_MODE || GC_DEBUG
static inline int
RVALUE_AGE(VALUE obj)
{
    check_rvalue_consistency(obj);
    return RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
}
#endif

static inline void
RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
{
    MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
    objspace->rgengc.old_objects++;
    rb_transient_heap_promote(obj);

#if RGENGC_PROFILE >= 2
    objspace->profile.total_promoted_count++;
    objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;
#endif
}

static inline void
RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
{
    RB_DEBUG_COUNTER_INC(obj_promote);
    RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
}

static inline VALUE
RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
{
    flags &= ~(FL_PROMOTED0 | FL_PROMOTED1);
    flags |= (age << RVALUE_AGE_SHIFT);
    return flags;
}

/* set age to age+1 */
static inline void
RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
{
    VALUE flags = RBASIC(obj)->flags;
    int age = RVALUE_FLAGS_AGE(flags);

    if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
        rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
    }

    age++;
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);

    if (age == RVALUE_OLD_AGE) {
        RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
    }
    check_rvalue_consistency(obj);
}

/* set age to RVALUE_OLD_AGE */
static inline void
RVALUE_AGE_SET_OLD(rb_objspace_t *objspace, VALUE obj)
{
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));

    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE);
    RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);

    check_rvalue_consistency(obj);
}

/* set age to RVALUE_OLD_AGE - 1 */
static inline void
RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
{
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));

    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE - 1);

    check_rvalue_consistency(obj);
}

static inline void
RVALUE_DEMOTE_RAW(rb_objspace_t *objspace, VALUE obj)
{
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
    CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
}

static inline void
RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
{
    check_rvalue_consistency(obj);
    GC_ASSERT(RVALUE_OLD_P(obj));

    if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
        CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
    }

    RVALUE_DEMOTE_RAW(objspace, obj);

    if (RVALUE_MARKED(obj)) {
        objspace->rgengc.old_objects--;
    }

    check_rvalue_consistency(obj);
}

static inline void
RVALUE_AGE_RESET_RAW(VALUE obj)
{
    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
}

static inline void
RVALUE_AGE_RESET(VALUE obj)
{
    check_rvalue_consistency(obj);
    GC_ASSERT(!RVALUE_OLD_P(obj));

    RVALUE_AGE_RESET_RAW(obj);
    check_rvalue_consistency(obj);
}

static inline int
RVALUE_BLACK_P(VALUE obj)
{
    return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
}

#if 0
static inline int
RVALUE_GREY_P(VALUE obj)
{
    return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
}
#endif

static inline int
RVALUE_WHITE_P(VALUE obj)
{
    return RVALUE_MARKED(obj) == FALSE;
}

#endif /* USE_RGENGC */

/*
  --------------------------- ObjectSpace -----------------------------
*/

static inline void *
calloc1(size_t n)
{
    return calloc(1, n);
}

rb_objspace_t *
rb_objspace_alloc(void)
{
    rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
    malloc_limit = gc_params.malloc_limit_min;
    list_head_init(&objspace->eden_heap.pages);
    list_head_init(&objspace->tomb_heap.pages);
    dont_gc = TRUE;

    return objspace;
}

static void free_stack_chunks(mark_stack_t *);
static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);

void
rb_objspace_free(rb_objspace_t *objspace)
{
    if (is_lazy_sweeping(heap_eden))
        rb_bug("lazy sweeping underway when freeing object space");

    if (objspace->profile.records) {
        free(objspace->profile.records);
        objspace->profile.records = 0;
    }

    if (global_list) {
        struct gc_list *list, *next;
        for (list = global_list; list; list = next) {
            next = list->next;
            xfree(list);
        }
    }
    if (heap_pages_sorted) {
        size_t i;
        for (i = 0; i < heap_allocated_pages; ++i) {
            heap_page_free(objspace, heap_pages_sorted[i]);
        }
        free(heap_pages_sorted);
        heap_allocated_pages = 0;
        heap_pages_sorted_length = 0;
        heap_pages_lomem = 0;
        heap_pages_himem = 0;

        objspace->eden_heap.total_pages = 0;
        objspace->eden_heap.total_slots = 0;
    }
    st_free_table(objspace->id_to_obj_tbl);
    st_free_table(objspace->obj_to_id_tbl);
    free_stack_chunks(&objspace->mark_stack);
    free(objspace);
}

static void
heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
{
    struct heap_page **sorted;
    size_t size = size_mul_or_raise(next_length, sizeof(struct heap_page *), rb_eRuntimeError);

    gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n", (int)next_length, (int)size);

    if (heap_pages_sorted_length > 0) {
        sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
        if (sorted) heap_pages_sorted = sorted;
    }
    else {
        sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
    }

    if (sorted == 0) {
        rb_memerror();
    }

    heap_pages_sorted_length = next_length;
}

static void
heap_pages_expand_sorted(rb_objspace_t *objspace)
{
    /* usually heap_allocatable_pages + heap_eden->total_pages == heap_pages_sorted_length
     * because heap_allocatable_pages contains heap_tomb->total_pages (recycle heap_tomb pages).
     * however, if there are pages which do not have empty slots, then try to create new pages
     * so that the additional allocatable_pages counts (heap_tomb->total_pages) are added.
     */
    size_t next_length = heap_allocatable_pages;
    next_length += heap_eden->total_pages;
    next_length += heap_tomb->total_pages;

    if (next_length > heap_pages_sorted_length) {
        heap_pages_expand_sorted_to(objspace, next_length);
    }

    GC_ASSERT(heap_allocatable_pages + heap_eden->total_pages <= heap_pages_sorted_length);
    GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
}

static void
heap_allocatable_pages_set(rb_objspace_t *objspace, size_t s)
{
    heap_allocatable_pages = s;
    heap_pages_expand_sorted(objspace);
}

static inline void
heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
{
    RVALUE *p = (RVALUE *)obj;
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);

    p->as.free.flags = 0;
    p->as.free.next = page->freelist;
    page->freelist = p;
    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));

    if (RGENGC_CHECK_MODE &&
        /* obj should belong to page */
        !(&page->start[0] <= (RVALUE *)obj &&
          (RVALUE *)obj < &page->start[page->total_slots] &&
          obj % sizeof(RVALUE) == 0)) {
        rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
    }

    asan_poison_object(obj);

    gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
}

static inline void
heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
{
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
    GC_ASSERT(page->free_slots != 0);
    if (page->freelist) {
        page->free_next = heap->free_pages;
        heap->free_pages = page;
    }
    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
}

#if GC_ENABLE_INCREMENTAL_MARK
static inline int
heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
{
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
    if (page->freelist) {
        page->free_next = heap->pooled_pages;
        heap->pooled_pages = page;
        objspace->rincgc.pooled_slots += page->free_slots;
        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));

        return TRUE;
    }
    else {
        asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));

        return FALSE;
    }
}
#endif

static void
heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
{
    list_del(&page->page_node);
    heap->total_pages--;
    heap->total_slots -= page->total_slots;
}

static void rb_aligned_free(void *ptr);

static void
heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
{
    heap_allocated_pages--;
    objspace->profile.total_freed_pages++;
    rb_aligned_free(GET_PAGE_BODY(page->start));
    free(page);
}

static void
heap_pages_free_unused_pages(rb_objspace_t *objspace)
{
    size_t i, j;

    if (!list_empty(&heap_tomb->pages)) {
        for (i = j = 1; j < heap_allocated_pages; i++) {
            struct heap_page *page = heap_pages_sorted[i];

            if (page->flags.in_tomb && page->free_slots == page->total_slots) {
                heap_unlink_page(objspace, heap_tomb, page);
                heap_page_free(objspace, page);
            }
            else {
                if (i != j) {
                    heap_pages_sorted[j] = page;
                }
                j++;
            }
        }
        GC_ASSERT(j == heap_allocated_pages);
    }
}

static struct heap_page *
heap_page_allocate(rb_objspace_t *objspace)
{
    RVALUE *start, *end, *p;
    struct heap_page *page;
    struct heap_page_body *page_body = 0;
    size_t hi, lo, mid;
    int limit = HEAP_PAGE_OBJ_LIMIT;

    /* assign heap_page body (contains heap_page_header and RVALUEs) */
    page_body = (struct heap_page_body *)rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
    if (page_body == 0) {
        rb_memerror();
    }

    /* assign heap_page entry */
    page = calloc1(sizeof(struct heap_page));
    if (page == 0) {
        rb_aligned_free(page_body);
        rb_memerror();
    }

    /* adjust obj_limit (object number available in this page) */
    start = (RVALUE*)((VALUE)page_body + sizeof(struct heap_page_header));
    if ((VALUE)start % sizeof(RVALUE) != 0) {
        int delta = (int)(sizeof(RVALUE) - ((VALUE)start % sizeof(RVALUE)));
        start = (RVALUE*)((VALUE)start + delta);
        limit = (HEAP_PAGE_SIZE - (int)((VALUE)start - (VALUE)page_body))/(int)sizeof(RVALUE);
    }
    end = start + limit;

    /* setup heap_pages_sorted */
    lo = 0;
    hi = heap_allocated_pages;
    while (lo < hi) {
        struct heap_page *mid_page;

        mid = (lo + hi) / 2;
        mid_page = heap_pages_sorted[mid];
        if (mid_page->start < start) {
            lo = mid + 1;
        }
        else if (mid_page->start > start) {
            hi = mid;
        }
        else {
            rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
        }
    }

    if (hi < heap_allocated_pages) {
        MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page *, heap_allocated_pages - hi);
    }

    heap_pages_sorted[hi] = page;

    heap_allocated_pages++;

    GC_ASSERT(heap_eden->total_pages + heap_tomb->total_pages == heap_allocated_pages - 1);
    GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);

    objspace->profile.total_allocated_pages++;

    if (heap_allocated_pages > heap_pages_sorted_length) {
        rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
               heap_allocated_pages, heap_pages_sorted_length);
    }

    if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
    if (heap_pages_himem < end) heap_pages_himem = end;

    page->start = start;
    page->total_slots = limit;
    page_body->header.page = page;

    for (p = start; p != end; p++) {
        gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
        heap_page_add_freeobj(objspace, page, (VALUE)p);
    }
    page->free_slots = limit;

    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
    return page;
}

static struct heap_page *
heap_page_resurrect(rb_objspace_t *objspace)
{
    struct heap_page *page = 0, *next;

    list_for_each_safe(&heap_tomb->pages, page, next, page_node) {
        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
        if (page->freelist != NULL) {
            heap_unlink_page(objspace, heap_tomb, page);
            asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
            return page;
        }
    }

    return NULL;
}

static struct heap_page *
heap_page_create(rb_objspace_t *objspace)
{
    struct heap_page *page;
    const char *method = "recycle";

    heap_allocatable_pages--;

    page = heap_page_resurrect(objspace);

    if (page == NULL) {
        page = heap_page_allocate(objspace);
        method = "allocate";
    }
    if (0) fprintf(stderr, "heap_page_create: %s - %p, heap_pages_sorted_length: %d, heap_allocated_pages: %d, tomb->total_pages: %d\n",
                   method, (void *)page, (int)heap_pages_sorted_length, (int)heap_allocated_pages, (int)heap_tomb->total_pages);
    return page;
}

static void
heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
{
    page->flags.in_tomb = (heap == heap_tomb);
    list_add(&heap->pages, &page->page_node);
    heap->total_pages++;
    heap->total_slots += page->total_slots;
}

static void
heap_assign_page(rb_objspace_t *objspace, rb_heap_t *heap)
{
    struct heap_page *page = heap_page_create(objspace);
    heap_add_page(objspace, heap, page);
    heap_add_freepage(heap, page);
}

static void
heap_add_pages(rb_objspace_t *objspace, rb_heap_t *heap, size_t add)
{
    size_t i;

    heap_allocatable_pages_set(objspace, add);

    for (i = 0; i < add; i++) {
        heap_assign_page(objspace, heap);
    }

    GC_ASSERT(heap_allocatable_pages == 0);
}

static size_t
heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots)
{
    double goal_ratio = gc_params.heap_free_slots_goal_ratio;
    size_t used = heap_eden->total_pages + heap_allocatable_pages;
    size_t next_used;

    if (goal_ratio == 0.0) {
        next_used = (size_t)(used * gc_params.growth_factor);
    }
    else {
        /* Find `f' where free_slots = f * total_slots * goal_ratio
         *  => f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots)
         */
        double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);

        if (f > gc_params.growth_factor) f = gc_params.growth_factor;
        if (f < 1.0) f = 1.1;

        next_used = (size_t)(f * used);

        if (0) {
            fprintf(stderr,
                    "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
                    " G(%1.2f), f(%1.2f),"
                    " used(%8"PRIuSIZE") => next_used(%8"PRIuSIZE")\n",
                    free_slots, total_slots, free_slots/(double)total_slots,
                    goal_ratio, f, used, next_used);
        }
    }

    if (gc_params.growth_max_slots > 0) {
        size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
        if (next_used > max_used) next_used = max_used;
    }

    return next_used - used;
}
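
/* Editor's illustration (not part of the original source): the formula above
 * with concrete numbers.  With total_slots = 10000, free_slots = 1000 and
 * the default goal_ratio of 0.40:
 *
 *   f = (10000 - 1000) / ((1 - 0.40) * 10000) = 9000 / 6000 = 1.5
 *
 * so the page count grows by 1.5x (subject to the growth_factor cap and the
 * growth_max_slots clamp).  Compiled out via #if 0. */
#if 0
static void
heap_extend_demo(void)
{
    double f = (double)(10000 - 1000) / ((1 - 0.40) * 10000);
    GC_ASSERT(f == 1.5);
}
#endif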

static void
heap_set_increment(rb_objspace_t *objspace, size_t additional_pages)
{
    size_t used = heap_eden->total_pages;
    size_t next_used_limit = used + additional_pages;

    if (next_used_limit == heap_allocated_pages) next_used_limit++;

    heap_allocatable_pages_set(objspace, next_used_limit - used);

    gc_report(1, objspace, "heap_set_increment: heap_allocatable_pages is %d\n", (int)heap_allocatable_pages);
}

static int
heap_increment(rb_objspace_t *objspace, rb_heap_t *heap)
{
    if (heap_allocatable_pages > 0) {
        gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %d, heap_pages_inc: %d, heap->total_pages: %d\n",
                  (int)heap_pages_sorted_length, (int)heap_allocatable_pages, (int)heap->total_pages);

        GC_ASSERT(heap_allocatable_pages + heap_eden->total_pages <= heap_pages_sorted_length);
        GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);

        heap_assign_page(objspace, heap);
        return TRUE;
    }
    return FALSE;
}

static void
heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
{
    GC_ASSERT(heap->free_pages == NULL);

    if (is_lazy_sweeping(heap)) {
        gc_sweep_continue(objspace, heap);
    }
    else if (is_incremental_marking(objspace)) {
        gc_marks_continue(objspace, heap);
    }

    if (heap->free_pages == NULL &&
        (will_be_incremental_marking(objspace) || heap_increment(objspace, heap) == FALSE) &&
        gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
        rb_memerror();
    }
}

static RVALUE *
heap_get_freeobj_from_next_freepage(rb_objspace_t *objspace, rb_heap_t *heap)
{
    struct heap_page *page;
    RVALUE *p;

    while (heap->free_pages == NULL) {
        heap_prepare(objspace, heap);
    }
    page = heap->free_pages;
    heap->free_pages = page->free_next;
    heap->using_page = page;

    GC_ASSERT(page->free_slots != 0);
    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
    p = page->freelist;
    page->freelist = NULL;
    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
    page->free_slots = 0;
    asan_unpoison_object((VALUE)p, true);
    return p;
}

static inline VALUE
heap_get_freeobj_head(rb_objspace_t *objspace, rb_heap_t *heap)
{
    RVALUE *p = heap->freelist;
    if (LIKELY(p != NULL)) {
        heap->freelist = p->as.free.next;
    }
    asan_unpoison_object((VALUE)p, true);
    return (VALUE)p;
}

static inline VALUE
heap_get_freeobj(rb_objspace_t *objspace, rb_heap_t *heap)
{
    RVALUE *p = heap->freelist;

    while (1) {
        if (LIKELY(p != NULL)) {
            asan_unpoison_object((VALUE)p, true);
            heap->freelist = p->as.free.next;
            return (VALUE)p;
        }
        else {
            p = heap_get_freeobj_from_next_freepage(objspace, heap);
        }
    }
}

void
rb_objspace_set_event_hook(const rb_event_flag_t event)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
    objspace->flags.has_hook = (objspace->hook_events != 0);
}

static void
gc_event_hook_body(rb_execution_context_t *ec, rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
{
    const VALUE *pc = ec->cfp->pc;
    if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
        /* increment PC because source line is calculated with PC-1 */
        ec->cfp->pc++;
    }
    EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
    ec->cfp->pc = pc;
}

#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))

#define gc_event_hook(objspace, event, data) do { \
    if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
        gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
    } \
} while (0)

2110 static inline VALUE
2111 newobj_init(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, rb_objspace_t *objspace, VALUE obj)
2112 {
2113 #if !__has_feature(memory_sanitizer)
2114  GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
2115  GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2116 #endif
2117 
2118  /* OBJSETUP */
2119  struct RVALUE buf = {
2120  .as = {
2121  .values = {
2122  .basic = {
2123  .flags = flags,
2124  .klass = klass,
2125  },
2126  .v1 = v1,
2127  .v2 = v2,
2128  .v3 = v3,
2129  },
2130  },
2131  };
2132  MEMCPY(RANY(obj), &buf, RVALUE, 1);
2133 
2134 #if RGENGC_CHECK_MODE
2135  GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
2136  GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
2137  GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
2138  GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
2139 
2140  if (flags & FL_PROMOTED1) {
2141  if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
2142  }
2143  else {
2144  if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
2145  }
2146  if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
2147 #endif
2148 
2149 #if USE_RGENGC
2150  if (UNLIKELY(wb_protected == FALSE)) {
2151  MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2152  }
2153 #endif
2154 
2155 #if RGENGC_PROFILE
2156  if (wb_protected) {
2157  objspace->profile.total_generated_normal_object_count++;
2158 #if RGENGC_PROFILE >= 2
2159  objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
2160 #endif
2161  }
2162  else {
2163  objspace->profile.total_generated_shady_object_count++;
2164 #if RGENGC_PROFILE >= 2
2165  objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
2166 #endif
2167  }
2168 #endif
2169 
2170 #if GC_DEBUG
2171  RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
2172  GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
2173 #endif
2174 
2175  objspace->total_allocated_objects++;
2176 
2177  gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
2178 
2179 #if RGENGC_OLD_NEWOBJ_CHECK > 0
2180  {
2181  static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2182 
2183  if (!is_incremental_marking(objspace) &&
2184  flags & FL_WB_PROTECTED && /* do not promote WB unprotected objects */
2185  ! RB_TYPE_P(obj, T_ARRAY)) { /* array.c assumes that allocated objects are new */
2186  if (--newobj_cnt == 0) {
2187  newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2188 
2189  gc_mark_set(objspace, obj);
2190  RVALUE_AGE_SET_OLD(objspace, obj);
2191 
2192  rb_gc_writebarrier_remember(obj);
2193  }
2194  }
2195  }
2196 #endif
2197  check_rvalue_consistency(obj);
2198  return obj;
2199 }
2200 
2201 static inline VALUE
2202 newobj_slowpath(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace, int wb_protected)
2203 {
2204  VALUE obj;
2205 
2206  if (UNLIKELY(during_gc || ruby_gc_stressful)) {
2207  if (during_gc) {
2208  dont_gc = 1;
2209  during_gc = 0;
2210  rb_bug("object allocation during garbage collection phase");
2211  }
2212 
2213  if (ruby_gc_stressful) {
2214  if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2215  rb_memerror();
2216  }
2217  }
2218  }
2219 
2220  obj = heap_get_freeobj(objspace, heap_eden);
2221  newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
2222  gc_event_hook(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj);
2223  return obj;
2224 }
2225 
2226 NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace));
2227 NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace));
2228 
2229 static VALUE
2230 newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace)
2231 {
2232  return newobj_slowpath(klass, flags, v1, v2, v3, objspace, TRUE);
2233 }
2234 
2235 static VALUE
2236 newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace)
2237 {
2238  return newobj_slowpath(klass, flags, v1, v2, v3, objspace, FALSE);
2239 }
2240 
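/* newobj_of() below first tries an inlined fast path: when no GC is in
 * progress, GC stress is disabled, and no object-allocation event hooks
 * are installed, it pops an object straight off the eden freelist.
 * Everything else falls through to the NOINLINE slow paths above, which
 * may run the collector or call rb_memerror(). */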
2241 static inline VALUE
2242 newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected)
2243 {
2244  rb_objspace_t *objspace = &rb_objspace;
2245  VALUE obj;
2246 
2247  RB_DEBUG_COUNTER_INC(obj_newobj);
2248  (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2249 
2250 #if GC_DEBUG_STRESS_TO_CLASS
2251  if (UNLIKELY(stress_to_class)) {
2252  long i, cnt = RARRAY_LEN(stress_to_class);
2253  for (i = 0; i < cnt; ++i) {
2254  if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
2255  }
2256  }
2257 #endif
2258  if (!(during_gc ||
2259  ruby_gc_stressful ||
2260  gc_event_hook_available_p(objspace)) &&
2261  (obj = heap_get_freeobj_head(objspace, heap_eden)) != Qfalse) {
2262  return newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
2263  }
2264  else {
2265  RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2266 
2267  return wb_protected ?
2268  newobj_slowpath_wb_protected(klass, flags, v1, v2, v3, objspace) :
2269  newobj_slowpath_wb_unprotected(klass, flags, v1, v2, v3, objspace);
2270  }
2271 }
2272 
2273 VALUE
2274 rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags)
2275 {
2276  GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2277  return newobj_of(klass, flags, 0, 0, 0, FALSE);
2278 }
2279 
2280 VALUE
2281 rb_wb_protected_newobj_of(VALUE klass, VALUE flags)
2282 {
2283  GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2284  return newobj_of(klass, flags, 0, 0, 0, TRUE);
2285 }
2286 
2287 /* for compatibility */
2288 
2289 VALUE
2290 rb_newobj(void)
2291 {
2292  return newobj_of(0, T_NONE, 0, 0, 0, FALSE);
2293 }
2294 
2295 VALUE
2296 rb_newobj_of(VALUE klass, VALUE flags)
2297 {
2298  return newobj_of(klass, flags & ~FL_WB_PROTECTED, 0, 0, 0, flags & FL_WB_PROTECTED);
2299 }
2300 
2301 #define UNEXPECTED_NODE(func) \
2302  rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
2303  BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
2304 
2305 #undef rb_imemo_new
2306 
2307 VALUE
2308 rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
2309 {
2310  VALUE flags = T_IMEMO | (type << FL_USHIFT);
2311  return newobj_of(v0, flags, v1, v2, v3, TRUE);
2312 }
2313 
2314 static VALUE
2315 rb_imemo_tmpbuf_new(VALUE v1, VALUE v2, VALUE v3, VALUE v0)
2316 {
2317  VALUE flags = T_IMEMO | (imemo_tmpbuf << FL_USHIFT);
2318  return newobj_of(v0, flags, v1, v2, v3, FALSE);
2319 }
2320 
2321 static VALUE
2322 rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
2323 {
2324  return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
2325 }
2326 
2327 rb_imemo_tmpbuf_t *
2328 rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
2329 {
2330  return (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new((VALUE)buf, (VALUE)old_heap, (VALUE)cnt, 0);
2331 }
2332 
2333 static size_t
2334 imemo_memsize(VALUE obj)
2335 {
2336  size_t size = 0;
2337  switch (imemo_type(obj)) {
2338  case imemo_ment:
2339  size += sizeof(RANY(obj)->as.imemo.ment.def);
2340  break;
2341  case imemo_iseq:
2342  size += rb_iseq_memsize((rb_iseq_t *)obj);
2343  break;
2344  case imemo_env:
2345  size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
2346  break;
2347  case imemo_tmpbuf:
2348  size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
2349  break;
2350  case imemo_ast:
2351  size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
2352  break;
2353  case imemo_cref:
2354  case imemo_svar:
2355  case imemo_throw_data:
2356  case imemo_ifunc:
2357  case imemo_memo:
2358  case imemo_parser_strterm:
2359  break;
2360  default:
2361  /* unreachable */
2362  break;
2363  }
2364  return size;
2365 }
2366 
2367 #if IMEMO_DEBUG
2368 VALUE
2369 rb_imemo_new_debug(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0, const char *file, int line)
2370 {
2371  VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
2372  fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
2373  return memo;
2374 }
2375 #endif
2376 
2377 VALUE
2378 rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
2379 {
2380  if (klass) Check_Type(klass, T_CLASS);
2381  return newobj_of(klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, FALSE);
2382 }
2383 
2384 #undef rb_data_object_alloc
2385 RUBY_ALIAS_FUNCTION(rb_data_object_alloc(VALUE klass, void *datap,
2386  RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree),
2387  rb_data_object_wrap, (klass, datap, dmark, dfree))
2388 
2389 
2390 VALUE
2391 rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
2392 {
2393  VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
2394  DATA_PTR(obj) = xcalloc(1, size);
2395  return obj;
2396 }
2397 
2398 VALUE
2399 rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
2400 {
2401  if (klass) Check_Type(klass, T_CLASS);
2402  return newobj_of(klass, T_DATA, (VALUE)type, (VALUE)1, (VALUE)datap, type->flags & RUBY_FL_WB_PROTECTED);
2403 }
2404 
2405 #undef rb_data_typed_object_alloc
2406 RUBY_ALIAS_FUNCTION(rb_data_typed_object_alloc(VALUE klass, void *datap,
2407  const rb_data_type_t *type),
2408  rb_data_typed_object_wrap, (klass, datap, type))
2409 
2410 VALUE
2411 rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
2412 {
2413  VALUE obj = rb_data_typed_object_wrap(klass, 0, type);
2414  DATA_PTR(obj) = xcalloc(1, size);
2415  return obj;
2416 }
2417 
2418 size_t
2419 rb_objspace_data_type_memsize(VALUE obj)
2420 {
2421  if (RTYPEDDATA_P(obj)) {
2422  const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
2423  const void *ptr = RTYPEDDATA_DATA(obj);
2424  if (ptr && type->function.dsize) {
2425  return type->function.dsize(ptr);
2426  }
2427  }
2428  return 0;
2429 }
2430 
2431 const char *
2432 rb_objspace_data_type_name(VALUE obj)
2433 {
2434  if (RTYPEDDATA_P(obj)) {
2435  return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
2436  }
2437  else {
2438  return 0;
2439  }
2440 }
2441 
2442 PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
2443 static inline int
2444 is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
2445 {
2446  register RVALUE *p = RANY(ptr);
2447  register struct heap_page *page;
2448  register size_t hi, lo, mid;
2449 
2450  RB_DEBUG_COUNTER_INC(gc_isptr_trial);
2451 
2452  if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
2453  RB_DEBUG_COUNTER_INC(gc_isptr_range);
2454 
2455  if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
2456  RB_DEBUG_COUNTER_INC(gc_isptr_align);
2457 
2458  /* check if p looks like a pointer using bsearch */
2459  lo = 0;
2460  hi = heap_allocated_pages;
2461  while (lo < hi) {
2462  mid = (lo + hi) / 2;
2463  page = heap_pages_sorted[mid];
2464  if (page->start <= p) {
2465  if (p < page->start + page->total_slots) {
2466  RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
2467 
2468  if (page->flags.in_tomb) {
2469  return FALSE;
2470  }
2471  else {
2472  return TRUE;
2473  }
2474  }
2475  lo = mid + 1;
2476  }
2477  else {
2478  hi = mid;
2479  }
2480  }
2481  return FALSE;
2482 }
2483 
2484 static enum rb_id_table_iterator_result
2485 free_const_entry_i(VALUE value, void *data)
2486 {
2487  rb_const_entry_t *ce = (rb_const_entry_t *)value;
2488  xfree(ce);
2489  return ID_TABLE_CONTINUE;
2490 }
2491 
2492 void
2493 rb_free_const_table(struct rb_id_table *tbl)
2494 {
2495  rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
2496  rb_id_table_free(tbl);
2497 }
2498 
2499 static inline void
2500 make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
2501 {
2502  struct RZombie *zombie = RZOMBIE(obj);
2503  zombie->basic.flags = T_ZOMBIE | (zombie->basic.flags & FL_SEEN_OBJ_ID);
2504  zombie->dfree = dfree;
2505  zombie->data = data;
2506  zombie->next = heap_pages_deferred_final;
2507  heap_pages_deferred_final = (VALUE)zombie;
2508 }
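/* A zombie keeps its dfree/data until finalization actually runs: the
 * object is retyped to T_ZOMBIE and pushed onto the global
 * heap_pages_deferred_final chain, which finalize_deferred() drains
 * later (see finalize_list() below). */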
2509 
2510 static inline void
2511 make_io_zombie(rb_objspace_t *objspace, VALUE obj)
2512 {
2513  rb_io_t *fptr = RANY(obj)->as.file.fptr;
2514  make_zombie(objspace, obj, (void (*)(void*))rb_io_fptr_finalize, fptr);
2515 }
2516 
2517 static void
2518 obj_free_object_id(rb_objspace_t *objspace, VALUE obj)
2519 {
2520  VALUE id;
2521 
2522  GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
2523  FL_UNSET(obj, FL_SEEN_OBJ_ID);
2524 
2525  if (st_delete(objspace->obj_to_id_tbl, (st_data_t *)&obj, &id)) {
2526  GC_ASSERT(id);
2527  st_delete(objspace->id_to_obj_tbl, (st_data_t *)&id, NULL);
2528  }
2529  else {
2530  rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
2531  }
2532 }
2533 
2534 static int
2535 obj_free(rb_objspace_t *objspace, VALUE obj)
2536 {
2537  RB_DEBUG_COUNTER_INC(obj_free);
2538 
2539  gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);
2540 
2541  switch (BUILTIN_TYPE(obj)) {
2542  case T_NIL:
2543  case T_FIXNUM:
2544  case T_TRUE:
2545  case T_FALSE:
2546  rb_bug("obj_free() called for broken object");
2547  break;
2548  }
2549 
2550  if (FL_TEST(obj, FL_EXIVAR)) {
2551  rb_free_generic_ivar((VALUE)obj);
2552  FL_UNSET(obj, FL_EXIVAR);
2553  }
2554 
2555  if (FL_TEST(obj, FL_SEEN_OBJ_ID)) {
2556  obj_free_object_id(objspace, obj);
2557  }
2558 
2559 #if USE_RGENGC
2560  if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2561 
2562 #if RGENGC_CHECK_MODE
2563 #define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
2564  CHECK(RVALUE_WB_UNPROTECTED);
2565  CHECK(RVALUE_MARKED);
2566  CHECK(RVALUE_MARKING);
2567  CHECK(RVALUE_UNCOLLECTIBLE);
2568 #undef CHECK
2569 #endif
2570 #endif
2571 
2572  switch (BUILTIN_TYPE(obj)) {
2573  case T_OBJECT:
2574  if ((RANY(obj)->as.basic.flags & ROBJECT_EMBED) ||
2575  RANY(obj)->as.object.as.heap.ivptr == NULL) {
2576  RB_DEBUG_COUNTER_INC(obj_obj_embed);
2577  }
2578  else if (ROBJ_TRANSIENT_P(obj)) {
2579  RB_DEBUG_COUNTER_INC(obj_obj_transient);
2580  }
2581  else {
2582  xfree(RANY(obj)->as.object.as.heap.ivptr);
2583  RB_DEBUG_COUNTER_INC(obj_obj_ptr);
2584  }
2585  break;
2586  case T_MODULE:
2587  case T_CLASS:
2588  mjit_remove_class_serial(RCLASS_SERIAL(obj));
2589  rb_id_table_free(RCLASS_M_TBL(obj));
2590  if (RCLASS_IV_TBL(obj)) {
2591  st_free_table(RCLASS_IV_TBL(obj));
2592  }
2593  if (RCLASS_CONST_TBL(obj)) {
2594  rb_free_const_table(RCLASS_CONST_TBL(obj));
2595  }
2596  if (RCLASS_IV_INDEX_TBL(obj)) {
2597  st_free_table(RCLASS_IV_INDEX_TBL(obj));
2598  }
2599  if (RCLASS_EXT(obj)->subclasses) {
2600  if (BUILTIN_TYPE(obj) == T_MODULE) {
2601  rb_class_detach_module_subclasses(obj);
2602  }
2603  else {
2604  rb_class_detach_subclasses(obj);
2605  }
2606  RCLASS_EXT(obj)->subclasses = NULL;
2607  }
2608  rb_class_remove_from_module_subclasses(obj);
2609  rb_class_remove_from_super_subclasses(obj);
2610  if (RANY(obj)->as.klass.ptr)
2611  xfree(RANY(obj)->as.klass.ptr);
2612  RANY(obj)->as.klass.ptr = NULL;
2613 
2614  (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
2615  (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
2616  break;
2617  case T_STRING:
2618  rb_str_free(obj);
2619  break;
2620  case T_ARRAY:
2621  rb_ary_free(obj);
2622  break;
2623  case T_HASH:
2624 #if USE_DEBUG_COUNTER
2625  switch (RHASH_SIZE(obj)) {
2626  case 0:
2627  RB_DEBUG_COUNTER_INC(obj_hash_empty);
2628  break;
2629  case 1:
2630  RB_DEBUG_COUNTER_INC(obj_hash_1);
2631  break;
2632  case 2:
2633  RB_DEBUG_COUNTER_INC(obj_hash_2);
2634  break;
2635  case 3:
2636  RB_DEBUG_COUNTER_INC(obj_hash_3);
2637  break;
2638  case 4:
2639  RB_DEBUG_COUNTER_INC(obj_hash_4);
2640  break;
2641  case 5:
2642  case 6:
2643  case 7:
2644  case 8:
2645  RB_DEBUG_COUNTER_INC(obj_hash_5_8);
2646  break;
2647  default:
2648  GC_ASSERT(RHASH_SIZE(obj) > 8);
2649  RB_DEBUG_COUNTER_INC(obj_hash_g8);
2650  }
2651 
2652  if (RHASH_AR_TABLE_P(obj)) {
2653  if (RHASH_AR_TABLE(obj) == NULL) {
2654  RB_DEBUG_COUNTER_INC(obj_hash_null);
2655  }
2656  else {
2657  RB_DEBUG_COUNTER_INC(obj_hash_ar);
2658  }
2659  }
2660  else {
2661  RB_DEBUG_COUNTER_INC(obj_hash_st);
2662  }
2663 #endif
2664  if (/* RHASH_AR_TABLE_P(obj) */ !FL_TEST_RAW(obj, RHASH_ST_TABLE_FLAG)) {
2665  struct ar_table_struct *tab = RHASH(obj)->as.ar;
2666 
2667  if (tab) {
2668  if (RHASH_TRANSIENT_P(obj)) {
2669  RB_DEBUG_COUNTER_INC(obj_hash_transient);
2670  }
2671  else {
2672  ruby_xfree(tab);
2673  }
2674  }
2675  }
2676  else {
2677  GC_ASSERT(RHASH_ST_TABLE_P(obj));
2678  st_free_table(RHASH(obj)->as.st);
2679  }
2680  break;
2681  case T_REGEXP:
2682  if (RANY(obj)->as.regexp.ptr) {
2683  onig_free(RANY(obj)->as.regexp.ptr);
2684  RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
2685  }
2686  break;
2687  case T_DATA:
2688  if (DATA_PTR(obj)) {
2689  int free_immediately = FALSE;
2690  void (*dfree)(void *);
2691  void *data = DATA_PTR(obj);
2692 
2693  if (RTYPEDDATA_P(obj)) {
2694  free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
2695  dfree = RANY(obj)->as.typeddata.type->function.dfree;
2696  if (0 && free_immediately == 0) {
2697  /* to expose non-free-immediate T_DATA */
2698  fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);
2699  }
2700  }
2701  else {
2702  dfree = RANY(obj)->as.data.dfree;
2703  }
2704 
2705  if (dfree) {
2706  if (dfree == RUBY_DEFAULT_FREE) {
2707  xfree(data);
2708  RB_DEBUG_COUNTER_INC(obj_data_xfree);
2709  }
2710  else if (free_immediately) {
2711  (*dfree)(data);
2712  RB_DEBUG_COUNTER_INC(obj_data_imm_free);
2713  }
2714  else {
2715  make_zombie(objspace, obj, dfree, data);
2716  RB_DEBUG_COUNTER_INC(obj_data_zombie);
2717  return 1;
2718  }
2719  }
2720  else {
2721  RB_DEBUG_COUNTER_INC(obj_data_empty);
2722  }
2723  }
2724  break;
2725  case T_MATCH:
2726  if (RANY(obj)->as.match.rmatch) {
2727  struct rmatch *rm = RANY(obj)->as.match.rmatch;
2728 #if USE_DEBUG_COUNTER
2729  if (rm->regs.num_regs >= 8) {
2730  RB_DEBUG_COUNTER_INC(obj_match_ge8);
2731  }
2732  else if (rm->regs.num_regs >= 4) {
2733  RB_DEBUG_COUNTER_INC(obj_match_ge4);
2734  }
2735  else if (rm->regs.num_regs >= 1) {
2736  RB_DEBUG_COUNTER_INC(obj_match_under4);
2737  }
2738 #endif
2739  onig_region_free(&rm->regs, 0);
2740  if (rm->char_offset)
2741  xfree(rm->char_offset);
2742  xfree(rm);
2743 
2744  RB_DEBUG_COUNTER_INC(obj_match_ptr);
2745  }
2746  break;
2747  case T_FILE:
2748  if (RANY(obj)->as.file.fptr) {
2749  make_io_zombie(objspace, obj);
2750  RB_DEBUG_COUNTER_INC(obj_file_ptr);
2751  return 1;
2752  }
2753  break;
2754  case T_RATIONAL:
2755  RB_DEBUG_COUNTER_INC(obj_rational);
2756  break;
2757  case T_COMPLEX:
2758  RB_DEBUG_COUNTER_INC(obj_complex);
2759  break;
2760  case T_MOVED:
2761  break;
2762  case T_ICLASS:
2763  /* Basically, T_ICLASS shares its tables with the module */
2764  if (FL_TEST(obj, RICLASS_IS_ORIGIN)) {
2765  rb_id_table_free(RCLASS_M_TBL(obj));
2766  }
2767  if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
2768  rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
2769  }
2770  if (RCLASS_EXT(obj)->subclasses) {
2771  rb_class_detach_subclasses(obj);
2772  RCLASS_EXT(obj)->subclasses = NULL;
2773  }
2774  rb_class_remove_from_module_subclasses(obj);
2775  rb_class_remove_from_super_subclasses(obj);
2776  xfree(RANY(obj)->as.klass.ptr);
2777  RANY(obj)->as.klass.ptr = NULL;
2778 
2779  RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
2780  break;
2781 
2782  case T_FLOAT:
2783  RB_DEBUG_COUNTER_INC(obj_float);
2784  break;
2785 
2786  case T_BIGNUM:
2787  if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
2788  xfree(BIGNUM_DIGITS(obj));
2789  RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
2790  }
2791  else {
2792  RB_DEBUG_COUNTER_INC(obj_bignum_embed);
2793  }
2794  break;
2795 
2796  case T_NODE:
2797  UNEXPECTED_NODE(obj_free);
2798  break;
2799 
2800  case T_STRUCT:
2801  if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
2802  RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
2803  RB_DEBUG_COUNTER_INC(obj_struct_embed);
2804  }
2805  else if (RSTRUCT_TRANSIENT_P(obj)) {
2806  RB_DEBUG_COUNTER_INC(obj_struct_transient);
2807  }
2808  else {
2809  xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
2810  RB_DEBUG_COUNTER_INC(obj_struct_ptr);
2811  }
2812  break;
2813 
2814  case T_SYMBOL:
2815  {
2816  rb_gc_free_dsymbol(obj);
2817  RB_DEBUG_COUNTER_INC(obj_symbol);
2818  }
2819  break;
2820 
2821  case T_IMEMO:
2822  switch (imemo_type(obj)) {
2823  case imemo_ment:
2824  rb_free_method_entry(&RANY(obj)->as.imemo.ment);
2825  RB_DEBUG_COUNTER_INC(obj_imemo_ment);
2826  break;
2827  case imemo_iseq:
2828  rb_iseq_free(&RANY(obj)->as.imemo.iseq);
2829  RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
2830  break;
2831  case imemo_env:
2832  GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
2833  xfree((VALUE *)RANY(obj)->as.imemo.env.env);
2834  RB_DEBUG_COUNTER_INC(obj_imemo_env);
2835  break;
2836  case imemo_tmpbuf:
2837  xfree(RANY(obj)->as.imemo.alloc.ptr);
2838  RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
2839  break;
2840  case imemo_ast:
2841  rb_ast_free(&RANY(obj)->as.imemo.ast);
2842  RB_DEBUG_COUNTER_INC(obj_imemo_ast);
2843  break;
2844  case imemo_cref:
2845  RB_DEBUG_COUNTER_INC(obj_imemo_cref);
2846  break;
2847  case imemo_svar:
2848  RB_DEBUG_COUNTER_INC(obj_imemo_svar);
2849  break;
2850  case imemo_throw_data:
2851  RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
2852  break;
2853  case imemo_ifunc:
2854  RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
2855  break;
2856  case imemo_memo:
2857  RB_DEBUG_COUNTER_INC(obj_imemo_memo);
2858  break;
2859  case imemo_parser_strterm:
2860  RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
2861  break;
2862  default:
2863  /* unreachable */
2864  break;
2865  }
2866  return 0;
2867 
2868  default:
2869  rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
2870  BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
2871  }
2872 
2873  if (FL_TEST(obj, FL_FINALIZE)) {
2874  make_zombie(objspace, obj, 0, 0);
2875  return 1;
2876  }
2877  else {
2878  return 0;
2879  }
2880 }
2881 
2882 
2883 #define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
2884 #define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
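/* A rough worked example, assuming a 64-bit build where sizeof(RVALUE)
 * is 40 (see the table in rb_obj_id() below): OBJ_ID_INCREMENT is then
 * 20 and OBJ_ID_INITIAL is 40, so cached_object_id() hands out the ids
 * 40, 60, 80, ... to heap objects. */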
2885 
2886 static int
2887 object_id_cmp(st_data_t x, st_data_t y)
2888 {
2889  if (RB_TYPE_P(x, T_BIGNUM)) {
2890  return !rb_big_eql(x, y);
2891  } else {
2892  return x != y;
2893  }
2894 }
2895 
2896 static st_index_t
2897 object_id_hash(st_data_t n)
2898 {
2899  if (RB_TYPE_P(n, T_BIGNUM)) {
2900  return FIX2LONG(rb_big_hash(n));
2901  } else {
2902  return st_numhash(n);
2903  }
2904 }
2905 static const struct st_hash_type object_id_hash_type = {
2906  object_id_cmp,
2907  object_id_hash,
2908 };
2909 
2910 void
2911 Init_heap(void)
2912 {
2913  rb_objspace_t *objspace = &rb_objspace;
2914 
2915  objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
2916  objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
2917  objspace->obj_to_id_tbl = st_init_numtable();
2918 
2919 #if RGENGC_ESTIMATE_OLDMALLOC
2920  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
2921 #endif
2922 
2923  heap_add_pages(objspace, heap_eden, gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);
2924  init_mark_stack(&objspace->mark_stack);
2925 
2926  objspace->profile.invoke_time = getrusage_time();
2927  finalizer_table = st_init_numtable();
2928 }
2929 
2930 void
2931 Init_gc_stress(void)
2932 {
2933  rb_objspace_t *objspace = &rb_objspace;
2934 
2935  gc_stress_set(objspace, ruby_initial_gc_stress);
2936 }
2937 
2938 typedef int each_obj_callback(void *, void *, size_t, void *);
2939 
2940 static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data);
2941 static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
2942 
2943 struct each_obj_args {
2944  rb_objspace_t *objspace;
2945  each_obj_callback *callback;
2946  void *data;
2947 };
2948 
2949 static void
2950 objspace_each_objects_without_setup(rb_objspace_t *objspace, each_obj_callback *callback, void *data)
2951 {
2952  size_t i;
2953  struct heap_page *page;
2954  RVALUE *pstart = NULL, *pend;
2955 
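 /* The callback may allocate or free heap pages, so each iteration below
  * re-locates the cursor in heap_pages_sorted from the last visited page
  * start instead of trusting a stale index. */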
2956  i = 0;
2957  while (i < heap_allocated_pages) {
2958  while (0 < i && pstart < heap_pages_sorted[i-1]->start) i--;
2959  while (i < heap_allocated_pages && heap_pages_sorted[i]->start <= pstart) i++;
2960  if (heap_allocated_pages <= i) break;
2961 
2962  page = heap_pages_sorted[i];
2963 
2964  pstart = page->start;
2965  pend = pstart + page->total_slots;
2966 
2967  if ((*callback)(pstart, pend, sizeof(RVALUE), data)) {
2968  break;
2969  }
2970  }
2971 }
2972 
2973 static VALUE
2974 objspace_each_objects_protected(VALUE arg)
2975 {
2976  struct each_obj_args *args = (struct each_obj_args *)arg;
2977  objspace_each_objects_without_setup(args->objspace, args->callback, args->data);
2978  return Qnil;
2979 }
2980 
2981 static VALUE
2982 incremental_enable(VALUE _)
2983 {
2984  rb_objspace_t *objspace = &rb_objspace;
2985 
2986  objspace->flags.dont_incremental = FALSE;
2987  return Qnil;
2988 }
2989 
2990 /*
2991  * rb_objspace_each_objects() is a special C API to walk through the
2992  * Ruby object space. This C API is difficult to use safely; to be
2993  * frank, you should not use it unless you have read the source of
2994  * this function and understand what it does.
2995  *
2996  * 'callback' will be called several times (once per heap page, in the
2997  * current implementation) with:
2998  * vstart: a pointer to the first living object of the heap_page.
2999  * vend: a pointer just past the valid heap_page area.
3000  * stride: the distance to the next VALUE.
3001  *
3002  * If callback() returns non-zero, the iteration is stopped.
3003  *
3004  * This is a sample callback that iterates over living objects:
3005  *
3006  * int
3007  * sample_callback(void *vstart, void *vend, int stride, void *data) {
3008  * VALUE v = (VALUE)vstart;
3009  * for (; v != (VALUE)vend; v += stride) {
3010  * if (RBASIC(v)->flags) { // liveness check
3011  * // do something with live object 'v'
3012  * }
3013  * }
3014  * return 0; // continue the iteration
3015  * }
3016  *
3017  * Note: 'vstart' is not the top of the heap_page. It points at the
3018  * first living object, so that at least one object is grasped per page
3019  * (this avoids a GC issue). It follows that you cannot walk through
3020  * every object slot, including freed slots.
3021  *
3022  * Note: In this implementation 'stride' equals sizeof(RVALUE), but a
3023  * future implementation may pass a different value, so you must use
3024  * the given stride rather than a constant in the iteration.
3025  */
3026 void
3027 rb_objspace_each_objects(each_obj_callback *callback, void *data)
3028 {
3029  objspace_each_objects(&rb_objspace, callback, data);
3030 }
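/* An illustrative sketch (not part of gc.c; count_strings_i is a
 * hypothetical caller) showing the callback protocol documented above,
 * here counting T_STRING objects:
 *
 *     static int
 *     count_strings_i(void *vstart, void *vend, size_t stride, void *data)
 *     {
 *         size_t *count = (size_t *)data;
 *         VALUE v = (VALUE)vstart;
 *         for (; v != (VALUE)vend; v += stride) {
 *             if (RBASIC(v)->flags && BUILTIN_TYPE(v) == T_STRING) {
 *                 (*count)++;
 *             }
 *         }
 *         return 0;  // keep iterating
 *     }
 *
 *     size_t count = 0;
 *     rb_objspace_each_objects(count_strings_i, &count);
 */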
3031 
3032 static void
3033 objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data)
3034 {
3035  int prev_dont_incremental = objspace->flags.dont_incremental;
3036 
3037  gc_rest(objspace);
3038  objspace->flags.dont_incremental = TRUE;
3039 
3040  if (prev_dont_incremental) {
3041  objspace_each_objects_without_setup(objspace, callback, data);
3042  }
3043  else {
3044  struct each_obj_args args = {objspace, callback, data};
3045  rb_ensure(objspace_each_objects_protected, (VALUE)&args, incremental_enable, Qnil);
3046  }
3047 }
3048 
3049 void
3050 rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
3051 {
3052  objspace_each_objects_without_setup(&rb_objspace, callback, data);
3053 }
3054 
3055 struct os_each_struct {
3056  size_t num;
3057  VALUE of;
3058 };
3059 
3060 static int
3061 internal_object_p(VALUE obj)
3062 {
3063  RVALUE *p = (RVALUE *)obj;
3064  void *ptr = __asan_region_is_poisoned(p, SIZEOF_VALUE);
3065  asan_unpoison_object(obj, false);
3066  bool used_p = p->as.basic.flags;
3067 
3068  if (used_p) {
3069  switch (BUILTIN_TYPE(p)) {
3070  case T_NODE:
3071  UNEXPECTED_NODE(internal_object_p);
3072  break;
3073  case T_NONE:
3074  case T_MOVED:
3075  case T_IMEMO:
3076  case T_ICLASS:
3077  case T_ZOMBIE:
3078  break;
3079  case T_CLASS:
3080  if (!p->as.basic.klass) break;
3081  if (FL_TEST(obj, FL_SINGLETON)) {
3082  return rb_singleton_class_internal_p(obj);
3083  }
3084  return 0;
3085  default:
3086  if (!p->as.basic.klass) break;
3087  return 0;
3088  }
3089  }
3090  if (ptr || ! used_p) {
3091  asan_poison_object(obj);
3092  }
3093  return 1;
3094 }
3095 
3096 int
3097 rb_objspace_internal_object_p(VALUE obj)
3098 {
3099  return internal_object_p(obj);
3100 }
3101 
3102 static int
3103 os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
3104 {
3105  struct os_each_struct *oes = (struct os_each_struct *)data;
3106  RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;
3107 
3108  for (; p != pend; p++) {
3109  volatile VALUE v = (VALUE)p;
3110  if (!internal_object_p(v)) {
3111  if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
3112  rb_yield(v);
3113  oes->num++;
3114  }
3115  }
3116  }
3117 
3118  return 0;
3119 }
3120 
3121 static VALUE
3122 os_obj_of(VALUE of)
3123 {
3124  struct os_each_struct oes;
3125 
3126  oes.num = 0;
3127  oes.of = of;
3128  rb_objspace_each_objects(os_obj_of_i, &oes);
3129  return SIZET2NUM(oes.num);
3130 }
3131 
3132 /*
3133  * call-seq:
3134  * ObjectSpace.each_object([module]) {|obj| ... } -> integer
3135  * ObjectSpace.each_object([module]) -> an_enumerator
3136  *
3137  * Calls the block once for each living, nonimmediate object in this
3138  * Ruby process. If <i>module</i> is specified, calls the block
3139  * for only those classes or modules that match (or are a subclass of)
3140  * <i>module</i>. Returns the number of objects found. Immediate
3141  * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
3142  * <code>true</code>, <code>false</code>, and <code>nil</code>) are
3143  * never returned. In the example below, #each_object returns both
3144  * the numbers we defined and several constants defined in the Math
3145  * module.
3146  *
3147  * If no block is given, an enumerator is returned instead.
3148  *
3149  * a = 102.7
3150  * b = 95 # Won't be returned
3151  * c = 12345678987654321
3152  * count = ObjectSpace.each_object(Numeric) {|x| p x }
3153  * puts "Total count: #{count}"
3154  *
3155  * <em>produces:</em>
3156  *
3157  * 12345678987654321
3158  * 102.7
3159  * 2.71828182845905
3160  * 3.14159265358979
3161  * 2.22044604925031e-16
3162  * 1.7976931348623157e+308
3163  * 2.2250738585072e-308
3164  * Total count: 7
3165  *
3166  */
3167 
3168 static VALUE
3169 os_each_obj(int argc, VALUE *argv, VALUE os)
3170 {
3171  VALUE of;
3172 
3173  of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
3174  RETURN_ENUMERATOR(os, 1, &of);
3175  return os_obj_of(of);
3176 }
3177 
3178 /*
3179  * call-seq:
3180  * ObjectSpace.undefine_finalizer(obj)
3181  *
3182  * Removes all finalizers for <i>obj</i>.
3183  *
3184  */
3185 
3186 static VALUE
3187 undefine_final(VALUE os, VALUE obj)
3188 {
3189  return rb_undefine_finalizer(obj);
3190 }
3191 
3192 VALUE
3193 rb_undefine_finalizer(VALUE obj)
3194 {
3195  rb_objspace_t *objspace = &rb_objspace;
3196  st_data_t data = obj;
3197  rb_check_frozen(obj);
3198  st_delete(finalizer_table, &data, 0);
3199  FL_UNSET(obj, FL_FINALIZE);
3200  return obj;
3201 }
3202 
3203 static void
3204 should_be_callable(VALUE block)
3205 {
3206  if (!rb_obj_respond_to(block, idCall, TRUE)) {
3207  rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
3208  rb_obj_class(block));
3209  }
3210 }
3211 
3212 static void
3213 should_be_finalizable(VALUE obj)
3214 {
3215  if (!FL_ABLE(obj)) {
3216  rb_raise(rb_eArgError, "cannot define finalizer for %s",
3217  rb_obj_classname(obj));
3218  }
3219  rb_check_frozen(obj);
3220 }
3221 
3222 /*
3223  * call-seq:
3224  * ObjectSpace.define_finalizer(obj, aProc=proc())
3225  *
3226  * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
3227  * is destroyed. The object ID of <i>obj</i> will be passed as an
3228  * argument to <i>aProc</i>. If <i>aProc</i> is a lambda or method,
3229  * make sure it can be called with a single argument.
3230  *
3231  */
3232 
3233 static VALUE
3234 define_final(int argc, VALUE *argv, VALUE os)
3235 {
3236  VALUE obj, block;
3237 
3238  rb_scan_args(argc, argv, "11", &obj, &block);
3239  should_be_finalizable(obj);
3240  if (argc == 1) {
3241  block = rb_block_proc();
3242  }
3243  else {
3244  should_be_callable(block);
3245  }
3246 
3247  return define_final0(obj, block);
3248 }
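/* An illustrative Ruby sketch (not part of gc.c; Tempdir is hypothetical).
 * The finalizer proc is built by a class method so that it does not close
 * over the object itself -- a proc capturing `self` would keep the object
 * reachable and the finalizer would never run:
 *
 *     class Tempdir
 *       def self.finalizer(path)
 *         proc { |id| FileUtils.remove_entry(path) }
 *       end
 *       def initialize(path)
 *         @path = path
 *         ObjectSpace.define_finalizer(self, self.class.finalizer(path))
 *       end
 *     end
 */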
3249 
3250 static VALUE
3251 define_final0(VALUE obj, VALUE block)
3252 {
3253  rb_objspace_t *objspace = &rb_objspace;
3254  VALUE table;
3255  st_data_t data;
3256 
3257  RBASIC(obj)->flags |= FL_FINALIZE;
3258 
3259  block = rb_ary_new3(2, INT2FIX(0), block);
3260  OBJ_FREEZE(block);
3261 
3262  if (st_lookup(finalizer_table, obj, &data)) {
3263  table = (VALUE)data;
3264 
3265  /* avoid duplicate block, table is usually small */
3266  {
3267  long len = RARRAY_LEN(table);
3268  long i;
3269 
3270  for (i = 0; i < len; i++) {
3271  VALUE recv = RARRAY_AREF(table, i);
3272  if (rb_funcall(recv, idEq, 1, block)) {
3273  return recv;
3274  }
3275  }
3276  }
3277 
3278  rb_ary_push(table, block);
3279  }
3280  else {
3281  table = rb_ary_new3(1, block);
3282  RBASIC_CLEAR_CLASS(table);
3283  st_add_direct(finalizer_table, obj, table);
3284  }
3285  return block;
3286 }
3287 
3288 VALUE
3290 {
3291  should_be_finalizable(obj);
3292  should_be_callable(block);
3293  return define_final0(obj, block);
3294 }
3295 
3296 void
3297 rb_gc_copy_finalizer(VALUE dest, VALUE obj)
3298 {
3299  rb_objspace_t *objspace = &rb_objspace;
3300  VALUE table;
3301  st_data_t data;
3302 
3303  if (!FL_TEST(obj, FL_FINALIZE)) return;
3304  if (st_lookup(finalizer_table, obj, &data)) {
3305  table = (VALUE)data;
3306  st_insert(finalizer_table, dest, table);
3307  }
3308  FL_SET(dest, FL_FINALIZE);
3309 }
3310 
3311 static VALUE
3312 run_single_final(VALUE final, VALUE objid)
3313 {
3314  const VALUE cmd = RARRAY_AREF(final, 1);
3315  return rb_check_funcall(cmd, idCall, 1, &objid);
3316 }
3317 
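/* Each finalizer proc runs under its own tag scope: if one raises,
 * EC_EXEC_TAG() returns a non-zero state, the failed entry is skipped
 * via saved.finished, and the loop resumes after RESTORE_FINALIZER()
 * restores the saved control frame and errinfo. */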
3318 static void
3319 run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
3320 {
3321  long i;
3322  enum ruby_tag_type state;
3323  volatile struct {
3324  VALUE errinfo;
3325  VALUE objid;
3326  rb_control_frame_t *cfp;
3327  long finished;
3328  } saved;
3329  rb_execution_context_t * volatile ec = GET_EC();
3330 #define RESTORE_FINALIZER() (\
3331  ec->cfp = saved.cfp, \
3332  rb_set_errinfo(saved.errinfo))
3333 
3334  saved.errinfo = rb_errinfo();
3335  saved.objid = rb_obj_id(obj);
3336  saved.cfp = ec->cfp;
3337  saved.finished = 0;
3338 
3339  EC_PUSH_TAG(ec);
3340  state = EC_EXEC_TAG();
3341  if (state != TAG_NONE) {
3342  ++saved.finished; /* skip failed finalizer */
3343  }
3344  for (i = saved.finished;
3345  RESTORE_FINALIZER(), i<RARRAY_LEN(table);
3346  saved.finished = ++i) {
3347  run_single_final(RARRAY_AREF(table, i), saved.objid);
3348  }
3349  EC_POP_TAG();
3350 #undef RESTORE_FINALIZER
3351 }
3352 
3353 static void
3354 run_final(rb_objspace_t *objspace, VALUE zombie)
3355 {
3356  st_data_t key, table;
3357 
3358  if (RZOMBIE(zombie)->dfree) {
3359  RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
3360  }
3361 
3362  key = (st_data_t)zombie;
3363  if (st_delete(finalizer_table, &key, &table)) {
3364  run_finalizer(objspace, zombie, (VALUE)table);
3365  }
3366 }
3367 
3368 static void
3369 finalize_list(rb_objspace_t *objspace, VALUE zombie)
3370 {
3371  while (zombie) {
3372  VALUE next_zombie;
3373  struct heap_page *page;
3374  asan_unpoison_object(zombie, false);
3375  next_zombie = RZOMBIE(zombie)->next;
3376  page = GET_HEAP_PAGE(zombie);
3377 
3378  run_final(objspace, zombie);
3379 
3380  GC_ASSERT(BUILTIN_TYPE(zombie) == T_ZOMBIE);
3381  if (FL_TEST(zombie, FL_SEEN_OBJ_ID)) {
3382  obj_free_object_id(objspace, zombie);
3383  }
3384 
3385  RZOMBIE(zombie)->basic.flags = 0;
3387  page->final_slots--;
3388  page->free_slots++;
3389  heap_page_add_freeobj(objspace, GET_HEAP_PAGE(zombie), zombie);
3390 
3391  objspace->profile.total_freed_objects++;
3392 
3393  zombie = next_zombie;
3394  }
3395 }
3396 
3397 static void
3398 finalize_deferred(rb_objspace_t *objspace)
3399 {
3400  VALUE zombie;
3401 
3402  while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
3403  finalize_list(objspace, zombie);
3404  }
3405 }
3406 
3407 static void
3408 gc_finalize_deferred(void *dmy)
3409 {
3410  rb_objspace_t *objspace = dmy;
3411  if (ATOMIC_EXCHANGE(finalizing, 1)) return;
3412  finalize_deferred(objspace);
3413  ATOMIC_SET(finalizing, 0);
3414 }
3415 
3416 static void
3417 gc_finalize_deferred_register(rb_objspace_t *objspace)
3418 {
3419  if (rb_postponed_job_register_one(0, gc_finalize_deferred, objspace) == 0) {
3420  rb_bug("gc_finalize_deferred_register: can't register finalizer.");
3421  }
3422 }
3423 
3424 struct force_finalize_list {
3425  VALUE obj;
3426  VALUE table;
3427  struct force_finalize_list *next;
3428 };
3429 
3430 static int
3431 force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
3432 {
3433  struct force_finalize_list **prev = (struct force_finalize_list **)arg;
3434  struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
3435  curr->obj = key;
3436  curr->table = val;
3437  curr->next = *prev;
3438  *prev = curr;
3439  return ST_CONTINUE;
3440 }
3441 
3442 void
3443 rb_objspace_call_finalizer(rb_objspace_t *objspace)
3444 {
3445  RVALUE *p, *pend;
3446  size_t i;
3447 
3448 #if RGENGC_CHECK_MODE >= 2
3449  gc_verify_internal_consistency(objspace);
3450 #endif
3451  gc_rest(objspace);
3452 
3453  if (ATOMIC_EXCHANGE(finalizing, 1)) return;
3454 
3455  /* run finalizers */
3456  finalize_deferred(objspace);
3457  GC_ASSERT(heap_pages_deferred_final == 0);
3458 
3459  gc_rest(objspace);
3460  /* prohibit incremental GC */
3461  objspace->flags.dont_incremental = 1;
3462 
3463  /* force to run finalizer */
3464  while (finalizer_table->num_entries) {
3465  struct force_finalize_list *list = 0;
3466  st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
3467  while (list) {
3468  struct force_finalize_list *curr = list;
3469  st_data_t obj = (st_data_t)curr->obj;
3470  run_finalizer(objspace, curr->obj, curr->table);
3471  st_delete(finalizer_table, &obj, 0);
3472  list = curr->next;
3473  xfree(curr);
3474  }
3475  }
3476 
3477  /* prohibit GC because force T_DATA finalizers can break an object graph consistency */
3478  dont_gc = 1;
3479 
3480  /* running data/file finalizers are part of garbage collection */
3481  gc_enter(objspace, "rb_objspace_call_finalizer");
3482 
3483  /* run data/file object's finalizers */
3484  for (i = 0; i < heap_allocated_pages; i++) {
3485  p = heap_pages_sorted[i]->start; pend = p + heap_pages_sorted[i]->total_slots;
3486  while (p < pend) {
3487  void *poisoned = asan_poisoned_object_p((VALUE)p);
3488  asan_unpoison_object((VALUE)p, false);
3489  switch (BUILTIN_TYPE(p)) {
3490  case T_DATA:
3491  if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
3492  if (rb_obj_is_thread((VALUE)p)) break;
3493  if (rb_obj_is_mutex((VALUE)p)) break;
3494  if (rb_obj_is_fiber((VALUE)p)) break;
3495  p->as.free.flags = 0;
3496  if (RTYPEDDATA_P(p)) {
3497  RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
3498  }
3499  if (RANY(p)->as.data.dfree == RUBY_DEFAULT_FREE) {
3500  xfree(DATA_PTR(p));
3501  }
3502  else if (RANY(p)->as.data.dfree) {
3503  make_zombie(objspace, (VALUE)p, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
3504  }
3505  break;
3506  case T_FILE:
3507  if (RANY(p)->as.file.fptr) {
3508  make_io_zombie(objspace, (VALUE)p);
3509  }
3510  break;
3511  }
3512  if (poisoned) {
3513  GC_ASSERT(BUILTIN_TYPE(p) == T_NONE);
3514  asan_poison_object((VALUE)p);
3515  }
3516  p++;
3517  }
3518  }
3519 
3520  gc_exit(objspace, "rb_objspace_call_finalizer");
3521 
3522  if (heap_pages_deferred_final) {
3523  finalize_list(objspace, heap_pages_deferred_final);
3524  }
3525 
3526  st_free_table(finalizer_table);
3527  finalizer_table = 0;
3528  ATOMIC_SET(finalizing, 0);
3529 }
3530 
3531 PUREFUNC(static inline int is_id_value(rb_objspace_t *objspace, VALUE ptr));
3532 static inline int
3533 is_id_value(rb_objspace_t *objspace, VALUE ptr)
3534 {
3535  if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
3536  if (BUILTIN_TYPE(ptr) > T_FIXNUM) return FALSE;
3537  if (BUILTIN_TYPE(ptr) == T_ICLASS) return FALSE;
3538  return TRUE;
3539 }
3540 
3541 static inline int
3542 heap_is_swept_object(rb_objspace_t *objspace, rb_heap_t *heap, VALUE ptr)
3543 {
3544  struct heap_page *page = GET_HEAP_PAGE(ptr);
3545  return page->flags.before_sweep ? FALSE : TRUE;
3546 }
3547 
3548 static inline int
3549 is_swept_object(rb_objspace_t *objspace, VALUE ptr)
3550 {
3551  if (heap_is_swept_object(objspace, heap_eden, ptr)) {
3552  return TRUE;
3553  }
3554  else {
3555  return FALSE;
3556  }
3557 }
3558 
3559 /* garbage objects will be collected soon. */
3560 static inline int
3561 is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
3562 {
3563  if (!is_lazy_sweeping(heap_eden) ||
3564  is_swept_object(objspace, ptr) ||
3565  MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {
3566 
3567  return FALSE;
3568  }
3569  else {
3570  return TRUE;
3571  }
3572 }
3573 
3574 static inline int
3575 is_live_object(rb_objspace_t *objspace, VALUE ptr)
3576 {
3577  switch (BUILTIN_TYPE(ptr)) {
3578  case T_NONE:
3579  case T_ZOMBIE:
3580  return FALSE;
3581  }
3582 
3583  if (!is_garbage_object(objspace, ptr)) {
3584  return TRUE;
3585  }
3586  else {
3587  return FALSE;
3588  }
3589 }
3590 
3591 static inline int
3592 is_markable_object(rb_objspace_t *objspace, VALUE obj)
3593 {
3594  if (rb_special_const_p(obj)) return FALSE; /* special const is not markable */
3595  check_rvalue_consistency(obj);
3596  return TRUE;
3597 }
3598 
3599 int
3600 rb_objspace_markable_object_p(VALUE obj)
3601 {
3602  rb_objspace_t *objspace = &rb_objspace;
3603  return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
3604 }
3605 
3606 int
3607 rb_objspace_garbage_object_p(VALUE obj)
3608 {
3609  rb_objspace_t *objspace = &rb_objspace;
3610  return is_garbage_object(objspace, obj);
3611 }
3612 
3613 static VALUE
3614 id2ref_obj_tbl(rb_objspace_t *objspace, VALUE objid)
3615 {
3616  VALUE orig;
3617  if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {
3618  return orig;
3619  }
3620  else {
3621  return Qundef;
3622  }
3623 }
3624 
3625 /*
3626  * call-seq:
3627  * ObjectSpace._id2ref(object_id) -> an_object
3628  *
3629  * Converts an object id to a reference to the object. May not be
3630  * called on an object id passed as a parameter to a finalizer.
3631  *
3632  * s = "I am a string" #=> "I am a string"
3633  * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
3634  * r == s #=> true
3635  *
3636  */
3637 
3638 static VALUE
3639 id2ref(VALUE objid)
3640 {
3641 #if SIZEOF_LONG == SIZEOF_VOIDP
3642 #define NUM2PTR(x) NUM2ULONG(x)
3643 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
3644 #define NUM2PTR(x) NUM2ULL(x)
3645 #endif
3646  rb_objspace_t *objspace = &rb_objspace;
3647  VALUE ptr;
3648  VALUE orig;
3649  void *p0;
3650 
3651  objid = rb_to_int(objid);
3652  if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
3653  ptr = NUM2PTR(objid);
3654  if (ptr == Qtrue) return Qtrue;
3655  if (ptr == Qfalse) return Qfalse;
3656  if (ptr == Qnil) return Qnil;
3657  if (FIXNUM_P(ptr)) return (VALUE)ptr;
3658  if (FLONUM_P(ptr)) return (VALUE)ptr;
3659 
3660  ptr = obj_id_to_ref(objid);
3661  if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
3662  ID symid = ptr / sizeof(RVALUE);
3663  p0 = (void *)ptr;
3664  if (rb_id2str(symid) == 0)
3665  rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
3666  return ID2SYM(symid);
3667  }
3668  }
3669 
3670  if ((orig = id2ref_obj_tbl(objspace, objid)) != Qundef &&
3671  is_live_object(objspace, orig)) {
3672  return orig;
3673  }
3674 
3675  if (rb_int_ge(objid, objspace->next_object_id)) {
3676  rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
3677  } else {
3678  rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_int2str(objid, 10));
3679  }
3680 }
3681 
3682 static VALUE
3683 os_id2ref(VALUE os, VALUE objid)
3684 {
3685  return id2ref(objid);
3686 }
3687 
3688 static VALUE
3689 rb_find_object_id(VALUE obj, VALUE (*get_heap_object_id)(VALUE))
3690 {
3691  if (STATIC_SYM_P(obj)) {
3692  return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
3693  }
3694  else if (FLONUM_P(obj)) {
3695 #if SIZEOF_LONG == SIZEOF_VOIDP
3696  return LONG2NUM((SIGNED_VALUE)obj);
3697 #else
3698  return LL2NUM((SIGNED_VALUE)obj);
3699 #endif
3700  }
3701  else if (SPECIAL_CONST_P(obj)) {
3702  return LONG2NUM((SIGNED_VALUE)obj);
3703  }
3704 
3705  return get_heap_object_id(obj);
3706 }
3707 
3708 static VALUE
3709 cached_object_id(VALUE obj)
3710 {
3711  VALUE id;
3712  rb_objspace_t *objspace = &rb_objspace;
3713 
3714  if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {
3715  GC_ASSERT(FL_TEST(obj, FL_SEEN_OBJ_ID));
3716  return id;
3717  }
3718  else {
3719  GC_ASSERT(!FL_TEST(obj, FL_SEEN_OBJ_ID));
3720 
3721  id = objspace->next_object_id;
3722  objspace->next_object_id = rb_int_plus(id, INT2FIX(OBJ_ID_INCREMENT));
3723 
3724  st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
3725  st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
3726  FL_SET(obj, FL_SEEN_OBJ_ID);
3727 
3728  return id;
3729  }
3730 }
3731 
3732 static VALUE
3733 nonspecial_obj_id_(VALUE obj)
3734 {
3735  return nonspecial_obj_id(obj);
3736 }
3737 
3738 
3739 VALUE
3740 rb_memory_id(VALUE obj)
3741 {
3742  return rb_find_object_id(obj, nonspecial_obj_id_);
3743 }
3744 
3745 /*
3746  * Document-method: __id__
3747  * Document-method: object_id
3748  *
3749  * call-seq:
3750  * obj.__id__ -> integer
3751  * obj.object_id -> integer
3752  *
3753  * Returns an integer identifier for +obj+.
3754  *
3755  * The same number will be returned on all calls to +object_id+ for a given
3756  * object, and no two active objects will share an id.
3757  *
3758  * Note that some objects of builtin classes are reused for optimization.
3759  * This is the case for immediate values and frozen string literals.
3760  *
3761  * BasicObject implements +__id__+, Kernel implements +object_id+.
3762  *
3763  * Immediate values are not passed by reference but are passed by value:
3764  * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
3765  *
3766  * Object.new.object_id == Object.new.object_id # => false
3767  * (21 * 2).object_id == (21 * 2).object_id # => true
3768  * "hello".object_id == "hello".object_id # => false
3769  * "hi".freeze.object_id == "hi".freeze.object_id # => true
3770  */
3771 
3772 VALUE
3773 rb_obj_id(VALUE obj)
3774 {
3775  /*
3776  * 32-bit VALUE space
3777  * MSB ------------------------ LSB
3778  * false 00000000000000000000000000000000
3779  * true 00000000000000000000000000000010
3780  * nil 00000000000000000000000000000100
3781  * undef 00000000000000000000000000000110
3782  * symbol ssssssssssssssssssssssss00001110
3783  * object oooooooooooooooooooooooooooooo00 = 0 (mod sizeof(RVALUE))
3784  * fixnum fffffffffffffffffffffffffffffff1
3785  *
3786  * object_id space
3787  * LSB
3788  * false 00000000000000000000000000000000
3789  * true 00000000000000000000000000000010
3790  * nil 00000000000000000000000000000100
3791  * undef 00000000000000000000000000000110
3792  * symbol 000SSSSSSSSSSSSSSSSSSSSSSSSSSS0 S...S % A = 4 (S...S = s...s * A + 4)
3793  * object oooooooooooooooooooooooooooooo0 o...o % A = 0
3794  * fixnum fffffffffffffffffffffffffffffff1 bignum if required
3795  *
3796  * where A = sizeof(RVALUE)/4
3797  *
3798  * sizeof(RVALUE) is
3799  * 20 if 32-bit, double is 4-byte aligned
3800  * 24 if 32-bit, double is 8-byte aligned
3801  * 40 if 64-bit
3802  */
3803 
3804  return rb_find_object_id(obj, cached_object_id);
3805 }
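/* A worked instance of the encoding above: a Fixnum's object_id is its
 * VALUE representation, i.e. 2n+1 for the integer n, so 42.object_id is
 * 85 on any build. Heap objects instead receive increasing multiples of
 * OBJ_ID_INCREMENT from cached_object_id(). */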
3806 
3807 #include "regint.h"
3808 
3809 static size_t
3810 obj_memsize_of(VALUE obj, int use_all_types)
3811 {
3812  size_t size = 0;
3813 
3814  if (SPECIAL_CONST_P(obj)) {
3815  return 0;
3816  }
3817 
3818  if (FL_TEST(obj, FL_EXIVAR)) {
3819  size += rb_generic_ivar_memsize(obj);
3820  }
3821 
3822  switch (BUILTIN_TYPE(obj)) {
3823  case T_OBJECT:
3824  if (!(RBASIC(obj)->flags & ROBJECT_EMBED) &&
3825  ROBJECT(obj)->as.heap.ivptr) {
3826  size += ROBJECT(obj)->as.heap.numiv * sizeof(VALUE);
3827  }
3828  break;
3829  case T_MODULE:
3830  case T_CLASS:
3831  if (RCLASS_EXT(obj)) {
3832  if (RCLASS_M_TBL(obj)) {
3833  size += rb_id_table_memsize(RCLASS_M_TBL(obj));
3834  }
3835  if (RCLASS_IV_TBL(obj)) {
3836  size += st_memsize(RCLASS_IV_TBL(obj));
3837  }
3838  if (RCLASS_IV_INDEX_TBL(obj)) {
3839  size += st_memsize(RCLASS_IV_INDEX_TBL(obj));
3840  }
3841  if (RCLASS(obj)->ptr->iv_tbl) {
3842  size += st_memsize(RCLASS(obj)->ptr->iv_tbl);
3843  }
3844  if (RCLASS(obj)->ptr->const_tbl) {
3845  size += rb_id_table_memsize(RCLASS(obj)->ptr->const_tbl);
3846  }
3847  size += sizeof(rb_classext_t);
3848  }
3849  break;
3850  case T_ICLASS:
3851  if (FL_TEST(obj, RICLASS_IS_ORIGIN)) {
3852  if (RCLASS_M_TBL(obj)) {
3853  size += rb_id_table_memsize(RCLASS_M_TBL(obj));
3854  }
3855  }
3856  break;
3857  case T_STRING:
3858  size += rb_str_memsize(obj);
3859  break;
3860  case T_ARRAY:
3861  size += rb_ary_memsize(obj);
3862  break;
3863  case T_HASH:
3864  if (RHASH_AR_TABLE_P(obj)) {
3865  if (RHASH_AR_TABLE(obj) != NULL) {
3866  size_t rb_hash_ar_table_size();
3867  size += rb_hash_ar_table_size();
3868  }
3869  }
3870  else {
3871  VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
3872  size += st_memsize(RHASH_ST_TABLE(obj));
3873  }
3874  break;
3875  case T_REGEXP:
3876  if (RREGEXP_PTR(obj)) {
3877  size += onig_memsize(RREGEXP_PTR(obj));
3878  }
3879  break;
3880  case T_DATA:
3881  if (use_all_types) size += rb_objspace_data_type_memsize(obj);
3882  break;
3883  case T_MATCH:
3884  if (RMATCH(obj)->rmatch) {
3885  struct rmatch *rm = RMATCH(obj)->rmatch;
3886  size += onig_region_memsize(&rm->regs);
3887  size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
3888  size += sizeof(struct rmatch);
3889  }
3890  break;
3891  case T_FILE:
3892  if (RFILE(obj)->fptr) {
3893  size += rb_io_memsize(RFILE(obj)->fptr);
3894  }
3895  break;
3896  case T_RATIONAL:
3897  case T_COMPLEX:
3898  break;
3899  case T_IMEMO:
3900  size += imemo_memsize(obj);
3901  break;
3902 
3903  case T_FLOAT:
3904  case T_SYMBOL:
3905  break;
3906 
3907  case T_BIGNUM:
3908  if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
3909  size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
3910  }
3911  break;
3912 
3913  case T_NODE:
3914  UNEXPECTED_NODE(obj_memsize_of);
3915  break;
3916 
3917  case T_STRUCT:
3918  if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
3919  RSTRUCT(obj)->as.heap.ptr) {
3920  size += sizeof(VALUE) * RSTRUCT_LEN(obj);
3921  }
3922  break;
3923 
3924  case T_ZOMBIE:
3925  case T_MOVED:
3926  break;
3927 
3928  default:
3929  rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
3930  BUILTIN_TYPE(obj), (void*)obj);
3931  }
3932 
3933  return size + sizeof(RVALUE);
3934 }
3935 
3936 size_t
3937 rb_obj_memsize_of(VALUE obj)
3938 {
3939  return obj_memsize_of(obj, TRUE);
3940 }
3941 
3942 static int
3943 set_zero(st_data_t key, st_data_t val, st_data_t arg)
3944 {
3945  VALUE k = (VALUE)key;
3946  VALUE hash = (VALUE)arg;
3947  rb_hash_aset(hash, k, INT2FIX(0));
3948  return ST_CONTINUE;
3949 }
3950 
3951 static VALUE
3952 type_sym(size_t type)
3953 {
3954  switch (type) {
3955 #define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
3956  COUNT_TYPE(T_NONE);
3957  COUNT_TYPE(T_OBJECT);
3958  COUNT_TYPE(T_CLASS);
3959  COUNT_TYPE(T_MODULE);
3960  COUNT_TYPE(T_FLOAT);
3961  COUNT_TYPE(T_STRING);
3962  COUNT_TYPE(T_REGEXP);
3963  COUNT_TYPE(T_ARRAY);
3964  COUNT_TYPE(T_HASH);
3965  COUNT_TYPE(T_STRUCT);
3966  COUNT_TYPE(T_BIGNUM);
3967  COUNT_TYPE(T_FILE);
3968  COUNT_TYPE(T_DATA);
3969  COUNT_TYPE(T_MATCH);
3970  COUNT_TYPE(T_COMPLEX);
3971  COUNT_TYPE(T_RATIONAL);
3972  COUNT_TYPE(T_NIL);
3973  COUNT_TYPE(T_TRUE);
3974  COUNT_TYPE(T_FALSE);
3975  COUNT_TYPE(T_SYMBOL);
3976  COUNT_TYPE(T_FIXNUM);
3977  COUNT_TYPE(T_UNDEF);
3978  COUNT_TYPE(T_IMEMO);
3979  COUNT_TYPE(T_NODE);
3980  COUNT_TYPE(T_ICLASS);
3981  COUNT_TYPE(T_ZOMBIE);
3982  COUNT_TYPE(T_MOVED);
3983 #undef COUNT_TYPE
3984  default: return INT2NUM(type); break;
3985  }
3986 }
3987 
3988 /*
3989  * call-seq:
3990  * ObjectSpace.count_objects([result_hash]) -> hash
3991  *
3992  * Counts all objects grouped by type.
3993  *
3994  * It returns a hash, such as:
3995  * {
3996  * :TOTAL=>10000,
3997  * :FREE=>3011,
3998  * :T_OBJECT=>6,
3999  * :T_CLASS=>404,
4000  * # ...
4001  * }
4002  *
4003  * The contents of the returned hash are implementation specific.
4004  * They may change in the future.
4005  *
4006  * Keys starting with +:T_+ count live objects.
4007  * For example, +:T_ARRAY+ is the number of arrays.
4008  * +:FREE+ counts object slots which are not currently used.
4009  * +:TOTAL+ is the sum of the above.
4010  *
4011  * If the optional argument +result_hash+ is given,
4012  * it is overwritten and returned. This is intended to avoid probe effect.
4013  *
4014  * h = {}
4015  * ObjectSpace.count_objects(h)
4016  * puts h
4017  * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
4018  *
4019  * This method is only expected to work on C Ruby.
4020  *
4021  */
4022 
4023 static VALUE
4024 count_objects(int argc, VALUE *argv, VALUE os)
4025 {
4026  rb_objspace_t *objspace = &rb_objspace;
4027  size_t counts[T_MASK+1];
4028  size_t freed = 0;
4029  size_t total = 0;
4030  size_t i;
4031  VALUE hash = Qnil;
4032 
4033  if (rb_check_arity(argc, 0, 1) == 1) {
4034  hash = argv[0];
4035  if (!RB_TYPE_P(hash, T_HASH))
4036  rb_raise(rb_eTypeError, "non-hash given");
4037  }
4038 
4039  for (i = 0; i <= T_MASK; i++) {
4040  counts[i] = 0;
4041  }
4042 
4043  for (i = 0; i < heap_allocated_pages; i++) {
4044  struct heap_page *page = heap_pages_sorted[i];
4045  RVALUE *p, *pend;
4046 
4047  p = page->start; pend = p + page->total_slots;
4048  for (;p < pend; p++) {
4049  void *poisoned = asan_poisoned_object_p((VALUE)p);
4050  asan_unpoison_object((VALUE)p, false);
4051  if (p->as.basic.flags) {
4052  counts[BUILTIN_TYPE(p)]++;
4053  }
4054  else {
4055  freed++;
4056  }
4057  if (poisoned) {
4058  GC_ASSERT(BUILTIN_TYPE(p) == T_NONE);
4059  asan_poison_object((VALUE)p);
4060  }
4061  }
4062  total += page->total_slots;
4063  }
4064 
4065  if (hash == Qnil) {
4066  hash = rb_hash_new();
4067  }
4068  else if (!RHASH_EMPTY_P(hash)) {
4069  rb_hash_stlike_foreach(hash, set_zero, hash);
4070  }
4071  rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
4072  rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
4073 
4074  for (i = 0; i <= T_MASK; i++) {
4075  VALUE type = type_sym(i);
4076  if (counts[i])
4077  rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
4078  }
4079 
4080  return hash;
4081 }
4082 
4083 /*
4084  ------------------------ Garbage Collection ------------------------
4085 */
4086 
4087 /* Sweeping */
4088 
4089 static size_t
4090 objspace_available_slots(rb_objspace_t *objspace)
4091 {
4092  return heap_eden->total_slots + heap_tomb->total_slots;
4093 }
4094 
4095 static size_t
4096 objspace_live_slots(rb_objspace_t *objspace)
4097 {
4098  return (objspace->total_allocated_objects - objspace->profile.total_freed_objects) - heap_pages_final_slots;
4099 }
4100 
4101 static size_t
4102 objspace_free_slots(rb_objspace_t *objspace)
4103 {
4104  return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
4105 }
4106 
4107 static void
4108 gc_setup_mark_bits(struct heap_page *page)
4109 {
4110 #if USE_RGENGC
4111  /* copy oldgen bitmap to mark bitmap */
4112  memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
4113 #else
4114  /* clear mark bitmap */
4115  memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
4116 #endif
4117 }
4118 
4119 static inline int
4120 gc_page_sweep(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *sweep_page)
4121 {
4122  int i;
4123  int empty_slots = 0, freed_slots = 0, final_slots = 0;
4124  RVALUE *p, *pend,*offset;
4125  bits_t *bits, bitset;
4126 
4127  gc_report(2, objspace, "page_sweep: start.\n");
4128 
4129  sweep_page->flags.before_sweep = FALSE;
4130 
4131  p = sweep_page->start; pend = p + sweep_page->total_slots;
4132  offset = p - NUM_IN_PAGE(p);
4133  bits = sweep_page->mark_bits;
4134 
4135  /* create guard: set the out-of-range bits to 1 */
4136  bits[BITMAP_INDEX(p)] |= BITMAP_BIT(p)-1;
4137  bits[BITMAP_INDEX(pend)] |= ~(BITMAP_BIT(pend) - 1);
4138 
4139  for (i=0; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
4140  bitset = ~bits[i];
4141  if (bitset) {
4142  p = offset + i * BITS_BITLENGTH;
4143  do {
4144  asan_unpoison_object((VALUE)p, false);
4145  if (bitset & 1) {
4146  switch (BUILTIN_TYPE(p)) {
4147  default: { /* majority case */
4148  gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
4149 #if USE_RGENGC && RGENGC_CHECK_MODE
4150  if (!is_full_marking(objspace)) {
4151  if (RVALUE_OLD_P((VALUE)p)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
4152  if (rgengc_remembered_sweep(objspace, (VALUE)p)) rb_bug("page_sweep: %p - remembered.", (void *)p);
4153  }
4154 #endif
4155  if (obj_free(objspace, (VALUE)p)) {
4156  final_slots++;
4157  }
4158  else {
4159  (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
4160  heap_page_add_freeobj(objspace, sweep_page, (VALUE)p);
4161  gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info((VALUE)p));
4162  freed_slots++;
4163  asan_poison_object((VALUE)p);
4164  }
4165  break;
4166  }
4167 
4168  /* minor cases */
4169  case T_ZOMBIE:
4170  /* already counted */
4171  break;
4172  case T_NONE:
4173  empty_slots++; /* already freed */
4174  break;
4175  }
4176  }
4177  p++;
4178  bitset >>= 1;
4179  } while (bitset);
4180  }
4181  }
4182 
4183  gc_setup_mark_bits(sweep_page);
4184 
4185 #if GC_PROFILE_MORE_DETAIL
4186  if (gc_prof_enabled(objspace)) {
4187  gc_profile_record *record = gc_prof_record(objspace);
4188  record->removing_objects += final_slots + freed_slots;
4189  record->empty_objects += empty_slots;
4190  }
4191 #endif
4192  if (0) fprintf(stderr, "gc_page_sweep(%d): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
4193  (int)rb_gc_count(),
4194  (int)sweep_page->total_slots,
4195  freed_slots, empty_slots, final_slots);
4196 
4197  sweep_page->free_slots = freed_slots + empty_slots;
4198  objspace->profile.total_freed_objects += freed_slots;
4199  heap_pages_final_slots += final_slots;
4200  sweep_page->final_slots += final_slots;
4201 
4202  if (heap_pages_deferred_final && !finalizing) {
4203  rb_thread_t *th = GET_THREAD();
4204  if (th) {
4205  gc_finalize_deferred_register(objspace);
4206  }
4207  }
4208 
4209  gc_report(2, objspace, "page_sweep: end.\n");
4210 
4211  return freed_slots + empty_slots;
4212 }
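/* An isolated sketch of the bitmap idiom used above (not part of gc.c;
 * sweep_slot() stands in for the obj_free()/freelist branch). Each mark
 * bitmap word is inverted so only the unmarked slots are visited:
 *
 *     bits_t unmarked = ~bits[i];              // 1 = unmarked slot
 *     RVALUE *q = offset + i * BITS_BITLENGTH;
 *     while (unmarked) {
 *         if (unmarked & 1) sweep_slot(q);
 *         q++;
 *         unmarked >>= 1;
 *     }
 *
 * The guard bits set before the main loop cover the out-of-range ends of
 * the first and last words; after inversion they read as zero, so slots
 * outside the page are never touched. */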
4213 
4214 /* allocate the minimum number of additional pages needed to continue */
4215 static void
4216 gc_heap_prepare_minimum_pages(rb_objspace_t *objspace, rb_heap_t *heap)
4217 {
4218  if (!heap->free_pages && heap_increment(objspace, heap) == FALSE) {
4219  /* there is no free after page_sweep() */
4220  heap_set_increment(objspace, 1);
4221  if (!heap_increment(objspace, heap)) { /* can't allocate additional free objects */
4222  rb_memerror();
4223  }
4224  }
4225 }
4226 
4227 static const char *
4228 gc_mode_name(enum gc_mode mode)
4229 {
4230  switch (mode) {
4231  case gc_mode_none: return "none";
4232  case gc_mode_marking: return "marking";
4233  case gc_mode_sweeping: return "sweeping";
4234  default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
4235  }
4236 }
4237 
4238 static void
4239 gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
4240 {
4241 #if RGENGC_CHECK_MODE
4242  enum gc_mode prev_mode = gc_mode(objspace);
4243  switch (prev_mode) {
4244  case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
4245  case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
4246  case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none); break;
4247  }
4248 #endif
4249  if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
4250  gc_mode_set(objspace, mode);
4251 }
4252 
4253 static void
4254 gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
4255 {
4256  heap->sweeping_page = list_top(&heap->pages, struct heap_page, page_node);
4257  heap->free_pages = NULL;
4258 #if GC_ENABLE_INCREMENTAL_MARK
4259  heap->pooled_pages = NULL;
4260  objspace->rincgc.pooled_slots = 0;
4261 #endif
4262  if (heap->using_page) {
4263  struct heap_page *page = heap->using_page;
4264  asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
4265 
4266  RVALUE **p = &page->freelist;
4267  while (*p) {
4268  p = &(*p)->as.free.next;
4269  }
4270  *p = heap->freelist;
4271  asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
4272  heap->using_page = NULL;
4273  }
4274  heap->freelist = NULL;
4275 }
4276 
4277 #if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
4278 __attribute__((noinline))
4279 #endif
4280 static void
4281 gc_sweep_start(rb_objspace_t *objspace)
4282 {
4283  gc_mode_transition(objspace, gc_mode_sweeping);
4284  gc_sweep_start_heap(objspace, heap_eden);
4285 }
4286 
4287 static void
4288 gc_sweep_finish(rb_objspace_t *objspace)
4289 {
4290  gc_report(1, objspace, "gc_sweep_finish\n");
4291 
4292  gc_prof_set_heap_info(objspace);
4293  heap_pages_free_unused_pages(objspace);
4294 
4295  /* if heap_pages has unused pages, then assign them to increment */
4296  if (heap_allocatable_pages < heap_tomb->total_pages) {
4297  heap_allocatable_pages_set(objspace, heap_tomb->total_pages);
4298  }
4299 
4300  gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_SWEEP, 0);
4301  gc_mode_transition(objspace, gc_mode_none);
4302 
4303 #if RGENGC_CHECK_MODE >= 2
4304  gc_verify_internal_consistency(objspace);
4305 #endif
4306 }
4307 
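/* One lazy-sweep step: sweeps pages starting at heap->sweeping_page until a
 * page with reusable slots is found; pages left with no living objects may be
 * moved to the tomb heap (at most unlink_limit per step). Returns TRUE when
 * the heap ends up with free pages. */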
4308 static int
4309 gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
4310 {
4311  struct heap_page *sweep_page = heap->sweeping_page;
4312  int unlink_limit = 3;
4313 #if GC_ENABLE_INCREMENTAL_MARK
4314  int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
4315 
4316  gc_report(2, objspace, "gc_sweep_step (need_pool: %d)\n", need_pool);
4317 #else
4318  gc_report(2, objspace, "gc_sweep_step\n");
4319 #endif
4320 
4321  if (sweep_page == NULL) return FALSE;
4322 
4323 #if GC_ENABLE_LAZY_SWEEP
4324  gc_prof_sweep_timer_start(objspace);
4325 #endif
4326 
4327  do {
4328  int free_slots = gc_page_sweep(objspace, heap, sweep_page);
4329  heap->sweeping_page = list_next(&heap->pages, sweep_page, page_node);
4330 
4331  if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
4332  heap_pages_freeable_pages > 0 &&
4333  unlink_limit > 0) {
4334  heap_pages_freeable_pages--;
4335  unlink_limit--;
4336  /* there are no living objects -> move this page to tomb heap */
4337  heap_unlink_page(objspace, heap, sweep_page);
4338  heap_add_page(objspace, heap_tomb, sweep_page);
4339  }
4340  else if (free_slots > 0) {
4341 #if GC_ENABLE_INCREMENTAL_MARK
4342  if (need_pool) {
4343  if (heap_add_poolpage(objspace, heap, sweep_page)) {
4344  need_pool = FALSE;
4345  }
4346  }
4347  else {
4348  heap_add_freepage(heap, sweep_page);
4349  break;
4350  }
4351 #else
4352  heap_add_freepage(heap, sweep_page);
4353  break;
4354 #endif
4355  }
4356  else {
4357  sweep_page->free_next = NULL;
4358  }
4359  } while ((sweep_page = heap->sweeping_page));
4360 
4361  if (!heap->sweeping_page) {
4362  gc_sweep_finish(objspace);
4363  }
4364 
4365 #if GC_ENABLE_LAZY_SWEEP
4366  gc_prof_sweep_timer_stop(objspace);
4367 #endif
4368 
4369  return heap->free_pages != NULL;
4370 }
4371 
4372 static void
4373 gc_sweep_rest(rb_objspace_t *objspace)
4374 {
4375  rb_heap_t *heap = heap_eden; /* lazy sweep only for eden */
4376 
4377  while (has_sweeping_pages(heap)) {
4378  gc_sweep_step(objspace, heap);
4379  }
4380 }
4381 
4382 static void
4383 gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap)
4384 {
4385  GC_ASSERT(dont_gc == FALSE);
4386  if (!GC_ENABLE_LAZY_SWEEP) return;
4387 
4388  gc_enter(objspace, "sweep_continue");
4389 #if USE_RGENGC
4390  if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE && heap_increment(objspace, heap)) {
4391  gc_report(3, objspace, "gc_sweep_continue: success heap_increment().\n");
4392  }
4393 #endif
4394  gc_sweep_step(objspace, heap);
4395  gc_exit(objspace, "sweep_continue");
4396 }
4397 
4398 static void
4399 gc_sweep(rb_objspace_t *objspace)
4400 {
4401  const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
4402 
4403  gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
4404 
4405  if (immediate_sweep) {
4406 #if !GC_ENABLE_LAZY_SWEEP
4407  gc_prof_sweep_timer_start(objspace);
4408 #endif
4409  gc_sweep_start(objspace);
4410  gc_sweep_rest(objspace);
4411 #if !GC_ENABLE_LAZY_SWEEP
4412  gc_prof_sweep_timer_stop(objspace);
4413 #endif
4414  }
4415  else {
4416  struct heap_page *page = NULL;
4417  gc_sweep_start(objspace);
4418 
4419  list_for_each(&heap_eden->pages, page, page_node) {
4420  page->flags.before_sweep = TRUE;
4421  }
4422  gc_sweep_step(objspace, heap_eden);
4423  }
4424 
4425  gc_heap_prepare_minimum_pages(objspace, heap_eden);
4426 }
4427 
4428 /* Marking - Marking stack */
4429 
4430 static stack_chunk_t *
4431 stack_chunk_alloc(void)
4432 {
4433  stack_chunk_t *res;
4434 
4435  res = malloc(sizeof(stack_chunk_t));
4436  if (!res)
4437  rb_memerror();
4438 
4439  return res;
4440 }
4441 
4442 static inline int
4443 is_mark_stack_empty(mark_stack_t *stack)
4444 {
4445  return stack->chunk == NULL;
4446 }
4447 
4448 static size_t
4449 mark_stack_size(mark_stack_t *stack)
4450 {
4451  size_t size = stack->index;
4452  stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
4453 
4454  while (chunk) {
4455  size += stack->limit;
4456  chunk = chunk->next;
4457  }
4458  return size;
4459 }
4460 
4461 static void
4462 add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
4463 {
4464  chunk->next = stack->cache;
4465  stack->cache = chunk;
4466  stack->cache_size++;
4467 }
4468 
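/* unused_cache_size is a low-water mark: the number of cached chunks that were
 * never taken out since the previous shrink. When more than half of the cache
 * went unused, one chunk per call is given back to the system allocator. */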
4469 static void
4470 shrink_stack_chunk_cache(mark_stack_t *stack)
4471 {
4472  stack_chunk_t *chunk;
4473 
4474  if (stack->unused_cache_size > (stack->cache_size/2)) {
4475  chunk = stack->cache;
4476  stack->cache = stack->cache->next;
4477  stack->cache_size--;
4478  free(chunk);
4479  }
4480  stack->unused_cache_size = stack->cache_size;
4481 }
4482 
4483 static void
4484 push_mark_stack_chunk(mark_stack_t *stack)
4485 {
4486  stack_chunk_t *next;
4487 
4488  GC_ASSERT(stack->index == stack->limit);
4489 
4490  if (stack->cache_size > 0) {
4491  next = stack->cache;
4492  stack->cache = stack->cache->next;
4493  stack->cache_size--;
4494  if (stack->unused_cache_size > stack->cache_size)
4495  stack->unused_cache_size = stack->cache_size;
4496  }
4497  else {
4498  next = stack_chunk_alloc();
4499  }
4500  next->next = stack->chunk;
4501  stack->chunk = next;
4502  stack->index = 0;
4503 }
4504 
4505 static void
4506 pop_mark_stack_chunk(mark_stack_t *stack)
4507 {
4508  stack_chunk_t *prev;
4509 
4510  prev = stack->chunk->next;
4511  GC_ASSERT(stack->index == 0);
4512  add_stack_chunk_cache(stack, stack->chunk);
4513  stack->chunk = prev;
4514  stack->index = stack->limit;
4515 }
4516 
4517 static void
4518 free_stack_chunks(mark_stack_t *stack)
4519 {
4520  stack_chunk_t *chunk = stack->chunk;
4521  stack_chunk_t *next = NULL;
4522 
4523  while (chunk != NULL) {
4524  next = chunk->next;
4525  free(chunk);
4526  chunk = next;
4527  }
4528 }
4529 
4530 static void
4531 push_mark_stack(mark_stack_t *stack, VALUE data)
4532 {
4533  if (stack->index == stack->limit) {
4534  push_mark_stack_chunk(stack);
4535  }
4536  stack->chunk->data[stack->index++] = data;
4537 }
4538 
4539 static int
4540 pop_mark_stack(mark_stack_t *stack, VALUE *data)
4541 {
4542  if (is_mark_stack_empty(stack)) {
4543  return FALSE;
4544  }
4545  if (stack->index == 1) {
4546  *data = stack->chunk->data[--stack->index];
4547  pop_mark_stack_chunk(stack);
4548  }
4549  else {
4550  *data = stack->chunk->data[--stack->index];
4551  }
4552  return TRUE;
4553 }
4554 
4555 #if GC_ENABLE_INCREMENTAL_MARK
4556 static int
4557 invalidate_mark_stack_chunk(stack_chunk_t *chunk, int limit, VALUE obj)
4558 {
4559  int i;
4560  for (i=0; i<limit; i++) {
4561  if (chunk->data[i] == obj) {
4562  chunk->data[i] = Qundef;
4563  return TRUE;
4564  }
4565  }
4566  return FALSE;
4567 }
4568 
4569 static void
4570 invalidate_mark_stack(mark_stack_t *stack, VALUE obj)
4571 {
4572  stack_chunk_t *chunk = stack->chunk;
4573  int limit = stack->index;
4574 
4575  while (chunk) {
4576  if (invalidate_mark_stack_chunk(chunk, limit, obj)) return;
4577  chunk = chunk->next;
4578  limit = stack->limit;
4579  }
4580  rb_bug("invalid_mark_stack: unreachable");
4581 }
4582 #endif
4583 
4584 static void
4585 init_mark_stack(mark_stack_t *stack)
4586 {
4587  int i;
4588 
4589  MEMZERO(stack, mark_stack_t, 1);
4590  stack->index = stack->limit = STACK_CHUNK_SIZE;
4591  stack->cache_size = 0;
4592 
4593  for (i=0; i < 4; i++) {
4594  add_stack_chunk_cache(stack, stack_chunk_alloc());
4595  }
4596  stack->unused_cache_size = stack->cache_size;
4597 }
4598 
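/* The mark stack above keeps pending objects in a linked list of fixed-size
 * chunks and recycles emptied chunks through a small cache (shrunk lazily by
 * shrink_stack_chunk_cache). A minimal self-contained sketch of the same
 * technique follows; it is illustrative only, not part of gc.c, and all
 * demo_* names are hypothetical. */
#if 0
#include <stdio.h>
#include <stdlib.h>

#define DEMO_CHUNK_SIZE 4

struct demo_chunk {
    struct demo_chunk *next;
    long data[DEMO_CHUNK_SIZE];
};

struct demo_stack {
    struct demo_chunk *chunk;  /* top of the in-use chunk list */
    struct demo_chunk *cache;  /* recycled, currently unused chunks */
    size_t index;              /* next free slot in the top chunk */
};

static void
demo_push(struct demo_stack *s, long v)
{
    if (!s->chunk || s->index == DEMO_CHUNK_SIZE) {
        struct demo_chunk *c;
        if (s->cache) {                       /* reuse a cached chunk */
            c = s->cache;
            s->cache = c->next;
        }
        else if (!(c = malloc(sizeof(*c)))) {
            abort();  /* out of memory; gc.c raises rb_memerror() instead */
        }
        c->next = s->chunk;
        s->chunk = c;
        s->index = 0;
    }
    s->chunk->data[s->index++] = v;
}

static int
demo_pop(struct demo_stack *s, long *v)
{
    if (!s->chunk) return 0;
    *v = s->chunk->data[--s->index];
    if (s->index == 0) {                      /* chunk drained: recycle it */
        struct demo_chunk *c = s->chunk;
        s->chunk = c->next;
        c->next = s->cache;
        s->cache = c;
        s->index = s->chunk ? DEMO_CHUNK_SIZE : 0;
    }
    return 1;
}

int
main(void)
{
    struct demo_stack s = {0};
    long v;
    for (v = 0; v < 10; v++) demo_push(&s, v);
    while (demo_pop(&s, &v)) printf("%ld ", v);  /* prints 9 8 ... 0 */
    printf("\n");
    return 0;
}
#endif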
4599 /* Marking */
4600 
4601 #define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
4602 
4603 #define STACK_START (ec->machine.stack_start)
4604 #define STACK_END (ec->machine.stack_end)
4605 #define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
4606 
4607 #ifdef __EMSCRIPTEN__
4608 #undef STACK_GROW_DIRECTION
4609 #define STACK_GROW_DIRECTION 1
4610 #endif
4611 
4612 #if STACK_GROW_DIRECTION < 0
4613 # define STACK_LENGTH (size_t)(STACK_START - STACK_END)
4614 #elif STACK_GROW_DIRECTION > 0
4615 # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
4616 #else
4617 # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
4618  : (size_t)(STACK_END - STACK_START + 1))
4619 #endif
4620 #if !STACK_GROW_DIRECTION
4621 int ruby_stack_grow_direction;
4622 int
4623 ruby_get_stack_grow_direction(volatile VALUE *addr)
4624 {
4625  VALUE *end;
4626  SET_MACHINE_STACK_END(&end);
4627 
4628  if (end > addr) return ruby_stack_grow_direction = 1;
4629  return ruby_stack_grow_direction = -1;
4630 }
4631 #endif
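/* A self-contained sketch (not part of gc.c) of the runtime stack-direction
 * probe used by ruby_get_stack_grow_direction() above: compare the address of
 * a callee's local with one from its caller. Comparing addresses of unrelated
 * objects is formally unspecified in C, which is why this probe is used only
 * when STACK_GROW_DIRECTION is unknown at compile time. demo_* names are
 * hypothetical. */
#if 0
#include <stdio.h>

__attribute__((noinline)) static int
demo_grow_direction(volatile char *caller_local)
{
    volatile char callee_local;
    /* deeper frame at a higher address -> the stack grows upward */
    return (&callee_local > caller_local) ? 1 : -1;
}

int
main(void)
{
    volatile char caller_local;
    printf("stack grows %s\n",
           demo_grow_direction(&caller_local) > 0 ? "upward" : "downward");
    return 0;
}
#endif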
4632 
4633 size_t
4634 ruby_stack_length(VALUE **p)
4635 {
4636  rb_execution_context_t *ec = GET_EC();
4637  SET_STACK_END;
4638  if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
4639  return STACK_LENGTH;
4640 }
4641 
4642 #define PREVENT_STACK_OVERFLOW 1
4643 #ifndef PREVENT_STACK_OVERFLOW
4644 #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
4645 # define PREVENT_STACK_OVERFLOW 1
4646 #else
4647 # define PREVENT_STACK_OVERFLOW 0
4648 #endif
4649 #endif
4650 #if PREVENT_STACK_OVERFLOW
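/* Returns TRUE when fewer than water_mark VALUE slots of machine stack remain,
 * i.e. the current depth exceeds STACK_LEVEL_MAX - water_mark. */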
4651 static int
4652 stack_check(rb_execution_context_t *ec, int water_mark)
4653 {
4654  SET_STACK_END;
4655 
4656  size_t length = STACK_LENGTH;
4657  size_t maximum_length = STACK_LEVEL_MAX - water_mark;
4658 
4659  return length > maximum_length;
4660 }
4661 #else
4662 #define stack_check(ec, water_mark) FALSE
4663 #endif
4664 
4665 #define STACKFRAME_FOR_CALL_CFUNC 2048
4666 
4667 MJIT_FUNC_EXPORTED int
4668 rb_ec_stack_check(rb_execution_context_t *ec)
4669 {
4670  return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
4671 }
4672 
4673 int
4674 ruby_stack_check(void)
4675 {
4676  return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
4677 }
4678 
4679 ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n));
4680 static void
4681 mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n)
4682 {
4683  VALUE v;
4684  while (n--) {
4685  v = *x;
4686  gc_mark_maybe(objspace, v);
4687  x++;
4688  }
4689 }
4690 
4691 static void
4692 gc_mark_locations(rb_objspace_t *objspace, const VALUE *start, const VALUE *end)
4693 {
4694  long n;
4695 
4696  if (end <= start) return;
4697  n = end - start;
4698  mark_locations_array(objspace, start, n);
4699 }
4700 
4701 void
4702 rb_gc_mark_locations(const VALUE *start, const VALUE *end)
4703 {
4704  gc_mark_locations(&rb_objspace, start, end);
4705 }
4706 
4707 static void
4708 gc_mark_values(rb_objspace_t *objspace, long n, const VALUE *values)
4709 {
4710  long i;
4711 
4712  for (i=0; i<n; i++) {
4713  gc_mark(objspace, values[i]);
4714  }
4715 }
4716 
4717 void
4718 rb_gc_mark_values(long n, const VALUE *values)
4719 {
4720  long i;
4721  rb_objspace_t *objspace = &rb_objspace;
4722 
4723  for (i=0; i<n; i++) {
4724  gc_mark_and_pin(objspace, values[i]);
4725  }
4726 }
4727 
4728 static void
4729 gc_mark_and_pin_stack_values(rb_objspace_t *objspace, long n, const VALUE *values)
4730 {
4731  long i;
4732 
4733  for (i=0; i<n; i++) {
4734  /* skip MOVED objects that are on the stack */
4735  if (is_markable_object(objspace, values[i]) && T_MOVED != BUILTIN_TYPE(values[i])) {
4736  gc_mark_and_pin(objspace, values[i]);
4737  }
4738  }
4739 }
4740 
4741 void
4742 rb_gc_mark_vm_stack_values(long n, const VALUE *values)
4743 {
4744  rb_objspace_t *objspace = &rb_objspace;
4745  gc_mark_and_pin_stack_values(objspace, n, values);
4746 }
4747 
4748 static int
4749 mark_value(st_data_t key, st_data_t value, st_data_t data)
4750 {
4751  rb_objspace_t *objspace = (rb_objspace_t *)data;
4752  gc_mark(objspace, (VALUE)value);
4753  return ST_CONTINUE;
4754 }
4755 
4756 static int
4757 mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
4758 {
4759  rb_objspace_t *objspace = (rb_objspace_t *)data;
4760  gc_mark_and_pin(objspace, (VALUE)value);
4761  return ST_CONTINUE;
4762 }
4763 
4764 static void
4765 mark_tbl_no_pin(rb_objspace_t *objspace, st_table *tbl)
4766 {
4767  if (!tbl || tbl->num_entries == 0) return;
4768  st_foreach(tbl, mark_value, (st_data_t)objspace);
4769 }
4770 
4771 static void
4772 mark_tbl(rb_objspace_t *objspace, st_table *tbl)
4773 {
4774  if (!tbl || tbl->num_entries == 0) return;
4775  st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
4776 }
4777 
4778 static int
4779 mark_key(st_data_t key, st_data_t value, st_data_t data)
4780 {
4781  rb_objspace_t *objspace = (rb_objspace_t *)data;
4782  gc_mark_and_pin(objspace, (VALUE)key);
4783  return ST_CONTINUE;
4784 }
4785 
4786 static void
4787 mark_set(rb_objspace_t *objspace, st_table *tbl)
4788 {
4789  if (!tbl) return;
4790  st_foreach(tbl, mark_key, (st_data_t)objspace);
4791 }
4792 
4793 static void
4794 mark_finalizer_tbl(rb_objspace_t *objspace, st_table *tbl)
4795 {
4796  if (!tbl) return;
4797  st_foreach(tbl, mark_value, (st_data_t)objspace);
4798 }
4799 
4800 void
4801 rb_mark_set(st_table *tbl)
4802 {
4803  mark_set(&rb_objspace, tbl);
4804 }
4805 
4806 static int
4807 mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
4808 {
4809  rb_objspace_t *objspace = (rb_objspace_t *)data;
4810 
4811  gc_mark(objspace, (VALUE)key);
4812  gc_mark(objspace, (VALUE)value);
4813  return ST_CONTINUE;
4814 }
4815 
4816 static int
4817 pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
4818 {
4819  rb_objspace_t *objspace = (rb_objspace_t *)data;
4820 
4821  gc_mark_and_pin(objspace, (VALUE)key);
4822  gc_mark_and_pin(objspace, (VALUE)value);
4823  return ST_CONTINUE;
4824 }
4825 
4826 static int
4827 pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
4828 {
4829  rb_objspace_t *objspace = (rb_objspace_t *)data;
4830 
4831  gc_mark_and_pin(objspace, (VALUE)key);
4832  gc_mark(objspace, (VALUE)value);
4833  return ST_CONTINUE;
4834 }
4835 
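/* compare_by_identity hashes key on the object's address, so their keys are
 * pinned (pin_key_mark_value) to keep compaction from moving them; ordinary
 * hashes mark both keys and values as movable (mark_keyvalue). */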
4836 static void
4837 mark_hash(rb_objspace_t *objspace, VALUE hash)
4838 {
4839  if (rb_hash_compare_by_id_p(hash)) {
4840  rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
4841  }
4842  else {
4843  rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
4844  }
4845 
4846  if (RHASH_AR_TABLE_P(hash)) {
4847  if (objspace->mark_func_data == NULL && RHASH_TRANSIENT_P(hash)) {
4848  rb_transient_heap_mark(hash, rb_hash_ar_table(hash));
4849  }
4850  }
4851  else {
4852  VM_ASSERT(!RHASH_TRANSIENT_P(hash));
4853  }
4854  gc_mark(objspace, RHASH(hash)->ifnone);
4855 }
4856 
4857 static void
4858 mark_st(rb_objspace_t *objspace, st_table *tbl)
4859 {
4860  if (!tbl) return;
4861  st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
4862 }
4863 
4864 void
4865 rb_mark_hash(st_table *tbl)
4866 {
4867  mark_st(&rb_objspace, tbl);
4868 }
4869 
4870 static void
4871 mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
4872 {
4873  const rb_method_definition_t *def = me->def;
4874 
4875  gc_mark(objspace, me->owner);
4876  gc_mark(objspace, me->defined_class);
4877 
4878  if (def) {
4879  switch (def->type) {
4880  case VM_METHOD_TYPE_ISEQ:
4881  if (def->body.iseq.iseqptr) gc_mark(objspace, (VALUE)def->body.iseq.iseqptr);
4882  gc_mark(objspace, (VALUE)def->body.iseq.cref);
4883  break;
4884  case VM_METHOD_TYPE_ATTRSET:
4885  case VM_METHOD_TYPE_IVAR:
4886  gc_mark(objspace, def->body.attr.location);
4887  break;
4888  case VM_METHOD_TYPE_BMETHOD:
4889  gc_mark(objspace, def->body.bmethod.proc);
4890  rb_hook_list_mark(def->body.bmethod.hooks);
4891  break;
4892  case VM_METHOD_TYPE_ALIAS:
4893  gc_mark(objspace, (VALUE)def->body.alias.original_me);
4894  return;
4895  case VM_METHOD_TYPE_REFINED:
4896  gc_mark(objspace, (VALUE)def->body.refined.orig_me);
4897  gc_mark(objspace, (VALUE)def->body.refined.owner);
4898  break;
4899  case VM_METHOD_TYPE_CFUNC:
4900  case VM_METHOD_TYPE_ZSUPER:
4901  case VM_METHOD_TYPE_MISSING:
4902  case VM_METHOD_TYPE_OPTIMIZED:
4903  case VM_METHOD_TYPE_UNDEF:
4904  case VM_METHOD_TYPE_NOTIMPLEMENTED:
4905  break;
4906  }
4907  }
4908 }
4909 
4910 static enum rb_id_table_iterator_result
4911 mark_method_entry_i(VALUE me, void *data)
4912 {
4913  rb_objspace_t *objspace = (rb_objspace_t *)data;
4914 
4915  gc_mark(objspace, me);
4916  return ID_TABLE_CONTINUE;
4917 }
4918 
4919 static void
4920 mark_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
4921 {
4922  if (tbl) {
4923  rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
4924  }
4925 }
4926 
4927 static enum rb_id_table_iterator_result
4928 mark_const_entry_i(VALUE value, void *data)
4929 {
4930  const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
4931  rb_objspace_t *objspace = data;
4932 
4933  gc_mark(objspace, ce->value);
4934  gc_mark(objspace, ce->file);
4935  return ID_TABLE_CONTINUE;
4936 }
4937 
4938 static void
4939 mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
4940 {
4941  if (!tbl) return;
4942  rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
4943 }
4944 
4945 #if STACK_GROW_DIRECTION < 0
4946 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
4947 #elif STACK_GROW_DIRECTION > 0
4948 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
4949 #else
4950 #define GET_STACK_BOUNDS(start, end, appendix) \
4951  ((STACK_END < STACK_START) ? \
4952  ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
4953 #endif
4954 
4955 static void mark_stack_locations(rb_objspace_t *objspace, const rb_execution_context_t *ec,
4956  const VALUE *stack_start, const VALUE *stack_end);
4957 
4958 static void
4959 mark_current_machine_context(rb_objspace_t *objspace, rb_execution_context_t *ec)
4960 {
4961  union {
4962  rb_jmp_buf j;
4963  VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)];
4964  } save_regs_gc_mark;
4965  VALUE *stack_start, *stack_end;
4966 
4967  FLUSH_REGISTER_WINDOWS;
4968  memset(&save_regs_gc_mark, 0, sizeof(save_regs_gc_mark));
4969  /* This assumes that all registers are saved into the jmp_buf (and stack) */
4970  rb_setjmp(save_regs_gc_mark.j);
4971 
4972  /* SET_STACK_END must be called in this function because
4973  * the stack frame of this function may contain
4974  * callee save registers and they should be marked. */
4975  SET_STACK_END;
4976  GET_STACK_BOUNDS(stack_start, stack_end, 1);
4977 
4978  mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));
4979 
4980  mark_stack_locations(objspace, ec, stack_start, stack_end);
4981 }
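
/* A self-contained sketch (not part of gc.c) of the setjmp trick used by
 * mark_current_machine_context() above: setjmp() spills callee-saved registers
 * into a jmp_buf on the current frame, so conservatively scanning the jmp_buf
 * plus the machine stack also covers pointers that live only in registers.
 * demo_scan_region() is hypothetical; a real collector would test every word
 * against its heap bounds, as gc_mark_maybe() does. */
#if 0
#include <setjmp.h>
#include <stddef.h>
#include <stdio.h>

static void
demo_scan_region(const void *start, size_t len)
{
    printf("would conservatively scan %zu bytes at %p\n", len, (void *)start);
}

int
main(void)
{
    jmp_buf env;
    setjmp(env);                          /* flush registers into env */
    demo_scan_region(&env, sizeof(env));  /* ...then scan it like the stack */
    return 0;
}
#endif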
4982 
4983 void
4984 rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
4985 {
4986  rb_objspace_t *objspace = &rb_objspace;
4987  VALUE *stack_start, *stack_end;
4988 
4989  GET_STACK_BOUNDS(stack_start, stack_end, 0);
4990  mark_stack_locations(objspace, ec, stack_start, stack_end);
4991 }
4992 
4993 static void
4994 mark_stack_locations(rb_objspace_t *objspace, const rb_execution_context_t *ec,
4995  const VALUE *stack_start, const VALUE *stack_end)
4996 {
4997 
4998  gc_mark_locations(objspace, stack_start, stack_end);
4999 
5000 #if defined(__mc68000__)
5001  gc_mark_locations(objspace,
5002  (VALUE*)((char*)stack_start + 2),
5003  (VALUE*)((char*)stack_end - 2));
5004 #endif
5005 }
5006 
5007 void
5008 rb_mark_tbl(st_table *tbl)
5009 {
5010  mark_tbl(&rb_objspace, tbl);
5011 }
5012 
5013 void
5014 rb_mark_tbl_no_pin(st_table *tbl)
5015 {
5016  mark_tbl_no_pin(&rb_objspace, tbl);
5017 }
5018 
5019 static void
5020 gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
5021 {
5022  (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
5023 
5024  if (is_pointer_to_heap(objspace, (void *)obj)) {
5025  void *ptr = __asan_region_is_poisoned((void *)obj, SIZEOF_VALUE);
5026  asan_unpoison_object(obj, false);
5027 
5028  /* Garbage (T_NONE/T_ZOMBIE/T_MOVED) can live on the stack, so do not mark or pin such slots */
5029  switch (BUILTIN_TYPE(obj)) {
5030  case T_MOVED:
5031  case T_ZOMBIE:
5032  case T_NONE:
5033  break;
5034  default:
5035  gc_mark_and_pin(objspace, obj);
5036  break;
5037  }
5038 
5039  if (ptr) {
5040  GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
5041  asan_poison_object(obj);
5042  }
5043  }
5044 }
5045 
5046 void
5047 rb_gc_mark_maybe(VALUE obj)
5048 {
5049  gc_mark_maybe(&rb_objspace, obj);
5050 }
5051 
5052 static inline int
5053 gc_mark_set(rb_objspace_t *objspace, VALUE obj)
5054 {
5055  if (RVALUE_MARKED(obj)) return 0;
5056  MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
5057  return 1;
5058 }
5059 
5060 #if USE_RGENGC
5061 static int
5062 gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
5063 {
5064  struct heap_page *page = GET_HEAP_PAGE(obj);
5065  bits_t *uncollectible_bits = &page->uncollectible_bits[0];
5066 
5067  if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
5068  page->flags.has_uncollectible_shady_objects = TRUE;
5069  MARK_IN_BITMAP(uncollectible_bits, obj);
5070  objspace->rgengc.uncollectible_wb_unprotected_objects++;
5071 
5072 #if RGENGC_PROFILE > 0
5073  objspace->profile.total_remembered_shady_object_count++;
5074 #if RGENGC_PROFILE >= 2
5075  objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
5076 #endif
5077 #endif
5078  return TRUE;
5079  }
5080  else {
5081  return FALSE;
5082  }
5083 }
5084 #endif
5085 
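/* Preserves the generational invariant while marking: an old parent must not
 * end up referencing an unmarked young object invisibly. WB-unprotected
 * children are remembered instead of promoted; already-marked young children
 * are promoted to old on the spot; unmarked ones merely become candidates. */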
5086 static void
5087 rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
5088 {
5089 #if USE_RGENGC
5090  const VALUE old_parent = objspace->rgengc.parent_object;
5091 
5092  if (old_parent) { /* parent object is old */
5093  if (RVALUE_WB_UNPROTECTED(obj)) {
5094  if (gc_remember_unprotected(objspace, obj)) {
5095  gc_report(2, objspace, "relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
5096  }
5097  }
5098  else {
5099  if (!RVALUE_OLD_P(obj)) {
5100  if (RVALUE_MARKED(obj)) {
5101  /* An object pointed to by an OLD object should itself be OLD. */
5102  gc_report(2, objspace, "relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
5103  RVALUE_AGE_SET_OLD(objspace, obj);
5104  if (is_incremental_marking(objspace)) {
5105  if (!RVALUE_MARKING(obj)) {
5106  gc_grey(objspace, obj);
5107  }
5108  }
5109  else {
5110  rgengc_remember(objspace, obj);
5111  }
5112  }
5113  else {
5114  gc_report(2, objspace, "relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
5115  RVALUE_AGE_SET_CANDIDATE(objspace, obj);
5116  }
5117  }
5118  }
5119  }
5120 
5121  GC_ASSERT(old_parent == objspace->rgengc.parent_object);
5122 #endif
5123 }
5124 
5125 static void
5126 gc_grey(rb_objspace_t *objspace, VALUE obj)
5127 {
5128 #if RGENGC_CHECK_MODE
5129  if (RVALUE_MARKED(obj) == FALSE) rb_bug("gc_grey: %s is not marked.", obj_info(obj));
5130  if (RVALUE_MARKING(obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", obj_info(obj));
5131 #endif
5132 
5133 #if GC_ENABLE_INCREMENTAL_MARK
5134  if (is_incremental_marking(objspace)) {
5135  MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
5136  }
5137 #endif
5138 
5139  push_mark_stack(&objspace->mark_stack, obj);
5140 }
5141 
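/* Ages a freshly marked object: each survived marking increments its age
 * until RVALUE_AGE_INC promotes it to the old generation. WB-unprotected
 * objects are never aged; during a full mark, old objects are additionally
 * recorded as uncollectible in the page bitmap. */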
5142 static void
5143 gc_aging(rb_objspace_t *objspace, VALUE obj)
5144 {
5145 #if USE_RGENGC
5146  struct heap_page *page = GET_HEAP_PAGE(obj);
5147 
5148  GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
5149  check_rvalue_consistency(obj);
5150 
5151  if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
5152  if (!RVALUE_OLD_P(obj)) {
5153  gc_report(3, objspace, "gc_aging: YOUNG: %s\n", obj_info(obj));
5154  RVALUE_AGE_INC(objspace, obj);
5155  }
5156  else if (is_full_marking(objspace)) {
5157  GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
5158  RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
5159  }
5160  }
5161  check_rvalue_consistency(obj);
5162 #endif /* USE_RGENGC */
5163 
5164  objspace->marked_slots++;
5165 }
5166 
5167 NOINLINE(static void gc_mark_ptr(rb_objspace_t *objspace, VALUE obj));
5168 
5169 static void
5170 gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
5171 {
5172  if (LIKELY(objspace->mark_func_data == NULL)) {
5173  rgengc_check_relation(objspace, obj);
5174  if (!gc_mark_set(objspace, obj)) return; /* already marked */
5175  if (RB_TYPE_P(obj, T_NONE)) rb_bug("try to mark T_NONE object"); /* check here will help debugging */
5176  gc_aging(objspace, obj);
5177  gc_grey(objspace, obj);
5178  }
5179  else {
5180  objspace->mark_func_data->mark_func(obj, objspace->mark_func_data->data);
5181  }
5182 }
5183 
5184 static inline void
5185 gc_pin(rb_objspace_t *objspace, VALUE obj)
5186 {
5187  GC_ASSERT(is_markable_object(objspace, obj));
5188  if (UNLIKELY(objspace->flags.during_compacting)) {
5189  MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
5190  }
5191 }
5192 
5193 static inline void
5194 gc_mark_and_pin(rb_objspace_t *objspace, VALUE obj)
5195 {
5196  if (!is_markable_object(objspace, obj)) return;
5197  gc_pin(objspace, obj);
5198  gc_mark_ptr(objspace, obj);
5199 }
5200 
5201 static inline void
5202 gc_mark(rb_objspace_t *objspace, VALUE obj)
5203 {
5204  if (!is_markable_object(objspace, obj)) return;
5205  gc_mark_ptr(objspace, obj);
5206 }
5207 
5208 void
5209 rb_gc_mark_movable(VALUE ptr)
5210 {
5211  gc_mark(&rb_objspace, ptr);
5212 }
5213 
5214 void
5215 rb_gc_mark(VALUE ptr)
5216 {
5217  gc_mark_and_pin(&rb_objspace, ptr);
5218 }
5219 
5220 /* CAUTION: THIS FUNCTION IS VALID *ONLY BEFORE* SWEEPING.
5221  * It is intended solely for GC_END_MARK timing.
5222  */
5223 
5224 int
5225 rb_objspace_marked_object_p(VALUE obj)
5226 {
5227  return RVALUE_MARKED(obj) ? TRUE : FALSE;
5228 }
5229 
5230 static inline void
5231 gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
5232 {
5233 #if USE_RGENGC
5234  if (RVALUE_OLD_P(obj)) {
5235  objspace->rgengc.parent_object = obj;
5236  }
5237  else {
5238  objspace->rgengc.parent_object = Qfalse;
5239  }
5240 #endif
5241 }
5242 
5243 static void
5244 gc_mark_imemo(rb_objspace_t *objspace, VALUE obj)
5245 {
5246  switch (imemo_type(obj)) {
5247  case imemo_env:
5248  {
5249  const rb_env_t *env = (const rb_env_t *)obj;
5250  GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
5251  gc_mark_values(objspace, (long)env->env_size, env->env);
5252  VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
5253  gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
5254  gc_mark(objspace, (VALUE)env->iseq);
5255  }
5256  return;
5257  case imemo_cref:
5258  gc_mark(objspace, RANY(obj)->as.imemo.cref.klass);
5259  gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
5260  gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
5261  return;
5262  case imemo_svar:
5263  gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
5264  gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
5265  gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
5266  gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
5267  return;
5268  case imemo_throw_data:
5269  gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
5270  return;
5271  case imemo_ifunc:
5272  gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
5273  return;
5274  case imemo_memo:
5275  gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
5276  gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
5277  gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
5278  return;
5279  case imemo_ment:
5280  mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
5281  return;
5282  case imemo_iseq:
5283  rb_iseq_mark((rb_iseq_t *)obj);
5284  return;
5285  case imemo_tmpbuf:
5286  {
5287  const rb_imemo_tmpbuf_t *m = &RANY(obj)->as.imemo.alloc;
5288  do {
5289  rb_gc_mark_locations(m->ptr, m->ptr + m->cnt);
5290  } while ((m = m->next) != NULL);
5291  }
5292  return;
5293  case imemo_ast:
5294  rb_ast_mark(&RANY(obj)->as.imemo.ast);
5295  return;
5296  case imemo_parser_strterm:
5297  rb_strterm_mark(obj);
5298  return;
5299 #if VM_CHECK_MODE > 0
5300  default:
5301  VM_UNREACHABLE(gc_mark_imemo);
5302 #endif
5303  }
5304 }
5305 
5306 static void
5307 gc_mark_children(rb_objspace_t *objspace, VALUE obj)
5308 {
5309  register RVALUE *any = RANY(obj);
5310  gc_mark_set_parent(objspace, obj);
5311 
5312  if (FL_TEST(obj, FL_EXIVAR)) {
5313  rb_mark_generic_ivar(obj);
5314  }
5315 
5316  switch (BUILTIN_TYPE(obj)) {
5317  case T_FLOAT:
5318  case T_BIGNUM:
5319  case T_SYMBOL:
5320  /* Not immediates, but they have no references and no singleton
5321  * class */
5322  return;
5323 
5324  case T_NIL:
5325  case T_FIXNUM:
5326  rb_bug("rb_gc_mark() called for broken object");
5327  break;
5328 
5329  case T_NODE:
5330  UNEXPECTED_NODE(rb_gc_mark);
5331  break;
5332 
5333  case T_IMEMO:
5334  gc_mark_imemo(objspace, obj);
5335  return;
5336  }
5337 
5338  gc_mark(objspace, any->as.basic.klass);
5339 
5340  switch (BUILTIN_TYPE(obj)) {
5341  case T_CLASS:
5342  case T_MODULE:
5343  if (RCLASS_SUPER(obj)) {
5344  gc_mark(objspace, RCLASS_SUPER(obj));
5345  }
5346  if (!RCLASS_EXT(obj)) break;
5347  mark_m_tbl(objspace, RCLASS_M_TBL(obj));
5348  mark_tbl_no_pin(objspace, RCLASS_IV_TBL(obj));
5349  mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
5350  break;
5351 
5352  case T_ICLASS:
5353  if (FL_TEST(obj, RICLASS_IS_ORIGIN)) {
5354  mark_m_tbl(objspace, RCLASS_M_TBL(obj));
5355  }
5356  if (RCLASS_SUPER(obj)) {
5357  gc_mark(objspace, RCLASS_SUPER(obj));
5358  }
5359  if (!RCLASS_EXT(obj)) break;
5360  mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
5361  break;
5362 
5363  case T_ARRAY:
5364  if (FL_TEST(obj, ELTS_SHARED)) {
5365  VALUE root = any->as.array.as.heap.aux.shared_root;
5366  gc_mark(objspace, root);
5367  }
5368  else {
5369  long i, len = RARRAY_LEN(obj);
5370  const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(obj);
5371  for (i=0; i < len; i++) {
5372  gc_mark(objspace, ptr[i]);
5373  }
5374 
5375  if (objspace->mark_func_data == NULL) {
5376  if (!FL_TEST_RAW(obj, RARRAY_EMBED_FLAG) &&
5377  RARRAY_TRANSIENT_P(obj)) {
5378  rb_transient_heap_mark(obj, ptr);
5379  }
5380  }
5381  }
5382  break;
5383 
5384  case T_HASH:
5385  mark_hash(objspace, obj);
5386  break;
5387 
5388  case T_STRING:
5389  if (STR_SHARED_P(obj)) {
5390  gc_mark(objspace, any->as.string.as.heap.aux.shared);
5391  }
5392  break;
5393 
5394  case T_DATA:
5395  {
5396  void *const ptr = DATA_PTR(obj);
5397  if (ptr) {
5398  RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
5399  any->as.typeddata.type->function.dmark :
5400  any->as.data.dmark;
5401  if (mark_func) (*mark_func)(ptr);
5402  }
5403  }
5404  break;
5405 
5406  case T_OBJECT:
5407  {
5408  const VALUE * const ptr = ROBJECT_IVPTR(obj);
5409 
5410  if (ptr) {
5411  uint32_t i, len = ROBJECT_NUMIV(obj);
5412  for (i = 0; i < len; i++) {
5413  gc_mark(objspace, ptr[i]);
5414  }
5415 
5416  if (objspace->mark_func_data == NULL &&
5417  ROBJ_TRANSIENT_P(obj)) {
5418  rb_transient_heap_mark(obj, ptr);
5419  }
5420  }
5421  }
5422  break;
5423 
5424  case T_FILE:
5425  if (any->as.file.fptr) {
5426  gc_mark(objspace, any->as.file.fptr->pathv);
5427  gc_mark(objspace, any->as.file.fptr->tied_io_for_writing);
5428  gc_mark(objspace, any->as.file.fptr->writeconv_asciicompat);
5429  gc_mark(objspace, any->as.file.fptr->writeconv_pre_ecopts);
5430  gc_mark(objspace, any->as.file.fptr->encs.ecopts);
5431  gc_mark(objspace, any->as.file.fptr->write_lock);
5432  }
5433  break;
5434 
5435  case T_REGEXP:
5436  gc_mark(objspace, any->as.regexp.src);
5437  break;
5438 
5439  case T_MATCH:
5440  gc_mark(objspace, any->as.match.regexp);
5441  if (any->as.match.str) {
5442  gc_mark(objspace, any->as.match.str);
5443  }
5444  break;
5445 
5446  case T_RATIONAL:
5447  gc_mark(objspace, any->as.rational.num);
5448  gc_mark(objspace, any->as.rational.den);
5449  break;
5450 
5451  case T_COMPLEX:
5452  gc_mark(objspace, any->as.complex.real);
5453  gc_mark(objspace, any->as.complex.imag);
5454  break;
5455 
5456  case T_STRUCT:
5457  {
5458  long i;
5459  const long len = RSTRUCT_LEN(obj);
5460  const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
5461 
5462  for (i=0; i<len; i++) {
5463  gc_mark(objspace, ptr[i]);
5464  }
5465 
5466  if (objspace->mark_func_data == NULL &&
5467  RSTRUCT_TRANSIENT_P(obj)) {
5468  rb_transient_heap_mark(obj, ptr);
5469  }
5470  }
5471  break;
5472 
5473  default:
5474 #if GC_DEBUG
5475  rb_gcdebug_print_obj_condition((VALUE)obj);
5476 #endif
5477  if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
5478  if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
5479  if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
5480  rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
5481  BUILTIN_TYPE(obj), (void *)any,
5482  is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
5483  }
5484 }
5485 
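/* Drains the mark stack, marking the children of every popped object. In
 * incremental mode it stops once roughly `count' objects were processed so
 * marking can interleave with the mutator; returns TRUE iff the stack was
 * emptied completely. */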
5490 static inline int
5491 gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
5492 {
5493  mark_stack_t *mstack = &objspace->mark_stack;
5494  VALUE obj;
5495 #if GC_ENABLE_INCREMENTAL_MARK
5496  size_t marked_slots_at_the_beginning = objspace->marked_slots;
5497  size_t popped_count = 0;
5498 #endif
5499 
5500  while (pop_mark_stack(mstack, &obj)) {
5501  if (obj == Qundef) continue; /* skip */
5502 
5503  if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
5504  rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
5505  }
5506  gc_mark_children(objspace, obj);
5507 
5508 #if GC_ENABLE_INCREMENTAL_MARK
5509  if (incremental) {
5510  if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
5511  rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
5512  }
5513  CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
5514  popped_count++;
5515 
5516  if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
5517  break;
5518  }
5519  }
5520  else {
5521  /* just ignore marking bits */
5522  }
5523 #endif
5524  }
5525 
5526  if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
5527 
5528  if (is_mark_stack_empty(mstack)) {
5529  shrink_stack_chunk_cache(mstack);
5530  return TRUE;
5531  }
5532  else {
5533  return FALSE;
5534  }
5535 }
5536 
5537 static int
5538 gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
5539 {
5540  return gc_mark_stacked_objects(objspace, TRUE, count);
5541 }
5542 
5543 static int
5544 gc_mark_stacked_objects_all(rb_objspace_t *objspace)
5545 {
5546  return gc_mark_stacked_objects(objspace, FALSE, 0);
5547 }
5548 
5549 #if PRINT_ROOT_TICKS
5550 #define MAX_TICKS 0x100
5551 static tick_t mark_ticks[MAX_TICKS];
5552 static const char *mark_ticks_categories[MAX_TICKS];
5553 
5554 static void
5555 show_mark_ticks(void)
5556 {
5557  int i;
5558  fprintf(stderr, "mark ticks result:\n");
5559  for (i=0; i<MAX_TICKS; i++) {
5560  const char *category = mark_ticks_categories[i];
5561  if (category) {
5562  fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
5563  }
5564  else {
5565  break;
5566  }
5567  }
5568 }
5569 
5570 #endif /* PRINT_ROOT_TICKS */
5571 
5572 static void
5573 gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
5574 {
5575  struct gc_list *list;
5576  rb_execution_context_t *ec = GET_EC();
5577  rb_vm_t *vm = rb_ec_vm_ptr(ec);
5578 
5579 #if PRINT_ROOT_TICKS
5580  tick_t start_tick = tick();
5581  int tick_count = 0;
5582  const char *prev_category = 0;
5583 
5584  if (mark_ticks_categories[0] == 0) {
5585  atexit(show_mark_ticks);
5586  }
5587 #endif
5588 
5589  if (categoryp) *categoryp = "xxx";
5590 
5591 #if USE_RGENGC
5592  objspace->rgengc.parent_object = Qfalse;
5593 #endif
5594 
5595 #if PRINT_ROOT_TICKS
5596 #define MARK_CHECKPOINT_PRINT_TICK(category) do { \
5597  if (prev_category) { \
5598  tick_t t = tick(); \
5599  mark_ticks[tick_count] = t - start_tick; \
5600  mark_ticks_categories[tick_count] = prev_category; \
5601  tick_count++; \
5602  } \
5603  prev_category = category; \
5604  start_tick = tick(); \
5605 } while (0)
5606 #else /* PRINT_ROOT_TICKS */
5607 #define MARK_CHECKPOINT_PRINT_TICK(category)
5608 #endif
5609 
5610 #define MARK_CHECKPOINT(category) do { \
5611  if (categoryp) *categoryp = category; \
5612  MARK_CHECKPOINT_PRINT_TICK(category); \
5613 } while (0)
5614 
5615  MARK_CHECKPOINT("vm");
5616  SET_STACK_END;
5617  rb_vm_mark(vm);
5618  if (vm->self) gc_mark(objspace, vm->self);
5619 
5620  MARK_CHECKPOINT("finalizers");
5621  mark_finalizer_tbl(objspace, finalizer_table);
5622 
5623  MARK_CHECKPOINT("machine_context");
5624  mark_current_machine_context(objspace, ec);
5625 
5626  /* mark protected global variables */
5627  MARK_CHECKPOINT("global_list");
5628  for (list = global_list; list; list = list->next) {
5629  gc_mark_maybe(objspace, *list->varptr);
5630  }
5631 
5632  MARK_CHECKPOINT("end_proc");
5633  rb_mark_end_proc();
5634 
5635  MARK_CHECKPOINT("global_tbl");
5636  rb_gc_mark_global_tbl();
5637 
5638  MARK_CHECKPOINT("object_id");
5639  rb_gc_mark(objspace->next_object_id);
5640  mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl); /* Only mark ids */
5641 
5642  if (stress_to_class) rb_gc_mark(stress_to_class);
5643 
5644  MARK_CHECKPOINT("finish");
5645 #undef MARK_CHECKPOINT
5646 }
5647 
5648 #if RGENGC_CHECK_MODE >= 4
5649 
5650 #define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
5651 #define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
5652 #define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
5653 
5654 struct reflist {
5655  VALUE *list;
5656  int pos;
5657  int size;
5658 };
5659 
5660 static struct reflist *
5661 reflist_create(VALUE obj)
5662 {
5663  struct reflist *refs = xmalloc(sizeof(struct reflist));
5664  refs->size = 1;
5665  refs->list = ALLOC_N(VALUE, refs->size);
5666  refs->list[0] = obj;
5667  refs->pos = 1;
5668  return refs;
5669 }
5670 
5671 static void
5672 reflist_destruct(struct reflist *refs)
5673 {
5674  xfree(refs->list);
5675  xfree(refs);
5676 }
5677 
5678 static void
5679 reflist_add(struct reflist *refs, VALUE obj)
5680 {
5681  if (refs->pos == refs->size) {
5682  refs->size *= 2;
5683  SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
5684  }
5685 
5686  refs->list[refs->pos++] = obj;
5687 }
5688 
5689 static void
5690 reflist_dump(struct reflist *refs)
5691 {
5692  int i;
5693  for (i=0; i<refs->pos; i++) {
5694  VALUE obj = refs->list[i];
5695  if (IS_ROOTSIG(obj)) { /* root */
5696  fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
5697  }
5698  else {
5699  fprintf(stderr, "<%s>", obj_info(obj));
5700  }
5701  if (i+1 < refs->pos) fprintf(stderr, ", ");
5702  }
5703 }
5704 
5705 static int
5706 reflist_referred_from_machine_context(struct reflist *refs)
5707 {
5708  int i;
5709  for (i=0; i<refs->pos; i++) {
5710  VALUE obj = refs->list[i];
5711  if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
5712  }
5713  return 0;
5714 }
5715 
5716 struct allrefs {
5717  rb_objspace_t *objspace;
5718  /* a -> obj1
5719  * b -> obj1
5720  * c -> obj1
5721  * c -> obj2
5722  * d -> obj3
5723  * #=> {obj1 => [a, b, c], obj2 => [c, d]}
5724  */
5725  struct st_table *references;
5726  const char *category;
5727  VALUE root_obj;
5728  mark_stack_t mark_stack;
5729 };
5730 
5731 static int
5732 allrefs_add(struct allrefs *data, VALUE obj)
5733 {
5734  struct reflist *refs;
5735 
5736  if (st_lookup(data->references, obj, (st_data_t *)&refs)) {
5737  reflist_add(refs, data->root_obj);
5738  return 0;
5739  }
5740  else {
5741  refs = reflist_create(data->root_obj);
5742  st_insert(data->references, obj, (st_data_t)refs);
5743  return 1;
5744  }
5745 }
5746 
5747 static void
5748 allrefs_i(VALUE obj, void *ptr)
5749 {
5750  struct allrefs *data = (struct allrefs *)ptr;
5751 
5752  if (allrefs_add(data, obj)) {
5753  push_mark_stack(&data->mark_stack, obj);
5754  }
5755 }
5756 
5757 static void
5758 allrefs_roots_i(VALUE obj, void *ptr)
5759 {
5760  struct allrefs *data = (struct allrefs *)ptr;
5761  if (strlen(data->category) == 0) rb_bug("!!!");
5762  data->root_obj = MAKE_ROOTSIG(data->category);
5763 
5764  if (allrefs_add(data, obj)) {
5765  push_mark_stack(&data->mark_stack, obj);
5766  }
5767 }
5768 
5769 static st_table *
5770 objspace_allrefs(rb_objspace_t *objspace)
5771 {
5772  struct allrefs data;
5773  struct mark_func_data_struct mfd;
5774  VALUE obj;
5775  int prev_dont_gc = dont_gc;
5776  dont_gc = TRUE;
5777 
5778  data.objspace = objspace;
5779  data.references = st_init_numtable();
5780  init_mark_stack(&data.mark_stack);
5781 
5782  mfd.mark_func = allrefs_roots_i;
5783  mfd.data = &data;
5784 
5785  /* traverse root objects */
5786  PUSH_MARK_FUNC_DATA(&mfd);
5787  objspace->mark_func_data = &mfd;
5788  gc_mark_roots(objspace, &data.category);
5789  POP_MARK_FUNC_DATA();
5790 
5791  /* traverse rest objects reachable from root objects */
5792  while (pop_mark_stack(&data.mark_stack, &obj)) {
5793  rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
5794  }
5795  free_stack_chunks(&data.mark_stack);
5796 
5797  dont_gc = prev_dont_gc;
5798  return data.references;
5799 }
5800 
5801 static int
5802 objspace_allrefs_destruct_i(st_data_t key, st_data_t value, void *ptr)
5803 {
5804  struct reflist *refs = (struct reflist *)value;
5805  reflist_destruct(refs);
5806  return ST_CONTINUE;
5807 }
5808 
5809 static void
5810 objspace_allrefs_destruct(struct st_table *refs)
5811 {
5812  st_foreach(refs, objspace_allrefs_destruct_i, 0);
5813  st_free_table(refs);
5814 }
5815 
5816 #if RGENGC_CHECK_MODE >= 5
5817 static int
5818 allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
5819 {
5820  VALUE obj = (VALUE)k;
5821  struct reflist *refs = (struct reflist *)v;
5822  fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
5823  reflist_dump(refs);
5824  fprintf(stderr, "\n");
5825  return ST_CONTINUE;
5826 }
5827 
5828 static void
5829 allrefs_dump(rb_objspace_t *objspace)
5830 {
5831  fprintf(stderr, "[all refs] (size: %d)\n", (int)objspace->rgengc.allrefs_table->num_entries);
5832  st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
5833 }
5834 #endif
5835 
5836 static int
5837 gc_check_after_marks_i(st_data_t k, st_data_t v, void *ptr)
5838 {
5839  VALUE obj = k;
5840  struct reflist *refs = (struct reflist *)v;
5841  rb_objspace_t *objspace = (rb_objspace_t *)ptr;
5842 
5843  /* object should be marked or oldgen */
5844  if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
5845  fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
5846  fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
5847  reflist_dump(refs);
5848 
5849  if (reflist_referred_from_machine_context(refs)) {
5850  fprintf(stderr, " (marked from machine stack).\n");
5851  /* marked from machine context can be false positive */
5852  }
5853  else {
5854  objspace->rgengc.error_count++;
5855  fprintf(stderr, "\n");
5856  }
5857  }
5858  return ST_CONTINUE;
5859 }
5860 
5861 static void
5862 gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
5863 {
5864  size_t saved_malloc_increase = objspace->malloc_params.increase;
5865 #if RGENGC_ESTIMATE_OLDMALLOC
5866  size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
5867 #endif
5868  VALUE already_disabled = rb_objspace_gc_disable(objspace);
5869 
5870  objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
5871 
5872  if (checker_func) {
5873  st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
5874  }
5875 
5876  if (objspace->rgengc.error_count > 0) {
5877 #if RGENGC_CHECK_MODE >= 5
5878  allrefs_dump(objspace);
5879 #endif
5880  if (checker_name) rb_bug("%s: GC has problem.", checker_name);
5881  }
5882 
5883  objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
5884  objspace->rgengc.allrefs_table = 0;
5885 
5886  if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
5887  objspace->malloc_params.increase = saved_malloc_increase;
5888 #if RGENGC_ESTIMATE_OLDMALLOC
5889  objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
5890 #endif
5891 }
5892 #endif /* RGENGC_CHECK_MODE >= 4 */
5893 
5894 struct verify_internal_consistency_struct {
5895  rb_objspace_t *objspace;
5896  int err_count;
5897  size_t live_object_count;
5898  size_t zombie_object_count;
5899 
5900 #if USE_RGENGC
5901  VALUE parent;
5902  size_t old_object_count;
5903  size_t remembered_shady_count;
5904 #endif
5905 };
5906 
5907 #if USE_RGENGC
5908 static void
5909 check_generation_i(const VALUE child, void *ptr)
5910 {
5911  struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
5912  const VALUE parent = data->parent;
5913 
5914  if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));
5915 
5916  if (!RVALUE_OLD_P(child)) {
5917  if (!RVALUE_REMEMBERED(parent) &&
5918  !RVALUE_REMEMBERED(child) &&
5919  !RVALUE_UNCOLLECTIBLE(child)) {
5920  fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
5921  data->err_count++;
5922  }
5923  }
5924 }
5925 
5926 static void
5927 check_color_i(const VALUE child, void *ptr)
5928 {
5929  struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
5930  const VALUE parent = data->parent;
5931 
5932  if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
5933  fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
5934  obj_info(parent), obj_info(child));
5935  data->err_count++;
5936  }
5937 }
5938 #endif
5939 
5940 static void
5941 check_children_i(const VALUE child, void *ptr)
5942 {
5943  struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
5944  if (check_rvalue_consistency_force(child, FALSE) != 0) {
5945  fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
5946  obj_info(child), obj_info(data->parent));
5947  rb_print_backtrace(); /* a C-level backtrace helps debugging */
5948 
5949  data->err_count++;
5950  }
5951 }
5952 
5953 static int
5954 verify_internal_consistency_i(void *page_start, void *page_end, size_t stride, void *ptr)
5955 {
5956  struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
5957  VALUE obj;
5958  rb_objspace_t *objspace = data->objspace;
5959 
5960  for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
5961  void *poisoned = asan_poisoned_object_p(obj);
5962  asan_unpoison_object(obj, false);
5963 
5964  if (is_live_object(objspace, obj)) {
5965  /* count objects */
5966  data->live_object_count++;
5967  data->parent = obj;
5968 
5969  /* Normally, we don't expect T_MOVED objects to be in the heap.
5970  * But they can stay alive on the stack. */
5971  if (!gc_object_moved_p(objspace, obj)) {
5972  /* moved slots don't have children */
5973  rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
5974  }
5975 
5976 #if USE_RGENGC
5977  /* check health of children */
5978  if (RVALUE_OLD_P(obj)) data->old_object_count++;
5979  if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
5980 
5981  if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
5982  /* reachable objects from an oldgen object should be old or (young with remember) */
5983  data->parent = obj;
5984  rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
5985  }
5986 
5987  if (is_incremental_marking(objspace)) {
5988  if (RVALUE_BLACK_P(obj)) {
5989  /* reachable objects from black objects should be black or grey objects */
5990  data->parent = obj;
5991  rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
5992  }
5993  }
5994 #endif
5995  }
5996  else {
5997  if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
5998  GC_ASSERT((RBASIC(obj)->flags & ~FL_SEEN_OBJ_ID) == T_ZOMBIE);
5999  data->zombie_object_count++;
6000  }
6001  }
6002  if (poisoned) {
6003  GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
6004  asan_poison_object(obj);
6005  }
6006  }
6007 
6008  return 0;
6009 }
6010 
6011 static int
6012 gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
6013 {
6014 #if USE_RGENGC
6015  int i;
6016  unsigned int has_remembered_shady = FALSE;
6017  unsigned int has_remembered_old = FALSE;
6018  int remembered_old_objects = 0;
6019  int free_objects = 0;
6020  int zombie_objects = 0;
6021 
6022  for (i=0; i<page->total_slots; i++) {
6023  VALUE val = (VALUE)&page->start[i];
6024  void *poisoned = asan_poisoned_object_p(val);
6025  asan_unpoison_object(val, false);
6026 
6027  if (RBASIC(val) == 0) free_objects++;
6028  if (BUILTIN_TYPE(val) == T_ZOMBIE) zombie_objects++;
6029  if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
6030  has_remembered_shady = TRUE;
6031  }
6032  if (RVALUE_PAGE_MARKING(page, val)) {
6033  has_remembered_old = TRUE;
6034  remembered_old_objects++;
6035  }
6036 
6037  if (poisoned) {
6038  GC_ASSERT(BUILTIN_TYPE(val) == T_NONE);
6039  asan_poison_object(val);
6040  }
6041  }
6042 
6043  if (!is_incremental_marking(objspace) &&
6044  page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
6045 
6046  for (i=0; i<page->total_slots; i++) {
6047  VALUE val = (VALUE)&page->start[i];
6048  if (RVALUE_PAGE_MARKING(page, val)) {
6049  fprintf(stderr, "marking -> %s\n", obj_info(val));
6050  }
6051  }
6052  rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
6053  (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
6054  }
6055 
6056  if (page->flags.has_uncollectible_shady_objects == FALSE && has_remembered_shady == TRUE) {
6057  rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
6058  (void *)page, obj ? obj_info(obj) : "");
6059  }
6060 
6061  if (0) {
6062  /* free_slots may not equal to free_objects */
6063  if (page->free_slots != free_objects) {
6064  rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, (int)page->free_slots, free_objects);
6065  }
6066  }
6067  if (page->final_slots != zombie_objects) {
6068  rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, (int)page->final_slots, zombie_objects);
6069  }
6070 
6071  return remembered_old_objects;
6072 #else
6073  return 0;
6074 #endif
6075 }
6076 
6077 static int
6078 gc_verify_heap_pages_(rb_objspace_t *objspace, struct list_head *head)
6079 {
6080  int remembered_old_objects = 0;
6081  struct heap_page *page = 0;
6082 
6083  list_for_each(head, page, page_node) {
6084  asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
6085  RVALUE *p = page->freelist;
6086  while (p) {
6087  RVALUE *prev = p;
6088  asan_unpoison_object((VALUE)p, false);
6089  if (BUILTIN_TYPE(p) != T_NONE) {
6090  fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info((VALUE)p));
6091  }
6092  p = p->as.free.next;
6093  asan_poison_object((VALUE)prev);
6094  }
6095  asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
6096 
6097  if (page->flags.has_remembered_objects == FALSE) {
6098  remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
6099  }
6100  }
6101 
6102  return remembered_old_objects;
6103 }
6104 
6105 static int
6106 gc_verify_heap_pages(rb_objspace_t *objspace)
6107 {
6108  int remembered_old_objects = 0;
6109  remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_eden->pages);
6110  remembered_old_objects += gc_verify_heap_pages_(objspace, &heap_tomb->pages);
6111  return remembered_old_objects;
6112 }
6113 
6114 /*
6115  * call-seq:
6116  * GC.verify_internal_consistency -> nil
6117  *
6118  * Verify internal consistency.
6119  *
6120  * This method is implementation specific.
6121  * Currently it checks generational consistency
6122  * when RGenGC is supported.
6123  */
6124 static VALUE
6125 gc_verify_internal_consistency_m(VALUE dummy)
6126 {
6127  gc_verify_internal_consistency(&rb_objspace);
6128 
6129  return Qnil;
6130 }
6131 
6132 static void
6133 gc_verify_internal_consistency(rb_objspace_t *objspace)
6134 {
6135  struct verify_internal_consistency_struct data = {0};
6136 
6137  data.objspace = objspace;
6138  gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
6139 
6140  /* check relations */
6141 
6142  objspace_each_objects_without_setup(objspace, verify_internal_consistency_i, &data);
6143 
6144  if (data.err_count != 0) {
6145 #if RGENGC_CHECK_MODE >= 5
6146  objspace->rgengc.error_count = data.err_count;
6147  gc_marks_check(objspace, NULL, NULL);
6148  allrefs_dump(objspace);
6149 #endif
6150  rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
6151  }
6152 
6153  /* check heap_page status */
6154  gc_verify_heap_pages(objspace);
6155 
6156  /* check counters */
6157 
6158  if (!is_lazy_sweeping(heap_eden) && !finalizing) {
6159  if (objspace_live_slots(objspace) != data.live_object_count) {
6160  fprintf(stderr, "heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d\n",
6161  (int)heap_pages_final_slots, (int)objspace->profile.total_freed_objects);
6162  rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace_live_slots(objspace), data.live_object_count);
6163  }
6164  }
6165 
6166 #if USE_RGENGC
6167  if (!is_marking(objspace)) {
6168  if (objspace->rgengc.old_objects != data.old_object_count) {
6169  rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace->rgengc.old_objects, data.old_object_count);
6170  }
6171  if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
6172  rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".", objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
6173  }
6174  }
6175 #endif
6176 
6177  if (!finalizing) {
6178  size_t list_count = 0;
6179 
6180  {
6181  VALUE z = heap_pages_deferred_final;
6182  while (z) {
6183  list_count++;
6184  z = RZOMBIE(z)->next;
6185  }
6186  }
6187 
6188  if (heap_pages_final_slots != data.zombie_object_count ||
6189  heap_pages_final_slots != list_count) {
6190 
6191  rb_bug("inconsistent finalizing object count:\n"
6192  " expect %"PRIuSIZE"\n"
6193  " but %"PRIuSIZE" zombies\n"
6194  " heap_pages_deferred_final list has %"PRIuSIZE" items.",
6195  heap_pages_final_slots,
6196  data.zombie_object_count,
6197  list_count);
6198  }
6199  }
6200 
6201  gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
6202 }
6203 
6204 void
6205 rb_gc_verify_internal_consistency(void)
6206 {
6207  gc_verify_internal_consistency(&rb_objspace);
6208 }
6209 
6210 static VALUE
6211 gc_verify_transient_heap_internal_consistency(VALUE dmy)
6212 {
6213  rb_transient_heap_verify();
6214  return Qnil;
6215 }
6216 
6217 /* marks */
6218 
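/* Prepares a marking phase. A full (major) mark resets old-generation
 * counters and clears the mark and remember bitmaps; a minor mark starts from
 * the already-marked old generation and re-marks the remembered set. */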
6219 static void
6220 gc_marks_start(rb_objspace_t *objspace, int full_mark)
6221 {
6222  /* start marking */
6223  gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
6224  gc_mode_transition(objspace, gc_mode_marking);
6225 
6226 #if USE_RGENGC
6227  if (full_mark) {
6228 #if GC_ENABLE_INCREMENTAL_MARK
6229  objspace->rincgc.step_slots = (objspace->marked_slots * 2) / ((objspace->rincgc.pooled_page_num * HEAP_PAGE_OBJ_LIMIT) / 5);
6230 
6231  if (0) fprintf(stderr, "objspace->marked_slots: %d, objspace->rincgc.pooled_page_num: %d, objspace->rincgc.step_slots: %d, \n",
6232  (int)objspace->marked_slots, (int)objspace->rincgc.pooled_page_num, (int)objspace->rincgc.step_slots);
6233 #endif
6234  objspace->flags.during_minor_gc = FALSE;
6235  objspace->profile.major_gc_count++;
6236  objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
6237  objspace->rgengc.old_objects = 0;
6238  objspace->rgengc.last_major_gc = objspace->profile.count;
6239  objspace->marked_slots = 0;
6240  rgengc_mark_and_rememberset_clear(objspace, heap_eden);
6241  }
6242  else {
6243  objspace->flags.during_minor_gc = TRUE;
6244  objspace->marked_slots =
6245  objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
6246  objspace->profile.minor_gc_count++;
6247  rgengc_rememberset_mark(objspace, heap_eden);
6248  }
6249 #endif
6250 
6251  gc_mark_roots(objspace, NULL);
6252 
6253  gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %d\n", full_mark ? "full" : "minor", (int)mark_stack_size(&objspace->mark_stack));
6254 }
6255 
6256 #if GC_ENABLE_INCREMENTAL_MARK
6257 static void
6258 gc_marks_wb_unprotected_objects(rb_objspace_t *objspace)
6259 {
6260  struct heap_page *page = 0;
6261 
6262  list_for_each(&heap_eden->pages, page, page_node) {
6263  bits_t *mark_bits = page->mark_bits;
6264  bits_t *wbun_bits = page->wb_unprotected_bits;
6265  RVALUE *p = page->start;
6266  RVALUE *offset = p - NUM_IN_PAGE(p);
6267  size_t j;
6268 
6269  for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
6270  bits_t bits = mark_bits[j] & wbun_bits[j];
6271 
6272  if (bits) {
6273  p = offset + j * BITS_BITLENGTH;
6274 
6275  do {
6276  if (bits & 1) {
6277  gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
6278  GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
6279  GC_ASSERT(RVALUE_MARKED((VALUE)p));
6280  gc_mark_children(objspace, (VALUE)p);
6281  }
6282  p++;
6283  bits >>= 1;
6284  } while (bits);
6285  }
6286  }
6287  }
6288 
6289  gc_mark_stacked_objects_all(objspace);
6290 }
6291 
6292 static struct heap_page *
6293 heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
6294 {
6295  struct heap_page *page = heap->pooled_pages;
6296 
6297  if (page) {
6298  heap->pooled_pages = page->free_next;
6299  heap_add_freepage(heap, page);
6300  }
6301 
6302  return page;
6303 }
6304 #endif
6305 
6306 static int
6307 gc_marks_finish(rb_objspace_t *objspace)
6308 {
6309 #if GC_ENABLE_INCREMENTAL_MARK
6310  /* finish incremental GC */
6311  if (is_incremental_marking(objspace)) {
6312  if (heap_eden->pooled_pages) {
6313  heap_move_pooled_pages_to_free_pages(heap_eden);
6314  gc_report(1, objspace, "gc_marks_finish: pooled pages still exist. retry.\n");
6315  return FALSE; /* continue marking phase */
6316  }
6317 
6318  if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
6319  rb_bug("gc_marks_finish: mark stack is not empty (%d).", (int)mark_stack_size(&objspace->mark_stack));
6320  }
6321 
6322  gc_mark_roots(objspace, 0);
6323 
6324  if (is_mark_stack_empty(&objspace->mark_stack) == FALSE) {
6325  gc_report(1, objspace, "gc_marks_finish: not empty (%d). retry.\n", (int)mark_stack_size(&objspace->mark_stack));
6326  return FALSE;
6327  }
6328 
6329 #if RGENGC_CHECK_MODE >= 2
6330  if (gc_verify_heap_pages(objspace) != 0) {
6331  rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
6332  }
6333 #endif
6334 
6335  objspace->flags.during_incremental_marking = FALSE;
6336  /* check children of all marked wb-unprotected objects */
6337  gc_marks_wb_unprotected_objects(objspace);
6338  }
6339 #endif /* GC_ENABLE_INCREMENTAL_MARK */
6340 
6341 #if RGENGC_CHECK_MODE >= 2
6342  gc_verify_internal_consistency(objspace);
6343 #endif
6344 
6345 #if USE_RGENGC
6346  if (is_full_marking(objspace)) {
6347  /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
6348  const double r = gc_params.oldobject_limit_factor;
6349  objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
6350  objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
6351  }
6352 #endif
6353 
6354 #if RGENGC_CHECK_MODE >= 4
6355  gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
6356 #endif
6357 
6358  {
6359  /* decide full GC is needed or not */
6360  rb_heap_t *heap = heap_eden;
6361  size_t total_slots = heap_allocatable_pages * HEAP_PAGE_OBJ_LIMIT + heap->total_slots;
6362  size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
6363  size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
6364  size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
6365  int full_marking = is_full_marking(objspace);
6366 
6367  GC_ASSERT(heap->total_slots >= objspace->marked_slots);
6368 
6369  /* setup free-able page counts */
6370  if (max_free_slots < gc_params.heap_init_slots) max_free_slots = gc_params.heap_init_slots;
6371 
6372  if (sweep_slots > max_free_slots) {
6373  heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
6374  }
6375  else {
6376  heap_pages_freeable_pages = 0;
6377  }
6378 
6379  /* check free_min */
6380  if (min_free_slots < gc_params.heap_free_slots) min_free_slots = gc_params.heap_free_slots;
6381 
6382 #if USE_RGENGC
6383  if (sweep_slots < min_free_slots) {
6384  if (!full_marking) {
6385  if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
6386  full_marking = TRUE;
6387  /* do not update last_major_gc, because full marking is not done. */
6388  goto increment;
6389  }
6390  else {
6391  gc_report(1, objspace, "gc_marks_finish: next is full GC!!\n");
6392  objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
6393  }
6394  }
6395  else {
6396  increment:
6397  gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
6398  heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
6399  heap_increment(objspace, heap);
6400  }
6401  }
6402 
6403  if (full_marking) {
6404  /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
6405  const double r = gc_params.oldobject_limit_factor;
6406  objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
6407  objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
6408  }
6409 
6412  }
6413  if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
6415  }
6416  if (RGENGC_FORCE_MAJOR_GC) {
6418  }
6419 
6420  gc_report(1, objspace, "gc_marks_finish (marks %d objects, old %d objects, total %d slots, sweep %d slots, increment: %d, next GC: %s)\n",
6421  (int)objspace->marked_slots, (int)objspace->rgengc.old_objects, (int)heap->total_slots, (int)sweep_slots, (int)heap_allocatable_pages,
6422  objspace->rgengc.need_major_gc ? "major" : "minor");
6423 #else /* USE_RGENGC */
6424  if (sweep_slots < min_free_slots) {
6425  gc_report(1, objspace, "gc_marks_finish: heap_set_increment!!\n");
6426  heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
6427  heap_increment(objspace, heap);
6428  }
6429 #endif
6430  }
6431 
6432  rb_transient_heap_finish_marking();
6433 
6434  gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);
6435 
6436  return TRUE;
6437 }
6438 
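/* [Editor's note] The sizing block in gc_marks_finish() above compares the
 * slots about to be swept against two thresholds derived from the heap's
 * free-slot ratios: too much slack frees whole pages, too little grows the
 * heap (or forces the next GC to be a major one). A minimal standalone
 * sketch of that arithmetic; parameter values are illustrative, not the
 * build defaults:
 */
#if 0
#include <stddef.h>
#include <stdbool.h>

struct sizing { size_t freeable_pages; bool grow; };

static struct sizing
decide_heap_sizing(size_t total_slots, size_t marked_slots,
                   double min_ratio, double max_ratio, size_t slots_per_page)
{
    struct sizing s = {0, false};
    size_t sweep_slots = total_slots - marked_slots;
    size_t max_free = (size_t)(total_slots * max_ratio);
    size_t min_free = (size_t)(total_slots * min_ratio);

    if (sweep_slots > max_free)       /* too much slack: free whole pages */
        s.freeable_pages = (sweep_slots - max_free) / slots_per_page;
    else if (sweep_slots < min_free)  /* too little slack: extend the heap */
        s.grow = true;
    return s;
}
#endif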
6439 static void
6440 gc_marks_step(rb_objspace_t *objspace, int slots)
6441 {
6442 #if GC_ENABLE_INCREMENTAL_MARK
6443  GC_ASSERT(is_marking(objspace));
6444 
6445  if (gc_mark_stacked_objects_incremental(objspace, slots)) {
6446  if (gc_marks_finish(objspace)) {
6447  /* finish */
6448  gc_sweep(objspace);
6449  }
6450  }
6451  if (0) fprintf(stderr, "objspace->marked_slots: %d\n", (int)objspace->marked_slots);
6452 #endif
6453 }
6454 
6455 static void
6456 gc_marks_rest(rb_objspace_t *objspace)
6457 {
6458  gc_report(1, objspace, "gc_marks_rest\n");
6459 
6460 #if GC_ENABLE_INCREMENTAL_MARK
6461  heap_eden->pooled_pages = NULL;
6462 #endif
6463 
6464  if (is_incremental_marking(objspace)) {
6465  do {
6466  while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
6467  } while (gc_marks_finish(objspace) == FALSE);
6468  }
6469  else {
6470  gc_mark_stacked_objects_all(objspace);
6471  gc_marks_finish(objspace);
6472  }
6473 
6474  /* move to sweep */
6475  gc_sweep(objspace);
6476 }
6477 
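/* [Editor's note] In incremental mode gc_marks_rest() cannot stop after a
 * single drain: gc_marks_finish() re-scans the roots and returns FALSE
 * whenever that re-scan greyed new objects, so the drain is retried. The
 * retry shape in isolation, with stub work functions standing in for
 * gc_mark_stacked_objects_incremental() and gc_marks_finish():
 */
#if 0
#include <stdbool.h>

static int extra_root_scans = 2;       /* pretend two re-scans add work */

static void drain_mark_stack(void) { /* pop and mark until empty */ }
static bool marks_finished(void)   { return extra_root_scans-- <= 0; }

static void
marks_rest_sketch(void)
{
    do {
        drain_mark_stack();
    } while (!marks_finished());       /* retry while finish found work */
}
#endif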
6478 static void
6479 gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
6480 {
6481  GC_ASSERT(dont_gc == FALSE);
6482 #if GC_ENABLE_INCREMENTAL_MARK
6483 
6484  gc_enter(objspace, "marks_continue");
6485 
6486  PUSH_MARK_FUNC_DATA(NULL);
6487  {
6488  int slots = 0;
6489  const char *from;
6490 
6491  if (heap->pooled_pages) {
6492  while (heap->pooled_pages && slots < HEAP_PAGE_OBJ_LIMIT) {
6493  struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
6494  slots += page->free_slots;
6495  }
6496  from = "pooled-pages";
6497  }
6498  else if (heap_increment(objspace, heap)) {
6499  slots = heap->free_pages->free_slots;
6500  from = "incremented-pages";
6501  }
6502 
6503  if (slots > 0) {
6504  gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n", slots, from);
6505  gc_marks_step(objspace, (int)objspace->rincgc.step_slots);
6506  }
6507  else {
6508  gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %d).\n", (int)mark_stack_size(&objspace->mark_stack));
6509  gc_marks_rest(objspace);
6510  }
6511  }
6512  POP_MARK_FUNC_DATA();
6513 
6514  gc_exit(objspace, "marks_continue");
6515 #endif
6516 }
6517 
6518 static void
6519 gc_marks(rb_objspace_t *objspace, int full_mark)
6520 {
6521  gc_prof_mark_timer_start(objspace);
6522 
6523  PUSH_MARK_FUNC_DATA(NULL);
6524  {
6525  /* setup marking */
6526 
6527 #if USE_RGENGC
6528  gc_marks_start(objspace, full_mark);
6529  if (!is_incremental_marking(objspace)) {
6530  gc_marks_rest(objspace);
6531  }
6532 
6533 #if RGENGC_PROFILE > 0
6534  if (gc_prof_record(objspace)) {
6535  gc_profile_record *record = gc_prof_record(objspace);
6536  record->old_objects = objspace->rgengc.old_objects;
6537  }
6538 #endif
6539 
6540 #else /* USE_RGENGC */
6541  gc_marks_start(objspace, TRUE);
6542  gc_marks_rest(objspace);
6543 #endif
6544  }
6545  POP_MARK_FUNC_DATA();
6546  gc_prof_mark_timer_stop(objspace);
6547 }
6548 
6549 /* RGENGC */
6550 
6551 static void
6552 gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
6553 {
6554  if (level <= RGENGC_DEBUG) {
6555  char buf[1024];
6556  FILE *out = stderr;
6557  va_list args;
6558  const char *status = " ";
6559 
6560 #if USE_RGENGC
6561  if (during_gc) {
6562  status = is_full_marking(objspace) ? "+" : "-";
6563  }
6564  else {
6565  if (is_lazy_sweeping(heap_eden)) {
6566  status = "S";
6567  }
6568  if (is_incremental_marking(objspace)) {
6569  status = "M";
6570  }
6571  }
6572 #endif
6573 
6574  va_start(args, fmt);
6575  vsnprintf(buf, 1024, fmt, args);
6576  va_end(args);
6577 
6578  fprintf(out, "%s|", status);
6579  fputs(buf, out);
6580  }
6581 }
6582 
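/* [Editor's note] gc_report_body() formats into a fixed buffer with
 * vsnprintf() before writing, so the one-character GC-status prefix and
 * the message reach stderr in a single pair of writes. The va_list
 * plumbing in isolation (an illustrative sketch, not the original API):
 */
#if 0
#include <stdarg.h>
#include <stdio.h>

static void
report(const char *status, const char *fmt, ...)
{
    char buf[1024];
    va_list args;

    va_start(args, fmt);
    vsnprintf(buf, sizeof(buf), fmt, args);  /* truncates, never overflows */
    va_end(args);
    fprintf(stderr, "%s|%s", status, buf);
}
#endif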
6583 #if USE_RGENGC
6584 
6585 /* bit operations */
6586 
6587 static int
6588 rgengc_remembersetbits_get(rb_objspace_t *objspace, VALUE obj)
6589 {
6590  return RVALUE_REMEMBERED(obj);
6591 }
6592 
6593 static int
6594 rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
6595 {
6596  struct heap_page *page = GET_HEAP_PAGE(obj);
6597  bits_t *bits = &page->marking_bits[0];
6598 
6599  GC_ASSERT(!is_incremental_marking(objspace));
6600 
6601  if (MARKED_IN_BITMAP(bits, obj)) {
6602  return FALSE;
6603  }
6604  else {
6605  page->flags.has_remembered_objects = TRUE;
6606  MARK_IN_BITMAP(bits, obj);
6607  return TRUE;
6608  }
6609 }
6610 
6611 /* wb, etc */
6612 
6613 /* return FALSE if already remembered */
6614 static int
6615 rgengc_remember(rb_objspace_t *objspace, VALUE obj)
6616 {
6617  gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
6618  rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
6619 
6620  check_rvalue_consistency(obj);
6621 
6622  if (RGENGC_CHECK_MODE) {
6623  if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
6624  }
6625 
6626 #if RGENGC_PROFILE > 0
6627  if (!rgengc_remembered(objspace, obj)) {
6628  if (RVALUE_WB_UNPROTECTED(obj) == 0) {
6629  objspace->profile.total_remembered_normal_object_count++;
6630 #if RGENGC_PROFILE >= 2
6631  objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
6632 #endif
6633  }
6634  }
6635 #endif /* RGENGC_PROFILE > 0 */
6636 
6637  return rgengc_remembersetbits_set(objspace, obj);
6638 }
6639 
6640 static int
6641 rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj)
6642 {
6643  int result = rgengc_remembersetbits_get(objspace, obj);
6644  check_rvalue_consistency(obj);
6645  return result;
6646 }
6647 
6648 static int
6649 rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
6650 {
6651  gc_report(6, objspace, "rgengc_remembered: %s\n", obj_info(obj));
6652  return rgengc_remembered_sweep(objspace, obj);
6653 }
6654 
6655 #ifndef PROFILE_REMEMBERSET_MARK
6656 #define PROFILE_REMEMBERSET_MARK 0
6657 #endif
6658 
6659 static void
6660 rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
6661 {
6662  size_t j;
6663  struct heap_page *page = 0;
6664 #if PROFILE_REMEMBERSET_MARK
6665  int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
6666 #endif
6667  gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
6668 
6669  list_for_each(&heap->pages, page, page_node) {
6670  if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
6671  RVALUE *p = page->start;
6672  RVALUE *offset = p - NUM_IN_PAGE(p);
6673  bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
6674  bits_t *marking_bits = page->marking_bits;
6675  bits_t *uncollectible_bits = page->uncollectible_bits;
6676  bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
6677 #if PROFILE_REMEMBERSET_MARK
6678  if (page->flags.has_remembered_objects && page->flags.has_uncollectible_shady_objects) has_both++;
6679  else if (page->flags.has_remembered_objects) has_old++;
6680  else if (page->flags.has_uncollectible_shady_objects) has_shady++;
6681 #endif
6682  for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
6683  bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
6684  marking_bits[j] = 0;
6685  }
6686  page->flags.has_remembered_objects = FALSE;
6687 
6688  for (j=0; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
6689  bitset = bits[j];
6690 
6691  if (bitset) {
6692  p = offset + j * BITS_BITLENGTH;
6693 
6694  do {
6695  if (bitset & 1) {
6696  VALUE obj = (VALUE)p;
6697  gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
6698  GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
6699  GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
6700 
6701  gc_mark_children(objspace, obj);
6702  }
6703  p++;
6704  bitset >>= 1;
6705  } while (bitset);
6706  }
6707  }
6708  }
6709 #if PROFILE_REMEMBERSET_MARK
6710  else {
6711  skip++;
6712  }
6713 #endif
6714  }
6715 
6716 #if PROFILE_REMEMBERSET_MARK
6717  fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
6718 #endif
6719  gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
6720 }
6721 
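/* [Editor's note] rgengc_rememberset_mark() above visits remembered
 * objects by OR-ing each word of the marking bitmap with
 * (uncollectible & wb_unprotected), then peeling set bits off the word
 * with `bitset & 1` / `bitset >>= 1`. The bit-walk idiom on its own,
 * runnable and with illustrative values:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    uint64_t bitset = 0x5;   /* toy word: slots 0 and 2 are remembered */
    size_t base = 0;         /* heap index of this word's first slot */

    for (size_t i = base; bitset; i++, bitset >>= 1) {
        if (bitset & 1) printf("visit slot %zu\n", i);
    }
    return 0;
}
#endif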
6722 static void
6723 rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
6724 {
6725  struct heap_page *page = 0;
6726 
6727  list_for_each(&heap->pages, page, page_node) {
6728  memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6729  memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6730  memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6731  memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6732  page->flags.has_uncollectible_shady_objects = FALSE;
6733  page->flags.has_remembered_objects = FALSE;
6734  }
6735 }
6736 
6737 /* RGENGC: APIs */
6738 
6739 NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));
6740 
6741 static void
6742 gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
6743 {
6744  if (RGENGC_CHECK_MODE) {
6745  if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
6746  if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
6747  if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
6748  }
6749 
6750 #if 1
6751  /* mark `a' and remember (default behavior) */
6752  if (!rgengc_remembered(objspace, a)) {
6753  rgengc_remember(objspace, a);
6754  gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
6755  }
6756 #else
6757  /* mark `b' and remember */
6758  MARK_IN_BITMAP(GET_HEAP_MARK_BITS(b), b);
6759  if (RVALUE_WB_UNPROTECTED(b)) {
6760  gc_remember_unprotected(objspace, b);
6761  }
6762  else {
6763  RVALUE_AGE_SET_OLD(objspace, b);
6764  rgengc_remember(objspace, b);
6765  }
6766 
6767  gc_report(1, objspace, "gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
6768 #endif
6769 
6770  check_rvalue_consistency(a);
6771  check_rvalue_consistency(b);
6772 }
6773 
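/* [Editor's note] The generational barrier records the *old* parent `a`
 * in the remembered set, so the next minor GC re-scans it and discovers
 * the young object `b` it now references. A toy remembered set with the
 * same "return FALSE if already remembered" contract as
 * rgengc_remembersetbits_set() (one 64-slot word, illustrative only):
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static uint64_t remembered_bits;

static bool
remember(unsigned slot)             /* false if the bit was already set */
{
    uint64_t mask = UINT64_C(1) << (slot & 63);
    bool fresh = !(remembered_bits & mask);
    remembered_bits |= mask;
    return fresh;
}
#endif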
6774 #if GC_ENABLE_INCREMENTAL_MARK
6775 static void
6776 gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
6777 {
6778  gc_mark_set_parent(objspace, parent);
6779  rgengc_check_relation(objspace, obj);
6780  if (gc_mark_set(objspace, obj) == FALSE) return;
6781  gc_aging(objspace, obj);
6782  gc_grey(objspace, obj);
6783 }
6784 
6785 NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));
6786 
6787 static void
6788 gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
6789 {
6790  gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
6791 
6792  if (RVALUE_BLACK_P(a)) {
6793  if (RVALUE_WHITE_P(b)) {
6794  if (!RVALUE_WB_UNPROTECTED(a)) {
6795  gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
6796  gc_mark_from(objspace, b, a);
6797  }
6798  }
6799  else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
6800  if (!RVALUE_WB_UNPROTECTED(b)) {
6801  gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
6802  RVALUE_AGE_SET_OLD(objspace, b);
6803 
6804  if (RVALUE_BLACK_P(b)) {
6805  gc_grey(objspace, b);
6806  }
6807  }
6808  else {
6809  gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
6810  gc_remember_unprotected(objspace, b);
6811  }
6812  }
6813  }
6814 }
6815 #else
6816 #define gc_writebarrier_incremental(a, b, objspace)
6817 #endif
6818 
6819 void
6820 rb_gc_writebarrier(VALUE a, VALUE b)
6821 {
6822  rb_objspace_t *objspace = &rb_objspace;
6823 
6824  if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const");
6825  if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const");
6826 
6827  if (!is_incremental_marking(objspace)) {
6828  if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
6829  return;
6830  }
6831  else {
6832  gc_writebarrier_generational(a, b, objspace);
6833  }
6834  }
6835  else { /* slow path */
6836  gc_writebarrier_incremental(a, b, objspace);
6837  }
6838 }
6839 
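/* [Editor's note] rb_gc_writebarrier() above dispatches between the two
 * barriers: outside incremental marking only old->young edges matter
 * (generational barrier); during incremental marking it must also keep
 * the tri-color invariant, i.e. a black parent may never point at a
 * white child, so the child is re-greyed. Sketch of that invariant with
 * toy color enums standing in for the bitmap tests:
 */
#if 0
enum color { WHITE, GREY, BLACK };

static enum color
write_barrier(enum color parent, enum color child)
{
    if (parent == BLACK && child == WHITE)
        return GREY;             /* push the child back on the mark stack */
    return child;
}
#endif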
6840 void
6841 rb_gc_writebarrier_unprotect(VALUE obj)
6842 {
6843  if (RVALUE_WB_UNPROTECTED(obj)) {
6844  return;
6845  }
6846  else {
6847  rb_objspace_t *objspace = &rb_objspace;
6848 
6849  gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
6850  rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
6851 
6852  if (RVALUE_OLD_P(obj)) {
6853  gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
6854  RVALUE_DEMOTE(objspace, obj);
6855  gc_mark_set(objspace, obj);
6856  gc_remember_unprotected(objspace, obj);
6857 
6858 #if RGENGC_PROFILE
6859  objspace->profile.total_shade_operation_count++;
6860 #if RGENGC_PROFILE >= 2
6861  objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
6862 #endif /* RGENGC_PROFILE >= 2 */
6863 #endif /* RGENGC_PROFILE */
6864  }
6865  else {
6866  RVALUE_AGE_RESET(obj);
6867  }
6868 
6869  RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
6870  MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
6871  }
6872 }
6873 
6874 /*
6875  * remember `obj' if needed.
6876  */
6877 MJIT_FUNC_EXPORTED void
6878 rb_gc_writebarrier_remember(VALUE obj)
6879 {
6880  rb_objspace_t *objspace = &rb_objspace;
6881 
6882  gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
6883 
6884  if (is_incremental_marking(objspace)) {
6885  if (RVALUE_BLACK_P(obj)) {
6886  gc_grey(objspace, obj);
6887  }
6888  }
6889  else {
6890  if (RVALUE_OLD_P(obj)) {
6891  rgengc_remember(objspace, obj);
6892  }
6893  }
6894 }
6895 
6896 static st_table *rgengc_unprotect_logging_table;
6897 
6898 static int
6899 rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
6900 {
6901  fprintf(stderr, "%s\t%d\n", (char *)key, (int)val);
6902  return ST_CONTINUE;
6903 }
6904 
6905 static void
6906 rgengc_unprotect_logging_exit_func(void)
6907 {
6908  st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
6909 }
6910 
6911 void
6912 rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
6913 {
6914  VALUE obj = (VALUE)objptr;
6915 
6916  if (rgengc_unprotect_logging_table == 0) {
6917  rgengc_unprotect_logging_table = st_init_strtable();
6918  atexit(rgengc_unprotect_logging_exit_func);
6919  }
6920 
6921  if (RVALUE_WB_UNPROTECTED(obj) == 0) {
6922  char buff[0x100];
6923  st_data_t cnt = 1;
6924  char *ptr = buff;
6925 
6926  snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_info(obj), filename, line);
6927 
6928  if (st_lookup(rgengc_unprotect_logging_table, (st_data_t)ptr, &cnt)) {
6929  cnt++;
6930  }
6931  else {
6932  ptr = (strdup)(buff);
6933  if (!ptr) rb_memerror();
6934  }
6935  st_insert(rgengc_unprotect_logging_table, (st_data_t)ptr, cnt);
6936  }
6937 }
6938 #endif /* USE_RGENGC */
6939 
6940 void
6941 rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
6942 {
6943 #if USE_RGENGC
6944  rb_objspace_t *objspace = &rb_objspace;
6945 
6946  if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
6947  if (!RVALUE_OLD_P(dest)) {
6948  MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
6949  RVALUE_AGE_RESET_RAW(dest);
6950  }
6951  else {
6952  RVALUE_DEMOTE(objspace, dest);
6953  }
6954  }
6955 
6956  check_rvalue_consistency(dest);
6957 #endif
6958 }
6959 
6960 /* RGENGC analysis information */
6961 
6962 VALUE
6963 rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
6964 {
6965 #if USE_RGENGC
6966  return RVALUE_WB_UNPROTECTED(obj) ? Qfalse : Qtrue;
6967 #else
6968  return Qfalse;
6969 #endif
6970 }
6971 
6972 VALUE
6973 rb_obj_rgengc_promoted_p(VALUE obj)
6974 {
6975  return OBJ_PROMOTED(obj) ? Qtrue : Qfalse;
6976 }
6977 
6978 size_t
6979 rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
6980 {
6981  size_t n = 0;
6982  static ID ID_marked;
6983 #if USE_RGENGC
6984  static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
6985 #endif
6986 
6987  if (!ID_marked) {
6988 #define I(s) ID_##s = rb_intern(#s);
6989  I(marked);
6990 #if USE_RGENGC
6991  I(wb_protected);
6992  I(old);
6993  I(marking);
6994  I(uncollectible);
6995  I(pinned);
6996 #endif
6997 #undef I
6998  }
6999 
7000 #if USE_RGENGC
7001  if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
7002  if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
7003  if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
7004  if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
7005 #endif
7006  if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
7007  if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
7008  return n;
7009 }
7010 
7011 /* GC */
7012 
7013 void
7014 rb_gc_force_recycle(VALUE obj)
7015 {
7016  rb_objspace_t *objspace = &rb_objspace;
7017 
7018 #if USE_RGENGC
7019  int is_old = RVALUE_OLD_P(obj);
7020 
7021  gc_report(2, objspace, "rb_gc_force_recycle: %s\n", obj_info(obj));
7022 
7023  if (is_old) {
7024  if (RVALUE_MARKED(obj)) {
7025  objspace->rgengc.old_objects--;
7026  }
7027  }
7028  CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
7029  CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
7030 
7031 #if GC_ENABLE_INCREMENTAL_MARK
7032  if (is_incremental_marking(objspace)) {
7033  if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj)) {
7034  invalidate_mark_stack(&objspace->mark_stack, obj);
7035  CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7036  }
7037  CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
7038  }
7039  else {
7040 #endif
7041  if (is_old || !GET_HEAP_PAGE(obj)->flags.before_sweep) {
7042  CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
7043  }
7044  CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
7045 #if GC_ENABLE_INCREMENTAL_MARK
7046  }
7047 #endif
7048 #endif
7049 
7050  objspace->profile.total_freed_objects++;
7051 
7052  heap_page_add_freeobj(objspace, GET_HEAP_PAGE(obj), obj);
7053 
7054  /* Do not count swept_slots here; the count would be meaningless.
7055  * if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(p), p)) {
7056  * objspace->heap.swept_slots++;
7057  * }
7058  */
7059 }
7060 
7061 #ifndef MARK_OBJECT_ARY_BUCKET_SIZE
7062 #define MARK_OBJECT_ARY_BUCKET_SIZE 1024
7063 #endif
7064 
7065 void
7066 rb_gc_register_mark_object(VALUE obj)
7067 {
7068  VALUE ary_ary = GET_VM()->mark_object_ary;
7069  VALUE ary = rb_ary_last(0, 0, ary_ary);
7070 
7071  if (ary == Qnil || RARRAY_LEN(ary) >= MARK_OBJECT_ARY_BUCKET_SIZE) {
7072  ary = rb_ary_tmp_new(MARK_OBJECT_ARY_BUCKET_SIZE);
7073  rb_ary_push(ary_ary, ary);
7074  }
7075 
7076  rb_ary_push(ary, obj);
7077 }
7078 
7079 void
7080 rb_gc_register_address(VALUE *addr)
7081 {
7082  rb_objspace_t *objspace = &rb_objspace;
7083  struct gc_list *tmp;
7084 
7085  tmp = ALLOC(struct gc_list);
7086  tmp->next = global_list;
7087  tmp->varptr = addr;
7088  global_list = tmp;
7089 }
7090 
7091 void
7092 rb_gc_unregister_address(VALUE *addr)
7093 {
7094  rb_objspace_t *objspace = &rb_objspace;
7095  struct gc_list *tmp = global_list;
7096 
7097  if (tmp->varptr == addr) {
7098  global_list = tmp->next;
7099  xfree(tmp);
7100  return;
7101  }
7102  while (tmp->next) {
7103  if (tmp->next->varptr == addr) {
7104  struct gc_list *t = tmp->next;
7105 
7106  tmp->next = tmp->next->next;
7107  xfree(t);
7108  break;
7109  }
7110  tmp = tmp->next;
7111  }
7112 }
7113 
7114 void
7115 rb_global_variable(VALUE *var)
7116 {
7117  rb_gc_register_address(var);
7118 }
7119 
7120 #define GC_NOTIFY 0
7121 
7122 enum {
7123  gc_stress_no_major,
7124  gc_stress_no_immediate_sweep,
7125  gc_stress_full_mark_after_malloc,
7126  gc_stress_max
7127 };
7128 
7129 #define gc_stress_full_mark_after_malloc_p() \
7130  (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
7131 
7132 static void
7133 heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
7134 {
7135  if (!heap->freelist && !heap->free_pages) {
7136  if (!heap_increment(objspace, heap)) {
7137  heap_set_increment(objspace, 1);
7138  heap_increment(objspace, heap);
7139  }
7140  }
7141 }
7142 
7143 static int
7144 ready_to_gc(rb_objspace_t *objspace)
7145 {
7146  if (dont_gc || during_gc || ruby_disable_gc) {
7147  heap_ready_to_gc(objspace, heap_eden);
7148  return FALSE;
7149  }
7150  else {
7151  return TRUE;
7152  }
7153 }
7154 
7155 static void
7156 gc_reset_malloc_info(rb_objspace_t *objspace)
7157 {
7158  gc_prof_set_malloc_info(objspace);
7159  {
7160  size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
7161  size_t old_limit = malloc_limit;
7162 
7163  if (inc > malloc_limit) {
7164  malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
7165  if (malloc_limit > gc_params.malloc_limit_max) {
7166  malloc_limit = gc_params.malloc_limit_max;
7167  }
7168  }
7169  else {
7170  malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
7171  if (malloc_limit < gc_params.malloc_limit_min) {
7172  malloc_limit = gc_params.malloc_limit_min;
7173  }
7174  }
7175 
7176  if (0) {
7177  if (old_limit != malloc_limit) {
7178  fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
7179  rb_gc_count(), old_limit, malloc_limit);
7180  }
7181  else {
7182  fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
7183  rb_gc_count(), malloc_limit);
7184  }
7185  }
7186  }
7187 
7188  /* reset oldmalloc info */
7189 #if RGENGC_ESTIMATE_OLDMALLOC
7190  if (!is_full_marking(objspace)) {
7191  if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
7192  objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
7193  objspace->rgengc.oldmalloc_increase_limit =
7194  (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
7195 
7196  if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
7197  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
7198  }
7199  }
7200 
7201  if (0) fprintf(stderr, "%d\t%d\t%u\t%u\t%d\n",
7202  (int)rb_gc_count(),
7203  (int)objspace->rgengc.need_major_gc,
7204  (unsigned int)objspace->rgengc.oldmalloc_increase,
7205  (unsigned int)objspace->rgengc.oldmalloc_increase_limit,
7206  (unsigned int)gc_params.oldmalloc_limit_max);
7207  }
7208  else {
7209  /* major GC */
7210  objspace->rgengc.oldmalloc_increase = 0;
7211 
7212  if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
7213  objspace->rgengc.oldmalloc_increase_limit =
7214  (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
7215  if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
7216  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
7217  }
7218  }
7219  }
7220 #endif
7221 }
7222 
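/* [Editor's note] malloc_limit adapts multiplicatively in
 * gc_reset_malloc_info() above: it grows by the configured growth factor
 * when malloc pressure exceeded the limit, otherwise decays by the
 * hard-coded 0.98, clamped to [min, max]. The update rule in isolation
 * (a sketch; names and values are illustrative):
 */
#if 0
#include <stddef.h>

static size_t
next_malloc_limit(size_t limit, size_t increase,
                  double growth, size_t min, size_t max)
{
    if (increase > limit) {
        limit = (size_t)(increase * growth);
        if (limit > max) limit = max;
    }
    else {
        limit = (size_t)(limit * 0.98);  /* "magic number", as noted above */
        if (limit < min) limit = min;
    }
    return limit;
}
#endif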
7223 static int
7224 garbage_collect(rb_objspace_t *objspace, int reason)
7225 {
7226 #if GC_PROFILE_MORE_DETAIL
7227  objspace->profile.prepare_time = getrusage_time();
7228 #endif
7229 
7230  gc_rest(objspace);
7231 
7232 #if GC_PROFILE_MORE_DETAIL
7233  objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
7234 #endif
7235 
7236  return gc_start(objspace, reason);
7237 }
7238 
7239 static int
7240 gc_start(rb_objspace_t *objspace, int reason)
7241 {
7242  unsigned int do_full_mark = !!((unsigned)reason & GPR_FLAG_FULL_MARK);
7243  unsigned int immediate_mark = (unsigned)reason & GPR_FLAG_IMMEDIATE_MARK;
7244 
7245  /* reason may be clobbered later, so record immediate_sweep here */
7246  objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);
7247 
7248  if (!heap_allocated_pages) return FALSE; /* heap is not ready */
7249  if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
7250 
7251  GC_ASSERT(gc_mode(objspace) == gc_mode_none);
7252  GC_ASSERT(!is_lazy_sweeping(heap_eden));
7253  GC_ASSERT(!is_incremental_marking(objspace));
7254 #if RGENGC_CHECK_MODE >= 2
7255  gc_verify_internal_consistency(objspace);
7256 #endif
7257 
7258  gc_enter(objspace, "gc_start");
7259 
7260  if (ruby_gc_stressful) {
7261  int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
7262 
7263  if ((flag & (1<<gc_stress_no_major)) == 0) {
7264  do_full_mark = TRUE;
7265  }
7266 
7267  objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
7268  }
7269  else {
7270 #if USE_RGENGC
7271  if (objspace->rgengc.need_major_gc) {
7272  reason |= objspace->rgengc.need_major_gc;
7273  do_full_mark = TRUE;
7274  }
7275  else if (RGENGC_FORCE_MAJOR_GC) {
7276  reason = GPR_FLAG_MAJOR_BY_FORCE;
7277  do_full_mark = TRUE;
7278  }
7279 
7280  objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
7281 #endif
7282  }
7283 
7284  if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
7285  reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
7286  }
7287 
7288 #if GC_ENABLE_INCREMENTAL_MARK
7289  if (!GC_ENABLE_INCREMENTAL_MARK || objspace->flags.dont_incremental || immediate_mark) {
7290  objspace->flags.during_incremental_marking = FALSE;
7291  }
7292  else {
7293  objspace->flags.during_incremental_marking = do_full_mark;
7294  }
7295 #endif
7296 
7297  if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
7298  objspace->flags.immediate_sweep = TRUE;
7299  }
7300 
7301  if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
7302 
7303  gc_report(1, objspace, "gc_start(reason: %d) => %u, %d, %d\n",
7304  reason,
7305  do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
7306 
7307 #if USE_DEBUG_COUNTER
7308  RB_DEBUG_COUNTER_INC(gc_count);
7309 
7310  if (reason & GPR_FLAG_MAJOR_MASK) {
7311  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
7312  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
7313  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
7314  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
7315 #if RGENGC_ESTIMATE_OLDMALLOC
7316  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
7317 #endif
7318  }
7319  else {
7320  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
7321  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
7322  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
7323  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
7324  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
7325  }
7326 #endif
7327 
7328  objspace->profile.count++;
7329  objspace->profile.latest_gc_info = reason;
7330  objspace->profile.total_allocated_objects_at_gc_start = objspace->total_allocated_objects;
7331  objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
7332  gc_prof_setup_new_record(objspace, reason);
7333  gc_reset_malloc_info(objspace);
7334  rb_transient_heap_start_marking(do_full_mark);
7335 
7336  gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
7337  GC_ASSERT(during_gc);
7338 
7339  gc_prof_timer_start(objspace);
7340  {
7341  gc_marks(objspace, do_full_mark);
7342  }
7343  gc_prof_timer_stop(objspace);
7344 
7345  gc_exit(objspace, "gc_start");
7346  return TRUE;
7347 }
7348 
7349 static void
7350 gc_rest(rb_objspace_t *objspace)
7351 {
7352  int marking = is_incremental_marking(objspace);
7353  int sweeping = is_lazy_sweeping(heap_eden);
7354 
7355  if (marking || sweeping) {
7356  gc_enter(objspace, "gc_rest");
7357 
7358  if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
7359 
7360  if (is_incremental_marking(objspace)) {
7361  PUSH_MARK_FUNC_DATA(NULL);
7362  gc_marks_rest(objspace);
7363  POP_MARK_FUNC_DATA();
7364  }
7365  if (is_lazy_sweeping(heap_eden)) {
7366  gc_sweep_rest(objspace);
7367  }
7368  gc_exit(objspace, "gc_rest");
7369  }
7370 }
7371 
7372 struct objspace_and_reason {
7373  rb_objspace_t *objspace;
7374  int reason;
7375 };
7376 
7377 static void
7378 gc_current_status_fill(rb_objspace_t *objspace, char *buff)
7379 {
7380  int i = 0;
7381  if (is_marking(objspace)) {
7382  buff[i++] = 'M';
7383 #if USE_RGENGC
7384  if (is_full_marking(objspace)) buff[i++] = 'F';
7385 #if GC_ENABLE_INCREMENTAL_MARK
7386  if (is_incremental_marking(objspace)) buff[i++] = 'I';
7387 #endif
7388 #endif
7389  }
7390  else if (is_sweeping(objspace)) {
7391  buff[i++] = 'S';
7392  if (is_lazy_sweeping(heap_eden)) buff[i++] = 'L';
7393  }
7394  else {
7395  buff[i++] = 'N';
7396  }
7397  buff[i] = '\0';
7398 }
7399 
7400 static const char *
7401 gc_current_status(rb_objspace_t *objspace)
7402 {
7403  static char buff[0x10];
7404  gc_current_status_fill(objspace, buff);
7405  return buff;
7406 }
7407 
7408 #if PRINT_ENTER_EXIT_TICK
7409 
7410 static tick_t last_exit_tick;
7411 static tick_t enter_tick;
7412 static int enter_count = 0;
7413 static char last_gc_status[0x10];
7414 
7415 static inline void
7416 gc_record(rb_objspace_t *objspace, int direction, const char *event)
7417 {
7418  if (direction == 0) { /* enter */
7419  enter_count++;
7420  enter_tick = tick();
7421  gc_current_status_fill(objspace, last_gc_status);
7422  }
7423  else { /* exit */
7424  tick_t exit_tick = tick();
7425  char current_gc_status[0x10];
7426  gc_current_status_fill(objspace, current_gc_status);
7427 #if 1
7428  /* [last mutator time] [gc time] [event] */
7429  fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
7430  enter_tick - last_exit_tick,
7431  exit_tick - enter_tick,
7432  event,
7433  last_gc_status, current_gc_status,
7434  (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
7435  last_exit_tick = exit_tick;
7436 #else
7437  /* [enter_tick] [gc time] [event] */
7438  fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
7439  enter_tick,
7440  exit_tick - enter_tick,
7441  event,
7442  last_gc_status, current_gc_status,
7443  (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
7444 #endif
7445  }
7446 }
7447 #else /* PRINT_ENTER_EXIT_TICK */
7448 static inline void
7449 gc_record(rb_objspace_t *objspace, int direction, const char *event)
7450 {
7451  /* null */
7452 }
7453 #endif /* PRINT_ENTER_EXIT_TICK */
7454 
7455 static inline void
7456 gc_enter(rb_objspace_t *objspace, const char *event)
7457 {
7458  GC_ASSERT(during_gc == 0);
7459  if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
7460 
7461  mjit_gc_start_hook();
7462 
7463  during_gc = TRUE;
7464  gc_report(1, objspace, "gc_enter: %s [%s]\n", event, gc_current_status(objspace));
7465  gc_record(objspace, 0, event);
7466  gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_ENTER, 0); /* TODO: which parameter should be passed? */
7467 }
7468 
7469 static inline void
7470 gc_exit(rb_objspace_t *objspace, const char *event)
7471 {
7472  GC_ASSERT(during_gc != 0);
7473 
7474  gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_EXIT, 0); /* TODO: which parameter should be passed? */
7475  gc_record(objspace, 1, event);
7476  gc_report(1, objspace, "gc_exit: %s [%s]\n", event, gc_current_status(objspace));
7477  during_gc = FALSE;
7478 
7479  mjit_gc_exit_hook();
7480 }
7481 
7482 static void *
7483 gc_with_gvl(void *ptr)
7484 {
7485  struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
7486  return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
7487 }
7488 
7489 static int
7490 garbage_collect_with_gvl(rb_objspace_t *objspace, int reason)
7491 {
7492  if (dont_gc) return TRUE;
7493  if (ruby_thread_has_gvl_p()) {
7494  return garbage_collect(objspace, reason);
7495  }
7496  else {
7497  if (ruby_native_thread_p()) {
7498  struct objspace_and_reason oar;
7499  oar.objspace = objspace;
7500  oar.reason = reason;
7501  return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
7502  }
7503  else {
7504  /* no ruby thread */
7505  fprintf(stderr, "[FATAL] failed to allocate memory\n");
7506  exit(EXIT_FAILURE);
7507  }
7508  }
7509 }
7510 
7511 static VALUE
7512 gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep)
7513 {
7514  rb_objspace_t *objspace = &rb_objspace;
7515  int reason = GPR_FLAG_FULL_MARK |
7516  GPR_FLAG_IMMEDIATE_MARK |
7517  GPR_FLAG_IMMEDIATE_SWEEP |
7518  GPR_FLAG_METHOD;
7519 
7520  if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
7521  if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
7522  if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
7523 
7524  garbage_collect(objspace, reason);
7525  gc_finalize_deferred(objspace);
7526 
7527  return Qnil;
7528 }
7529 
7530 static int
7531 gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
7532 {
7533  if (SPECIAL_CONST_P(obj)) {
7534  return FALSE;
7535  }
7536 
7537  switch (BUILTIN_TYPE(obj)) {
7538  case T_NONE:
7539  case T_NIL:
7540  case T_MOVED:
7541  case T_ZOMBIE:
7542  return FALSE;
7543  break;
7544  case T_SYMBOL:
7545  if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
7546  return FALSE;
7547  }
7548  /* fall through */
7549  case T_STRING:
7550  case T_OBJECT:
7551  case T_FLOAT:
7552  case T_IMEMO:
7553  case T_ARRAY:
7554  case T_BIGNUM:
7555  case T_ICLASS:
7556  case T_MODULE:
7557  case T_REGEXP:
7558  case T_DATA:
7559  case T_MATCH:
7560  case T_STRUCT:
7561  case T_HASH:
7562  case T_FILE:
7563  case T_COMPLEX:
7564  case T_RATIONAL:
7565  case T_NODE:
7566  case T_CLASS:
7567  if (FL_TEST(obj, FL_FINALIZE)) {
7568  if (st_is_member(finalizer_table, obj)) {
7569  return FALSE;
7570  }
7571  }
7572  return RVALUE_MARKED(obj) && !RVALUE_PINNED(obj);
7573  break;
7574 
7575  default:
7576  rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
7577  break;
7578  }
7579 
7580  return FALSE;
7581 }
7582 
7583 static VALUE
7584 gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, VALUE moved_list)
7585 {
7586  int marked;
7587  int wb_unprotected;
7588  int uncollectible;
7589  int marking;
7590  RVALUE *dest = (RVALUE *)free;
7591  RVALUE *src = (RVALUE *)scan;
7592 
7593  gc_report(4, objspace, "Moving object: %p -> %p\n", (void*)scan, (void *)free);
7594 
7595  GC_ASSERT(BUILTIN_TYPE(scan) != T_NONE);
7596  GC_ASSERT(BUILTIN_TYPE(free) == T_NONE);
7597 
7598  /* Save off bits for current object. */
7599  marked = rb_objspace_marked_object_p((VALUE)src);
7600  wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
7601  uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
7602  marking = RVALUE_MARKING((VALUE)src);
7603 
7604  objspace->total_allocated_objects++;
7605 
7606  /* Clear bits for eventual T_MOVED */
7607  CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
7608  CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
7609  CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
7610  CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)src), (VALUE)src);
7611 
7612  if (FL_TEST(src, FL_EXIVAR)) {
7613  rb_mv_generic_ivar((VALUE)src, (VALUE)dest);
7614  }
7615 
7616  VALUE id;
7617 
7618  /* If the source object's object_id has been seen, we need to update
7619  * the object to object id mapping. */
7620  if (st_lookup(objspace->obj_to_id_tbl, (VALUE)src, &id)) {
7621  gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
7622  st_delete(objspace->obj_to_id_tbl, (st_data_t *)&src, 0);
7623  st_insert(objspace->obj_to_id_tbl, (VALUE)dest, id);
7624  }
7625 
7626  /* Move the object */
7627  memcpy(dest, src, sizeof(RVALUE));
7628  memset(src, 0, sizeof(RVALUE));
7629 
7630  /* Set bits for object in new location */
7631  if (marking) {
7632  MARK_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
7633  }
7634  else {
7635  CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
7636  }
7637 
7638  if (marked) {
7639  MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
7640  }
7641  else {
7642  CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
7643  }
7644 
7645  if (wb_unprotected) {
7646  MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
7647  }
7648  else {
7649  CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
7650  }
7651 
7652  if (uncollectible) {
7653  MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
7654  }
7655  else {
7656  CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
7657  }
7658 
7659  /* Assign forwarding address */
7660  src->as.moved.flags = T_MOVED;
7661  src->as.moved.destination = (VALUE)dest;
7662  src->as.moved.next = moved_list;
7663  GC_ASSERT(BUILTIN_TYPE((VALUE)dest) != T_NONE);
7664 
7665  return (VALUE)src;
7666 }
7667 
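/* [Editor's note] gc_move() above leaves a T_MOVED tombstone behind: the
 * old slot's flags become T_MOVED and `destination` points at the new
 * slot, so stale references can be redirected later (see rb_gc_location()
 * further below). The forwarding scheme in miniature, with a toy slot
 * type standing in for RVALUE:
 */
#if 0
#include <string.h>

enum { TYPE_NONE, TYPE_MOVED, TYPE_OBJ };
struct slot { int type; struct slot *destination; char payload[40]; };

static void
move_slot(struct slot *src, struct slot *dst)
{
    memcpy(dst, src, sizeof(*dst));   /* relocate the object wholesale */
    memset(src, 0, sizeof(*src));
    src->type = TYPE_MOVED;           /* leave a forwarding record */
    src->destination = dst;
}

static struct slot *
location(struct slot *s)              /* follow one forwarding hop */
{
    return s->type == TYPE_MOVED ? s->destination : s;
}
#endif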
7668 struct heap_cursor {
7669  RVALUE *slot;
7670  size_t index;
7671  struct heap_page *page;
7672  rb_objspace_t * objspace;
7673 };
7674 
7675 static void
7676 advance_cursor(struct heap_cursor *free, struct heap_page **page_list)
7677 {
7678  if (free->slot == free->page->start + free->page->total_slots - 1) {
7679  free->index++;
7680  free->page = page_list[free->index];
7681  free->slot = free->page->start;
7682  }
7683  else {
7684  free->slot++;
7685  }
7686 }
7687 
7688 static void
7689 retreat_cursor(struct heap_cursor *scan, struct heap_page **page_list)
7690 {
7691  if (scan->slot == scan->page->start) {
7692  scan->index--;
7693  scan->page = page_list[scan->index];
7694  scan->slot = scan->page->start + scan->page->total_slots - 1;
7695  }
7696  else {
7697  scan->slot--;
7698  }
7699 }
7700 
7701 static int
7702 not_met(struct heap_cursor *free, struct heap_cursor *scan)
7703 {
7704  if (free->index < scan->index)
7705  return 1;
7706 
7707  if (free->index > scan->index)
7708  return 0;
7709 
7710  return free->slot < scan->slot;
7711 }
7712 
7713 static void
7714 init_cursors(rb_objspace_t *objspace, struct heap_cursor *free, struct heap_cursor *scan, struct heap_page **page_list)
7715 {
7716  struct heap_page *page;
7717  size_t total_pages = heap_eden->total_pages;
7718  page = page_list[0];
7719 
7720  free->index = 0;
7721  free->page = page;
7722  free->slot = page->start;
7723  free->objspace = objspace;
7724 
7725  page = page_list[total_pages - 1];
7726  scan->index = total_pages - 1;
7727  scan->page = page;
7728  scan->slot = page->start + page->total_slots - 1;
7729  scan->objspace = objspace;
7730 }
7731 
7732 static int
7733 count_pinned(struct heap_page *page)
7734 {
7735  int pinned = 0;
7736  int i;
7737 
7738  for (i = 0; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
7739  pinned += popcount_bits(page->pinned_bits[i]);
7740  }
7741 
7742  return pinned;
7743 }
7744 
7745 static int
7746 compare_pinned(const void *left, const void *right, void *dummy)
7747 {
7748  struct heap_page *left_page;
7749  struct heap_page *right_page;
7750 
7751  left_page = *(struct heap_page * const *)left;
7752  right_page = *(struct heap_page * const *)right;
7753 
7754  return right_page->pinned_slots - left_page->pinned_slots;
7755 }
7756 
7757 static int
7758 compare_free_slots(const void *left, const void *right, void *dummy)
7759 {
7760  struct heap_page *left_page;
7761  struct heap_page *right_page;
7762 
7763  left_page = *(struct heap_page * const *)left;
7764  right_page = *(struct heap_page * const *)right;
7765 
7766  return right_page->free_slots - left_page->free_slots;
7767 }
7768 
7769 typedef int page_compare_func_t(const void *, const void *, void *);
7770 
7771 static struct heap_page **
7772 allocate_page_list(rb_objspace_t *objspace, page_compare_func_t *comparator)
7773 {
7774  size_t total_pages = heap_eden->total_pages;
7775  size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
7776  struct heap_page *page = 0, **page_list = malloc(size);
7777  int i = 0;
7778 
7779  list_for_each(&heap_eden->pages, page, page_node) {
7780  page_list[i++] = page;
7781  page->pinned_slots = count_pinned(page);
7782  GC_ASSERT(page != NULL);
7783  }
7784  GC_ASSERT(total_pages > 0);
7785  GC_ASSERT((size_t)i == total_pages);
7786 
7787  ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), comparator, NULL);
7788 
7789  return page_list;
7790 }
7791 
7792 static VALUE
7793 gc_compact_heap(rb_objspace_t *objspace, page_compare_func_t *comparator)
7794 {
7795  struct heap_cursor free_cursor;
7796  struct heap_cursor scan_cursor;
7797  struct heap_page **page_list;
7798  VALUE moved_list;
7799 
7800  moved_list = Qfalse;
7801  memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
7802  memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
7803 
7804  page_list = allocate_page_list(objspace, comparator);
7805 
7806  init_cursors(objspace, &free_cursor, &scan_cursor, page_list);
7807 
7808  /* Two finger algorithm */
7809  while (not_met(&free_cursor, &scan_cursor)) {
7810  /* Free cursor movement */
7811 
7812  /* Unpoison free_cursor slot */
7813  void *free_slot_poison = asan_poisoned_object_p((VALUE)free_cursor.slot);
7814  asan_unpoison_object((VALUE)free_cursor.slot, false);
7815 
7816  while (BUILTIN_TYPE(free_cursor.slot) != T_NONE && not_met(&free_cursor, &scan_cursor)) {
7817  /* Re-poison slot if it's not the one we want */
7818  if (free_slot_poison) {
7819  GC_ASSERT(BUILTIN_TYPE(free_cursor.slot) == T_NONE);
7820  asan_poison_object((VALUE)free_cursor.slot);
7821  }
7822 
7823  advance_cursor(&free_cursor, page_list);
7824 
7825  /* Unpoison free_cursor slot */
7826  free_slot_poison = asan_poisoned_object_p((VALUE)free_cursor.slot);
7827  asan_unpoison_object((VALUE)free_cursor.slot, false);
7828  }
7829 
7830  /* Unpoison scan_cursor slot */
7831  void *scan_slot_poison = asan_poisoned_object_p((VALUE)scan_cursor.slot);
7832  asan_unpoison_object((VALUE)scan_cursor.slot, false);
7833 
7834  /* Scan cursor movement */
7835  objspace->rcompactor.considered_count_table[BUILTIN_TYPE((VALUE)scan_cursor.slot)]++;
7836 
7837  while (!gc_is_moveable_obj(objspace, (VALUE)scan_cursor.slot) && not_met(&free_cursor, &scan_cursor)) {
7838 
7839  /* Re-poison slot if it's not the one we want */
7840  if (scan_slot_poison) {
7841  GC_ASSERT(BUILTIN_TYPE(scan_cursor.slot) == T_NONE);
7842  asan_poison_object((VALUE)scan_cursor.slot);
7843  }
7844 
7845  retreat_cursor(&scan_cursor, page_list);
7846 
7847  /* Unpoison scan_cursor slot */
7848  scan_slot_poison = asan_poisoned_object_p((VALUE)scan_cursor.slot);
7849  asan_unpoison_object((VALUE)scan_cursor.slot, false);
7850 
7851  objspace->rcompactor.considered_count_table[BUILTIN_TYPE((VALUE)scan_cursor.slot)]++;
7852  }
7853 
7854  if (not_met(&free_cursor, &scan_cursor)) {
7855  objspace->rcompactor.moved_count_table[BUILTIN_TYPE((VALUE)scan_cursor.slot)]++;
7856 
7857  GC_ASSERT(BUILTIN_TYPE(free_cursor.slot) == T_NONE);
7858  GC_ASSERT(BUILTIN_TYPE(scan_cursor.slot) != T_NONE);
7859  GC_ASSERT(BUILTIN_TYPE(scan_cursor.slot) != T_MOVED);
7860 
7861  moved_list = gc_move(objspace, (VALUE)scan_cursor.slot, (VALUE)free_cursor.slot, moved_list);
7862 
7863  GC_ASSERT(BUILTIN_TYPE(free_cursor.slot) != T_MOVED);
7864  GC_ASSERT(BUILTIN_TYPE(free_cursor.slot) != T_NONE);
7865  GC_ASSERT(BUILTIN_TYPE(scan_cursor.slot) == T_MOVED);
7866 
7867  advance_cursor(&free_cursor, page_list);
7868  retreat_cursor(&scan_cursor, page_list);
7869  }
7870  }
7871  free(page_list);
7872 
7873  return moved_list;
7874 }
7875 
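/* [Editor's note] gc_compact_heap() above is the classic two-finger
 * compaction pass: one cursor sweeps forward over free slots, the other
 * sweeps backward over movable objects, and the pass ends when they meet;
 * everything behind the meeting point is pinned or compacted. The same
 * algorithm on a plain array, runnable as-is:
 */
#if 0
#include <stdio.h>

int
main(void)
{
    /* 0 = free slot, 1 = movable object, 2 = pinned object */
    int heap[8] = {0, 2, 0, 1, 0, 1, 2, 1};
    int free_i = 0, scan_i = 7;

    while (free_i < scan_i) {
        while (free_i < scan_i && heap[free_i] != 0) free_i++;
        while (free_i < scan_i && heap[scan_i] != 1) scan_i--;
        if (free_i < scan_i) {
            heap[free_i++] = 1;   /* move the object into the hole */
            heap[scan_i--] = 0;   /* old slot becomes a forwarding stub */
        }
    }
    for (int i = 0; i < 8; i++) printf("%d", heap[i]);
    puts("");                     /* prints 12110020 */
    return 0;
}
#endif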
7876 static void
7877 gc_ref_update_array(rb_objspace_t * objspace, VALUE v)
7878 {
7879  long i, len;
7880 
7881  if (FL_TEST(v, ELTS_SHARED))
7882  return;
7883 
7884  len = RARRAY_LEN(v);
7885  if (len > 0) {
7886  VALUE *ptr = (VALUE *)RARRAY_CONST_PTR_TRANSIENT(v);
7887  for (i = 0; i < len; i++) {
7888  UPDATE_IF_MOVED(objspace, ptr[i]);
7889  }
7890  }
7891 }
7892 
7893 static void
7894 gc_ref_update_object(rb_objspace_t * objspace, VALUE v)
7895 {
7896  VALUE *ptr = ROBJECT_IVPTR(v);
7897 
7898  if (ptr) {
7899  uint32_t i, len = ROBJECT_NUMIV(v);
7900  for (i = 0; i < len; i++) {
7901  UPDATE_IF_MOVED(objspace, ptr[i]);
7902  }
7903  }
7904 }
7905 
7906 static int
7907 hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
7908 {
7909  rb_objspace_t *objspace = (rb_objspace_t *)argp;
7910 
7911  if (gc_object_moved_p(objspace, (VALUE)*key)) {
7912  *key = rb_gc_location((VALUE)*key);
7913  }
7914 
7915  if (gc_object_moved_p(objspace, (VALUE)*value)) {
7916  *value = rb_gc_location((VALUE)*value);
7917  }
7918 
7919  return ST_CONTINUE;
7920 }
7921 
7922 static int
7923 hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp, int error)
7924 {
7925  rb_objspace_t *objspace;
7926 
7927  objspace = (rb_objspace_t *)argp;
7928 
7929  if (gc_object_moved_p(objspace, (VALUE)key)) {
7930  return ST_REPLACE;
7931  }
7932 
7933  if (gc_object_moved_p(objspace, (VALUE)value)) {
7934  return ST_REPLACE;
7935  }
7936  return ST_CONTINUE;
7937 }
7938 
7939 static int
7940 hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp, int existing)
7941 {
7942  rb_objspace_t *objspace = (rb_objspace_t *)argp;
7943 
7944  if (gc_object_moved_p(objspace, (VALUE)*value)) {
7945  *value = rb_gc_location((VALUE)*value);
7946  }
7947 
7948  return ST_CONTINUE;
7949 }
7950 
7951 static int
7952 hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp, int error)
7953 {
7954  rb_objspace_t *objspace;
7955 
7956  objspace = (rb_objspace_t *)argp;
7957 
7958  if (gc_object_moved_p(objspace, (VALUE)value)) {
7959  return ST_REPLACE;
7960  }
7961  return ST_CONTINUE;
7962 }
7963 
7964 static void
7965 gc_update_tbl_refs(rb_objspace_t * objspace, st_table *tbl)
7966 {
7967  if (!tbl || tbl->num_entries == 0) return;
7968 
7969  if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
7970  rb_raise(rb_eRuntimeError, "hash modified during iteration");
7971  }
7972 }
7973 
7974 static void
7975 gc_update_table_refs(rb_objspace_t * objspace, st_table *tbl)
7976 {
7977  if (!tbl || tbl->num_entries == 0) return;
7978 
7979  if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
7980  rb_raise(rb_eRuntimeError, "hash modified during iteration");
7981  }
7982 }
7983 
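/* [Editor's note] The callback pairs above (hash_foreach_replace with
 * hash_replace_ref, and the *_value variants) follow the protocol of
 * st_foreach_with_replace(): the first callback only *decides*
 * (ST_REPLACE vs ST_CONTINUE), the second rewrites the entry in place,
 * which keeps the decision step read-only. The same two-callback shape
 * on a plain array (an analogy, not the st API):
 */
#if 0
#include <stddef.h>
#include <stdbool.h>

typedef bool needs_update_fn(int value);
typedef int  update_fn(int value);

static void
foreach_with_replace(int *tbl, size_t n,
                     needs_update_fn *check, update_fn *replace)
{
    for (size_t i = 0; i < n; i++) {
        if (check(tbl[i])) tbl[i] = replace(tbl[i]);
    }
}
#endif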
7984 /* Update MOVED references in an st_table */
7985 void
7986 rb_gc_update_tbl_refs(st_table *ptr)
7987 {
7988  rb_objspace_t *objspace = &rb_objspace;
7989  gc_update_table_refs(objspace, ptr);
7990 }
7991 
7992 static void
7993 gc_ref_update_hash(rb_objspace_t * objspace, VALUE v)
7994 {
7995  rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
7996 }
7997 
7998 static void
7999 gc_ref_update_method_entry(rb_objspace_t *objspace, rb_method_entry_t *me)
8000 {
8001  rb_method_definition_t *def = me->def;
8002 
8003  UPDATE_IF_MOVED(objspace, me->owner);
8004  UPDATE_IF_MOVED(objspace, me->defined_class);
8005 
8006  if (def) {
8007  switch (def->type) {
8008  case VM_METHOD_TYPE_ISEQ:
8009  if (def->body.iseq.iseqptr) {
8010  TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, def->body.iseq.iseqptr);
8011  }
8012  TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, def->body.iseq.cref);
8013  break;
8014  case VM_METHOD_TYPE_ATTRSET:
8015  case VM_METHOD_TYPE_IVAR:
8016  UPDATE_IF_MOVED(objspace, def->body.attr.location);
8017  break;
8018  case VM_METHOD_TYPE_BMETHOD:
8019  UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
8020  break;
8021  case VM_METHOD_TYPE_ALIAS:
8022  TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.alias.original_me);
8023  return;
8024  case VM_METHOD_TYPE_REFINED:
8025  TYPED_UPDATE_IF_MOVED(objspace, struct rb_method_entry_struct *, def->body.refined.orig_me);
8026  UPDATE_IF_MOVED(objspace, def->body.refined.owner);
8027  break;
8028  case VM_METHOD_TYPE_CFUNC:
8029  case VM_METHOD_TYPE_ZSUPER:
8030  case VM_METHOD_TYPE_MISSING:
8031  case VM_METHOD_TYPE_OPTIMIZED:
8032  case VM_METHOD_TYPE_UNDEF:
8033  case VM_METHOD_TYPE_NOTIMPLEMENTED:
8034  break;
8035  }
8036  }
8037 }
8038 
8039 static void
8040 gc_update_values(rb_objspace_t *objspace, long n, VALUE *values)
8041 {
8042  long i;
8043 
8044  for (i=0; i<n; i++) {
8045  UPDATE_IF_MOVED(objspace, values[i]);
8046  }
8047 }
8048 
8049 static void
8050 gc_ref_update_imemo(rb_objspace_t *objspace, VALUE obj)
8051 {
8052  switch (imemo_type(obj)) {
8053  case imemo_env:
8054  {
8055  rb_env_t *env = (rb_env_t *)obj;
8056  TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
8057  UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
8058  gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
8059  }
8060  break;
8061  case imemo_cref:
8062  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass);
8063  TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
8064  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
8065  break;
8066  case imemo_svar:
8067  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
8068  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
8069  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
8070  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
8071  break;
8072  case imemo_throw_data:
8073  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
8074  break;
8075  case imemo_ifunc:
8076  break;
8077  case imemo_memo:
8078  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
8079  UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
8080  break;
8081  case imemo_ment:
8082  gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
8083  break;
8084  case imemo_iseq:
8085  rb_iseq_update_references((rb_iseq_t *)obj);
8086  break;
8087  case imemo_ast:
8088  rb_ast_update_references((rb_ast_t *)obj);
8089  break;
8090  case imemo_parser_strterm:
8091  case imemo_tmpbuf:
8092  break;
8093  default:
8094  rb_bug("not reachable %d", imemo_type(obj));
8095  break;
8096  }
8097 }
8098 
8099 static enum rb_id_table_iterator_result
8100 check_id_table_move(ID id, VALUE value, void *data)
8101 {
8102  rb_objspace_t *objspace = (rb_objspace_t *)data;
8103 
8104  if (gc_object_moved_p(objspace, (VALUE)value)) {
8105  return ID_TABLE_REPLACE;
8106  }
8107 
8108  return ID_TABLE_CONTINUE;
8109 }
8110 
8111 /* Returns the new location of an object, if it moved. Otherwise returns
8112  * the existing location. */
8113 VALUE
8114 rb_gc_location(VALUE value)
8115 {
8116 
8117  VALUE destination;
8118 
8119  if (!SPECIAL_CONST_P((void *)value)) {
8120  void *poisoned = asan_poisoned_object_p(value);
8121  asan_unpoison_object(value, false);
8122 
8123  if (BUILTIN_TYPE(value) == T_MOVED) {
8124  destination = (VALUE)RMOVED(value)->destination;
8125  GC_ASSERT(BUILTIN_TYPE(destination) != T_NONE);
8126  }
8127  else {
8128  destination = value;
8129  }
8130 
8131  /* Re-poison slot if it's not the one we want */
8132  if (poisoned) {
8133  GC_ASSERT(BUILTIN_TYPE(value) == T_NONE);
8134  asan_poison_object(value);
8135  }
8136  }
8137  else {
8138  destination = value;
8139  }
8140 
8141  return destination;
8142 }
8143 
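/* [Editor's note] A typical caller treats rb_gc_location() as "resolve
 * this reference": it returns the forwarding destination for T_MOVED
 * slots and the value itself otherwise, so it is safe to apply to any
 * VALUE. A hypothetical compaction-aware extension would refresh a
 * cached reference like so:
 *
 *     self->cached_obj = rb_gc_location(self->cached_obj);
 */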
8144 static enum rb_id_table_iterator_result
8145 update_id_table(ID *key, VALUE * value, void *data, int existing)
8146 {
8147  rb_objspace_t *objspace = (rb_objspace_t *)data;
8148 
8149  if (gc_object_moved_p(objspace, (VALUE)*value)) {
8150  *value = rb_gc_location((VALUE)*value);
8151  }
8152 
8153  return ID_TABLE_CONTINUE;
8154 }
8155 
8156 static void
8157 update_m_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
8158 {
8159  if (tbl) {
8160  rb_id_table_foreach_with_replace(tbl, check_id_table_move, update_id_table, objspace);
8161  }
8162 }
8163 
8164 static enum rb_id_table_iterator_result
8165 update_const_table(VALUE value, void *data)
8166 {
8167  rb_const_entry_t *ce = (rb_const_entry_t *)value;
8168  rb_objspace_t * objspace = (rb_objspace_t *)data;
8169 
8170  if (gc_object_moved_p(objspace, ce->value)) {
8171  ce->value = rb_gc_location(ce->value);
8172  }
8173 
8174  if (gc_object_moved_p(objspace, ce->file)) {
8175  ce->file = rb_gc_location(ce->file);
8176  }
8177 
8178  return ID_TABLE_CONTINUE;
8179 }
8180 
8181 static void
8182 update_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
8183 {
8184  if (!tbl) return;
8185  rb_id_table_foreach_values(tbl, update_const_table, objspace);
8186 }
8187 
8188 static void
8189 update_subclass_entries(rb_objspace_t *objspace, rb_subclass_entry_t *entry)
8190 {
8191  while (entry) {
8192  UPDATE_IF_MOVED(objspace, entry->klass);
8193  entry = entry->next;
8194  }
8195 }
8196 
8197 static void
8198 update_class_ext(rb_objspace_t *objspace, rb_classext_t *ext)
8199 {
8200  UPDATE_IF_MOVED(objspace, ext->origin_);
8201  UPDATE_IF_MOVED(objspace, ext->refined_class);
8202  update_subclass_entries(objspace, ext->subclasses);
8203 }
8204 
8205 static void
8206 gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
8207 {
8208  RVALUE *any = RANY(obj);
8209 
8210  gc_report(4, objspace, "update-refs: %p ->", (void *)obj);
8211 
8212  switch (BUILTIN_TYPE(obj)) {
8213  case T_CLASS:
8214  case T_MODULE:
8215  if (RCLASS_SUPER((VALUE)obj)) {
8216  UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
8217  }
8218  if (!RCLASS_EXT(obj)) break;
8219  update_m_tbl(objspace, RCLASS_M_TBL(obj));
8220  gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
8221  update_class_ext(objspace, RCLASS_EXT(obj));
8222  update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
8223  break;
8224 
8225  case T_ICLASS:
8226  if (FL_TEST(obj, RICLASS_IS_ORIGIN)) {
8227  update_m_tbl(objspace, RCLASS_M_TBL(obj));
8228  }
8229  if (RCLASS_SUPER((VALUE)obj)) {
8230  UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
8231  }
8232  if (!RCLASS_EXT(obj)) break;
8233  if (RCLASS_IV_TBL(obj)) {
8234  gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
8235  }
8236  update_class_ext(objspace, RCLASS_EXT(obj));
8237  update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
8238  break;
8239 
8240  case T_IMEMO:
8241  gc_ref_update_imemo(objspace, obj);
8242  return;
8243 
8244  case T_NIL:
8245  case T_FIXNUM:
8246  case T_NODE:
8247  case T_MOVED:
8248  case T_NONE:
8249  /* These can't move */
8250  return;
8251 
8252  case T_ARRAY:
8253  if (FL_TEST(obj, ELTS_SHARED)) {
8254  UPDATE_IF_MOVED(objspace, any->as.array.as.heap.aux.shared_root);
8255  }
8256  else {
8257  gc_ref_update_array(objspace, obj);
8258  }
8259  break;
8260 
8261  case T_HASH:
8262  gc_ref_update_hash(objspace, obj);
8263  UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
8264  break;
8265 
8266  case T_STRING:
8267  if (STR_SHARED_P(obj)) {
8268  UPDATE_IF_MOVED(objspace, any->as.string.as.heap.aux.shared);
8269  }
8270  break;
8271 
8272  case T_DATA:
8273  /* Call the compaction callback, if it exists */
8274  {
8275  void *const ptr = DATA_PTR(obj);
8276  if (ptr) {
8277  if (RTYPEDDATA_P(obj)) {
8278  RUBY_DATA_FUNC compact_func = any->as.typeddata.type->function.dcompact;
8279  if (compact_func) (*compact_func)(ptr);
8280  }
8281  }
8282  }
8283  break;
8284 
8285  case T_OBJECT:
8286  gc_ref_update_object(objspace, obj);
8287  break;
8288 
8289  case T_FILE:
8290  if (any->as.file.fptr) {
8291  UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
8292  UPDATE_IF_MOVED(objspace, any->as.file.fptr->tied_io_for_writing);
8293  UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_asciicompat);
8294  UPDATE_IF_MOVED(objspace, any->as.file.fptr->writeconv_pre_ecopts);
8295  UPDATE_IF_MOVED(objspace, any->as.file.fptr->encs.ecopts);
8296  UPDATE_IF_MOVED(objspace, any->as.file.fptr->write_lock);
8297  }
8298  break;
8299  case T_REGEXP:
8300  UPDATE_IF_MOVED(objspace, any->as.regexp.src);
8301  break;
8302 
8303  case T_SYMBOL:
8304  if (DYNAMIC_SYM_P((VALUE)any)) {
8305  UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
8306  }
8307  break;
8308 
8309  case T_FLOAT:
8310  case T_BIGNUM:
8311  break;
8312 
8313  case T_MATCH:
8314  UPDATE_IF_MOVED(objspace, any->as.match.regexp);
8315 
8316  if (any->as.match.str) {
8317  UPDATE_IF_MOVED(objspace, any->as.match.str);
8318  }
8319  break;
8320 
8321  case T_RATIONAL:
8322  UPDATE_IF_MOVED(objspace, any->as.rational.num);
8323  UPDATE_IF_MOVED(objspace, any->as.rational.den);
8324  break;
8325 
8326  case T_COMPLEX:
8327  UPDATE_IF_MOVED(objspace, any->as.complex.real);
8328  UPDATE_IF_MOVED(objspace, any->as.complex.imag);
8329 
8330  break;
8331 
8332  case T_STRUCT:
8333  {
8334  long i, len = RSTRUCT_LEN(obj);
8335  VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
8336 
8337  for (i = 0; i < len; i++) {
8338  UPDATE_IF_MOVED(objspace, ptr[i]);
8339  }
8340  }
8341  break;
8342  default:
8343 #if GC_DEBUG
8344  rb_gcdebug_print_obj_condition((VALUE)obj);
8345  rb_obj_info_dump(obj);
8346  rb_bug("unreachable");
8347 #endif
8348  break;
8349 
8350  }
8351 
8352  UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
8353 
8354  gc_report(4, objspace, "update-refs: %p <-", (void *)obj);
8355 }
8356 
8357 static int
8358 gc_ref_update(void *vstart, void *vend, size_t stride, void * data)
8359 {
8360  rb_objspace_t * objspace;
8361  struct heap_page *page;
8362  short free_slots = 0;
8363 
8364  VALUE v = (VALUE)vstart;
8365  objspace = (rb_objspace_t *)data;
8366  page = GET_HEAP_PAGE(v);
8367  asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
8368  page->freelist = NULL;
8369  asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
8370  page->flags.has_uncollectible_shady_objects = FALSE;
8371  page->flags.has_remembered_objects = FALSE;
8372 
8373  /* For each object on the page */
8374  for (; v != (VALUE)vend; v += stride) {
8375  if (!SPECIAL_CONST_P(v)) {
8376  void *poisoned = asan_poisoned_object_p(v);
8377  asan_unpoison_object(v, false);
8378 
8379  switch (BUILTIN_TYPE(v)) {
8380  case T_NONE:
8381  heap_page_add_freeobj(objspace, page, v);
8382  free_slots++;
8383  break;
8384  case T_MOVED:
8385  break;
8386  case T_ZOMBIE:
8387  break;
8388  default:
8389  if (RVALUE_WB_UNPROTECTED(v)) {
8390  page->flags.has_uncollectible_shady_objects = TRUE;
8391  }
8392  if (RVALUE_PAGE_MARKING(page, v)) {
8393  page->flags.has_remembered_objects = TRUE;
8394  }
8395  gc_update_object_references(objspace, v);
8396  }
8397 
8398  if (poisoned) {
8399  GC_ASSERT(BUILTIN_TYPE(v) == T_NONE);
8400  asan_poison_object(v);
8401  }
8402  }
8403  }
8404 
8405  page->free_slots = free_slots;
8406  return 0;
8407 }
8408 
8409 extern rb_symbols_t ruby_global_symbols;
8410 #define global_symbols ruby_global_symbols
8411 
8412 static void
8413 gc_update_references(rb_objspace_t * objspace)
8414 {
8415  rb_execution_context_t *ec = GET_EC();
8416  rb_vm_t *vm = rb_ec_vm_ptr(ec);
8417 
8418  objspace_each_objects_without_setup(objspace, gc_ref_update, objspace);
8419  rb_vm_update_references(vm);
8420  rb_transient_heap_update_references();
8421  global_symbols.ids = rb_gc_location(global_symbols.ids);
8422  global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
8423  gc_update_tbl_refs(objspace, objspace->obj_to_id_tbl);
8424  gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
8425  gc_update_table_refs(objspace, global_symbols.str_sym);
8426  gc_update_table_refs(objspace, finalizer_table);
8427 }
8428 
8429 static VALUE type_sym(size_t type);
8430 
8431 static VALUE
8432 gc_compact_stats(rb_objspace_t *objspace)
8433 {
8434  size_t i;
8435  VALUE h = rb_hash_new();
8436  VALUE considered = rb_hash_new();
8437  VALUE moved = rb_hash_new();
8438 
8439  for (i=0; i<T_MASK; i++) {
8440  rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
8441  }
8442 
8443  for (i=0; i<T_MASK; i++) {
8444  rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
8445  }
8446 
8447  rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
8448  rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);
8449 
8450  return h;
8451 }
8452 
8453 static void gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier);
8454 
8455 static void
8456 gc_compact(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier)
8457 {
8458 
8459  objspace->flags.during_compacting = TRUE;
8460  {
8461  /* pin objects referenced by maybe pointers */
8462  garbage_collect(objspace, GPR_DEFAULT_REASON);
8463  /* compact */
8464  gc_compact_after_gc(objspace, use_toward_empty, use_double_pages, use_verifier);
8465  }
8466  objspace->flags.during_compacting = FALSE;
8467 }
8468 
8469 static VALUE
8470 rb_gc_compact(rb_execution_context_t *ec, VALUE self)
8471 {
8472  rb_objspace_t *objspace = &rb_objspace;
8473  if (dont_gc) return Qnil;
8474 
8475  gc_compact(objspace, FALSE, FALSE, FALSE);
8476  return gc_compact_stats(objspace);
8477 }
8478 
8479 static void
8480 root_obj_check_moved_i(const char *category, VALUE obj, void *data)
8481 {
8482  if (gc_object_moved_p(&rb_objspace, obj)) {
8483  rb_bug("ROOT %s points to MOVED: %p -> %s\n", category, (void *)obj, obj_info(rb_gc_location(obj)));
8484  }
8485 }
8486 
8487 static void
8488 reachable_object_check_moved_i(VALUE ref, void *data)
8489 {
8490  VALUE parent = (VALUE)data;
8491  if (gc_object_moved_p(&rb_objspace, ref)) {
8492  rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
8493  }
8494 }
8495 
8496 static int
8497 heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
8498 {
8499  VALUE v = (VALUE)vstart;
8500  for (; v != (VALUE)vend; v += stride) {
8501  if (gc_object_moved_p(&rb_objspace, v)) {
8502  /* Moved object still on the heap, something may have a reference. */
8503  }
8504  else {
8505  void *poisoned = asan_poisoned_object_p(v);
8506  asan_unpoison_object(v, false);
8507 
8508  switch (BUILTIN_TYPE(v)) {
8509  case T_NONE:
8510  case T_ZOMBIE:
8511  break;
8512  default:
8513  rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
8514  }
8515 
8516  if (poisoned) {
8518  asan_poison_object(v);
8519  }
8520  }
8521  }
8522 
8523  return 0;
8524 }
8525 
8526 static VALUE
8527 gc_check_references_for_moved(rb_objspace_t *objspace)
8528 {
8529  objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
8530  objspace_each_objects(objspace, heap_check_moved_i, NULL);
8531  return Qnil;
8532 }
8533 
8534 static void
8535 gc_compact_after_gc(rb_objspace_t *objspace, int use_toward_empty, int use_double_pages, int use_verifier)
8536 {
8537  if (0) fprintf(stderr, "gc_compact_after_gc: %d,%d,%d\n", use_toward_empty, use_double_pages, use_verifier);
8538 
8539  mjit_gc_start_hook(); // prevent MJIT from running while moving pointers related to ISeq
8540 
8541  objspace->profile.compact_count++;
8542 
8543  if (use_verifier) {
8544  gc_verify_internal_consistency(objspace);
8545  }
8546 
8547  if (use_double_pages) {
8548  /* Double heap size */
8549  heap_add_pages(objspace, heap_eden, heap_allocated_pages);
8550  }
8551 
8552  VALUE moved_list_head;
8553  VALUE disabled = rb_objspace_gc_disable(objspace);
8554 
8555  if (use_toward_empty) {
8556  moved_list_head = gc_compact_heap(objspace, compare_free_slots);
8557  }
8558  else {
8559  moved_list_head = gc_compact_heap(objspace, compare_pinned);
8560  }
8561  heap_eden->freelist = NULL;
8562 
8563  gc_update_references(objspace);
8564  if (!RTEST(disabled)) rb_objspace_gc_enable(objspace);
8565 
8566  if (use_verifier) {
8567  gc_check_references_for_moved(objspace);
8568  }
8569 
 8570  rb_clear_method_cache_by_class(rb_cObject);
 8571  rb_clear_constant_cache();
8572  heap_eden->free_pages = NULL;
8573  heap_eden->using_page = NULL;
8574 
8575  /* For each moved slot */
8576  while (moved_list_head) {
8577  VALUE next_moved;
8578  struct heap_page *page;
8579 
8580  page = GET_HEAP_PAGE(moved_list_head);
8581  next_moved = RMOVED(moved_list_head)->next;
8582 
8583  /* clear the memory for that moved slot */
8584  RMOVED(moved_list_head)->flags = 0;
8585  RMOVED(moved_list_head)->destination = 0;
8586  RMOVED(moved_list_head)->next = 0;
8587  page->free_slots++;
8588  heap_page_add_freeobj(objspace, page, moved_list_head);
8589 
8590  if (page->free_slots == page->total_slots && heap_pages_freeable_pages > 0) {
 8591  heap_pages_freeable_pages--;
8592  heap_unlink_page(objspace, heap_eden, page);
8593  heap_add_page(objspace, heap_tomb, page);
8594  }
8595  objspace->profile.total_freed_objects++;
8596  moved_list_head = next_moved;
8597  }
8598 
8599  /* Add any eden pages with free slots back to the free pages list */
8600  struct heap_page *page = NULL;
8601  list_for_each(&heap_eden->pages, page, page_node) {
8602  if (page->free_slots > 0) {
8603  heap_add_freepage(heap_eden, page);
8604  } else {
8605  page->free_next = NULL;
8606  }
8607  }
8608 
8609  /* Set up "using_page" if we have any pages with free slots */
8610  if (heap_eden->free_pages) {
8611  heap_eden->using_page = heap_eden->free_pages;
8612  heap_eden->free_pages = heap_eden->free_pages->free_next;
8613  }
8614 
8615  if (use_verifier) {
8616  gc_verify_internal_consistency(objspace);
8617  }
8618 
8619  mjit_gc_exit_hook(); // unlock MJIT here, because `rb_gc()` calls `mjit_gc_start_hook()` again.
8620 }
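/* Summary of the pipeline above (informal, no additional behavior): the
 * preceding full GC pins conservatively referenced objects, gc_compact_heap()
 * slides movable objects and leaves T_MOVED tombstones behind,
 * gc_update_references() rewrites every reference to point at the new
 * locations, and the loop over moved_list_head recycles each tombstone back
 * onto its page freelist, handing fully emptied pages over to the tomb heap.
 */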
8621 
8622 /*
8623  * call-seq:
8624  * GC.verify_compaction_references(toward: nil, double_heap: nil) -> nil
8625  *
8626  * Verify compaction reference consistency.
8627  *
8628  * This method is implementation specific. During compaction, objects that
8629  * were moved are replaced with T_MOVED objects. No object should have a
8630  * reference to a T_MOVED object after compaction.
8631  *
8632  * This function doubles the heap to ensure room to move all objects,
8633  * compacts the heap to make sure everything moves, updates all references,
 8634  * then performs a full GC. If any object still contains a reference to a
 8635  * T_MOVED object, that reference will be visited during marking, and the
 8636  * process will crash with a SEGV, exposing the stale reference.
8637  */
8638 static VALUE
8639 gc_verify_compaction_references(int argc, VALUE *argv, VALUE mod)
8640 {
8641  rb_objspace_t *objspace = &rb_objspace;
8642  int use_toward_empty = FALSE;
8643  int use_double_pages = FALSE;
8644 
8645  if (dont_gc) return Qnil;
8646 
8647  VALUE opt = Qnil;
8648  static ID keyword_ids[2];
8649  VALUE kwvals[2];
8650 
8651  kwvals[1] = Qtrue;
8652 
8653  rb_scan_args(argc, argv, "0:", &opt);
8654 
8655  if (!NIL_P(opt)) {
8656  if (!keyword_ids[0]) {
8657  keyword_ids[0] = rb_intern("toward");
8658  keyword_ids[1] = rb_intern("double_heap");
8659  }
8660 
8661  rb_get_kwargs(opt, keyword_ids, 0, 2, kwvals);
8662  if (kwvals[0] != Qundef && rb_intern("empty") == rb_sym2id(kwvals[0])) {
8663  use_toward_empty = TRUE;
8664  }
8665  if (kwvals[1] != Qundef && RTEST(kwvals[1])) {
8666  use_double_pages = TRUE;
8667  }
8668  }
8669 
8670  gc_compact(objspace, use_toward_empty, use_double_pages, TRUE);
8671  return gc_compact_stats(objspace);
8672 }
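/* A minimal sketch of driving the verifier from C, e.g. in a test extension
 * (rb_eval_string is public API; the method is exposed to Ruby as
 * GC.verify_compaction_references):
 *
 *     VALUE stats = rb_eval_string(
 *         "GC.verify_compaction_references(toward: :empty, double_heap: true)");
 *     // stats is the hash built by gc_compact_stats(): {considered:, moved:}
 *
 * toward: :empty selects the compare_free_slots ordering so objects migrate
 * toward emptier pages; double_heap: true pre-grows the heap as described in
 * the comment above.
 */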
8673 
8674 VALUE
 8675 rb_gc_start(void)
8676 {
8677  rb_gc();
8678  return Qnil;
8679 }
8680 
8681 void
8682 rb_gc(void)
8683 {
8684  rb_objspace_t *objspace = &rb_objspace;
8685  int reason = GPR_DEFAULT_REASON;
8686  garbage_collect(objspace, reason);
8687 }
8688 
8689 int
 8690 rb_during_gc(void)
8691 {
8692  rb_objspace_t *objspace = &rb_objspace;
8693  return during_gc;
8694 }
8695 
8696 #if RGENGC_PROFILE >= 2
8697 
8698 static const char *type_name(int type, VALUE obj);
8699 
8700 static void
8701 gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
 8702 {
 8703  VALUE result = rb_hash_new_with_size(T_MASK);
8702 {
8704  int i;
8705  for (i=0; i<T_MASK; i++) {
8706  const char *type = type_name(i, 0);
 8707  rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
8708  }
8709  rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
8710 }
8711 #endif
8712 
8713 size_t
 8714 rb_gc_count(void)
8715 {
8716  return rb_objspace.profile.count;
8717 }
8718 
8719 static VALUE
8720 gc_count(rb_execution_context_t *ec, VALUE self)
8721 {
8722  return SIZET2NUM(rb_gc_count());
8723 }
8724 
8725 static VALUE
8726 gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const int orig_flags)
8727 {
8728  static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
8729  static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
8730 #if RGENGC_ESTIMATE_OLDMALLOC
8731  static VALUE sym_oldmalloc;
8732 #endif
8733  static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
8734  static VALUE sym_none, sym_marking, sym_sweeping;
8735  VALUE hash = Qnil, key = Qnil;
8736  VALUE major_by;
8737  VALUE flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
8738 
8739  if (SYMBOL_P(hash_or_key)) {
8740  key = hash_or_key;
8741  }
8742  else if (RB_TYPE_P(hash_or_key, T_HASH)) {
8743  hash = hash_or_key;
8744  }
8745  else {
8746  rb_raise(rb_eTypeError, "non-hash or symbol given");
8747  }
8748 
8749  if (sym_major_by == Qnil) {
8750 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
8751  S(major_by);
8752  S(gc_by);
8753  S(immediate_sweep);
8754  S(have_finalizer);
8755  S(state);
8756 
8757  S(stress);
8758  S(nofree);
8759  S(oldgen);
8760  S(shady);
8761  S(force);
8762 #if RGENGC_ESTIMATE_OLDMALLOC
8763  S(oldmalloc);
8764 #endif
8765  S(newobj);
8766  S(malloc);
8767  S(method);
8768  S(capi);
8769 
8770  S(none);
8771  S(marking);
8772  S(sweeping);
8773 #undef S
8774  }
8775 
8776 #define SET(name, attr) \
8777  if (key == sym_##name) \
8778  return (attr); \
8779  else if (hash != Qnil) \
8780  rb_hash_aset(hash, sym_##name, (attr));
8781 
8782  major_by =
8783  (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
8784  (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
8785  (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
8786  (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
8787 #if RGENGC_ESTIMATE_OLDMALLOC
8788  (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
8789 #endif
8790  Qnil;
8791  SET(major_by, major_by);
8792 
8793  SET(gc_by,
8794  (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
8795  (flags & GPR_FLAG_MALLOC) ? sym_malloc :
8796  (flags & GPR_FLAG_METHOD) ? sym_method :
8797  (flags & GPR_FLAG_CAPI) ? sym_capi :
8798  (flags & GPR_FLAG_STRESS) ? sym_stress :
8799  Qnil
8800  );
8801 
8802  SET(have_finalizer, (flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
8803  SET(immediate_sweep, (flags & GPR_FLAG_IMMEDIATE_SWEEP) ? Qtrue : Qfalse);
8804 
8805  if (orig_flags == 0) {
8806  SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
8807  gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
8808  }
8809 #undef SET
8810 
 8811  if (!NIL_P(key)) { /* a matched key should have returned above */
8812  rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
8813  }
8814 
8815  return hash;
8816 }
8817 
8818 VALUE
 8819 rb_gc_latest_gc_info(VALUE key)
8820 {
8821  rb_objspace_t *objspace = &rb_objspace;
8822  return gc_info_decode(objspace, key, 0);
8823 }
8824 
8825 static VALUE
8826 gc_latest_gc_info(rb_execution_context_t *ec, VALUE self, VALUE arg)
8827 {
8828  rb_objspace_t *objspace = &rb_objspace;
8829 
8830  if (NIL_P(arg)) {
8831  arg = rb_hash_new();
8832  }
8833  else if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
8834  rb_raise(rb_eTypeError, "non-hash or symbol given");
8835  }
8836 
8837  return gc_info_decode(objspace, arg, 0);
8838 }
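/* Usage sketch for the C-level API wrapped above (rb_gc_latest_gc_info is
 * public; GC.latest_gc_info is its Ruby counterpart):
 *
 *     VALUE why = rb_gc_latest_gc_info(ID2SYM(rb_intern("gc_by")));
 *     // => :newobj, :malloc, :method, :capi, :stress, or Qnil
 *
 *     VALUE info = rb_gc_latest_gc_info(rb_hash_new());
 *     // fills and returns the hash with every decoded flag
 *
 * Any other symbol raises ArgumentError ("unknown key"), per gc_info_decode()
 * above.
 */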
8839 
 8840 enum gc_stat_sym {
 8841  gc_stat_sym_count,
 8842  gc_stat_sym_heap_allocated_pages,
 8843  gc_stat_sym_heap_sorted_length,
 8844  gc_stat_sym_heap_allocatable_pages,
 8845  gc_stat_sym_heap_available_slots,
 8846  gc_stat_sym_heap_live_slots,
 8847  gc_stat_sym_heap_free_slots,
 8848  gc_stat_sym_heap_final_slots,
 8849  gc_stat_sym_heap_marked_slots,
 8850  gc_stat_sym_heap_eden_pages,
 8851  gc_stat_sym_heap_tomb_pages,
 8852  gc_stat_sym_total_allocated_pages,
 8853  gc_stat_sym_total_freed_pages,
 8854  gc_stat_sym_total_allocated_objects,
 8855  gc_stat_sym_total_freed_objects,
 8856  gc_stat_sym_malloc_increase_bytes,
 8857  gc_stat_sym_malloc_increase_bytes_limit,
 8858 #if USE_RGENGC
 8859  gc_stat_sym_minor_gc_count,
 8860  gc_stat_sym_major_gc_count,
 8861  gc_stat_sym_compact_count,
 8862  gc_stat_sym_remembered_wb_unprotected_objects,
 8863  gc_stat_sym_remembered_wb_unprotected_objects_limit,
 8864  gc_stat_sym_old_objects,
 8865  gc_stat_sym_old_objects_limit,
 8866 #if RGENGC_ESTIMATE_OLDMALLOC
 8867  gc_stat_sym_oldmalloc_increase_bytes,
 8868  gc_stat_sym_oldmalloc_increase_bytes_limit,
 8869 #endif
 8870 #if RGENGC_PROFILE
 8871  gc_stat_sym_total_generated_normal_object_count,
 8872  gc_stat_sym_total_generated_shady_object_count,
 8873  gc_stat_sym_total_shade_operation_count,
 8874  gc_stat_sym_total_promoted_count,
 8875  gc_stat_sym_total_remembered_normal_object_count,
 8876  gc_stat_sym_total_remembered_shady_object_count,
 8877 #endif
 8878 #endif
 8879  gc_stat_sym_last
 8880 };
8881 
 8882 enum gc_stat_compat_sym {
 8883  gc_stat_compat_sym_gc_stat_heap_used,
 8884  gc_stat_compat_sym_heap_eden_page_length,
 8885  gc_stat_compat_sym_heap_tomb_page_length,
 8886  gc_stat_compat_sym_heap_increment,
 8887  gc_stat_compat_sym_heap_length,
 8888  gc_stat_compat_sym_heap_live_slot,
 8889  gc_stat_compat_sym_heap_free_slot,
 8890  gc_stat_compat_sym_heap_final_slot,
 8891  gc_stat_compat_sym_heap_swept_slot,
 8892 #if USE_RGENGC
 8893  gc_stat_compat_sym_remembered_shady_object,
 8894  gc_stat_compat_sym_remembered_shady_object_limit,
 8895  gc_stat_compat_sym_old_object,
 8896  gc_stat_compat_sym_old_object_limit,
 8897 #endif
 8898  gc_stat_compat_sym_total_allocated_object,
 8899  gc_stat_compat_sym_total_freed_object,
 8900  gc_stat_compat_sym_malloc_increase,
 8901  gc_stat_compat_sym_malloc_limit,
 8902 #if RGENGC_ESTIMATE_OLDMALLOC
 8903  gc_stat_compat_sym_oldmalloc_increase,
 8904  gc_stat_compat_sym_oldmalloc_limit,
 8905 #endif
 8906  gc_stat_compat_sym_last
 8907 };
8908 
8909 static VALUE gc_stat_symbols[gc_stat_sym_last];
8910 static VALUE gc_stat_compat_symbols[gc_stat_compat_sym_last];
8911 static VALUE gc_stat_compat_table;
8912 
8913 static void
8914 setup_gc_stat_symbols(void)
8915 {
8916  if (gc_stat_symbols[0] == 0) {
8917 #define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
8918  S(count);
 8919  S(heap_allocated_pages);
8920  S(heap_sorted_length);
 8921  S(heap_allocatable_pages);
8922  S(heap_available_slots);
8923  S(heap_live_slots);
8924  S(heap_free_slots);
8925  S(heap_final_slots);
8926  S(heap_marked_slots);
8927  S(heap_eden_pages);
8928  S(heap_tomb_pages);
8929  S(total_allocated_pages);
8930  S(total_freed_pages);
8931  S(total_allocated_objects);
8932  S(total_freed_objects);
8933  S(malloc_increase_bytes);
8934  S(malloc_increase_bytes_limit);
8935 #if USE_RGENGC
8936  S(minor_gc_count);
8937  S(major_gc_count);
8938  S(compact_count);
8939  S(remembered_wb_unprotected_objects);
8940  S(remembered_wb_unprotected_objects_limit);
8941  S(old_objects);
8942  S(old_objects_limit);
8943 #if RGENGC_ESTIMATE_OLDMALLOC
8944  S(oldmalloc_increase_bytes);
8945  S(oldmalloc_increase_bytes_limit);
8946 #endif
8947 #if RGENGC_PROFILE
8948  S(total_generated_normal_object_count);
8949  S(total_generated_shady_object_count);
8950  S(total_shade_operation_count);
8951  S(total_promoted_count);
8952  S(total_remembered_normal_object_count);
8953  S(total_remembered_shady_object_count);
8954 #endif /* RGENGC_PROFILE */
8955 #endif /* USE_RGENGC */
8956 #undef S
8957 #define S(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s] = ID2SYM(rb_intern_const(#s))
8958  S(gc_stat_heap_used);
8959  S(heap_eden_page_length);
8960  S(heap_tomb_page_length);
8961  S(heap_increment);
8962  S(heap_length);
8963  S(heap_live_slot);
8964  S(heap_free_slot);
8965  S(heap_final_slot);
8966  S(heap_swept_slot);
8967 #if USE_RGEGC
8968  S(remembered_shady_object);
8969  S(remembered_shady_object_limit);
8970  S(old_object);
8971  S(old_object_limit);
8972 #endif
8973  S(total_allocated_object);
8974  S(total_freed_object);
8975  S(malloc_increase);
8976  S(malloc_limit);
8977 #if RGENGC_ESTIMATE_OLDMALLOC
8978  S(oldmalloc_increase);
8979  S(oldmalloc_limit);
8980 #endif
8981 #undef S
8982 
8983  {
8984  VALUE table = gc_stat_compat_table = rb_hash_new();
8985  rb_obj_hide(table);
 8986  rb_gc_register_mark_object(table);
8987 
8988  /* compatibility layer for Ruby 2.1 */
8989 #define OLD_SYM(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s]
8990 #define NEW_SYM(s) gc_stat_symbols[gc_stat_sym_##s]
8991  rb_hash_aset(table, OLD_SYM(gc_stat_heap_used), NEW_SYM(heap_allocated_pages));
8992  rb_hash_aset(table, OLD_SYM(heap_eden_page_length), NEW_SYM(heap_eden_pages));
8993  rb_hash_aset(table, OLD_SYM(heap_tomb_page_length), NEW_SYM(heap_tomb_pages));
8994  rb_hash_aset(table, OLD_SYM(heap_increment), NEW_SYM(heap_allocatable_pages));
8995  rb_hash_aset(table, OLD_SYM(heap_length), NEW_SYM(heap_sorted_length));
8996  rb_hash_aset(table, OLD_SYM(heap_live_slot), NEW_SYM(heap_live_slots));
8997  rb_hash_aset(table, OLD_SYM(heap_free_slot), NEW_SYM(heap_free_slots));
8998  rb_hash_aset(table, OLD_SYM(heap_final_slot), NEW_SYM(heap_final_slots));
8999 #if USE_RGEGC
9000  rb_hash_aset(table, OLD_SYM(remembered_shady_object), NEW_SYM(remembered_wb_unprotected_objects));
9001  rb_hash_aset(table, OLD_SYM(remembered_shady_object_limit), NEW_SYM(remembered_wb_unprotected_objects_limit));
9002  rb_hash_aset(table, OLD_SYM(old_object), NEW_SYM(old_objects));
9003  rb_hash_aset(table, OLD_SYM(old_object_limit), NEW_SYM(old_objects_limit));
9004 #endif
9005  rb_hash_aset(table, OLD_SYM(total_allocated_object), NEW_SYM(total_allocated_objects));
9006  rb_hash_aset(table, OLD_SYM(total_freed_object), NEW_SYM(total_freed_objects));
9007  rb_hash_aset(table, OLD_SYM(malloc_increase), NEW_SYM(malloc_increase_bytes));
9008  rb_hash_aset(table, OLD_SYM(malloc_limit), NEW_SYM(malloc_increase_bytes_limit));
9009 #if RGENGC_ESTIMATE_OLDMALLOC
9010  rb_hash_aset(table, OLD_SYM(oldmalloc_increase), NEW_SYM(oldmalloc_increase_bytes));
9011  rb_hash_aset(table, OLD_SYM(oldmalloc_limit), NEW_SYM(oldmalloc_increase_bytes_limit));
9012 #endif
9013 #undef OLD_SYM
9014 #undef NEW_SYM
9015  rb_obj_freeze(table);
9016  }
9017  }
9018 }
9019 
9020 static VALUE
9021 compat_key(VALUE key)
9022 {
9023  VALUE new_key = rb_hash_lookup(gc_stat_compat_table, key);
9024 
9025  if (!NIL_P(new_key)) {
9026  static int warned = 0;
9027  if (warned == 0) {
9028  rb_warn("GC.stat keys were changed from Ruby 2.1. "
9029  "In this case, you refer to obsolete `%"PRIsVALUE"' (new key is `%"PRIsVALUE"'). "
9030  "Please check <https://bugs.ruby-lang.org/issues/9924> for more information.",
9031  key, new_key);
9032  warned = 1;
9033  }
9034  }
9035 
9036  return new_key;
9037 }
9038 
9039 static VALUE
9040 default_proc_for_compat_func(RB_BLOCK_CALL_FUNC_ARGLIST(hash, _))
9041 {
9042  VALUE key, new_key;
9043 
9044  Check_Type(hash, T_HASH);
9045  rb_check_arity(argc, 2, 2);
9046  key = argv[1];
9047 
9048  if ((new_key = compat_key(key)) != Qnil) {
9049  return rb_hash_lookup(hash, new_key);
9050  }
9051 
9052  return Qnil;
9053 }
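/* Net effect of the compatibility machinery above (illustrative): a Ruby
 * 2.1-era key is transparently redirected, with a one-time warning.
 *
 *     size_t live = rb_gc_stat(ID2SYM(rb_intern("heap_live_slot")));
 *     // warns once, then returns the value of the new :heap_live_slots key
 *
 * Symbol arguments take the `goto again` path in gc_stat_internal() below;
 * hash lookups of old keys are served by default_proc_for_compat_func().
 */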
9054 
9055 static size_t
9056 gc_stat_internal(VALUE hash_or_sym)
9057 {
9058  rb_objspace_t *objspace = &rb_objspace;
9059  VALUE hash = Qnil, key = Qnil;
9060 
9061  setup_gc_stat_symbols();
9062 
9063  if (RB_TYPE_P(hash_or_sym, T_HASH)) {
9064  hash = hash_or_sym;
9065 
9066  if (NIL_P(RHASH_IFNONE(hash))) {
9067  static VALUE default_proc_for_compat = 0;
9068  if (default_proc_for_compat == 0) { /* TODO: it should be */
9069  default_proc_for_compat = rb_proc_new(default_proc_for_compat_func, Qnil);
9070  rb_gc_register_mark_object(default_proc_for_compat);
9071  }
9072  rb_hash_set_default_proc(hash, default_proc_for_compat);
9073  }
9074  }
9075  else if (SYMBOL_P(hash_or_sym)) {
9076  key = hash_or_sym;
9077  }
9078  else {
9079  rb_raise(rb_eTypeError, "non-hash or symbol argument");
9080  }
9081 
9082 #define SET(name, attr) \
9083  if (key == gc_stat_symbols[gc_stat_sym_##name]) \
9084  return attr; \
9085  else if (hash != Qnil) \
9086  rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
9087 
9088  again:
9089  SET(count, objspace->profile.count);
9090 
9091  /* implementation dependent counters */
 9092  SET(heap_allocated_pages, heap_allocated_pages);
9093  SET(heap_sorted_length, heap_pages_sorted_length);
 9094  SET(heap_allocatable_pages, heap_allocatable_pages);
9095  SET(heap_available_slots, objspace_available_slots(objspace));
9096  SET(heap_live_slots, objspace_live_slots(objspace));
9097  SET(heap_free_slots, objspace_free_slots(objspace));
9098  SET(heap_final_slots, heap_pages_final_slots);
9099  SET(heap_marked_slots, objspace->marked_slots);
9100  SET(heap_eden_pages, heap_eden->total_pages);
9101  SET(heap_tomb_pages, heap_tomb->total_pages);
9102  SET(total_allocated_pages, objspace->profile.total_allocated_pages);
9103  SET(total_freed_pages, objspace->profile.total_freed_pages);
9104  SET(total_allocated_objects, objspace->total_allocated_objects);
9105  SET(total_freed_objects, objspace->profile.total_freed_objects);
9106  SET(malloc_increase_bytes, malloc_increase);
9107  SET(malloc_increase_bytes_limit, malloc_limit);
9108 #if USE_RGENGC
9109  SET(minor_gc_count, objspace->profile.minor_gc_count);
9110  SET(major_gc_count, objspace->profile.major_gc_count);
9111  SET(compact_count, objspace->profile.compact_count);
9112  SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
9113  SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
9114  SET(old_objects, objspace->rgengc.old_objects);
9115  SET(old_objects_limit, objspace->rgengc.old_objects_limit);
9116 #if RGENGC_ESTIMATE_OLDMALLOC
9117  SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
9118  SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
9119 #endif
9120 
9121 #if RGENGC_PROFILE
9122  SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
9123  SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
9124  SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
9125  SET(total_promoted_count, objspace->profile.total_promoted_count);
9126  SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
9127  SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
9128 #endif /* RGENGC_PROFILE */
9129 #endif /* USE_RGENGC */
9130 #undef SET
9131 
 9132  if (!NIL_P(key)) { /* a matched key should have returned above */
9133  VALUE new_key;
9134  if ((new_key = compat_key(key)) != Qnil) {
9135  key = new_key;
9136  goto again;
9137  }
9138  rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
9139  }
9140 
9141 #if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
9142  if (hash != Qnil) {
9143  gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
9144  gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
9145  gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
9146  gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
9147  gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
9148  gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
9149  }
9150 #endif
9151 
9152  return 0;
9153 }
9154 
9155 static VALUE
9156 gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
9157 {
9158  if (NIL_P(arg)) {
9159  arg = rb_hash_new();
9160  }
9161  else if (SYMBOL_P(arg)) {
9162  size_t value = gc_stat_internal(arg);
9163  return SIZET2NUM(value);
9164  }
9165  else if (RB_TYPE_P(arg, T_HASH)) {
9166  // ok
9167  }
9168  else {
9169  rb_raise(rb_eTypeError, "non-hash or symbol given");
9170  }
9171 
9172  gc_stat_internal(arg);
9173  return arg;
9174 }
9175 
9176 size_t
 9177 rb_gc_stat(VALUE key)
9178 {
9179  if (SYMBOL_P(key)) {
9180  size_t value = gc_stat_internal(key);
9181  return value;
9182  }
9183  else {
9184  gc_stat_internal(key);
9185  return 0;
9186  }
9187 }
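/* Usage sketch for rb_gc_stat() from a C extension; both calling modes of
 * the public API are shown (this mirrors GC.stat):
 *
 *     size_t gcs = rb_gc_stat(ID2SYM(rb_intern("count")));  // one counter
 *
 *     VALUE all = rb_hash_new();
 *     rb_gc_stat(all);  // fills `all`; the size_t return value is 0 here
 *
 * Note the asymmetry: a Symbol key yields the counter directly, while a Hash
 * argument is populated in place.
 */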
9188 
9189 static VALUE
9190 gc_stress_get(rb_execution_context_t *ec, VALUE self)
9191 {
9192  rb_objspace_t *objspace = &rb_objspace;
9193  return ruby_gc_stress_mode;
9194 }
9195 
9196 static void
9197 gc_stress_set(rb_objspace_t *objspace, VALUE flag)
9198 {
9199  objspace->flags.gc_stressful = RTEST(flag);
9200  objspace->gc_stress_mode = flag;
9201 }
9202 
9203 static VALUE
9204 gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
9205 {
9206  rb_objspace_t *objspace = &rb_objspace;
9207  gc_stress_set(objspace, flag);
9208  return flag;
9209 }
9210 
9211 VALUE
 9212 rb_gc_enable(void)
9213 {
9214  rb_objspace_t *objspace = &rb_objspace;
9215  return rb_objspace_gc_enable(objspace);
9216 }
9217 
9218 VALUE
 9219 rb_objspace_gc_enable(rb_objspace_t *objspace)
9220 {
9221  int old = dont_gc;
9222 
9223  dont_gc = FALSE;
9224  return old ? Qtrue : Qfalse;
9225 }
9226 
9227 static VALUE
9228 gc_enable(rb_execution_context_t *ec, VALUE _)
9229 {
9230  return rb_gc_enable();
9231 }
9232 
9233 VALUE
 9234 rb_gc_disable_no_rest(void)
9235 {
9236  rb_objspace_t *objspace = &rb_objspace;
9237  return gc_disable_no_rest(objspace);
9238 }
9239 
9240 static VALUE
9241 gc_disable_no_rest(rb_objspace_t *objspace)
9242 {
9243  int old = dont_gc;
9244  dont_gc = TRUE;
9245  return old ? Qtrue : Qfalse;
9246 }
9247 
9248 VALUE
 9249 rb_gc_disable(void)
9250 {
9251  rb_objspace_t *objspace = &rb_objspace;
9252  return rb_objspace_gc_disable(objspace);
9253 }
9254 
9255 VALUE
 9256 rb_objspace_gc_disable(rb_objspace_t *objspace)
9257 {
9258  gc_rest(objspace);
9259  return gc_disable_no_rest(objspace);
9260 }
9261 
9262 static VALUE
9263 gc_disable(rb_execution_context_t *ec, VALUE _)
9264 {
9265  return rb_gc_disable();
9266 }
9267 
9268 static int
9269 get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
9270 {
9271  char *ptr = getenv(name);
9272  ssize_t val;
9273 
9274  if (ptr != NULL && *ptr) {
9275  size_t unit = 0;
9276  char *end;
9277 #if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
9278  val = strtoll(ptr, &end, 0);
9279 #else
9280  val = strtol(ptr, &end, 0);
9281 #endif
9282  switch (*end) {
9283  case 'k': case 'K':
9284  unit = 1024;
9285  ++end;
9286  break;
9287  case 'm': case 'M':
9288  unit = 1024*1024;
9289  ++end;
9290  break;
9291  case 'g': case 'G':
9292  unit = 1024*1024*1024;
9293  ++end;
9294  break;
9295  }
9296  while (*end && isspace((unsigned char)*end)) end++;
9297  if (*end) {
9298  if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
9299  return 0;
9300  }
9301  if (unit > 0) {
9302  if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
9303  if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
9304  return 0;
9305  }
9306  val *= unit;
9307  }
9308  if (val > 0 && (size_t)val > lower_bound) {
9309  if (RTEST(ruby_verbose)) {
9310  fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
9311  }
9312  *default_value = (size_t)val;
9313  return 1;
9314  }
9315  else {
9316  if (RTEST(ruby_verbose)) {
9317  fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
9318  name, val, *default_value, lower_bound);
9319  }
9320  return 0;
9321  }
9322  }
9323  return 0;
9324 }
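/* Accepted forms for the size parameters parsed above (illustrative values):
 *
 *     RUBY_GC_MALLOC_LIMIT=16000000          -> 16000000 bytes
 *     RUBY_GC_MALLOC_LIMIT=16m               -> 16 * 1024 * 1024 bytes
 *     RUBY_GC_HEAP_GROWTH_MAX_SLOTS=0x10000  -> strtoll base 0: 0x/0 prefixes
 *
 * A value that fails to parse, overflows once the unit is applied, or does
 * not exceed lower_bound is ignored; when $VERBOSE is on the reason is
 * reported on stderr.
 */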
9325 
9326 static int
9327 get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
9328 {
9329  char *ptr = getenv(name);
9330  double val;
9331 
9332  if (ptr != NULL && *ptr) {
9333  char *end;
9334  val = strtod(ptr, &end);
9335  if (!*ptr || *end) {
9336  if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
9337  return 0;
9338  }
9339 
9340  if (accept_zero && val == 0.0) {
9341  goto accept;
9342  }
9343  else if (val <= lower_bound) {
9344  if (RTEST(ruby_verbose)) {
9345  fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
9346  name, val, *default_value, lower_bound);
9347  }
9348  }
9349  else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
9350  val > upper_bound) {
9351  if (RTEST(ruby_verbose)) {
9352  fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
9353  name, val, *default_value, upper_bound);
9354  }
9355  }
9356  else {
9357  accept:
9358  if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
9359  *default_value = val;
9360  return 1;
9361  }
9362  }
9363  return 0;
9364 }
9365 
9366 static void
9367 gc_set_initial_pages(void)
9368 {
9369  size_t min_pages;
9370  rb_objspace_t *objspace = &rb_objspace;
9371 
9372  min_pages = gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT;
9373  if (min_pages > heap_eden->total_pages) {
9374  heap_add_pages(objspace, heap_eden, min_pages - heap_eden->total_pages);
9375  }
9376 }
9377 
9378 /*
9379  * GC tuning environment variables
9380  *
9381  * * RUBY_GC_HEAP_INIT_SLOTS
9382  * - Initial allocation slots.
9383  * * RUBY_GC_HEAP_FREE_SLOTS
9384  * - Prepare at least this amount of slots after GC.
9385  * - Allocate slots if there are not enough slots.
9386  * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
9387  * - Allocate slots by this factor.
9388  * - (next slots number) = (current slots number) * (this factor)
9389  * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
9390  * - Allocation rate is limited to this number of slots.
9391  * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
9392  * - Allocate additional pages when the number of free slots is
9393  * lower than the value (total_slots * (this ratio)).
9394  * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
9395  * - Allocate slots to satisfy this formula:
9396  * free_slots = total_slots * goal_ratio
9397  * - In other words, prepare (total_slots * goal_ratio) free slots.
9398  * - if this value is 0.0, then use RUBY_GC_HEAP_GROWTH_FACTOR directly.
9399  * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
9400  * - Allow to free pages when the number of free slots is
9401  * greater than the value (total_slots * (this ratio)).
9402  * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
9403  * - Do full GC when the number of old objects is more than R * N
9404  * where R is this factor and
9405  * N is the number of old objects just after last full GC.
9406  *
9407  * * obsolete
9408  * * RUBY_FREE_MIN -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
9409  * * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
9410  *
9411  * * RUBY_GC_MALLOC_LIMIT
9412  * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
9413  * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
9414  *
9415  * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
9416  * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
9417  * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
9418  */
9419 
9420 void
 9421 ruby_gc_set_params(void)
9422 {
9423  /* RUBY_GC_HEAP_FREE_SLOTS */
9424  if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
9425  /* ok */
9426  }
9427  else if (get_envparam_size("RUBY_FREE_MIN", &gc_params.heap_free_slots, 0)) {
9428  rb_warn("RUBY_FREE_MIN is obsolete. Use RUBY_GC_HEAP_FREE_SLOTS instead.");
9429  }
9430 
9431  /* RUBY_GC_HEAP_INIT_SLOTS */
9432  if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
9433  gc_set_initial_pages();
9434  }
9435  else if (get_envparam_size("RUBY_HEAP_MIN_SLOTS", &gc_params.heap_init_slots, 0)) {
9436  rb_warn("RUBY_HEAP_MIN_SLOTS is obsolete. Use RUBY_GC_HEAP_INIT_SLOTS instead.");
9437  gc_set_initial_pages();
9438  }
9439 
9440  get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
9441  get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
9442  get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
9443  0.0, 1.0, FALSE);
9444  get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
9445  gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
9446  get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
 9447  gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
9448  get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
9449 
9450  get_envparam_size ("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
9451  get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
9452  if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
9453  gc_params.malloc_limit_max = SIZE_MAX;
9454  }
9455  get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
9456 
9457 #if RGENGC_ESTIMATE_OLDMALLOC
9458  if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
9459  rb_objspace_t *objspace = &rb_objspace;
9460  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
9461  }
9462  get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
9463  get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
9464 #endif
9465 }
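/* Example invocation exercising several of the knobs documented above (the
 * values are illustrative, not recommendations):
 *
 *     RUBY_GC_HEAP_INIT_SLOTS=600000 \
 *     RUBY_GC_HEAP_GROWTH_FACTOR=1.25 \
 *     RUBY_GC_MALLOC_LIMIT_MAX=64m \
 *     ruby -w app.rb
 *
 * When $VERBOSE is enabled (e.g. via -w), each accepted parameter is echoed
 * to stderr as "NAME=value (default value: ...)" by the helpers above.
 */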
9466 
9467 void
9468 rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
9469 {
9470  rb_objspace_t *objspace = &rb_objspace;
9471 
9472  if (is_markable_object(objspace, obj)) {
9473  struct mark_func_data_struct mfd;
9474  mfd.mark_func = func;
9475  mfd.data = data;
9476  PUSH_MARK_FUNC_DATA(&mfd);
9477  gc_mark_children(objspace, obj);
 9478  POP_MARK_FUNC_DATA();
9479  }
9480 }
9481 
 9482 struct root_objects_data {
9483  const char *category;
9484  void (*func)(const char *category, VALUE, void *);
9485  void *data;
9486 };
9487 
9488 static void
9489 root_objects_from(VALUE obj, void *ptr)
9490 {
9491  const struct root_objects_data *data = (struct root_objects_data *)ptr;
9492  (*data->func)(data->category, obj, data->data);
9493 }
9494 
9495 void
9496 rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
9497 {
9498  rb_objspace_t *objspace = &rb_objspace;
9499  objspace_reachable_objects_from_root(objspace, func, passing_data);
9500 }
9501 
9502 static void
9503 objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
9504 {
9505  struct root_objects_data data;
9506  struct mark_func_data_struct mfd;
9507 
9508  data.func = func;
9509  data.data = passing_data;
9510 
9511  mfd.mark_func = root_objects_from;
9512  mfd.data = &data;
9513 
9514  PUSH_MARK_FUNC_DATA(&mfd);
9515  gc_mark_roots(objspace, &data.category);
 9516  POP_MARK_FUNC_DATA();
9517 }
9518 
9519 /*
9520  ------------------------ Extended allocator ------------------------
9521 */
9522 
 9523 struct gc_raise_tag {
 9524  VALUE exc;
9525  const char *fmt;
 9526  va_list *ap;
9527 };
9528 
9529 static void *
9530 gc_vraise(void *ptr)
9531 {
9532  struct gc_raise_tag *argv = ptr;
9533  rb_vraise(argv->exc, argv->fmt, *argv->ap);
 9534  UNREACHABLE_RETURN(NULL);
9535 }
9536 
9537 static void
9538 gc_raise(VALUE exc, const char *fmt, ...)
9539 {
9540  va_list ap;
9541  va_start(ap, fmt);
9542  struct gc_raise_tag argv = {
9543  exc, fmt, &ap,
9544  };
9545 
9546  if (ruby_thread_has_gvl_p()) {
9547  gc_vraise(&argv);
9548  UNREACHABLE;
9549  }
9550  else if (ruby_native_thread_p()) {
9551  rb_thread_call_with_gvl(gc_vraise, &argv);
9552  UNREACHABLE;
9553  }
9554  else {
9555  /* Not in a ruby thread */
9556  fprintf(stderr, "%s", "[FATAL] ");
9557  vfprintf(stderr, fmt, ap);
9558  abort();
9559  }
9560 
9561  va_end(ap);
9562 }
9563 
9564 static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
9565 
9566 static void
9567 negative_size_allocation_error(const char *msg)
9568 {
9569  gc_raise(rb_eNoMemError, "%s", msg);
9570 }
9571 
9572 static void *
9573 ruby_memerror_body(void *dummy)
9574 {
9575  rb_memerror();
9576  return 0;
9577 }
9578 
9579 static void
9580 ruby_memerror(void)
9581 {
9582  if (ruby_thread_has_gvl_p()) {
9583  rb_memerror();
9584  }
9585  else {
9586  if (ruby_native_thread_p()) {
9587  rb_thread_call_with_gvl(ruby_memerror_body, 0);
9588  }
9589  else {
9590  /* no ruby thread */
9591  fprintf(stderr, "[FATAL] failed to allocate memory\n");
9592  exit(EXIT_FAILURE);
9593  }
9594  }
9595 }
9596 
9597 void
 9598 rb_memerror(void)
9599 {
 9600  rb_execution_context_t *ec = GET_EC();
9601  rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
9602  VALUE exc;
9603 
9604  if (0) {
 9605  // Print out the pid and sleep, so you can attach a debugger to see what went wrong:
9606  fprintf(stderr, "rb_memerror pid=%"PRI_PIDT_PREFIX"d\n", getpid());
9607  sleep(60);
9608  }
9609 
9610  if (during_gc) gc_exit(objspace, "rb_memerror");
9611 
9612  exc = nomem_error;
9613  if (!exc ||
 9614  rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
9615  fprintf(stderr, "[FATAL] failed to allocate memory\n");
9616  exit(EXIT_FAILURE);
9617  }
9618  if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
9619  rb_ec_raised_clear(ec);
9620  }
9621  else {
 9622  rb_ec_raised_set(ec, RAISED_NOMEMORY);
 9623  exc = ruby_vm_special_exception_copy(exc);
9624  }
9625  ec->errinfo = exc;
9626  EC_JUMP_TAG(ec, TAG_RAISE);
9627 }
9628 
9629 void *
9630 rb_aligned_malloc(size_t alignment, size_t size)
9631 {
9632  void *res;
9633 
9634 #if defined __MINGW32__
9635  res = __mingw_aligned_malloc(size, alignment);
9636 #elif defined _WIN32
9637  void *_aligned_malloc(size_t, size_t);
9638  res = _aligned_malloc(size, alignment);
9639 #elif defined(HAVE_POSIX_MEMALIGN)
9640  if (posix_memalign(&res, alignment, size) == 0) {
9641  return res;
9642  }
9643  else {
9644  return NULL;
9645  }
9646 #elif defined(HAVE_MEMALIGN)
9647  res = memalign(alignment, size);
9648 #else
9649  char* aligned;
9650  res = malloc(alignment + size + sizeof(void*));
9651  aligned = (char*)res + alignment + sizeof(void*);
9652  aligned -= ((VALUE)aligned & (alignment - 1));
9653  ((void**)aligned)[-1] = res;
9654  res = (void*)aligned;
9655 #endif
9656 
9657  /* alignment must be a power of 2 */
9658  GC_ASSERT(((alignment - 1) & alignment) == 0);
9659  GC_ASSERT(alignment % sizeof(void*) == 0);
9660  return res;
9661 }
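/* Worked example for the portable fallback branch above, with illustrative
 * addresses: alignment = 16, sizeof(void*) = 8, malloc returning 0x1004:
 *
 *     aligned  = 0x1004 + 16 + 8;              // 0x101C
 *     aligned -= aligned & (16 - 1);           // 0x1010, 16-byte aligned
 *     ((void **)0x1010)[-1] = (void *)0x1004;  // stash the raw pointer
 *
 * rb_aligned_free() (below) reads the stashed pointer back from just below
 * the aligned block and frees the original allocation.
 */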
9662 
9663 static void
9664 rb_aligned_free(void *ptr)
9665 {
9666 #if defined __MINGW32__
9667  __mingw_aligned_free(ptr);
9668 #elif defined _WIN32
9669  _aligned_free(ptr);
9670 #elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
9671  free(ptr);
9672 #else
9673  free(((void**)ptr)[-1]);
9674 #endif
9675 }
9676 
9677 static inline size_t
9678 objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
9679 {
9680 #ifdef HAVE_MALLOC_USABLE_SIZE
9681  return malloc_usable_size(ptr);
9682 #else
9683  return hint;
9684 #endif
9685 }
9686 
 9687 enum memop_type {
 9688  MEMOP_TYPE_MALLOC = 0,
 9689  MEMOP_TYPE_FREE,
 9690  MEMOP_TYPE_REALLOC
9691 };
9692 
9693 static inline void
9694 atomic_sub_nounderflow(size_t *var, size_t sub)
9695 {
9696  if (sub == 0) return;
9697 
9698  while (1) {
9699  size_t val = *var;
9700  if (val < sub) sub = val;
9701  if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
9702  }
9703 }
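/* The CAS loop above clamps rather than wraps: if *var == 3 and sub == 10,
 * sub is first reduced to 3 so *var ends at 0 instead of underflowing the
 * unsigned size_t. The compare-and-swap retries whenever another thread
 * updates *var between the read and the store.
 */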
9704 
9705 static void
9706 objspace_malloc_gc_stress(rb_objspace_t *objspace)
9707 {
 9708  if (ruby_gc_stressful && ruby_native_thread_p()) {
 9709  unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
 9710  GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
9711 
 9712  if (gc_stress_full_mark_after_malloc_p()) {
9713  reason |= GPR_FLAG_FULL_MARK;
9714  }
9715  garbage_collect_with_gvl(objspace, reason);
9716  }
9717 }
9718 
9719 static void
9720 objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
9721 {
9722  if (new_size > old_size) {
9723  ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
9724 #if RGENGC_ESTIMATE_OLDMALLOC
9725  ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
9726 #endif
9727  }
9728  else {
9729  atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
9730 #if RGENGC_ESTIMATE_OLDMALLOC
9731  atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
9732 #endif
9733  }
9734 
9735  if (type == MEMOP_TYPE_MALLOC) {
9736  retry:
 9737  if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc) {
 9738  if (ruby_thread_has_gvl_p() && is_lazy_sweeping(heap_eden)) {
9739  gc_rest(objspace); /* gc_rest can reduce malloc_increase */
9740  goto retry;
9741  }
9742  garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
9743  }
9744  }
9745 
9746 #if MALLOC_ALLOCATED_SIZE
9747  if (new_size >= old_size) {
9748  ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
9749  }
9750  else {
9751  size_t dec_size = old_size - new_size;
9752  size_t allocated_size = objspace->malloc_params.allocated_size;
9753 
9754 #if MALLOC_ALLOCATED_SIZE_CHECK
9755  if (allocated_size < dec_size) {
9756  rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
9757  }
9758 #endif
9759  atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
9760  }
9761 
9762  if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %d, old_size: %d\n",
9763  mem,
9764  type == MEMOP_TYPE_MALLOC ? "malloc" :
9765  type == MEMOP_TYPE_FREE ? "free " :
9766  type == MEMOP_TYPE_REALLOC ? "realloc": "error",
9767  (int)new_size, (int)old_size);
9768 
9769  switch (type) {
9770  case MEMOP_TYPE_MALLOC:
9771  ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
9772  break;
9773  case MEMOP_TYPE_FREE:
9774  {
9775  size_t allocations = objspace->malloc_params.allocations;
9776  if (allocations > 0) {
9777  atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
9778  }
9779 #if MALLOC_ALLOCATED_SIZE_CHECK
9780  else {
9781  GC_ASSERT(objspace->malloc_params.allocations > 0);
9782  }
9783 #endif
9784  }
9785  break;
9786  case MEMOP_TYPE_REALLOC: /* ignore */ break;
9787  }
9788 #endif
9789 }
9790 
9791 struct malloc_obj_info { /* 4 words */
9792  size_t size;
9793 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
9794  size_t gen;
9795  const char *file;
9796  size_t line;
9797 #endif
9798 };
9799 
9800 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
9801 const char *ruby_malloc_info_file;
9802 int ruby_malloc_info_line;
9803 #endif
9804 
9805 static inline size_t
9806 objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
9807 {
9808  if (size == 0) size = 1;
9809 
9810 #if CALC_EXACT_MALLOC_SIZE
9811  size += sizeof(struct malloc_obj_info);
9812 #endif
9813 
9814  return size;
9815 }
9816 
9817 static inline void *
9818 objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
9819 {
9820  size = objspace_malloc_size(objspace, mem, size);
9821  objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
9822 
9823 #if CALC_EXACT_MALLOC_SIZE
9824  {
9825  struct malloc_obj_info *info = mem;
9826  info->size = size;
9827 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
9828  info->gen = objspace->profile.count;
9829  info->file = ruby_malloc_info_file;
9830  info->line = info->file ? ruby_malloc_info_line : 0;
9831 #else
9832  info->file = NULL;
9833 #endif
9834  mem = info + 1;
9835  }
9836 #endif
9837 
9838  return mem;
9839 }
9840 
9841 #define TRY_WITH_GC(alloc) do { \
9842  objspace_malloc_gc_stress(objspace); \
9843  if (!(alloc) && \
9844  (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
9845  GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
9846  GPR_FLAG_MALLOC) || \
9847  !(alloc))) { \
9848  ruby_memerror(); \
9849  } \
9850  } while (0)
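/* TRY_WITH_GC encodes the "retry once after a full GC" allocation protocol.
 * Expanded informally:
 *
 *     objspace_malloc_gc_stress(objspace);     // optional stress-mode GC
 *     mem = malloc(size);                      // first attempt
 *     if (!mem) {
 *         if (!garbage_collect_with_gvl(...))  // full, immediate GC
 *             ruby_memerror();                 // GC not possible -> NoMemoryError
 *         mem = malloc(size);                  // second attempt
 *         if (!mem) ruby_memerror();           // still failing -> NoMemoryError
 *     }
 */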
9851 
 9852 /* These functions shouldn't be called directly:
 9853  * the objspace_* functions do not check the allocation size.
 9854  */
9855 static void *
9856 objspace_xmalloc0(rb_objspace_t *objspace, size_t size)
9857 {
9858  void *mem;
9859 
9860  size = objspace_malloc_prepare(objspace, size);
9861  TRY_WITH_GC(mem = malloc(size));
9862  RB_DEBUG_COUNTER_INC(heap_xmalloc);
9863  return objspace_malloc_fixup(objspace, mem, size);
9864 }
9865 
9866 static inline size_t
9867 xmalloc2_size(const size_t count, const size_t elsize)
9868 {
9869  return size_mul_or_raise(count, elsize, rb_eArgError);
9870 }
9871 
9872 static void *
9873 objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
9874 {
9875  void *mem;
9876 
9877  if (!ptr) return objspace_xmalloc0(objspace, new_size);
9878 
9879  /*
9880  * The behavior of realloc(ptr, 0) is implementation defined.
 9881  * Therefore we don't use realloc(ptr, 0), for portability reasons.
9882  * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
9883  */
9884  if (new_size == 0) {
9885  if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
9886  /*
9887  * - OpenBSD's malloc(3) man page says that when 0 is passed, it
9888  * returns a non-NULL pointer to an access-protected memory page.
 9889  * The returned pointer cannot be read or written at all, but it
 9890  * is still a valid argument to free().
 9891  *
 9892  * https://man.openbsd.org/malloc.3
 9893  *
 9894  * - Linux's malloc(3) man page says that it _might_ return a
 9895  * non-NULL pointer when its argument is 0. That return value
 9896  * is safe (and is expected) to be passed to free().
 9897  *
 9898  * http://man7.org/linux/man-pages/man3/malloc.3.html
 9899  *
 9900  * - As far as its implementation reads, jemalloc's malloc() returns a
 9901  * fully normal 16-byte memory region when its argument is 0.
 9902  *
 9903  * - As far as its implementation reads, musl libc's malloc() returns a
 9904  * fully normal 32-byte memory region when its argument is 0.
9905  *
9906  * - Other malloc implementations can also return non-NULL.
9907  */
9908  objspace_xfree(objspace, ptr, old_size);
9909  return mem;
9910  }
9911  else {
9912  /*
9913  * It is dangerous to return NULL here, because that could lead to
9914  * RCE. Fallback to 1 byte instead of zero.
9915  *
9916  * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11932
9917  */
9918  new_size = 1;
9919  }
9920  }
9921 
9922 #if CALC_EXACT_MALLOC_SIZE
9923  {
9924  struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
9925  new_size += sizeof(struct malloc_obj_info);
9926  ptr = info;
9927  old_size = info->size;
9928  }
9929 #endif
9930 
9931  old_size = objspace_malloc_size(objspace, ptr, old_size);
9932  TRY_WITH_GC(mem = realloc(ptr, new_size));
9933  new_size = objspace_malloc_size(objspace, mem, new_size);
9934 
9935 #if CALC_EXACT_MALLOC_SIZE
9936  {
9937  struct malloc_obj_info *info = mem;
9938  info->size = new_size;
9939  mem = info + 1;
9940  }
9941 #endif
9942 
9943  objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
9944 
9945  RB_DEBUG_COUNTER_INC(heap_xrealloc);
9946  return mem;
9947 }
9948 
9949 #if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
9950 
9951 #define MALLOC_INFO_GEN_SIZE 100
9952 #define MALLOC_INFO_SIZE_SIZE 10
9953 static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
9954 static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
9955 static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
9956 static st_table *malloc_info_file_table;
9957 
9958 static int
9959 mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
9960 {
9961  const char *file = (void *)key;
9962  const size_t *data = (void *)val;
9963 
9964  fprintf(stderr, "%s\t%d\t%d\n", file, (int)data[0], (int)data[1]);
9965 
9966  return ST_CONTINUE;
9967 }
9968 
9969 __attribute__((destructor))
9970 void
 9971 rb_malloc_info_show_results(void)
9972 {
9973  int i;
9974 
9975  fprintf(stderr, "* malloc_info gen statistics\n");
9976  for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
9977  if (i == MALLOC_INFO_GEN_SIZE-1) {
9978  fprintf(stderr, "more\t%d\t%d\n", (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
9979  }
9980  else {
9981  fprintf(stderr, "%d\t%d\t%d\n", i, (int)malloc_info_gen_cnt[i], (int)malloc_info_gen_size[i]);
9982  }
9983  }
9984 
9985  fprintf(stderr, "* malloc_info size statistics\n");
9986  for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
9987  int s = 16 << i;
9988  fprintf(stderr, "%d\t%d\n", (int)s, (int)malloc_info_size[i]);
9989  }
9990  fprintf(stderr, "more\t%d\n", (int)malloc_info_size[i]);
9991 
9992  if (malloc_info_file_table) {
9993  fprintf(stderr, "* malloc_info file statistics\n");
9994  st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
9995  }
9996 }
9997 #else
9998 void
 9999 rb_malloc_info_show_results(void)
10000 {
10001 }
10002 #endif
10003 
10004 static void
10005 objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
10006 {
10007  if (!ptr) {
10008  /*
10009  * ISO/IEC 9899 says "If ptr is a null pointer, no action occurs" since
 10010  * its first version. We had better follow suit.
10011  */
10012  return;
10013  }
10014 #if CALC_EXACT_MALLOC_SIZE
10015  struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
10016  ptr = info;
10017  old_size = info->size;
10018 
10019 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
10020  {
10021  int gen = (int)(objspace->profile.count - info->gen);
10022  int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
10023  int i;
10024 
10025  malloc_info_gen_cnt[gen_index]++;
10026  malloc_info_gen_size[gen_index] += info->size;
10027 
10028  for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
10029  size_t s = 16 << i;
10030  if (info->size <= s) {
10031  malloc_info_size[i]++;
10032  goto found;
10033  }
10034  }
10035  malloc_info_size[i]++;
10036  found:;
10037 
10038  {
10039  st_data_t key = (st_data_t)info->file;
10040  size_t *data;
10041 
10042  if (malloc_info_file_table == NULL) {
10043  malloc_info_file_table = st_init_numtable_with_size(1024);
10044  }
10045  if (st_lookup(malloc_info_file_table, key, (st_data_t *)&data)) {
10046  /* hit */
10047  }
10048  else {
10049  data = malloc(xmalloc2_size(2, sizeof(size_t)));
10050  if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
10051  data[0] = data[1] = 0;
10052  st_insert(malloc_info_file_table, key, (st_data_t)data);
10053  }
10054  data[0] ++;
10055  data[1] += info->size;
10056  };
10057 #if 0 /* verbose output */
10058  if (gen >= 2) {
10059  if (info->file) {
10060  fprintf(stderr, "free - size:%d, gen:%d, pos: %s:%d\n", (int)info->size, gen, info->file, (int)info->line);
10061  }
10062  else {
10063  fprintf(stderr, "free - size:%d, gen:%d\n", (int)info->size, gen);
10064  }
10065  }
10066 #endif
10067  }
10068 #endif
10069 #endif
10070  old_size = objspace_malloc_size(objspace, ptr, old_size);
10071 
10072  free(ptr);
10073  RB_DEBUG_COUNTER_INC(heap_xfree);
10074 
10075  objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE);
10076 }
10077 
10078 static void *
10079 ruby_xmalloc0(size_t size)
10080 {
10081  return objspace_xmalloc0(&rb_objspace, size);
10082 }
10083 
10084 void *
 10085 ruby_xmalloc_body(size_t size)
10086 {
10087  if ((ssize_t)size < 0) {
10088  negative_size_allocation_error("too large allocation size");
10089  }
10090  return ruby_xmalloc0(size);
10091 }
10092 
10093 void
10094 ruby_malloc_size_overflow(size_t count, size_t elsize)
 10095 {
 10096  rb_raise(rb_eArgError,
10095 {
10097  "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
10098  count, elsize);
10099 }
10100 
10101 void *
10102 ruby_xmalloc2_body(size_t n, size_t size)
10103 {
10104  return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
10105 }
10106 
10107 static void *
10108 objspace_xcalloc(rb_objspace_t *objspace, size_t size)
10109 {
10110  void *mem;
10111 
10112  size = objspace_malloc_prepare(objspace, size);
10113  TRY_WITH_GC(mem = calloc1(size));
10114  return objspace_malloc_fixup(objspace, mem, size);
10115 }
10116 
10117 void *
10118 ruby_xcalloc_body(size_t n, size_t size)
10119 {
10120  return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
10121 }
10122 
10123 #ifdef ruby_sized_xrealloc
10124 #undef ruby_sized_xrealloc
10125 #endif
10126 void *
10127 ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
10128 {
10129  if ((ssize_t)new_size < 0) {
10130  negative_size_allocation_error("too large allocation size");
10131  }
10132 
10133  return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
10134 }
10135 
10136 void *
10137 ruby_xrealloc_body(void *ptr, size_t new_size)
10138 {
10139  return ruby_sized_xrealloc(ptr, new_size, 0);
10140 }
10141 
10142 #ifdef ruby_sized_xrealloc2
10143 #undef ruby_sized_xrealloc2
10144 #endif
10145 void *
10146 ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
10147 {
10148  size_t len = xmalloc2_size(n, size);
10149  return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
10150 }
10151 
10152 void *
10153 ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
10154 {
10155  return ruby_sized_xrealloc2(ptr, n, size, 0);
10156 }
10157 
10158 #ifdef ruby_sized_xfree
10159 #undef ruby_sized_xfree
10160 #endif
10161 void
10162 ruby_sized_xfree(void *x, size_t size)
10163 {
10164  if (x) {
10165  objspace_xfree(&rb_objspace, x, size);
10166  }
10167 }
10168 
10169 void
10170 ruby_xfree(void *x)
10171 {
10172  ruby_sized_xfree(x, 0);
10173 }
10174 
10175 void *
10176 rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
10177 {
10178  size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
10179  return ruby_xmalloc(w);
10180 }
10181 
10182 void *
10183 rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
10184 {
10185  size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
10186  return ruby_xrealloc((void *)p, w);
10187 }
10188 
10189 void *
10190 rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
10191 {
10192  size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
10193  return ruby_xmalloc(u);
10194 }
10195 
10196 void *
10197 rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
10198 {
10199  size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
10200  return ruby_xcalloc(u, 1);
10201 }
10202 
 10203 /* Mimics ruby_xmalloc, but does not require rb_objspace;
 10204  * returns a pointer suitable for passing to ruby_xfree.
 10205  */
10206 void *
 10207 ruby_mimmalloc(size_t size)
10208 {
10209  void *mem;
10210 #if CALC_EXACT_MALLOC_SIZE
10211  size += sizeof(struct malloc_obj_info);
10212 #endif
10213  mem = malloc(size);
10214 #if CALC_EXACT_MALLOC_SIZE
10215  if (!mem) {
10216  return NULL;
10217  }
10218  else
10219  /* set 0 for consistency of allocated_size/allocations */
10220  {
10221  struct malloc_obj_info *info = mem;
10222  info->size = 0;
10223 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
10224  info->gen = 0;
10225  info->file = NULL;
10226  info->line = 0;
10227 #else
10228  info->file = NULL;
10229 #endif
10230  mem = info + 1;
10231  }
10232 #endif
10233  return mem;
10234 }
10235 
10236 void
 10237 ruby_mimfree(void *ptr)
10238 {
10239 #if CALC_EXACT_MALLOC_SIZE
10240  struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
10241  ptr = info;
10242 #endif
10243  free(ptr);
10244 }
10245 
10246 void *
10247 rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
10248 {
10249  void *ptr;
10250  VALUE imemo;
10251  rb_imemo_tmpbuf_t *tmpbuf;
10252 
 10253  /* Keep the order: allocate an empty imemo first, then xmalloc; done the
 10254  * other way around, a failing imemo allocation would leak the buffer */
10255  imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
10256  *store = imemo;
10257  ptr = ruby_xmalloc0(size);
10258  tmpbuf = (rb_imemo_tmpbuf_t *)imemo;
10259  tmpbuf->ptr = ptr;
10260  tmpbuf->cnt = cnt;
10261  return ptr;
10262 }
10263 
10264 void *
10265 rb_alloc_tmp_buffer(volatile VALUE *store, long len)
10266 {
10267  long cnt;
10268 
10269  if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
10270  rb_raise(rb_eArgError, "negative buffer size (or size too big)");
10271  }
10272 
10273  return rb_alloc_tmp_buffer_with_count(store, len, cnt);
10274 }
10275 
10276 void
10277 rb_free_tmp_buffer(volatile VALUE *store)
10278 {
 10279  rb_imemo_tmpbuf_t *s = (rb_imemo_tmpbuf_t*)ATOMIC_VALUE_EXCHANGE(*store, 0);
10280  if (s) {
10281  void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
10282  s->cnt = 0;
10283  ruby_xfree(ptr);
10284  }
10285 }
10286 
10287 #if MALLOC_ALLOCATED_SIZE
10288 /*
10289  * call-seq:
10290  * GC.malloc_allocated_size -> Integer
10291  *
10292  * Returns the size of memory allocated by malloc().
10293  *
10294  * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
10295  */
10296 
10297 static VALUE
10298 gc_malloc_allocated_size(VALUE self)
10299 {
10300  return UINT2NUM(rb_objspace.malloc_params.allocated_size);
10301 }
10302 
10303 /*
10304  * call-seq:
10305  * GC.malloc_allocations -> Integer
10306  *
10307  * Returns the number of malloc() allocations.
10308  *
10309  * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
10310  */
10311 
10312 static VALUE
10313 gc_malloc_allocations(VALUE self)
10314 {
10315  return UINT2NUM(rb_objspace.malloc_params.allocations);
10316 }
10317 #endif
10318 
10319 void
 10320 rb_gc_adjust_memory_usage(ssize_t diff)
10321 {
10322  rb_objspace_t *objspace = &rb_objspace;
10323  if (diff > 0) {
10324  objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
10325  }
10326  else if (diff < 0) {
10327  objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
10328  }
10329 }
10330 
10331 /*
10332  ------------------------------ WeakMap ------------------------------
10333 */
10334 
10335 struct weakmap {
10336  st_table *obj2wmap; /* obj -> [ref,...] */
10337  st_table *wmap2obj; /* ref -> obj */
10338  VALUE final;
10339 };
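/* As the field comments above note, obj2wmap maps a referenced object to the
 * keys that weakly reference it, and wmap2obj maps each key back to its
 * object; `final` is the finalizer (wmap_finalize below) that prunes both
 * tables when either side dies. A minimal C-level round trip (illustrative;
 * WeakMap is normally used from Ruby):
 *
 *     VALUE wm  = rb_eval_string("ObjectSpace::WeakMap.new");
 *     VALUE key = rb_ary_new(), val = rb_ary_new();
 *     rb_funcall(wm, rb_intern("[]="), 2, key, val);
 *     VALUE got = rb_funcall(wm, rb_intern("[]"), 1, key); // val, while alive
 */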
10340 
10341 #define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
10342 
10343 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK
10344 static int
10345 wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
10346 {
10347  rb_objspace_t *objspace = (rb_objspace_t *)arg;
10348  VALUE obj = (VALUE)val;
10349  if (!is_live_object(objspace, obj)) return ST_DELETE;
10350  return ST_CONTINUE;
10351 }
10352 #endif
10353 
10354 static void
10355 wmap_compact(void *ptr)
10356 {
10357  struct weakmap *w = ptr;
 10358  if (w->wmap2obj) rb_gc_update_tbl_refs(w->wmap2obj);
 10359  if (w->obj2wmap) rb_gc_update_tbl_refs(w->obj2wmap);
10360  w->final = rb_gc_location(w->final);
10361 }
10362 
10363 static void
10364 wmap_mark(void *ptr)
10365 {
10366  struct weakmap *w = ptr;
10367 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK
10368  if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
10369 #endif
 10370  rb_gc_mark_movable(w->final);
10371 }
10372 
10373 static int
10374 wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
10375 {
10376  VALUE *ptr = (VALUE *)val;
10377  ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
10378  return ST_CONTINUE;
10379 }
10380 
10381 static void
10382 wmap_free(void *ptr)
10383 {
10384  struct weakmap *w = ptr;
10385  st_foreach(w->obj2wmap, wmap_free_map, 0);
10386  st_free_table(w->obj2wmap);
10387  st_free_table(w->wmap2obj);
10388 }
10389 
10390 static int
10391 wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
10392 {
10393  VALUE *ptr = (VALUE *)val;
10394  *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
10395  return ST_CONTINUE;
10396 }
10397 
10398 static size_t
10399 wmap_memsize(const void *ptr)
10400 {
10401  size_t size;
10402  const struct weakmap *w = ptr;
10403  size = sizeof(*w);
10404  size += st_memsize(w->obj2wmap);
10405  size += st_memsize(w->wmap2obj);
10406  st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
10407  return size;
10408 }
10409 
10410 static const rb_data_type_t weakmap_type = {
10411  "weakmap",
10412  {
10413  wmap_mark,
10414  wmap_free,
10415  wmap_memsize,
10416  wmap_compact,
10417  },
 10418  0, 0, RUBY_TYPED_FREE_IMMEDIATELY
10419 };
10420 
10421 extern const struct st_hash_type rb_hashtype_ident;
10422 static VALUE wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self));
10423 
10424 static VALUE
10425 wmap_allocate(VALUE klass)
10426 {
10427  struct weakmap *w;
10428  VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
 10429  w->obj2wmap = st_init_table(&rb_hashtype_ident);
 10430  w->wmap2obj = st_init_table(&rb_hashtype_ident);
10431  w->final = rb_func_lambda_new(wmap_finalize, obj, 1, 1);
10432  return obj;
10433 }
10434 
10435 static int
10436 wmap_live_p(rb_objspace_t *objspace, VALUE obj)
10437 {
10438  if (!FL_ABLE(obj)) return TRUE;
10439  if (!is_id_value(objspace, obj)) return FALSE;
10440  if (!is_live_object(objspace, obj)) return FALSE;
10441  return TRUE;
10442 }
10443 
10444 static int
10445 wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
10446 {
10447  VALUE wmap, *ptr, size, i, j;
10448  if (!existing) return ST_STOP;
10449  wmap = (VALUE)arg, ptr = (VALUE *)*value;
10450  for (i = j = 1, size = ptr[0]; i <= size; ++i) {
10451  if (ptr[i] != wmap) {
10452  ptr[j++] = ptr[i];
10453  }
10454  }
10455  if (j == 1) {
10456  ruby_sized_xfree(ptr, i * sizeof(VALUE));
10457  return ST_DELETE;
10458  }
10459  if (j < i) {
10460  SIZED_REALLOC_N(ptr, VALUE, j + 1, i);
10461  ptr[0] = j;
10462  *value = (st_data_t)ptr;
10463  }
10464  return ST_CONTINUE;
10465 }
10466 
10467 /* :nodoc: */
10468 static VALUE
10469 wmap_finalize(RB_BLOCK_CALL_FUNC_ARGLIST(objid, self))
10470 {
10471  st_data_t orig, wmap, data;
10472  VALUE obj, *rids, i, size;
10473  struct weakmap *w;
10474 
10475  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10476  /* Get reference from object id. */
10477  if ((obj = id2ref_obj_tbl(&rb_objspace, objid)) == Qundef) {
10478  rb_bug("wmap_finalize: objid is not found.");
10479  }
10480 
10481  /* obj is original referenced object and/or weak reference. */
10482  orig = (st_data_t)obj;
10483  if (st_delete(w->obj2wmap, &orig, &data)) {
10484  rids = (VALUE *)data;
10485  size = *rids++;
10486  for (i = 0; i < size; ++i) {
10487  wmap = (st_data_t)rids[i];
10488  st_delete(w->wmap2obj, &wmap, NULL);
10489  }
10490  ruby_sized_xfree((VALUE *)data, (size + 1) * sizeof(VALUE));
10491  }
10492 
10493  wmap = (st_data_t)obj;
10494  if (st_delete(w->wmap2obj, &wmap, &orig)) {
10495  wmap = (st_data_t)obj;
10496  st_update(w->obj2wmap, orig, wmap_final_func, wmap);
10497  }
10498  return self;
10499 }
10500 
10501 struct wmap_iter_arg {
10502  rb_objspace_t *objspace;
10503  VALUE value;
10504 };
10505 
10506 static int
10507 wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
10508 {
10509  VALUE str = (VALUE)arg;
10510  VALUE k = (VALUE)key, v = (VALUE)val;
10511 
10512  if (RSTRING_PTR(str)[0] == '#') {
10513  rb_str_cat2(str, ", ");
10514  }
10515  else {
10516  rb_str_cat2(str, ": ");
10517  RSTRING_PTR(str)[0] = '#';
10518  }
10519  k = SPECIAL_CONST_P(k) ? rb_inspect(k) : rb_any_to_s(k);
10520  rb_str_append(str, k);
10521  rb_str_cat2(str, " => ");
10522  v = SPECIAL_CONST_P(v) ? rb_inspect(v) : rb_any_to_s(v);
10523  rb_str_append(str, v);
10524 
10525  return ST_CONTINUE;
10526 }
10527 
10528 static VALUE
10529 wmap_inspect(VALUE self)
10530 {
10531  VALUE str;
10532  VALUE c = rb_class_name(CLASS_OF(self));
10533  struct weakmap *w;
10534 
10535  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10536  str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);
10537  if (w->wmap2obj) {
10538  st_foreach(w->wmap2obj, wmap_inspect_i, str);
10539  }
10540  RSTRING_PTR(str)[0] = '#';
10541  rb_str_cat2(str, ">");
10542  return str;
10543 }
10544 
10545 static int
10546 wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
10547 {
10548  rb_objspace_t *objspace = (rb_objspace_t *)arg;
10549  VALUE obj = (VALUE)val;
10550  if (wmap_live_p(objspace, obj)) {
10551  rb_yield_values(2, (VALUE)key, obj);
10552  }
10553  return ST_CONTINUE;
10554 }
10555 
10556 /* Iterates over the live key/value pairs in the weak map */
10557 static VALUE
10558 wmap_each(VALUE self)
10559 {
10560  struct weakmap *w;
10561  rb_objspace_t *objspace = &rb_objspace;
10562 
10563  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10564  st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
10565  return self;
10566 }
10567 
10568 static int
10569 wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
10570 {
10571  rb_objspace_t *objspace = (rb_objspace_t *)arg;
10572  VALUE obj = (VALUE)val;
10573  if (wmap_live_p(objspace, obj)) {
10574  rb_yield((VALUE)key);
10575  }
10576  return ST_CONTINUE;
10577 }
10578 
10579 /* Iterates over the live keys in the weak map */
10580 static VALUE
10581 wmap_each_key(VALUE self)
10582 {
10583  struct weakmap *w;
10584  rb_objspace_t *objspace = &rb_objspace;
10585 
10586  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10587  st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
10588  return self;
10589 }
10590 
10591 static int
10592 wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
10593 {
10594  rb_objspace_t *objspace = (rb_objspace_t *)arg;
10595  VALUE obj = (VALUE)val;
10596  if (wmap_live_p(objspace, obj)) {
10597  rb_yield(obj);
10598  }
10599  return ST_CONTINUE;
10600 }
10601 
10602 /* Iterates over the live values in the weak map */
10603 static VALUE
10604 wmap_each_value(VALUE self)
10605 {
10606  struct weakmap *w;
10607  rb_objspace_t *objspace = &rb_objspace;
10608 
10609  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10610  st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
10611  return self;
10612 }
10613 
10614 static int
10615 wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
10616 {
10617  struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
10618  rb_objspace_t *objspace = argp->objspace;
10619  VALUE ary = argp->value;
10620  VALUE obj = (VALUE)val;
10621  if (wmap_live_p(objspace, obj)) {
10622  rb_ary_push(ary, (VALUE)key);
10623  }
10624  return ST_CONTINUE;
10625 }
10626 
10627 /* Returns an Array of the live keys in the weak map */
10628 static VALUE
10629 wmap_keys(VALUE self)
10630 {
10631  struct weakmap *w;
10632  struct wmap_iter_arg args;
10633 
10634  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10635  args.objspace = &rb_objspace;
10636  args.value = rb_ary_new();
10637  st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
10638  return args.value;
10639 }
10640 
10641 static int
10642 wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
10643 {
10644  struct wmap_iter_arg *argp = (struct wmap_iter_arg *)arg;
10645  rb_objspace_t *objspace = argp->objspace;
10646  VALUE ary = argp->value;
10647  VALUE obj = (VALUE)val;
10648  if (wmap_live_p(objspace, obj)) {
10649  rb_ary_push(ary, obj);
10650  }
10651  return ST_CONTINUE;
10652 }
10653 
10654 /* Returns an Array of the live values in the weak map */
10655 static VALUE
10656 wmap_values(VALUE self)
10657 {
10658  struct weakmap *w;
10659  struct wmap_iter_arg args;
10660 
10661  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10662  args.objspace = &rb_objspace;
10663  args.value = rb_ary_new();
10664  st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
10665  return args.value;
10666 }
10667 
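/* st_update callback for wmap_aset below. Each value in obj2wmap is an
 * xmalloc'ed VALUE array laid out as { count, key_1, ..., key_count }:
 * ptr[0] holds the number of weak-map keys referencing the object and
 * ptr[1..count] the keys themselves; adding a new pair appends the key. */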
10668 static int
10669 wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
10670 {
10671  VALUE size, *ptr, *optr;
10672  if (existing) {
10673  size = (ptr = optr = (VALUE *)*val)[0];
10674  ++size;
10675  SIZED_REALLOC_N(ptr, VALUE, size + 1, size);
10676  }
10677  else {
10678  optr = 0;
10679  size = 1;
10680  ptr = ruby_xmalloc0(2 * sizeof(VALUE));
10681  }
10682  ptr[0] = size;
10683  ptr[size] = (VALUE)arg;
10684  if (ptr == optr) return ST_STOP;
10685  *val = (st_data_t)ptr;
10686  return ST_CONTINUE;
10687 }
10688 
10689 /* Creates a weak reference from the given key to the given value */
10690 static VALUE
10691 wmap_aset(VALUE self, VALUE wmap, VALUE orig)
10692 {
10693  struct weakmap *w;
10694 
10695  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10696  if (FL_ABLE(orig)) {
10697  define_final0(orig, w->final);
10698  }
10699  if (FL_ABLE(wmap)) {
10700  define_final0(wmap, w->final);
10701  }
10702 
10703  st_update(w->obj2wmap, (st_data_t)orig, wmap_aset_update, wmap);
10704  st_insert(w->wmap2obj, (st_data_t)wmap, (st_data_t)orig);
10705  return nonspecial_obj_id(orig);
10706 }
10707 
10708 /* Retrieves a weakly referenced object with the given key */
10709 static VALUE
10710 wmap_lookup(VALUE self, VALUE key)
10711 {
10712  st_data_t data;
10713  VALUE obj;
10714  struct weakmap *w;
10715  rb_objspace_t *objspace = &rb_objspace;
10716 
10717  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10718  if (!st_lookup(w->wmap2obj, (st_data_t)key, &data)) return Qundef;
10719  obj = (VALUE)data;
10720  if (!wmap_live_p(objspace, obj)) return Qundef;
10721  return obj;
10722 }
10723 
10724 /* Retrieves a weakly referenced object with the given key */
10725 static VALUE
10726 wmap_aref(VALUE self, VALUE key)
10727 {
10728  VALUE obj = wmap_lookup(self, key);
10729  return obj != Qundef ? obj : Qnil;
10730 }
10731 
10732 /* Returns +true+ if +key+ is registered */
10733 static VALUE
10734 wmap_has_key(VALUE self, VALUE key)
10735 {
10736  return wmap_lookup(self, key) == Qundef ? Qfalse : Qtrue;
10737 }
10738 
10739 /* Returns the number of referenced objects */
10740 static VALUE
10741 wmap_size(VALUE self)
10742 {
10743  struct weakmap *w;
10744  st_index_t n;
10745 
10746  TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
10747  n = w->wmap2obj->num_entries;
10748 #if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
10749  return ULONG2NUM(n);
10750 #else
10751  return ULL2NUM(n);
10752 #endif
10753 }
10754 
10755 /*
10756  ------------------------------ GC profiler ------------------------------
10757 */
10758 
10759 #define GC_PROFILE_RECORD_DEFAULT_SIZE 100
10760 
10761 /* Returns the current process's user CPU time, in seconds */
10762 static double
10763 getrusage_time(void)
10764 {
10765 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
10766  {
10767  static int try_clock_gettime = 1;
10768  struct timespec ts;
10769  if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
10770  return ts.tv_sec + ts.tv_nsec * 1e-9;
10771  }
10772  else {
10773  try_clock_gettime = 0;
10774  }
10775  }
10776 #endif
10777 
10778 #ifdef RUSAGE_SELF
10779  {
10780  struct rusage usage;
10781  struct timeval time;
10782  if (getrusage(RUSAGE_SELF, &usage) == 0) {
10783  time = usage.ru_utime;
10784  return time.tv_sec + time.tv_usec * 1e-6;
10785  }
10786  }
10787 #endif
10788 
10789 #ifdef _WIN32
10790  {
10791  FILETIME creation_time, exit_time, kernel_time, user_time;
10792  ULARGE_INTEGER ui;
10793  LONG_LONG q;
10794  double t;
10795 
10796  if (GetProcessTimes(GetCurrentProcess(),
10797  &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
10798  memcpy(&ui, &user_time, sizeof(FILETIME));
10799  q = ui.QuadPart / 10L;
10800  t = (DWORD)(q % 1000000L) * 1e-6;
10801  q /= 1000000L;
10802 #ifdef __GNUC__
10803  t += q;
10804 #else
10805  t += (double)(DWORD)(q >> 16) * (1 << 16);
10806  t += (DWORD)q & ~(~0 << 16);
10807 #endif
10808  return t;
10809  }
10810  }
10811 #endif
10812 
10813  return 0.0;
10814 }
10815 
10816 static inline void
10817 gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
10818 {
10819  if (objspace->profile.run) {
10820  size_t index = objspace->profile.next_index;
10821  gc_profile_record *record;
10822 
10823  /* create new record */
10824  objspace->profile.next_index++;
10825 
10826  if (!objspace->profile.records) {
10827  objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
10828  objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
10829  }
10830  if (index >= objspace->profile.size) {
10831  void *ptr;
10832  objspace->profile.size += 1000;
10833  ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
10834  if (!ptr) rb_memerror();
10835  objspace->profile.records = ptr;
10836  }
10837  if (!objspace->profile.records) {
10838  rb_bug("gc_profile malloc or realloc miss");
10839  }
10840  record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
10841  MEMZERO(record, gc_profile_record, 1);
10842 
10843  /* setup before-GC parameter */
10844  record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
10845 #if MALLOC_ALLOCATED_SIZE
10846  record->allocated_size = malloc_allocated_size;
10847 #endif
10848 #if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
10849 #ifdef RUSAGE_SELF
10850  {
10851  struct rusage usage;
10852  if (getrusage(RUSAGE_SELF, &usage) == 0) {
10853  record->maxrss = usage.ru_maxrss;
10854  record->minflt = usage.ru_minflt;
10855  record->majflt = usage.ru_majflt;
10856  }
10857  }
10858 #endif
10859 #endif
10860  }
10861 }
10862 
10863 static inline void
10864 gc_prof_timer_start(rb_objspace_t *objspace)
10865 {
10866  if (gc_prof_enabled(objspace)) {
10867  gc_profile_record *record = gc_prof_record(objspace);
10868 #if GC_PROFILE_MORE_DETAIL
10869  record->prepare_time = objspace->profile.prepare_time;
10870 #endif
10871  record->gc_time = 0;
10872  record->gc_invoke_time = getrusage_time();
10873  }
10874 }
10875 
10876 static double
10877 elapsed_time_from(double time)
10878 {
10879  double now = getrusage_time();
10880  if (now > time) {
10881  return now - time;
10882  }
10883  else {
10884  return 0;
10885  }
10886 }
10887 
10888 static inline void
10889 gc_prof_timer_stop(rb_objspace_t *objspace)
10890 {
10891  if (gc_prof_enabled(objspace)) {
10892  gc_profile_record *record = gc_prof_record(objspace);
10893  record->gc_time = elapsed_time_from(record->gc_invoke_time);
10894  record->gc_invoke_time -= objspace->profile.invoke_time;
10895  }
10896 }
10897 
10898 #define RUBY_DTRACE_GC_HOOK(name) \
10899  do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
10900 static inline void
10901 gc_prof_mark_timer_start(rb_objspace_t *objspace)
10902 {
10903  RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
10904 #if GC_PROFILE_MORE_DETAIL
10905  if (gc_prof_enabled(objspace)) {
10906  gc_prof_record(objspace)->gc_mark_time = getrusage_time();
10907  }
10908 #endif
10909 }
10910 
10911 static inline void
10912 gc_prof_mark_timer_stop(rb_objspace_t *objspace)
10913 {
10914  RUBY_DTRACE_GC_HOOK(MARK_END);
10915 #if GC_PROFILE_MORE_DETAIL
10916  if (gc_prof_enabled(objspace)) {
10917  gc_profile_record *record = gc_prof_record(objspace);
10918  record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
10919  }
10920 #endif
10921 }
10922 
10923 static inline void
10924 gc_prof_sweep_timer_start(rb_objspace_t *objspace)
10925 {
10926  RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
10927  if (gc_prof_enabled(objspace)) {
10928  gc_profile_record *record = gc_prof_record(objspace);
10929 
10930  if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
10931  objspace->profile.gc_sweep_start_time = getrusage_time();
10932  }
10933  }
10934 }
10935 
10936 static inline void
10937 gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
10938 {
10939  RUBY_DTRACE_GC_HOOK(SWEEP_END);
10940 
10941  if (gc_prof_enabled(objspace)) {
10942  double sweep_time;
10943  gc_profile_record *record = gc_prof_record(objspace);
10944 
10945  if (record->gc_time > 0) {
10946  sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
10947  /* need to accumulate GC time for lazy sweep after gc() */
10948  record->gc_time += sweep_time;
10949  }
10950  else if (GC_PROFILE_MORE_DETAIL) {
10951  sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
10952  }
10953 
10954 #if GC_PROFILE_MORE_DETAIL
10955  record->gc_sweep_time += sweep_time;
10956  if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
10957 #endif
10958  if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
10959  }
10960 }
10961 
10962 static inline void
10963 gc_prof_set_malloc_info(rb_objspace_t *objspace)
10964 {
10965 #if GC_PROFILE_MORE_DETAIL
10966  if (gc_prof_enabled(objspace)) {
10967  gc_profile_record *record = gc_prof_record(objspace);
10968  record->allocate_increase = malloc_increase;
10969  record->allocate_limit = malloc_limit;
10970  }
10971 #endif
10972 }
10973 
10974 static inline void
10975 gc_prof_set_heap_info(rb_objspace_t *objspace)
10976 {
10977  if (gc_prof_enabled(objspace)) {
10978  gc_profile_record *record = gc_prof_record(objspace);
10979  size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
10980  size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
10981 
10982 #if GC_PROFILE_MORE_DETAIL
10983  record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
10984  record->heap_live_objects = live;
10985  record->heap_free_objects = total - live;
10986 #endif
10987 
10988  record->heap_total_objects = total;
10989  record->heap_use_size = live * sizeof(RVALUE);
10990  record->heap_total_size = total * sizeof(RVALUE);
10991  }
10992 }
10993 
10994 /*
10995  * call-seq:
10996  * GC::Profiler.clear -> nil
10997  *
10998  * Clears the GC profiler data.
10999  *
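 *  For example (an illustrative sketch; the profiler must be enabled first):
 *
 *     GC::Profiler.enable
 *     GC.start
 *     GC::Profiler.clear
 *     GC::Profiler.raw_data  #=> []
 *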
11000  */
11001 
11002 static VALUE
11003 gc_profile_clear(VALUE _)
11004 {
11005  rb_objspace_t *objspace = &rb_objspace;
11006  void *p = objspace->profile.records;
11007  objspace->profile.records = NULL;
11008  objspace->profile.size = 0;
11009  objspace->profile.next_index = 0;
11010  objspace->profile.current_record = 0;
11011  if (p) {
11012  free(p);
11013  }
11014  return Qnil;
11015 }
11016 
11017 /*
11018  * call-seq:
11019  * GC::Profiler.raw_data -> [Hash, ...]
11020  *
11021  * Returns an Array of individual raw profile data Hashes ordered
11022  * from earliest to latest by +:GC_INVOKE_TIME+.
11023  *
11024  * For example:
11025  *
11026  * [
11027  * {
11028  * :GC_TIME=>1.3000000000000858e-05,
11029  * :GC_INVOKE_TIME=>0.010634999999999999,
11030  * :HEAP_USE_SIZE=>289640,
11031  * :HEAP_TOTAL_SIZE=>588960,
11032  * :HEAP_TOTAL_OBJECTS=>14724,
11033  * :GC_IS_MARKED=>false
11034  * },
11035  * # ...
11036  * ]
11037  *
11038  * The keys mean:
11039  *
11040  * +:GC_TIME+::
11041  * Time elapsed in seconds for this GC run
11042  * +:GC_INVOKE_TIME+::
11043  * Time elapsed in seconds from startup to when the GC was invoked
11044  * +:HEAP_USE_SIZE+::
11045  * Total bytes of heap used
11046  * +:HEAP_TOTAL_SIZE+::
11047  * Total size of heap in bytes
11048  * +:HEAP_TOTAL_OBJECTS+::
11049  * Total number of objects
11050  * +:GC_IS_MARKED+::
11051  * Returns +true+ if the GC is in the mark phase
11052  *
11053  * If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
11054  * to the following hash keys:
11055  *
11056  * +:GC_MARK_TIME+::
11057  * +:GC_SWEEP_TIME+::
11058  * +:ALLOCATE_INCREASE+::
11059  * +:ALLOCATE_LIMIT+::
11060  * +:HEAP_USE_PAGES+::
11061  * +:HEAP_LIVE_OBJECTS+::
11062  * +:HEAP_FREE_OBJECTS+::
11063  * +:HAVE_FINALIZE+::
11064  *
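 *  A minimal sketch of consuming this data (the detail keys above are only
 *  present on such builds; values vary per run):
 *
 *     GC::Profiler.enable
 *     GC.start
 *     GC::Profiler.raw_data.each do |run|
 *       puts "invoked at %.3fs, took %.6fs" % run.values_at(:GC_INVOKE_TIME, :GC_TIME)
 *     end
 *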
11065  */
11066 
11067 static VALUE
11068 gc_profile_record_get(VALUE _)
11069 {
11070  VALUE prof;
11071  VALUE gc_profile = rb_ary_new();
11072  size_t i;
11073  rb_objspace_t *objspace = (&rb_objspace);
11074 
11075  if (!objspace->profile.run) {
11076  return Qnil;
11077  }
11078 
11079  for (i = 0; i < objspace->profile.next_index; i++) {
11080  gc_profile_record *record = &objspace->profile.records[i];
11081 
11082  prof = rb_hash_new();
11083  rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(0, rb_hash_new(), record->flags));
11084  rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
11085  rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
11086  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
11087  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
11088  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
11089  rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
11090 #if GC_PROFILE_MORE_DETAIL
11091  rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
11092  rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
11093  rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
11094  rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
11095  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
11096  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
11097  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
11098 
11099  rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
11100  rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
11101 
11102  rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), (record->flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
11103 #endif
11104 
11105 #if RGENGC_PROFILE > 0
11106  rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
11107  rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
11108  rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
11109 #endif
11110  rb_ary_push(gc_profile, prof);
11111  }
11112 
11113  return gc_profile;
11114 }
11115 
11116 #if GC_PROFILE_MORE_DETAIL
11117 #define MAJOR_REASON_MAX 0x10
11118 
11119 static char *
11120 gc_profile_dump_major_reason(int flags, char *buff)
11121 {
11122  int reason = flags & GPR_FLAG_MAJOR_MASK;
11123  int i = 0;
11124 
11125  if (reason == GPR_FLAG_NONE) {
11126  buff[0] = '-';
11127  buff[1] = 0;
11128  }
11129  else {
11130 #define C(x, s) \
11131  if (reason & GPR_FLAG_MAJOR_BY_##x) { \
11132  buff[i++] = #x[0]; \
11133  if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
11134  buff[i] = 0; \
11135  }
11136  C(NOFREE, N);
11137  C(OLDGEN, O);
11138  C(SHADY, S);
11139 #if RGENGC_ESTIMATE_OLDMALLOC
11140  C(OLDMALLOC, M);
11141 #endif
11142 #undef C
11143  }
11144  return buff;
11145 }
11146 #endif
11147 
11148 static void
11149 gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
11150 {
11151  rb_objspace_t *objspace = &rb_objspace;
11152  size_t count = objspace->profile.next_index;
11153 #ifdef MAJOR_REASON_MAX
11154  char reason_str[MAJOR_REASON_MAX];
11155 #endif
11156 
11157  if (objspace->profile.run && count /* > 1 */) {
11158  size_t i;
11159  const gc_profile_record *record;
11160 
11161  append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
11162  append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
11163 
11164  for (i = 0; i < count; i++) {
11165  record = &objspace->profile.records[i];
11166  append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
11167  i+1, record->gc_invoke_time, record->heap_use_size,
11168  record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
11169  }
11170 
11171 #if GC_PROFILE_MORE_DETAIL
11172  append(out, rb_str_new_cstr("\n\n" \
11173  "More detail.\n" \
11174  "Prepare Time = Previous GC's remaining sweep time\n"
11175  "Index Flags Allocate Inc. Allocate Limit"
11176 #if CALC_EXACT_MALLOC_SIZE
11177  " Allocated Size"
11178 #endif
11179  " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
11180 #if RGENGC_PROFILE
11181  " OldgenObj RemNormObj RemShadObj"
11182 #endif
11183 #if GC_PROFILE_DETAIL_MEMORY
11184  " MaxRSS(KB) MinorFLT MajorFLT"
11185 #endif
11186  "\n"));
11187 
11188  for (i = 0; i < count; i++) {
11189  record = &objspace->profile.records[i];
11190  append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
11191 #if CALC_EXACT_MALLOC_SIZE
11192  " %15"PRIuSIZE
11193 #endif
11194  " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
11195 #if RGENGC_PROFILE
11196  "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
11197 #endif
11198 #if GC_PROFILE_DETAIL_MEMORY
11199  "%11ld %8ld %8ld"
11200 #endif
11201 
11202  "\n",
11203  i+1,
11204  gc_profile_dump_major_reason(record->flags, reason_str),
11205  (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
11206  (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
11207  (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
11208  (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
11209  (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
11210  (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
11211  record->allocate_increase, record->allocate_limit,
11212 #if CALC_EXACT_MALLOC_SIZE
11213  record->allocated_size,
11214 #endif
11215  record->heap_use_pages,
11216  record->gc_mark_time*1000,
11217  record->gc_sweep_time*1000,
11218  record->prepare_time*1000,
11219 
11220  record->heap_live_objects,
11221  record->heap_free_objects,
11222  record->removing_objects,
11223  record->empty_objects
11224 #if RGENGC_PROFILE
11225  ,
11226  record->old_objects,
11227  record->remembered_normal_objects,
11228  record->remembered_shady_objects
11229 #endif
11230 #if GC_PROFILE_DETAIL_MEMORY
11231  ,
11232  record->maxrss / 1024,
11233  record->minflt,
11234  record->majflt
11235 #endif
11236 
11237  ));
11238  }
11239 #endif
11240  }
11241 }
11242 
11243 /*
11244  * call-seq:
11245  * GC::Profiler.result -> String
11246  *
11247  * Returns a profile data report such as:
11248  *
11249  * GC 1 invokes.
11250  * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)
11251  * 1 0.012 159240 212940 10647 0.00000000000001530000
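 *
 *  Typical usage (the numbers vary from run to run):
 *
 *     GC::Profiler.enable
 *     GC.start
 *     puts GC::Profiler.result
 *     GC::Profiler.disable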
11252  */
11253 
11254 static VALUE
11255 gc_profile_result(VALUE _)
11256 {
11257  VALUE str = rb_str_buf_new(0);
11258  gc_profile_dump_on(str, rb_str_buf_append);
11259  return str;
11260 }
11261 
11262 /*
11263  * call-seq:
11264  * GC::Profiler.report
11265  * GC::Profiler.report(io)
11266  *
11267  * Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
11268  *
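 *  For example, to capture the report in a file (illustrative file name):
 *
 *     GC::Profiler.report                                      # writes to $stdout
 *     File.open("gc.log", "a") { |f| GC::Profiler.report(f) }
 *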
11269  */
11270 
11271 static VALUE
11272 gc_profile_report(int argc, VALUE *argv, VALUE self)
11273 {
11274  VALUE out;
11275 
11276  out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
11277  gc_profile_dump_on(out, rb_io_write);
11278 
11279  return Qnil;
11280 }
11281 
11282 /*
11283  * call-seq:
11284  * GC::Profiler.total_time -> float
11285  *
11286  * The total time used for garbage collection in seconds
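 *
 *  For example (the value depends on the workload):
 *
 *     GC::Profiler.enable
 *     10_000.times { Object.new }
 *     GC.start
 *     GC::Profiler.total_time  #=> 0.0007 (for instance)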
11287  */
11288 
11289 static VALUE
11290 gc_profile_total_time(VALUE self)
11291 {
11292  double time = 0;
11293  rb_objspace_t *objspace = &rb_objspace;
11294 
11295  if (objspace->profile.run && objspace->profile.next_index > 0) {
11296  size_t i;
11297  size_t count = objspace->profile.next_index;
11298 
11299  for (i = 0; i < count; i++) {
11300  time += objspace->profile.records[i].gc_time;
11301  }
11302  }
11303  return DBL2NUM(time);
11304 }
11305 
11306 /*
11307  * call-seq:
11308  * GC::Profiler.enabled? -> true or false
11309  *
11310  * The current status of GC profile mode.
11311  */
11312 
11313 static VALUE
11314 gc_profile_enable_get(VALUE self)
11315 {
11316  rb_objspace_t *objspace = &rb_objspace;
11317  return objspace->profile.run ? Qtrue : Qfalse;
11318 }
11319 
11320 /*
11321  * call-seq:
11322  * GC::Profiler.enable -> nil
11323  *
11324  * Starts the GC profiler.
11325  *
11326  */
11327 
11328 static VALUE
11329 gc_profile_enable(VALUE _)
11330 {
11331  rb_objspace_t *objspace = &rb_objspace;
11332  objspace->profile.run = TRUE;
11333  objspace->profile.current_record = 0;
11334  return Qnil;
11335 }
11336 
11337 /*
11338  * call-seq:
11339  * GC::Profiler.disable -> nil
11340  *
11341  * Stops the GC profiler.
11342  *
11343  */
11344 
11345 static VALUE
11346 gc_profile_disable(VALUE _)
11347 {
11348  rb_objspace_t *objspace = &rb_objspace;
11349 
11350  objspace->profile.run = FALSE;
11351  objspace->profile.current_record = 0;
11352  return Qnil;
11353 }
11354 
11355 /*
11356  ------------------------------ DEBUG ------------------------------
11357 */
11358 
11359 static const char *
11360 type_name(int type, VALUE obj)
11361 {
11362  switch (type) {
11363 #define TYPE_NAME(t) case (t): return #t;
11364  TYPE_NAME(T_NONE);
11365  TYPE_NAME(T_OBJECT);
11366  TYPE_NAME(T_CLASS);
11367  TYPE_NAME(T_MODULE);
11368  TYPE_NAME(T_FLOAT);
11369  TYPE_NAME(T_STRING);
11370  TYPE_NAME(T_REGEXP);
11371  TYPE_NAME(T_ARRAY);
11372  TYPE_NAME(T_HASH);
11373  TYPE_NAME(T_STRUCT);
11374  TYPE_NAME(T_BIGNUM);
11375  TYPE_NAME(T_FILE);
11376  TYPE_NAME(T_MATCH);
11377  TYPE_NAME(T_COMPLEX);
11378  TYPE_NAME(T_RATIONAL);
11379  TYPE_NAME(T_NIL);
11380  TYPE_NAME(T_TRUE);
11381  TYPE_NAME(T_FALSE);
11382  TYPE_NAME(T_SYMBOL);
11383  TYPE_NAME(T_FIXNUM);
11384  TYPE_NAME(T_UNDEF);
11385  TYPE_NAME(T_IMEMO);
11386  TYPE_NAME(T_ICLASS);
11387  TYPE_NAME(T_MOVED);
11388  TYPE_NAME(T_ZOMBIE);
11389  case T_DATA:
11390  if (obj && rb_objspace_data_type_name(obj)) {
11391  return rb_objspace_data_type_name(obj);
11392  }
11393  return "T_DATA";
11394 #undef TYPE_NAME
11395  }
11396  return "unknown";
11397 }
11398 
11399 static const char *
11400 obj_type_name(VALUE obj)
11401 {
11402  return type_name(TYPE(obj), obj);
11403 }
11404 
11405 const char *
11406 rb_method_type_name(rb_method_type_t type)
11407 {
11408  switch (type) {
11409  case VM_METHOD_TYPE_ISEQ: return "iseq";
11410  case VM_METHOD_TYPE_ATTRSET: return "attrset";
11411  case VM_METHOD_TYPE_IVAR: return "ivar";
11412  case VM_METHOD_TYPE_BMETHOD: return "bmethod";
11413  case VM_METHOD_TYPE_ALIAS: return "alias";
11414  case VM_METHOD_TYPE_REFINED: return "refined";
11415  case VM_METHOD_TYPE_CFUNC: return "cfunc";
11416  case VM_METHOD_TYPE_ZSUPER: return "zsuper";
11417  case VM_METHOD_TYPE_MISSING: return "missing";
11418  case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
11419  case VM_METHOD_TYPE_UNDEF: return "undef";
11420  case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
11421  }
11422  rb_bug("rb_method_type_name: unreachable (type: %d)", type);
11423 }
11424 
11425 /* from array.c */
11426 # define ARY_SHARED_P(ary) \
11427  (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
11428  FL_TEST((ary),ELTS_SHARED)!=0)
11429 # define ARY_EMBED_P(ary) \
11430  (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
11431  FL_TEST((ary), RARRAY_EMBED_FLAG)!=0)
11432 
11433 static void
11434 rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
11435 {
11436  if (buff_size > 0 && iseq->body && iseq->body->location.label && !RB_TYPE_P(iseq->body->location.pathobj, T_MOVED)) {
11437  VALUE path = rb_iseq_path(iseq);
11438  VALUE n = iseq->body->location.first_lineno;
11439  snprintf(buff, buff_size, " %s@%s:%d",
11440  RSTRING_PTR(iseq->body->location.label),
11441  RSTRING_PTR(path),
11442  n ? FIX2INT(n) : 0 );
11443  }
11444 }
11445 
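/* Formats a one-line debug description of obj into buff. For heap objects the
 * line starts with "%p [<age><flags>] <type>", where <age> is the object's
 * RGENGC age and the flag letters are L (uncollectible), M (marked),
 * P (pinned), R (marking) and U (WB-unprotected); type-specific details follow. */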
11446 const char *
11447 rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
11448 {
11449  int pos = 0;
11450 
11451 #define BUFF_ARGS buff + pos, buff_size - pos
11452 #define APPENDF(f) if ((pos += snprintf f) >= buff_size) goto end
11453  if (SPECIAL_CONST_P(obj)) {
11454  APPENDF((BUFF_ARGS, "%s", obj_type_name(obj)));
11455 
11456  if (FIXNUM_P(obj)) {
11457  APPENDF((BUFF_ARGS, " %ld", FIX2LONG(obj)));
11458  }
11459  else if (SYMBOL_P(obj)) {
11460  APPENDF((BUFF_ARGS, " %s", rb_id2name(SYM2ID(obj))));
11461  }
11462  }
11463  else {
11464 #define TF(c) ((c) != 0 ? "true" : "false")
11465 #define C(c, s) ((c) != 0 ? (s) : " ")
11466  const int type = BUILTIN_TYPE(obj);
11467 #if USE_RGENGC
11468  const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
11469 
11470  if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
11471  APPENDF((BUFF_ARGS, "%p [%d%s%s%s%s%s] %s ",
11472  (void *)obj, age,
11473  C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
11474  C(RVALUE_MARK_BITMAP(obj), "M"),
11475  C(RVALUE_PIN_BITMAP(obj), "P"),
11476  C(RVALUE_MARKING_BITMAP(obj), "R"),
11477  C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
11478  obj_type_name(obj)));
11479  }
11480  else {
11481  /* fake */
11482  APPENDF((BUFF_ARGS, "%p [%dXXXX] %s",
11483  (void *)obj, age,
11484  obj_type_name(obj)));
11485  }
11486 #else
11487  APPENDF((BUFF_ARGS, "%p [%s] %s",
11488  (void *)obj,
11489  C(RVALUE_MARK_BITMAP(obj), "M"),
11490  obj_type_name(obj)));
11491 #endif
11492 
11493  if (internal_object_p(obj)) {
11494  /* ignore */
11495  }
11496  else if (RBASIC(obj)->klass == 0) {
11497  APPENDF((BUFF_ARGS, "(temporary internal)"));
11498  }
11499  else {
11500  if (RTEST(RBASIC(obj)->klass)) {
11501  VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
11502  if (!NIL_P(class_path)) {
11503  APPENDF((BUFF_ARGS, "(%s)", RSTRING_PTR(class_path)));
11504  }
11505  }
11506  }
11507 
11508 #if GC_DEBUG
11509  APPENDF((BUFF_ARGS, "@%s:%d", RANY(obj)->file, RANY(obj)->line));
11510 #endif
11511 
11512  switch (type) {
11513  case T_NODE:
11514  UNEXPECTED_NODE(rb_raw_obj_info);
11515  break;
11516  case T_ARRAY:
11517  if (FL_TEST(obj, ELTS_SHARED)) {
11518  APPENDF((BUFF_ARGS, "shared -> %s",
11519  rb_obj_info(RARRAY(obj)->as.heap.aux.shared_root)));
11520  }
11521  else if (FL_TEST(obj, RARRAY_EMBED_FLAG)) {
11522  APPENDF((BUFF_ARGS, "[%s%s] len: %d (embed)",
11523  C(ARY_EMBED_P(obj), "E"),
11524  C(ARY_SHARED_P(obj), "S"),
11525  (int)RARRAY_LEN(obj)));
11526  }
11527  else {
11528  APPENDF((BUFF_ARGS, "[%s%s%s] len: %d, capa:%d ptr:%p",
11529  C(ARY_EMBED_P(obj), "E"),
11530  C(ARY_SHARED_P(obj), "S"),
11531  C(RARRAY_TRANSIENT_P(obj), "T"),
11532  (int)RARRAY_LEN(obj),
11533  ARY_EMBED_P(obj) ? -1 : (int)RARRAY(obj)->as.heap.aux.capa,
11534  (void *)RARRAY_CONST_PTR_TRANSIENT(obj)));
11535  }
11536  break;
11537  case T_STRING: {
11538  APPENDF((BUFF_ARGS, "%s", RSTRING_PTR(obj)));
11539  break;
11540  }
11541  case T_MOVED: {
11542  APPENDF((BUFF_ARGS, "-> %p", (void*)rb_gc_location(obj)));
11543  break;
11544  }
11545  case T_HASH: {
11546  APPENDF((BUFF_ARGS, "[%c%c] %d",
11547  RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
11548  RHASH_TRANSIENT_P(obj) ? 'T' : ' ',
11549  (int)RHASH_SIZE(obj)));
11550  break;
11551  }
11552  case T_CLASS:
11553  case T_MODULE:
11554  {
11555  VALUE class_path = rb_class_path_cached(obj);
11556  if (!NIL_P(class_path)) {
11557  APPENDF((BUFF_ARGS, "%s", RSTRING_PTR(class_path)));
11558  }
11559  break;
11560  }
11561  case T_ICLASS:
11562  {
11563  VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
11564  if (!NIL_P(class_path)) {
11565  APPENDF((BUFF_ARGS, "src:%s", RSTRING_PTR(class_path)));
11566  }
11567  break;
11568  }
11569  case T_OBJECT:
11570  {
11571  uint32_t len = ROBJECT_NUMIV(obj);
11572 
11573  if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
11574  APPENDF((BUFF_ARGS, "(embed) len:%d", len));
11575  }
11576  else {
11577  VALUE *ptr = ROBJECT_IVPTR(obj);
11578  APPENDF((BUFF_ARGS, "len:%d ptr:%p", len, (void *)ptr));
11579  }
11580  }
11581  break;
11582  case T_DATA: {
11583  const struct rb_block *block;
11584  const rb_iseq_t *iseq;
11585  if (rb_obj_is_proc(obj) &&
11586  (block = vm_proc_block(obj)) != NULL &&
11587  (vm_block_type(block) == block_type_iseq) &&
11588  (iseq = vm_block_iseq(block)) != NULL) {
11589  rb_raw_iseq_info(BUFF_ARGS, iseq);
11590  }
11591  else {
11592  const char * const type_name = rb_objspace_data_type_name(obj);
11593  if (type_name) {
11594  APPENDF((BUFF_ARGS, "%s", type_name));
11595  }
11596  }
11597  break;
11598  }
11599  case T_IMEMO: {
11600  const char *imemo_name = "\0";
11601  switch (imemo_type(obj)) {
11602 #define IMEMO_NAME(x) case imemo_##x: imemo_name = #x; break;
11603  IMEMO_NAME(env);
11604  IMEMO_NAME(cref);
11605  IMEMO_NAME(svar);
11606  IMEMO_NAME(throw_data);
11607  IMEMO_NAME(ifunc);
11608  IMEMO_NAME(memo);
11609  IMEMO_NAME(ment);
11610  IMEMO_NAME(iseq);
11611  IMEMO_NAME(tmpbuf);
11612  IMEMO_NAME(ast);
11613  IMEMO_NAME(parser_strterm);
11614 #undef IMEMO_NAME
11615  default: UNREACHABLE;
11616  }
11617  APPENDF((BUFF_ARGS, "/%s", imemo_name));
11618 
11619  switch (imemo_type(obj)) {
11620  case imemo_ment: {
11621  const rb_method_entry_t *me = &RANY(obj)->as.imemo.ment;
11622  if (me->def) {
11623  APPENDF((BUFF_ARGS, "(called_id: %s, type: %s, alias: %d, owner: %s, defined_class: %s)",
11624  rb_id2name(me->called_id),
11625  rb_method_type_name(me->def->type),
11626  me->def->alias_count,
11627  obj_info(me->owner),
11628  obj_info(me->defined_class)));
11629  }
11630  else {
11631  APPENDF((BUFF_ARGS, "%s", rb_id2name(me->called_id)));
11632  }
11633  break;
11634  }
11635  case imemo_iseq: {
11636  const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
11637  rb_raw_iseq_info(BUFF_ARGS, iseq);
11638  break;
11639  }
11640  default:
11641  break;
11642  }
11643  }
11644  default:
11645  break;
11646  }
11647 #undef TF
11648 #undef C
11649  }
11650  end:
11651  return buff;
11652 #undef APPENDF
11653 #undef BUFF_ARGS
11654 }
11655 
11656 #if RGENGC_OBJ_INFO
11657 #define OBJ_INFO_BUFFERS_NUM 10
11658 #define OBJ_INFO_BUFFERS_SIZE 0x100
11659 static int obj_info_buffers_index = 0;
11660 static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
11661 
11662 static const char *
11663 obj_info(VALUE obj)
11664 {
11665  const int index = obj_info_buffers_index++;
11666  char *const buff = &obj_info_buffers[index][0];
11667 
11668  if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
11669  obj_info_buffers_index = 0;
11670  }
11671 
11672  return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
11673 }
11674 #else
11675 static const char *
11676 obj_info(VALUE obj)
11677 {
11678  return obj_type_name(obj);
11679 }
11680 #endif
11681 
11682 MJIT_FUNC_EXPORTED const char *
11683 rb_obj_info(VALUE obj)
11684 {
11685  return obj_info(obj);
11686 }
11687 
11688 void
11689 rb_obj_info_dump(VALUE obj)
11690 {
11691  char buff[0x100];
11692  fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
11693 }
11694 
11695 void
11696 rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
11697 {
11698  char buff[0x100];
11699  fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
11700 }
11701 
11702 #if GC_DEBUG
11703 
11704 void
11705 rb_gcdebug_print_obj_condition(VALUE obj)
11706 {
11707  rb_objspace_t *objspace = &rb_objspace;
11708 
11709  fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);
11710 
11711  if (BUILTIN_TYPE(obj) == T_MOVED) {
11712  fprintf(stderr, "moved?: true\n");
11713  }
11714  else {
11715  fprintf(stderr, "moved?: false\n");
11716  }
11717  if (is_pointer_to_heap(objspace, (void *)obj)) {
11718  fprintf(stderr, "pointer to heap?: true\n");
11719  }
11720  else {
11721  fprintf(stderr, "pointer to heap?: false\n");
11722  return;
11723  }
11724 
11725  fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
11726  fprintf(stderr, "pinned? : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
11727 #if USE_RGENGC
11728  fprintf(stderr, "age? : %d\n", RVALUE_AGE(obj));
11729  fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
11730  fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
11731  fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");
11732 #endif
11733 
11734  if (is_lazy_sweeping(heap_eden)) {
11735  fprintf(stderr, "lazy sweeping?: true\n");
11736  fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");
11737  }
11738  else {
11739  fprintf(stderr, "lazy sweeping?: false\n");
11740  }
11741 }
11742 
11743 static VALUE
11744 gcdebug_sentinel(RB_BLOCK_CALL_FUNC_ARGLIST(obj, name))
11745 {
11746  fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);
11747  return Qnil;
11748 }
11749 
11750 void
11751 rb_gcdebug_sentinel(VALUE obj, const char *name)
11752 {
11753  rb_define_finalizer(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
11754 }
11755 
11756 #endif /* GC_DEBUG */
11757 
11758 #if GC_DEBUG_STRESS_TO_CLASS
11759 /*
11760  * call-seq:
11761  * GC.add_stress_to_class(class[, ...])
11762  *
11763  * Raises NoMemoryError when allocating an instance of the given classes.
11764  *
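 *  For example (only available on builds with +GC_DEBUG_STRESS_TO_CLASS+):
 *
 *     GC.add_stress_to_class(String)
 *     String.new  # raises NoMemoryError
 *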
11765  */
11766 static VALUE
11767 rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
11768 {
11769  rb_objspace_t *objspace = &rb_objspace;
11770 
11771  if (!stress_to_class) {
11772  stress_to_class = rb_ary_tmp_new(argc);
11773  }
11774  rb_ary_cat(stress_to_class, argv, argc);
11775  return self;
11776 }
11777 
11778 /*
11779  * call-seq:
11780  * GC.remove_stress_to_class(class[, ...])
11781  *
11782  * No longer raises NoMemoryError when allocating an instance of the
11783  * given classes.
11784  *
11785  */
11786 static VALUE
11787 rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
11788 {
11789  rb_objspace_t *objspace = &rb_objspace;
11790  int i;
11791 
11792  if (stress_to_class) {
11793  for (i = 0; i < argc; ++i) {
11794  rb_ary_delete_same(stress_to_class, argv[i]);
11795  }
11796  if (RARRAY_LEN(stress_to_class) == 0) {
11797  stress_to_class = 0;
11798  }
11799  }
11800  return Qnil;
11801 }
11802 #endif
11803 
11804 /*
11805  * Document-module: ObjectSpace
11806  *
11807  * The ObjectSpace module contains a number of routines
11808  * that interact with the garbage collection facility and allow you to
11809  * traverse all living objects with an iterator.
11810  *
11811  * ObjectSpace also provides support for object finalizers, procs that will be
11812  * called when a specific object is about to be destroyed by garbage
11813  * collection.
11814  *
11815  * require 'objspace'
11816  *
11817  * a = "A"
11818  * b = "B"
11819  *
11820  * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
11821  * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
11822  *
11823  * _produces:_
11824  *
11825  * Finalizer two on 537763470
11826  * Finalizer one on 537763480
11827  */
11828 
11829 /*
11830  * Document-class: ObjectSpace::WeakMap
11831  *
11832  * An ObjectSpace::WeakMap object holds references to
11833  * any objects, but those objects may still be garbage collected.
11834  *
11835  * This class is mostly used internally by WeakRef; please use
11836  * +lib/weakref.rb+ for the public interface.
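 *
 *  An illustrative sketch of direct use:
 *
 *     map = ObjectSpace::WeakMap.new
 *     key = Object.new
 *     map[key] = "value"
 *     map[key]    #=> "value", until the entry is garbage collected
 *     map.size    #=> 1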
11837  */
11838 
11839 /* Document-class: GC::Profiler
11840  *
11841  * The GC profiler provides access to information on GC runs including time,
11842  * length and object space size.
11843  *
11844  * Example:
11845  *
11846  * GC::Profiler.enable
11847  *
11848  * require 'rdoc/rdoc'
11849  *
11850  * GC::Profiler.report
11851  *
11852  * GC::Profiler.disable
11853  *
11854  * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
11855  */
11856 
11857 #include "gc.rbinc"
11858 
11859 void
11860 Init_GC(void)
11861 {
11862 #undef rb_intern
11863  VALUE rb_mObjSpace;
11864  VALUE rb_mProfiler;
11865  VALUE gc_constants;
11866 
11867  rb_mGC = rb_define_module("GC");
11868  load_gc();
11869 
11870  gc_constants = rb_hash_new();
11871  rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
11872  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
11873  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
11874  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_PLANES")), SIZET2NUM(HEAP_PAGE_BITMAP_PLANES));
11875  OBJ_FREEZE(gc_constants);
11876  /* internal constants */
11877  rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
11878 
11879  rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
11880  rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
11881  rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
11882  rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
11883  rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
11884  rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
11885  rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
11886  rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
11887  rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
11888 
11889  rb_mObjSpace = rb_define_module("ObjectSpace");
11890 
11891  rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
11892 
11893  rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
11894  rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
11895 
11896  rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
11897 
11898  rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
11899 
11900  rb_define_method(rb_mKernel, "__id__", rb_obj_id, 0);
11901  rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
11902 
11903  rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
11904 
11905  {
11906  VALUE rb_cWeakMap = rb_define_class_under(rb_mObjSpace, "WeakMap", rb_cObject);
11907  rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
11908  rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
11909  rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
11910  rb_define_method(rb_cWeakMap, "include?", wmap_has_key, 1);
11911  rb_define_method(rb_cWeakMap, "member?", wmap_has_key, 1);
11912  rb_define_method(rb_cWeakMap, "key?", wmap_has_key, 1);
11913  rb_define_method(rb_cWeakMap, "inspect", wmap_inspect, 0);
11914  rb_define_method(rb_cWeakMap, "each", wmap_each, 0);
11915  rb_define_method(rb_cWeakMap, "each_pair", wmap_each, 0);
11916  rb_define_method(rb_cWeakMap, "each_key", wmap_each_key, 0);
11917  rb_define_method(rb_cWeakMap, "each_value", wmap_each_value, 0);
11918  rb_define_method(rb_cWeakMap, "keys", wmap_keys, 0);
11919  rb_define_method(rb_cWeakMap, "values", wmap_values, 0);
11920  rb_define_method(rb_cWeakMap, "size", wmap_size, 0);
11921  rb_define_method(rb_cWeakMap, "length", wmap_size, 0);
11922  rb_include_module(rb_cWeakMap, rb_mEnumerable);
11923  }
11924 
11925  /* internal methods */
11926  rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);
11927  rb_define_singleton_method(rb_mGC, "verify_compaction_references", gc_verify_compaction_references, -1);
11928  rb_define_singleton_method(rb_mGC, "verify_transient_heap_internal_consistency", gc_verify_transient_heap_internal_consistency, 0);
11929 #if MALLOC_ALLOCATED_SIZE
11930  rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
11931  rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
11932 #endif
11933 
11934 #if GC_DEBUG_STRESS_TO_CLASS
11935  rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
11936  rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
11937 #endif
11938 
11939  {
11940  VALUE opts;
11941  /* GC build options */
11942  rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
11943 #define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))
11944  OPT(GC_DEBUG);
11945  OPT(USE_RGENGC);
11946  OPT(RGENGC_DEBUG);
11947  OPT(RGENGC_CHECK_MODE);
11948  OPT(RGENGC_PROFILE);
11949  OPT(RGENGC_ESTIMATE_OLDMALLOC);
11950  OPT(GC_PROFILE_MORE_DETAIL);
11951  OPT(GC_ENABLE_LAZY_SWEEP);
11952  OPT(CALC_EXACT_MALLOC_SIZE);
11953  OPT(MALLOC_ALLOCATED_SIZE);
11954  OPT(MALLOC_ALLOCATED_SIZE_CHECK);
11955  OPT(GC_PROFILE_DETAIL_MEMORY);
11956 #undef OPT
11957  OBJ_FREEZE(opts);
11958  }
11959 }
11960 
11961 #ifdef ruby_xmalloc
11962 #undef ruby_xmalloc
11963 #endif
11964 #ifdef ruby_xmalloc2
11965 #undef ruby_xmalloc2
11966 #endif
11967 #ifdef ruby_xcalloc
11968 #undef ruby_xcalloc
11969 #endif
11970 #ifdef ruby_xrealloc
11971 #undef ruby_xrealloc
11972 #endif
11973 #ifdef ruby_xrealloc2
11974 #undef ruby_xrealloc2
11975 #endif
11976 
11977 void *
11978 ruby_xmalloc(size_t size)
11979 {
11980 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11981  ruby_malloc_info_file = __FILE__;
11982  ruby_malloc_info_line = __LINE__;
11983 #endif
11984  return ruby_xmalloc_body(size);
11985 }
11986 
11987 void *
11988 ruby_xmalloc2(size_t n, size_t size)
11989 {
11990 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
11991  ruby_malloc_info_file = __FILE__;
11992  ruby_malloc_info_line = __LINE__;
11993 #endif
11994  return ruby_xmalloc2_body(n, size);
11995 }
11996 
11997 void *
11998 ruby_xcalloc(size_t n, size_t size)
11999 {
12000 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
12001  ruby_malloc_info_file = __FILE__;
12002  ruby_malloc_info_line = __LINE__;
12003 #endif
12004  return ruby_xcalloc_body(n, size);
12005 }
12006 
12007 void *
12008 ruby_xrealloc(void *ptr, size_t new_size)
12009 {
12010 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
12011  ruby_malloc_info_file = __FILE__;
12012  ruby_malloc_info_line = __LINE__;
12013 #endif
12014  return ruby_xrealloc_body(ptr, new_size);
12015 }
12016 
12017 void *
12018 ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
12019 {
12020 #if USE_GC_MALLOC_OBJ_INFO_DETAILS
12021  ruby_malloc_info_file = __FILE__;
12022  ruby_malloc_info_line = __LINE__;
12023 #endif
12024  return ruby_xrealloc2_body(ptr, n, new_size);
12025 }
memset
void * memset(void *, int, size_t)
list_empty
#define list_empty(h)
Definition: rb_mjit_min_header-2.7.2.h:8994
RMatch::regexp
VALUE regexp
Definition: re.h:47
rb_objspace::dont_gc
unsigned int dont_gc
Definition: gc.c:689
FLONUM_P
#define FLONUM_P(x)
Definition: ruby.h:430
va_end
#define va_end(v)
Definition: rb_mjit_min_header-2.7.2.h:3982
global_symbols
#define global_symbols
Definition: gc.c:8410
gc_stat_sym_heap_available_slots
@ gc_stat_sym_heap_available_slots
Definition: gc.c:8845
abort
void abort(void) __attribute__((__noreturn__))
__attribute__
unsigned int UINT8 __attribute__((__mode__(__QI__)))
Definition: ffi_common.h:110
rb_objspace::mark_func_data_struct::mark_func
void(* mark_func)(VALUE v, void *data)
Definition: gc.c:716
gc_stat_sym_total_allocated_pages
@ gc_stat_sym_total_allocated_pages
Definition: gc.c:8852
rb_io_t::writeconv_pre_ecopts
VALUE writeconv_pre_ecopts
Definition: io.h:99
ATOMIC_VALUE_EXCHANGE
#define ATOMIC_VALUE_EXCHANGE(var, val)
Definition: ruby_atomic.h:216
rb_subclass_entry::next
rb_subclass_entry_t * next
Definition: internal.h:1000
gc_stat_compat_sym_malloc_limit
@ gc_stat_compat_sym_malloc_limit
Definition: gc.c:8901
rb_objspace::count
size_t count
Definition: gc.c:779
RARRAY_TRANSIENT_P
#define RARRAY_TRANSIENT_P(ary)
Definition: ruby.h:1076
rb_get_kwargs
int rb_get_kwargs(VALUE keyword_hash, const ID *table, int required, int optional, VALUE *values)
Definition: class.c:1886
FL_FINALIZE
#define FL_FINALIZE
Definition: ruby.h:1282
rmatch::regs
struct re_registers regs
Definition: re.h:37
heap_eden
#define heap_eden
Definition: gc.c:919
rb_big_eql
VALUE rb_big_eql(VALUE x, VALUE y)
Definition: bignum.c:5544
nonspecial_obj_id
#define nonspecial_obj_id(obj)
Definition: gc.c:974
UNLIKELY
#define UNLIKELY(x)
Definition: ffi_common.h:126
OBJ_ID_INCREMENT
#define OBJ_ID_INCREMENT
Definition: gc.c:2883
ID
unsigned long ID
Definition: ruby.h:103
gc_stat_sym_oldmalloc_increase_bytes_limit
@ gc_stat_sym_oldmalloc_increase_bytes_limit
Definition: gc.c:8868
MEMOP_TYPE_MALLOC
@ MEMOP_TYPE_MALLOC
Definition: gc.c:9688
rb_check_funcall
VALUE rb_check_funcall(VALUE, ID, int, const VALUE *)
Definition: vm_eval.c:505
ruby_xfree
void ruby_xfree(void *x)
Definition: gc.c:10170
GC_MALLOC_LIMIT_GROWTH_FACTOR
#define GC_MALLOC_LIMIT_GROWTH_FACTOR
Definition: gc.c:291
T_FALSE
#define T_FALSE
Definition: ruby.h:537
ruby::backward::cxxanyargs::rb_proc_new
VALUE rb_proc_new(type *q, VALUE w)
Creates a rb_cProc instance.
Definition: cxxanyargs.hpp:324
BIGNUM_DIGITS
#define BIGNUM_DIGITS(b)
Definition: internal.h:780
MEMOP_TYPE_FREE
@ MEMOP_TYPE_FREE
Definition: gc.c:9689
ruby_stack_length
size_t ruby_stack_length(VALUE **p)
Definition: gc.c:4634
ruby_gc_stress_mode
#define ruby_gc_stress_mode
Definition: gc.c:927
STACK_END
#define STACK_END
Definition: gc.c:4604
COUNT_TYPE
#define COUNT_TYPE(t)
rb_objspace::id_to_obj_tbl
st_table * id_to_obj_tbl
Definition: gc.c:822
rb_raw_obj_info
const char * rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
Definition: gc.c:11447
rb_id2name
const char * rb_id2name(ID)
Definition: symbol.c:801
rb_method_bmethod_struct::proc
VALUE proc
Definition: method.h:152
constant.h
bits_t
uintptr_t bits_t
Definition: gc.c:618
gc_stat_sym_heap_free_slots
@ gc_stat_sym_heap_free_slots
Definition: gc.c:8847
STATIC_SYM_P
#define STATIC_SYM_P(x)
Definition: ruby.h:411
TypedData_Make_Struct
#define TypedData_Make_Struct(klass, type, data_type, sval)
Definition: ruby.h:1244
idEq
@ idEq
Definition: id.h:96
RVALUE::imemo
union RVALUE::@3::@5 imemo
Check_Type
#define Check_Type(v, t)
Definition: ruby.h:595
gc_stat_compat_sym_heap_final_slot
@ gc_stat_compat_sym_heap_final_slot
Definition: gc.c:8890
RGENGC_PROFILE
#define RGENGC_PROFILE
Definition: gc.c:421
rb_xcalloc_mul_add_mul
void * rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w)
Definition: gc.c:10197
TRUE
#define TRUE
Definition: nkf.h:175
rb_transient_heap_mark
void rb_transient_heap_mark(VALUE obj, const void *ptr)
Definition: transient_heap.c:529
error
const rb_iseq_t const char * error
Definition: rb_mjit_min_header-2.7.2.h:13471
MALLOC_ALLOCATED_SIZE_CHECK
#define MALLOC_ALLOCATED_SIZE_CHECK
Definition: gc.c:480
SIZEOF_VOIDP
#define SIZEOF_VOIDP
Definition: rb_mjit_min_header-2.7.2.h:90
st_foreach_with_replace
int st_foreach_with_replace(st_table *tab, st_foreach_check_callback_func *func, st_update_callback_func *replace, st_data_t arg)
Definition: st.c:1700
rb_gc_count
size_t rb_gc_count(void)
Definition: gc.c:8714
GPR_FLAG_IMMEDIATE_MARK
@ GPR_FLAG_IMMEDIATE_MARK
Definition: gc.c:513
RFILE
#define RFILE(obj)
Definition: ruby.h:1276
rb_memory_id
VALUE rb_memory_id(VALUE obj)
Definition: gc.c:3740
GPR_FLAG_CAPI
@ GPR_FLAG_CAPI
Definition: gc.c:507
T_FLOAT
#define T_FLOAT
Definition: ruby.h:527
STACK_UPPER
#define STACK_UPPER(x, a, b)
Definition: gc.h:83
RB_DEBUG_COUNTER_INC_IF
#define RB_DEBUG_COUNTER_INC_IF(type, cond)
Definition: debug_counter.h:377
rb_mGC
VALUE rb_mGC
Definition: gc.c:1000
ruby_mimmalloc
void * ruby_mimmalloc(size_t size)
Definition: gc.c:10207
xcalloc
#define xcalloc
Definition: defines.h:213
rb_include_module
void rb_include_module(VALUE klass, VALUE module)
Definition: class.c:869
GPR_FLAG_HAVE_FINALIZE
@ GPR_FLAG_HAVE_FINALIZE
Definition: gc.c:512
imemo
union @0::@2 imemo
RVALUE::v3
VALUE v3
Definition: gc.c:605
rb_mark_generic_ivar
void rb_mark_generic_ivar(VALUE)
Definition: variable.c:973
rb_objspace_call_finalizer
void rb_objspace_call_finalizer(rb_objspace_t *objspace)
Definition: gc.c:3443
strtod
#define strtod(s, e)
Definition: util.h:76
RTypedData::type
const rb_data_type_t * type
Definition: ruby.h:1170
rb_obj_hide
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition: object.c:78
rb_objspace::oldmalloc_increase
size_t oldmalloc_increase
Definition: gc.c:799
RUBY_FL_WB_PROTECTED
@ RUBY_FL_WB_PROTECTED
Definition: ruby.h:842
rb_objspace::dont_incremental
unsigned int dont_incremental
Definition: gc.c:690
rb_ec_raised_clear
#define rb_ec_raised_clear(ec)
Definition: eval_intern.h:261
rb_objspace::total_freed_pages
size_t total_freed_pages
Definition: gc.c:782
rb_objspace::oldmalloc_increase_limit
size_t oldmalloc_increase_limit
Definition: gc.c:800
RVALUE::string
struct RString string
Definition: gc.c:577
rb_objspace_data_type_name
const char * rb_objspace_data_type_name(VALUE obj)
Definition: gc.c:2432
RZombie::basic
struct RBasic basic
Definition: gc.c:987
rb_objspace::total_allocated_objects
size_t total_allocated_objects
Definition: gc.c:704
ruby_gc_params_t::heap_init_slots
size_t heap_init_slots
Definition: gc.c:318
force_finalize_list::table
VALUE table
Definition: gc.c:3426
gc_stat_sym_heap_marked_slots
@ gc_stat_sym_heap_marked_slots
Definition: gc.c:8849
GC_PROFILE_DETAIL_MEMORY
#define GC_PROFILE_DETAIL_MEMORY
Definition: gc.c:461
rb_io_t::write_lock
VALUE write_lock
Definition: io.h:101
id
const int id
Definition: nkf.c:209
onig_memsize
size_t onig_memsize(const regex_t *reg)
Definition: regcomp.c:5654
RZOMBIE
#define RZOMBIE(o)
Definition: gc.c:993
st_table::num_entries
st_index_t num_entries
Definition: st.h:86
rb_objspace::during_minor_gc
unsigned int during_minor_gc
Definition: gc.c:696
VM_METHOD_TYPE_REFINED
@ VM_METHOD_TYPE_REFINED
refinement
Definition: method.h:113
rb_objspace::considered_count_table
size_t considered_count_table[T_MASK]
Definition: gc.c:810
env
#define env
FIX2INT
#define FIX2INT(x)
Definition: ruby.h:717
RVALUE::flags
VALUE flags
Definition: gc.c:569
posix_memalign
int posix_memalign(void **, size_t, size_t) __attribute__((__nonnull__(1))) __attribute__((__warn_unused_result__))
rb_iseq_struct
Definition: vm_core.h:456
rb_hash_new
VALUE rb_hash_new(void)
Definition: hash.c:1523
RFloat
Definition: internal.h:798
ruby_gc_stressful
#define ruby_gc_stressful
Definition: gc.c:926
ATOMIC_PTR_EXCHANGE
#define ATOMIC_PTR_EXCHANGE(var, val)
Definition: ruby_atomic.h:186
RVALUE::bignum
struct RBignum bignum
Definition: gc.c:584
rb_gc_register_mark_object
void rb_gc_register_mark_object(VALUE obj)
Definition: gc.c:7066
RVALUE::env
rb_env_t env
Definition: gc.c:597
ruby_malloc_size_overflow
void ruby_malloc_size_overflow(size_t count, size_t elsize)
Definition: gc.c:10094
cfp
rb_control_frame_t * cfp
Definition: rb_mjit_min_header-2.7.2.h:14524
rb_define_module_under
VALUE rb_define_module_under(VALUE outer, const char *name)
Definition: class.c:797
gc_mode
#define gc_mode(objspace)
Definition: gc.c:950
mark_stack::chunk
stack_chunk_t * chunk
Definition: gc.c:648
rb_classext_struct::subclasses
rb_subclass_entry_t * subclasses
Definition: internal.h:1028
rb_objspace::next_object_id
VALUE next_object_id
Definition: gc.c:705
RVALUE::complex
struct RComplex complex
Definition: gc.c:588
VM_METHOD_TYPE_OPTIMIZED
@ VM_METHOD_TYPE_OPTIMIZED
Kernel::send, Proc::call, etc.
Definition: method.h:111
rb_objspace::records
gc_profile_record * records
Definition: gc.c:740
rb_str_buf_new
VALUE rb_str_buf_new(long)
Definition: string.c:1315
rb_warn
void rb_warn(const char *fmt,...)
Definition: error.c:315
rb_postponed_job_register_one
int rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
Definition: vm_trace.c:1614
is_incremental_marking
#define is_incremental_marking(objspace)
Definition: gc.c:961
ruby_stack_grow_direction
int ruby_stack_grow_direction
Definition: gc.c:4621
rb_gc_update_tbl_refs
void rb_gc_update_tbl_refs(st_table *ptr)
Definition: gc.c:7986
imemo_memo
@ imemo_memo
Definition: internal.h:1138
RVALUE_PAGE_MARKING
#define RVALUE_PAGE_MARKING(page, obj)
Definition: gc.c:1221
rb_data_type_struct::dmark
void(* dmark)(void *)
Definition: ruby.h:1151
memalign
void * memalign(size_t, size_t)
rb_method_definition_struct::attr
rb_method_attr_t attr
Definition: method.h:171
rb_func_lambda_new
VALUE rb_func_lambda_new(rb_block_call_func_t func, VALUE val, int min_argc, int max_argc)
Definition: proc.c:735
gc_stat_compat_sym_old_object
@ gc_stat_compat_sym_old_object
Definition: gc.c:8895
weakmap::wmap2obj
st_table * wmap2obj
Definition: gc.c:10337
CALC_EXACT_MALLOC_SIZE
#define CALC_EXACT_MALLOC_SIZE
Definition: gc.c:470
rb_singleton_class_internal_p
int rb_singleton_class_internal_p(VALUE sklass)
Definition: class.c:455
gc.h
BITMAP_INDEX
#define BITMAP_INDEX(p)
Definition: gc.c:881
rb_ary_free
void rb_ary_free(VALUE ary)
Definition: array.c:786
int
__inline__ int
Definition: rb_mjit_min_header-2.7.2.h:2845
RBASIC_CLEAR_CLASS
#define RBASIC_CLEAR_CLASS(obj)
Definition: internal.h:1987
HEAP_PAGE_BITMAP_PLANES
@ HEAP_PAGE_BITMAP_PLANES
Definition: gc.c:842
rb_method_iseq_struct::cref
rb_cref_t * cref
class reference, should be marked
Definition: method.h:128
ST_STOP
@ ST_STOP
Definition: st.h:99
gc_stress_no_immediate_sweep
@ gc_stress_no_immediate_sweep
Definition: gc.c:7124
RRational::num
VALUE num
Definition: internal.h:790
rb_gc_force_recycle
void rb_gc_force_recycle(VALUE obj)
Definition: gc.c:7014
INT2FIX
#define INT2FIX(i)
Definition: ruby.h:263
RVALUE::rstruct
struct RStruct rstruct
Definition: gc.c:583
T_MASK
#define T_MASK
Definition: md5.c:131
ruby_gc_params_t::oldobject_limit_factor
double oldobject_limit_factor
Definition: gc.c:326
GPR_FLAG_MAJOR_BY_NOFREE
@ GPR_FLAG_MAJOR_BY_NOFREE
Definition: gc.c:494
st_is_member
#define st_is_member(table, key)
Definition: st.h:97
rb_obj_info
MJIT_FUNC_EXPORTED const char * rb_obj_info(VALUE obj)
Definition: gc.c:11683
gc_stat_compat_sym_heap_free_slot
@ gc_stat_compat_sym_heap_free_slot
Definition: gc.c:8889
gc_report
#define gc_report
Definition: gc.c:1093
rb_during_gc
int rb_during_gc(void)
Definition: gc.c:8690
RObject
Definition: ruby.h:922
heap_page::final_slots
short final_slots
Definition: gc.c:849
rb_heap_struct::pages
struct list_head pages
Definition: gc.c:661
rb_mark_tbl_no_pin
void rb_mark_tbl_no_pin(st_table *tbl)
Definition: gc.c:5014
PRIxVALUE
#define PRIxVALUE
Definition: ruby.h:164
ruby_xmalloc
void * ruby_xmalloc(size_t size)
Definition: gc.c:11978
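ruby_xmalloc (normally reached through the xmalloc macro) is the GC-aware allocator: the request is counted toward malloc_limit, a collection may be triggered under pressure, and on failure it raises NoMemoryError instead of returning NULL. Memory from it must be released with xfree, not free(3). A small sketch:

#include "ruby.h"
#include <string.h>

static void
use_xmalloc(void)
{
    /* No NULL check needed: failure raises NoMemoryError after a
     * last-ditch GC rather than returning NULL. */
    char *buf = xmalloc(1024);
    memset(buf, 0, 1024);
    xfree(buf);               /* pair with xmalloc, never free(3) */
}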
gc_stat_sym_total_allocated_objects
@ gc_stat_sym_total_allocated_objects
Definition: gc.c:8854
rb_int2str
VALUE rb_int2str(VALUE num, int base)
Definition: numeric.c:3562
RHash::ifnone
const VALUE ifnone
Definition: internal.h:893
gc_profile_record::heap_total_size
size_t heap_total_size
Definition: gc.c:529
RHASH_SIZE
#define RHASH_SIZE(h)
Definition: ruby.h:1130
rb_class_remove_from_module_subclasses
void rb_class_remove_from_module_subclasses(VALUE klass)
Definition: class.c:94
RSTRING_PTR
#define RSTRING_PTR(str)
Definition: ruby.h:1009
rb_objspace::during_incremental_marking
unsigned int during_incremental_marking
Definition: gc.c:699
re.h
i
uint32_t i
Definition: rb_mjit_min_header-2.7.2.h:5460
rb_gc_mark_locations
void rb_gc_mark_locations(const VALUE *start, const VALUE *end)
Definition: gc.c:4702
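rb_gc_mark_locations conservatively marks every slot in the half-open range [start, end): each word is treated as a possible object reference and ignored if it does not point into the heap, the same mechanism used for machine stacks. That makes it suitable for C arrays that may mix VALUEs with other data; a sketch with a hypothetical struct:

#include "ruby.h"

struct my_buffer {
    VALUE slots[16];
};

static void
my_buffer_mark(void *p)
{
    struct my_buffer *buf = p;
    /* Conservative range mark: words that do not look like heap
     * objects are skipped rather than confusing the collector. */
    rb_gc_mark_locations(buf->slots, buf->slots + 16);
}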
SIZE_MAX
#define SIZE_MAX
Definition: ruby.h:307
GET_HEAP_MARK_BITS
#define GET_HEAP_MARK_BITS(x)
Definition: gc.c:891
STACKFRAME_FOR_CALL_CFUNC
#define STACKFRAME_FOR_CALL_CFUNC
Definition: gc.c:4665
rb_callable_method_entry_struct::owner
const VALUE owner
Definition: method.h:64
RTYPEDDATA_TYPE
#define RTYPEDDATA_TYPE(v)
Definition: ruby.h:1178
rb_gc_verify_internal_consistency
void rb_gc_verify_internal_consistency(void)
Definition: gc.c:6205
gc_stat_compat_sym_malloc_increase
@ gc_stat_compat_sym_malloc_increase
Definition: gc.c:8900
os_each_struct::num
size_t num
Definition: gc.c:3056
EXEC_EVENT_HOOK
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_)
Definition: vm_core.h:1935
rb_io_t::pathv
VALUE pathv
Definition: io.h:72
ruby_sized_xrealloc
void * ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
Definition: gc.c:10127
malloc_obj_info::size
size_t size
Definition: gc.c:9792
rb_strterm_mark
void rb_strterm_mark(VALUE obj)
Definition: ripper.c:765
sleep
unsigned sleep(unsigned int __seconds)
rb_objspace::mark_func_data_struct::data
void * data
Definition: gc.c:715
rb_objspace
Definition: gc.c:676
GPR_FLAG_NEWOBJ
@ GPR_FLAG_NEWOBJ
Definition: gc.c:504
FLUSH_REGISTER_WINDOWS
#define FLUSH_REGISTER_WINDOWS
Definition: defines.h:431
GET_HEAP_WB_UNPROTECTED_BITS
#define GET_HEAP_WB_UNPROTECTED_BITS(x)
Definition: gc.c:895
mark_stack
Definition: gc.c:647
RVALUE::free
struct RVALUE::@3::@4 free
st_init_numtable
st_table * st_init_numtable(void)
Definition: st.c:653
PUSH_MARK_FUNC_DATA
#define PUSH_MARK_FUNC_DATA(v)
Definition: gc.c:1098
ruby_gc_params_t::malloc_limit_min
size_t malloc_limit_min
Definition: gc.c:328
objspace_and_reason::reason
int reason
Definition: gc.c:7374
RUBY_INTERNAL_EVENT_FREEOBJ
#define RUBY_INTERNAL_EVENT_FREEOBJ
Definition: ruby.h:2269
ruby_xrealloc_body
void * ruby_xrealloc_body(void *ptr, size_t new_size)
Definition: gc.c:10137
heap_page::wb_unprotected_bits
bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:863
gc_stat_compat_sym_heap_swept_slot
@ gc_stat_compat_sym_heap_swept_slot
Definition: gc.c:8891
VALGRIND_MAKE_MEM_DEFINED
#define VALGRIND_MAKE_MEM_DEFINED(p, n)
Definition: zlib.c:24
rb_objspace::has_hook
unsigned int has_hook
Definition: gc.c:694
rb_objspace_of
#define rb_objspace_of(vm)
Definition: gc.c:901
VALUE
unsigned long VALUE
Definition: ruby.h:102
long
#define long
Definition: rb_mjit_min_header-2.7.2.h:2889
M
#define M
Definition: mt19937.c:53
GET_VM
#define GET_VM()
Definition: vm_core.h:1764
RVALUE_MARKING_BITMAP
#define RVALUE_MARKING_BITMAP(obj)
Definition: gc.c:1217
rb_eArgError
VALUE rb_eArgError
Definition: error.c:925
each_obj_args
Definition: gc.c:2943
rb_clear_method_cache_by_class
void rb_clear_method_cache_by_class(VALUE)
Definition: vm_method.c:93
va_list
__gnuc_va_list va_list
Definition: rb_mjit_min_header-2.7.2.h:834
encoding.h
OPT
#define OPT(o)
ruby_verbose
#define ruby_verbose
Definition: ruby.h:1925
rb_intern
#define rb_intern(str)
os_each_struct::of
VALUE of
Definition: gc.c:3057
HEAP_PAGE_ALIGN_MASK
@ HEAP_PAGE_ALIGN_MASK
Definition: gc.c:836
RUBY_DATA_FUNC
void(* RUBY_DATA_FUNC)(void *)
Definition: ruby.h:1184
st_delete
int st_delete(st_table *tab, st_data_t *key, st_data_t *value)
Definition: st.c:1418
C
#define C(c, s)
RB_TYPE_P
#define RB_TYPE_P(obj, type)
Definition: ruby.h:560
rb_ary_memsize
RUBY_FUNC_EXPORTED size_t rb_ary_memsize(VALUE ary)
Definition: array.c:816
heap_allocatable_pages
#define heap_allocatable_pages
Definition: gc.c:915
RVALUE_PAGE_WB_UNPROTECTED
#define RVALUE_PAGE_WB_UNPROTECTED(page, obj)
Definition: gc.c:1219
NORETURN
NORETURN(static void negative_size_allocation_error(const char *))
rb_xmalloc_mul_add
void * rb_xmalloc_mul_add(size_t x, size_t y, size_t z)
Definition: gc.c:10176
RVALUE::as
union RVALUE::@3 as
RMatch
Definition: re.h:43
rb_str_memsize
size_t rb_str_memsize(VALUE)
Definition: string.c:1371
rb_gc_location
VALUE rb_gc_location(VALUE value)
Definition: gc.c:8114
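rb_gc_location belongs to the compaction API: an object marked with rb_gc_mark_movable (also indexed in this file) may be relocated by GC.compact, and rb_gc_location(value) returns its current address, or value unchanged if it never moved. A sketch of the matching dmark/dcompact pair for a typed-data object (struct node is hypothetical):

#include "ruby.h"

struct node {
    VALUE payload;
};

static void
node_mark(void *p)
{
    struct node *n = p;
    rb_gc_mark_movable(n->payload);   /* mark without pinning */
}

static void
node_compact(void *p)
{
    struct node *n = p;
    /* After objects move, refresh every stored reference. */
    n->payload = rb_gc_location(n->payload);
}

References marked with plain rb_gc_mark are pinned instead, in which case no dcompact callback is needed.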
TYPE
#define TYPE(x)
Definition: ruby.h:554
rb_const_entry_struct::value
VALUE value
Definition: constant.h:34
st_add_direct
void st_add_direct(st_table *tab, st_data_t key, st_data_t value)
Definition: st.c:1251
rb_aligned_malloc
void * rb_aligned_malloc(size_t alignment, size_t size)
Definition: gc.c:9630
imemo_iseq
@ imemo_iseq
Definition: internal.h:1140
RUBY_INTERNAL_EVENT_GC_ENTER
#define RUBY_INTERNAL_EVENT_GC_ENTER
Definition: ruby.h:2273
gc_stat_compat_sym_heap_live_slot
@ gc_stat_compat_sym_heap_live_slot
Definition: gc.c:8888
rb_method_definition_struct::type
rb_method_type_t type
Definition: rb_mjit_min_header-2.7.2.h:8841
imemo_env
@ imemo_env
Definition: internal.h:1133
weakmap
Definition: gc.c:10335
rb_objspace::pooled_slots
size_t pooled_slots
Definition: gc.c:816
RVALUE::flonum
struct RFloat flonum
Definition: gc.c:576
rb_data_type_struct::function
struct rb_data_type_struct::@8 function
nomem_error
#define nomem_error
Definition: gc.c:995
GET_STACK_BOUNDS
#define GET_STACK_BOUNDS(start, end, appendix)
Definition: gc.c:4950
RRational
Definition: internal.h:788
rb_objspace::gc_sweep_start_time
double gc_sweep_start_time
Definition: gc.c:774
rb_define_module
VALUE rb_define_module(const char *name)
Definition: class.c:772
rb_id_table_iterator_result
rb_id_table_iterator_result
Definition: id_table.h:8
Init_GC
void Init_GC(void)
Definition: gc.c:11860
unsigned
#define unsigned
Definition: rb_mjit_min_header-2.7.2.h:2883
rb_objspace::heap_pages
struct rb_objspace::@10 heap_pages
ruby_rgengc_debug
int ruby_rgengc_debug
Definition: gc.c:388
SIGNED_VALUE
#define SIGNED_VALUE
Definition: ruby.h:104
rb_ast_mark
void rb_ast_mark(rb_ast_t *ast)
Definition: node.c:1340
rb_objspace
#define rb_objspace
Definition: gc.c:900
rb_iseq_path
VALUE rb_iseq_path(const rb_iseq_t *iseq)
Definition: iseq.c:1027
rb_heap_struct::total_pages
size_t total_pages
Definition: gc.c:666
rb_data_object_wrap
VALUE rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
Definition: gc.c:2378
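rb_data_object_wrap is what Data_Wrap_Struct expands to: it allocates a T_DATA object of class klass whose DATA_PTR is datap, calling dmark while marking and dfree when the wrapper is swept. A minimal sketch (struct point and point_alloc are hypothetical):

#include "ruby.h"

struct point { double x, y; };

static VALUE
point_alloc(VALUE klass)
{
    struct point *p = ZALLOC(struct point);
    /* The struct holds no VALUEs, so no mark function is needed;
     * RUBY_DEFAULT_FREE releases it with ruby_xfree. */
    return rb_data_object_wrap(klass, p, NULL, RUBY_DEFAULT_FREE);
}

New code generally prefers the typed variant, rb_data_typed_object_wrap (indexed later in this file), which adds a type tag and memsize accounting.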
ID_SCOPE_MASK
#define ID_SCOPE_MASK
Definition: id.h:32
I
#define I(s)
getenv
#define getenv(name)
Definition: win32.c:73
HEAP_PAGE_ALIGN
@ HEAP_PAGE_ALIGN
Definition: gc.c:835
EC_JUMP_TAG
#define EC_JUMP_TAG(ec, st)
Definition: eval_intern.h:184
RSYMBOL
#define RSYMBOL(obj)
Definition: symbol.h:33
rb_int_ge
VALUE rb_int_ge(VALUE x, VALUE y)
Definition: numeric.c:4292
during_gc
#define during_gc
Definition: gc.c:922
rb_objspace::mode
unsigned int mode
Definition: gc.c:687
UINT2NUM
#define UINT2NUM(x)
Definition: ruby.h:1610
rb_objspace::heap_used_at_gc_start
size_t heap_used_at_gc_start
Definition: gc.c:776
rb_free_generic_ivar
void rb_free_generic_ivar(VALUE)
Definition: variable.c:993
FL_SEEN_OBJ_ID
#define FL_SEEN_OBJ_ID
Definition: ruby.h:1285
wmap_iter_arg::value
VALUE value
Definition: gc.c:10503
GC_ENABLE_INCREMENTAL_MARK
#define GC_ENABLE_INCREMENTAL_MARK
Definition: gc.c:464
rb_gc_disable
VALUE rb_gc_disable(void)
Definition: gc.c:9249
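rb_gc_disable and rb_gc_enable each return the previous state (Qtrue if GC was already disabled), so a critical section can restore exactly what it found rather than unconditionally re-enabling. Sketch:

#include "ruby.h"

static void
gc_sensitive_section(void)
{
    VALUE was_disabled = rb_gc_disable();

    /* ... work that must not be interleaved with a GC cycle ... */

    if (was_disabled == Qfalse) {
        rb_gc_enable();   /* re-enable only if we disabled it */
    }
}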
rb_iseq_constant_body::location
rb_iseq_location_t location
Definition: vm_core.h:399
rb_inspect
VALUE rb_inspect(VALUE)
Convenient wrapper of Object::inspect.
Definition: object.c:551
DWORD
IUnknown DWORD
Definition: win32ole.c:33
fputs
int fputs(const char *__restrict, FILE *__restrict)
IMEMO_NAME
#define IMEMO_NAME(x)
rb_id_table
Definition: id_table.c:40
ruby_stack_check
int ruby_stack_check(void)
Definition: gc.c:4674
rb_mark_end_proc
void rb_mark_end_proc(void)
Definition: eval_jump.c:78
rb_obj_rgengc_promoted_p
VALUE rb_obj_rgengc_promoted_p(VALUE obj)
Definition: gc.c:6973
atexit
int atexit(void(*__func)(void))
exc
const rb_iseq_t const VALUE exc
Definition: rb_mjit_min_header-2.7.2.h:13469
rb_method_definition_struct::refined
rb_method_refined_t refined
Definition: method.h:173
heap_page::start
RVALUE * start
Definition: gc.c:858
verify_internal_consistency_struct::err_count
int err_count
Definition: gc.c:5896
rb_iseq_location_struct::first_lineno
VALUE first_lineno
Definition: vm_core.h:276
rb_objspace::obj_to_id_tbl
st_table * obj_to_id_tbl
Definition: gc.c:823
rb_str_cat2
#define rb_str_cat2
Definition: intern.h:912
STACK_LEVEL_MAX
#define STACK_LEVEL_MAX
Definition: gc.c:4605
rb_objspace::allocatable_pages
size_t allocatable_pages
Definition: gc.c:725
gc_stat_compat_sym_old_object_limit
@ gc_stat_compat_sym_old_object_limit
Definition: gc.c:8896
rb_class_detach_module_subclasses
void rb_class_detach_module_subclasses(VALUE klass)
Definition: class.c:145
rb_method_refined_struct::orig_me
struct rb_method_entry_struct * orig_me
Definition: method.h:147
rmatch::char_offset
struct rmatch_offset * char_offset
Definition: re.h:39
CEILDIV
#define CEILDIV(i, mod)
Definition: gc.c:833
DYNAMIC_SYM_P
#define DYNAMIC_SYM_P(x)
Definition: ruby.h:412
Qundef
#define Qundef
Definition: ruby.h:470
RVALUE::memo
struct MEMO memo
Definition: gc.c:594
heap_cursor::page
struct heap_page * page
Definition: gc.c:7671
rb_define_singleton_method
void rb_define_singleton_method(VALUE obj, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a singleton method for obj.
Definition: class.c:1755
T_RATIONAL
#define T_RATIONAL
Definition: ruby.h:541
CHAR_BIT
#define CHAR_BIT
Definition: ruby.h:227
RHASH_ST_TABLE_FLAG
@ RHASH_ST_TABLE_FLAG
Definition: internal.h:820
EXIT_FAILURE
#define EXIT_FAILURE
Definition: eval_intern.h:32
heap_pages_final_slots
#define heap_pages_final_slots
Definition: gc.c:917
gc_profile_record::heap_use_size
size_t heap_use_size
Definition: gc.c:528
rb_define_method
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Definition: class.c:1551
GET_EC
#define GET_EC()
Definition: vm_core.h:1766
RUBY_DTRACE_GC_HOOK
#define RUBY_DTRACE_GC_HOOK(name)
Definition: gc.c:10898
RVALUE::throw_data
struct vm_throw_data throw_data
Definition: gc.c:592
INT2NUM
#define INT2NUM(x)
Definition: ruby.h:1609
MALLOC_ALLOCATED_SIZE
#define MALLOC_ALLOCATED_SIZE
Definition: gc.c:477
ptr
struct RIMemo * ptr
Definition: debug.c:65
RVALUE::ment
struct rb_method_entry_struct ment
Definition: gc.c:595
vm_ifunc
IFUNC (Internal FUNCtion)
Definition: internal.h:1215
heap_page_body::header
struct heap_page_header header
Definition: gc.c:630
STACK_START
#define STACK_START
Definition: gc.c:4603
rb_obj_id
VALUE rb_obj_id(VALUE obj)
Definition: gc.c:3773
T_DATA
#define T_DATA
Definition: ruby.h:538
rb_objspace_reachable_objects_from
void rb_objspace_reachable_objects_from(VALUE obj, void(func)(VALUE, void *), void *data)
Definition: gc.c:9468
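rb_objspace_reachable_objects_from calls func once for each object directly reachable from obj; the objspace extension builds ObjectSpace.reachable_objects_from on top of it. A sketch that counts an object's out-edges (count_edge and reachable_count are hypothetical helpers):

#include "ruby.h"

static void
count_edge(VALUE child, void *data)
{
    (void)child;
    (*(size_t *)data)++;
}

static size_t
reachable_count(VALUE obj)
{
    size_t n = 0;
    rb_objspace_reachable_objects_from(obj, count_edge, &n);
    return n;
}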
rb_method_iseq_struct::iseqptr
rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition: method.h:127
PRI_PIDT_PREFIX
#define PRI_PIDT_PREFIX
Definition: rb_mjit_min_header-2.7.2.h:103
list_for_each_safe
#define list_for_each_safe(h, i, nxt, member)
Definition: rb_mjit_min_header-2.7.2.h:9061
ruby_xmalloc_body
void * ruby_xmalloc_body(size_t size)
Definition: gc.c:10085
has_sweeping_pages
#define has_sweeping_pages(heap)
Definition: gc.c:970
Qfalse
#define Qfalse
Definition: ruby.h:467
weakmap::obj2wmap
st_table * obj2wmap
Definition: gc.c:10336
is_marking
#define is_marking(objspace)
Definition: gc.c:953
gc_stat_compat_sym_remembered_shady_object
@ gc_stat_compat_sym_remembered_shady_object
Definition: gc.c:8893
uintptr_t
unsigned int uintptr_t
Definition: win32.h:106
rb_stdout
RUBY_EXTERN VALUE rb_stdout
Definition: ruby.h:2090
DBL2NUM
#define DBL2NUM(dbl)
Definition: ruby.h:967
__asan_region_is_poisoned
#define __asan_region_is_poisoned(x, y)
Definition: internal.h:110
GC_HEAP_FREE_SLOTS_GOAL_RATIO
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO
Definition: gc.c:278
ruby_gc_params_t
Definition: gc.c:317
RVALUE::ast
rb_ast_t ast
Definition: gc.c:599
wmap_iter_arg
Definition: gc.c:10501
HEAP_PAGE_SIZE
@ HEAP_PAGE_SIZE
Definition: gc.c:838
rb_objspace::minor_gc_count
size_t minor_gc_count
Definition: gc.c:751
RVALUE_WB_UNPROTECTED_BITMAP
#define RVALUE_WB_UNPROTECTED_BITMAP(obj)
Definition: gc.c:1215
rb_method_definition_struct::alias_count
int alias_count
Definition: method.h:165
rb_objspace::major_gc_count
size_t major_gc_count
Definition: gc.c:752
RMATCH
#define RMATCH(obj)
Definition: re.h:50
rb_id2str
#define rb_id2str(id)
Definition: vm_backtrace.c:30
T_NODE
#define T_NODE
Definition: ruby.h:545
each_obj_args::data
void * data
Definition: gc.c:2946
rb_yield_values
#define rb_yield_values(argc,...)
Definition: rb_mjit_min_header-2.7.2.h:6580
rb_objspace_alloc
rb_objspace_t * rb_objspace_alloc(void)
Definition: gc.c:1587
dp
#define dp(v)
Definition: vm_debug.h:21
SPECIAL_CONST_P
#define SPECIAL_CONST_P(x)
Definition: ruby.h:1313
rb_ary_new3
#define rb_ary_new3
Definition: intern.h:104
list_next
#define list_next(h, i, member)
Definition: rb_mjit_min_header-2.7.2.h:9062
st.h
NULL
#define NULL
Definition: _sdbm.c:101
RVALUE::object
struct RObject object
Definition: gc.c:574
T_COMPLEX
#define T_COMPLEX
Definition: ruby.h:542
heap_cursor::objspace
rb_objspace_t * objspace
Definition: gc.c:7672
gc_list
Definition: gc.c:635
rb_print_backtrace
void rb_print_backtrace(void)
Definition: vm_dump.c:750
ST_DELETE
@ ST_DELETE
Definition: st.h:99
gc_raise_tag::exc
VALUE exc
Definition: gc.c:9524
uint32_t
unsigned int uint32_t
Definition: sha2.h:101
heap_page::before_sweep
unsigned int before_sweep
Definition: gc.c:851
imemo_svar
@ imemo_svar
special variable
Definition: internal.h:1135
gc_stat_sym_count
@ gc_stat_sym_count
Definition: gc.c:8841
rb_gc_mark_vm_stack_values
void rb_gc_mark_vm_stack_values(long n, const VALUE *values)
Definition: gc.c:4742
FL_TEST
#define FL_TEST(x, f)
Definition: ruby.h:1353
rb_class_remove_from_super_subclasses
void rb_class_remove_from_super_subclasses(VALUE klass)
Definition: class.c:76
RVALUE
Definition: gc.c:566
rb_special_const_p
#define rb_special_const_p(obj)
Definition: rb_mjit_min_header-2.7.2.h:5353
FL_WB_PROTECTED
#define FL_WB_PROTECTED
Definition: ruby.h:1279
fmt
const VALUE int int int int int int VALUE char * fmt
Definition: rb_mjit_min_header-2.7.2.h:6458
HEAP_PAGE_ALIGN_LOG
#define HEAP_PAGE_ALIGN_LOG
Definition: gc.c:832
heap_page::flags
struct heap_page::@15 flags
PRIsVALUE
#define PRIsVALUE
Definition: ruby.h:166
PRINTF_ARGS
PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char *,...)), 2, 3)
stack_chunk::next
struct stack_chunk * next
Definition: gc.c:644
gc_stat_sym_total_freed_objects
@ gc_stat_sym_total_freed_objects
Definition: gc.c:8855
heap_pages_sorted_length
#define heap_pages_sorted_length
Definition: gc.c:912
rb_obj_respond_to
int rb_obj_respond_to(VALUE, ID, int)
Definition: vm_method.c:2180
RUBY_T_MASK
@ RUBY_T_MASK
Definition: ruby.h:518
RVALUE_PAGE_UNCOLLECTIBLE
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj)
Definition: gc.c:1220
heap_pages_sorted
#define heap_pages_sorted
Definition: gc.c:910
BUFF_ARGS
#define BUFF_ARGS
FL_SET
#define FL_SET(x, f)
Definition: ruby.h:1359
GC_ENABLE_LAZY_SWEEP
#define GC_ENABLE_LAZY_SWEEP
Definition: gc.c:467
st_insert
int st_insert(st_table *tab, st_data_t key, st_data_t value)
Definition: st.c:1171
FIX2LONG
#define FIX2LONG(x)
Definition: ruby.h:394
ID2SYM
#define ID2SYM(x)
Definition: ruby.h:414
popcount_bits
#define popcount_bits
Definition: gc.c:623
heap_cursor::index
size_t index
Definition: gc.c:7670
rb_method_definition_struct::bmethod
rb_method_bmethod_t bmethod
Definition: method.h:174
strlen
size_t strlen(const char *)
TYPED_UPDATE_IF_MOVED
#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing)
Definition: gc.c:1078
OBJ_FREEZE
#define OBJ_FREEZE(x)
Definition: ruby.h:1377
rb_gc_enable
VALUE rb_gc_enable(void)
Definition: gc.c:9212
T_SYMBOL
#define T_SYMBOL
Definition: ruby.h:540
T_OBJECT
#define T_OBJECT
Definition: ruby.h:523
rb_free_tmp_buffer
void rb_free_tmp_buffer(volatile VALUE *store)
Definition: gc.c:10277
root_objects_data::data
void * data
Definition: gc.c:9485
rb_alloc_tmp_buffer
void * rb_alloc_tmp_buffer(volatile VALUE *store, long len)
Definition: gc.c:10265
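rb_alloc_tmp_buffer backs the ALLOCV family of macros: it returns len bytes whose lifetime is tied to a hidden tmpbuf object stored into *store, so the memory is reclaimed by GC if an exception unwinds past the caller, or immediately by rb_free_tmp_buffer on the normal path. A hedged sketch:

#include "ruby.h"

static void
with_tmp_buffer(long len)
{
    volatile VALUE store = 0;
    char *buf = rb_alloc_tmp_buffer(&store, len);

    /* ... use buf; if Ruby code raises here, the hidden object in
     * `store` is eventually collected and the buffer freed ... */

    rb_free_tmp_buffer(&store);   /* frees buf and clears store */
}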
VM_METHOD_TYPE_IVAR
@ VM_METHOD_TYPE_IVAR
attr_reader or attr_accessor
Definition: method.h:105
rb_hash_set_default_proc
VALUE rb_hash_set_default_proc(VALUE hash, VALUE proc)
Definition: hash.c:2242
rb_mark_set
void rb_mark_set(st_table *tbl)
Definition: gc.c:4801
gc_stress_full_mark_after_malloc_p
#define gc_stress_full_mark_after_malloc_p()
Definition: gc.c:7129
rb_iseq_update_references
void rb_iseq_update_references(rb_iseq_t *iseq)
Definition: iseq.c:221
VM_ASSERT
#define VM_ASSERT(expr)
Definition: vm_core.h:56
ATOMIC_SET
#define ATOMIC_SET(var, val)
Definition: ruby_atomic.h:131
rb_big_hash
VALUE rb_big_hash(VALUE x)
Definition: bignum.c:6726
ruby_gc_params_t::gc_stress
VALUE gc_stress
Definition: gc.c:336
rb_free_method_entry
void rb_free_method_entry(const rb_method_entry_t *me)
Definition: vm_method.c:174
L
#define L(x)
Definition: asm.h:125
imemo_throw_data
@ imemo_throw_data
Definition: internal.h:1136
MEMO
MEMO.
Definition: internal.h:1278
rb_imemo_new
VALUE rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
Definition: gc.c:2308
RB_BLOCK_CALL_FUNC_ARGLIST
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Definition: ruby.h:1964
rb_check_arity
#define rb_check_arity
Definition: intern.h:347
rb_iseq_free
void rb_iseq_free(const rb_iseq_t *iseq)
Definition: iseq.c:89
rb_objspace::current_record
gc_profile_record * current_record
Definition: gc.c:741
RTypedData
Definition: ruby.h:1168
timespec::tv_nsec
long tv_nsec
Definition: missing.h:62
add
#define add(x, y)
Definition: date_strftime.c:23
BITS_SIZE
@ BITS_SIZE
Definition: gc.c:620
LL2NUM
#define LL2NUM(v)
Definition: rb_mjit_min_header-2.7.2.h:4243
onig_region_free
ONIG_EXTERN void onig_region_free(OnigRegion *region, int free_self)
Definition: regexec.c:343
rb_hash_new_with_size
MJIT_FUNC_EXPORTED VALUE rb_hash_new_with_size(st_index_t size)
Definition: hash.c:1529
ruby_initial_gc_stress_ptr
VALUE * ruby_initial_gc_stress_ptr
Definition: gc.c:905
VM_METHOD_TYPE_UNDEF
@ VM_METHOD_TYPE_UNDEF
Definition: method.h:109
gc_stat_compat_sym
gc_stat_compat_sym
Definition: gc.c:8882
heap_cursor
Definition: gc.c:7668
GET_HEAP_PINNED_BITS
#define GET_HEAP_PINNED_BITS(x)
Definition: gc.c:892
ALLOC_N
#define ALLOC_N(type, n)
Definition: ruby.h:1663
RVALUE
struct RVALUE RVALUE
GET_HEAP_UNCOLLECTIBLE_BITS
#define GET_HEAP_UNCOLLECTIBLE_BITS(x)
Definition: gc.c:894
rb_vm_register_special_exception
#define rb_vm_register_special_exception(sp, e, m)
Definition: vm_core.h:1726
imemo_ast
@ imemo_ast
Definition: internal.h:1142
rb_gc_writebarrier
void rb_gc_writebarrier(VALUE a, VALUE b)
Definition: gc.c:6820
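rb_gc_writebarrier(a, b) records that (possibly old) object a now references object b, which is what keeps generational marking sound when an old object gains a young child. Extension code rarely calls it directly; storing through the RB_OBJ_WRITE macro performs the assignment and fires the barrier in one step. Sketch with a hypothetical struct:

#include "ruby.h"

struct holder {
    VALUE child;
};

static void
holder_set_child(VALUE self, struct holder *h, VALUE child)
{
    /* Same as `h->child = child`, plus the barrier: if `self` is
     * old and `child` young, `self` is remembered so the next
     * minor GC still traces `child`. */
    RB_OBJ_WRITE(self, &h->child, child);
}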
RData::dmark
void(* dmark)(void *)
Definition: ruby.h:1141
rb_transient_heap_update_references
void rb_transient_heap_update_references(void)
Definition: transient_heap.c:853
STR_SHARED_P
#define STR_SHARED_P(s)
Definition: internal.h:2164
void
void
Definition: rb_mjit_min_header-2.7.2.h:13241
ruby_global_symbols
rb_symbols_t ruby_global_symbols
Definition: symbol.c:66
rb_iseq_mark
void rb_iseq_mark(const rb_iseq_t *iseq)
Definition: iseq.c:287
gc_profile_record
struct gc_profile_record gc_profile_record
is_full_marking
#define is_full_marking(objspace)
Definition: gc.c:956
GC_HEAP_FREE_SLOTS_MIN_RATIO
#define GC_HEAP_FREE_SLOTS_MIN_RATIO
Definition: gc.c:275
ruby_gc_params_t::oldmalloc_limit_max
size_t oldmalloc_limit_max
Definition: gc.c:333
rb_raise
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2671
each_obj_args::objspace
rb_objspace_t * objspace
Definition: gc.c:2944
each_obj_args::callback
each_obj_callback * callback
Definition: gc.c:2945
verify_internal_consistency_struct::zombie_object_count
size_t zombie_object_count
Definition: gc.c:5898
VM_ENV_FLAG_WB_REQUIRED
@ VM_ENV_FLAG_WB_REQUIRED
Definition: vm_core.h:1188
rb_imemo_tmpbuf_struct::cnt
size_t cnt
Definition: internal.h:1236
force_finalize_list
Definition: gc.c:3424
RESTORE_FINALIZER
#define RESTORE_FINALIZER()
GPR_FLAG_MAJOR_BY_OLDGEN
@ GPR_FLAG_MAJOR_BY_OLDGEN
Definition: gc.c:495
GC_HEAP_INIT_SLOTS
#define GC_HEAP_INIT_SLOTS
Definition: gc.c:259
gc_stat_compat_sym_last
@ gc_stat_compat_sym_last
Definition: gc.c:8906
T_FILE
#define T_FILE
Definition: ruby.h:534
list_for_each
#define list_for_each(h, i, member)
Definition: rb_mjit_min_header-2.7.2.h:9058
rb_execution_context_struct::cfp
rb_control_frame_t * cfp
Definition: vm_core.h:847
rb_eRangeError
VALUE rb_eRangeError
Definition: error.c:928
rb_obj_gc_flags
size_t rb_obj_gc_flags(VALUE obj, ID *flags, size_t max)
Definition: gc.c:6979
ruby_gc_params_t::heap_free_slots
size_t heap_free_slots
Definition: gc.c:319
FL_PROMOTED0
#define FL_PROMOTED0
Definition: ruby.h:1280
rb_objspace::mark_stack
mark_stack_t mark_stack
Definition: gc.c:719
imemo_parser_strterm
@ imemo_parser_strterm
Definition: internal.h:1143
rb_callable_method_entry_struct::called_id
ID called_id
Definition: method.h:63
RUBY_DEFAULT_FREE
#define RUBY_DEFAULT_FREE
Definition: ruby.h:1201
gc_stat_sym_heap_sorted_length
@ gc_stat_sym_heap_sorted_length
Definition: gc.c:8843
if
if((ID)(DISPID) nameid !=nameid)
Definition: win32ole.c:357
LONG2NUM
#define LONG2NUM(x)
Definition: ruby.h:1644
heap_pages_freeable_pages
#define heap_pages_freeable_pages
Definition: gc.c:916
rb_heap_struct::sweeping_page
struct heap_page * sweeping_page
Definition: gc.c:662
ATOMIC_SIZE_EXCHANGE
#define ATOMIC_SIZE_EXCHANGE(var, val)
Definition: ruby_atomic.h:140
ruby_gc_params_t::malloc_limit_growth_factor
double malloc_limit_growth_factor
Definition: gc.c:330
heap_page::page_node
struct list_node page_node
Definition: gc.c:860
rb_xrealloc_mul_add
void * rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z)
Definition: gc.c:10183
rb_objspace_t
struct rb_objspace rb_objspace_t
rb_obj_class
VALUE rb_obj_class(VALUE)
Equivalent to Object#class in Ruby.
Definition: object.c:217
onig_free
ONIG_EXTERN void onig_free(OnigRegex)
ELTS_SHARED
#define ELTS_SHARED
Definition: ruby.h:970
rb_objspace::deferred_final
VALUE deferred_final
Definition: gc.c:732
probes.h
GC_MALLOC_LIMIT_MIN
#define GC_MALLOC_LIMIT_MIN
Definition: gc.c:285
SIZEOF_VALUE
#define SIZEOF_VALUE
Definition: ruby.h:105
OBJ_ID_INITIAL
#define OBJ_ID_INITIAL
Definition: gc.c:2884
RGENGC_ESTIMATE_OLDMALLOC
#define RGENGC_ESTIMATE_OLDMALLOC
Definition: gc.c:431
rb_obj_is_proc
VALUE rb_obj_is_proc(VALUE)
Definition: proc.c:152
RICLASS_IS_ORIGIN
#define RICLASS_IS_ORIGIN
Definition: internal.h:1085
rb_data_typed_object_zalloc
VALUE rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
PRIuSIZE
#define PRIuSIZE
Definition: ruby.h:208
gc_stat_sym_remembered_wb_unprotected_objects
@ gc_stat_sym_remembered_wb_unprotected_objects
Definition: gc.c:8862
rb_vraise
void rb_vraise(VALUE exc, const char *fmt, va_list ap)
Definition: error.c:2665
T_ICLASS
#define T_ICLASS
Definition: ruby.h:525
rb_transient_heap_finish_marking
void rb_transient_heap_finish_marking(void)
Definition: transient_heap.c:916
ULONG2NUM
#define ULONG2NUM(x)
Definition: ruby.h:1645
GC_PROFILE_RECORD_DEFAULT_SIZE
#define GC_PROFILE_RECORD_DEFAULT_SIZE
Definition: gc.c:10759
FIXNUM_FLAG
#define FIXNUM_FLAG
Definition: ruby.h:472
rb_ast_struct
Definition: node.h:399
gc_profile_record::heap_total_objects
size_t heap_total_objects
Definition: gc.c:527
MARK_OBJECT_ARY_BUCKET_SIZE
#define MARK_OBJECT_ARY_BUCKET_SIZE
Definition: gc.c:7062
rb_method_type_name
const char * rb_method_type_name(rb_method_type_t type)
Definition: gc.c:11406
rb_objspace::marked_slots
size_t marked_slots
Definition: gc.c:720
imemo_type
imemo_type
Definition: internal.h:1132
rb_objspace_markable_object_p
int rb_objspace_markable_object_p(VALUE obj)
Definition: gc.c:3600
mark_stack::unused_cache_size
size_t unused_cache_size
Definition: gc.c:653
rb_method_attr_struct::location
VALUE location
Definition: method.h:139
vm_svar
SVAR (Special VARiable)
Definition: internal.h:1181
RVALUE::moved
struct RMoved moved
Definition: gc.c:572
OLD_SYM
#define OLD_SYM(s)
snprintf
int snprintf(char *__restrict, size_t, const char *__restrict,...) __attribute__((__format__(__printf__, 3, 4)))
ST_REPLACE
@ ST_REPLACE
Definition: st.h:99
RVALUE::iseq
const rb_iseq_t iseq
Definition: gc.c:596
rb_objspace_internal_object_p
int rb_objspace_internal_object_p(VALUE obj)
Definition: gc.c:3097
RCLASS_IV_INDEX_TBL
#define RCLASS_IV_INDEX_TBL(c)
Definition: internal.h:1074
rb_objspace::step_slots
size_t step_slots
Definition: gc.c:817
rb_gc_mark_maybe
void rb_gc_mark_maybe(VALUE obj)
Definition: gc.c:5047
RFile::fptr
struct rb_io_t * fptr
Definition: ruby.h:1136
VM_ENV_DATA_INDEX_ENV
#define VM_ENV_DATA_INDEX_ENV
Definition: vm_core.h:1196
gc_stat_sym_old_objects
@ gc_stat_sym_old_objects
Definition: gc.c:8864
mjit_remove_class_serial
void mjit_remove_class_serial(rb_serial_t class_serial)
gc_stat_sym_heap_live_slots
@ gc_stat_sym_heap_live_slots
Definition: gc.c:8846
DATA_PTR
#define DATA_PTR(dta)
Definition: ruby.h:1175
rb_gc_unregister_address
void rb_gc_unregister_address(VALUE *addr)
Definition: gc.c:7092
TYPE_NAME
#define TYPE_NAME(t)
verify_internal_consistency_struct::remembered_shady_count
size_t remembered_shady_count
Definition: gc.c:5903
klass
VALUE klass
Definition: rb_mjit_min_header-2.7.2.h:13222
ruby_error_nomemory
@ ruby_error_nomemory
Definition: vm_core.h:508
TRY_WITH_GC
#define TRY_WITH_GC(alloc)
Definition: gc.c:9841
rb_malloc_info_show_results
void rb_malloc_info_show_results(void)
Definition: gc.c:9999
VM_UNREACHABLE
#define VM_UNREACHABLE(func)
Definition: vm_core.h:57
rb_heap_struct
Definition: gc.c:656
LIKELY
#define LIKELY(x)
Definition: ffi_common.h:125
EC_POP_TAG
#define EC_POP_TAG()
Definition: eval_intern.h:137
verify_internal_consistency_struct::old_object_count
size_t old_object_count
Definition: gc.c:5902
timespec::tv_sec
time_t tv_sec
Definition: missing.h:61
rb_imemo_tmpbuf_struct::ptr
VALUE * ptr
Definition: internal.h:1234
rb_objspace::moved_count_table
size_t moved_count_table[T_MASK]
Definition: gc.c:811
rb_cBasicObject
RUBY_EXTERN VALUE rb_cBasicObject
Definition: ruby.h:2009
FL_ABLE
#define FL_ABLE(x)
Definition: ruby.h:1351
RArray::heap
struct RArray::@5::@6 heap
time
time_t time(time_t *_timer)
MARKED_IN_BITMAP
#define MARKED_IN_BITMAP(bits, p)
Definition: gc.c:886
rb_objspace::size
size_t size
Definition: gc.c:743
gc_stat_compat_sym_oldmalloc_limit
@ gc_stat_compat_sym_oldmalloc_limit
Definition: gc.c:8904
RCLASS_M_TBL
#define RCLASS_M_TBL(c)
Definition: internal.h:1069
rb_check_frozen
#define rb_check_frozen(obj)
Definition: intern.h:319
rb_callable_method_entry_struct::defined_class
const VALUE defined_class
Definition: method.h:61
RSTRUCT_TRANSIENT_P
#define RSTRUCT_TRANSIENT_P(st)
Definition: internal.h:933
BITS_BITLENGTH
@ BITS_BITLENGTH
Definition: gc.c:621
rb_objspace::rgengc
struct rb_objspace::@12 rgengc
rb_objspace::run
int run
Definition: gc.c:738
rb_method_definition_struct::alias
rb_method_alias_t alias
Definition: method.h:172
GPR_FLAG_IMMEDIATE_SWEEP
@ GPR_FLAG_IMMEDIATE_SWEEP
Definition: gc.c:511
rb_objspace::tomb_heap
rb_heap_t tomb_heap
Definition: gc.c:708
h
size_t st_index_t h
Definition: rb_mjit_min_header-2.7.2.h:5458
RCLASS_CONST_TBL
#define RCLASS_CONST_TBL(c)
Definition: internal.h:1067
ruby_gc_params_t::heap_free_slots_goal_ratio
double heap_free_slots_goal_ratio
Definition: gc.c:324
heap_page::has_remembered_objects
unsigned int has_remembered_objects
Definition: gc.c:852
gc_stat_compat_sym_remembered_shady_object_limit
@ gc_stat_compat_sym_remembered_shady_object_limit
Definition: gc.c:8894
rb_obj_is_thread
VALUE rb_obj_is_thread(VALUE obj)
Definition: vm.c:2655
RVALUE::hash
struct RHash hash
Definition: gc.c:580
UNEXPECTED_NODE
#define UNEXPECTED_NODE(func)
Definition: gc.c:2301
RRegexp
Definition: ruby.h:1112
SET
#define SET(name, attr)
st_init_numtable_with_size
st_table * st_init_numtable_with_size(st_index_t size)
Definition: st.c:660
ATOMIC_SIZE_CAS
#define ATOMIC_SIZE_CAS(var, oldval, val)
Definition: ruby_atomic.h:163
rb_hash_compare_by_id_p
MJIT_FUNC_EXPORTED VALUE rb_hash_compare_by_id_p(VALUE hash)
Definition: hash.c:4267
pc
rb_control_frame_t const VALUE * pc
Definition: rb_mjit_min_header-2.7.2.h:16903
bool
#define bool
Definition: stdbool.h:13
mark_stack::cache
stack_chunk_t * cache
Definition: gc.c:649
list_head
Definition: rb_mjit_min_header-2.7.2.h:8940
ruby_xrealloc
void * ruby_xrealloc(void *ptr, size_t new_size)
Definition: gc.c:12008
rb_iseq_location_struct::label
VALUE label
Definition: vm_core.h:275
gc_stat_sym
gc_stat_sym
Definition: gc.c:8840
T_FIXNUM
#define T_FIXNUM
Definition: ruby.h:535
malloc_limit
#define malloc_limit
Definition: gc.c:907
gc_stat_compat_sym_total_freed_object
@ gc_stat_compat_sym_total_freed_object
Definition: gc.c:8899
GPR_FLAG_FULL_MARK
@ GPR_FLAG_FULL_MARK
Definition: gc.c:514
RVALUE::alloc
struct rb_imemo_tmpbuf_struct alloc
Definition: gc.c:598
USE_RGENGC
#define USE_RGENGC
Definition: ruby.h:791
rb_objspace::profile
struct rb_objspace::@11 profile
rb_ary_cat
VALUE rb_ary_cat(VALUE ary, const VALUE *argv, long len)
Definition: array.c:1208
rb_objspace::need_major_gc
int need_major_gc
Definition: gc.c:791
st_foreach_callback_func
int st_foreach_callback_func(st_data_t, st_data_t, st_data_t)
Definition: st.h:137
weakmap::final
VALUE final
Definition: gc.c:10338
APPENDF
#define APPENDF(f)
RSTRUCT_EMBED_LEN_MASK
#define RSTRUCT_EMBED_LEN_MASK
Definition: internal.h:920
rb_io_memsize
RUBY_FUNC_EXPORTED size_t rb_io_memsize(const rb_io_t *fptr)
Definition: io.c:4760
rb_ary_tmp_new
VALUE rb_ary_tmp_new(long capa)
Definition: array.c:768
rb_hash_ar_table_size
size_t rb_hash_ar_table_size(void)
Definition: hash.c:355
FL_EXIVAR
#define FL_EXIVAR
Definition: ruby.h:1286
rb_ast_memsize
size_t rb_ast_memsize(const rb_ast_t *ast)
Definition: node.c:1373
rb_objspace::rincgc
struct rb_objspace::@14 rincgc
RHASH_EMPTY_P
#define RHASH_EMPTY_P(h)
Definition: ruby.h:1131
gc_list::next
struct gc_list * next
Definition: gc.c:637
RFile
Definition: ruby.h:1134
RZombie
Definition: gc.c:986
rb_clear_constant_cache
void rb_clear_constant_cache(void)
Definition: vm_method.c:87
heap_page::free_slots
short free_slots
Definition: gc.c:847
st_data_t
RUBY_SYMBOL_EXPORT_BEGIN typedef unsigned long st_data_t
Definition: st.h:22
st_numhash
st_index_t st_numhash(st_data_t n)
Definition: st.c:2176
GPR_DEFAULT_REASON
@ GPR_DEFAULT_REASON
Definition: gc.c:516
force_finalize_list::obj
VALUE obj
Definition: gc.c:3425
VM_METHOD_TYPE_CFUNC
@ VM_METHOD_TYPE_CFUNC
C method.
Definition: method.h:103
heap_pages_deferred_final
#define heap_pages_deferred_final
Definition: gc.c:918
ID_TABLE_REPLACE
@ ID_TABLE_REPLACE
Definition: id_table.h:12
GPR_FLAG_METHOD
@ GPR_FLAG_METHOD
Definition: gc.c:506
rb_jmp_buf
#define rb_jmp_buf
Definition: gc.c:82
rb_ast_update_references
void rb_ast_update_references(rb_ast_t *ast)
Definition: node.c:1330
st_init_strtable
st_table * st_init_strtable(void)
Definition: st.c:668
RCLASS
#define RCLASS(obj)
Definition: ruby.h:1269
rb_execution_context_struct::errinfo
VALUE errinfo
Definition: vm_core.h:875
obj_id_to_ref
#define obj_id_to_ref(objid)
Definition: gc.c:975
ruby_xcalloc_body
void * ruby_xcalloc_body(size_t n, size_t size)
Definition: gc.c:10118
rb_ary_last
VALUE rb_ary_last(int argc, const VALUE *argv, VALUE ary)
Definition: array.c:1677
rb_imemo_tmpbuf_struct::next
struct rb_imemo_tmpbuf_struct * next
Definition: internal.h:1235
heap_pages_lomem
#define heap_pages_lomem
Definition: gc.c:913
T_REGEXP
#define T_REGEXP
Definition: ruby.h:529
RVALUE_OLD_AGE
#define RVALUE_OLD_AGE
Definition: gc.c:1223
VM_METHOD_TYPE_NOTIMPLEMENTED
@ VM_METHOD_TYPE_NOTIMPLEMENTED
Definition: method.h:110
rb_gc_writebarrier_unprotect
void rb_gc_writebarrier_unprotect(VALUE obj)
Definition: gc.c:6841
rb_objspace::next_index
size_t next_index
Definition: gc.c:742
MARK_IN_BITMAP
#define MARK_IN_BITMAP(bits, p)
Definition: gc.c:887
ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS
ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void mark_locations_array(rb_objspace_t *objspace, register const VALUE *x, register long n))
FL_UNSET
#define FL_UNSET(x, f)
Definition: ruby.h:1361
GPR_FLAG_MALLOC
@ GPR_FLAG_MALLOC
Definition: gc.c:505
rb_cref_struct
CREF (Class REFerence)
Definition: method.h:41
getpid
pid_t getpid(void)
mjit.h
HEAP_PAGE_BITMAP_LIMIT
@ HEAP_PAGE_BITMAP_LIMIT
Definition: gc.c:840
RCLASS_SERIAL
#define RCLASS_SERIAL(c)
Definition: internal.h:1078
rb_hash_lookup
VALUE rb_hash_lookup(VALUE hash, VALUE key)
Definition: hash.c:2063
rb_errinfo
VALUE rb_errinfo(void)
The current exception in the current thread.
Definition: eval.c:1882
rb_objspace::range
RVALUE * range[2]
Definition: gc.c:727
rb_ary_push
VALUE rb_ary_push(VALUE ary, VALUE item)
Definition: array.c:1195
ruby_xcalloc
void * ruby_xcalloc(size_t n, size_t size)
Definition: gc.c:11998
GET_HEAP_PAGE
#define GET_HEAP_PAGE(x)
Definition: gc.c:878
st_index_t
st_data_t st_index_t
Definition: st.h:50
RComplex::real
VALUE real
Definition: internal.h:807
st_hash_type
Definition: st.h:61
vm_throw_data
THROW_DATA.
Definition: internal.h:1193
VM_METHOD_TYPE_BMETHOD
@ VM_METHOD_TYPE_BMETHOD
Definition: method.h:106
EC_EXEC_TAG
#define EC_EXEC_TAG()
Definition: eval_intern.h:181
rb_env_t
Definition: vm_core.h:1055
objspace_and_reason::objspace
rb_objspace_t * objspace
Definition: gc.c:7373
cnt
rb_atomic_t cnt[RUBY_NSIG]
Definition: signal.c:503
me
const rb_callable_method_entry_t * me
Definition: rb_mjit_min_header-2.7.2.h:13194
ruby_sized_xrealloc2
void * ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
Definition: gc.c:10146
transient_heap.h
rb_obj_freeze
VALUE rb_obj_freeze(VALUE)
Make the object unmodifiable.
Definition: object.c:1080
rb_method_definition_struct::iseq
rb_method_iseq_t iseq
Definition: method.h:169
RARRAY_AREF
#define RARRAY_AREF(a, i)
Definition: psych_emitter.c:7
RHASH_IFNONE
#define RHASH_IFNONE(h)
Definition: ruby.h:1129
rb_transient_heap_promote
void rb_transient_heap_promote(VALUE obj)
Definition: transient_heap.c:640
onig_region_memsize
size_t onig_region_memsize(const OnigRegion *regs)
Definition: regcomp.c:5669
rb_classext_struct::refined_class
const VALUE refined_class
Definition: internal.h:1040
vm_core.h
RCLASS_CALLABLE_M_TBL
#define RCLASS_CALLABLE_M_TBL(c)
Definition: internal.h:1073
GPR_FLAG_MAJOR_BY_SHADY
@ GPR_FLAG_MAJOR_BY_SHADY
Definition: gc.c:496
CHECK
#define CHECK(sub)
Definition: compile.c:448
rb_source_location_cstr
const char * rb_source_location_cstr(int *pline)
Definition: vm.c:1376
ruby_xmalloc2_body
void * ruby_xmalloc2_body(size_t n, size_t size)
Definition: gc.c:10102
rb_eTypeError
VALUE rb_eTypeError
Definition: error.c:924
gc_stat_sym_remembered_wb_unprotected_objects_limit
@ gc_stat_sym_remembered_wb_unprotected_objects_limit
Definition: gc.c:8863
rb_gc_mark_machine_stack
void rb_gc_mark_machine_stack(const rb_execution_context_t *ec)
Definition: gc.c:4984
RBASIC_CLASS
#define RBASIC_CLASS(obj)
Definition: ruby.h:906
SIZED_REALLOC_N
#define SIZED_REALLOC_N(var, type, n, old_n)
Definition: internal.h:1663
RRational::den
VALUE den
Definition: internal.h:791
rb_obj_is_mutex
VALUE rb_obj_is_mutex(VALUE obj)
Definition: thread_sync.c:131
rb_heap_t
struct rb_heap_struct rb_heap_t
ALLOC
#define ALLOC(type)
Definition: ruby.h:1664
STACK_CHUNK_SIZE
#define STACK_CHUNK_SIZE
Definition: gc.c:640
T_CLASS
#define T_CLASS
Definition: ruby.h:524
rb_method_alias_struct::original_me
struct rb_method_entry_struct * original_me
Definition: method.h:143
list_add
#define list_add(h, n)
Definition: rb_mjit_min_header-2.7.2.h:8968
T_MATCH
#define T_MATCH
Definition: ruby.h:539
rb_free_const_table
void rb_free_const_table(struct rb_id_table *tbl)
Definition: gc.c:2493
idCall
@ idCall
Definition: rb_mjit_min_header-2.7.2.h:8691
RARRAY_EMBED_FLAG
@ RARRAY_EMBED_FLAG
Definition: ruby.h:1029
rb_objspace::rcompactor
struct rb_objspace::@13 rcompactor
rb_eRuntimeError
VALUE rb_eRuntimeError
Definition: error.c:922
rb_gc_mark_global_tbl
void rb_gc_mark_global_tbl(void)
Definition: variable.c:434
rb_objspace::immediate_sweep
unsigned int immediate_sweep
Definition: gc.c:688
HEAP_PAGE_BITMAP_SIZE
@ HEAP_PAGE_BITMAP_SIZE
Definition: gc.c:841
gc_mode
gc_mode
Definition: gc.c:670
rb_imemo_tmpbuf_parser_heap
rb_imemo_tmpbuf_t * rb_imemo_tmpbuf_parser_heap(void *buf, rb_imemo_tmpbuf_t *old_heap, size_t cnt)
Definition: gc.c:2328
rb_objspace::total_freed_objects
size_t total_freed_objects
Definition: gc.c:780
rb_size_mul_or_raise
size_t rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
Definition: gc.c:192
rb_objspace::gc_stressful
unsigned int gc_stressful
Definition: gc.c:693
rb_gc_copy_finalizer
void rb_gc_copy_finalizer(VALUE dest, VALUE obj)
Definition: gc.c:3297
RETURN_ENUMERATOR
#define RETURN_ENUMERATOR(obj, argc, argv)
Definition: intern.h:279
mod
#define mod(x, y)
Definition: date_strftime.c:28
rb_control_frame_struct
Definition: vm_core.h:760
RHASH_TRANSIENT_P
#define RHASH_TRANSIENT_P(hash)
Definition: internal.h:870
rb_mv_generic_ivar
void rb_mv_generic_ivar(VALUE src, VALUE dst)
Definition: variable.c:983
rb_newobj_of
VALUE rb_newobj_of(VALUE klass, VALUE flags)
Definition: gc.c:2296
FL_USHIFT
#define FL_USHIFT
Definition: ruby.h:1289
ROBJECT_IVPTR
#define ROBJECT_IVPTR(o)
Definition: ruby.h:937
gc_stat_sym_heap_final_slots
@ gc_stat_sym_heap_final_slots
Definition: gc.c:8848
RTYPEDDATA_DATA
#define RTYPEDDATA_DATA(v)
Definition: ruby.h:1179
heap_page::total_slots
short total_slots
Definition: gc.c:846
symbol.h
size
int size
Definition: encoding.c:58
gc_stat_compat_sym_oldmalloc_increase
@ gc_stat_compat_sym_oldmalloc_increase
Definition: gc.c:8903
FALSE
#define FALSE
Definition: nkf.h:174
rb_vm_mark
void rb_vm_mark(void *ptr)
Definition: vm.c:2243
ruby_gc_params_t::oldmalloc_limit_growth_factor
double oldmalloc_limit_growth_factor
Definition: gc.c:334
FIXNUM_P
#define FIXNUM_P(f)
Definition: ruby.h:396
optional::right
size_t right
Definition: gc.c:92
GET_HEAP_MARKING_BITS
#define GET_HEAP_MARKING_BITS(x)
Definition: gc.c:896
RString
Definition: ruby.h:988
rb_gc_adjust_memory_usage
void rb_gc_adjust_memory_usage(ssize_t diff)
Definition: gc.c:10320
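rb_gc_adjust_memory_usage lets an extension report memory obtained outside ruby_xmalloc (for example from a bundled C library) so the GC's malloc accounting reflects the real footprint: pass a positive diff after allocating and the matching negative diff after freeing. A sketch (tracked_malloc and tracked_free are hypothetical wrappers):

#include "ruby.h"
#include <stdlib.h>

static void *
tracked_malloc(size_t n)
{
    void *p = malloc(n);
    if (p) rb_gc_adjust_memory_usage((ssize_t)n);  /* add GC pressure */
    return p;
}

static void
tracked_free(void *p, size_t n)
{
    free(p);
    rb_gc_adjust_memory_usage(-(ssize_t)n);        /* balance the report */
}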
rb_ast_free
void rb_ast_free(rb_ast_t *ast)
Definition: node.c:1352
rb_objspace_marked_object_p
int rb_objspace_marked_object_p(VALUE obj)
Definition: gc.c:5225
RCLASS_SUPER
#define RCLASS_SUPER(c)
Definition: classext.h:16
gc_stress_no_major
@ gc_stress_no_major
Definition: gc.c:7123
rb_gc
void rb_gc(void)
Definition: gc.c:8682
rb_gc_mark_movable
void rb_gc_mark_movable(VALUE ptr)
Definition: gc.c:5209
rb_to_int
VALUE rb_to_int(VALUE)
Converts val into Integer.
Definition: object.c:3021
gc_stat_sym_malloc_increase_bytes
@ gc_stat_sym_malloc_increase_bytes
Definition: gc.c:8856
rb_objspace_free
void rb_objspace_free(rb_objspace_t *objspace)
Definition: gc.c:1602
gc_stat_sym_heap_tomb_pages
@ gc_stat_sym_heap_tomb_pages
Definition: gc.c:8851
rb_id_table_memsize
size_t rb_id_table_memsize(const struct rb_id_table *tbl)
Definition: id_table.c:123
POP_MARK_FUNC_DATA
#define POP_MARK_FUNC_DATA()
Definition: gc.c:1102
VM_METHOD_TYPE_ZSUPER
@ VM_METHOD_TYPE_ZSUPER
Definition: method.h:107
ruby_xmalloc2
void * ruby_xmalloc2(size_t n, size_t size)
Definition: gc.c:11988
arg
VALUE arg
Definition: rb_mjit_min_header-2.7.2.h:5597
rb_objspace::flags
struct rb_objspace::@8 flags
rb_objspace::latest_gc_info
int latest_gc_info
Definition: gc.c:739
verify_internal_consistency_struct::objspace
rb_objspace_t * objspace
Definition: gc.c:5895
rb_gc_writebarrier_remember
MJIT_FUNC_EXPORTED void rb_gc_writebarrier_remember(VALUE obj)
Definition: gc.c:6878
rb_obj_is_fiber
VALUE rb_obj_is_fiber(VALUE obj)
Definition: cont.c:1041
mark_stack::cache_size
size_t cache_size
Definition: gc.c:652
rb_gc_free_dsymbol
void rb_gc_free_dsymbol(VALUE)
Definition: symbol.c:678
vsnprintf
int vsnprintf(char *__restrict, size_t, const char *__restrict, __gnuc_va_list) __attribute__((__format__(__printf__, 3, 0)))
list
struct rb_encoding_entry * list
Definition: encoding.c:56
MEMZERO
#define MEMZERO(p, type, n)
Definition: ruby.h:1752
GPR_FLAG_NONE
@ GPR_FLAG_NONE
Definition: gc.c:492
gc_raise_tag::fmt
const char * fmt
Definition: gc.c:9525
rb_gc_start
VALUE rb_gc_start(void)
Definition: gc.c:8675
rb_gc_register_address
void rb_gc_register_address(VALUE *addr)
Definition: gc.c:7080
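rb_gc_register_address turns a C global or static VALUE variable into a GC root: whatever *addr holds at mark time is kept alive until rb_gc_unregister_address (indexed above) removes it. rb_global_variable is the conventional entry point when the registration is permanent. Sketch (cached_ary and Init_cache are hypothetical):

#include "ruby.h"

static VALUE cached_ary;

void
Init_cache(void)
{
    cached_ary = rb_ary_new();
    /* Root the variable, not the object: reassigning cached_ary
     * later automatically protects the new value instead. */
    rb_gc_register_address(&cached_ary);
}

static void
drop_cache(void)
{
    rb_gc_unregister_address(&cached_ary);
    cached_ary = Qnil;        /* former value is now collectable */
}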
gc_stat_sym_heap_eden_pages
@ gc_stat_sym_heap_eden_pages
Definition: gc.c:8850
RGENGC_OLD_NEWOBJ_CHECK
#define RGENGC_OLD_NEWOBJ_CHECK
Definition: gc.c:412
RHASH
#define RHASH(obj)
Definition: internal.h:859
rb_iseq_location_struct::pathobj
VALUE pathobj
Definition: vm_core.h:273
RVALUE::cref
rb_cref_t cref
Definition: gc.c:590
rb_gc_stat
size_t rb_gc_stat(VALUE key)
Definition: gc.c:9177
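rb_gc_stat is the C-level face of GC.stat: given a Symbol it returns that counter as a size_t; given a Hash it fills in every known key. A sketch:

#include "ruby.h"
#include <stdio.h>

static void
log_gc_counts(void)
{
    size_t runs = rb_gc_stat(ID2SYM(rb_intern("count")));
    size_t old  = rb_gc_stat(ID2SYM(rb_intern("old_objects")));
    fprintf(stderr, "GC runs: %"PRIuSIZE", old objects: %"PRIuSIZE"\n",
            runs, old);
}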
RArray
Definition: ruby.h:1048
strdup
char * strdup(const char *) __attribute__((__malloc__)) __attribute__((__warn_unused_result__))
NOINLINE
NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, rb_objspace_t *objspace))
T_NONE
#define T_NONE
Definition: ruby.h:521
N
#define N
Definition: lgamma_r.c:20
heap_page
Definition: gc.c:845
ROBJECT
#define ROBJECT(obj)
Definition: ruby.h:1268
rb_data_object_zalloc
VALUE rb_data_object_zalloc(VALUE, size_t, RUBY_DATA_FUNC, RUBY_DATA_FUNC)
rb_objspace::sorted
struct heap_page ** sorted
Definition: gc.c:723
rb_objspace::eden_heap
rb_heap_t eden_heap
Definition: gc.c:707
REQUIRED_SIZE_BY_MALLOC
@ REQUIRED_SIZE_BY_MALLOC
Definition: gc.c:837
rb_class_path_cached
VALUE rb_class_path_cached(VALUE)
Definition: variable.c:162
rb_data_type_struct::dcompact
void(* dcompact)(void *)
Definition: ruby.h:1154
rb_ec_raised_set
#define rb_ec_raised_set(ec, f)
Definition: eval_intern.h:258
ruby_xrealloc2
void * ruby_xrealloc2(void *ptr, size_t n, size_t new_size)
Definition: gc.c:12018
st_update
int st_update(st_table *tab, st_data_t key, st_update_callback_func *func, st_data_t arg)
Definition: st.c:1510
RGENGC_DEBUG
#define RGENGC_DEBUG
Definition: gc.c:380
NEW_SYM
#define NEW_SYM(s)
RString::heap
struct RString::@2::@3 heap
heap_page::free_next
struct heap_page * free_next
Definition: gc.c:857
key
key
Definition: openssl_missing.h:181
T_HASH
#define T_HASH
Definition: ruby.h:531
RCLASS_IV_TBL
#define RCLASS_IV_TBL(c)
Definition: internal.h:1066
heap_page::in_tomb
unsigned int in_tomb
Definition: gc.c:854
rb_objspace::allocated_pages
size_t allocated_pages
Definition: gc.c:724
rb_scan_args
#define rb_scan_args(argc, argvp, fmt,...)
Definition: rb_mjit_min_header-2.7.2.h:6368
RHASH_ST_TABLE_P
#define RHASH_ST_TABLE_P(h)
Definition: internal.h:861
gc_profile_record_flag
gc_profile_record_flag
Definition: gc.c:491
SET_STACK_END
#define SET_STACK_END
Definition: gc.c:4601
rb_heap_struct::freelist
RVALUE * freelist
Definition: gc.c:657
strtoll
long long strtoll(const char *__restrict __n, char **__restrict __end_PTR, int __base)
rb_global_variable
void rb_global_variable(VALUE *var)
Definition: gc.c:7115
RCLASS_EXT
#define RCLASS_EXT(c)
Definition: classext.h:15
rb_copy_wb_protected_attribute
void rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
Definition: gc.c:6941
RUBY_INTERNAL_EVENT_OBJSPACE_MASK
#define RUBY_INTERNAL_EVENT_OBJSPACE_MASK
Definition: ruby.h:2275
RHash
Definition: internal.h:887
CLASS_OF
#define CLASS_OF(v)
Definition: ruby.h:484
rb_str_buf_append
VALUE rb_str_buf_append(VALUE, VALUE)
Definition: string.c:2950
gc_stress_max
@ gc_stress_max
Definition: gc.c:7126
rb_io_t::writeconv_asciicompat
VALUE writeconv_asciicompat
Definition: io.h:96
T_MODULE
#define T_MODULE
Definition: ruby.h:526
ruby_mimfree
void ruby_mimfree(void *ptr)
Definition: gc.c:10237
force_finalize_list::next
struct force_finalize_list * next
Definition: gc.c:3427
rb_gcdebug_print_obj_condition
void rb_gcdebug_print_obj_condition(VALUE obj)
RVALUE::ifunc
struct vm_ifunc ifunc
Definition: gc.c:593
list_del
#define list_del(n)
Definition: rb_mjit_min_header-2.7.2.h:9005
RVALUE::v2
VALUE v2
Definition: gc.c:604
gc_stat_sym_minor_gc_count
@ gc_stat_sym_minor_gc_count
Definition: gc.c:8859
heap_allocated_pages
#define heap_allocated_pages
Definition: gc.c:911
gc_event_hook_available_p
#define gc_event_hook_available_p(objspace)
Definition: gc.c:2101
mjit_gc_start_hook
void mjit_gc_start_hook(void)
RMoved
Definition: internal.h:908
TAG_RAISE
#define TAG_RAISE
Definition: vm_core.h:203
RARRAY_LEN
#define RARRAY_LEN(a)
Definition: ruby.h:1070
RAISED_NOMEMORY
@ RAISED_NOMEMORY
Definition: eval_intern.h:256
RUBY_INTERNAL_EVENT_GC_EXIT
#define RUBY_INTERNAL_EVENT_GC_EXIT
Definition: ruby.h:2274
st_foreach
int st_foreach(st_table *tab, st_foreach_callback_func *func, st_data_t arg)
Definition: st.c:1718
RHASH_AR_TABLE
#define RHASH_AR_TABLE(hash)
Definition: internal.h:855
ruby_get_stack_grow_direction
int ruby_get_stack_grow_direction(volatile VALUE *addr)
Definition: gc.c:4623
imemo_ment
@ imemo_ment
Definition: internal.h:1139
rb_define_module_function
void rb_define_module_function(VALUE module, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a module function for module.
Definition: class.c:1771
rb_method_bmethod_struct::hooks
struct rb_hook_list_struct * hooks
Definition: method.h:153
rb_objspace::finalizer_table
st_table * finalizer_table
Definition: gc.c:735
RSTRUCT_LEN
#define RSTRUCT_LEN(st)
Definition: ruby.h:1255
verify_internal_consistency_struct::parent
VALUE parent
Definition: gc.c:5901
rb_id_table_free
void rb_id_table_free(struct rb_id_table *tbl)
Definition: id_table.c:102
UPDATE_IF_MOVED
#define UPDATE_IF_MOVED(_objspace, _thing)
Definition: gc.c:1084
optional::left
bool left
Definition: gc.c:91
ruby_gc_params_t::oldmalloc_limit_min
size_t oldmalloc_limit_min
Definition: gc.c:332
rb_method_type_t
rb_method_type_t
Definition: method.h:101
rb_hook_list_mark
void rb_hook_list_mark(rb_hook_list_t *hooks)
Definition: vm_trace.c:53
strtol
long strtol(const char *__restrict __n, char **__restrict __end_PTR, int __base)
FL_TEST_RAW
#define FL_TEST_RAW(x, f)
Definition: ruby.h:1352
rb_method_refined_struct::owner
VALUE owner
Definition: method.h:148
LONG_LONG
#define LONG_LONG
Definition: rb_mjit_min_header-2.7.2.h:3942
rb_objspace::during_compacting
unsigned int during_compacting
Definition: gc.c:692
heap_page::mark_bits
bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:866
ATOMIC_SIZE_ADD
#define ATOMIC_SIZE_ADD(var, val)
Definition: ruby_atomic.h:138
rb_cObject
RUBY_EXTERN VALUE rb_cObject
Definition: ruby.h:2010
rb_event_flag_t
uint32_t rb_event_flag_t
Definition: ruby.h:2278
GC_HEAP_GROWTH_FACTOR
#define GC_HEAP_GROWTH_FACTOR
Definition: gc.c:265
verify_internal_consistency_struct
Definition: gc.c:5894
is_lazy_sweeping
#define is_lazy_sweeping(heap)
Definition: gc.c:971
buf
unsigned char buf[MIME_BUF_SIZE]
Definition: nkf.c:4322
RVALUE_AGE_SHIFT
#define RVALUE_AGE_SHIFT
Definition: gc.c:1224
rb_classext_t
struct rb_classext_struct rb_classext_t
Definition: internal.h:1045
obj
const VALUE VALUE obj
Definition: rb_mjit_min_header-2.7.2.h:5738
T_BIGNUM
#define T_BIGNUM
Definition: ruby.h:533
gc_mode_none
@ gc_mode_none
Definition: gc.c:671
TypedData_Get_Struct
#define TypedData_Get_Struct(obj, type, data_type, sval)
Definition: ruby.h:1252
rb_data_typed_object_wrap
VALUE rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
Definition: gc.c:2399
rb_objspace_each_objects
void rb_objspace_each_objects(each_obj_callback *callback, void *data)
Definition: gc.c:3027
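rb_objspace_each_objects hands the callback one heap page at a time as a [start, end) range plus the slot stride; returning non-zero from the callback stops the walk. Empty slots have zeroed flags, so a liveness check is required before touching a slot. A sketch that counts live strings, following the callback contract documented in gc.c (count_strings_i and count_strings are hypothetical):

#include "ruby.h"

static int
count_strings_i(void *vstart, void *vend, size_t stride, void *data)
{
    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (RBASIC(v)->flags && BUILTIN_TYPE(v) == T_STRING) {
            (*(size_t *)data)++;
        }
    }
    return 0;   /* non-zero would stop the iteration */
}

static size_t
count_strings(void)
{
    size_t n = 0;
    rb_objspace_each_objects(count_strings_i, &n);
    return n;
}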
root_objects_data::category
const char * category
Definition: gc.c:9483
rb_objspace_data_type_memsize
size_t rb_objspace_data_type_memsize(VALUE obj)
Definition: gc.c:2419
rb_str_append
VALUE rb_str_append(VALUE, VALUE)
Definition: string.c:2965
VM_METHOD_TYPE_MISSING
@ VM_METHOD_TYPE_MISSING
wrapper for method_missing(id)
Definition: method.h:112
rb_bug
void rb_bug(const char *fmt,...)
Definition: error.c:636
rb_control_frame_struct::self
VALUE self
Definition: vm_core.h:764
RVALUE::match
struct RMatch match
Definition: gc.c:586
rb_objspace_reachable_objects_from_root
void rb_objspace_reachable_objects_from_root(void(func)(const char *category, VALUE, void *), void *passing_data)
Definition: gc.c:9496
gc_stat_compat_sym_heap_increment
@ gc_stat_compat_sym_heap_increment
Definition: gc.c:8886
rb_io_t::rb_io_enc_t::ecopts
VALUE ecopts
Definition: io.h:89
rmatch::char_offset_num_allocated
int char_offset_num_allocated
Definition: re.h:40
GC_ASSERT
#define GC_ASSERT(expr)
Definition: gc.c:403
internal.h
dont_gc
#define dont_gc
Definition: gc.c:921
gc_mode_set
#define gc_mode_set(objspace, mode)
Definition: gc.c:951
rb_objspace::last_major_gc
size_t last_major_gc
Definition: gc.c:792
T_ARRAY
#define T_ARRAY
Definition: ruby.h:530
rb_objspace::increase
size_t increase
Definition: gc.c:679
stack_chunk::data
VALUE data[STACK_CHUNK_SIZE]
Definition: gc.c:643
list_top
#define list_top(h, type, member)
Definition: rb_mjit_min_header-2.7.2.h:9034
rb_mKernel
RUBY_EXTERN VALUE rb_mKernel
Definition: ruby.h:1998
ROBJ_TRANSIENT_P
#define ROBJ_TRANSIENT_P(obj)
Definition: internal.h:2262
rb_gc_guarded_val
volatile VALUE rb_gc_guarded_val
Definition: gc.c:248
RArray::as
union RArray::@5 as
argv
char ** argv
Definition: ruby.c:223
f
#define f
gc_stat_sym_major_gc_count
@ gc_stat_sym_major_gc_count
Definition: gc.c:8860
RHASH_ST_TABLE
#define RHASH_ST_TABLE(hash)
Definition: internal.h:856
objspace_and_reason
Definition: gc.c:7372
gc_stat_compat_sym_heap_length
@ gc_stat_compat_sym_heap_length
Definition: gc.c:8887
os_each_struct
Definition: gc.c:3055
ST_CONTINUE
@ ST_CONTINUE
Definition: st.h:99
RBignum
Definition: internal.h:749
rb_heap_struct::total_slots
size_t total_slots
Definition: gc.c:667
rb_setjmp
#define rb_setjmp(env)
Definition: gc.c:81
RVALUE::rational
struct RRational rational
Definition: gc.c:587
RVALUE::v1
VALUE v1
Definition: gc.c:603
st_init_table
st_table * st_init_table(const struct st_hash_type *type)
Definition: st.c:645
rb_data_typed_object_alloc
#define rb_data_typed_object_alloc
Definition: gc.c:15
xmalloc
#define xmalloc
Definition: defines.h:211
ruby_thread_has_gvl_p
int ruby_thread_has_gvl_p(void)
Definition: thread.c:1705
rb_objspace::hook_events
rb_event_flag_t hook_events
Definition: gc.c:703
UNREACHABLE
#define UNREACHABLE
Definition: ruby.h:63
VM_METHOD_TYPE_ATTRSET
@ VM_METHOD_TYPE_ATTRSET
attr_writer or attr_accessor
Definition: method.h:104
GET_PAGE_BODY
#define GET_PAGE_BODY(x)
Definition: gc.c:876
rb_sprintf
VALUE rb_sprintf(const char *format,...)
Definition: sprintf.c:1197
ARY_SHARED_P
#define ARY_SHARED_P(ary)
Definition: gc.c:11426
rb_objspace_garbage_object_p
int rb_objspace_garbage_object_p(VALUE obj)
Definition: gc.c:3607
gc_profile_record::flags
int flags
Definition: gc.c:522
rb_mark_hash
void rb_mark_hash(st_table *tbl)
Definition: gc.c:4865
rb_wb_protected_newobj_of
VALUE rb_wb_protected_newobj_of(VALUE klass, VALUE flags)
Definition: gc.c:2281
rb_gc_guarded_ptr_val
volatile VALUE * rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
Definition: gc.c:250
rb_hashtype_ident
const struct st_hash_type rb_hashtype_ident
Definition: hash.c:322
rb_id_table_foreach_with_replace
void rb_id_table_foreach_with_replace(struct rb_id_table *tbl, rb_id_table_foreach_func_t *func, rb_id_table_update_callback_func_t *replace, void *data)
Definition: id_table.c:270
RARRAY
#define RARRAY(obj)
Definition: ruby.h:1273
rb_xmalloc_mul_add_mul
void * rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w)
Definition: gc.c:10190
gc_event_hook
#define gc_event_hook(objspace, event, data)
Definition: gc.c:2104
GC_HEAP_FREE_SLOTS_MAX_RATIO
#define GC_HEAP_FREE_SLOTS_MAX_RATIO
Definition: gc.c:281
rb_method_definition_struct::body
union rb_method_definition_struct::@0 body
GC_OLDMALLOC_LIMIT_MIN
#define GC_OLDMALLOC_LIMIT_MIN
Definition: gc.c:295
T_NIL
#define T_NIL
Definition: ruby.h:522
GPR_FLAG_MAJOR_BY_FORCE
@ GPR_FLAG_MAJOR_BY_FORCE
Definition: gc.c:497
rb_subclass_entry::klass
VALUE klass
Definition: internal.h:999
heap_page_body
Definition: gc.c:629
rb_objspace::old_objects_limit
size_t old_objects_limit
Definition: gc.c:796
ruby_gc_params_t::growth_factor
double growth_factor
Definition: gc.c:320
BDIGIT
#define BDIGIT
Definition: bigdecimal.h:48
timeval
Definition: missing.h:53
rb_objspace::total_allocated_pages
size_t total_allocated_pages
Definition: gc.c:781
global_list
#define global_list
Definition: gc.c:925
lo
#define lo
Definition: siphash.c:21
gc_stat_sym_heap_allocatable_pages
@ gc_stat_sym_heap_allocatable_pages
Definition: gc.c:8844
RVALUE::next
struct RVALUE * next
Definition: gc.c:570
T_MOVED
#define T_MOVED
Definition: ruby.h:547
str
char str[HTML_ESCAPE_MAX_LEN+1]
Definition: escape.c:18
GET_THREAD
#define GET_THREAD()
Definition: vm_core.h:1765
GPR_FLAG_MAJOR_MASK
@ GPR_FLAG_MAJOR_MASK
Definition: gc.c:501
stack_chunk
Definition: gc.c:642
mark_stack::limit
int limit
Definition: gc.c:651
ruby_disable_gc
int ruby_disable_gc
Definition: gc.c:1001
src
__inline__ const void *__restrict src
Definition: rb_mjit_min_header-2.7.2.h:2842
NO_SANITIZE
NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr))
gc_stat_sym_compact_count
@ gc_stat_sym_compact_count
Definition: gc.c:8861
memcpy
void * memcpy(void *__restrict, const void *__restrict, size_t)
RUBY_TYPED_FREE_IMMEDIATELY
#define RUBY_TYPED_FREE_IMMEDIATELY
Definition: ruby.h:1207
ruby_native_thread_p
int ruby_native_thread_p(void)
Definition: thread.c:5277
heap_pages_himem
#define heap_pages_himem
Definition: gc.c:914
debug_counter.h
T_ZOMBIE
#define T_ZOMBIE
Definition: ruby.h:546
MEMCPY
#define MEMCPY(p1, p2, type, n)
Definition: ruby.h:1753
MARK_CHECKPOINT
#define MARK_CHECKPOINT(category)
clock_t
unsigned long clock_t
Definition: rb_mjit_min_header-2.7.2.h:1301
PRIuVALUE
#define PRIuVALUE
Definition: ruby.h:163
RZombie::next
VALUE next
Definition: gc.c:988
types
enum imemo_type types
Definition: debug.c:63
GPR_FLAG_MAJOR_BY_OLDMALLOC
@ GPR_FLAG_MAJOR_BY_OLDMALLOC
Definition: gc.c:499
stress_to_class
#define stress_to_class
Definition: gc.c:931
Init_heap
void Init_heap(void)
Definition: gc.c:2911
path
VALUE path
Definition: rb_mjit_min_header-2.7.2.h:7336
rb_hash_aset
VALUE rb_hash_aset(VALUE hash, VALUE key, VALUE val)
Definition: hash.c:2852
ruby_gc_params_t::malloc_limit_max
size_t malloc_limit_max
Definition: gc.c:329
RComplex
Definition: internal.h:805
ATOMIC_EXCHANGE
#define ATOMIC_EXCHANGE(var, val)
Definition: ruby_atomic.h:135
ssize_t
_ssize_t ssize_t
Definition: rb_mjit_min_header-2.7.2.h:1327
RRegexp::src
const VALUE src
Definition: ruby.h:1115
clock
clock_t clock(void)
gc_prof_enabled
#define gc_prof_enabled(objspace)
Definition: gc.c:1087
RVALUE::array
struct RArray array
Definition: gc.c:578
clock_gettime
int clock_gettime(clockid_t, struct timespec *)
Definition: win32.c:4612
rb_obj_info_dump
void rb_obj_info_dump(VALUE obj)
Definition: gc.c:11689
heap_tomb
#define heap_tomb
Definition: gc.c:920
NIL_P
#define NIL_P(v)
Definition: ruby.h:482
RDATA
#define RDATA(obj)
Definition: ruby.h:1274
RVALUE_PIN_BITMAP
#define RVALUE_PIN_BITMAP(obj)
Definition: gc.c:1211
rb_funcall
#define rb_funcall(recv, mid, argc,...)
Definition: rb_mjit_min_header-2.7.2.h:6581
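Usage sketch (editorial): rb_funcall dispatches a Ruby method call from C; rb_intern interns the method name as an ID, and the trailing 0 is the argument count.

    #include "ruby.h"

    /* Equivalent of `obj.to_s` from C. */
    static VALUE
    call_to_s(VALUE obj)
    {
        return rb_funcall(obj, rb_intern("to_s"), 0);
    }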
PRIdSIZE
#define PRIdSIZE
Definition: ruby.h:205
TAG_NONE
#define TAG_NONE
Definition: vm_core.h:197
RUBY_INTERNAL_EVENT_NEWOBJ
#define RUBY_INTERNAL_EVENT_NEWOBJ
Definition: ruby.h:2268
rb_objspace::uncollectible_wb_unprotected_objects_limit
size_t uncollectible_wb_unprotected_objects_limit
Definition: gc.c:794
RGENGC_CHECK_MODE
#define RGENGC_CHECK_MODE
Definition: gc.c:399
gc_stat_compat_sym_heap_eden_page_length
@ gc_stat_compat_sym_heap_eden_page_length
Definition: gc.c:8884
BIGNUM_EMBED_FLAG
#define BIGNUM_EMBED_FLAG
Definition: internal.h:769
va_start
#define va_start(v, l)
Definition: rb_mjit_min_header-2.7.2.h:3981
io.h
Init_gc_stress
void Init_gc_stress(void)
Definition: gc.c:2931
rb_objspace_set_event_hook
void rb_objspace_set_event_hook(const rb_event_flag_t event)
Definition: gc.c:2082
BITMAP_BIT
#define BITMAP_BIT(p)
Definition: gc.c:883
argc
int argc
Definition: ruby.c:222
malloc_increase
#define malloc_increase
Definition: gc.c:908
VM_METHOD_TYPE_ISEQ
@ VM_METHOD_TYPE_ISEQ
Ruby method.
Definition: method.h:102
rb_objspace::total_allocated_objects_at_gc_start
size_t total_allocated_objects_at_gc_start
Definition: gc.c:775
rb_objspace::freeable_pages
size_t freeable_pages
Definition: gc.c:728
GC_OLDMALLOC_LIMIT_MAX
#define GC_OLDMALLOC_LIMIT_MAX
Definition: gc.c:301
regint.h
T_IMEMO
#define T_IMEMO
Definition: ruby.h:543
GC_MALLOC_LIMIT_MAX
#define GC_MALLOC_LIMIT_MAX
Definition: gc.c:288
rb_obj_classname
const char * rb_obj_classname(VALUE)
Definition: variable.c:289
memop_type
memop_type
Definition: gc.c:9687
rb_objspace_gc_disable
VALUE rb_objspace_gc_disable(rb_objspace_t *objspace)
Definition: gc.c:9256
rb_iseq_memsize
size_t rb_iseq_memsize(const rb_iseq_t *iseq)
Definition: iseq.c:373
rb_vm_struct::self
VALUE self
Definition: vm_core.h:577
mark_stack::index
int index
Definition: gc.c:650
roomof
#define roomof(x, y)
Definition: internal.h:1298
rb_obj_memsize_of
size_t rb_obj_memsize_of(VALUE obj)
Definition: gc.c:3937
ruby_gc_params_t::growth_max_slots
size_t growth_max_slots
Definition: gc.c:321
list_node
Definition: rb_mjit_min_header-2.7.2.h:8936
GC_DEBUG
#define GC_DEBUG
Definition: gc.c:365
rb_define_const
void rb_define_const(VALUE, const char *, VALUE)
Definition: variable.c:2891
free
#define free(x)
Definition: dln.c:52
err
int err
Definition: win32.c:135
will_be_incremental_marking
#define will_be_incremental_marking(objspace)
Definition: gc.c:966
GPR_FLAG_STRESS
@ GPR_FLAG_STRESS
Definition: gc.c:508
is_sweeping
#define is_sweeping(objspace)
Definition: gc.c:954
gc_stat_sym_malloc_increase_bytes_limit
@ gc_stat_sym_malloc_increase_bytes_limit
Definition: gc.c:8857
STACK_LENGTH
#define STACK_LENGTH
Definition: gc.c:4617
rb_io_write
VALUE rb_io_write(VALUE, VALUE)
Definition: io.c:1804
verify_internal_consistency_struct::live_object_count
size_t live_object_count
Definition: gc.c:5897
gc_raise_tag::ap
va_list * ap
Definition: gc.c:9526
__asm__
#define __asm__
Definition: Context.c:12
rb_data_type_struct
Definition: ruby.h:1148
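Usage sketch (editorial; point_t is a hypothetical struct): rb_data_type_struct is the descriptor consumed by the TypedData macros. The mark callback uses rb_gc_mark (indexed below) to keep referenced VALUEs alive, and RUBY_TYPED_FREE_IMMEDIATELY (indexed above) lets the GC free the struct during sweep.

    #include "ruby.h"

    typedef struct point {
        VALUE label;        /* a Ruby String; must be marked */
        double x, y;
    } point_t;

    static void
    point_mark(void *ptr)
    {
        point_t *p = ptr;
        rb_gc_mark(p->label);
    }

    static const rb_data_type_t point_type = {
        "Point",
        { point_mark, RUBY_TYPED_DEFAULT_FREE, NULL, },
        0, 0,
        RUBY_TYPED_FREE_IMMEDIATELY,
    };

    /* Allocator to register with rb_define_alloc_func (indexed below). */
    static VALUE
    point_alloc(VALUE klass)
    {
        point_t *p;
        return TypedData_Make_Struct(klass, point_t, &point_type, p);
    }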
BUILTIN_TYPE
#define BUILTIN_TYPE(x)
Definition: ruby.h:551
gc_mode_marking
@ gc_mode_marking
Definition: gc.c:672
rb_vm_struct
Definition: vm_core.h:576
xfree
#define xfree
Definition: defines.h:216
heap_page::pinned_slots
short pinned_slots
Definition: gc.c:848
gc_stat_sym_oldmalloc_increase_bytes
@ gc_stat_sym_oldmalloc_increase_bytes
Definition: gc.c:8867
RVALUE::regexp
struct RRegexp regexp
Definition: gc.c:579
RZombie::data
void * data
Definition: gc.c:990
RUBY_INTERNAL_EVENT_GC_END_MARK
#define RUBY_INTERNAL_EVENT_GC_END_MARK
Definition: ruby.h:2271
st_data_t
unsigned long st_data_t
Definition: rb_mjit_min_header-2.7.2.h:5359
RMOVED
#define RMOVED(obj)
Definition: ruby.h:1266
ARY_EMBED_P
#define ARY_EMBED_P(ary)
Definition: gc.c:11429
MEMOP_TYPE_REALLOC
@ MEMOP_TYPE_REALLOC
Definition: gc.c:9690
SET_MACHINE_STACK_END
#define SET_MACHINE_STACK_END(p)
Definition: gc.h:13
exit
void exit(int __status) __attribute__((__noreturn__))
RBASIC
#define RBASIC(obj)
Definition: ruby.h:1267
CLOCK_PROCESS_CPUTIME_ID
#define CLOCK_PROCESS_CPUTIME_ID
Definition: rb_mjit_min_header-2.7.2.h:2378
PUREFUNC
PUREFUNC(static inline int is_id_value(rb_objspace_t *objspace, VALUE ptr))
CLEAR_IN_BITMAP
#define CLEAR_IN_BITMAP(bits, p)
Definition: gc.c:888
root_objects_data
Definition: gc.c:9482
ruby_gc_params_t::heap_free_slots_max_ratio
double heap_free_slots_max_ratio
Definition: gc.c:325
size_t
unsigned int size_t
Definition: rb_mjit_min_header-2.7.2.h:663
rb_obj_info_dump_loc
void rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
Definition: gc.c:11696
rb_sym2id
ID rb_sym2id(VALUE)
Definition: symbol.c:748
root_objects_data::func
void(* func)(const char *category, VALUE, void *)
Definition: gc.c:9484
rb_method_definition_struct
Definition: method.h:163
rb_objspace::malloc_params
struct rb_objspace::@7 malloc_params
rb_gc_mark
void rb_gc_mark(VALUE ptr)
Definition: gc.c:5215
gc_stat_compat_sym_total_allocated_object
@ gc_stat_compat_sym_total_allocated_object
Definition: gc.c:8898
GC_HEAP_GROWTH_MAX_SLOTS
#define GC_HEAP_GROWTH_MAX_SLOTS
Definition: gc.c:268
imemo_cref
@ imemo_cref
class reference
Definition: internal.h:1134
rb_objspace_each_objects_without_setup
void rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
Definition: gc.c:3050
rb_gc_mark_values
void rb_gc_mark_values(long n, const VALUE *values)
Definition: gc.c:4718
MJIT_FUNC_EXPORTED
#define MJIT_FUNC_EXPORTED
Definition: defines.h:396
rb_undefine_finalizer
VALUE rb_undefine_finalizer(VALUE obj)
Definition: gc.c:3193
rb_size_mul_add_or_raise
size_t rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
Definition: gc.c:219
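Usage sketch (editorial; this helper is interpreter-internal, declared in internal.h, so the snippet only shows the call shape and struct my_header is hypothetical): it returns x * y + z, raising the given exception class instead of letting the size_t arithmetic wrap.

    #include "ruby.h"

    struct my_header { size_t len; };   /* hypothetical */

    static void *
    alloc_values(size_t n)
    {
        /* n * sizeof(VALUE) + sizeof(struct my_header), raising
           ArgumentError on overflow rather than wrapping. */
        size_t bytes = rb_size_mul_add_or_raise(n, sizeof(VALUE),
                                                sizeof(struct my_header),
                                                rb_eArgError);
        return ruby_xmalloc(bytes);
    }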
_
#define _(args)
Definition: dln.h:28
EC_PUSH_TAG
#define EC_PUSH_TAG(ec)
Definition: eval_intern.h:130
INT_MAX
#define INT_MAX
Definition: rb_mjit_min_header-2.7.2.h:4055
rb_objspace::limit
size_t limit
Definition: gc.c:678
ar_table_struct
Definition: hash.c:349
count
int count
Definition: encoding.c:57
st_memsize
size_t st_memsize(const st_table *tab)
Definition: st.c:719
rb_callable_method_entry_struct::def
struct rb_method_definition_struct *const def
Definition: method.h:62
Qtrue
#define Qtrue
Definition: ruby.h:468
heap_page_header
Definition: gc.c:625
rb_obj_rgengc_writebarrier_protected_p
VALUE rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
Definition: gc.c:6963
S
#define S(s)
rb_objspace::global_list
struct gc_list * global_list
Definition: gc.c:784
NUM_IN_PAGE
#define NUM_IN_PAGE(p)
Definition: gc.c:880
rb_class_name
VALUE rb_class_name(VALUE)
Definition: variable.c:274
rb_io_fptr_finalize
#define rb_io_fptr_finalize
Definition: internal.h:1734
re_registers::num_regs
int num_regs
Definition: onigmo.h:718
gc_stat_sym_heap_allocated_pages
@ gc_stat_sym_heap_allocated_pages
Definition: gc.c:8842
v
VALUE v
Definition: rb_mjit_min_header-2.7.2.h:12300
FL_PROMOTED1
#define FL_PROMOTED1
Definition: ruby.h:1281
RANY
#define RANY(o)
Definition: gc.c:984
len
uint8_t len
Definition: escape.c:17
gc_prof_record
#define gc_prof_record(objspace)
Definition: gc.c:1086
SYMBOL_P
#define SYMBOL_P(x)
Definition: ruby.h:413
RB_DEBUG_COUNTER_INC
#define RB_DEBUG_COUNTER_INC(type)
Definition: debug_counter.h:375
rb_objspace::atomic_flags
struct rb_objspace::@9 atomic_flags
rb_heap_struct::using_page
struct heap_page * using_page
Definition: gc.c:660
RVALUE::svar
struct vm_svar svar
Definition: gc.c:591
rb_const_entry_struct::file
VALUE file
Definition: constant.h:35
rb_memerror
void rb_memerror(void)
Definition: gc.c:9598
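Usage sketch (editorial): rb_memerror raises NoMemoryError (rb_eNoMemError, indexed below) and does not return; the conventional guard after a raw allocation looks like this.

    #include "ruby.h"
    #include <stdlib.h>
    #include <string.h>

    static char *
    dup_cstring(const char *src, size_t len)
    {
        char *copy = malloc(len + 1);
        if (copy == NULL) rb_memerror();   /* raises NoMemoryError */
        memcpy(copy, src, len);
        copy[len] = '\0';
        return copy;
    }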
GC_PROFILE_MORE_DETAIL
#define GC_PROFILE_MORE_DETAIL
Definition: gc.c:458
RVALUE::typeddata
struct RTypedData typeddata
Definition: gc.c:582
rb_method_entry_struct
Definition: method.h:51
rb_iseq_struct::body
struct rb_iseq_constant_body * body
Definition: vm_core.h:460
rb_mark_tbl
void rb_mark_tbl(st_table *tbl)
Definition: gc.c:5008
rb_subclass_entry
Definition: internal.h:998
RStruct
Definition: internal.h:942
rb_control_frame_struct::pc
const VALUE * pc
Definition: vm_core.h:761
stderr
#define stderr
Definition: rb_mjit_min_header-2.7.2.h:1516
MEMMOVE
#define MEMMOVE(p1, p2, type, n)
Definition: ruby.h:1754
rb_gc_unprotect_logging
void rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
Definition: gc.c:6912
gc_stat_compat_sym_heap_tomb_page_length
@ gc_stat_compat_sym_heap_tomb_page_length
Definition: gc.c:8885
timespec
Definition: missing.h:60
gc_stat_sym_old_objects_limit
@ gc_stat_sym_old_objects_limit
Definition: gc.c:8865
ruby_atomic.h
rb_io_t::tied_io_for_writing
VALUE tied_io_for_writing
Definition: io.h:77
heap_page::freelist
RVALUE * freelist
Definition: gc.c:859
RTYPEDDATA_P
#define RTYPEDDATA_P(v)
Definition: ruby.h:1177
rb_define_finalizer
VALUE rb_define_finalizer(VALUE obj, VALUE block)
Definition: gc.c:3289
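Usage sketch (editorial): pairing rb_define_finalizer with rb_block_proc (indexed below) registers the caller's block to run when obj is collected, mirroring ObjectSpace.define_finalizer.

    #include "ruby.h"

    /* def watch(obj, &blk); end -- blk runs when obj is reclaimed. */
    static VALUE
    watch(VALUE self, VALUE obj)
    {
        return rb_define_finalizer(obj, rb_block_proc());
    }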
RVALUE::basic
struct RBasic basic
Definition: gc.c:573
rb_str_free
void rb_str_free(VALUE)
Definition: string.c:1349
RVALUE::values
struct RVALUE::@3::@6 values
rb_alloc_tmp_buffer_with_count
void * rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
Definition: gc.c:10247
rb_objspace::mark_func_data_struct
Definition: gc.c:714
OBJ_PROMOTED
#define OBJ_PROMOTED(x)
Definition: ruby.h:1494
ROBJECT_NUMIV
#define ROBJECT_NUMIV(o)
Definition: ruby.h:933
rb_class_detach_subclasses
void rb_class_detach_subclasses(VALUE klass)
Definition: class.c:133
T_STRING
#define T_STRING
Definition: ruby.h:528
gc_raise_tag
Definition: gc.c:9523
imemo_tmpbuf
@ imemo_tmpbuf
Definition: internal.h:1141
finalizing
#define finalizing
Definition: gc.c:923
rb_hash_stlike_foreach_with_replace
int rb_hash_stlike_foreach_with_replace(VALUE hash, st_foreach_check_callback_func *func, st_update_callback_func *replace, st_data_t arg)
Definition: hash.c:1453
rb_atomic_t
int rb_atomic_t
Definition: ruby_atomic.h:124
RZombie::dfree
void(* dfree)(void *)
Definition: gc.c:989
rb_block
Definition: vm_core.h:751
FL_SINGLETON
#define FL_SINGLETON
Definition: ruby.h:1278
heap_page_header::page
struct heap_page * page
Definition: gc.c:626
RMatch::str
VALUE str
Definition: re.h:45
rb_define_class_under
VALUE rb_define_class_under(VALUE outer, const char *name, VALUE super)
Defines a class under the namespace of outer.
Definition: class.c:698
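Usage sketch (editorial; MyLib is a hypothetical extension): the usual Init_* pattern for nesting a class inside a module.

    #include "ruby.h"

    void
    Init_mylib(void)
    {
        VALUE mMyLib = rb_define_module("MyLib");
        /* Defines MyLib::Error < StandardError. */
        rb_define_class_under(mMyLib, "Error", rb_eStandardError);
    }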
ULL2NUM
#define ULL2NUM(v)
Definition: rb_mjit_min_header-2.7.2.h:4245
rb_sym2str
VALUE rb_sym2str(VALUE)
Definition: symbol.c:784
SIZET2NUM
#define SIZET2NUM(v)
Definition: ruby.h:295
old
VALUE old
Definition: rb_mjit_min_header-2.7.2.h:16113
index
int index
Definition: rb_mjit_min_header-2.7.2.h:11214
rb_objspace::gc_stress_mode
VALUE gc_stress_mode
Definition: gc.c:786
gc_stress_full_mark_after_malloc
@ gc_stress_full_mark_after_malloc
Definition: gc.c:7125
rb_id_table_foreach_values
void rb_id_table_foreach_values(struct rb_id_table *tbl, rb_id_table_foreach_values_func_t *func, void *data)
Definition: id_table.c:311
malloc
void * malloc(size_t) __attribute__((__malloc__)) __attribute__((__warn_unused_result__)) __attribute__((__alloc_size__(1)))
rb_generic_ivar_memsize
size_t rb_generic_ivar_memsize(VALUE)
Definition: variable.c:1010
rb_yield
VALUE rb_yield(VALUE)
Definition: vm_eval.c:1237
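Usage sketch (editorial): rb_yield passes a value to the block of the current method, here in a bare-bones reimplementation of Integer#times.

    #include "ruby.h"

    static VALUE
    each_upto(VALUE self, VALUE n)
    {
        long i, limit = NUM2LONG(n);
        for (i = 0; i < limit; i++) {
            rb_yield(LONG2NUM(i));   /* raises LocalJumpError if no block */
        }
        return self;
    }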
RVALUE::klass
struct RClass klass
Definition: gc.c:575
rb_hash_stlike_foreach
int rb_hash_stlike_foreach(VALUE hash, st_foreach_callback_func *func, st_data_t arg)
Definition: hash.c:1442
rb_gc_disable_no_rest
VALUE rb_gc_disable_no_rest(void)
Definition: gc.c:9234
rb_str_new_cstr
#define rb_str_new_cstr(str)
Definition: rb_mjit_min_header-2.7.2.h:6113
eval_intern.h
gc_list::varptr
VALUE * varptr
Definition: gc.c:636
ruby_initial_gc_stress
#define ruby_initial_gc_stress
Definition: gc.c:903
rb_objspace::mark_func_data
struct rb_objspace::mark_func_data_struct * mark_func_data
rb_ensure
VALUE rb_ensure(VALUE(*b_proc)(VALUE), VALUE data1, VALUE(*e_proc)(VALUE), VALUE data2)
An equivalent to ensure clause.
Definition: eval.c:1115
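Usage sketch (editorial): rb_ensure runs e_proc whether or not b_proc raises, the C counterpart of begin/ensure; here it guarantees a FILE* is closed.

    #include "ruby.h"
    #include <stdio.h>

    static VALUE
    read_body(VALUE arg)
    {
        FILE *fp = (FILE *)arg;
        /* ... work on fp that may raise ... */
        return Qnil;
    }

    static VALUE
    close_file(VALUE arg)
    {
        fclose((FILE *)arg);   /* runs on both normal and raising paths */
        return Qnil;
    }

    static VALUE
    with_file(const char *path)
    {
        FILE *fp = fopen(path, "r");
        if (fp == NULL) rb_sys_fail(path);
        return rb_ensure(read_body, (VALUE)fp, close_file, (VALUE)fp);
    }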
rb_io_t::encs
struct rb_io_t::rb_io_enc_t encs
RComplex::imag
VALUE imag
Definition: internal.h:808
RARRAY_CONST_PTR_TRANSIENT
#define RARRAY_CONST_PTR_TRANSIENT(a)
Definition: ruby.h:1073
ruby_vm_special_exception_copy
MJIT_STATIC VALUE ruby_vm_special_exception_copy(VALUE)
Definition: rb_mjit_min_header-2.7.2.h:12187
rb_ary_new
VALUE rb_ary_new(void)
Definition: array.c:723
gc_stat_sym_last
@ gc_stat_sym_last
Definition: gc.c:8879
ruby_xrealloc2_body
void * ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
Definition: gc.c:10153
rb_symbols_t
Definition: symbol.h:61
rb_vm_update_references
void rb_vm_update_references(void *ptr)
Definition: vm.c:2234
builtin.h
heap_page::has_uncollectible_shady_objects
unsigned int has_uncollectible_shady_objects
Definition: gc.c:853
ROBJECT_EMBED
@ ROBJECT_EMBED
Definition: ruby.h:917
rb_imemo_tmpbuf_struct
Definition: internal.h:1231
Qnil
#define Qnil
Definition: ruby.h:469
T_STRUCT
#define T_STRUCT
Definition: ruby.h:532
mark_stack_t
struct mark_stack mark_stack_t
GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
Definition: gc.c:298
gc_profile_record::gc_invoke_time
double gc_invoke_time
Definition: gc.c:525
rb_big_size
size_t rb_big_size(VALUE big)
Definition: bignum.c:6778
mjit_gc_exit_hook
void mjit_gc_exit_hook(void)
rb_objspace::final_slots
size_t final_slots
Definition: gc.c:731
heap_cursor::slot
RVALUE * slot
Definition: gc.c:7669
heap_page::marking_bits
bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:869
gc_stat_sym_total_freed_pages
@ gc_stat_sym_total_freed_pages
Definition: gc.c:8853
RVALUE_UNCOLLECTIBLE_BITMAP
#define RVALUE_UNCOLLECTIBLE_BITMAP(obj)
Definition: gc.c:1216
realloc
void * realloc(void *, size_t) __attribute__((__warn_unused_result__)) __attribute__((__alloc_size__(2)))
thread.h
rmatch_offset
Definition: re.h:31
rb_mEnumerable
VALUE rb_mEnumerable
Definition: enum.c:20
ruby_qsort
void ruby_qsort(void *, const size_t, const size_t, int(*)(const void *, const void *, void *), void *)
st_lookup
int st_lookup(st_table *tab, st_data_t key, st_data_t *value)
Definition: st.c:1101
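Usage sketch (editorial): the basic insert/lookup cycle on an st_table with integer keys; st_init_numtable and st_insert come from the same st.c API, though they are not separately indexed here.

    #include "ruby/st.h"

    static void
    st_demo(void)
    {
        st_table *tab = st_init_numtable();
        st_data_t value;

        st_insert(tab, (st_data_t)42, (st_data_t)100);
        if (st_lookup(tab, (st_data_t)42, &value)) {
            /* value == 100 here */
        }
        st_free_table(tab);   /* indexed below */
    }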
util.h
O
#define O(member)
BIGNUM_LEN
#define BIGNUM_LEN(b)
Definition: internal.h:774
stack_chunk_t
struct stack_chunk stack_chunk_t
rb_io_t
Definition: io.h:66
page_compare_func_t
int page_compare_func_t(const void *, const void *, void *)
Definition: gc.c:7769
GC_HEAP_FREE_SLOTS
#define GC_HEAP_FREE_SLOTS
Definition: gc.c:262
calloc
void * calloc(size_t, size_t) __attribute__((__malloc__)) __attribute__((__warn_unused_result__)) __attribute__((__alloc_size__(1, 2)))
gc_mode_sweeping
@ gc_mode_sweeping
Definition: gc.c:673
rb_classext_struct
Definition: internal.h:1020
rb_int_plus
VALUE rb_int_plus(VALUE x, VALUE y)
Definition: numeric.c:3610
RUBY_INTERNAL_EVENT_GC_START
#define RUBY_INTERNAL_EVENT_GC_START
Definition: ruby.h:2270
rb_gc_latest_gc_info
VALUE rb_gc_latest_gc_info(VALUE key)
Definition: gc.c:8819
gc_profile_record
Definition: gc.c:521
numberof
#define numberof(array)
Definition: etc.c:618
rb_thread_struct
Definition: vm_core.h:910
rb_ary_delete_same
void rb_ary_delete_same(VALUE ary, VALUE item)
Definition: array.c:3396
RString::as
union RString::@2 as
UNREACHABLE_RETURN
#define UNREACHABLE_RETURN(val)
Definition: ruby.h:59
fprintf
int fprintf(FILE *__restrict, const char *__restrict, ...) __attribute__((__format__(__printf__, 2, 3)))
rb_objspace::finalizing
rb_atomic_t finalizing
Definition: gc.c:711
rb_classext_struct::origin_
const VALUE origin_
Definition: internal.h:1039
rb_objspace::uncollectible_wb_unprotected_objects
size_t uncollectible_wb_unprotected_objects
Definition: gc.c:793
st_free_table
void st_free_table(st_table *tab)
Definition: st.c:709
rb_eNoMemError
VALUE rb_eNoMemError
Definition: error.c:935
wmap_iter_arg::objspace
rb_objspace_t * objspace
Definition: gc.c:10502
st_table
Definition: st.h:79
rb_transient_heap_verify
void rb_transient_heap_verify(void)
Definition: transient_heap.c:219
RHASH_AR_TABLE_P
#define RHASH_AR_TABLE_P(hash)
Definition: internal.h:854
block_type_iseq
@ block_type_iseq
Definition: vm_core.h:745
rb_ec_raised_p
#define rb_ec_raised_p(ec, f)
Definition: eval_intern.h:260
rb_objspace::old_objects
size_t old_objects
Definition: gc.c:795
hi
#define hi
Definition: siphash.c:22
RVALUE::file
struct RFile file
Definition: gc.c:585
RData
Definition: ruby.h:1139
ruby_assert.h
rb_objspace::sorted_length
size_t sorted_length
Definition: gc.c:726
gc_profile_record::gc_time
double gc_time
Definition: gc.c:524
malloc_allocated_size
#define malloc_allocated_size
Definition: gc.c:909
HEAP_PAGE_OBJ_LIMIT
@ HEAP_PAGE_OBJ_LIMIT
Definition: gc.c:839
rb_any_to_s
VALUE rb_any_to_s(VALUE)
Default implementation of #to_s.
Definition: object.c:527
RUBY_INTERNAL_EVENT_GC_END_SWEEP
#define RUBY_INTERNAL_EVENT_GC_END_SWEEP
Definition: ruby.h:2272
sub
#define sub(x, y)
Definition: date_strftime.c:24
DSIZE_T
#define DSIZE_T
Definition: rb_mjit_min_header-2.7.2.h:5041
ID_TABLE_CONTINUE
@ ID_TABLE_CONTINUE
Definition: id_table.h:9
rb_heap_struct::free_pages
struct heap_page * free_pages
Definition: gc.c:659
GC_HEAP_OLDOBJECT_LIMIT_FACTOR
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR
Definition: gc.c:271
rb_data_object_alloc
#define rb_data_object_alloc
Definition: gc.c:14
rb_const_entry_struct
Definition: constant.h:31
each_obj_callback
int each_obj_callback(void *, void *, size_t, void *)
Definition: gc.c:2938
id_table.h
finalizer_table
#define finalizer_table
Definition: gc.c:924
RREGEXP_PTR
#define RREGEXP_PTR(r)
Definition: ruby.h:1118
rb_newobj
VALUE rb_newobj(void)
Definition: gc.c:2290
rb_objspace::invoke_time
double invoke_time
Definition: gc.c:748
gc_stat_compat_sym_gc_stat_heap_used
@ gc_stat_compat_sym_gc_stat_heap_used
Definition: gc.c:8883
T_TRUE
#define T_TRUE
Definition: ruby.h:536
rb_obj_is_kind_of
VALUE rb_obj_is_kind_of(VALUE, VALUE)
Determines if obj is a kind of c.
Definition: object.c:692
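Usage sketch (editorial): rb_obj_is_kind_of returns Qtrue/Qfalse, so test the result with RTEST (both indexed nearby); here it guards an argument type using rb_mEnumerable (indexed above).

    #include "ruby.h"

    static void
    check_enumerable(VALUE arg)
    {
        if (!RTEST(rb_obj_is_kind_of(arg, rb_mEnumerable))) {
            rb_raise(rb_eTypeError, "expected an Enumerable");
        }
    }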
ruby_gc_set_params
void ruby_gc_set_params(void)
Definition: gc.c:9421
malloc_obj_info
Definition: gc.c:9791
ruby_tag_type
ruby_tag_type
Definition: vm_core.h:184
rb_define_alloc_func
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
VALGRIND_MAKE_MEM_UNDEFINED
#define VALGRIND_MAKE_MEM_UNDEFINED(p, n)
Definition: zlib.c:25
RUBY_ALIAS_FUNCTION
RUBY_ALIAS_FUNCTION(rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree), rb_data_object_wrap,(klass, datap, dmark, dfree))
Definition: gc.c:2385
RTEST
#define RTEST(v)
Definition: ruby.h:481
imemo_ifunc
@ imemo_ifunc
iterator function
Definition: internal.h:1137
rb_wb_unprotected_newobj_of
VALUE rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags)
Definition: gc.c:2274
ruby::backward::cxxanyargs::type
VALUE type(ANYARGS)
ANYARGS-ed function type.
Definition: cxxanyargs.hpp:39
debug.h
RB_SPECIAL_CONST_P
#define RB_SPECIAL_CONST_P(x)
Definition: ruby.h:1312
RB_GNUC_EXTENSION
#define RB_GNUC_EXTENSION
Definition: defines.h:121
VM_METHOD_TYPE_ALIAS
@ VM_METHOD_TYPE_ALIAS
Definition: method.h:108
heap_page::uncollectible_bits
bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:868
SYM2ID
#define SYM2ID(x)
Definition: ruby.h:415
rb_vm_env_prev_env
const rb_env_t * rb_vm_env_prev_env(const rb_env_t *env)
Definition: vm.c:796
strcmp
int strcmp(const char *, const char *)
T_UNDEF
#define T_UNDEF
Definition: ruby.h:544
rmatch
Definition: re.h:36
optional
Definition: gc.c:90
ATOMIC_SIZE_INC
#define ATOMIC_SIZE_INC(var)
Definition: ruby_atomic.h:151
__sFILE
Definition: vsnprintf.c:169
RSTRUCT_CONST_PTR
#define RSTRUCT_CONST_PTR(st)
Definition: internal.h:962
heap_page::pinned_bits
bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT]
Definition: gc.c:873
rb_ec_stack_check
MJIT_FUNC_EXPORTED int rb_ec_stack_check(rb_execution_context_t *ec)
Definition: gc.c:4668
rb_thread_call_with_gvl
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
Definition: thread.c:1662
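Usage sketch (editorial): rb_thread_call_with_gvl temporarily reacquires the GVL from a blocking region (typically one entered via rb_thread_call_without_gvl, which is not part of this index), since Ruby objects must not be touched while the lock is released.

    #include "ruby.h"
    #include "ruby/thread.h"

    static void *
    make_result(void *data)
    {
        /* GVL held again: safe to allocate Ruby objects. */
        return (void *)rb_str_new_cstr((const char *)data);
    }

    static void *
    blocking_body(void *unused)
    {
        /* ... long-running work with the GVL released ... */
        return rb_thread_call_with_gvl(make_result, (void *)"done");
    }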
iseq
const rb_iseq_t * iseq
Definition: rb_mjit_min_header-2.7.2.h:13469
rb_heap_struct::pooled_pages
struct heap_page * pooled_pages
Definition: gc.c:664
RSTRUCT
#define RSTRUCT(obj)
Definition: internal.h:966
vfprintf
int vfprintf(FILE *__restrict, const char *__restrict, __gnuc_va_list) __attribute__((__format__(__printf__, 2, 0)))
rb_objspace::compact_count
size_t compact_count
Definition: gc.c:753
rb_objspace::during_gc
unsigned int during_gc
Definition: gc.c:691
rb_objspace_gc_enable
VALUE rb_objspace_gc_enable(rb_objspace_t *objspace)
Definition: gc.c:9219
RGENGC_FORCE_MAJOR_GC
#define RGENGC_FORCE_MAJOR_GC
Definition: gc.c:438
RClass
Definition: internal.h:1048
rb_transient_heap_start_marking
void rb_transient_heap_start_marking(int full_marking)
Definition: transient_heap.c:868
RVALUE::data
struct RData data
Definition: gc.c:581
ruby_sized_xfree
void ruby_sized_xfree(void *x, size_t size)
Definition: gc.c:10162
rb_objspace::parent_object
VALUE parent_object
Definition: gc.c:790
name
const char * name
Definition: nkf.c:208
NUM2PTR
#define NUM2PTR(x)
RVALUE_MARK_BITMAP
#define RVALUE_MARK_BITMAP(obj)
Definition: gc.c:1210
rb_execution_context_struct
Definition: vm_core.h:843
ruby_gc_params_t::heap_free_slots_min_ratio
double heap_free_slots_min_ratio
Definition: gc.c:323
n
size_t n
Definition: rb_mjit_min_header-2.7.2.h:5452
rb_block_proc
VALUE rb_block_proc(void)
Definition: proc.c:837