Ruby 3.2.1p31 (2023-02-08 revision 31819e82c88c6f8ecfaeb162519bfa26a14b21fd)
thread.c
1/**********************************************************************
2
3 thread.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11/*
12 YARV Thread Design
13
14 model 1: Userlevel Thread
15  Same as traditional ruby threads.
16
17  model 2: Native Thread with Global VM lock
18  Using pthreads (or Windows threads); Ruby threads run concurrently.
19
20  model 3: Native Thread with fine grain lock
21  Using pthreads; Ruby threads run concurrently or in parallel.
22
23  model 4: M:N User:Native threads with Global VM lock
24  Combination of models 1 and 2
25
26  model 5: M:N User:Native thread with fine grain lock
27  Combination of models 1 and 3
28
29------------------------------------------------------------------------
30
31 model 2:
32  Only a thread that holds the mutex (GVL: Global VM Lock, or Giant
33  VM Lock) can run. When scheduling, the running thread releases the
34  GVL; if the running thread attempts a blocking operation, it must
35  release the GVL so that another thread can continue. After the
36  blocking operation, the thread must check for interrupts (RUBY_VM_CHECK_INTS).
37
38  Every VM can run in parallel.
39
40  Ruby threads are scheduled by the OS thread scheduler.
41
42------------------------------------------------------------------------
43
44 model 3:
45  Every thread runs concurrently or in parallel, and exclusive access
46  control is needed to access shared objects. For example, to access a
47  String or Array object, a fine-grained lock must be taken every time.
48 */
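/*
 * Illustrative sketch (not part of thread.c): under model 2, a C extension
 * lets other Ruby threads run during a blocking call by releasing the GVL
 * around it, e.g. with rb_thread_call_without_gvl() from ruby/thread.h.
 * The helper names below are hypothetical.
 *
 *   static void *blocking_sleep(void *arg) // runs with the GVL released
 *   {
 *       sleep(1);
 *       return NULL;
 *   }
 *
 *   static void wake_up(void *arg) { } // ubf: request cancellation
 *
 *   static VALUE do_sleep(VALUE self)
 *   {
 *       rb_thread_call_without_gvl(blocking_sleep, NULL, wake_up, NULL);
 *       return Qnil; // GVL re-acquired; interrupts have been checked
 *   }
 */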
49
50
51/*
52 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
53 * 2.15 or later with _FORTIFY_SOURCE > 0.
54 * However, the implementation is wrong. Even though Linux's select(2)
55 * supports large fd sizes (> FD_SETSIZE), it wrongly assumes fd is always
56 * less than FD_SETSIZE (i.e. 1024). Then, when HAVE_RB_FD_INIT is enabled,
57 * it doesn't work correctly and makes the program abort. Therefore we need
58 * to disable FORTIFY_SOURCE until glibc fixes it.
59 */
60#undef _FORTIFY_SOURCE
61#undef __USE_FORTIFY_LEVEL
62#define __USE_FORTIFY_LEVEL 0
63
64/* for model 2 */
65
66#include "ruby/internal/config.h"
67
68#ifdef __linux__
69// Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
70# include <alloca.h>
71#endif
72
73#define TH_SCHED(th) (&(th)->ractor->threads.sched)
74
75#include "eval_intern.h"
76#include "gc.h"
77#include "hrtime.h"
78#include "internal.h"
79#include "internal/class.h"
80#include "internal/cont.h"
81#include "internal/error.h"
82#include "internal/hash.h"
83#include "internal/io.h"
84#include "internal/object.h"
85#include "internal/proc.h"
86#include "ruby/fiber/scheduler.h"
87#include "internal/signal.h"
88#include "internal/thread.h"
89#include "internal/time.h"
90#include "internal/warnings.h"
91#include "iseq.h"
92#include "mjit.h"
93#include "ruby/debug.h"
94#include "ruby/io.h"
95#include "ruby/thread.h"
96#include "ruby/thread_native.h"
97#include "timev.h"
98#include "vm_core.h"
99#include "ractor_core.h"
100#include "vm_debug.h"
101#include "vm_sync.h"
102
103#if USE_MJIT && defined(HAVE_SYS_WAIT_H)
104#include <sys/wait.h>
105#endif
106
107#ifndef USE_NATIVE_THREAD_PRIORITY
108#define USE_NATIVE_THREAD_PRIORITY 0
109#define RUBY_THREAD_PRIORITY_MAX 3
110#define RUBY_THREAD_PRIORITY_MIN -3
111#endif
112
113static VALUE rb_cThreadShield;
114
115static VALUE sym_immediate;
116static VALUE sym_on_blocking;
117static VALUE sym_never;
118
119enum SLEEP_FLAGS {
120 SLEEP_DEADLOCKABLE = 0x1,
121 SLEEP_SPURIOUS_CHECK = 0x2
122};
123
124#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
125#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
126
127static inline VALUE
128rb_thread_local_storage(VALUE thread)
129{
130 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
131 rb_ivar_set(thread, idLocals, rb_hash_new());
132 RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
133 }
134 return rb_ivar_get(thread, idLocals);
135}
136
137static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
138static void sleep_forever(rb_thread_t *th, unsigned int fl);
139static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
140static int rb_threadptr_dead(rb_thread_t *th);
141static void rb_check_deadlock(rb_ractor_t *r);
142static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
143static const char *thread_status_name(rb_thread_t *th, int detail);
144static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
145NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
146static int consume_communication_pipe(int fd);
147static int check_signals_nogvl(rb_thread_t *, int sigwait_fd);
148void rb_sigwait_fd_migrate(rb_vm_t *); /* process.c */
149
150#define eKillSignal INT2FIX(0)
151#define eTerminateSignal INT2FIX(1)
152static volatile int system_working = 1;
153
154struct waiting_fd {
155 struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
156 rb_thread_t *th;
157 int fd;
158};
159
160/********************************************************************************/
161
162#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
163
164struct rb_blocking_region_buffer {
165 enum rb_thread_status prev_status;
166};
167
168static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
169static void unblock_function_clear(rb_thread_t *th);
170
171static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
172 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
173static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
174
175#define THREAD_BLOCKING_BEGIN(th) do { \
176 struct rb_thread_sched * const sched = TH_SCHED(th); \
177 RB_GC_SAVE_MACHINE_CONTEXT(th); \
178 thread_sched_to_waiting(sched);
179
180#define THREAD_BLOCKING_END(th) \
181 thread_sched_to_running(sched, th); \
182 rb_ractor_thread_switch(th->ractor, th); \
183} while(0)
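/*
 * Usage sketch (illustrative): the BEGIN/END pair expands to a single
 * do/while block, so both macros must appear in the same scope, bracketing
 * code that runs with the GVL released:
 *
 *   THREAD_BLOCKING_BEGIN(th);
 *   {
 *       wait_on_native_condvar(); // hypothetical blocking wait
 *   }
 *   THREAD_BLOCKING_END(th);
 */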
184
185#ifdef __GNUC__
186#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
187#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
188#else
189#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
190#endif
191#else
192#define only_if_constant(expr, notconst) notconst
193#endif
194#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
195 struct rb_blocking_region_buffer __region; \
196 if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
197 /* always return true unless fail_if_interrupted */ \
198 !only_if_constant(fail_if_interrupted, TRUE)) { \
199 exec; \
200 blocking_region_end(th, &__region); \
201 }; \
202} while(0)
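/*
 * Usage sketch: `exec` runs with the GVL released, but only if
 * blocking_region_begin() succeeded (it can fail when fail_if_interrupted
 * is set and an interrupt is already pending). This is exactly how
 * rb_nogvl() below uses it:
 *
 *   BLOCKING_REGION(th, {
 *       val = func(data1);
 *       saved_errno = errno;
 *   }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
 */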
203
204/*
205 * returns true if this thread was spuriously interrupted, false otherwise
206 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
207 */
208#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
209static inline int
210vm_check_ints_blocking(rb_execution_context_t *ec)
211{
212 rb_thread_t *th = rb_ec_thread_ptr(ec);
213
214 if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
215 if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
216 }
217 else {
218 th->pending_interrupt_queue_checked = 0;
219 RUBY_VM_SET_INTERRUPT(ec);
220 }
221 return rb_threadptr_execute_interrupts(th, 1);
222}
223
224int
225rb_vm_check_ints_blocking(rb_execution_context_t *ec)
226{
227 return vm_check_ints_blocking(ec);
228}
229
230/*
231 * poll() is supported by many OSes, but so far Linux is the only
232 * one we know of that supports using poll() in all places select()
233 * would work.
234 */
235#if defined(HAVE_POLL)
236# if defined(__linux__)
237# define USE_POLL
238# endif
239# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
240# define USE_POLL
241 /* FreeBSD does not set POLLOUT when POLLHUP happens */
242# define POLLERR_SET (POLLHUP | POLLERR)
243# endif
244#endif
245
246static void
247timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
248 const struct timeval *timeout)
249{
250 if (timeout) {
251 *rel = rb_timeval2hrtime(timeout);
252 *end = rb_hrtime_add(rb_hrtime_now(), *rel);
253 *to = rel;
254 }
255 else {
256 *to = 0;
257 }
258}
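/*
 * Illustrative example: callers pass a NULL timeout to mean "wait forever";
 * otherwise *rel receives the relative wait, *end the absolute deadline
 * (both in nanoseconds, as rb_hrtime_t), and *to points at *rel:
 *
 *   rb_hrtime_t rel, end, *to;
 *   struct timeval tv = { .tv_sec = 1, .tv_usec = 500000 };
 *   timeout_prepare(&to, &rel, &end, &tv); // to == &rel, rel == 1.5e9 ns
 */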
259
260MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
261void ruby_sigchld_handler(rb_vm_t *); /* signal.c */
262
263static void
264ubf_sigwait(void *ignore)
265{
266 rb_thread_wakeup_timer_thread(0);
267}
268
269#include THREAD_IMPL_SRC
270
271/*
272 * TODO: somebody with win32 knowledge should be able to get rid of
273 * timer-thread by busy-waiting on signals. And it should be possible
274 * to make the GVL in thread_pthread.c platform-independent.
275 */
276#ifndef BUSY_WAIT_SIGNALS
277# define BUSY_WAIT_SIGNALS (0)
278#endif
279
280#ifndef USE_EVENTFD
281# define USE_EVENTFD (0)
282#endif
283
284#include "thread_sync.c"
285
286void
287rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
288{
289 rb_native_mutex_initialize(lock);
290}
291
292void
293rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
294{
295 rb_native_mutex_destroy(lock);
296}
297
298void
299rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
300{
301 rb_native_mutex_lock(lock);
302}
303
304void
305rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
306{
307 rb_native_mutex_unlock(lock);
308}
309
310static int
311unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
312{
313 do {
314 if (fail_if_interrupted) {
315 if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
316 return FALSE;
317 }
318 }
319 else {
320 RUBY_VM_CHECK_INTS(th->ec);
321 }
322
323 rb_native_mutex_lock(&th->interrupt_lock);
324 } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
325 (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));
326
327 VM_ASSERT(th->unblock.func == NULL);
328
329 th->unblock.func = func;
330 th->unblock.arg = arg;
331 rb_native_mutex_unlock(&th->interrupt_lock);
332
333 return TRUE;
334}
335
336static void
337unblock_function_clear(rb_thread_t *th)
338{
339 rb_native_mutex_lock(&th->interrupt_lock);
340 th->unblock.func = 0;
341 rb_native_mutex_unlock(&th->interrupt_lock);
342}
343
344static void
345rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
346{
347 rb_native_mutex_lock(&th->interrupt_lock);
348
349 if (trap) {
350 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
351 }
352 else {
353 RUBY_VM_SET_INTERRUPT(th->ec);
354 }
355 if (th->unblock.func != NULL) {
356 (th->unblock.func)(th->unblock.arg);
357 }
358 else {
359 /* none */
360 }
361 rb_native_mutex_unlock(&th->interrupt_lock);
362}
363
364void
365rb_threadptr_interrupt(rb_thread_t *th)
366{
367 rb_threadptr_interrupt_common(th, 0);
368}
369
370static void
371threadptr_trap_interrupt(rb_thread_t *th)
372{
373 rb_threadptr_interrupt_common(th, 1);
374}
375
376static void
377terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
378{
379 rb_thread_t *th = 0;
380
381 ccan_list_for_each(&r->threads.set, th, lt_node) {
382 if (th != main_thread) {
383 RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
384
385 rb_threadptr_pending_interrupt_enque(th, eTerminateSignal);
386 rb_threadptr_interrupt(th);
387
388 RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
389 }
390 else {
391 RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
392 }
393 }
394}
395
396static void
397rb_threadptr_join_list_wakeup(rb_thread_t *thread)
398{
399 while (thread->join_list) {
400 struct rb_waiting_list *join_list = thread->join_list;
401
402 // Consume the entry from the join list:
403 thread->join_list = join_list->next;
404
405 rb_thread_t *target_thread = join_list->thread;
406
407 if (target_thread->scheduler != Qnil && join_list->fiber) {
408 rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
409 }
410 else {
411 rb_threadptr_interrupt(target_thread);
412
413 switch (target_thread->status) {
414 case THREAD_STOPPED:
415 case THREAD_STOPPED_FOREVER:
416 target_thread->status = THREAD_RUNNABLE;
417 default:
418 break;
419 }
420 }
421 }
422}
423
424void
425rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
426{
427 while (th->keeping_mutexes) {
428 rb_mutex_t *mutex = th->keeping_mutexes;
429 th->keeping_mutexes = mutex->next_mutex;
430
431 /* rb_warn("mutex #<%p> remains to be locked by terminated thread", (void *)mutexes); */
432
433 const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
434 if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
435 }
436}
437
438void
439rb_thread_terminate_all(rb_thread_t *th)
440{
441 rb_ractor_t *cr = th->ractor;
442 rb_execution_context_t * volatile ec = th->ec;
443 volatile int sleeping = 0;
444
445 if (cr->threads.main != th) {
446 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
447 (void *)cr->threads.main, (void *)th);
448 }
449
450 /* unlock all locking mutexes */
451 rb_threadptr_unlock_all_locking_mutexes(th);
452
453 EC_PUSH_TAG(ec);
454 if (EC_EXEC_TAG() == TAG_NONE) {
455 retry:
456 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
457
458 terminate_all(cr, th);
459
460 while (rb_ractor_living_thread_num(cr) > 1) {
461 rb_hrtime_t rel = RB_HRTIME_PER_SEC;
462 /*
463 * The thread-exit routine in thread_start_func_2 notifies
464 * us when the last sub-thread exits.
465 */
466 sleeping = 1;
467 native_sleep(th, &rel);
468 RUBY_VM_CHECK_INTS_BLOCKING(ec);
469 sleeping = 0;
470 }
471 }
472 else {
473 /*
474 * When an exception is caught (e.g. Ctrl+C), broadcast the kill
475 * request again to ensure that all threads are killed even if they
476 * are blocked on sleep, mutex, etc.
477 */
478 if (sleeping) {
479 sleeping = 0;
480 goto retry;
481 }
482 }
483 EC_POP_TAG();
484}
485
486void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
487
488static void
489thread_cleanup_func_before_exec(void *th_ptr)
490{
491 rb_thread_t *th = th_ptr;
492 th->status = THREAD_KILLED;
493
494 // The thread stack doesn't exist in the forked process:
495 th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
496
497 rb_threadptr_root_fiber_terminate(th);
498}
499
500static void
501thread_cleanup_func(void *th_ptr, int atfork)
502{
503 rb_thread_t *th = th_ptr;
504
505 th->locking_mutex = Qfalse;
506 thread_cleanup_func_before_exec(th_ptr);
507
508 /*
509 * Unfortunately, we can't release native threading resources at fork
510 * because libc may be in an unstable locking state; therefore touching
511 * a threading resource may cause a deadlock.
512 *
513 * FIXME: Skipping native_mutex_destroy(pthread_mutex_destroy) is safe
514 * with NPTL, but native_thread_destroy calls pthread_cond_destroy
515 * which calls free(3), so there is a small memory leak at fork here.
516 */
517 if (atfork)
518 return;
519
520 rb_native_mutex_destroy(&th->interrupt_lock);
521 native_thread_destroy(th);
522}
523
524static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
525static VALUE rb_thread_to_s(VALUE thread);
526
527void
528ruby_thread_init_stack(rb_thread_t *th)
529{
530 native_thread_init_stack(th);
531}
532
533const VALUE *
534rb_vm_proc_local_ep(VALUE proc)
535{
536 const VALUE *ep = vm_proc_ep(proc);
537
538 if (ep) {
539 return rb_vm_ep_local_ep(ep);
540 }
541 else {
542 return NULL;
543 }
544}
545
546// for ractor, defined in vm.c
547VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
548 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
549
550static VALUE
551thread_do_start_proc(rb_thread_t *th)
552{
553 VALUE args = th->invoke_arg.proc.args;
554 const VALUE *args_ptr;
555 int args_len;
556 VALUE procval = th->invoke_arg.proc.proc;
557 rb_proc_t *proc;
558 GetProcPtr(procval, proc);
559
560 th->ec->errinfo = Qnil;
561 th->ec->root_lep = rb_vm_proc_local_ep(procval);
562 th->ec->root_svar = Qfalse;
563
564 vm_check_ints_blocking(th->ec);
565
566 if (th->invoke_type == thread_invoke_type_ractor_proc) {
567 VALUE self = rb_ractor_self(th->ractor);
568 VM_ASSERT(FIXNUM_P(args));
569 args_len = FIX2INT(args);
570 args_ptr = ALLOCA_N(VALUE, args_len);
571 rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
572 vm_check_ints_blocking(th->ec);
573
574 return rb_vm_invoke_proc_with_self(
575 th->ec, proc, self,
576 args_len, args_ptr,
577 th->invoke_arg.proc.kw_splat,
578 VM_BLOCK_HANDLER_NONE
579 );
580 }
581 else {
582 args_len = RARRAY_LENINT(args);
583 if (args_len < 8) {
584 /* free proc.args if the length is small enough */
585 args_ptr = ALLOCA_N(VALUE, args_len);
586 MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR_TRANSIENT(args), VALUE, args_len);
587 th->invoke_arg.proc.args = Qnil;
588 }
589 else {
590 args_ptr = RARRAY_CONST_PTR(args);
591 }
592
593 vm_check_ints_blocking(th->ec);
594
595 return rb_vm_invoke_proc(
596 th->ec, proc,
597 args_len, args_ptr,
598 th->invoke_arg.proc.kw_splat,
599 VM_BLOCK_HANDLER_NONE
600 );
601 }
602}
603
604static void
605thread_do_start(rb_thread_t *th)
606{
607 native_set_thread_name(th);
608 VALUE result = Qundef;
609
610 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
611
612 switch (th->invoke_type) {
613 case thread_invoke_type_proc:
614 result = thread_do_start_proc(th);
615 break;
616
617 case thread_invoke_type_ractor_proc:
618 result = thread_do_start_proc(th);
619 rb_ractor_atexit(th->ec, result);
620 break;
621
622 case thread_invoke_type_func:
623 result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
624 break;
625
626 case thread_invoke_type_none:
627 rb_bug("unreachable");
628 }
629
630 rb_fiber_scheduler_set(Qnil);
631
632 th->value = result;
633
634 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
635}
636
637void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
638#define thread_sched_to_dead thread_sched_to_waiting
639
640static int
641thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
642{
643 STACK_GROW_DIR_DETECTION;
644 enum ruby_tag_type state;
645 VALUE errinfo = Qnil;
646 size_t size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
647 rb_thread_t *ractor_main_th = th->ractor->threads.main;
648 VALUE * vm_stack = NULL;
649
650 VM_ASSERT(th != th->vm->ractor.main_thread);
651 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
652
653 // setup native thread
654 thread_sched_to_running(TH_SCHED(th), th);
655 ruby_thread_set_native(th);
656
657 RUBY_DEBUG_LOG("got lock. th:%u", rb_th_serial(th));
658
659 // setup ractor
660 if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
661 RB_VM_LOCK();
662 {
663 rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
664 rb_ractor_t *r = th->ractor;
665 r->r_stdin = rb_io_prep_stdin();
666 r->r_stdout = rb_io_prep_stdout();
667 r->r_stderr = rb_io_prep_stderr();
668 }
669 RB_VM_UNLOCK();
670 }
671
672 // This assertion does not pass on the win32 env. Check it later.
673 // VM_ASSERT((size * sizeof(VALUE)) <= th->ec->machine.stack_maxsize);
674
675 // setup VM and machine stack
676 vm_stack = alloca(size * sizeof(VALUE));
677 VM_ASSERT(vm_stack);
678
679 rb_ec_initialize_vm_stack(th->ec, vm_stack, size);
680 th->ec->machine.stack_start = STACK_DIR_UPPER(vm_stack + size, vm_stack);
681 th->ec->machine.stack_maxsize -= size * sizeof(VALUE);
682
683 // Ensure that we are not joinable.
684 VM_ASSERT(UNDEF_P(th->value));
685
686 EC_PUSH_TAG(th->ec);
687
688 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
689 SAVE_ROOT_JMPBUF(th, thread_do_start(th));
690 }
691 else {
692 errinfo = th->ec->errinfo;
693
694 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
695 if (!NIL_P(exc)) errinfo = exc;
696
697 if (state == TAG_FATAL) {
698 if (th->invoke_type == thread_invoke_type_ractor_proc) {
699 rb_ractor_atexit(th->ec, Qnil);
700 }
701 /* fatal error within this thread, need to stop whole script */
702 }
703 else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
704 /* exit on main_thread. */
705 }
706 else {
707 if (th->report_on_exception) {
708 VALUE mesg = rb_thread_to_s(th->self);
709 rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
710 rb_write_error_str(mesg);
711 rb_ec_error_print(th->ec, errinfo);
712 }
713
714 if (th->invoke_type == thread_invoke_type_ractor_proc) {
715 rb_ractor_atexit_exception(th->ec);
716 }
717
718 if (th->vm->thread_abort_on_exception ||
719 th->abort_on_exception || RTEST(ruby_debug)) {
720 /* exit on main_thread */
721 }
722 else {
723 errinfo = Qnil;
724 }
725 }
726 th->value = Qnil;
727 }
728
729 // The thread is effectively finished and can be joined.
730 VM_ASSERT(!UNDEF_P(th->value));
731
732 rb_threadptr_join_list_wakeup(th);
733 rb_threadptr_unlock_all_locking_mutexes(th);
734
735 if (th->invoke_type == thread_invoke_type_ractor_proc) {
736 rb_thread_terminate_all(th);
737 rb_ractor_teardown(th->ec);
738 }
739
740 th->status = THREAD_KILLED;
741 RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));
742
743 if (th->vm->ractor.main_thread == th) {
744 ruby_stop(0);
745 }
746
747 if (RB_TYPE_P(errinfo, T_OBJECT)) {
748 /* treat as a normal error object */
749 rb_threadptr_raise(ractor_main_th, 1, &errinfo);
750 }
751
752 EC_POP_TAG();
753
754 rb_ec_clear_current_thread_trace_func(th->ec);
755
756 /* locking_mutex must be Qfalse */
757 if (th->locking_mutex != Qfalse) {
758 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
759 (void *)th, th->locking_mutex);
760 }
761
762 if (ractor_main_th->status == THREAD_KILLED &&
763 th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
764 /* I'm the last thread; wake up the main thread from rb_thread_terminate_all */
765 rb_threadptr_interrupt(ractor_main_th);
766 }
767
768 rb_check_deadlock(th->ractor);
769
770 rb_fiber_close(th->ec->fiber_ptr);
771
772 thread_cleanup_func(th, FALSE);
773 VM_ASSERT(th->ec->vm_stack == NULL);
774
775 if (th->invoke_type == thread_invoke_type_ractor_proc) {
776 // After rb_ractor_living_threads_remove(), GC can happen at any time
777 // and this ractor can be collected (destroying its GVL), so releasing
778 // the scheduler (gvl_release()) must come before it.
779 thread_sched_to_dead(TH_SCHED(th));
780 rb_ractor_living_threads_remove(th->ractor, th);
781 }
782 else {
783 rb_ractor_living_threads_remove(th->ractor, th);
784 thread_sched_to_dead(TH_SCHED(th));
785 }
786
787 return 0;
788}
789
790struct thread_create_params {
791 enum thread_invoke_type type;
792
793 // for normal proc thread
794 VALUE args;
795 VALUE proc;
796
797 // for ractor
798 rb_ractor_t *g;
799
800 // for func
801 VALUE (*fn)(void *);
802};
803
804static VALUE
805thread_create_core(VALUE thval, struct thread_create_params *params)
806{
807 rb_execution_context_t *ec = GET_EC();
808 rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
809 int err;
810
811 if (OBJ_FROZEN(current_th->thgroup)) {
813 "can't start a new thread (frozen ThreadGroup)");
814 }
815
816 rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);
817
818 switch (params->type) {
819 case thread_invoke_type_proc:
820 th->invoke_type = thread_invoke_type_proc;
821 th->invoke_arg.proc.args = params->args;
822 th->invoke_arg.proc.proc = params->proc;
823 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
824 break;
825
826 case thread_invoke_type_ractor_proc:
827#if RACTOR_CHECK_MODE > 0
828 rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
829#endif
830 th->invoke_type = thread_invoke_type_ractor_proc;
831 th->ractor = params->g;
832 th->ractor->threads.main = th;
833 th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
834 th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
835 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
836 rb_ractor_send_parameters(ec, params->g, params->args);
837 break;
838
839 case thread_invoke_type_func:
840 th->invoke_type = thread_invoke_type_func;
841 th->invoke_arg.func.func = params->fn;
842 th->invoke_arg.func.arg = (void *)params->args;
843 break;
844
845 default:
846 rb_bug("unreachable");
847 }
848
849 th->priority = current_th->priority;
850 th->thgroup = current_th->thgroup;
851
852 th->pending_interrupt_queue = rb_ary_hidden_new(0);
853 th->pending_interrupt_queue_checked = 0;
854 th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
855 RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);
856
857 rb_native_mutex_initialize(&th->interrupt_lock);
858
859 RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));
860
861 rb_ractor_living_threads_insert(th->ractor, th);
862
863 /* kick thread */
864 err = native_thread_create(th);
865 if (err) {
866 th->status = THREAD_KILLED;
867 rb_ractor_living_threads_remove(th->ractor, th);
868 rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
869 }
870 return thval;
871}
872
873#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
874
875/*
876 * call-seq:
877 * Thread.new { ... } -> thread
878 * Thread.new(*args, &proc) -> thread
879 * Thread.new(*args) { |args| ... } -> thread
880 *
881 * Creates a new thread executing the given block.
882 *
883 * Any +args+ given to ::new will be passed to the block:
884 *
885 * arr = []
886 * a, b, c = 1, 2, 3
887 * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
888 * arr #=> [1, 2, 3]
889 *
890 * A ThreadError exception is raised if ::new is called without a block.
891 *
892 * If you're going to subclass Thread, be sure to call super in your
893 * +initialize+ method, otherwise a ThreadError will be raised.
894 */
895static VALUE
896thread_s_new(int argc, VALUE *argv, VALUE klass)
897{
898 rb_thread_t *th;
899 VALUE thread = rb_thread_alloc(klass);
900
901 if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
902 rb_raise(rb_eThreadError, "can't alloc thread");
903 }
904
905 rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
906 th = rb_thread_ptr(thread);
907 if (!threadptr_initialized(th)) {
908 rb_raise(rb_eThreadError, "uninitialized thread - check `%"PRIsVALUE"#initialize'",
909 klass);
910 }
911 return thread;
912}
913
914/*
915 * call-seq:
916 * Thread.start([args]*) {|args| block } -> thread
917 * Thread.fork([args]*) {|args| block } -> thread
918 *
919 * Basically the same as ::new. However, if class Thread is subclassed, then
920 * calling +start+ in that subclass will not invoke the subclass's
921 * +initialize+ method.
922 */
923
924static VALUE
925thread_start(VALUE klass, VALUE args)
926{
927 struct thread_create_params params = {
928 .type = thread_invoke_type_proc,
929 .args = args,
930 .proc = rb_block_proc(),
931 };
932 return thread_create_core(rb_thread_alloc(klass), &params);
933}
934
935static VALUE
936threadptr_invoke_proc_location(rb_thread_t *th)
937{
938 if (th->invoke_type == thread_invoke_type_proc) {
939 return rb_proc_location(th->invoke_arg.proc.proc);
940 }
941 else {
942 return Qnil;
943 }
944}
945
946/* :nodoc: */
947static VALUE
948thread_initialize(VALUE thread, VALUE args)
949{
950 rb_thread_t *th = rb_thread_ptr(thread);
951
952 if (!rb_block_given_p()) {
953 rb_raise(rb_eThreadError, "must be called with a block");
954 }
955 else if (th->invoke_type != thread_invoke_type_none) {
956 VALUE loc = threadptr_invoke_proc_location(th);
957 if (!NIL_P(loc)) {
959 "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
960 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
961 }
962 else {
963 rb_raise(rb_eThreadError, "already initialized thread");
964 }
965 }
966 else {
967 struct thread_create_params params = {
968 .type = thread_invoke_type_proc,
969 .args = args,
970 .proc = rb_block_proc(),
971 };
972 return thread_create_core(thread, &params);
973 }
974}
975
976VALUE
977rb_thread_create(VALUE (*fn)(void *), void *arg)
978{
979 struct thread_create_params params = {
980 .type = thread_invoke_type_func,
981 .fn = fn,
982 .args = (VALUE)arg,
983 };
984 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
985}
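/*
 * Usage sketch (illustrative, hypothetical names): rb_thread_create() starts
 * a new Ruby thread that runs `fn(arg)` with the GVL held, so Ruby APIs may
 * be used inside it:
 *
 *   static VALUE worker(void *arg)
 *   {
 *       return rb_funcall((VALUE)arg, rb_intern("call"), 0);
 *   }
 *
 *   static VALUE spawn_worker(VALUE self, VALUE callable)
 *   {
 *       return rb_thread_create(worker, (void *)callable);
 *   }
 */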
986
987VALUE
988rb_thread_create_ractor(rb_ractor_t *g, VALUE args, VALUE proc)
989{
990 struct thread_create_params params = {
991 .type = thread_invoke_type_ractor_proc,
992 .g = g,
993 .args = args,
994 .proc = proc,
995 };
996 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
997}
998
999
1000struct join_arg {
1001 struct rb_waiting_list *waiter;
1002 rb_thread_t *target;
1003 VALUE timeout;
1004 rb_hrtime_t *limit;
1005};
1006
1007static VALUE
1008remove_from_join_list(VALUE arg)
1009{
1010 struct join_arg *p = (struct join_arg *)arg;
1011 rb_thread_t *target_thread = p->target;
1012
1013 if (target_thread->status != THREAD_KILLED) {
1014 struct rb_waiting_list **join_list = &target_thread->join_list;
1015
1016 while (*join_list) {
1017 if (*join_list == p->waiter) {
1018 *join_list = (*join_list)->next;
1019 break;
1020 }
1021
1022 join_list = &(*join_list)->next;
1023 }
1024 }
1025
1026 return Qnil;
1027}
1028
1029static int
1030thread_finished(rb_thread_t *th)
1031{
1032 return th->status == THREAD_KILLED || !UNDEF_P(th->value);
1033}
1034
1035static VALUE
1036thread_join_sleep(VALUE arg)
1037{
1038 struct join_arg *p = (struct join_arg *)arg;
1039 rb_thread_t *target_th = p->target, *th = p->waiter->thread;
1040 rb_hrtime_t end = 0, *limit = p->limit;
1041
1042 if (limit) {
1043 end = rb_hrtime_add(*limit, rb_hrtime_now());
1044 }
1045
1046 while (!thread_finished(target_th)) {
1047 VALUE scheduler = rb_fiber_scheduler_current();
1048
1049 if (scheduler != Qnil) {
1050 rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
1051 }
1052 else if (!limit) {
1053 th->status = THREAD_STOPPED_FOREVER;
1054 rb_ractor_sleeper_threads_inc(th->ractor);
1055 rb_check_deadlock(th->ractor);
1056 native_sleep(th, 0);
1057 rb_ractor_sleeper_threads_dec(th->ractor);
1058 }
1059 else {
1060 if (hrtime_update_expire(limit, end)) {
1061 RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
1062 return Qfalse;
1063 }
1064 th->status = THREAD_STOPPED;
1065 native_sleep(th, limit);
1066 }
1067 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1068 th->status = THREAD_RUNNABLE;
1069
1070 RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1071 }
1072 return Qtrue;
1073}
1074
1075static VALUE
1076thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
1077{
1078 rb_execution_context_t *ec = GET_EC();
1079 rb_thread_t *th = ec->thread_ptr;
1080 rb_fiber_t *fiber = ec->fiber_ptr;
1081
1082 if (th == target_th) {
1083 rb_raise(rb_eThreadError, "Target thread must not be current thread");
1084 }
1085
1086 if (th->ractor->threads.main == target_th) {
1087 rb_raise(rb_eThreadError, "Target thread must not be main thread");
1088 }
1089
1090 RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1091
1092 if (target_th->status != THREAD_KILLED) {
1093 struct rb_waiting_list waiter;
1094 waiter.next = target_th->join_list;
1095 waiter.thread = th;
1096 waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
1097 target_th->join_list = &waiter;
1098
1099 struct join_arg arg;
1100 arg.waiter = &waiter;
1101 arg.target = target_th;
1102 arg.timeout = timeout;
1103 arg.limit = limit;
1104
1105 if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
1106 return Qnil;
1107 }
1108 }
1109
1110 RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1111
1112 if (target_th->ec->errinfo != Qnil) {
1113 VALUE err = target_th->ec->errinfo;
1114
1115 if (FIXNUM_P(err)) {
1116 switch (err) {
1117 case INT2FIX(TAG_FATAL):
1118 RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1119
1120 /* OK. killed. */
1121 break;
1122 default:
1123 rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
1124 }
1125 }
1126 else if (THROW_DATA_P(target_th->ec->errinfo)) {
1127 rb_bug("thread_join: THROW_DATA should not reach here.");
1128 }
1129 else {
1130 /* normal exception */
1131 rb_exc_raise(err);
1132 }
1133 }
1134 return target_th->self;
1135}
1136
1137/*
1138 * call-seq:
1139 * thr.join -> thr
1140 * thr.join(limit) -> thr
1141 *
1142 * The calling thread will suspend execution and run this +thr+.
1143 *
1144 * Does not return until +thr+ exits or until the given +limit+ seconds have
1145 * passed.
1146 *
1147 * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1148 * returned.
1149 *
1150 * Any threads not joined will be killed when the main program exits.
1151 *
1152 * If +thr+ had previously raised an exception and the ::abort_on_exception or
1153 * $DEBUG flags are not set (so the exception has not yet been processed), it
1154 * will be processed at this time.
1155 *
1156 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1157 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1158 * x.join # Let thread x finish, thread a will be killed on exit.
1159 * #=> "axyz"
1160 *
1161 * The following example illustrates the +limit+ parameter.
1162 *
1163 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1164 * puts "Waiting" until y.join(0.15)
1165 *
1166 * This will produce:
1167 *
1168 * tick...
1169 * Waiting
1170 * tick...
1171 * Waiting
1172 * tick...
1173 * tick...
1174 */
1175
1176static VALUE
1177thread_join_m(int argc, VALUE *argv, VALUE self)
1178{
1179 VALUE timeout = Qnil;
1180 rb_hrtime_t rel = 0, *limit = 0;
1181
1182 if (rb_check_arity(argc, 0, 1)) {
1183 timeout = argv[0];
1184 }
1185
1186 // Convert the timeout eagerly, so it's always converted and deterministic
1187 /*
1188 * This supports INFINITY and negative values, so we can't use
1189 * rb_time_interval right now...
1190 */
1191 if (NIL_P(timeout)) {
1192 /* unlimited */
1193 }
1194 else if (FIXNUM_P(timeout)) {
1195 rel = rb_sec2hrtime(NUM2TIMET(timeout));
1196 limit = &rel;
1197 }
1198 else {
1199 limit = double2hrtime(&rel, rb_num2dbl(timeout));
1200 }
1201
1202 return thread_join(rb_thread_ptr(self), timeout, limit);
1203}
1204
1205/*
1206 * call-seq:
1207 * thr.value -> obj
1208 *
1209 * Waits for +thr+ to complete, using #join, and returns its value or raises
1210 * the exception which terminated the thread.
1211 *
1212 * a = Thread.new { 2 + 2 }
1213 * a.value #=> 4
1214 *
1215 * b = Thread.new { raise 'something went wrong' }
1216 * b.value #=> RuntimeError: something went wrong
1217 */
1218
1219static VALUE
1220thread_value(VALUE self)
1221{
1222 rb_thread_t *th = rb_thread_ptr(self);
1223 thread_join(th, Qnil, 0);
1224 if (UNDEF_P(th->value)) {
1225 // If the thread is dead because we forked, th->value is still Qundef.
1226 return Qnil;
1227 }
1228 return th->value;
1229}
1230
1231/*
1232 * Thread Scheduling
1233 */
1234
1235static void
1236getclockofday(struct timespec *ts)
1237{
1238#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1239 if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
1240 return;
1241#endif
1242 rb_timespec_now(ts);
1243}
1244
1245/*
1246 * Don't inline this, since library call is already time consuming
1247 * and we don't want "struct timespec" on stack too long for GC
1248 */
1249NOINLINE(rb_hrtime_t rb_hrtime_now(void));
1250rb_hrtime_t
1251rb_hrtime_now(void)
1252{
1253 struct timespec ts;
1254
1255 getclockofday(&ts);
1256 return rb_timespec2hrtime(&ts);
1257}
1258
1259static void
1260sleep_forever(rb_thread_t *th, unsigned int fl)
1261{
1262 enum rb_thread_status prev_status = th->status;
1263 enum rb_thread_status status;
1264 int woke;
1265
1266 status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1267 th->status = status;
1268 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1269 while (th->status == status) {
1270 if (fl & SLEEP_DEADLOCKABLE) {
1271 rb_ractor_sleeper_threads_inc(th->ractor);
1272 rb_check_deadlock(th->ractor);
1273 }
1274 native_sleep(th, 0);
1275 if (fl & SLEEP_DEADLOCKABLE) {
1276 rb_ractor_sleeper_threads_dec(th->ractor);
1277 }
1278 woke = vm_check_ints_blocking(th->ec);
1279 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1280 break;
1281 }
1282 th->status = prev_status;
1283}
1284
1285/*
1286 * at least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
1287 * being uninitialized, maybe other versions, too.
1288 */
1289COMPILER_WARNING_PUSH
1290#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1291COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1292#endif
1293#ifndef PRIu64
1294#define PRIu64 PRI_64_PREFIX "u"
1295#endif
1296/*
1297 * @end is the absolute time when @timeout is set to expire.
1298 * Returns true if @end has passed;
1299 * otherwise, updates @timeout and returns false.
1300 */
1301static int
1302hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
1303{
1304 rb_hrtime_t now = rb_hrtime_now();
1305
1306 if (now > end) return 1;
1307
1308 RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);
1309
1310 *timeout = end - now;
1311 return 0;
1312}
1313COMPILER_WARNING_POP
1314
1315static int
1316sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1317{
1318 enum rb_thread_status prev_status = th->status;
1319 int woke;
1320 rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1321
1322 th->status = THREAD_STOPPED;
1323 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1324 while (th->status == THREAD_STOPPED) {
1325 native_sleep(th, &rel);
1326 woke = vm_check_ints_blocking(th->ec);
1327 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1328 break;
1329 if (hrtime_update_expire(&rel, end))
1330 break;
1331 woke = 1;
1332 }
1333 th->status = prev_status;
1334 return woke;
1335}
1336
1337static int
1338sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
1339{
1340 enum rb_thread_status prev_status = th->status;
1341 int woke;
1342 rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());
1343
1344 th->status = THREAD_STOPPED;
1345 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1346 while (th->status == THREAD_STOPPED) {
1347 native_sleep(th, &rel);
1348 woke = vm_check_ints_blocking(th->ec);
1349 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1350 break;
1351 if (hrtime_update_expire(&rel, end))
1352 break;
1353 woke = 1;
1354 }
1355 th->status = prev_status;
1356 return woke;
1357}
1358
1359void
1360rb_thread_sleep_forever(void)
1361{
1362 RUBY_DEBUG_LOG("");
1363 sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1364}
1365
1366void
1367rb_thread_sleep_deadly(void)
1368{
1369 RUBY_DEBUG_LOG("");
1370 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
1371}
1372
1373void
1374rb_thread_sleep_interruptible(void)
1375{
1376 rb_thread_t *th = GET_THREAD();
1377 enum rb_thread_status prev_status = th->status;
1378
1379 th->status = THREAD_STOPPED;
1380 native_sleep(th, 0);
1381 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1382 th->status = prev_status;
1383}
1384
1385static void
1386rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
1387{
1388 VALUE scheduler = rb_fiber_scheduler_current();
1389 if (scheduler != Qnil) {
1390 rb_fiber_scheduler_block(scheduler, blocker, timeout);
1391 }
1392 else {
1393 RUBY_DEBUG_LOG("");
1394 if (end) {
1395 sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
1396 }
1397 else {
1398 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
1399 }
1400 }
1401}
1402
1403void
1404rb_thread_wait_for(struct timeval time)
1405{
1406 rb_thread_t *th = GET_THREAD();
1407
1408 sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1409}
1410
1411/*
1412 * CAUTION: This function causes thread switching.
1413 * rb_thread_check_ints() checks Ruby's pending interrupts;
1414 * some interrupts need thread switching, invoking handlers,
1415 * and so on.
1416 */
1417
1418void
1419rb_thread_check_ints(void)
1420{
1421 RUBY_VM_CHECK_INTS_BLOCKING(GET_EC());
1422}
1423
1424/*
1425 * Hidden API for tcl/tk wrapper.
1426 * There is no guarantee to perpetuate it.
1427 */
1428int
1429rb_thread_check_trap_pending(void)
1430{
1431 return rb_signal_buff_size() != 0;
1432}
1433
1434/* This function can be called in blocking region. */
1435int
1436rb_thread_interrupted(VALUE thval)
1437{
1438 return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1439}
1440
1441void
1442rb_thread_sleep(int sec)
1443{
1444 rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
1445}
1446
1447static void
1448rb_thread_schedule_limits(uint32_t limits_us)
1449{
1450 if (!rb_thread_alone()) {
1451 rb_thread_t *th = GET_THREAD();
1452 RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);
1453
1454 if (th->running_time_us >= limits_us) {
1455 RUBY_DEBUG_LOG("switch %s", "start");
1456
1457 RB_GC_SAVE_MACHINE_CONTEXT(th);
1458 thread_sched_yield(TH_SCHED(th), th);
1459 rb_ractor_thread_switch(th->ractor, th);
1460
1461 RUBY_DEBUG_LOG("switch %s", "done");
1462 }
1463 }
1464}
1465
1466void
1467rb_thread_schedule(void)
1468{
1469 rb_thread_schedule_limits(0);
1470 RUBY_VM_CHECK_INTS(GET_EC());
1471}
1472
1473/* blocking region */
1474
1475static inline int
1476blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1477 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1478{
1479#ifdef RUBY_VM_CRITICAL_SECTION
1480 VM_ASSERT(ruby_assert_critical_section_entered == 0);
1481#endif
1482 VM_ASSERT(th == GET_THREAD());
1483
1484 region->prev_status = th->status;
1485 if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1486 th->blocking_region_buffer = region;
1487 th->status = THREAD_STOPPED;
1488 rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
1489
1490 RUBY_DEBUG_LOG("");
1491
1492 RB_GC_SAVE_MACHINE_CONTEXT(th);
1493 thread_sched_to_waiting(TH_SCHED(th));
1494 return TRUE;
1495 }
1496 else {
1497 return FALSE;
1498 }
1499}
1500
1501static inline void
1502blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1503{
1504 /* entry to ubf_list still permitted at this point, make it impossible: */
1505 unblock_function_clear(th);
1506 /* entry to ubf_list impossible at this point, so unregister is safe: */
1507 unregister_ubf_list(th);
1508
1509 thread_sched_to_running(TH_SCHED(th), th);
1510 rb_ractor_thread_switch(th->ractor, th);
1511
1512 th->blocking_region_buffer = 0;
1513 rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
1514 if (th->status == THREAD_STOPPED) {
1515 th->status = region->prev_status;
1516 }
1517
1518 RUBY_DEBUG_LOG("");
1519 VM_ASSERT(th == GET_THREAD());
1520}
1521
1522void *
1523rb_nogvl(void *(*func)(void *), void *data1,
1524 rb_unblock_function_t *ubf, void *data2,
1525 int flags)
1526{
1527 void *val = 0;
1528 rb_execution_context_t *ec = GET_EC();
1529 rb_thread_t *th = rb_ec_thread_ptr(ec);
1530 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1531 bool is_main_thread = vm->ractor.main_thread == th;
1532 int saved_errno = 0;
1533 VALUE ubf_th = Qfalse;
1534
1535 if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
1536 ubf = ubf_select;
1537 data2 = th;
1538 }
1539 else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1540 if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1541 vm->ubf_async_safe = 1;
1542 }
1543 else {
1544 ubf_th = rb_thread_start_unblock_thread();
1545 }
1546 }
1547
1548 BLOCKING_REGION(th, {
1549 val = func(data1);
1550 saved_errno = errno;
1551 }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1552
1553 if (is_main_thread) vm->ubf_async_safe = 0;
1554
1555 if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1556 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1557 }
1558
1559 if (ubf_th != Qfalse) {
1560 thread_value(rb_thread_kill(ubf_th));
1561 }
1562
1563 errno = saved_errno;
1564
1565 return val;
1566}
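/*
 * Illustrative sketch (hypothetical helpers): rb_nogvl() generalizes the
 * rb_thread_call_without_gvl() family with flags. RB_NOGVL_INTR_FAIL
 * refuses to enter the region if an interrupt is already pending;
 * RB_NOGVL_UBF_ASYNC_SAFE declares that `ubf` may be invoked from a
 * signal-handler-like context:
 *
 *   void *ret = rb_nogvl(do_io, &req, cancel_io, &req,
 *                        RB_NOGVL_UBF_ASYNC_SAFE);
 */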
1567
1568/*
1569 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1570 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1571 * without interrupt process.
1572 *
1573 * rb_thread_call_without_gvl() does:
1574 * (1) Check interrupts.
1575 * (2) release GVL.
1576 * Other Ruby threads may run in parallel.
1577 * (3) call func with data1
1578 * (4) acquire GVL.
1579 * Other Ruby threads can not run in parallel any more.
1580 * (5) Check interrupts.
1581 *
1582 * rb_thread_call_without_gvl2() does:
1583 * (1) Check interrupt and return if interrupted.
1584 * (2) release GVL.
1585 * (3) call func with data1 and a pointer to the flags.
1586 * (4) acquire GVL.
1587 *
1588 * If another thread interrupts this thread (Thread#kill, signal delivery,
1589 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1590 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1591 * toggling a cancellation flag, canceling the invocation of a call inside
1592 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1593 *
1594 * There are built-in ubfs and you can specify these ubfs:
1595 *
1596 * * RUBY_UBF_IO: ubf for IO operation
1597 * * RUBY_UBF_PROCESS: ubf for process operation
1598 *
1599 * However, we can not guarantee our built-in ubfs interrupt your `func()'
1600 * correctly. Be careful when using rb_thread_call_without_gvl(). If you don't
1601 * provide proper ubf(), your program will not stop for Control+C or other
1602 * shutdown events.
1603 *
1604 * "Check interrupts" on above list means checking asynchronous
1605 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1606 * request, and so on) and calling corresponding procedures
1607 * (such as `trap' for signals, raising an exception for Thread#raise).
1608 * If `func()' has already finished when an interrupt arrives, you may skip
1609 * interrupt checking. For example, assume the following func() that reads data from a file.
1610 *
1611 * read_func(...) {
1612 * // (a) before read
1613 * read(buffer); // (b) reading
1614 * // (c) after read
1615 * }
1616 *
1617 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1618 * `read_func()' and interrupts are checked. However, if an interrupt occurs
1619 * at (c), after the *read* operation is completed, checking interrupts is
1620 * harmful because it causes an irrevocable side effect: the read data will
1621 * vanish. To avoid such problems, `read_func()' should be used with
1622 * `rb_thread_call_without_gvl2()'.
1623 *
1624 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
1625 * immediately. This function does not report when the execution was
1626 * interrupted: there are 4 possible timings, (a), (b), (c) and before calling
1627 * read_func(). You need to record the progress of read_func() and check
1628 * it after `rb_thread_call_without_gvl2()'. You may need to call
1629 * `rb_thread_check_ints()' correctly, or your program can not run
1630 * procedures such as `trap' handlers and so on.
1631 *
1632 * NOTE: You can not execute most of the Ruby C API nor touch Ruby
1633 * objects in `func()' and `ubf()', including raising an
1634 * exception, because the current thread doesn't hold the GVL
1635 * (doing so causes synchronization problems). If you need to
1636 * call Ruby functions, either use rb_thread_call_with_gvl()
1637 * or read the source code of the C APIs and confirm their
1638 * safety by yourself.
1639 *
1640 * NOTE: In short, this API is difficult to use safely. I recommend you
1641 * use other ways if you can. We lack experience using this API.
1642 * Please report any problems related to it.
1643 *
1644 * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
1645 * for a short-running `func()'. Be sure to benchmark, and use this
1646 * mechanism only when `func()' consumes enough time.
1647 *
1648 * Safe C API:
1649 * * rb_thread_interrupted() - check interrupt flag
1650 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1651 * they will work without GVL, and may acquire GVL when GC is needed.
1652 */
1653void *
1654rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1655 rb_unblock_function_t *ubf, void *data2)
1656{
1657 return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
1658}
1659
1660void *
1661rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1662 rb_unblock_function_t *ubf, void *data2)
1663{
1664 return rb_nogvl(func, data1, ubf, data2, 0);
1665}
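/*
 * Concrete sketch of the read_func() example from the comment above
 * (the struct, helper names and ubf mechanism are hypothetical):
 *
 *   struct read_req { int fd; char *buf; size_t len; ssize_t ret; };
 *
 *   static void *read_func(void *p)
 *   {
 *       struct read_req *req = p;
 *       req->ret = read(req->fd, req->buf, req->len); // (b) reading
 *       return NULL;
 *   }
 *
 *   static void read_ubf(void *p)
 *   {
 *       // interrupt read(2), e.g. via a self-pipe or closing the fd;
 *       // the exact mechanism is application-specific
 *   }
 *
 *   // rb_thread_call_without_gvl2() returns without checking interrupts,
 *   // so the caller inspects req.ret to learn how far the read progressed:
 *   rb_thread_call_without_gvl2(read_func, &req, read_ubf, &req);
 */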
1666
1667VALUE
1668rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
1669{
1670 volatile VALUE val = Qundef; /* shouldn't be used */
1671 rb_execution_context_t * volatile ec = GET_EC();
1672 volatile int saved_errno = 0;
1673 enum ruby_tag_type state;
1674
1675 struct waiting_fd waiting_fd = {
1676 .fd = fd,
1677 .th = rb_ec_thread_ptr(ec)
1678 };
1679
1680 // `errno` is only valid when there is an actual error - but we can't
1681 // extract that from the return value of `func` alone, so we clear any
1682 // prior `errno` value here so that we can later check if it was set by
1683 // `func` or not (as opposed to some previously set value).
1684 errno = 0;
1685
1686 RB_VM_LOCK_ENTER();
1687 {
1688 ccan_list_add(&rb_ec_vm_ptr(ec)->waiting_fds, &waiting_fd.wfd_node);
1689 }
1690 RB_VM_LOCK_LEAVE();
1691
1692 EC_PUSH_TAG(ec);
1693 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1694 BLOCKING_REGION(waiting_fd.th, {
1695 val = func(data1);
1696 saved_errno = errno;
1697 }, ubf_select, waiting_fd.th, FALSE);
1698 }
1699 EC_POP_TAG();
1700
1701 /*
1702 * must be deleted before jump
1703 * this will delete either from waiting_fds or on-stack CCAN_LIST_HEAD(busy)
1704 */
1705 RB_VM_LOCK_ENTER();
1706 {
1707 ccan_list_del(&waiting_fd.wfd_node);
1708 }
1709 RB_VM_LOCK_LEAVE();
1710
1711 if (state) {
1712 EC_JUMP_TAG(ec, state);
1713 }
1714 /* TODO: check func() */
1715 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1716
1717 // If the error was a timeout, we raise a specific exception for that:
1718 if (saved_errno == ETIMEDOUT) {
1719 rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
1720 }
1721
1722 errno = saved_errno;
1723
1724 return val;
1725}
1726
1727/*
1728 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1729 *
1730 * After releasing GVL using
1731 * rb_thread_call_without_gvl() you can not access Ruby values or invoke
1732 * methods. If you need to access Ruby you must use this function
1733 * rb_thread_call_with_gvl().
1734 *
1735 * This function rb_thread_call_with_gvl() does:
1736 * (1) acquire GVL.
1737 * (2) call passed function `func'.
1738 * (3) release GVL.
1739 * (4) return a value which is returned at (2).
1740 *
1741 * NOTE: You should not return a Ruby object at (2) because such an
1742 * object will not be marked.
1743 *
1744 * NOTE: If an exception is raised in `func', this function DOES NOT
1745 * protect (catch) the exception. If you have any resources
1746 * which should be freed before throwing an exception, you need
1747 * to use rb_protect() in `func' and return a value indicating
1748 * that an exception was raised.
1749 *
1750 * NOTE: This function should not be called by a thread which was not
1751 * created as a Ruby thread (e.g. created by Thread.new). In other
1752 * words, this function *DOES NOT* associate or convert a NON-Ruby
1753 * thread to a Ruby thread.
1754 */
1755void *
1756rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1757{
1758 rb_thread_t *th = ruby_thread_from_native();
1759 struct rb_blocking_region_buffer *brb;
1760 struct rb_unblock_callback prev_unblock;
1761 void *r;
1762
1763 if (th == 0) {
1764 /* Error has occurred, but we can't use rb_bug()
1765 * because this thread is not Ruby's thread.
1766 * What should we do?
1767 */
1768 bp();
1769 fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1770 exit(EXIT_FAILURE);
1771 }
1772
1773 brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
1774 prev_unblock = th->unblock;
1775
1776 if (brb == 0) {
1777 rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1778 }
1779
1780 blocking_region_end(th, brb);
1781 /* enter to Ruby world: You can access Ruby values, methods and so on. */
1782 r = (*func)(data1);
1783 /* leave from Ruby world: You can not access Ruby values, etc. */
1784 int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1785 RUBY_ASSERT_ALWAYS(released);
1786 return r;
1787}
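/*
 * Illustrative sketch (hypothetical names): calling back into Ruby from a
 * region that runs without the GVL:
 *
 *   static void *log_progress(void *msg)
 *   {
 *       rb_funcall(rb_stderr, rb_intern("puts"), 1,
 *                  rb_str_new_cstr((const char *)msg));
 *       return NULL;
 *   }
 *
 *   static void *long_task(void *arg) // runs via rb_thread_call_without_gvl()
 *   {
 *       // ... heavy non-Ruby work ...
 *       rb_thread_call_with_gvl(log_progress, (void *)"halfway");
 *       // ... more heavy work ...
 *       return NULL;
 *   }
 */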
1788
1789/*
1790 * ruby_thread_has_gvl_p - check if current native thread has GVL.
1791 *
1792 ***
1793 *** This API is EXPERIMENTAL!
1794 *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1795 ***
1796 */
1797
1798int
1799ruby_thread_has_gvl_p(void)
1800{
1801 rb_thread_t *th = ruby_thread_from_native();
1802
1803 if (th && th->blocking_region_buffer == 0) {
1804 return 1;
1805 }
1806 else {
1807 return 0;
1808 }
1809}
1810
1811/*
1812 * call-seq:
1813 * Thread.pass -> nil
1814 *
1815 * Give the thread scheduler a hint to pass execution to another thread.
1816 * A running thread may or may not switch; it depends on the OS and processor.
1817 */
1818
1819static VALUE
1820thread_s_pass(VALUE klass)
1821{
1822 rb_thread_schedule();
1823 return Qnil;
1824}
1825
1826/*****************************************************/
1827
1828/*
1829 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1830 *
1831 * Async events such as an exception thrown by Thread#raise,
1832 * Thread#kill and thread termination (after main thread termination)
1833 * will be queued to th->pending_interrupt_queue.
1834 * - clear: clear the queue.
1835 * - enque: enqueue err object into queue.
1836 * - deque: dequeue err object from queue.
1837 * - active_p: return 1 if the queue should be checked.
1838 *
1839 * All rb_threadptr_pending_interrupt_* functions are called by
1840 * a thread that has acquired the GVL, of course.
1841 * Note that all "rb_"-prefixed APIs need the GVL to be called.
1842 */
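/*
 * Flow sketch (illustrative): Thread#raise on another thread enqueues the
 * exception object and pokes the target thread; the target dequeues it at
 * its next interrupt check:
 *
 *   rb_threadptr_pending_interrupt_enque(target_th, err); // producer side
 *   rb_threadptr_interrupt(target_th);
 *
 *   // ...later, on target_th, from its interrupt check:
 *   VALUE err = rb_threadptr_pending_interrupt_deque(th, INTERRUPT_NONE);
 */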
1843
1844void
1845rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
1846{
1847 rb_ary_clear(th->pending_interrupt_queue);
1848}
1849
1850void
1851rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
1852{
1853 rb_ary_push(th->pending_interrupt_queue, v);
1854 th->pending_interrupt_queue_checked = 0;
1855}
1856
1857static void
1858threadptr_check_pending_interrupt_queue(rb_thread_t *th)
1859{
1860 if (!th->pending_interrupt_queue) {
1861 rb_raise(rb_eThreadError, "uninitialized thread");
1862 }
1863}
1864
1865enum handle_interrupt_timing {
1866 INTERRUPT_NONE,
1867 INTERRUPT_IMMEDIATE,
1868 INTERRUPT_ON_BLOCKING,
1869 INTERRUPT_NEVER
1870};
1871
1872static enum handle_interrupt_timing
1873rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
1874{
1875 VALUE mask;
1876 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
1877 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
1878 VALUE mod;
1879 long i;
1880
1881 for (i=0; i<mask_stack_len; i++) {
1882 mask = mask_stack[mask_stack_len-(i+1)];
1883
1884 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
1885 VALUE klass = mod;
1886 VALUE sym;
1887
1888 if (BUILTIN_TYPE(mod) == T_ICLASS) {
1889 klass = RBASIC(mod)->klass;
1890 }
1891 else if (mod != RCLASS_ORIGIN(mod)) {
1892 continue;
1893 }
1894
1895 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
1896 if (sym == sym_immediate) {
1897 return INTERRUPT_IMMEDIATE;
1898 }
1899 else if (sym == sym_on_blocking) {
1900 return INTERRUPT_ON_BLOCKING;
1901 }
1902 else if (sym == sym_never) {
1903 return INTERRUPT_NEVER;
1904 }
1905 else {
1906 rb_raise(rb_eThreadError, "unknown mask signature");
1907 }
1908 }
1909 }
1910 /* try the next mask */
1911 }
1912 return INTERRUPT_NONE;
1913}
1914
1915static int
1916rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
1917{
1918 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
1919}
1920
1921static int
1922rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
1923{
1924 int i;
1925 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1926 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
1927 if (rb_obj_is_kind_of(e, err)) {
1928 return TRUE;
1929 }
1930 }
1931 return FALSE;
1932}
1933
1934static VALUE
1935rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
1936{
1937#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
1938 int i;
1939
1940 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
1941 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
1942
1943 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
1944
1945 switch (mask_timing) {
1946 case INTERRUPT_ON_BLOCKING:
1947 if (timing != INTERRUPT_ON_BLOCKING) {
1948 break;
1949 }
1950 /* fall through */
1951 case INTERRUPT_NONE: /* default: IMMEDIATE */
1952 case INTERRUPT_IMMEDIATE:
1953 rb_ary_delete_at(th->pending_interrupt_queue, i);
1954 return err;
1955 case INTERRUPT_NEVER:
1956 break;
1957 }
1958 }
1959
1960 th->pending_interrupt_queue_checked = 1;
1961 return Qundef;
1962#else
1963 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
1964 if (rb_threadptr_pending_interrupt_empty_p(th)) {
1965 th->pending_interrupt_queue_checked = 1;
1966 }
1967 return err;
1968#endif
1969}
1970
1971static int
1972threadptr_pending_interrupt_active_p(rb_thread_t *th)
1973{
1974 /*
1975 * For optimization, we don't check the async errinfo queue
1976 * if the queue and the thread interrupt mask have not changed
1977 * since the last check.
1978 */
1979 if (th->pending_interrupt_queue_checked) {
1980 return 0;
1981 }
1982
1983 if (rb_threadptr_pending_interrupt_empty_p(th)) {
1984 return 0;
1985 }
1986
1987 return 1;
1988}
1989
1990static int
1991handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
1992{
1993 VALUE *maskp = (VALUE *)args;
1994
1995 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
1996 rb_raise(rb_eArgError, "unknown mask signature");
1997 }
1998
1999 if (!*maskp) {
2000 *maskp = rb_ident_hash_new();
2001 }
2002 rb_hash_aset(*maskp, key, val);
2003
2004 return ST_CONTINUE;
2005}
2006
2007/*
2008 * call-seq:
2009 * Thread.handle_interrupt(hash) { ... } -> result of the block
2010 *
2011 * Changes asynchronous interrupt timing.
2012 *
2013 * _interrupt_ means an asynchronous event and its corresponding procedure,
2014 * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2015 * and main thread termination (if the main thread terminates, then all
2016 * other threads will be killed).
2017 *
2018 * The given +hash+ has pairs like <code>ExceptionClass =>
2019 * :TimingSymbol</code>, where the ExceptionClass is the interrupt handled by
2020 * the given block. The TimingSymbol can be one of the following symbols:
2021 *
2022 * [+:immediate+] Invoke interrupts immediately.
2023 * [+:on_blocking+] Invoke interrupts while in a _BlockingOperation_.
2024 * [+:never+] Never invoke interrupts.
2025 *
2026 * _BlockingOperation_ means that the operation will block the calling thread,
2027 * such as read and write. In the CRuby implementation, _BlockingOperation_ is any
2028 * operation executed without the GVL.
2029 *
2030 * Masked asynchronous interrupts are delayed until they are enabled.
2031 * This method is similar to sigprocmask(3).
2032 *
2033 * === NOTE
2034 *
2035 * Asynchronous interrupts are difficult to use.
2036 *
2037 * If you need to communicate between threads, consider another mechanism such as Queue.
2038 *
2039 * Otherwise, use asynchronous interrupts only with a deep understanding of this method.
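 *
 * For instance, a minimal Queue-based alternative (illustrative sketch):
 *
 *    q = Thread::Queue.new
 *    th = Thread.new { q.pop }  # waits for a message instead of an interrupt
 *    q.push :stop
 *    th.join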
2040 *
2041 * === Usage
2042 *
2043 * In this example, we can guard against Thread#raise exceptions.
2044 *
2045 * Using the +:never+ TimingSymbol the RuntimeError exception will always be
2046 * ignored in the first block of the main thread. In the second
2047 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2048 *
2049 * th = Thread.new do
2050 * Thread.handle_interrupt(RuntimeError => :never) {
2051 * begin
2052 * # You can write resource allocation code safely.
2053 * Thread.handle_interrupt(RuntimeError => :immediate) {
2054 * # ...
2055 * }
2056 * ensure
2057 * # You can write resource deallocation code safely.
2058 * end
2059 * }
2060 * end
2061 * Thread.pass
2062 * # ...
2063 * th.raise "stop"
2064 *
2065 * While we are ignoring the RuntimeError exception, it's safe to write our
2066 * resource allocation code. Then, the ensure block is where we can safely
2067 * deallocate our resources.
2068 *
2069 * ==== Guarding from Timeout::Error
2070 *
2071 * In the next example, we will guard against the Timeout::Error exception. This
2072 * helps prevent resource leaks when a Timeout::Error exception occurs
2073 * during a normal ensure clause. For this example we use the
2074 * standard library Timeout, from lib/timeout.rb.
2075 *
2076 * require 'timeout'
2077 * Thread.handle_interrupt(Timeout::Error => :never) {
2078 * Timeout.timeout(10){
2079 * # Timeout::Error doesn't occur here
2080 * Thread.handle_interrupt(Timeout::Error => :on_blocking) {
2081 * # possible to be killed by Timeout::Error
2082 * # while blocking operation
2083 * }
2084 * # Timeout::Error doesn't occur here
2085 * }
2086 * }
2087 *
2088 * In the first part of the +timeout+ block, we can rely on Timeout::Error being
2089 * ignored. Then in the <code>Timeout::Error => :on_blocking</code> block, any
2090 * operation that will block the calling thread is susceptible to a
2091 * Timeout::Error exception being raised.
2092 *
2093 * ==== Stack control settings
2094 *
2095 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2096 * to control more than one ExceptionClass and TimingSymbol at a time.
2097 *
2098 * Thread.handle_interrupt(FooError => :never) {
2099 * Thread.handle_interrupt(BarError => :never) {
2100 * # FooError and BarError are prohibited.
2101 * }
2102 * }
2103 *
2104 * ==== Inheritance with ExceptionClass
2105 *
2106 * All exceptions inherited from the ExceptionClass parameter will be considered.
2107 *
2108 * Thread.handle_interrupt(Exception => :never) {
2109 * # all exceptions inherited from Exception are prohibited.
2110 * }
2111 *
2112 * For handling all interrupts, use +Object+ and not +Exception+
2113 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2114 */
2115static VALUE
2116rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2117{
2118 VALUE mask;
2119 rb_execution_context_t * volatile ec = GET_EC();
2120 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2121 volatile VALUE r = Qnil;
2122 enum ruby_tag_type state;
2123
2124 if (!rb_block_given_p()) {
2125 rb_raise(rb_eArgError, "block is needed.");
2126 }
2127
2128 mask = 0;
2129 mask_arg = rb_to_hash_type(mask_arg);
2130 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2131 if (!mask) {
2132 return rb_yield(Qnil);
2133 }
2134 OBJ_FREEZE_RAW(mask);
2135 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2136 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2137 th->pending_interrupt_queue_checked = 0;
2138 RUBY_VM_SET_INTERRUPT(th->ec);
2139 }
2140
2141 EC_PUSH_TAG(th->ec);
2142 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2143 r = rb_yield(Qnil);
2144 }
2145 EC_POP_TAG();
2146
2147 rb_ary_pop(th->pending_interrupt_mask_stack);
2148 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2149 th->pending_interrupt_queue_checked = 0;
2150 RUBY_VM_SET_INTERRUPT(th->ec);
2151 }
2152
2153 RUBY_VM_CHECK_INTS(th->ec);
2154
2155 if (state) {
2156 EC_JUMP_TAG(th->ec, state);
2157 }
2158
2159 return r;
2160}
2161
2162/*
2163 * call-seq:
2164 * target_thread.pending_interrupt?(error = nil) -> true/false
2165 *
2166 * Returns whether the target thread has any deferred asynchronous events queued.
2167 *
2168 * If +error+ is given, then check only for +error+ type deferred events.
2169 *
2170 * See ::pending_interrupt? for more information.
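 *
 * A minimal sketch (assuming the target thread has already entered the
 * handle_interrupt block when Thread#raise is called):
 *
 *    th = Thread.new { Thread.handle_interrupt(RuntimeError => :never) { sleep } }
 *    sleep 0.1
 *    th.raise "stop"
 *    th.pending_interrupt?               #=> true
 *    th.pending_interrupt?(RuntimeError) #=> true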
2171 */
2172static VALUE
2173rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2174{
2175 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2176
2177 if (!target_th->pending_interrupt_queue) {
2178 return Qfalse;
2179 }
2180 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2181 return Qfalse;
2182 }
2183 if (rb_check_arity(argc, 0, 1)) {
2184 VALUE err = argv[0];
2185 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2186 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2187 }
2188 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2189 }
2190 else {
2191 return Qtrue;
2192 }
2193}
2194
2195/*
2196 * call-seq:
2197 * Thread.pending_interrupt?(error = nil) -> true/false
2198 *
2199 * Returns whether the current thread has any deferred asynchronous events queued.
2200 *
2201 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2202 * this method can be used to determine if there are any deferred events.
2203 *
2204 * If this method returns true, you may want to finish the +:never+ block so that the deferred events can be processed.
2205 *
2206 * For example, the following method processes deferred asynchronous events
2207 * immediately.
2208 *
2209 * def Thread.kick_interrupt_immediately
2210 * Thread.handle_interrupt(Object => :immediate) {
2211 * Thread.pass
2212 * }
2213 * end
2214 *
2215 * If +error+ is given, then check only for +error+ type deferred events.
2216 *
2217 * === Usage
2218 *
2219 * th = Thread.new{
2220 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2221 * while true
2222 * ...
2223 * # reach safe point to invoke interrupt
2224 * if Thread.pending_interrupt?
2225 * Thread.handle_interrupt(Object => :immediate){}
2226 * end
2227 * ...
2228 * end
2229 * }
2230 * }
2231 * ...
2232 * th.raise # stop thread
2233 *
2234 * This example can also be written as follows; prefer this style if you want
2235 * to avoid asynchronous interrupts altogether.
2236 *
2237 * flag = true
2238 * th = Thread.new{
2239 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2240 * while true
2241 * ...
2242 * # reach safe point to invoke interrupt
2243 * break if flag == false
2244 * ...
2245 * end
2246 * }
2247 * }
2248 * ...
2249 * flag = false # stop thread
2250 */
2251
2252static VALUE
2253rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2254{
2255 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2256}
2257
2258NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2259
2260static void
2261rb_threadptr_to_kill(rb_thread_t *th)
2262{
2263 rb_threadptr_pending_interrupt_clear(th);
2264 th->status = THREAD_RUNNABLE;
2265 th->to_kill = 1;
2266 th->ec->errinfo = INT2FIX(TAG_FATAL);
2267 EC_JUMP_TAG(th->ec, TAG_FATAL);
2268}
2269
2270static inline rb_atomic_t
2271threadptr_get_interrupts(rb_thread_t *th)
2272{
2273 rb_execution_context_t *ec = th->ec;
2274 rb_atomic_t interrupt;
2275 rb_atomic_t old;
2276
2277 do {
2278 interrupt = ec->interrupt_flag;
2279 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2280 } while (old != interrupt);
2281 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2282}
2283
2284#if USE_MJIT
2285// process.c
2286extern bool mjit_waitpid_finished;
2287extern int mjit_waitpid_status;
2288#endif
2289
2290MJIT_FUNC_EXPORTED int
2291rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2292{
2293 rb_atomic_t interrupt;
2294 int postponed_job_interrupt = 0;
2295 int ret = FALSE;
2296
2297 if (th->ec->raised_flag) return ret;
2298
2299 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2300 int sig;
2301 int timer_interrupt;
2302 int pending_interrupt;
2303 int trap_interrupt;
2304 int terminate_interrupt;
2305
2306 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2307 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2308 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2309 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2310 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2311
2312 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2313 RB_VM_LOCK_ENTER();
2314 RB_VM_LOCK_LEAVE();
2315 }
2316
2317 if (postponed_job_interrupt) {
2318 rb_postponed_job_flush(th->vm);
2319 }
2320
2321 /* signal handling */
2322 if (trap_interrupt && (th == th->vm->ractor.main_thread)) {
2323 enum rb_thread_status prev_status = th->status;
2324 int sigwait_fd = rb_sigwait_fd_get(th);
2325
2326 if (sigwait_fd >= 0) {
2327 (void)consume_communication_pipe(sigwait_fd);
2328 ruby_sigchld_handler(th->vm);
2329 rb_sigwait_fd_put(th, sigwait_fd);
2330 rb_sigwait_fd_migrate(th->vm);
2331 }
2332 th->status = THREAD_RUNNABLE;
2333 while ((sig = rb_get_next_signal()) != 0) {
2334 ret |= rb_signal_exec(th, sig);
2335 }
2336 th->status = prev_status;
2337 }
2338
2339#if USE_MJIT
2340 // Handle waitpid_signal for MJIT issued by ruby_sigchld_handler. This needs to be done
2341 // outside ruby_sigchld_handler to avoid recursively relying on the SIGCHLD handler.
2342 if (mjit_waitpid_finished && th == th->vm->ractor.main_thread) {
2343 mjit_waitpid_finished = false;
2344 mjit_notify_waitpid(WIFEXITED(mjit_waitpid_status) ? WEXITSTATUS(mjit_waitpid_status) : -1);
2345 }
2346#endif
2347
2348 /* exception from another thread */
2349 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2350 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2351 RUBY_DEBUG_LOG("err:%"PRIdVALUE"\n", err);
2352 ret = TRUE;
2353
2354 if (UNDEF_P(err)) {
2355 /* no error */
2356 }
2357 else if (err == eKillSignal /* Thread#kill received */ ||
2358 err == eTerminateSignal /* Terminate thread */ ||
2359 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2360 terminate_interrupt = 1;
2361 }
2362 else {
2363 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2364 /* the only special exception to be queued across threads */
2365 err = ruby_vm_special_exception_copy(err);
2366 }
2367 /* set runnable if th was sleeping. */
2368 if (th->status == THREAD_STOPPED ||
2369 th->status == THREAD_STOPPED_FOREVER)
2370 th->status = THREAD_RUNNABLE;
2371 rb_exc_raise(err);
2372 }
2373 }
2374
2375 if (terminate_interrupt) {
2376 rb_threadptr_to_kill(th);
2377 }
2378
2379 if (timer_interrupt) {
2380 uint32_t limits_us = TIME_QUANTUM_USEC;
2381
2382 if (th->priority > 0)
2383 limits_us <<= th->priority;
2384 else
2385 limits_us >>= -th->priority;
2386
2387 if (th->status == THREAD_RUNNABLE)
2388 th->running_time_us += TIME_QUANTUM_USEC;
2389
2390 VM_ASSERT(th->ec->cfp);
2391 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2392 0, 0, 0, Qundef);
2393
2394 rb_thread_schedule_limits(limits_us);
2395 }
2396 }
2397 return ret;
2398}
2399
2400void
2401rb_thread_execute_interrupts(VALUE thval)
2402{
2403 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2404}
2405
2406static void
2407rb_threadptr_ready(rb_thread_t *th)
2408{
2409 rb_threadptr_interrupt(th);
2410}
2411
2412static VALUE
2413rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2414{
2415 VALUE exc;
2416
2417 if (rb_threadptr_dead(target_th)) {
2418 return Qnil;
2419 }
2420
2421 if (argc == 0) {
2422 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2423 }
2424 else {
2425 exc = rb_make_exception(argc, argv);
2426 }
2427
2428 /* making an exception object can switch threads,
2429 so we need to check thread deadness again */
2430 if (rb_threadptr_dead(target_th)) {
2431 return Qnil;
2432 }
2433
2434 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2435 rb_threadptr_pending_interrupt_enque(target_th, exc);
2436 rb_threadptr_interrupt(target_th);
2437 return Qnil;
2438}
2439
2440void
2441rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2442{
2443 VALUE argv[2];
2444
2445 argv[0] = rb_eSignal;
2446 argv[1] = INT2FIX(sig);
2447 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2448}
2449
2450void
2451rb_threadptr_signal_exit(rb_thread_t *th)
2452{
2453 VALUE argv[2];
2454
2455 argv[0] = rb_eSystemExit;
2456 argv[1] = rb_str_new2("exit");
2457
2458 // TODO: check signal raise delivery
2459 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2460}
2461
2462int
2463rb_ec_set_raised(rb_execution_context_t *ec)
2464{
2465 if (ec->raised_flag & RAISED_EXCEPTION) {
2466 return 1;
2467 }
2468 ec->raised_flag |= RAISED_EXCEPTION;
2469 return 0;
2470}
2471
2472int
2473rb_ec_reset_raised(rb_execution_context_t *ec)
2474{
2475 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2476 return 0;
2477 }
2478 ec->raised_flag &= ~RAISED_EXCEPTION;
2479 return 1;
2480}
2481
2482int
2483rb_notify_fd_close(int fd, struct ccan_list_head *busy)
2484{
2485 rb_vm_t *vm = GET_THREAD()->vm;
2486 struct waiting_fd *wfd = 0, *next;
2487
2488 RB_VM_LOCK_ENTER();
2489 {
2490 ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2491 if (wfd->fd == fd) {
2492 rb_thread_t *th = wfd->th;
2493 VALUE err;
2494
2495 ccan_list_del(&wfd->wfd_node);
2496 ccan_list_add(busy, &wfd->wfd_node);
2497
2498 err = th->vm->special_exceptions[ruby_error_stream_closed];
2499 rb_threadptr_pending_interrupt_enque(th, err);
2500 rb_threadptr_interrupt(th);
2501 }
2502 }
2503 }
2504 RB_VM_LOCK_LEAVE();
2505
2506 return !ccan_list_empty(busy);
2507}
2508
2509void
2510rb_thread_fd_close(int fd)
2511{
2512 struct ccan_list_head busy;
2513
2514 ccan_list_head_init(&busy);
2515 if (rb_notify_fd_close(fd, &busy)) {
2516 do rb_thread_schedule(); while (!ccan_list_empty(&busy));
2517 }
2518}
2519
2520/*
2521 * call-seq:
2522 * thr.raise
2523 * thr.raise(string)
2524 * thr.raise(exception [, string [, array]])
2525 *
2526 * Raises an exception from the given thread. The caller does not have to be
2527 * +thr+. See Kernel#raise for more information.
2528 *
2529 * Thread.abort_on_exception = true
2530 * a = Thread.new { sleep(200) }
2531 * a.raise("Gotcha")
2532 *
2533 * This will produce:
2534 *
2535 * prog.rb:3: Gotcha (RuntimeError)
2536 * from prog.rb:2:in `initialize'
2537 * from prog.rb:2:in `new'
2538 * from prog.rb:2
2539 */
2540
2541static VALUE
2542thread_raise_m(int argc, VALUE *argv, VALUE self)
2543{
2544 rb_thread_t *target_th = rb_thread_ptr(self);
2545 const rb_thread_t *current_th = GET_THREAD();
2546
2547 threadptr_check_pending_interrupt_queue(target_th);
2548 rb_threadptr_raise(target_th, argc, argv);
2549
2550 /* To perform Thread.current.raise as Kernel.raise */
2551 if (current_th == target_th) {
2552 RUBY_VM_CHECK_INTS(target_th->ec);
2553 }
2554 return Qnil;
2555}
2556
2557
2558/*
2559 * call-seq:
2560 * thr.exit -> thr
2561 * thr.kill -> thr
2562 * thr.terminate -> thr
2563 *
2564 * Terminates +thr+ and schedules another thread to be run, returning
2565 * the terminated Thread. If this is the main thread, or the last
2566 * thread, exits the process.
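 *
 * For example (the inspect output is illustrative):
 *
 *    th = Thread.new { sleep }
 *    th.kill    #=> #<Thread:0x401b3d30 dead>
 *    th.join
 *    th.alive?  #=> false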
2567 */
2568
2569VALUE
2570rb_thread_kill(VALUE thread)
2571{
2572 rb_thread_t *target_th = rb_thread_ptr(thread);
2573
2574 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2575 return thread;
2576 }
2577 if (target_th == target_th->vm->ractor.main_thread) {
2578 rb_exit(EXIT_SUCCESS);
2579 }
2580
2581 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2582
2583 if (target_th == GET_THREAD()) {
2584 /* kill myself immediately */
2585 rb_threadptr_to_kill(target_th);
2586 }
2587 else {
2588 threadptr_check_pending_interrupt_queue(target_th);
2589 rb_threadptr_pending_interrupt_enque(target_th, eKillSignal);
2590 rb_threadptr_interrupt(target_th);
2591 }
2592
2593 return thread;
2594}
2595
2596int
2597rb_thread_to_be_killed(VALUE thread)
2598{
2599 rb_thread_t *target_th = rb_thread_ptr(thread);
2600
2601 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2602 return TRUE;
2603 }
2604 return FALSE;
2605}
2606
2607/*
2608 * call-seq:
2609 * Thread.kill(thread) -> thread
2610 *
2611 * Causes the given +thread+ to exit; see also Thread::exit.
2612 *
2613 * count = 0
2614 * a = Thread.new { loop { count += 1 } }
2615 * sleep(0.1) #=> 0
2616 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2617 * count #=> 93947
2618 * a.alive? #=> false
2619 */
2620
2621static VALUE
2622rb_thread_s_kill(VALUE obj, VALUE th)
2623{
2624 return rb_thread_kill(th);
2625}
2626
2627
2628/*
2629 * call-seq:
2630 * Thread.exit -> thread
2631 *
2632 * Terminates the currently running thread and schedules another thread to be
2633 * run.
2634 *
2635 * If this thread is already marked to be killed, ::exit returns the Thread.
2636 *
2637 * If this is the main thread, or the last thread, exits the process.
2638 */
2639
2640static VALUE
2641rb_thread_exit(VALUE _)
2642{
2643 rb_thread_t *th = GET_THREAD();
2644 return rb_thread_kill(th->self);
2645}
2646
2647
2648/*
2649 * call-seq:
2650 * thr.wakeup -> thr
2651 *
2652 * Marks a given thread as eligible for scheduling; however, it may still
2653 * remain blocked on I/O.
2654 *
2655 * *Note:* This does not invoke the scheduler, see #run for more information.
2656 *
2657 * c = Thread.new { Thread.stop; puts "hey!" }
2658 * sleep 0.1 while c.status!='sleep'
2659 * c.wakeup
2660 * c.join
2661 * #=> "hey!"
2662 */
2663
2664VALUE
2665rb_thread_wakeup(VALUE thread)
2666{
2667 if (!RTEST(rb_thread_wakeup_alive(thread))) {
2668 rb_raise(rb_eThreadError, "killed thread");
2669 }
2670 return thread;
2671}
2672
2673VALUE
2674rb_thread_wakeup_alive(VALUE thread)
2675{
2676 rb_thread_t *target_th = rb_thread_ptr(thread);
2677 if (target_th->status == THREAD_KILLED) return Qnil;
2678
2679 rb_threadptr_ready(target_th);
2680
2681 if (target_th->status == THREAD_STOPPED ||
2682 target_th->status == THREAD_STOPPED_FOREVER) {
2683 target_th->status = THREAD_RUNNABLE;
2684 }
2685
2686 return thread;
2687}
2688
2689
2690/*
2691 * call-seq:
2692 * thr.run -> thr
2693 *
2694 * Wakes up +thr+, making it eligible for scheduling.
2695 *
2696 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2697 * sleep 0.1 while a.status!='sleep'
2698 * puts "Got here"
2699 * a.run
2700 * a.join
2701 *
2702 * This will produce:
2703 *
2704 * a
2705 * Got here
2706 * c
2707 *
2708 * See also the instance method #wakeup.
2709 */
2710
2711VALUE
2712rb_thread_run(VALUE thread)
2713{
2714 rb_thread_wakeup(thread);
2715 rb_thread_schedule();
2716 return thread;
2717}
2718
2719
2720VALUE
2721rb_thread_stop(void)
2722{
2723 if (rb_thread_alone()) {
2724 rb_raise(rb_eThreadError,
2725 "stopping only thread\n\tnote: use sleep to stop forever");
2726 }
2727 rb_thread_sleep_forever();
2728 return Qnil;
2729}
2730
2731/*
2732 * call-seq:
2733 * Thread.stop -> nil
2734 *
2735 * Stops execution of the current thread, putting it into a ``sleep'' state,
2736 * and schedules execution of another thread.
2737 *
2738 * a = Thread.new { print "a"; Thread.stop; print "c" }
2739 * sleep 0.1 while a.status!='sleep'
2740 * print "b"
2741 * a.run
2742 * a.join
2743 * #=> "abc"
2744 */
2745
2746static VALUE
2747thread_stop(VALUE _)
2748{
2749 return rb_thread_stop();
2750}
2751
2752/********************************************************************/
2753
2754VALUE
2755rb_thread_list(void)
2756{
2757 // TODO
2758 return rb_ractor_thread_list(GET_RACTOR());
2759}
2760
2761/*
2762 * call-seq:
2763 * Thread.list -> array
2764 *
2765 * Returns an array of Thread objects for all threads that are either runnable
2766 * or stopped.
2767 *
2768 * Thread.new { sleep(200) }
2769 * Thread.new { 1000000.times {|i| i*i } }
2770 * Thread.new { Thread.stop }
2771 * Thread.list.each {|t| p t}
2772 *
2773 * This will produce:
2774 *
2775 * #<Thread:0x401b3e84 sleep>
2776 * #<Thread:0x401b3f38 run>
2777 * #<Thread:0x401b3fb0 sleep>
2778 * #<Thread:0x401bdf4c run>
2779 */
2780
2781static VALUE
2782thread_list(VALUE _)
2783{
2784 return rb_thread_list();
2785}
2786
2787VALUE
2788rb_thread_current(void)
2789{
2790 return GET_THREAD()->self;
2791}
2792
2793/*
2794 * call-seq:
2795 * Thread.current -> thread
2796 *
2797 * Returns the currently executing thread.
2798 *
2799 * Thread.current #=> #<Thread:0x401bdf4c run>
2800 */
2801
2802static VALUE
2803thread_s_current(VALUE klass)
2804{
2805 return rb_thread_current();
2806}
2807
2808VALUE
2809rb_thread_main(void)
2810{
2811 return GET_RACTOR()->threads.main->self;
2812}
2813
2814/*
2815 * call-seq:
2816 * Thread.main -> thread
2817 *
2818 * Returns the main thread.
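 *
 * For example (the inspect output is illustrative):
 *
 *    Thread.main                    #=> #<Thread:0x401bdf4c run>
 *    Thread.current == Thread.main  #=> true, when called from the main thread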
2819 */
2820
2821static VALUE
2822rb_thread_s_main(VALUE klass)
2823{
2824 return rb_thread_main();
2825}
2826
2827
2828/*
2829 * call-seq:
2830 * Thread.abort_on_exception -> true or false
2831 *
2832 * Returns the status of the global ``abort on exception'' condition.
2833 *
2834 * The default is +false+.
2835 *
2836 * When set to +true+, if any thread is aborted by an exception, the
2837 * raised exception will be re-raised in the main thread.
2838 *
2839 * Can also be specified by the global $DEBUG flag or command line option
2840 * +-d+.
2841 *
2842 * See also ::abort_on_exception=.
2843 *
2844 * There is also an instance level method to set this for a specific thread,
2845 * see #abort_on_exception.
2846 */
2847
2848static VALUE
2849rb_thread_s_abort_exc(VALUE _)
2850{
2851 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
2852}
2853
2854
2855/*
2856 * call-seq:
2857 * Thread.abort_on_exception= boolean -> true or false
2858 *
2859 * When set to +true+, if any thread is aborted by an exception, the
2860 * raised exception will be re-raised in the main thread.
2861 * Returns the new state.
2862 *
2863 * Thread.abort_on_exception = true
2864 * t1 = Thread.new do
2865 * puts "In new thread"
2866 * raise "Exception from thread"
2867 * end
2868 * sleep(1)
2869 * puts "not reached"
2870 *
2871 * This will produce:
2872 *
2873 * In new thread
2874 * prog.rb:4: Exception from thread (RuntimeError)
2875 * from prog.rb:2:in `initialize'
2876 * from prog.rb:2:in `new'
2877 * from prog.rb:2
2878 *
2879 * See also ::abort_on_exception.
2880 *
2881 * There is also an instance level method to set this for a specific thread,
2882 * see #abort_on_exception=.
2883 */
2884
2885static VALUE
2886rb_thread_s_abort_exc_set(VALUE self, VALUE val)
2887{
2888 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
2889 return val;
2890}
2891
2892
2893/*
2894 * call-seq:
2895 * thr.abort_on_exception -> true or false
2896 *
2897 * Returns the status of the thread-local ``abort on exception'' condition for
2898 * this +thr+.
2899 *
2900 * The default is +false+.
2901 *
2902 * See also #abort_on_exception=.
2903 *
2904 * There is also a class level method to set this for all threads, see
2905 * ::abort_on_exception.
2906 */
2907
2908static VALUE
2909rb_thread_abort_exc(VALUE thread)
2910{
2911 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
2912}
2913
2914
2915/*
2916 * call-seq:
2917 * thr.abort_on_exception= boolean -> true or false
2918 *
2919 * When set to +true+, if this +thr+ is aborted by an exception, the
2920 * raised exception will be re-raised in the main thread.
2921 *
2922 * See also #abort_on_exception.
2923 *
2924 * There is also a class level method to set this for all threads, see
2925 * ::abort_on_exception=.
2926 */
2927
2928static VALUE
2929rb_thread_abort_exc_set(VALUE thread, VALUE val)
2930{
2931 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
2932 return val;
2933}
2934
2935
2936/*
2937 * call-seq:
2938 * Thread.report_on_exception -> true or false
2939 *
2940 * Returns the status of the global ``report on exception'' condition.
2941 *
2942 * The default is +true+ since Ruby 2.5.
2943 *
2944 * All threads created when this flag is true will report
2945 * a message on $stderr if an exception kills the thread.
2946 *
2947 * Thread.new { 1.times { raise } }
2948 *
2949 * will produce this output on $stderr:
2950 *
2951 * #<Thread:...> terminated with exception (report_on_exception is true):
2952 * Traceback (most recent call last):
2953 * 2: from -e:1:in `block in <main>'
2954 * 1: from -e:1:in `times'
2955 *
2956 * This is done to catch errors in threads early.
2957 * In some cases, you might not want this output.
2958 * There are multiple ways to avoid the extra output:
2959 *
2960 * * If the exception is not intended, the best is to fix the cause of
2961 * the exception so it does not happen anymore.
2962 * * If the exception is intended, it might be better to rescue it closer to
2963 * where it is raised rather than letting it kill the Thread.
2964 * * If it is guaranteed the Thread will be joined with Thread#join or
2965 * Thread#value, then it is safe to disable this report with
2966 * <code>Thread.current.report_on_exception = false</code>
2967 * when starting the Thread.
2968 * However, this might handle the exception much later, or not at all
2969 * if the Thread is never joined due to the parent thread being blocked, etc.
2970 *
2971 * See also ::report_on_exception=.
2972 *
2973 * There is also an instance level method to set this for a specific thread,
2974 * see #report_on_exception=.
2975 *
2976 */
2977
2978static VALUE
2979rb_thread_s_report_exc(VALUE _)
2980{
2981 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
2982}
2983
2984
2985/*
2986 * call-seq:
2987 * Thread.report_on_exception= boolean -> true or false
2988 *
2989 * Returns the new state.
2990 * When set to +true+, all threads created afterwards will inherit the
2991 * condition and report a message on $stderr if an exception kills a thread:
2992 *
2993 * Thread.report_on_exception = true
2994 * t1 = Thread.new do
2995 * puts "In new thread"
2996 * raise "Exception from thread"
2997 * end
2998 * sleep(1)
2999 * puts "In the main thread"
3000 *
3001 * This will produce:
3002 *
3003 * In new thread
3004 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3005 * Traceback (most recent call last):
3006 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3007 * In the main thread
3008 *
3009 * See also ::report_on_exception.
3010 *
3011 * There is also an instance level method to set this for a specific thread,
3012 * see #report_on_exception=.
3013 */
3014
3015static VALUE
3016rb_thread_s_report_exc_set(VALUE self, VALUE val)
3017{
3018 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3019 return val;
3020}
3021
3022
3023/*
3024 * call-seq:
3025 * Thread.ignore_deadlock -> true or false
3026 *
3027 * Returns the status of the global ``ignore deadlock'' condition.
3028 * The default is +false+, so that deadlock conditions are not ignored.
3029 *
3030 * See also ::ignore_deadlock=.
3031 *
3032 */
3033
3034static VALUE
3035rb_thread_s_ignore_deadlock(VALUE _)
3036{
3037 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3038}
3039
3040
3041/*
3042 * call-seq:
3043 * Thread.ignore_deadlock = boolean -> true or false
3044 *
3045 * Returns the new state.
3046 * When set to +true+, the VM will not check for deadlock conditions.
3047 * It is only useful to set this if your application can break a
3048 * deadlock condition via some other means, such as a signal.
3049 *
3050 * Thread.ignore_deadlock = true
3051 * queue = Thread::Queue.new
3052 *
3053 * trap(:SIGUSR1){queue.push "Received signal"}
3054 *
3055 * # raises fatal error unless ignoring deadlock
3056 * puts queue.pop
3057 *
3058 * See also ::ignore_deadlock.
3059 */
3060
3061static VALUE
3062rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3063{
3064 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3065 return val;
3066}
3067
3068
3069/*
3070 * call-seq:
3071 * thr.report_on_exception -> true or false
3072 *
3073 * Returns the status of the thread-local ``report on exception'' condition for
3074 * this +thr+.
3075 *
3076 * The default value when creating a Thread is the value of
3077 * the global flag Thread.report_on_exception.
3078 *
3079 * See also #report_on_exception=.
3080 *
3081 * There is also a class level method to set this for all new threads, see
3082 * ::report_on_exception=.
3083 */
3084
3085static VALUE
3086rb_thread_report_exc(VALUE thread)
3087{
3088 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3089}
3090
3091
3092/*
3093 * call-seq:
3094 * thr.report_on_exception= boolean -> true or false
3095 *
3096 * When set to +true+, a message is printed on $stderr if an exception
3097 * kills this +thr+. See ::report_on_exception for details.
3098 *
3099 * See also #report_on_exception.
3100 *
3101 * There is also a class level method to set this for all new threads, see
3102 * ::report_on_exception=.
3103 */
3104
3105static VALUE
3106rb_thread_report_exc_set(VALUE thread, VALUE val)
3107{
3108 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3109 return val;
3110}
3111
3112
3113/*
3114 * call-seq:
3115 * thr.group -> thgrp or nil
3116 *
3117 * Returns the ThreadGroup which contains the given thread.
3118 *
3119 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3120 */
3121
3122VALUE
3123rb_thread_group(VALUE thread)
3124{
3125 return rb_thread_ptr(thread)->thgroup;
3126}
3127
3128static const char *
3129thread_status_name(rb_thread_t *th, int detail)
3130{
3131 switch (th->status) {
3132 case THREAD_RUNNABLE:
3133 return th->to_kill ? "aborting" : "run";
3134 case THREAD_STOPPED_FOREVER:
3135 if (detail) return "sleep_forever";
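 /* fall through */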
3136 case THREAD_STOPPED:
3137 return "sleep";
3138 case THREAD_KILLED:
3139 return "dead";
3140 default:
3141 return "unknown";
3142 }
3143}
3144
3145static int
3146rb_threadptr_dead(rb_thread_t *th)
3147{
3148 return th->status == THREAD_KILLED;
3149}
3150
3151
3152/*
3153 * call-seq:
3154 * thr.status -> string, false or nil
3155 *
3156 * Returns the status of +thr+.
3157 *
3158 * [<tt>"sleep"</tt>]
3159 * Returned if this thread is sleeping or waiting on I/O
3160 * [<tt>"run"</tt>]
3161 * When this thread is executing
3162 * [<tt>"aborting"</tt>]
3163 * If this thread is aborting
3164 * [+false+]
3165 * When this thread is terminated normally
3166 * [+nil+]
3167 * If terminated with an exception.
3168 *
3169 * a = Thread.new { raise("die now") }
3170 * b = Thread.new { Thread.stop }
3171 * c = Thread.new { Thread.exit }
3172 * d = Thread.new { sleep }
3173 * d.kill #=> #<Thread:0x401b3678 aborting>
3174 * a.status #=> nil
3175 * b.status #=> "sleep"
3176 * c.status #=> false
3177 * d.status #=> "aborting"
3178 * Thread.current.status #=> "run"
3179 *
3180 * See also the instance methods #alive? and #stop?
3181 */
3182
3183static VALUE
3184rb_thread_status(VALUE thread)
3185{
3186 rb_thread_t *target_th = rb_thread_ptr(thread);
3187
3188 if (rb_threadptr_dead(target_th)) {
3189 if (!NIL_P(target_th->ec->errinfo) &&
3190 !FIXNUM_P(target_th->ec->errinfo)) {
3191 return Qnil;
3192 }
3193 else {
3194 return Qfalse;
3195 }
3196 }
3197 else {
3198 return rb_str_new2(thread_status_name(target_th, FALSE));
3199 }
3200}
3201
3202
3203/*
3204 * call-seq:
3205 * thr.alive? -> true or false
3206 *
3207 * Returns +true+ if +thr+ is running or sleeping.
3208 *
3209 * thr = Thread.new { }
3210 * thr.join #=> #<Thread:0x401b3fb0 dead>
3211 * Thread.current.alive? #=> true
3212 * thr.alive? #=> false
3213 *
3214 * See also #stop? and #status.
3215 */
3216
3217static VALUE
3218rb_thread_alive_p(VALUE thread)
3219{
3220 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3221}
3222
3223/*
3224 * call-seq:
3225 * thr.stop? -> true or false
3226 *
3227 * Returns +true+ if +thr+ is dead or sleeping.
3228 *
3229 * a = Thread.new { Thread.stop }
3230 * b = Thread.current
3231 * a.stop? #=> true
3232 * b.stop? #=> false
3233 *
3234 * See also #alive? and #status.
3235 */
3236
3237static VALUE
3238rb_thread_stop_p(VALUE thread)
3239{
3240 rb_thread_t *th = rb_thread_ptr(thread);
3241
3242 if (rb_threadptr_dead(th)) {
3243 return Qtrue;
3244 }
3245 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3246}
3247
3248/*
3249 * call-seq:
3250 * thr.name -> string
3251 *
3252 * Returns the name of the thread.
3253 */
3254
3255static VALUE
3256rb_thread_getname(VALUE thread)
3257{
3258 return rb_thread_ptr(thread)->name;
3259}
3260
3261/*
3262 * call-seq:
3263 * thr.name=(name) -> string
3264 *
3265 * Sets the given name to the Ruby thread.
3266 * On some platforms, it may also set the name of the underlying pthread and/or kernel thread.
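 *
 * For example:
 *
 *    Thread.current.name = "worker-1"
 *    Thread.current.name #=> "worker-1"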
3267 */
3268
3269static VALUE
3270rb_thread_setname(VALUE thread, VALUE name)
3271{
3272 rb_thread_t *target_th = rb_thread_ptr(thread);
3273
3274 if (!NIL_P(name)) {
3275 rb_encoding *enc;
3276 StringValueCStr(name);
3277 enc = rb_enc_get(name);
3278 if (!rb_enc_asciicompat(enc)) {
3279 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3280 rb_enc_name(enc));
3281 }
3282 name = rb_str_new_frozen(name);
3283 }
3284 target_th->name = name;
3285 if (threadptr_initialized(target_th)) {
3286 native_set_another_thread_name(target_th->nt->thread_id, name);
3287 }
3288 return name;
3289}
3290
3291#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3292/*
3293 * call-seq:
3294 * thr.native_thread_id -> integer
3295 *
3296 * Returns the native thread ID which is used by the Ruby thread.
3297 *
3298 * The ID depends on the OS (it is not the POSIX thread ID returned by pthread_self(3)):
3299 * * On Linux it is TID returned by gettid(2).
3300 * * On macOS it is the system-wide unique integral ID of thread returned
3301 * by pthread_threadid_np(3).
3302 * * On FreeBSD it is the unique integral ID of the thread returned by
3303 * pthread_getthreadid_np(3).
3304 * * On Windows it is the thread identifier returned by GetThreadId().
3305 * * On other platforms, it raises NotImplementedError.
3306 *
3307 * NOTE:
3308 * If the thread is not yet associated with, or has already been dissociated
3309 * from, a native thread, it returns _nil_.
3310 * If the Ruby implementation uses an M:N thread model, the ID may change
3311 * depending on the timing.
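 *
 * For example (the actual value depends on the OS and the process):
 *
 *    Thread.current.native_thread_id #=> 22446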
3312 */
3313
3314static VALUE
3315rb_thread_native_thread_id(VALUE thread)
3316{
3317 rb_thread_t *target_th = rb_thread_ptr(thread);
3318 if (rb_threadptr_dead(target_th)) return Qnil;
3319 return native_thread_native_thread_id(target_th);
3320}
3321#else
3322# define rb_thread_native_thread_id rb_f_notimplement
3323#endif
3324
3325/*
3326 * call-seq:
3327 * thr.to_s -> string
3328 *
3329 * Dump the name, id, and status of _thr_ to a string.
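 *
 * For example (the address and status are illustrative):
 *
 *    Thread.current.to_s #=> "#<Thread:0x00007f8b run>"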
3330 */
3331
3332static VALUE
3333rb_thread_to_s(VALUE thread)
3334{
3335 VALUE cname = rb_class_path(rb_obj_class(thread));
3336 rb_thread_t *target_th = rb_thread_ptr(thread);
3337 const char *status;
3338 VALUE str, loc;
3339
3340 status = thread_status_name(target_th, TRUE);
3341 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3342 if (!NIL_P(target_th->name)) {
3343 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3344 }
3345 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3346 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3347 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3348 }
3349 rb_str_catf(str, " %s>", status);
3350
3351 return str;
3352}
3353
3354/* variables for recursive traversals */
3355#define recursive_key id__recursive_key__
3356
3357static VALUE
3358threadptr_local_aref(rb_thread_t *th, ID id)
3359{
3360 if (id == recursive_key) {
3361 return th->ec->local_storage_recursive_hash;
3362 }
3363 else {
3364 VALUE val;
3365 struct rb_id_table *local_storage = th->ec->local_storage;
3366
3367 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3368 return val;
3369 }
3370 else {
3371 return Qnil;
3372 }
3373 }
3374}
3375
3376VALUE
3377rb_thread_local_aref(VALUE thread, ID id)
3378{
3379 return threadptr_local_aref(rb_thread_ptr(thread), id);
3380}
3381
3382/*
3383 * call-seq:
3384 * thr[sym] -> obj or nil
3385 *
3386 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3387 * if not explicitly inside a Fiber), using either a symbol or a string name.
3388 * If the specified variable does not exist, returns +nil+.
3389 *
3390 * [
3391 * Thread.new { Thread.current["name"] = "A" },
3392 * Thread.new { Thread.current[:name] = "B" },
3393 * Thread.new { Thread.current["name"] = "C" }
3394 * ].each do |th|
3395 * th.join
3396 * puts "#{th.inspect}: #{th[:name]}"
3397 * end
3398 *
3399 * This will produce:
3400 *
3401 * #<Thread:0x00000002a54220 dead>: A
3402 * #<Thread:0x00000002a541a8 dead>: B
3403 * #<Thread:0x00000002a54130 dead>: C
3404 *
3405 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3406 * This confusion did not exist in Ruby 1.8 because
3407 * fibers are only available since Ruby 1.9.
3408 * Ruby 1.9 chose to make these methods fiber-local in order to preserve
3409 * the following idiom for dynamic scope.
3410 *
3411 * def meth(newvalue)
3412 * begin
3413 * oldvalue = Thread.current[:name]
3414 * Thread.current[:name] = newvalue
3415 * yield
3416 * ensure
3417 * Thread.current[:name] = oldvalue
3418 * end
3419 * end
3420 *
3421 * The idiom may not work as dynamic scope if the methods are thread-local
3422 * and a given block switches fibers.
3423 *
3424 * f = Fiber.new {
3425 * meth(1) {
3426 * Fiber.yield
3427 * }
3428 * }
3429 * meth(2) {
3430 * f.resume
3431 * }
3432 * f.resume
3433 * p Thread.current[:name]
3434 * #=> nil if fiber-local
3435 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3436 *
3437 * For thread-local variables, please see #thread_variable_get and
3438 * #thread_variable_set.
3439 *
3440 */
3441
3442static VALUE
3443rb_thread_aref(VALUE thread, VALUE key)
3444{
3445 ID id = rb_check_id(&key);
3446 if (!id) return Qnil;
3447 return rb_thread_local_aref(thread, id);
3448}
3449
3450/*
3451 * call-seq:
3452 * thr.fetch(sym) -> obj
3453 * thr.fetch(sym) { } -> obj
3454 * thr.fetch(sym, default) -> obj
3455 *
3456 * Returns a fiber-local for the given key. If the key can't be
3457 * found, there are several options: With no other arguments, it will
3458 * raise a KeyError exception; if <i>default</i> is given, then that
3459 * will be returned; if the optional code block is specified, then
3460 * that will be run and its result returned. See Thread#[] and
3461 * Hash#fetch.
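 *
 * For example:
 *
 *    Thread.current[:name] = "A"
 *    Thread.current.fetch(:name)            #=> "A"
 *    Thread.current.fetch(:missing, "none") #=> "none"
 *    Thread.current.fetch(:missing) { |key| key.to_s } #=> "missing"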
3462 */
3463static VALUE
3464rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3465{
3466 VALUE key, val;
3467 ID id;
3468 rb_thread_t *target_th = rb_thread_ptr(self);
3469 int block_given;
3470
3471 rb_check_arity(argc, 1, 2);
3472 key = argv[0];
3473
3474 block_given = rb_block_given_p();
3475 if (block_given && argc == 2) {
3476 rb_warn("block supersedes default value argument");
3477 }
3478
3479 id = rb_check_id(&key);
3480
3481 if (id == recursive_key) {
3482 return target_th->ec->local_storage_recursive_hash;
3483 }
3484 else if (id && target_th->ec->local_storage &&
3485 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3486 return val;
3487 }
3488 else if (block_given) {
3489 return rb_yield(key);
3490 }
3491 else if (argc == 1) {
3492 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3493 }
3494 else {
3495 return argv[1];
3496 }
3497}
3498
3499static VALUE
3500threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3501{
3502 if (id == recursive_key) {
3503 th->ec->local_storage_recursive_hash = val;
3504 return val;
3505 }
3506 else {
3507 struct rb_id_table *local_storage = th->ec->local_storage;
3508
3509 if (NIL_P(val)) {
3510 if (!local_storage) return Qnil;
3511 rb_id_table_delete(local_storage, id);
3512 return Qnil;
3513 }
3514 else {
3515 if (local_storage == NULL) {
3516 th->ec->local_storage = local_storage = rb_id_table_create(0);
3517 }
3518 rb_id_table_insert(local_storage, id, val);
3519 return val;
3520 }
3521 }
3522}
3523
3524VALUE
3525rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3526{
3527 if (OBJ_FROZEN(thread)) {
3528 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3529 }
3530
3531 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3532}
3533
3534/*
3535 * call-seq:
3536 * thr[sym] = obj -> obj
3537 *
3538 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3539 * using either a symbol or a string.
3540 *
3541 * See also Thread#[].
3542 *
3543 * For thread-local variables, please see #thread_variable_set and
3544 * #thread_variable_get.
3545 */
3546
3547static VALUE
3548rb_thread_aset(VALUE self, VALUE id, VALUE val)
3549{
3550 return rb_thread_local_aset(self, rb_to_id(id), val);
3551}
3552
3553/*
3554 * call-seq:
3555 * thr.thread_variable_get(key) -> obj or nil
3556 *
3557 * Returns the value of a thread local variable that has been set. Note that
3558 * these are different from fiber local values. For fiber local values,
3559 * please see Thread#[] and Thread#[]=.
3560 *
3561 * Thread local values are carried along with threads, and do not respect
3562 * fibers. For example:
3563 *
3564 * Thread.new {
3565 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3566 * Thread.current["foo"] = "bar" # set a fiber local
3567 *
3568 * Fiber.new {
3569 * Fiber.yield [
3570 * Thread.current.thread_variable_get("foo"), # get the thread local
3571 * Thread.current["foo"], # get the fiber local
3572 * ]
3573 * }.resume
3574 * }.join.value # => ['bar', nil]
3575 *
3576 * The value "bar" is returned for the thread local, whereas nil is returned
3577 * for the fiber local. The fiber is executed in the same thread, so the
3578 * thread local values are available.
3579 */
3580
3581static VALUE
3582rb_thread_variable_get(VALUE thread, VALUE key)
3583{
3584 VALUE locals;
3585
3586 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3587 return Qnil;
3588 }
3589 locals = rb_thread_local_storage(thread);
3590 return rb_hash_aref(locals, rb_to_symbol(key));
3591}
3592
3593/*
3594 * call-seq:
3595 * thr.thread_variable_set(key, value)
3596 *
3597 * Sets a thread local with +key+ to +value+. Note that these are local to
3598 * threads, and not to fibers. Please see Thread#thread_variable_get and
3599 * Thread#[] for more information.
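 *
 * For example:
 *
 *    Thread.current.thread_variable_set(:debug, true)
 *    Thread.current.thread_variable_get(:debug) #=> true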
3600 */
3601
3602static VALUE
3603rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3604{
3605 VALUE locals;
3606
3607 if (OBJ_FROZEN(thread)) {
3608 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3609 }
3610
3611 locals = rb_thread_local_storage(thread);
3612 return rb_hash_aset(locals, rb_to_symbol(key), val);
3613}
3614
3615/*
3616 * call-seq:
3617 * thr.key?(sym) -> true or false
3618 *
3619 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3620 * variable.
3621 *
3622 * me = Thread.current
3623 * me[:oliver] = "a"
3624 * me.key?(:oliver) #=> true
3625 * me.key?(:stanley) #=> false
3626 */
3627
3628static VALUE
3629rb_thread_key_p(VALUE self, VALUE key)
3630{
3631 VALUE val;
3632 ID id = rb_check_id(&key);
3633 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3634
3635 if (!id || local_storage == NULL) {
3636 return Qfalse;
3637 }
3638 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3639}
3640
3641static enum rb_id_table_iterator_result
3642thread_keys_i(ID key, VALUE value, void *ary)
3643{
3644 rb_ary_push((VALUE)ary, ID2SYM(key));
3645 return ID_TABLE_CONTINUE;
3646}
3647
3648int
3649rb_thread_alone(void)
3650{
3651 // TODO
3652 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3653}
3654
3655/*
3656 * call-seq:
3657 * thr.keys -> array
3658 *
3659 * Returns an array of the names of the fiber-local variables (as Symbols).
3660 *
3661 * thr = Thread.new do
3662 * Thread.current[:cat] = 'meow'
3663 * Thread.current["dog"] = 'woof'
3664 * end
3665 * thr.join #=> #<Thread:0x401b3f10 dead>
3666 * thr.keys #=> [:dog, :cat]
3667 */
3668
3669static VALUE
3670rb_thread_keys(VALUE self)
3671{
3672 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3673 VALUE ary = rb_ary_new();
3674
3675 if (local_storage) {
3676 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3677 }
3678 return ary;
3679}
3680
3681static int
3682keys_i(VALUE key, VALUE value, VALUE ary)
3683{
3684 rb_ary_push(ary, key);
3685 return ST_CONTINUE;
3686}
3687
3688/*
3689 * call-seq:
3690 * thr.thread_variables -> array
3691 *
3692 * Returns an array of the names of the thread-local variables (as Symbols).
3693 *
3694 * thr = Thread.new do
3695 * Thread.current.thread_variable_set(:cat, 'meow')
3696 * Thread.current.thread_variable_set("dog", 'woof')
3697 * end
3698 * thr.join #=> #<Thread:0x401b3f10 dead>
3699 * thr.thread_variables #=> [:dog, :cat]
3700 *
3701 * Note that these are not fiber local variables. Please see Thread#[] and
3702 * Thread#thread_variable_get for more details.
3703 */
3704
3705static VALUE
3706rb_thread_variables(VALUE thread)
3707{
3708 VALUE locals;
3709 VALUE ary;
3710
3711 ary = rb_ary_new();
3712 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3713 return ary;
3714 }
3715 locals = rb_thread_local_storage(thread);
3716 rb_hash_foreach(locals, keys_i, ary);
3717
3718 return ary;
3719}
3720
3721/*
3722 * call-seq:
3723 * thr.thread_variable?(key) -> true or false
3724 *
3725 * Returns +true+ if the given string (or symbol) exists as a thread-local
3726 * variable.
3727 *
3728 * me = Thread.current
3729 * me.thread_variable_set(:oliver, "a")
3730 * me.thread_variable?(:oliver) #=> true
3731 * me.thread_variable?(:stanley) #=> false
3732 *
3733 * Note that these are not fiber local variables. Please see Thread#[] and
3734 * Thread#thread_variable_get for more details.
3735 */
3736
3737static VALUE
3738rb_thread_variable_p(VALUE thread, VALUE key)
3739{
3740 VALUE locals;
3741
3742 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3743 return Qfalse;
3744 }
3745 locals = rb_thread_local_storage(thread);
3746
3747 return RBOOL(rb_hash_lookup(locals, rb_to_symbol(key)) != Qnil);
3748}
3749
3750/*
3751 * call-seq:
3752 * thr.priority -> integer
3753 *
3754 * Returns the priority of <i>thr</i>. The default is inherited from the
3755 * current thread which created the new thread, or zero for the
3756 * initial main thread; higher-priority threads will run more frequently
3757 * than lower-priority threads (but lower-priority threads can also run).
3758 *
3759 * This is just a hint for the Ruby thread scheduler. It may be ignored on
3760 * some platforms.
3761 *
3762 * Thread.current.priority #=> 0
3763 */
3764
3765static VALUE
3766rb_thread_priority(VALUE thread)
3767{
3768 return INT2NUM(rb_thread_ptr(thread)->priority);
3769}
3770
3771
3772/*
3773 * call-seq:
3774 * thr.priority= integer -> thr
3775 *
3776 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3777 * will run more frequently than lower-priority threads (but lower-priority
3778 * threads can also run).
3779 *
3780 * This is just a hint for the Ruby thread scheduler. It may be ignored on
3781 * some platforms.
3782 *
3783 * count1 = count2 = 0
3784 * a = Thread.new do
3785 * loop { count1 += 1 }
3786 * end
3787 * a.priority = -1
3788 *
3789 * b = Thread.new do
3790 * loop { count2 += 1 }
3791 * end
3792 * b.priority = -2
3793 * sleep 1 #=> 1
3794 * count1 #=> 622504
3795 * count2 #=> 5832
3796 */
3797
3798static VALUE
3799rb_thread_priority_set(VALUE thread, VALUE prio)
3800{
3801 rb_thread_t *target_th = rb_thread_ptr(thread);
3802 int priority;
3803
3804#if USE_NATIVE_THREAD_PRIORITY
3805 target_th->priority = NUM2INT(prio);
3806 native_thread_apply_priority(th);
3807#else
3808 priority = NUM2INT(prio);
3809 if (priority > RUBY_THREAD_PRIORITY_MAX) {
3810 priority = RUBY_THREAD_PRIORITY_MAX;
3811 }
3812 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3813 priority = RUBY_THREAD_PRIORITY_MIN;
3814 }
3815 target_th->priority = (int8_t)priority;
3816#endif
3817 return INT2NUM(target_th->priority);
3818}
3819
3820/* for IO */
3821
3822#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3823
3824/*
3825 * several Unix platforms support file descriptors bigger than FD_SETSIZE
3826 * in select(2) system call.
3827 *
3828 * - Linux 2.2.12 (?)
3829 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3830 * select(2) documents how to allocate fd_set dynamically.
3831 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3832 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3833 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3834 * select(2) documents how to allocate fd_set dynamically.
3835 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3836 * - Solaris 8 has select_large_fdset
3837 * - Mac OS X 10.7 (Lion)
3838 * select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
3839 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3840 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
3841 *
3842 * When fd_set is not big enough to hold big file descriptors,
3843 * it should be allocated dynamically.
3844 * Note that this assumes fd_set is structured as a bitmap.
3845 *
3846 * rb_fd_init allocates the memory.
3847 * rb_fd_term frees the memory.
3848 * rb_fd_set may re-allocate the bitmap.
3849 *
3850 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
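 *
 * A typical usage sketch (illustrative; big_fd stands for any descriptor,
 * possibly larger than FD_SETSIZE, and do_something is a placeholder):
 *
 *   rb_fdset_t fds;
 *   rb_fd_init(&fds);
 *   rb_fd_set(big_fd, &fds);   // grows the bitmap as needed
 *   if (rb_fd_isset(big_fd, &fds)) do_something();
 *   rb_fd_term(&fds);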
3851 */
3852
3853void
3854rb_fd_init(rb_fdset_t *fds)
3855{
3856 fds->maxfd = 0;
3857 fds->fdset = ALLOC(fd_set);
3858 FD_ZERO(fds->fdset);
3859}
3860
3861void
3862rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
3863{
3864 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3865
3866 if (size < sizeof(fd_set))
3867 size = sizeof(fd_set);
3868 dst->maxfd = src->maxfd;
3869 dst->fdset = xmalloc(size);
3870 memcpy(dst->fdset, src->fdset, size);
3871}
3872
3873void
3874rb_fd_term(rb_fdset_t *fds)
3875{
3876 if (fds->fdset) xfree(fds->fdset);
3877 fds->maxfd = 0;
3878 fds->fdset = 0;
3879}
3880
3881void
3882rb_fd_zero(rb_fdset_t *fds)
3883{
3884 if (fds->fdset)
3885 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
3886}
3887
3888static void
3889rb_fd_resize(int n, rb_fdset_t *fds)
3890{
3891 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
3892 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
3893
3894 if (m < sizeof(fd_set)) m = sizeof(fd_set);
3895 if (o < sizeof(fd_set)) o = sizeof(fd_set);
3896
3897 if (m > o) {
3898 fds->fdset = xrealloc(fds->fdset, m);
3899 memset((char *)fds->fdset + o, 0, m - o);
3900 }
3901 if (n >= fds->maxfd) fds->maxfd = n + 1;
3902}
3903
3904void
3905rb_fd_set(int n, rb_fdset_t *fds)
3906{
3907 rb_fd_resize(n, fds);
3908 FD_SET(n, fds->fdset);
3909}
3910
3911void
3912rb_fd_clr(int n, rb_fdset_t *fds)
3913{
3914 if (n >= fds->maxfd) return;
3915 FD_CLR(n, fds->fdset);
3916}
3917
3918int
3919rb_fd_isset(int n, const rb_fdset_t *fds)
3920{
3921 if (n >= fds->maxfd) return 0;
3922 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
3923}
3924
3925void
3926rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
3927{
3928 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
3929
3930 if (size < sizeof(fd_set)) size = sizeof(fd_set);
3931 dst->maxfd = max;
3932 dst->fdset = xrealloc(dst->fdset, size);
3933 memcpy(dst->fdset, src, size);
3934}
3935
3936void
3937rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
3938{
3939 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3940
3941 if (size < sizeof(fd_set))
3942 size = sizeof(fd_set);
3943 dst->maxfd = src->maxfd;
3944 dst->fdset = xrealloc(dst->fdset, size);
3945 memcpy(dst->fdset, src->fdset, size);
3946}
3947
3948int
3949rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
3950{
3951 fd_set *r = NULL, *w = NULL, *e = NULL;
3952 if (readfds) {
3953 rb_fd_resize(n - 1, readfds);
3954 r = rb_fd_ptr(readfds);
3955 }
3956 if (writefds) {
3957 rb_fd_resize(n - 1, writefds);
3958 w = rb_fd_ptr(writefds);
3959 }
3960 if (exceptfds) {
3961 rb_fd_resize(n - 1, exceptfds);
3962 e = rb_fd_ptr(exceptfds);
3963 }
3964 return select(n, r, w, e, timeout);
3965}
3966
3967#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
3968
3969#undef FD_ZERO
3970#undef FD_SET
3971#undef FD_CLR
3972#undef FD_ISSET
3973
3974#define FD_ZERO(f) rb_fd_zero(f)
3975#define FD_SET(i, f) rb_fd_set((i), (f))
3976#define FD_CLR(i, f) rb_fd_clr((i), (f))
3977#define FD_ISSET(i, f) rb_fd_isset((i), (f))
3978
3979#elif defined(_WIN32)
3980
3981void
3982rb_fd_init(rb_fdset_t *set)
3983{
3984 set->capa = FD_SETSIZE;
3985 set->fdset = ALLOC(fd_set);
3986 FD_ZERO(set->fdset);
3987}
3988
3989void
3990rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
3991{
3992 rb_fd_init(dst);
3993 rb_fd_dup(dst, src);
3994}
3995
3996void
3997rb_fd_term(rb_fdset_t *set)
3998{
3999 xfree(set->fdset);
4000 set->fdset = NULL;
4001 set->capa = 0;
4002}
4003
4004void
4005rb_fd_set(int fd, rb_fdset_t *set)
4006{
4007 unsigned int i;
4008 SOCKET s = rb_w32_get_osfhandle(fd);
4009
4010 for (i = 0; i < set->fdset->fd_count; i++) {
4011 if (set->fdset->fd_array[i] == s) {
4012 return;
4013 }
4014 }
4015 if (set->fdset->fd_count >= (unsigned)set->capa) {
4016 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4017 set->fdset =
4018 rb_xrealloc_mul_add(
4019 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4020 }
4021 set->fdset->fd_array[set->fdset->fd_count++] = s;
4022}
4023
4024#undef FD_ZERO
4025#undef FD_SET
4026#undef FD_CLR
4027#undef FD_ISSET
4028
4029#define FD_ZERO(f) rb_fd_zero(f)
4030#define FD_SET(i, f) rb_fd_set((i), (f))
4031#define FD_CLR(i, f) rb_fd_clr((i), (f))
4032#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4033
4034#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4035
4036#endif
4037
4038#ifndef rb_fd_no_init
4039#define rb_fd_no_init(fds) (void)(fds)
4040#endif
4041
4042static int
4043wait_retryable(int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4044{
4045 if (*result < 0) {
4046 switch (errnum) {
4047 case EINTR:
4048#ifdef ERESTART
4049 case ERESTART:
4050#endif
4051 *result = 0;
4052 if (rel && hrtime_update_expire(rel, end)) {
4053 *rel = 0;
4054 }
4055 return TRUE;
4056 }
4057 return FALSE;
4058 }
4059 else if (*result == 0) {
4060 /* check for spurious wakeup */
4061 if (rel) {
4062 return !hrtime_update_expire(rel, end);
4063 }
4064 return TRUE;
4065 }
4066 return FALSE;
4067}
4068
4069struct select_set {
4070 int max;
4071 int sigwait_fd;
4072 rb_thread_t *th;
4073 rb_fdset_t *rset;
4074 rb_fdset_t *wset;
4075 rb_fdset_t *eset;
4076 rb_fdset_t orig_rset;
4077 rb_fdset_t orig_wset;
4078 rb_fdset_t orig_eset;
4079 struct timeval *timeout;
4080};
4081
4082static VALUE
4083select_set_free(VALUE p)
4084{
4085 struct select_set *set = (struct select_set *)p;
4086
4087 if (set->sigwait_fd >= 0) {
4088 rb_sigwait_fd_put(set->th, set->sigwait_fd);
4089 rb_sigwait_fd_migrate(set->th->vm);
4090 }
4091
4092 rb_fd_term(&set->orig_rset);
4093 rb_fd_term(&set->orig_wset);
4094 rb_fd_term(&set->orig_eset);
4095
4096 return Qfalse;
4097}
4098
4099static const rb_hrtime_t *
4100sigwait_timeout(rb_thread_t *th, int sigwait_fd, const rb_hrtime_t *orig,
4101 int *drained_p)
4102{
4103 static const rb_hrtime_t quantum = TIME_QUANTUM_USEC * 1000;
4104
4105 if (sigwait_fd >= 0 && (!ubf_threads_empty() || BUSY_WAIT_SIGNALS)) {
4106 *drained_p = check_signals_nogvl(th, sigwait_fd);
4107 if (!orig || *orig > quantum)
4108 return &quantum;
4109 }
4110
4111 return orig;
4112}
4113
4114#define sigwait_signals_fd(result, cond, sigwait_fd) \
4115 (result > 0 && (cond) ? (result--, (sigwait_fd)) : -1)
4116
4117static VALUE
4118do_select(VALUE p)
4119{
4120 struct select_set *set = (struct select_set *)p;
4121 int result = 0;
4122 int lerrno;
4123 rb_hrtime_t *to, rel, end = 0;
4124
4125 timeout_prepare(&to, &rel, &end, set->timeout);
4126#define restore_fdset(dst, src) \
4127 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4128#define do_select_update() \
4129 (restore_fdset(set->rset, &set->orig_rset), \
4130 restore_fdset(set->wset, &set->orig_wset), \
4131 restore_fdset(set->eset, &set->orig_eset), \
4132 TRUE)
4133
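    /* Note: select(2) mutates the fd_sets handed to it, so before each retry
     * do_select_update() (in the loop condition below) restores
     * set->{r,w,e}set from the orig_* copies captured by the caller. */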
4134 do {
4135 int drained;
4136 lerrno = 0;
4137
4138 BLOCKING_REGION(set->th, {
4139 const rb_hrtime_t *sto;
4140 struct timeval tv;
4141
4142 sto = sigwait_timeout(set->th, set->sigwait_fd, to, &drained);
4143 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4144 result = native_fd_select(set->max, set->rset, set->wset,
4145 set->eset,
4146 rb_hrtime2timeval(&tv, sto), set->th);
4147 if (result < 0) lerrno = errno;
4148 }
4149 }, set->sigwait_fd >= 0 ? ubf_sigwait : ubf_select, set->th, TRUE);
4150
4151 if (set->sigwait_fd >= 0) {
4152 int fd = sigwait_signals_fd(result,
4153 rb_fd_isset(set->sigwait_fd, set->rset),
4154 set->sigwait_fd);
4155 (void)check_signals_nogvl(set->th, fd);
4156 }
4157
4158 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4159 } while (wait_retryable(&result, lerrno, to, end) && do_select_update());
4160
4161 if (result < 0) {
4162 errno = lerrno;
4163 }
4164
4165 return (VALUE)result;
4166}
4167
4168static rb_fdset_t *
4169init_set_fd(int fd, rb_fdset_t *fds)
4170{
4171 if (fd < 0) {
4172 return 0;
4173 }
4174 rb_fd_init(fds);
4175 rb_fd_set(fd, fds);
4176
4177 return fds;
4178}
4179
4180int
4181rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4182 struct timeval *timeout)
4183{
4184 struct select_set set;
4185
4186 set.th = GET_THREAD();
4187 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4188 set.max = max;
4189 set.rset = read;
4190 set.wset = write;
4191 set.eset = except;
4192 set.timeout = timeout;
4193
4194 if (!set.rset && !set.wset && !set.eset) {
4195        if (!timeout) {
4196            rb_thread_sleep_forever();
4197 return 0;
4198 }
4199 rb_thread_wait_for(*timeout);
4200 return 0;
4201 }
4202
4203 set.sigwait_fd = rb_sigwait_fd_get(set.th);
4204 if (set.sigwait_fd >= 0) {
4205 if (set.rset)
4206 rb_fd_set(set.sigwait_fd, set.rset);
4207 else
4208 set.rset = init_set_fd(set.sigwait_fd, &set.orig_rset);
4209 if (set.sigwait_fd >= set.max) {
4210 set.max = set.sigwait_fd + 1;
4211 }
4212 }
4213#define fd_init_copy(f) do { \
4214 if (set.f) { \
4215 rb_fd_resize(set.max - 1, set.f); \
4216 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4217 rb_fd_init_copy(&set.orig_##f, set.f); \
4218 } \
4219 } \
4220 else { \
4221 rb_fd_no_init(&set.orig_##f); \
4222 } \
4223 } while (0)
4224 fd_init_copy(rset);
4225 fd_init_copy(wset);
4226 fd_init_copy(eset);
4227#undef fd_init_copy
4228
4229 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4230}
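
/* [Editorial sketch] rb_thread_fd_select() is the GVL-aware replacement for
 * select(2) in C extensions: it releases the GVL while blocking and services
 * pending interrupts.  Minimal use, for some hypothetical descriptor `fd'
 * (everything it needs is already in scope in this file):
 */
#if 0
static int
fd_readable_sketch(int fd)
{
    rb_fdset_t rfds;
    struct timeval tv = { 1, 0 };  /* wait at most one second */
    int n, ready;

    rb_fd_init(&rfds);
    rb_fd_set(fd, &rfds);
    n = rb_thread_fd_select(fd + 1, &rfds, NULL, NULL, &tv);
    ready = (n > 0) && rb_fd_isset(fd, &rfds);
    rb_fd_term(&rfds);
    return ready;
}
#endif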
4231
4232#ifdef USE_POLL
4233
4234/* Same as the Linux kernel's definitions. TODO: make a platform-independent definition. */
4235#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4236#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4237#define POLLEX_SET (POLLPRI)
4238
4239#ifndef POLLERR_SET /* defined for FreeBSD for now */
4240# define POLLERR_SET (0)
4241#endif
4242
4243/*
4244 * returns a mask of RB_WAITFD_* events
4245 */
4246int
4247rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4248{
4249 struct pollfd fds[2];
4250 int result = 0;
4251 int drained;
4252    nfds_t nfds;
4253    rb_unblock_function_t *ubf;
4254 struct waiting_fd wfd;
4255 int state;
4256 volatile int lerrno;
4257
4258 wfd.th = GET_THREAD();
4259 wfd.fd = fd;
4260
4261 RB_VM_LOCK_ENTER();
4262 {
4263 ccan_list_add(&wfd.th->vm->waiting_fds, &wfd.wfd_node);
4264 }
4265 RB_VM_LOCK_LEAVE();
4266
4267 EC_PUSH_TAG(wfd.th->ec);
4268 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4269 rb_hrtime_t *to, rel, end = 0;
4270 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4271 timeout_prepare(&to, &rel, &end, timeout);
4272 fds[0].fd = fd;
4273 fds[0].events = (short)events;
4274 fds[0].revents = 0;
4275 do {
4276 fds[1].fd = rb_sigwait_fd_get(wfd.th);
4277
4278 if (fds[1].fd >= 0) {
4279 fds[1].events = POLLIN;
4280 fds[1].revents = 0;
4281 nfds = 2;
4282 ubf = ubf_sigwait;
4283 }
4284 else {
4285 nfds = 1;
4286 ubf = ubf_select;
4287 }
4288
4289 lerrno = 0;
4290 BLOCKING_REGION(wfd.th, {
4291 const rb_hrtime_t *sto;
4292 struct timespec ts;
4293
4294 sto = sigwait_timeout(wfd.th, fds[1].fd, to, &drained);
4295 if (!RUBY_VM_INTERRUPTED(wfd.th->ec)) {
4296 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, sto), 0);
4297 if (result < 0) lerrno = errno;
4298 }
4299 }, ubf, wfd.th, TRUE);
4300
4301 if (fds[1].fd >= 0) {
4302 int fd1 = sigwait_signals_fd(result, fds[1].revents, fds[1].fd);
4303 (void)check_signals_nogvl(wfd.th, fd1);
4304 rb_sigwait_fd_put(wfd.th, fds[1].fd);
4305 rb_sigwait_fd_migrate(wfd.th->vm);
4306 }
4307 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4308 } while (wait_retryable(&result, lerrno, to, end));
4309 }
4310 EC_POP_TAG();
4311
4312 RB_VM_LOCK_ENTER();
4313 {
4314 ccan_list_del(&wfd.wfd_node);
4315 }
4316 RB_VM_LOCK_LEAVE();
4317
4318 if (state) {
4319 EC_JUMP_TAG(wfd.th->ec, state);
4320 }
4321
4322 if (result < 0) {
4323 errno = lerrno;
4324 return -1;
4325 }
4326
4327 if (fds[0].revents & POLLNVAL) {
4328 errno = EBADF;
4329 return -1;
4330 }
4331
4332 /*
4333     * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits,
4334     * so the revents must be mapped back to RB_WAITFD_* flags.
4335 */
4336 result = 0;
4337 if (fds[0].revents & POLLIN_SET)
4338 result |= RB_WAITFD_IN;
4339 if (fds[0].revents & POLLOUT_SET)
4340 result |= RB_WAITFD_OUT;
4341 if (fds[0].revents & POLLEX_SET)
4342 result |= RB_WAITFD_PRI;
4343
4344    /* on error, treat all requested events as ready */
4345 if (fds[0].revents & POLLERR_SET)
4346 result |= events;
4347
4348 return result;
4349}
4350#else /* ! USE_POLL - implement rb_thread_wait_for_single_fd() using select() */
4351struct select_args {
4352 union {
4353 int fd;
4354 int error;
4355 } as;
4356 rb_fdset_t *read;
4357 rb_fdset_t *write;
4358 rb_fdset_t *except;
4359 struct waiting_fd wfd;
4360 struct timeval *tv;
4361};
4362
4363static VALUE
4364select_single(VALUE ptr)
4365{
4366 struct select_args *args = (struct select_args *)ptr;
4367 int r;
4368
4369 r = rb_thread_fd_select(args->as.fd + 1,
4370 args->read, args->write, args->except, args->tv);
4371 if (r == -1)
4372 args->as.error = errno;
4373 if (r > 0) {
4374 r = 0;
4375 if (args->read && rb_fd_isset(args->as.fd, args->read))
4376 r |= RB_WAITFD_IN;
4377 if (args->write && rb_fd_isset(args->as.fd, args->write))
4378 r |= RB_WAITFD_OUT;
4379 if (args->except && rb_fd_isset(args->as.fd, args->except))
4380 r |= RB_WAITFD_PRI;
4381 }
4382 return (VALUE)r;
4383}
4384
4385static VALUE
4386select_single_cleanup(VALUE ptr)
4387{
4388 struct select_args *args = (struct select_args *)ptr;
4389
4390 RB_VM_LOCK_ENTER();
4391 {
4392 ccan_list_del(&args->wfd.wfd_node);
4393 }
4394 RB_VM_LOCK_LEAVE();
4395 if (args->read) rb_fd_term(args->read);
4396 if (args->write) rb_fd_term(args->write);
4397 if (args->except) rb_fd_term(args->except);
4398
4399 return (VALUE)-1;
4400}
4401
4402int
4403rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4404{
4405 rb_fdset_t rfds, wfds, efds;
4406 struct select_args args;
4407 int r;
4408 VALUE ptr = (VALUE)&args;
4409
4410 args.as.fd = fd;
4411 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4412 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4413 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4414 args.tv = timeout;
4415 args.wfd.fd = fd;
4416 args.wfd.th = GET_THREAD();
4417
4418 RB_VM_LOCK_ENTER();
4419 {
4420 ccan_list_add(&args.wfd.th->vm->waiting_fds, &args.wfd.wfd_node);
4421 }
4422 RB_VM_LOCK_LEAVE();
4423
4424 r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4425 if (r == -1)
4426 errno = args.as.error;
4427
4428 return r;
4429}
4430#endif /* ! USE_POLL */
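
/* [Editorial sketch] Whichever branch above was compiled, the contract of
 * rb_thread_wait_for_single_fd() is the same: block with the GVL released
 * until `fd' has one of the requested events (returns the RB_WAITFD_* mask),
 * the timeout expires (returns 0), or an error occurs (returns -1, errno set):
 */
#if 0
static void
wait_fd_sketch(int fd)
{
    struct timeval tv = { 5, 0 };
    int revents = rb_thread_wait_for_single_fd(fd, RB_WAITFD_IN | RB_WAITFD_OUT, &tv);

    if (revents < 0) rb_sys_fail("wait_for_single_fd");
    else if (revents == 0) { /* timed out */ }
    else {
        if (revents & RB_WAITFD_IN)  { /* readable */ }
        if (revents & RB_WAITFD_OUT) { /* writable */ }
    }
}
#endif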
4431
4432/*
4433 * for GC
4434 */
4435
4436#ifdef USE_CONSERVATIVE_STACK_END
4437void
4438rb_gc_set_stack_end(VALUE **stack_end_p)
4439{
4440 VALUE stack_end;
4441 *stack_end_p = &stack_end;
4442}
4443#endif
4444
4445/*
4446 *
4447 */
4448
4449void
4450rb_threadptr_check_signal(rb_thread_t *mth)
4451{
4452 /* mth must be main_thread */
4453 if (rb_signal_buff_size() > 0) {
4454 /* wakeup main thread */
4455 threadptr_trap_interrupt(mth);
4456 }
4457}
4458
4459static void
4460async_bug_fd(const char *mesg, int errno_arg, int fd)
4461{
4462 char buff[64];
4463 size_t n = strlcpy(buff, mesg, sizeof(buff));
4464 if (n < sizeof(buff)-3) {
4465 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4466 }
4467 rb_async_bug_errno(buff, errno_arg);
4468}
4469
4470/* VM-dependent APIs are not available to this function */
4471static int
4472consume_communication_pipe(int fd)
4473{
4474#if USE_EVENTFD
4475 uint64_t buff[1];
4476#else
4477    /* the buffer can be shared because nothing else refers to it. */
4478 static char buff[1024];
4479#endif
4480 ssize_t result;
4481 int ret = FALSE; /* for rb_sigwait_sleep */
4482
4483 /*
4484 * disarm UBF_TIMER before we read, because it can become
4485     * re-armed at any time via the signal handler, refilling the pipe.
4486     * We can disarm it because this thread is now processing signals,
4487     * and we do not want an unnecessary SIGVTALRM.
4488 */
4489 ubf_timer_disarm();
4490
4491 while (1) {
4492 result = read(fd, buff, sizeof(buff));
4493 if (result > 0) {
4494 ret = TRUE;
4495 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4496 return ret;
4497 }
4498 }
4499 else if (result == 0) {
4500 return ret;
4501 }
4502 else if (result < 0) {
4503 int e = errno;
4504 switch (e) {
4505 case EINTR:
4506 continue; /* retry */
4507 case EAGAIN:
4508#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4509 case EWOULDBLOCK:
4510#endif
4511 return ret;
4512 default:
4513 async_bug_fd("consume_communication_pipe: read", e, fd);
4514 }
4515 }
4516 }
4517}
4518
4519static int
4520check_signals_nogvl(rb_thread_t *th, int sigwait_fd)
4521{
4522 rb_vm_t *vm = GET_VM(); /* th may be 0 */
4523 int ret = sigwait_fd >= 0 ? consume_communication_pipe(sigwait_fd) : FALSE;
4524 ubf_wakeup_all_threads();
4525 ruby_sigchld_handler(vm);
4526 if (rb_signal_buff_size()) {
4527 if (th == vm->ractor.main_thread) {
4528 /* no need to lock + wakeup if already in main thread */
4529 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
4530 }
4531 else {
4532 threadptr_trap_interrupt(vm->ractor.main_thread);
4533 }
4534 ret = TRUE; /* for SIGCHLD_LOSSY && rb_sigwait_sleep */
4535 }
4536 return ret;
4537}
4538
4539void
4540rb_thread_stop_timer_thread(void)
4541{
4542 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4543 native_reset_timer_thread();
4544 }
4545}
4546
4547void
4548rb_thread_reset_timer_thread(void)
4549{
4550 native_reset_timer_thread();
4551}
4552
4553void
4554rb_thread_start_timer_thread(void)
4555{
4556 system_working = 1;
4557 rb_thread_create_timer_thread();
4558}
4559
4560static int
4561clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4562{
4563 int i;
4564 VALUE coverage = (VALUE)val;
4565 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4566 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4567
4568 if (lines) {
4569 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4570 rb_ary_clear(lines);
4571 }
4572 else {
4573 int i;
4574 for (i = 0; i < RARRAY_LEN(lines); i++) {
4575 if (RARRAY_AREF(lines, i) != Qnil)
4576 RARRAY_ASET(lines, i, INT2FIX(0));
4577 }
4578 }
4579 }
4580 if (branches) {
4581 VALUE counters = RARRAY_AREF(branches, 1);
4582 for (i = 0; i < RARRAY_LEN(counters); i++) {
4583 RARRAY_ASET(counters, i, INT2FIX(0));
4584 }
4585 }
4586
4587 return ST_CONTINUE;
4588}
4589
4590void
4591rb_clear_coverages(void)
4592{
4593 VALUE coverages = rb_get_coverages();
4594 if (RTEST(coverages)) {
4595 rb_hash_foreach(coverages, clear_coverage_i, 0);
4596 }
4597}
4598
4599#if defined(HAVE_WORKING_FORK)
4600
4601static void
4602rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4603{
4604 rb_thread_t *i = 0;
4605 rb_vm_t *vm = th->vm;
4606 rb_ractor_t *r = th->ractor;
4607 vm->ractor.main_ractor = r;
4608 vm->ractor.main_thread = th;
4609 r->threads.main = th;
4610 r->status_ = ractor_created;
4611
4612 thread_sched_atfork(TH_SCHED(th));
4613 ubf_list_atfork();
4614
4615 // OK. Only this thread accesses:
4616 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4617 ccan_list_for_each(&r->threads.set, i, lt_node) {
4618 atfork(i, th);
4619 }
4620 }
4621 rb_vm_living_threads_init(vm);
4622
4623 rb_ractor_atfork(vm, th);
4624
4625 /* may be held by MJIT threads in parent */
4626 rb_native_mutex_initialize(&vm->waitpid_lock);
4627 rb_native_mutex_initialize(&vm->workqueue_lock);
4628
4629 /* may be held by any thread in parent */
4630 rb_native_mutex_initialize(&th->interrupt_lock);
4631
4632 vm->fork_gen++;
4633 rb_ractor_sleeper_threads_clear(th->ractor);
4634 rb_clear_coverages();
4635
4636 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4637 VM_ASSERT(vm->ractor.cnt == 1);
4638}
4639
4640static void
4641terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4642{
4643 if (th != current_th) {
4644 rb_mutex_abandon_keeping_mutexes(th);
4645 rb_mutex_abandon_locking_mutex(th);
4646 thread_cleanup_func(th, TRUE);
4647 }
4648}
4649
4650void rb_fiber_atfork(rb_thread_t *);
4651void
4652rb_thread_atfork(void)
4653{
4654 rb_thread_t *th = GET_THREAD();
4655 rb_thread_atfork_internal(th, terminate_atfork_i);
4656 th->join_list = NULL;
4657 rb_fiber_atfork(th);
4658
4659    /* We don't want to reproduce CVE-2003-0900. */
4660    rb_reset_random_seed();
4661
4662    /* In the child, start the MJIT worker thread here; this is safer than starting it immediately after `after_fork_ruby`. */
4663 mjit_child_after_fork();
4664}
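
/* [Editorial sketch] This is the hook the interpreter runs in the child after
 * fork(2) (via process.c).  An extension that forks and keeps running Ruby in
 * the child is expected to follow the same pattern:
 */
#if 0
pid_t pid = fork();
if (pid == 0) {
    rb_thread_atfork();  /* child: discard bookkeeping for the other threads */
    /* ... continue running Ruby code in the child ... */
}
#endif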
4665
4666static void
4667terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4668{
4669 if (th != current_th) {
4670 thread_cleanup_func_before_exec(th);
4671 }
4672}
4673
4674void
4675rb_thread_atfork_before_exec(void)
4676{
4677 rb_thread_t *th = GET_THREAD();
4678 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4679}
4680#else
4681void
4682rb_thread_atfork(void)
4683{
4684}
4685
4686void
4687rb_thread_atfork_before_exec(void)
4688{
4689}
4690#endif
4691
4692struct thgroup {
4693 int enclosed;
4694 VALUE group;
4695};
4696
4697static size_t
4698thgroup_memsize(const void *ptr)
4699{
4700 return sizeof(struct thgroup);
4701}
4702
4703static const rb_data_type_t thgroup_data_type = {
4704 "thgroup",
4705 {0, RUBY_TYPED_DEFAULT_FREE, thgroup_memsize,},
4706 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4707};
4708
4709/*
4710 * Document-class: ThreadGroup
4711 *
4712 * ThreadGroup provides a means of keeping track of a number of threads as a
4713 * group.
4714 *
4715 * A given Thread object can only belong to one ThreadGroup at a time; adding
4716 * a thread to a new group will remove it from any previous group.
4717 *
4718 * Newly created threads belong to the same group as the thread from which they
4719 * were created.
4720 */
4721
4722/*
4723 * Document-const: Default
4724 *
4725 * The default ThreadGroup created when Ruby starts; all Threads belong to it
4726 * by default.
4727 */
4728static VALUE
4729thgroup_s_alloc(VALUE klass)
4730{
4731 VALUE group;
4732 struct thgroup *data;
4733
4734 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4735 data->enclosed = 0;
4736 data->group = group;
4737
4738 return group;
4739}
4740
4741/*
4742 * call-seq:
4743 * thgrp.list -> array
4744 *
4745 * Returns an array of all existing Thread objects that belong to this group.
4746 *
4747 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4748 */
4749
4750static VALUE
4751thgroup_list(VALUE group)
4752{
4753 VALUE ary = rb_ary_new();
4754 rb_thread_t *th = 0;
4755 rb_ractor_t *r = GET_RACTOR();
4756
4757 ccan_list_for_each(&r->threads.set, th, lt_node) {
4758 if (th->thgroup == group) {
4759 rb_ary_push(ary, th->self);
4760 }
4761 }
4762 return ary;
4763}
4764
4765
4766/*
4767 * call-seq:
4768 * thgrp.enclose -> thgrp
4769 *
4770 * Prevents threads from being added to or removed from the receiving
4771 * ThreadGroup.
4772 *
4773 * New threads can still be started in an enclosed ThreadGroup.
4774 *
4775 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4776 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4777 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4778 * tg.add thr
4779 * #=> ThreadError: can't move from the enclosed thread group
4780 */
4781
4782static VALUE
4783thgroup_enclose(VALUE group)
4784{
4785 struct thgroup *data;
4786
4787 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4788 data->enclosed = 1;
4789
4790 return group;
4791}
4792
4793
4794/*
4795 * call-seq:
4796 * thgrp.enclosed? -> true or false
4797 *
4798 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4799 */
4800
4801static VALUE
4802thgroup_enclosed_p(VALUE group)
4803{
4804 struct thgroup *data;
4805
4806 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4807 return RBOOL(data->enclosed);
4808}
4809
4810
4811/*
4812 * call-seq:
4813 * thgrp.add(thread) -> thgrp
4814 *
4815 * Adds the given +thread+ to this group, removing it from any other
4816 * group to which it may have previously been a member.
4817 *
4818 * puts "Initial group is #{ThreadGroup::Default.list}"
4819 * tg = ThreadGroup.new
4820 * t1 = Thread.new { sleep }
4821 * t2 = Thread.new { sleep }
4822 * puts "t1 is #{t1}"
4823 * puts "t2 is #{t2}"
4824 * tg.add(t1)
4825 * puts "Initial group now #{ThreadGroup::Default.list}"
4826 * puts "tg group now #{tg.list}"
4827 *
4828 * This will produce:
4829 *
4830 * Initial group is #<Thread:0x401bdf4c>
4831 * t1 is #<Thread:0x401b3c90>
4832 * t2 is #<Thread:0x401b3c18>
4833 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4834 * tg group now #<Thread:0x401b3c90>
4835 */
4836
4837static VALUE
4838thgroup_add(VALUE group, VALUE thread)
4839{
4840 rb_thread_t *target_th = rb_thread_ptr(thread);
4841 struct thgroup *data;
4842
4843 if (OBJ_FROZEN(group)) {
4844 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4845 }
4846 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4847 if (data->enclosed) {
4848 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4849 }
4850
4851 if (OBJ_FROZEN(target_th->thgroup)) {
4852 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4853 }
4854 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4855    if (data->enclosed) {
4856        rb_raise(rb_eThreadError,
4857 "can't move from the enclosed thread group");
4858 }
4859
4860 target_th->thgroup = group;
4861 return group;
4862}
4863
4864/*
4865 * Document-class: ThreadShield
4866 */
4867static void
4868thread_shield_mark(void *ptr)
4869{
4870 rb_gc_mark((VALUE)ptr);
4871}
4872
4873static const rb_data_type_t thread_shield_data_type = {
4874 "thread_shield",
4875 {thread_shield_mark, 0, 0,},
4876 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
4877};
4878
4879static VALUE
4880thread_shield_alloc(VALUE klass)
4881{
4882 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
4883}
4884
4885#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4886#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
4887#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4888#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
4889STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
4890static inline unsigned int
4891rb_thread_shield_waiting(VALUE b)
4892{
4893 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
4894}
4895
4896static inline void
4897rb_thread_shield_waiting_inc(VALUE b)
4898{
4899 unsigned int w = rb_thread_shield_waiting(b);
4900 w++;
4901 if (w > THREAD_SHIELD_WAITING_MAX)
4902 rb_raise(rb_eRuntimeError, "waiting count overflow");
4903 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4904 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4905}
4906
4907static inline void
4908rb_thread_shield_waiting_dec(VALUE b)
4909{
4910 unsigned int w = rb_thread_shield_waiting(b);
4911 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
4912 w--;
4913 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4914 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4915}
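
/* [Editorial note] The waiting count is packed into the shield object's flag
 * word: bits FL_USER0..FL_USER19 hold the counter, shifted by FL_USHIFT.  The
 * two helpers above are the usual extract / store-back pattern:
 *
 *     w     = (flags & THREAD_SHIELD_WAITING_MASK) >> THREAD_SHIELD_WAITING_SHIFT;
 *     flags = (flags & ~THREAD_SHIELD_WAITING_MASK) | ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
 *
 * so the shield needs no separate counter field and no extra allocation.
 */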
4916
4917VALUE
4918rb_thread_shield_new(void)
4919{
4920 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4921 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4922 return thread_shield;
4923}
4924
4925/*
4926 * Wait on a thread shield.
4927 *
4928 * Returns
4929 * true: acquired the thread shield
4930 *   false: the thread shield was destroyed and no other threads are waiting
4931 *   nil:   the thread shield was destroyed but is still in use
4932 */
4933VALUE
4934rb_thread_shield_wait(VALUE self)
4935{
4936 VALUE mutex = GetThreadShieldPtr(self);
4937 rb_mutex_t *m;
4938
4939 if (!mutex) return Qfalse;
4940 m = mutex_ptr(mutex);
4941 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
4942 rb_thread_shield_waiting_inc(self);
4943 rb_mutex_lock(mutex);
4944 rb_thread_shield_waiting_dec(self);
4945 if (DATA_PTR(self)) return Qtrue;
4946 rb_mutex_unlock(mutex);
4947 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
4948}
4949
4950static VALUE
4951thread_shield_get_mutex(VALUE self)
4952{
4953 VALUE mutex = GetThreadShieldPtr(self);
4954 if (!mutex)
4955 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
4956 return mutex;
4957}
4958
4959/*
4960 * Release a thread shield, and return true if it has waiting threads.
4961 */
4962VALUE
4963rb_thread_shield_release(VALUE self)
4964{
4965 VALUE mutex = thread_shield_get_mutex(self);
4966 rb_mutex_unlock(mutex);
4967 return RBOOL(rb_thread_shield_waiting(self) > 0);
4968}
4969
4970/*
4971 * Release and destroy a thread shield, and return true if it has waiting threads.
4972 */
4973VALUE
4974rb_thread_shield_destroy(VALUE self)
4975{
4976 VALUE mutex = thread_shield_get_mutex(self);
4977 DATA_PTR(self) = 0;
4978 rb_mutex_unlock(mutex);
4979 return RBOOL(rb_thread_shield_waiting(self) > 0);
4980}
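
/* [Editorial sketch] Typical ThreadShield lifecycle (this is how load.c
 * serializes concurrent require of the same file): the creating thread holds
 * the shield, other threads block in rb_thread_shield_wait(), and the creator
 * finally releases or destroys it:
 */
#if 0
VALUE shield = rb_thread_shield_new();  /* creator now holds the inner mutex */
/* ... other threads: rb_thread_shield_wait(shield) blocks here ... */
rb_thread_shield_destroy(shield);       /* wakes waiters; Qtrue if there were any */
#endif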
4981
4982static VALUE
4983threadptr_recursive_hash(rb_thread_t *th)
4984{
4985 return th->ec->local_storage_recursive_hash;
4986}
4987
4988static void
4989threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
4990{
4991 th->ec->local_storage_recursive_hash = hash;
4992}
4993
4994ID rb_frame_last_func(void);
4995
4996/*
4997 * Returns the current "recursive list" used to detect recursion.
4998 * This list is a hash table, unique for the current thread and for
4999 * the current __callee__.
5000 */
5001
5002static VALUE
5003recursive_list_access(VALUE sym)
5004{
5005 rb_thread_t *th = GET_THREAD();
5006 VALUE hash = threadptr_recursive_hash(th);
5007 VALUE list;
5008 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5009 hash = rb_ident_hash_new();
5010 threadptr_recursive_hash_set(th, hash);
5011 list = Qnil;
5012 }
5013 else {
5014 list = rb_hash_aref(hash, sym);
5015 }
5016 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5017 list = rb_ident_hash_new();
5018 rb_hash_aset(hash, sym, list);
5019 }
5020 return list;
5021}
5022
5023/*
5024 * Returns Qtrue if and only if obj (or the pair <obj, paired_obj>) is already
5025 * in the recursion list.
5026 * Assumes the recursion list is valid.
5027 */
5028
5029static VALUE
5030recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5031{
5032#if SIZEOF_LONG == SIZEOF_VOIDP
5033 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5034#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5035 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5036 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5037#endif
5038
5039 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5040 if (UNDEF_P(pair_list))
5041 return Qfalse;
5042 if (paired_obj_id) {
5043 if (!RB_TYPE_P(pair_list, T_HASH)) {
5044 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5045 return Qfalse;
5046 }
5047 else {
5048 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5049 return Qfalse;
5050 }
5051 }
5052 return Qtrue;
5053}
5054
5055/*
5056 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5057 * For a single obj, it sets list[obj] to Qtrue.
5058 * For a pair, it sets list[obj] to paired_obj_id if possible,
5059 * otherwise list[obj] becomes a hash like:
5060 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5061 * Assumes the recursion list is valid.
5062 */
5063
5064static void
5065recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5066{
5067 VALUE pair_list;
5068
5069 if (!paired_obj) {
5070 rb_hash_aset(list, obj, Qtrue);
5071 }
5072 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5073 rb_hash_aset(list, obj, paired_obj);
5074 }
5075 else {
5076 if (!RB_TYPE_P(pair_list, T_HASH)){
5077 VALUE other_paired_obj = pair_list;
5078 pair_list = rb_hash_new();
5079 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5080 rb_hash_aset(list, obj, pair_list);
5081 }
5082 rb_hash_aset(pair_list, paired_obj, Qtrue);
5083 }
5084}
5085
5086/*
5087 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5088 * For a pair, if list[obj] is a hash, then paired_obj_id is
5089 * removed from the hash and no attempt is made to simplify
5090 * list[obj] from {only_one_paired_id => true} to only_one_paired_id
5091 * Assumes the recursion list is valid.
5092 */
5093
5094static int
5095recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5096{
5097 if (paired_obj) {
5098 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5099 if (UNDEF_P(pair_list)) {
5100 return 0;
5101 }
5102 if (RB_TYPE_P(pair_list, T_HASH)) {
5103 rb_hash_delete_entry(pair_list, paired_obj);
5104 if (!RHASH_EMPTY_P(pair_list)) {
5105                return 1; /* keep the hash until it is empty */
5106 }
5107 }
5108 }
5109 rb_hash_delete_entry(list, obj);
5110 return 1;
5111}
5112
5113struct exec_recursive_params {
5114 VALUE (*func) (VALUE, VALUE, int);
5115 VALUE list;
5116 VALUE obj;
5117 VALUE pairid;
5118 VALUE arg;
5119};
5120
5121static VALUE
5122exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5123{
5124 struct exec_recursive_params *p = (void *)data;
5125 return (*p->func)(p->obj, p->arg, FALSE);
5126}
5127
5128/*
5129 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5130 * current method is called recursively on obj, or on the pair <obj, pairid>
5131 * If outer is 0, then the innermost func will be called with recursive set
5132 * to Qtrue, otherwise the outermost func will be called. In the latter case,
5133 * all inner func are short-circuited by throw.
5134 * Implementation details: the value thrown is the recursive list which is
5135 * proper to the current method and unlikely to be caught anywhere else.
5136 * list[recursive_key] is used as a flag for the outermost call.
5137 */
5138
5139static VALUE
5140exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5141{
5142 VALUE result = Qundef;
5143 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5144 struct exec_recursive_params p;
5145 int outermost;
5146 p.list = recursive_list_access(sym);
5147 p.obj = obj;
5148 p.pairid = pairid;
5149 p.arg = arg;
5150 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5151
5152 if (recursive_check(p.list, p.obj, pairid)) {
5153 if (outer && !outermost) {
5154 rb_throw_obj(p.list, p.list);
5155 }
5156 return (*func)(obj, arg, TRUE);
5157 }
5158 else {
5159 enum ruby_tag_type state;
5160
5161 p.func = func;
5162
5163 if (outermost) {
5164 recursive_push(p.list, ID2SYM(recursive_key), 0);
5165 recursive_push(p.list, p.obj, p.pairid);
5166 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5167 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5168 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5169 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5170 if (result == p.list) {
5171 result = (*func)(obj, arg, TRUE);
5172 }
5173 }
5174 else {
5175 volatile VALUE ret = Qundef;
5176 recursive_push(p.list, p.obj, p.pairid);
5177 EC_PUSH_TAG(GET_EC());
5178 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5179 ret = (*func)(obj, arg, FALSE);
5180 }
5181 EC_POP_TAG();
5182 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5183 goto invalid;
5184 }
5185 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5186 result = ret;
5187 }
5188 }
5189    *(volatile struct exec_recursive_params *)&p; /* volatile read: keep p live so longjmp cannot clobber it */
5190 return result;
5191
5192 invalid:
5193 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5194 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5195             sym, rb_thread_current());
5196    UNREACHABLE_RETURN(Qnil);
5197}
5198
5199/*
5200 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5201 * current method is called recursively on obj
5202 */
5203
5204VALUE
5205rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5206{
5207 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5208}
5209
5210/*
5211 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5212 * current method is called recursively on the ordered pair <obj, paired_obj>
5213 */
5214
5215VALUE
5216rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5217{
5218 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5219}
5220
5221/*
5222 * If recursion is detected on the current method and obj, the outermost
5223 * func will be called with (obj, arg, Qtrue). All inner func will be
5224 * short-circuited using throw.
5225 */
5226
5227VALUE
5228rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5229{
5230 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5231}
5232
5233VALUE
5234rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5235{
5236 return exec_recursive(func, obj, 0, arg, 1, mid);
5237}
5238
5239/*
5240 * If recursion is detected on the current method, obj and paired_obj,
5241 * the outermost func will be called with (obj, arg, Qtrue). All inner
5242 * func will be short-circuited using throw.
5243 */
5244
5245VALUE
5246rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5247{
5248 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5249}
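
/* [Editorial sketch] These entry points are how inspect/hash/== style code
 * stays safe on cyclic structures.  A hypothetical recursion-safe printer:
 */
#if 0
static VALUE
repr_body(VALUE obj, VALUE arg, int recursive)
{
    if (recursive) return rb_str_new_cstr("[...]");  /* cycle detected */
    /* ... build the real representation, possibly re-entering my_repr() ... */
    return rb_sprintf("#<thing:%p>", (void *)obj);
}

static VALUE
my_repr(VALUE obj)
{
    return rb_exec_recursive(repr_body, obj, 0);
}
#endif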
5250
5251/*
5252 * call-seq:
5253 * thread.backtrace -> array or nil
5254 *
5255 * Returns the current backtrace of the target thread.
5256 *
5257 */
5258
5259static VALUE
5260rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5261{
5262 return rb_vm_thread_backtrace(argc, argv, thval);
5263}
5264
5265/* call-seq:
5266 * thread.backtrace_locations(*args) -> array or nil
5267 *
5268 * Returns the execution stack for the target thread---an array containing
5269 * backtrace location objects.
5270 *
5271 * See Thread::Backtrace::Location for more information.
5272 *
5273 * This method behaves similarly to Kernel#caller_locations except it applies
5274 * to a specific thread.
5275 */
5276static VALUE
5277rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5278{
5279 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5280}
5281
5282void
5283Init_Thread_Mutex(void)
5284{
5285 rb_thread_t *th = GET_THREAD();
5286
5287 rb_native_mutex_initialize(&th->vm->waitpid_lock);
5288 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5289 rb_native_mutex_initialize(&th->interrupt_lock);
5290}
5291
5292/*
5293 * Document-class: ThreadError
5294 *
5295 * Raised when an invalid operation is attempted on a thread.
5296 *
5297 * For example, when no other thread has been started:
5298 *
5299 * Thread.stop
5300 *
5301 * This will raise the following exception:
5302 *
5303 * ThreadError: stopping only thread
5304 * note: use sleep to stop forever
5305 */
5306
5307void
5308Init_Thread(void)
5309{
5310 VALUE cThGroup;
5311 rb_thread_t *th = GET_THREAD();
5312
5313 sym_never = ID2SYM(rb_intern_const("never"));
5314 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5315 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5316
5317 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5318 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5319 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5320 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5321 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5322 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5323 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5324 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5325 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5326 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5327 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5328 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5329 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5330 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5331 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5332 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5333 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5334 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5335 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5336
5337 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5338 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5339 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5340 rb_define_method(rb_cThread, "value", thread_value, 0);
5341 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5342 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5343 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5344 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5345 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5346 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5347 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5348 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5349 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5350 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5351 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5352 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5353 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5354 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5355 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5356 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5357 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5358 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5359 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5360 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5361 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5362 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5363 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5364 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5365 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5366 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5367
5368 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5369 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5370 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5371 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5372 rb_define_alias(rb_cThread, "inspect", "to_s");
5373
5374 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5375 "stream closed in another thread");
5376
5377 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5378 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5379 rb_define_method(cThGroup, "list", thgroup_list, 0);
5380 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5381 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5382 rb_define_method(cThGroup, "add", thgroup_add, 1);
5383
5384 {
5385 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5386 rb_define_const(cThGroup, "Default", th->thgroup);
5387 }
5388
5389    recursive_key = rb_intern_const("__recursive_key__");
5390
5391 /* init thread core */
5392 {
5393 /* main thread setting */
5394 {
5395 /* acquire global vm lock */
5396 struct rb_thread_sched *sched = TH_SCHED(th);
5397 thread_sched_to_running(sched, th);
5398
5399 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5400 th->pending_interrupt_queue_checked = 0;
5401 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5402 }
5403 }
5404
5405 rb_thread_create_timer_thread();
5406
5407 Init_thread_sync();
5408}
5409
5410int
5411ruby_thread_has_gvl_p(void)
5412{
5413 rb_thread_t *th = ruby_thread_from_native();
5414
5415 return th != 0;
5416}
5417
5418#ifdef NON_SCALAR_THREAD_ID
5419 #define thread_id_str(th) (NULL)
5420#else
5421 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5422#endif
5423
5424static void
5425debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5426{
5427 rb_thread_t *th = 0;
5428 VALUE sep = rb_str_new_cstr("\n ");
5429
5430 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5431 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5432 (void *)GET_THREAD(), (void *)r->threads.main);
5433
5434 ccan_list_for_each(&r->threads.set, th, lt_node) {
5435 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5436 "native:%p int:%u",
5437 th->self, (void *)th, thread_id_str(th), th->ec->interrupt_flag);
5438
5439 if (th->locking_mutex) {
5440 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5441 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5442 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5443 }
5444
5445 {
5446 struct rb_waiting_list *list = th->join_list;
5447 while (list) {
5448                rb_str_catf(msg, "\n    depended by: rb_thread_id:%p", (void *)list->thread);
5449 list = list->next;
5450 }
5451 }
5452 rb_str_catf(msg, "\n ");
5453 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, 0, 0), sep));
5454 rb_str_catf(msg, "\n");
5455 }
5456}
5457
5458static void
5459rb_check_deadlock(rb_ractor_t *r)
5460{
5461 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5462
5463 int found = 0;
5464 rb_thread_t *th = NULL;
5465 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5466 int ltnum = rb_ractor_living_thread_num(r);
5467
5468 if (ltnum > sleeper_num) return;
5469 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5470 if (patrol_thread && patrol_thread != GET_THREAD()) return;
5471
5472 ccan_list_for_each(&r->threads.set, th, lt_node) {
5473 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5474 found = 1;
5475 }
5476 else if (th->locking_mutex) {
5477 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5478 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5479 found = 1;
5480 }
5481 }
5482 if (found)
5483 break;
5484 }
5485
5486 if (!found) {
5487 VALUE argv[2];
5488 argv[0] = rb_eFatal;
5489 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5490 debug_deadlock_check(r, argv[1]);
5491 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5492 rb_threadptr_raise(r->threads.main, 2, argv);
5493 }
5494}
5495
5496// Used for VM memsize reporting. Returns the size of a list of waiting_fd
5497// structs. Defined here because the struct definition lives here as well.
5498size_t
5499rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
5500{
5501 struct waiting_fd *waitfd = 0;
5502 size_t size = 0;
5503
5504 ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
5505 size += sizeof(struct waiting_fd);
5506 }
5507
5508 return size;
5509}
5510
5511static void
5512update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5513{
5514 const rb_control_frame_t *cfp = GET_EC()->cfp;
5515 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5516 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5517 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5518 if (lines) {
5519 long line = rb_sourceline() - 1;
5520 long count;
5521 VALUE num;
5522 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5523 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5524 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5525 rb_ary_push(lines, LONG2FIX(line + 1));
5526 return;
5527 }
5528 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5529 return;
5530 }
5531 num = RARRAY_AREF(lines, line);
5532 if (!FIXNUM_P(num)) return;
5533 count = FIX2LONG(num) + 1;
5534 if (POSFIXABLE(count)) {
5535 RARRAY_ASET(lines, line, LONG2FIX(count));
5536 }
5537 }
5538 }
5539}
5540
5541static void
5542update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5543{
5544 const rb_control_frame_t *cfp = GET_EC()->cfp;
5545 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5546 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5547 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5548 if (branches) {
5549 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5550 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5551 VALUE counters = RARRAY_AREF(branches, 1);
5552 VALUE num = RARRAY_AREF(counters, idx);
5553 count = FIX2LONG(num) + 1;
5554 if (POSFIXABLE(count)) {
5555 RARRAY_ASET(counters, idx, LONG2FIX(count));
5556 }
5557 }
5558 }
5559}
5560
5561const rb_method_entry_t *
5562rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5563{
5564 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5565
5566 if (!me->def) return NULL; // negative cme
5567
5568 retry:
5569 switch (me->def->type) {
5570 case VM_METHOD_TYPE_ISEQ: {
5571 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5572 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5573 path = rb_iseq_path(iseq);
5574 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5575 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5576 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5577 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5578 break;
5579 }
5580 case VM_METHOD_TYPE_BMETHOD: {
5581 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5582 if (iseq) {
5583 rb_iseq_location_t *loc;
5584 rb_iseq_check(iseq);
5585 path = rb_iseq_path(iseq);
5586 loc = &ISEQ_BODY(iseq)->location;
5587 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5588 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5589 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5590 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5591 break;
5592 }
5593 return NULL;
5594 }
5595 case VM_METHOD_TYPE_ALIAS:
5596 me = me->def->body.alias.original_me;
5597 goto retry;
5598 case VM_METHOD_TYPE_REFINED:
5599 me = me->def->body.refined.orig_me;
5600 if (!me) return NULL;
5601 goto retry;
5602 default:
5603 return NULL;
5604 }
5605
5606 /* found */
5607 if (RB_TYPE_P(path, T_ARRAY)) {
5608 path = rb_ary_entry(path, 1);
5609        if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just in case... */
5610 }
5611 if (resolved_location) {
5612 resolved_location[0] = path;
5613 resolved_location[1] = beg_pos_lineno;
5614 resolved_location[2] = beg_pos_column;
5615 resolved_location[3] = end_pos_lineno;
5616 resolved_location[4] = end_pos_column;
5617 }
5618 return me;
5619}
5620
5621static void
5622update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5623{
5624 const rb_control_frame_t *cfp = GET_EC()->cfp;
5625 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5626 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5627 VALUE rcount;
5628 long count;
5629
5630 me = rb_resolve_me_location(me, 0);
5631 if (!me) return;
5632
5633 rcount = rb_hash_aref(me2counter, (VALUE) me);
5634 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5635 if (POSFIXABLE(count)) {
5636 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5637 }
5638}
5639
5640VALUE
5641rb_get_coverages(void)
5642{
5643 return GET_VM()->coverages;
5644}
5645
5646int
5647rb_get_coverage_mode(void)
5648{
5649 return GET_VM()->coverage_mode;
5650}
5651
5652void
5653rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5654{
5655 GET_VM()->coverages = coverages;
5656 GET_VM()->me2counter = me2counter;
5657 GET_VM()->coverage_mode = mode;
5658}
5659
5660void
5661rb_resume_coverages(void)
5662{
5663 int mode = GET_VM()->coverage_mode;
5664 VALUE me2counter = GET_VM()->me2counter;
5665 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5666 if (mode & COVERAGE_TARGET_BRANCHES) {
5667 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5668 }
5669 if (mode & COVERAGE_TARGET_METHODS) {
5670 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5671 }
5672}
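
/* [Editorial sketch] ext/coverage pairs the functions above roughly like this:
 * register the container and mode with the VM, then install the event hooks
 * (the particular mode flags here are an assumption for illustration):
 */
#if 0
VALUE coverages = rb_hash_new();
VALUE me2counter = rb_ident_hash_new();
rb_set_coverages(coverages, COVERAGE_TARGET_LINES | COVERAGE_TARGET_BRANCHES, me2counter);
rb_resume_coverages();  /* installs the update_*_coverage hooks defined above */
#endif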
5673
5674void
5675rb_suspend_coverages(void)
5676{
5677 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5678 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5679 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5680 }
5681 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5682 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5683 }
5684}
5685
5686/* Make coverage arrays empty so old covered files are no longer tracked. */
5687void
5688rb_reset_coverages(void)
5689{
5690 rb_clear_coverages();
5691 rb_iseq_remove_coverage_all();
5692 GET_VM()->coverages = Qfalse;
5693}
5694
5695VALUE
5696rb_default_coverage(int n)
5697{
5698 VALUE coverage = rb_ary_hidden_new_fill(3);
5699 VALUE lines = Qfalse, branches = Qfalse;
5700 int mode = GET_VM()->coverage_mode;
5701
5702 if (mode & COVERAGE_TARGET_LINES) {
5703 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
5704 }
5705 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5706
5707 if (mode & COVERAGE_TARGET_BRANCHES) {
5708 branches = rb_ary_hidden_new_fill(2);
5709 /* internal data structures for branch coverage:
5710 *
5711 * { branch base node =>
5712 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
5713 * branch target id =>
5714 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
5715 * ...
5716 * }],
5717 * ...
5718 * }
5719 *
5720 * Example:
5721 * { NODE_CASE =>
5722 * [1, 0, 4, 3, {
5723 * NODE_WHEN => [2, 8, 2, 9, 0],
5724 * NODE_WHEN => [3, 8, 3, 9, 1],
5725 * ...
5726 * }],
5727 * ...
5728 * }
5729 */
5730 VALUE structure = rb_hash_new();
5731 rb_obj_hide(structure);
5732 RARRAY_ASET(branches, 0, structure);
5733 /* branch execution counters */
5734 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
5735 }
5736 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5737
5738 return coverage;
5739}
5740
5741static VALUE
5742uninterruptible_exit(VALUE v)
5743{
5744 rb_thread_t *cur_th = GET_THREAD();
5745 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
5746
5747 cur_th->pending_interrupt_queue_checked = 0;
5748 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
5749 RUBY_VM_SET_INTERRUPT(cur_th->ec);
5750 }
5751 return Qnil;
5752}
5753
5754VALUE
5755rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
5756{
5757 VALUE interrupt_mask = rb_ident_hash_new();
5758 rb_thread_t *cur_th = GET_THREAD();
5759
5760 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5761 OBJ_FREEZE_RAW(interrupt_mask);
5762 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5763
5764 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
5765
5766 RUBY_VM_CHECK_INTS(cur_th->ec);
5767 return ret;
5768}
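
/* [Editorial sketch] rb_uninterruptible() runs b_proc with the equivalent of
 * Thread.handle_interrupt(Object => :never) pushed, then re-checks any
 * deferred interrupts on the way out.  Hypothetical use:
 */
#if 0
static VALUE
critical_body(VALUE data)
{
    /* ... must not be interrupted by Thread#raise / Thread#kill ... */
    return Qnil;
}

static VALUE
run_critical(void)
{
    return rb_uninterruptible(critical_body, Qnil);
}
#endif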
#define RUBY_ASSERT_ALWAYS(expr)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
Definition assert.h:167
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
#define RUBY_INTERNAL_EVENT_SWITCH
Thread switched.
Definition event.h:85
int rb_remove_event_hook(rb_event_hook_func_t func)
Removes the passed function from the list of event hooks.
Definition vm_trace.c:293
#define RUBY_EVENT_THREAD_BEGIN
Encountered a new thread.
Definition event.h:53
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Type of event hooks.
Definition event.h:115
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:103
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
Definition event.h:37
#define RUBY_EVENT_THREAD_END
Encountered an end of a thread.
Definition event.h:54
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition class.c:888
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
Definition class.c:2249
ID rb_frame_last_func(void)
Returns the ID of the last method in the call stack.
Definition eval.c:1094
int rb_keyword_given_p(void)
Determines if the current method is given a keyword argument.
Definition eval.c:877
int rb_block_given_p(void)
Determines if the current method is given a block.
Definition eval.c:864
#define rb_str_new2
Old name of rb_str_new_cstr.
Definition string.h:1675
#define ALLOC
Old name of RB_ALLOC.
Definition memory.h:394
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define OBJ_FROZEN
Old name of RB_OBJ_FROZEN.
Definition fl_type.h:145
#define xrealloc
Old name of ruby_xrealloc.
Definition xmalloc.h:56
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define OBJ_FREEZE_RAW
Old name of RB_OBJ_FREEZE_RAW.
Definition fl_type.h:144
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
Definition assume.h:29
#define CLASS_OF
Old name of rb_class_of.
Definition globals.h:203
#define xmalloc
Old name of ruby_xmalloc.
Definition xmalloc.h:53
#define LONG2FIX
Old name of RB_INT2FIX.
Definition long.h:49
#define FIX2INT
Old name of RB_FIX2INT.
Definition int.h:41
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition value_type.h:66
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define Qtrue
Old name of RUBY_Qtrue.
#define NUM2INT
Old name of RB_NUM2INT.
Definition int.h:44
#define INT2NUM
Old name of RB_INT2NUM.
Definition int.h:43
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
Definition long.h:46
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define POSFIXABLE
Old name of RB_POSFIXABLE.
Definition fixnum.h:29
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define FIXNUM_P
Old name of RB_FIXNUM_P.
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
Definition eval.c:298
#define ruby_debug
This variable controls whether the interpreter is in debug mode.
Definition error.h:470
void rb_raise(VALUE exc, const char *fmt,...)
Exception entry point.
Definition error.c:3148
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition eval.c:684
void rb_bug(const char *fmt,...)
Interpreter panic switch.
Definition error.c:794
VALUE rb_eSystemExit
SystemExit exception.
Definition error.c:1084
VALUE rb_eIOError
IOError exception.
Definition io.c:182
VALUE rb_eStandardError
StandardError exception.
Definition error.c:1088
VALUE rb_eTypeError
TypeError exception.
Definition error.c:1091
void rb_frozen_error_raise(VALUE frozen_obj, const char *fmt,...)
Raises an instance of rb_eFrozenError.
Definition error.c:3470
VALUE rb_eFatal
fatal exception.
Definition error.c:1087
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1089
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports always regardless of runtime -W flag.
Definition error.c:411
VALUE rb_exc_new(VALUE etype, const char *ptr, long len)
Creates an instance of the passed exception class.
Definition error.c:1129
VALUE rb_eArgError
ArgumentError exception.
Definition error.c:1092
VALUE rb_ensure(VALUE(*b_proc)(VALUE), VALUE data1, VALUE(*e_proc)(VALUE), VALUE data2)
An equivalent to ensure clause.
Definition eval.c:993
VALUE rb_eThreadError
ThreadError exception.
Definition eval.c:882
void rb_exit(int status)
Terminates the current execution context.
Definition process.c:4510
VALUE rb_eSignal
SignalException exception.
Definition error.c:1086
VALUE rb_obj_alloc(VALUE klass)
Allocates an instance of the given class.
Definition object.c:1939
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
Definition object.c:84
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
Definition object.c:190
VALUE rb_cThread
Thread class.
Definition vm.c:466
VALUE rb_cModule
Module class.
Definition object.c:53
double rb_num2dbl(VALUE num)
Converts an instance of rb_cNumeric into C's double.
Definition object.c:3619
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance (of possibly descendants) of the given class.
Definition object.c:787
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
Definition proc.c:848
void rb_reset_random_seed(void)
Resets the RNG behind rb_genrand_int32()/rb_genrand_real().
Definition random.c:1776
VALUE rb_str_new_frozen(VALUE str)
Creates a frozen copy of the string, if necessary.
Definition string.c:1382
VALUE rb_str_concat(VALUE dst, VALUE src)
Identical to rb_str_append(), except it also accepts an integer as a codepoint.
Definition string.c:3423
#define rb_str_cat_cstr(buf, str)
Identical to rb_str_cat(), except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1656
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
Definition string.h:1514
int rb_thread_interrupted(VALUE thval)
Checks if the thread's execution was recently interrupted.
Definition thread.c:1436
VALUE rb_thread_local_aref(VALUE thread, ID key)
This badly named function reads from a Fiber local storage.
Definition thread.c:3377
VALUE rb_thread_kill(VALUE thread)
Terminates the given thread.
Definition thread.c:2570
#define RUBY_UBF_IO
A special UBF for blocking IO operations.
Definition thread.h:382
VALUE rb_thread_main(void)
Obtains the "main" thread.
Definition thread.c:2809
VALUE rb_exec_recursive(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
"Recursion" API entry point.
void rb_thread_sleep_forever(void)
Blocks indefinitely.
Definition thread.c:1360
void rb_thread_fd_close(int fd)
Notifies a closing of a file descriptor to other threads.
Definition thread.c:2510
void rb_thread_wait_for(struct timeval time)
Identical to rb_thread_sleep(), except it takes struct timeval instead.
Definition thread.c:1404
VALUE rb_thread_stop(void)
Stops the current thread.
Definition thread.c:2721
VALUE rb_exec_recursive_paired(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive(), except it checks for the recursion on the ordered pair of { g,...
void rb_unblock_function_t(void *)
This is the type of UBFs.
Definition thread.h:336
void rb_thread_atfork_before_exec(void)
:FIXME: situation of this function is unclear.
Definition thread.c:4687
void rb_thread_check_ints(void)
Checks for interrupts.
Definition thread.c:1419
VALUE rb_thread_run(VALUE thread)
This is a rb_thread_wakeup() + rb_thread_schedule() combo.
Definition thread.c:2712
VALUE rb_thread_wakeup(VALUE thread)
Marks a given thread as eligible for scheduling.
Definition thread.c:2665
VALUE rb_mutex_unlock(VALUE mutex)
Releases the mutex.
VALUE rb_exec_recursive_paired_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive_outer(), except it checks for the recursion on the ordered pair of { g...
void rb_thread_sleep_deadly(void)
Identical to rb_thread_sleep_forever(), except the thread calling this function is considered "dead" ...
Definition thread.c:1367
void rb_thread_atfork(void)
A pthread_atfork(3posix)-like API.
Definition thread.c:4682
VALUE rb_thread_current(void)
Obtains the "current" thread.
Definition thread.c:2788
int rb_thread_alone(void)
Checks if the calling thread is the only thread that is currently alive.
Definition thread.c:3649
VALUE rb_thread_local_aset(VALUE thread, ID key, VALUE val)
This badly named function writes to Fiber-local storage.
Definition thread.c:3525
void rb_thread_schedule(void)
Tries to switch to another thread.
Definition thread.c:1467
#define RUBY_UBF_PROCESS
A special UBF for blocking process operations.
Definition thread.h:389
VALUE rb_exec_recursive_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
Identical to rb_exec_recursive(), except it calls f for outermost recursion only.
VALUE rb_thread_wakeup_alive(VALUE thread)
Identical to rb_thread_wakeup(), except it doesn't raise on an already killed thread.
Definition thread.c:2674
VALUE rb_mutex_lock(VALUE mutex)
Attempts to lock the mutex.
void rb_thread_sleep(int sec)
Blocks for the given period of time.
Definition thread.c:1442
void rb_timespec_now(struct timespec *ts)
Fills the current time into the given struct.
Definition time.c:1933
struct timeval rb_time_timeval(VALUE time)
Converts an instance of rb_cTime to a struct timeval that represents the identical point of time.
Definition time.c:2844
VALUE rb_ivar_set(VALUE obj, ID name, VALUE val)
Identical to rb_iv_set(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1593
VALUE rb_ivar_get(VALUE obj, ID name)
Identical to rb_iv_get(), except it accepts the name as an ID instead of a C string.
Definition variable.c:1215
VALUE rb_class_path(VALUE mod)
Identical to rb_mod_name(), except it returns #<Class: ...> style inspection for anonymous modules.
Definition variable.c:185
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
int rb_sourceline(void)
Resembles __LINE__.
Definition vm.c:1702
ID rb_check_id(volatile VALUE *namep)
Detects whether the given name is already interned.
Definition symbol.c:1084
VALUE rb_to_symbol(VALUE name)
Identical to rb_intern_str(), except it generates a dynamic symbol if necessary.
Definition string.c:11859
ID rb_to_id(VALUE str)
Identical to rb_intern(), except it takes an instance of rb_cString.
Definition string.c:11849
void rb_define_const(VALUE klass, const char *name, VALUE val)
Defines a Ruby level constant under a namespace.
Definition variable.c:3427
VALUE rb_eIOTimeoutError
Indicates that a timeout has occurred while performing an IO operation.
Definition io.c:183
#define RB_NOGVL_UBF_ASYNC_SAFE
Passing this flag to rb_nogvl() indicates that the passed UBF is async-signal-safe.
Definition thread.h:60
#define RB_NOGVL_INTR_FAIL
Passing this flag to rb_nogvl() prevents it from checking interrupts.
Definition thread.h:48
void * rb_nogvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int flags)
Identical to rb_thread_call_without_gvl(), except it additionally takes "flags" that change the behav...
Definition thread.c:1523
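A sketch of rb_nogvl() with RB_NOGVL_UBF_ASYNC_SAFE, assuming a self-pipe cancellation scheme; the job struct, run_job, cancel_job, and call_job are hypothetical:

#include <ruby.h>
#include <ruby/thread.h>
#include <unistd.h>

struct job { int cancel_fd; int done; };

static void *
run_job(void *p)
{
    struct job *j = p;
    /* long-running native work; the GVL is not held here */
    j->done = 1;
    return NULL;
}

static void
cancel_job(void *p)
{
    struct job *j = p;
    /* only async-signal-safe calls here, hence the flag below */
    (void)write(j->cancel_fd, "x", 1);
}

static void
call_job(struct job *j)
{
    rb_nogvl(run_job, j, cancel_job, j, RB_NOGVL_UBF_ASYNC_SAFE);
}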
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
(Re-)acquires the GVL.
Definition thread.c:1756
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Identical to rb_thread_call_without_gvl(), except it does not interface with signals etc.
Definition thread.c:1654
void * rb_thread_call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Allows the passed function to run in parallel with other Ruby threads.
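A minimal sketch combining the two entry points: the blocking read runs with the GVL released (RUBY_UBF_IO permits interruption), and rb_thread_call_with_gvl() briefly re-enters Ruby from inside that region. All helper names are hypothetical:

#include <ruby.h>
#include <ruby/thread.h>
#include <unistd.h>

struct read_args { int fd; char buf[4096]; ssize_t n; };

static void *
log_progress(void *msg)
{
    rb_funcall(rb_stderr, rb_intern("puts"), 1,
               rb_str_new_cstr((const char *)msg));
    return NULL;
}

static void *
blocking_read(void *p)
{
    struct read_args *a = p;
    a->n = read(a->fd, a->buf, sizeof(a->buf));  /* may block */
    rb_thread_call_with_gvl(log_progress, (void *)"read done");
    return NULL;
}

static VALUE
read_one_chunk(int fd)
{
    struct read_args a = { fd, {0}, -1 };
    rb_thread_call_without_gvl(blocking_read, &a, RUBY_UBF_IO, NULL);
    return a.n < 0 ? Qnil : rb_str_new(a.buf, a.n);
}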
VALUE rb_sprintf(const char *fmt,...)
Ruby's extended sprintf(3).
Definition sprintf.c:1219
VALUE rb_str_catf(VALUE dst, const char *fmt,...)
Identical to rb_sprintf(), except it renders the output to the specified object rather than creating ...
Definition sprintf.c:1242
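A small sketch of the pair; PRIsVALUE is the printf conversion for embedding a VALUE, and describe is a hypothetical helper:

#include <ruby.h>

static VALUE
describe(VALUE name, int count)
{
    VALUE s = rb_sprintf("name=%"PRIsVALUE, name);  /* new String */
    rb_str_catf(s, ", count=%d", count);            /* append in place */
    return s;
}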
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Shim for block function parameters.
Definition iterator.h:58
VALUE rb_yield(VALUE val)
Yields the block.
Definition vm_eval.c:1358
void rb_throw_obj(VALUE tag, VALUE val)
Identical to rb_throw(), except it allows arbitrary Ruby object to become a tag.
Definition vm_eval.c:2278
void rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
Destructively overwrites an fdset with another.
void rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
Identical to rb_fd_copy(), except it copies an unlimited number of file descriptors.
void rb_fd_term(rb_fdset_t *f)
Destroys the rb_fdset_t, releasing any memory and resources it used.
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
Definition memory.h:366
#define ALLOCA_N(type, n)
Handy macro to allocate an array of n elements of type on the machine stack.
Definition memory.h:286
#define MEMZERO(p, type, n)
Handy macro to erase a region of memory.
Definition memory.h:354
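A sketch of the typed memory macros used together on a stack scratch buffer; copy_args is hypothetical:

#include <ruby.h>

static void
copy_args(const VALUE *src, long n)
{
    VALUE *tmp = ALLOCA_N(VALUE, n);  /* n VALUEs on the machine stack */
    MEMZERO(tmp, VALUE, n);           /* zero-fill the region */
    MEMCPY(tmp, src, VALUE, n);       /* typed wrapper around memcpy */
    /* ... operate on tmp without touching src ... */
}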
VALUE rb_thread_create(VALUE(*f)(void *g), void *g)
Creates a rb_cThread instance.
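A minimal sketch of spawning a Ruby thread from C; worker_body and spawn_worker are hypothetical:

#include <ruby.h>

static VALUE
worker_body(void *arg)
{
    rb_thread_sleep(1);  /* stand-in for real work */
    return Qnil;
}

static VALUE
spawn_worker(void)
{
    return rb_thread_create(worker_body, NULL);  /* returns a Thread */
}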
#define rb_fd_isset
Queries if the given fd is in the rb_fdset_t.
Definition posix.h:60
#define rb_fd_select
Waits for multiple file descriptors at once.
Definition posix.h:66
#define rb_fd_init
Initialises the given rb_fdset_t.
Definition posix.h:63
#define rb_fd_set
Adds the given fd to the rb_fdset_t.
Definition posix.h:54
#define rb_fd_zero
Clears the given rb_fdset_t.
Definition posix.h:51
#define rb_fd_clr
Unsets the given fd from the rb_fdset_t.
Definition posix.h:57
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:68
#define RARRAY_CONST_PTR_TRANSIENT
Just another name of rb_array_const_ptr_transient.
Definition rarray.h:70
#define RARRAY_AREF(a, i)
Queries the element of the array a at index i.
Definition rarray.h:583
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
Definition rarray.h:69
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
#define RCLASS_SUPER
Just another name of rb_class_get_superclass.
Definition rclass.h:44
#define DATA_PTR(obj)
Convenient getter macro.
Definition rdata.h:71
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
Definition rhash.h:92
#define StringValueCStr(v)
Identical to StringValuePtr, except it additionally checks for the contents for viability as a C stri...
Definition rstring.h:95
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
Definition rtypeddata.h:79
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
Definition rtypeddata.h:507
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
Definition rtypeddata.h:441
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of takin...
Definition rtypeddata.h:489
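A compact sketch of the TypedData triad wrapping a C struct; the point type and its accessors are hypothetical, and point_alloc would be installed with rb_define_alloc_func():

#include <ruby.h>

struct point { double x, y; };

static const rb_data_type_t point_type = {
    .wrap_struct_name = "point",
    .function = { .dfree = RUBY_TYPED_DEFAULT_FREE },
    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

static VALUE
point_alloc(VALUE klass)
{
    struct point *p;
    return TypedData_Make_Struct(klass, struct point, &point_type, p);
}

static VALUE
point_x(VALUE self)
{
    struct point *p;
    TypedData_Get_Struct(self, struct point, &point_type, p);
    return DBL2NUM(p->x);
}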
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a Ruby thread.
Definition thread.c:5411
int ruby_snprintf(char *str, size_t n, char const *fmt,...)
Our own locale-insensitive version of snprintf(3).
Definition sprintf.c:1045
#define RB_PASS_CALLED_KEYWORDS
Pass keywords if current method is called with keywords, useful for argument delegation.
Definition scan_args.h:78
Scheduler APIs.
VALUE rb_fiber_scheduler_current(void)
Identical to rb_fiber_scheduler_get(), except it also returns RUBY_Qnil in case of a blocking fiber.
Definition scheduler.c:203
VALUE rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
Non-blocking wait for the passed "blocker", which is for instance Thread.join or Mutex....
Definition scheduler.c:367
VALUE rb_fiber_scheduler_set(VALUE scheduler)
Destructively assigns the passed scheduler to that of the current thread that is calling this functio...
Definition scheduler.c:165
VALUE rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
Wakes up a fiber previously blocked using rb_fiber_scheduler_block().
Definition scheduler.c:386
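A hedged sketch of the usual pattern in a C-level synchronisation primitive: prefer the scheduler's non-blocking path when one is installed. wait_on and its blocker argument are hypothetical:

#include <ruby.h>
#include <ruby/fiber/scheduler.h>

static VALUE
wait_on(VALUE blocker, VALUE timeout)
{
    VALUE scheduler = rb_fiber_scheduler_current();
    if (scheduler != Qnil) {
        /* park this fiber; some other fiber later calls
         * rb_fiber_scheduler_unblock(scheduler, blocker, fiber) */
        return rb_fiber_scheduler_block(scheduler, blocker, timeout);
    }
    return Qnil;  /* fall back to a native blocking wait (not shown) */
}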
int rb_thread_fd_select(int nfds, rb_fdset_t *rfds, rb_fdset_t *wfds, rb_fdset_t *efds, struct timeval *timeout)
Waits for multiple file descriptors at once.
Definition thread.c:4181
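A sketch of waiting for readability on one descriptor through the rb_fdset_t machinery; wait_readable is hypothetical:

#include <ruby.h>
#include <sys/time.h>

static int
wait_readable(int fd, struct timeval *tv)
{
    rb_fdset_t set;
    int n;

    rb_fd_init(&set);
    rb_fd_set(fd, &set);
    n = rb_thread_fd_select(fd + 1, &set, NULL, NULL, tv);
    rb_fd_term(&set);
    return n;  /* >0: ready, 0: timeout, <0: error */
}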
#define rb_fd_resize(n, f)
Does nothing (defined for compatibility).
Definition select.h:43
#define RTEST
This is an old name of RB_TEST.
#define _(args)
This was a transition path from K&R to ANSI.
Definition stdarg.h:35
The data structure which wraps the fd_set bitmap used by select(2).
Definition largesize.h:71
int maxfd
Maximum allowed number of FDs.
Definition largesize.h:72
fd_set * fdset
File descriptors buffer.
Definition largesize.h:73
int capa
Maximum allowed number of FDs.
Definition win32.h:50
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition method.h:134
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Blocks until the current thread obtains a lock.
Definition thread.c:299
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Releases a lock.
Definition thread.c:305
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_destroy.
void rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
Fills the passed lock with an initial value.
Definition thread.c:287
void rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
Destroys the passed mutex.
Definition thread.c:293
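A sketch of guarding C-level state with a native lock; the counter helpers are hypothetical, and note that these locks do not interact with the GVL:

#include <ruby/thread_native.h>

static rb_nativethread_lock_t counter_lock;
static long counter;

static void
counter_setup(void)
{
    rb_nativethread_lock_initialize(&counter_lock);
}

static void
counter_bump(void)
{
    rb_nativethread_lock_lock(&counter_lock);    /* blocks until acquired */
    counter++;
    rb_nativethread_lock_unlock(&counter_lock);
}

static void
counter_teardown(void)
{
    rb_nativethread_lock_destroy(&counter_lock);
}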
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52