Ruby 3.2.1p31 (2023-02-08 revision 31819e82c88c6f8ecfaeb162519bfa26a14b21fd)
thread_win32.c
/* -*-c-*- */
/**********************************************************************

  thread_win32.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include <process.h>

#define TIME_QUANTUM_USEC (10 * 1000)
#define RB_CONDATTR_CLOCK_MONOTONIC 1 /* no effect */

#undef Sleep

#define native_thread_yield() Sleep(0)
#define unregister_ubf_list(th)
#define ubf_wakeup_all_threads() do {} while (0)
#define ubf_threads_empty() (1)
#define ubf_timer_disarm() do {} while (0)
#define ubf_list_atfork() do {} while (0)
static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;

static int w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th);

rb_internal_thread_event_hook_t *
rb_internal_thread_add_event_hook(rb_internal_thread_event_callback callback, rb_event_flag_t internal_event, void *user_data)
{
    // not implemented
    return NULL;
}

bool
rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t * hook)
{
    // not implemented
    return false;
}

RBIMPL_ATTR_NORETURN()
static void
w32_error(const char *func)
{
    LPVOID lpMsgBuf;
    DWORD err = GetLastError();
    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL,
                      err,
                      MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
                      (LPTSTR) & lpMsgBuf, 0, NULL) == 0)
        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL,
                      err,
                      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                      (LPTSTR) & lpMsgBuf, 0, NULL);
    rb_bug("%s: %s", func, (char*)lpMsgBuf);
    UNREACHABLE;
}

#define W32_EVENT_DEBUG 0

#if W32_EVENT_DEBUG
#define w32_event_debug printf
#else
#define w32_event_debug if (0) printf
#endif

static int
w32_mutex_lock(HANDLE lock, bool try)
{
    DWORD result;
    while (1) {
        // RUBY_DEBUG_LOG() is not available because RUBY_DEBUG_LOG() calls it.
        w32_event_debug("lock:%p\n", lock);

        result = w32_wait_events(&lock, 1, try ? 0 : INFINITE, 0);
        switch (result) {
          case WAIT_OBJECT_0:
            /* get mutex object */
            w32_event_debug("locked lock:%p\n", lock);
            return 0;

          case WAIT_OBJECT_0 + 1:
            /* interrupt */
            errno = EINTR;
            w32_event_debug("interrupted lock:%p\n", lock);
            return 0;

          case WAIT_TIMEOUT:
            w32_event_debug("timeout lock:%p\n", lock);
            return EBUSY;

          case WAIT_ABANDONED:
            rb_bug("win32_mutex_lock: WAIT_ABANDONED");
            break;

          default:
            rb_bug("win32_mutex_lock: unknown result (%ld)", result);
            break;
        }
    }
    return 0;
}

static HANDLE
w32_mutex_create(void)
{
    HANDLE lock = CreateMutex(NULL, FALSE, NULL);
    if (lock == NULL) {
        w32_error("rb_native_mutex_initialize");
    }
    return lock;
}

#define GVL_DEBUG 0

static void
thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
{
    w32_mutex_lock(sched->lock, false);
    if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
}

static void
thread_sched_to_waiting(struct rb_thread_sched *sched)
{
    ReleaseMutex(sched->lock);
}

static void
thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
{
    thread_sched_to_waiting(sched);
    native_thread_yield();
    thread_sched_to_running(sched, th);
}
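
/* The scheduler lock doubles as the GVL here: a thread holds the Win32
 * mutex while running Ruby code (thread_sched_to_running) and releases it
 * around blocking operations (thread_sched_to_waiting).
 * thread_sched_yield() drops the mutex and calls Sleep(0), giving another
 * ready thread a chance to run before re-acquiring it. */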

void
rb_thread_sched_init(struct rb_thread_sched *sched)
{
    if (GVL_DEBUG) fprintf(stderr, "sched init\n");
    sched->lock = w32_mutex_create();
}

#if 0
void
rb_thread_sched_destroy(struct rb_thread_sched *sched)
{
    if (GVL_DEBUG) fprintf(stderr, "sched destroy\n");
    CloseHandle(sched->lock);
}
#endif

rb_thread_t *
ruby_thread_from_native(void)
{
    return TlsGetValue(ruby_native_thread_key);
}

int
ruby_thread_set_native(rb_thread_t *th)
{
    if (th && th->ec) {
        rb_ractor_set_current_ec(th->ractor, th->ec);
    }
    return TlsSetValue(ruby_native_thread_key, th);
}

void
Init_native_thread(rb_thread_t *main_th)
{
    if ((ruby_current_ec_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
        rb_bug("TlsAlloc() for ruby_current_ec_key fails");
    }
    if ((ruby_native_thread_key = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
        rb_bug("TlsAlloc() for ruby_native_thread_key fails");
    }

    // setup main thread

    ruby_thread_set_native(main_th);
    main_th->nt->interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    DuplicateHandle(GetCurrentProcess(),
                    GetCurrentThread(),
                    GetCurrentProcess(),
                    &main_th->nt->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);

    RUBY_DEBUG_LOG("initial thread th:%u thid:%p, event: %p",
                   rb_th_serial(main_th),
                   main_th->nt->thread_id,
                   main_th->nt->interrupt_event);
}
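
/* GetCurrentThread() returns a pseudo-handle that is only valid in the
 * calling thread, so DuplicateHandle() above is needed to obtain a real
 * handle that other threads can use to wait on or query the main thread. */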

static int
w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
{
    HANDLE *targets = events;
    HANDLE intr;
    const int initcount = count;
    DWORD ret;

    w32_event_debug("events:%p, count:%d, timeout:%ld, th:%u\n",
                    events, count, timeout, th ? rb_th_serial(th) : UINT_MAX);

    if (th && (intr = th->nt->interrupt_event)) {
        if (ResetEvent(intr) && (!RUBY_VM_INTERRUPTED(th->ec) || SetEvent(intr))) {
            targets = ALLOCA_N(HANDLE, count + 1);
            memcpy(targets, events, sizeof(HANDLE) * count);

            targets[count++] = intr;
            w32_event_debug("handle:%p (count:%d, intr)\n", intr, count);
        }
        else if (intr == th->nt->interrupt_event) {
            w32_error("w32_wait_events");
        }
    }

    w32_event_debug("WaitForMultipleObjects start count:%d\n", count);
    ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
    w32_event_debug("WaitForMultipleObjects end ret:%lu\n", ret);

    if (ret == (DWORD)(WAIT_OBJECT_0 + initcount) && th) {
        errno = EINTR;
    }
    if (ret == WAIT_FAILED && W32_EVENT_DEBUG) {
        int i;
        DWORD dmy;
        for (i = 0; i < count; i++) {
            w32_event_debug("i:%d %s\n", i, GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
        }
    }
    return ret;
}
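
/* Sketch of the interruption protocol implemented above (caller names are
 * hypothetical): the thread's interrupt event is appended as the last
 * handle, so a result of WAIT_OBJECT_0 + initcount means "woken by
 * interrupt" and errno is set to EINTR, POSIX-style.
 *
 *     HANDLE h = some_event;                      // handle to wait on
 *     int r = w32_wait_events(&h, 1, 100, GET_THREAD());
 *     if (r == WAIT_OBJECT_0) { ... }             // h signalled
 *     else if (r == WAIT_OBJECT_0 + 1) { ... }    // interrupted, errno == EINTR
 *     else if (r == WAIT_TIMEOUT) { ... }         // 100ms elapsed
 */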

static void ubf_handle(void *ptr);
#define ubf_select ubf_handle

int
rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
{
    return w32_wait_events(events, num, timeout, ruby_thread_from_native());
}

int
rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
{
    int ret;
    rb_thread_t *th = GET_THREAD();

    BLOCKING_REGION(th, ret = rb_w32_wait_events_blocking(events, num, timeout),
                    ubf_handle, ruby_thread_from_native(), FALSE);
    return ret;
}

static void
w32_close_handle(HANDLE handle)
{
    if (CloseHandle(handle) == 0) {
        w32_error("w32_close_handle");
    }
}

static void
w32_resume_thread(HANDLE handle)
{
    if (ResumeThread(handle) == (DWORD)-1) {
        w32_error("w32_resume_thread");
    }
}

#ifdef _MSC_VER
#define HAVE__BEGINTHREADEX 1
#else
#undef HAVE__BEGINTHREADEX
#endif

#ifdef HAVE__BEGINTHREADEX
#define start_thread (HANDLE)_beginthreadex
#define thread_errno errno
typedef unsigned long (__stdcall *w32_thread_start_func)(void*);
#else
#define start_thread CreateThread
#define thread_errno rb_w32_map_errno(GetLastError())
typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
#endif

static HANDLE
w32_create_thread(DWORD stack_size, w32_thread_start_func func, void *val)
{
    return start_thread(0, stack_size, func, val, CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 0);
}
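
/* CREATE_SUSPENDED lets the caller finish wiring up rb_thread_t before the
 * new thread runs (it is started later via w32_resume_thread()), and
 * STACK_SIZE_PARAM_IS_A_RESERVATION makes stack_size the reserved stack
 * size rather than the initial commit size. */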

int
rb_w32_sleep(unsigned long msec)
{
    return w32_wait_events(0, 0, msec, ruby_thread_from_native());
}

int WINAPI
rb_w32_Sleep(unsigned long msec)
{
    int ret;
    rb_thread_t *th = GET_THREAD();

    BLOCKING_REGION(th, ret = rb_w32_sleep(msec),
                    ubf_handle, ruby_thread_from_native(), FALSE);
    return ret;
}

static DWORD
hrtime2msec(rb_hrtime_t hrt)
{
    return (DWORD)hrt / (DWORD)RB_HRTIME_PER_MSEC;
}

static void
native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
{
    const volatile DWORD msec = rel ? hrtime2msec(*rel) : INFINITE;

    THREAD_BLOCKING_BEGIN(th);
    {
        DWORD ret;

        rb_native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = ubf_handle;
        th->unblock.arg = th;
        rb_native_mutex_unlock(&th->interrupt_lock);

        if (RUBY_VM_INTERRUPTED(th->ec)) {
            /* interrupted. return immediate */
        }
        else {
            RUBY_DEBUG_LOG("start msec:%lu", msec);
            ret = w32_wait_events(0, 0, msec, th);
            RUBY_DEBUG_LOG("done ret:%lu", ret);
            (void)ret;
        }

        rb_native_mutex_lock(&th->interrupt_lock);
        th->unblock.func = 0;
        th->unblock.arg = 0;
        rb_native_mutex_unlock(&th->interrupt_lock);
    }
    THREAD_BLOCKING_END(th);
}

void
rb_native_mutex_lock(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    w32_mutex_lock(lock->mutex, false);
#else
    EnterCriticalSection(&lock->crit);
#endif
}

int
rb_native_mutex_trylock(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    return w32_mutex_lock(lock->mutex, true);
#else
    return TryEnterCriticalSection(&lock->crit) == 0 ? EBUSY : 0;
#endif
}

void
rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    RUBY_DEBUG_LOG("lock:%p", lock->mutex);
    ReleaseMutex(lock->mutex);
#else
    LeaveCriticalSection(&lock->crit);
#endif
}

void
rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    lock->mutex = w32_mutex_create();
    /* thread_debug("initialize mutex: %p\n", lock->mutex); */
#else
    InitializeCriticalSection(&lock->crit);
#endif
}

void
rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
{
#ifdef USE_WIN32_MUTEX
    w32_close_handle(lock->mutex);
#else
    DeleteCriticalSection(&lock->crit);
#endif
}

struct cond_event_entry {
    struct cond_event_entry* next;
    struct cond_event_entry* prev;
    HANDLE event;
};
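
/* A native condition variable is a circular doubly-linked list of waiters,
 * with the rb_nativethread_cond_t itself serving as the sentinel head node;
 * each waiter enqueues a cond_event_entry carrying its own auto-reset
 * event. signal detaches and wakes the first entry, broadcast walks the
 * whole list; in both cases the list may only be touched while the
 * associated mutex is held. */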

void
rb_native_cond_signal(rb_nativethread_cond_t *cond)
{
    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    if (e != head) {
        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        prev->next = next;
        next->prev = prev;
        e->next = e->prev = e;

        SetEvent(e->event);
    }
}

void
rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
{
    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    while (e != head) {
        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        SetEvent(e->event);

        prev->next = next;
        next->prev = prev;
        e->next = e->prev = e;

        e = next;
    }
}

static int
native_cond_timedwait_ms(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
{
    DWORD r;
    struct cond_event_entry entry;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    entry.event = CreateEvent(0, FALSE, FALSE, 0);

    /* cond is guarded by mutex */
    entry.next = head;
    entry.prev = head->prev;
    head->prev->next = &entry;
    head->prev = &entry;

    rb_native_mutex_unlock(mutex);
    {
        r = WaitForSingleObject(entry.event, msec);
        if ((r != WAIT_OBJECT_0) && (r != WAIT_TIMEOUT)) {
            rb_bug("rb_native_cond_wait: WaitForSingleObject returns %lu", r);
        }
    }
    rb_native_mutex_lock(mutex);

    entry.prev->next = entry.next;
    entry.next->prev = entry.prev;

    w32_close_handle(entry.event);
    return (r == WAIT_OBJECT_0) ? 0 : ETIMEDOUT;
}
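
/* These primitives combine into the usual monitor pattern; a minimal
 * sketch of a caller (lock, cond and ready are hypothetical):
 *
 *     rb_native_mutex_lock(&lock);
 *     while (!ready) {
 *         rb_native_cond_wait(&cond, &lock);   // releases lock while waiting
 *     }
 *     rb_native_mutex_unlock(&lock);
 *
 * No wakeup can be lost in the unlocked window: the waiter's event is
 * already enqueued before the mutex is released, and SetEvent() on an
 * auto-reset event that nobody is waiting on yet simply leaves it
 * signalled until the wait begins. */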

void
rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
{
    native_cond_timedwait_ms(cond, mutex, INFINITE);
}

static unsigned long
abs_timespec_to_timeout_ms(const struct timespec *ts)
{
    struct timeval tv;
    struct timeval now;

    gettimeofday(&now, NULL);
    tv.tv_sec = ts->tv_sec;
    tv.tv_usec = ts->tv_nsec / 1000;

    if (!rb_w32_time_subtract(&tv, &now))
        return 0;

    return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
}

static int
native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, const struct timespec *ts)
{
    unsigned long timeout_ms;

    timeout_ms = abs_timespec_to_timeout_ms(ts);
    if (!timeout_ms)
        return ETIMEDOUT;

    return native_cond_timedwait_ms(cond, mutex, timeout_ms);
}

static struct timespec native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel);

void
rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
{
    struct timespec rel = {
        .tv_sec = msec / 1000,
        .tv_nsec = (msec % 1000) * 1000 * 1000,
    };
    struct timespec ts = native_cond_timeout(cond, rel);
    native_cond_timedwait(cond, mutex, &ts);
}

static struct timespec
native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
{
    int ret;
    struct timeval tv;
    struct timespec timeout;
    struct timespec now;

    ret = gettimeofday(&tv, 0);
    if (ret != 0)
        rb_sys_fail(0);
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}
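
/* rb_native_cond_timedwait() thus makes a round trip: the relative timeout
 * becomes an absolute deadline via native_cond_timeout() (clamped to
 * TIMET_MAX on overflow), and abs_timespec_to_timeout_ms() converts it
 * back to the milliseconds still remaining. For example, msec = 1500
 * yields rel = {1, 500000000} and a deadline 1.5s past gettimeofday(). */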

void
rb_native_cond_initialize(rb_nativethread_cond_t *cond)
{
    cond->next = (struct cond_event_entry *)cond;
    cond->prev = (struct cond_event_entry *)cond;
}

void
rb_native_cond_destroy(rb_nativethread_cond_t *cond)
{
    /* */
}

void
ruby_init_stack(volatile VALUE *addr)
{
}

#define CHECK_ERR(expr) \
    {if (!(expr)) {rb_bug("err: %lu - %s", GetLastError(), #expr);}}

COMPILER_WARNING_PUSH
#if defined(__GNUC__)
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#endif
static inline SIZE_T
query_memory_basic_info(PMEMORY_BASIC_INFORMATION mi)
{
    return VirtualQuery(mi, mi, sizeof(*mi));
}
COMPILER_WARNING_POP

static void
native_thread_init_stack(rb_thread_t *th)
{
    MEMORY_BASIC_INFORMATION mi;
    char *base, *end;
    DWORD size, space;

    CHECK_ERR(query_memory_basic_info(&mi));
    base = mi.AllocationBase;
    end = mi.BaseAddress;
    end += mi.RegionSize;
    size = end - base;
    space = size / 5;
    if (space > 1024*1024) space = 1024*1024;
    th->ec->machine.stack_start = (VALUE *)end - 1;
    th->ec->machine.stack_maxsize = size - space;
}
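
/* VirtualQuery() on an address inside the current stack (mi itself lives
 * there) reports the stack's AllocationBase and region extent, which is
 * how the machine stack is bounded without pthread_attr-style helpers.
 * One fifth of the stack, capped at 1MB, is held back, presumably so
 * Ruby's own stack-overflow check fires before the OS guard page is hit. */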

#ifndef InterlockedExchangePointer
#define InterlockedExchangePointer(t, v) \
    (void *)InterlockedExchange((long *)(t), (long)(v))
#endif
static void
native_thread_destroy(rb_thread_t *th)
{
    HANDLE intr = InterlockedExchangePointer(&th->nt->interrupt_event, 0);
    RUBY_DEBUG_LOG("close handle intr:%p, thid:%p\n", intr, th->nt->thread_id);
    w32_close_handle(intr);
}

static unsigned long __stdcall
thread_start_func_1(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    volatile HANDLE thread_id = th->nt->thread_id;

    native_thread_init_stack(th);
    th->nt->interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    /* run */
    RUBY_DEBUG_LOG("thread created th:%u, thid: %p, event: %p",
                   rb_th_serial(th), th->nt->thread_id, th->nt->interrupt_event);

    thread_start_func_2(th, th->ec->machine.stack_start);

    w32_close_handle(thread_id);
    RUBY_DEBUG_LOG("thread deleted th:%u", rb_th_serial(th));
    return 0;
}

static int
native_thread_create(rb_thread_t *th)
{
    const size_t stack_size = th->vm->default_params.thread_machine_stack_size + th->vm->default_params.thread_vm_stack_size;
    th->nt = ZALLOC(struct rb_native_thread);
    th->nt->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);

    if ((th->nt->thread_id) == 0) {
        return thread_errno;
    }

    w32_resume_thread(th->nt->thread_id);

    if (USE_RUBY_DEBUG_LOG) {
        Sleep(0);
        RUBY_DEBUG_LOG("th:%u thid:%p intr:%p), stack size: %"PRIuSIZE"",
                       rb_th_serial(th), th->nt->thread_id,
                       th->nt->interrupt_event, stack_size);
    }
    return 0;
}

static void
native_thread_join(HANDLE th)
{
    w32_wait_events(&th, 1, INFINITE, 0);
}

#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
    int priority = th->priority;
    if (th->priority > 0) {
        priority = THREAD_PRIORITY_ABOVE_NORMAL;
    }
    else if (th->priority < 0) {
        priority = THREAD_PRIORITY_BELOW_NORMAL;
    }
    else {
        priority = THREAD_PRIORITY_NORMAL;
    }

    SetThreadPriority(th->nt->thread_id, priority);
}

#endif /* USE_NATIVE_THREAD_PRIORITY */

int rb_w32_select_with_thread(int, fd_set *, fd_set *, fd_set *, struct timeval *, void *); /* @internal */

static int
native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
{
    fd_set *r = NULL, *w = NULL, *e = NULL;
    if (readfds) {
        rb_fd_resize(n - 1, readfds);
        r = rb_fd_ptr(readfds);
    }
    if (writefds) {
        rb_fd_resize(n - 1, writefds);
        w = rb_fd_ptr(writefds);
    }
    if (exceptfds) {
        rb_fd_resize(n - 1, exceptfds);
        e = rb_fd_ptr(exceptfds);
    }
    return rb_w32_select_with_thread(n, r, w, e, timeout, th);
}

/* @internal */
int
rb_w32_check_interrupt(rb_thread_t *th)
{
    return w32_wait_events(0, 0, 0, th);
}

static void
ubf_handle(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    RUBY_DEBUG_LOG("th:%u\n", rb_th_serial(th));

    if (!SetEvent(th->nt->interrupt_event)) {
        w32_error("ubf_handle");
    }
}

int rb_w32_set_thread_description(HANDLE th, const WCHAR *name);
int rb_w32_set_thread_description_str(HANDLE th, VALUE name);
#define native_set_another_thread_name rb_w32_set_thread_description_str

static struct {
    HANDLE id;
    HANDLE lock;
} timer_thread;
#define TIMER_THREAD_CREATED_P() (timer_thread.id != 0)

static unsigned long __stdcall
timer_thread_func(void *dummy)
{
    rb_vm_t *vm = GET_VM();
    RUBY_DEBUG_LOG("start");
    rb_w32_set_thread_description(GetCurrentThread(), L"ruby-timer-thread");
    while (WaitForSingleObject(timer_thread.lock,
                               TIME_QUANTUM_USEC/1000) == WAIT_TIMEOUT) {
        vm->clock++;
        ruby_sigchld_handler(vm); /* probably no-op */
        rb_threadptr_check_signal(vm->ractor.main_thread);
    }
    RUBY_DEBUG_LOG("end");
    return 0;
}
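
/* timer_thread.lock is an event used as a shutdown flag, not a mutex: it
 * stays unsignalled while the VM is alive, so the wait above times out
 * every TIME_QUANTUM_USEC/1000 (10) milliseconds and drives the signal
 * check; native_stop_timer_thread() sets the event to end the loop. */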

void
rb_thread_wakeup_timer_thread(int sig)
{
    /* do nothing */
}

static VALUE
rb_thread_start_unblock_thread(void)
{
    return Qfalse; /* no-op */
}

static void
rb_thread_create_timer_thread(void)
{
    if (timer_thread.id == 0) {
        if (!timer_thread.lock) {
            timer_thread.lock = CreateEvent(0, TRUE, FALSE, 0);
        }
        timer_thread.id = w32_create_thread(1024 + (USE_RUBY_DEBUG_LOG ? BUFSIZ : 0),
                                            timer_thread_func, 0);
        w32_resume_thread(timer_thread.id);
    }
}

static int
native_stop_timer_thread(void)
{
    int stopped = --system_working <= 0;
    if (stopped) {
        SetEvent(timer_thread.lock);
        native_thread_join(timer_thread.id);
        CloseHandle(timer_thread.lock);
        timer_thread.lock = 0;
    }
    return stopped;
}

static void
native_reset_timer_thread(void)
{
    if (timer_thread.id) {
        CloseHandle(timer_thread.id);
        timer_thread.id = 0;
    }
}

int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    return rb_ec_raised_p(th->ec, RAISED_STACKOVERFLOW);
}

#if defined(__MINGW32__)
LONG WINAPI
rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *exception)
{
    if (exception->ExceptionRecord->ExceptionCode == EXCEPTION_STACK_OVERFLOW) {
        rb_ec_raised_set(GET_EC(), RAISED_STACKOVERFLOW);
        raise(SIGSEGV);
    }
    return EXCEPTION_CONTINUE_SEARCH;
}
#endif

#ifdef RUBY_ALLOCA_CHKSTK
void
ruby_alloca_chkstk(size_t len, void *sp)
{
    if (ruby_stack_length(NULL) * sizeof(VALUE) >= len) {
        rb_execution_context_t *ec = GET_EC();
        if (!rb_ec_raised_p(ec, RAISED_STACKOVERFLOW)) {
            rb_ec_raised_set(ec, RAISED_STACKOVERFLOW);
            rb_exc_raise(sysstack_error);
        }
    }
}
#endif

int
rb_reserved_fd_p(int fd)
{
    return 0;
}

int
rb_sigwait_fd_get(rb_thread_t *th)
{
    return -1; /* TODO */
}

NORETURN(void rb_sigwait_fd_put(rb_thread_t *, int));
void
rb_sigwait_fd_put(rb_thread_t *th, int fd)
{
    rb_bug("not implemented, should not be called");
}

NORETURN(void rb_sigwait_sleep(const rb_thread_t *, int, const rb_hrtime_t *));
void
rb_sigwait_sleep(const rb_thread_t *th, int fd, const rb_hrtime_t *rel)
{
    rb_bug("not implemented, should not be called");
}

rb_nativethread_id_t
rb_nativethread_self(void)
{
    return GetCurrentThread();
}

static void
native_set_thread_name(rb_thread_t *th)
{
}

static VALUE
native_thread_native_thread_id(rb_thread_t *th)
{
    DWORD tid = GetThreadId(th->nt->thread_id);
    if (tid == 0) rb_sys_fail("GetThreadId");
    return ULONG2NUM(tid);
}
#define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */