pthread_stop_world.c 46 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363
  1. /*
  2. * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
  3. * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
  4. * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
  5. * Copyright (c) 2000-2009 by Hewlett-Packard Development Company.
  6. * All rights reserved.
  7. *
  8. * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
  9. * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
  10. *
  11. * Permission is hereby granted to use or copy this program
  12. * for any purpose, provided the above notices are retained on all copies.
  13. * Permission to modify the code and to distribute modified code is granted,
  14. * provided the above notices are retained, and a notice that the code was
  15. * modified is included with the above copyright notice.
  16. */
  17. #include "private/pthread_support.h"
  18. #if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS) && \
  19. !defined(GC_DARWIN_THREADS) && !defined(SN_TARGET_ORBIS) \
  20. && !defined(SN_TARGET_PSP2)
  21. #ifdef NACL
  22. # include <unistd.h>
  23. # include <sys/time.h>
  24. STATIC int GC_nacl_num_gc_threads = 0;
  25. STATIC __thread int GC_nacl_thread_idx = -1;
  26. STATIC volatile int GC_nacl_park_threads_now = 0;
  27. STATIC volatile pthread_t GC_nacl_thread_parker = -1;
  28. GC_INNER __thread GC_thread GC_nacl_gc_thread_self = NULL;
  29. volatile int GC_nacl_thread_parked[MAX_NACL_GC_THREADS];
  30. int GC_nacl_thread_used[MAX_NACL_GC_THREADS];
  31. #elif defined(GC_OPENBSD_UTHREADS)
  32. # include <pthread_np.h>
  33. #else /* !GC_OPENBSD_UTHREADS && !NACL */
  34. #include <signal.h>
  35. #include <semaphore.h>
  36. #include <errno.h>
  37. #include <time.h> /* for nanosleep() */
  38. #include <unistd.h>
  39. #if (!defined(AO_HAVE_load_acquire) || !defined(AO_HAVE_store_release)) \
  40. && !defined(CPPCHECK)
  41. # error AO_load_acquire and/or AO_store_release are missing;
  42. # error please define AO_REQUIRE_CAS manually
  43. #endif
  44. /* It's safe to call original pthread_sigmask() here. */
  45. #undef pthread_sigmask
  46. #ifdef GC_ENABLE_SUSPEND_THREAD
  47. static void *GC_CALLBACK suspend_self_inner(void *client_data);
  48. #endif
  49. #ifdef DEBUG_THREADS
  50. # ifndef NSIG
  51. # if defined(MAXSIG)
  52. # define NSIG (MAXSIG+1)
  53. # elif defined(_NSIG)
  54. # define NSIG _NSIG
  55. # elif defined(__SIGRTMAX)
  56. # define NSIG (__SIGRTMAX+1)
  57. # else
  58. # error define NSIG
  59. # endif
  60. # endif /* NSIG */
  61. void GC_print_sig_mask(void)
  62. {
  63. sigset_t blocked;
  64. int i;
  65. if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
  66. ABORT("pthread_sigmask failed");
  67. for (i = 1; i < NSIG; i++) {
  68. if (sigismember(&blocked, i))
  69. GC_printf("Signal blocked: %d\n", i);
  70. }
  71. }
  72. #endif /* DEBUG_THREADS */
/* Remove from *set the signals that must remain deliverable while a   */
/* thread sits in the stopping handler: the usual termination signals, */
/* plus (under MPROTECT_VDB) the fault signals used for dirty-page     */
/* tracking.  Aborts on any sigdelset failure.                         */
STATIC void GC_remove_allowed_signals(sigset_t *set)
{
  if (sigdelset(set, SIGINT) != 0
#     ifndef HOST_ANDROID
        /* On Android SIGQUIT is left in the set (it is handled by the */
        /* platform's signal catcher thread, not by user code).        */
        || sigdelset(set, SIGQUIT) != 0
#     endif
      || sigdelset(set, SIGABRT) != 0
      || sigdelset(set, SIGTERM) != 0) {
    ABORT("sigdelset failed");
  }

# ifdef MPROTECT_VDB
    /* Handlers write to the thread structure, which is in the heap,  */
    /* and hence can trigger a protection fault.                      */
    if (sigdelset(set, SIGSEGV) != 0
#         ifdef HAVE_SIGBUS
            || sigdelset(set, SIGBUS) != 0
#         endif
        ) {
      ABORT("sigdelset failed");
    }
# endif
}
  97. static sigset_t suspend_handler_mask;
  98. STATIC volatile AO_t GC_stop_count = 0;
  99. /* Incremented by two at the beginning of */
  100. /* GC_stop_world (the lowest bit is always 0). */
  101. STATIC volatile AO_t GC_world_is_stopped = FALSE;
  102. /* FALSE ==> it is safe for threads to restart, */
  103. /* i.e. they will see another suspend signal */
  104. /* before they are expected to stop (unless */
  105. /* they have stopped voluntarily). */
  106. #if defined(GC_OSF1_THREADS) || defined(THREAD_SANITIZER) \
  107. || defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER)
  108. STATIC GC_bool GC_retry_signals = TRUE;
  109. #else
  110. // Unity: Always enable retry signals, since any platform could lose signals
  111. STATIC GC_bool GC_retry_signals = TRUE;
  112. #endif
  113. #define UNITY_RETRY_SIGNALS
  114. /*
  115. * We use signals to stop threads during GC.
  116. *
  117. * Suspended threads wait in signal handler for SIG_THR_RESTART.
  118. * That's more portable than semaphores or condition variables.
  119. * (We do use sem_post from a signal handler, but that should be portable.)
  120. *
  121. * The thread suspension signal SIG_SUSPEND is now defined in gc_priv.h.
  122. * Note that we can't just stop a thread; we need it to save its stack
  123. * pointer(s) and acknowledge.
  124. */
  125. #ifndef SIG_THR_RESTART
  126. # if defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS) \
  127. || defined(GC_NETBSD_THREADS) || defined(GC_USESIGRT_SIGNALS)
  128. # if defined(_SIGRTMIN) && !defined(CPPCHECK)
  129. # define SIG_THR_RESTART _SIGRTMIN + 5
  130. # else
  131. # define SIG_THR_RESTART SIGRTMIN + 5
  132. # endif
  133. # else
  134. # define SIG_THR_RESTART SIGXCPU
  135. # endif
  136. #endif
  137. #define SIGNAL_UNSET (-1)
  138. /* Since SIG_SUSPEND and/or SIG_THR_RESTART could represent */
  139. /* a non-constant expression (e.g., in case of SIGRTMIN), */
  140. /* actual signal numbers are determined by GC_stop_init() */
  141. /* unless manually set (before GC initialization). */
  142. STATIC int GC_sig_suspend = SIGNAL_UNSET;
  143. STATIC int GC_sig_thr_restart = SIGNAL_UNSET;
  144. GC_API void GC_CALL GC_set_suspend_signal(int sig)
  145. {
  146. if (GC_is_initialized) return;
  147. GC_sig_suspend = sig;
  148. }
  149. GC_API void GC_CALL GC_set_thr_restart_signal(int sig)
  150. {
  151. if (GC_is_initialized) return;
  152. GC_sig_thr_restart = sig;
  153. }
  154. GC_API int GC_CALL GC_get_suspend_signal(void)
  155. {
  156. return GC_sig_suspend != SIGNAL_UNSET ? GC_sig_suspend : SIG_SUSPEND;
  157. }
  158. GC_API int GC_CALL GC_get_thr_restart_signal(void)
  159. {
  160. return GC_sig_thr_restart != SIGNAL_UNSET
  161. ? GC_sig_thr_restart : SIG_THR_RESTART;
  162. }
  163. #if defined(GC_EXPLICIT_SIGNALS_UNBLOCK) \
  164. || !defined(NO_SIGNALS_UNBLOCK_IN_MAIN)
  165. /* Some targets (e.g., Solaris) might require this to be called when */
  166. /* doing thread registering from the thread destructor. */
  167. GC_INNER void GC_unblock_gc_signals(void)
  168. {
  169. sigset_t set;
  170. sigemptyset(&set);
  171. GC_ASSERT(GC_sig_suspend != SIGNAL_UNSET);
  172. GC_ASSERT(GC_sig_thr_restart != SIGNAL_UNSET);
  173. sigaddset(&set, GC_sig_suspend);
  174. sigaddset(&set, GC_sig_thr_restart);
  175. if (pthread_sigmask(SIG_UNBLOCK, &set, NULL) != 0)
  176. ABORT("pthread_sigmask failed");
  177. }
  178. #endif /* GC_EXPLICIT_SIGNALS_UNBLOCK */
  179. #ifdef HOST_ANDROID
  180. GC_INNER void GC_block_android_signal_catcher_signals(void)
  181. {
  182. sigset_t sys_signals;
  183. sigemptyset(&sys_signals);
  184. if (sigaddset(&sys_signals, SIGQUIT) != 0
  185. || sigaddset(&sys_signals, SIGUSR1) != 0)
  186. ABORT("sigaddset failed");
  187. if (pthread_sigmask(SIG_BLOCK, NULL, &sys_signals) != 0)
  188. ABORT("pthread_sigmask failed");
  189. }
  190. #endif /* HOST_ANDROID */
  191. STATIC sem_t GC_suspend_ack_sem; /* also used to acknowledge restart */
  192. STATIC void GC_suspend_handler_inner(ptr_t dummy, void *context);
/* Signal handler for GC_sig_suspend.  Validates the signal number,    */
/* then delegates to GC_suspend_handler_inner, pushing callee-save     */
/* registers first on targets where the signal frame does not already  */
/* contain the full register context.                                  */
#ifndef NO_SA_SIGACTION
  STATIC void GC_suspend_handler(int sig, siginfo_t * info GC_ATTR_UNUSED,
                                 void * context GC_ATTR_UNUSED)
#else
  STATIC void GC_suspend_handler(int sig)
#endif
{
  int old_errno = errno;        /* restore errno for the interrupted code */

  if (sig != GC_sig_suspend) {
#   if defined(GC_FREEBSD_THREADS)
      /* Workaround "deferred signal handling" bug in FreeBSD 9.2.     */
      if (0 == sig) return;
#   endif
    ABORT("Bad signal in suspend_handler");
  }

# if defined(IA64) || defined(HP_PA) || defined(M68K)
    /* Spill callee-save registers onto the stack so the collector     */
    /* can see any pointers held in them.                              */
    GC_with_callee_saves_pushed(GC_suspend_handler_inner, NULL);
# else
    /* We believe that in all other cases the full context is already  */
    /* in the signal handler frame.                                    */
    {
#     ifdef NO_SA_SIGACTION
        void *context = 0;
#     endif
      GC_suspend_handler_inner(NULL, context);
    }
# endif
  errno = old_errno;
}
  222. /* The lookup here is safe, since this is done on behalf */
  223. /* of a thread which holds the allocation lock in order */
  224. /* to stop the world. Thus concurrent modification of the */
  225. /* data structure is impossible. Unfortunately, we have to */
  226. /* instruct TSan that the lookup is safe. */
  227. #ifdef THREAD_SANITIZER
  228. /* The implementation of the function is the same as that of */
  229. /* GC_lookup_thread except for the attribute added here. */
  230. GC_ATTR_NO_SANITIZE_THREAD
  231. static GC_thread GC_lookup_thread_async(pthread_t id)
  232. {
  233. GC_thread p = GC_threads[THREAD_TABLE_INDEX(id)];
  234. while (p != NULL && !THREAD_EQUAL(p->id, id))
  235. p = p->next;
  236. return p;
  237. }
  238. #else
  239. # define GC_lookup_thread_async GC_lookup_thread
  240. #endif
  241. #ifdef HOST_ANDROID
  242. STATIC GC_bool GC_may_altstack(GC_thread me)
  243. {
  244. ptr_t approx_sp, hi;
  245. stack_t stack;
  246. approx_sp = GC_approx_sp();
  247. hi = (me -> flags & MAIN_THREAD) == 0 ? me -> stack_end : GC_stackbottom;
  248. if ((word)hi HOTTER_THAN (word)approx_sp)
  249. return TRUE;
  250. if (sigaltstack(NULL, &stack) != 0)
  251. ABORT("sigaltstack failed");
  252. if (stack.ss_flags != 0 && stack.ss_flags != SS_DISABLE)
  253. return TRUE;
  254. return FALSE;
  255. }
  256. #endif /* HOST_ANDROID */
/* Record the thread's current stack pointer (and, on IA64, the        */
/* register backing store pointer) in its GC_thread entry so that      */
/* GC_push_all_stacks can scan the stack later.  Called from the       */
/* suspend handler, hence must remain async-signal-safe.               */
GC_INLINE void GC_store_stack_ptr(GC_thread me)
{
  /* There is no data race between the suspend handler (storing        */
  /* stack_ptr) and GC_push_all_stacks (fetching stack_ptr) because    */
  /* GC_push_all_stacks is executed after GC_stop_world exits and the  */
  /* latter runs sem_wait repeatedly waiting for all the suspended     */
  /* threads to call sem_post.  Nonetheless, stack_ptr is stored       */
  /* (here) and fetched (by GC_push_all_stacks) using the atomic       */
  /* primitives to avoid the related TSan warning.                     */
# ifdef SPARC
    /* Register windows must be flushed to the stack first;            */
    /* GC_save_regs_in_stack returns the resulting stack pointer.      */
    AO_store((volatile AO_t *)&me->stop_info.stack_ptr,
             (AO_t)GC_save_regs_in_stack());
# else
#   ifdef IA64
      me -> backing_store_ptr = GC_save_regs_in_stack();
#   endif
    AO_store((volatile AO_t *)&me->stop_info.stack_ptr, (AO_t)GC_approx_sp());
# endif
}
/* Core of the suspend handler: record this thread's stack pointer,    */
/* acknowledge via GC_suspend_ack_sem, then park in sigsuspend until   */
/* the restart signal arrives and the world is no longer stopped.      */
/* Only async-signal-safe primitives may be used here.                 */
STATIC void GC_suspend_handler_inner(ptr_t dummy GC_ATTR_UNUSED,
                                     void * context GC_ATTR_UNUSED)
{
  pthread_t self = pthread_self();
  GC_thread me;
  IF_CANCEL(int cancel_state;)
  AO_t my_stop_count = AO_load_acquire(&GC_stop_count);
                        /* After the barrier, this thread should see   */
                        /* the actual content of GC_threads.           */

  DISABLE_CANCEL(cancel_state);
      /* pthread_setcancelstate is not defined to be async-signal-safe. */
      /* But the glibc version appears to be in the absence of          */
      /* asynchronous cancellation.  And since this signal handler      */
      /* to block on sigsuspend, which is both async-signal-safe        */
      /* and a cancellation point, there seems to be no obvious way     */
      /* out of it.  In fact, it looks to me like an async-signal-safe  */
      /* cancellation point is inherently a problem, unless there is    */
      /* some way to disable cancellation in the handler.               */

# ifdef DEBUG_THREADS
    GC_log_printf("Suspending %p\n", (void *)self);
# endif
  /* GC_stop_count is always even outside of a suspension request.     */
  GC_ASSERT(((word)my_stop_count & 1) == 0);

  me = GC_lookup_thread_async(self);

# ifdef HOST_ANDROID
    /* If possibly on an alternate stack, skip recording the sp; the   */
    /* retry mechanism will deliver the suspend signal again later.    */
    if (GC_retry_signals && GC_may_altstack(me)) {
      RESTORE_CANCEL(cancel_state);
      return;
    }
# endif

# ifdef GC_ENABLE_SUSPEND_THREAD
    /* Externally-requested suspension (GC_suspend_thread): record sp, */
    /* acknowledge, and spin-sleep until GC_resume_thread clears the   */
    /* flag.                                                           */
    if (AO_load(&me->suspended_ext)) {
      GC_store_stack_ptr(me);
      sem_post(&GC_suspend_ack_sem);
      suspend_self_inner(me);
#     ifdef DEBUG_THREADS
        GC_log_printf("Continuing %p on GC_resume_thread\n", (void *)self);
#     endif
      RESTORE_CANCEL(cancel_state);
      return;
    }
# endif

  if (((word)me->stop_info.last_stop_count & ~(word)0x1)
      == (word)my_stop_count) {
    /* Duplicate signal.  OK if we are retrying.                       */
    if (!GC_retry_signals) {
      WARN("Duplicate suspend signal in thread %p\n", self);
    }
    RESTORE_CANCEL(cancel_state);
    return;
  }
  GC_store_stack_ptr(me);

# ifdef THREAD_SANITIZER
    /* TSan disables signals around signal handlers.  Without          */
    /* a pthread_sigmask call, sigsuspend may block forever.           */
    {
      sigset_t set;
      sigemptyset(&set);
      GC_ASSERT(GC_sig_suspend != SIGNAL_UNSET);
      GC_ASSERT(GC_sig_thr_restart != SIGNAL_UNSET);
      sigaddset(&set, GC_sig_suspend);
      sigaddset(&set, GC_sig_thr_restart);
      if (pthread_sigmask(SIG_UNBLOCK, &set, NULL) != 0)
        ABORT("pthread_sigmask failed in suspend handler");
    }
# endif

  /* Tell the thread that wants to stop the world that this     */
  /* thread has been stopped.  Note that sem_post() is          */
  /* the only async-signal-safe primitive in LinuxThreads.      */
  sem_post(&GC_suspend_ack_sem);
  AO_store_release(&me->stop_info.last_stop_count, my_stop_count);

  /* Wait until that thread tells us to restart by sending      */
  /* this thread a GC_sig_thr_restart signal (should be masked  */
  /* at this point thus there is no race).                      */
  /* We do not continue until we receive that signal,           */
  /* but we do not take that as authoritative.  (We may be      */
  /* accidentally restarted by one of the user signals we       */
  /* don't block.)  After we receive the signal, we use a       */
  /* primitive and expensive mechanism to wait until it's       */
  /* really safe to proceed.  Under normal circumstances,       */
  /* this code should not be executed.                          */
  do {
    sigsuspend (&suspend_handler_mask);
  } while (AO_load_acquire(&GC_world_is_stopped)
           && AO_load(&GC_stop_count) == my_stop_count);

# ifdef DEBUG_THREADS
    GC_log_printf("Continuing %p\n", (void *)self);
# endif
# ifndef GC_NETBSD_THREADS_WORKAROUND
    if (GC_retry_signals)
# endif
  {
    /* If the RESTART signal loss is possible (though it should be */
    /* less likely than losing the SUSPEND signal as we do not do  */
    /* much between the first sem_post and sigsuspend calls), more */
    /* handshaking is provided to work around it.                  */
    sem_post(&GC_suspend_ack_sem);
#   ifdef GC_NETBSD_THREADS_WORKAROUND
      if (GC_retry_signals)
#   endif
    {
      /* Set the flag (the lowest bit of last_stop_count) that the */
      /* thread has been restarted.                                */
      AO_store_release(&me->stop_info.last_stop_count,
                       (AO_t)((word)my_stop_count | 1));
    }
  }
  RESTORE_CANCEL(cancel_state);
}
  384. static void suspend_restart_barrier(int n_live_threads)
  385. {
  386. int i;
  387. for (i = 0; i < n_live_threads; i++) {
  388. while (0 != sem_wait(&GC_suspend_ack_sem)) {
  389. /* On Linux, sem_wait is documented to always return zero. */
  390. /* But the documentation appears to be incorrect. */
  391. /* EINTR seems to happen with some versions of gdb. */
  392. if (errno != EINTR)
  393. ABORT("sem_wait failed");
  394. }
  395. }
  396. # ifdef GC_ASSERTIONS
  397. sem_getvalue(&GC_suspend_ack_sem, &i);
  398. GC_ASSERT(0 == i);
  399. # endif
  400. }
/* Poll GC_suspend_ack_sem until n_live_threads acknowledgements have  */
/* accumulated; whenever no progress is made for RETRY_INTERVAL usecs, */
/* re-send the suspend (or restart) signals via suspend_restart_all.   */
/* Returns the number of acks still expected (reduced if some threads  */
/* disappeared while the world was being stopped/started).             */
static int resend_lost_signals(int n_live_threads,
                               int (*suspend_restart_all)(GC_bool))
{
# define WAIT_UNIT 3000
# define RETRY_INTERVAL 100000

  if (n_live_threads > 0) {
    unsigned long wait_usecs = 0; /* Total wait since retry. */
    for (;;) {
      int ack_count;

      sem_getvalue(&GC_suspend_ack_sem, &ack_count);
      if (ack_count == n_live_threads)
        break;
      if (wait_usecs > RETRY_INTERVAL) {
        int newly_sent = suspend_restart_all(GC_retry_signals);

        GC_COND_LOG_PRINTF("Resent %d signals after timeout\n", newly_sent);
        sem_getvalue(&GC_suspend_ack_sem, &ack_count);
        if (newly_sent < n_live_threads - ack_count) {
          /* Fewer signals were re-sent than acks are outstanding:     */
          /* some threads must have terminated; lower the target.      */
          WARN("Lost some threads while stopping or starting world?!\n", 0);
          n_live_threads = ack_count + newly_sent;
        }
        wait_usecs = 0;
      }

#     ifdef LINT2
        /* Workaround "waiting while holding a lock" warning. */
#       undef WAIT_UNIT
#       define WAIT_UNIT 1
        sched_yield();
#     elif defined(CPPCHECK) /* || _POSIX_C_SOURCE >= 199309L */
        {
          struct timespec ts;

          ts.tv_sec = 0;
          ts.tv_nsec = WAIT_UNIT * 1000;
          (void)nanosleep(&ts, NULL);
        }
#     else
        usleep(WAIT_UNIT);
#     endif
      wait_usecs += WAIT_UNIT;
    }
  }
  return n_live_threads;
}
  443. #ifdef UNITY_RETRY_SIGNALS
/* Unity addition: like suspend_restart_barrier, but waits with a      */
/* ~10 ms deadline (sem_timedwait) so that lost signals are detected   */
/* and re-sent via resend_lost_signals instead of hanging forever.     */
static void suspend_restart_barrier_retry(int n_live_threads,
                                          int (*suspend_restart_all)(GC_bool))
{
# define TIMEOUT_UNIT 10000
  int i;
  int acked_threads = 0;
  struct timespec ts;

  if (clock_gettime(CLOCK_REALTIME, &ts) == -1) {
    /* No usable clock: fall back to the polling-based resend path. */
    n_live_threads = resend_lost_signals(n_live_threads, suspend_restart_all);
    suspend_restart_barrier(n_live_threads);
    return;
  }
  /* NOTE(review): tv_nsec may exceed 999999999 here without being     */
  /* normalized; sem_timedwait then fails with EINVAL, which is        */
  /* handled below exactly like a timeout, so the retry path is still  */
  /* taken -- presumably intentional, but worth confirming.            */
  ts.tv_nsec += TIMEOUT_UNIT * 1000;
  for (i = 0; i < n_live_threads; i++) {
    while (0 != sem_timedwait(&GC_suspend_ack_sem, &ts)) {
      /* On Linux, sem_wait is documented to always return zero.       */
      /* But the documentation appears to be incorrect.                */
      /* EINTR seems to happen with some versions of gdb.              */
      if (errno == ETIMEDOUT || errno == EINVAL) {
        /* Wait timed out or the timeout period has passed.            */
        n_live_threads = resend_lost_signals(n_live_threads - acked_threads, suspend_restart_all);
        suspend_restart_barrier(n_live_threads);
        return;
      }
      else if (errno != EINTR) {
        ABORT("sem_wait failed");
      }
    }
    acked_threads++;
  }
# ifdef GC_ASSERTIONS
    sem_getvalue(&GC_suspend_ack_sem, &i);
    GC_ASSERT(0 == i);
# endif
}
  479. #endif
/* Signal handler for GC_sig_thr_restart.  Its delivery alone is what  */
/* wakes a thread blocked in sigsuspend within the suspend handler;    */
/* the body therefore does (almost) nothing.                           */
STATIC void GC_restart_handler(int sig)
{
# if defined(DEBUG_THREADS)
    int old_errno = errno;      /* Preserve errno value. */
# endif

  if (sig != GC_sig_thr_restart)
    ABORT("Bad signal in restart handler");

  /*
  ** Note: even if we don't do anything useful here,
  ** it would still be necessary to have a signal handler,
  ** rather than ignoring the signals, otherwise
  ** the signals will not be delivered at all, and
  ** will thus not interrupt the sigsuspend() above.
  */
# ifdef DEBUG_THREADS
    GC_log_printf("In GC_restart_handler for %p\n", (void *)pthread_self());
    errno = old_errno;
# endif
}
  499. # ifdef USE_TKILL_ON_ANDROID
  500. EXTERN_C_BEGIN
  501. extern int tkill(pid_t tid, int sig); /* from sys/linux-unistd.h */
  502. EXTERN_C_END
  503. static int android_thread_kill(pid_t tid, int sig)
  504. {
  505. int ret;
  506. int old_errno = errno;
  507. ret = tkill(tid, sig);
  508. if (ret < 0) {
  509. ret = errno;
  510. errno = old_errno;
  511. }
  512. return ret;
  513. }
  514. # define THREAD_SYSTEM_ID(t) (t)->kernel_id
  515. # define RAISE_SIGNAL(t, sig) android_thread_kill(THREAD_SYSTEM_ID(t), sig)
  516. # else
  517. # define THREAD_SYSTEM_ID(t) (t)->id
  518. # define RAISE_SIGNAL(t, sig) pthread_kill(THREAD_SYSTEM_ID(t), sig)
  519. # endif /* !USE_TKILL_ON_ANDROID */
  520. # ifdef GC_ENABLE_SUSPEND_THREAD
  521. # include <sys/time.h>
  522. # include "javaxfc.h" /* to get the prototypes as extern "C" */
/* Sleep briefly using select() (chosen here for async-signal safety,  */
/* per the function name).  Used by suspend_self_inner while a thread  */
/* is parked waiting for GC_resume_thread.                             */
STATIC void GC_brief_async_signal_safe_sleep(void)
{
  struct timeval tv;
  tv.tv_sec = 0;
# if defined(GC_TIME_LIMIT) && !defined(CPPCHECK)
    /* Sleep for half of the configured GC time limit. */
    tv.tv_usec = 1000 * GC_TIME_LIMIT / 2;
# else
    /* Default: 25 ms (half of the 50 ms default limit). */
    tv.tv_usec = 1000 * 50 / 2;
# endif
  (void)select(0, 0, 0, 0, &tv);
}
  534. static void *GC_CALLBACK suspend_self_inner(void *client_data) {
  535. GC_thread me = (GC_thread)client_data;
  536. while (AO_load_acquire(&me->suspended_ext)) {
  537. /* TODO: Use sigsuspend() instead. */
  538. GC_brief_async_signal_safe_sleep();
  539. }
  540. return NULL;
  541. }
/* Suspend a single thread (public GC_ENABLE_SUSPEND_THREAD API).      */
/* Marks the thread as externally suspended; if it is another thread,  */
/* raises the suspend signal and waits for the handler's ack.  A       */
/* self-suspend parks via GC_do_blocking instead.  No-op for unknown,  */
/* already-suspended, or finished threads.                             */
GC_API void GC_CALL GC_suspend_thread(GC_SUSPEND_THREAD_ID thread) {
  GC_thread t;
  IF_CANCEL(int cancel_state;)
  DCL_LOCK_STATE;

  LOCK();
  t = GC_lookup_thread((pthread_t)thread);
  if (t == NULL || t -> suspended_ext) {
    UNLOCK();
    return;
  }
  /* Set the flag making the change visible to the signal handler.  */
  /* This also removes the protection for t object, preventing      */
  /* write faults in GC_store_stack_ptr (thus double-locking should */
  /* not occur in async_set_pht_entry_from_index).                  */
  AO_store_release(&t->suspended_ext, TRUE);

  if (THREAD_EQUAL((pthread_t)thread, pthread_self())) {
    UNLOCK();
    /* It is safe as "t" cannot become invalid here (no race with   */
    /* GC_unregister_my_thread).                                    */
    (void)GC_do_blocking(suspend_self_inner, t);
    return;
  }
  if ((t -> flags & FINISHED) != 0) {
    /* Terminated but not joined yet. */
    UNLOCK();
    return;
  }
  DISABLE_CANCEL(cancel_state);
              /* GC_suspend_thread is not a cancellation point.   */
# ifdef PARALLEL_MARK
    /* Ensure we do not suspend a thread while it is rebuilding   */
    /* a free list, otherwise such a dead-lock is possible:       */
    /* thread 1 is blocked in GC_wait_for_reclaim holding         */
    /* the allocation lock, thread 2 is suspended in              */
    /* GC_reclaim_generic invoked from GC_generic_malloc_many     */
    /* (with GC_fl_builder_count > 0), and thread 3 is blocked    */
    /* acquiring the allocation lock in GC_resume_thread.         */
    if (GC_parallel)
      GC_wait_for_reclaim();
# endif
  /* TODO: Support GC_retry_signals (not needed for TSan) */
  GC_acquire_dirty_lock();
  switch (RAISE_SIGNAL(t, GC_sig_suspend)) {
  /* ESRCH cannot happen as terminated threads are handled above. */
  case 0:
    break;
  default:
    ABORT("pthread_kill failed");
  }
  /* Wait for the thread to complete threads table lookup and     */
  /* stack_ptr assignment.                                        */
  GC_ASSERT(GC_thr_initialized);
  while (sem_wait(&GC_suspend_ack_sem) != 0) {
    if (errno != EINTR)
      ABORT("sem_wait for handler failed (suspend_self)");
  }
  GC_release_dirty_lock();
  RESTORE_CANCEL(cancel_state);
  UNLOCK();
}
  602. GC_API void GC_CALL GC_resume_thread(GC_SUSPEND_THREAD_ID thread) {
  603. GC_thread t;
  604. DCL_LOCK_STATE;
  605. LOCK();
  606. t = GC_lookup_thread((pthread_t)thread);
  607. if (t != NULL)
  608. AO_store(&t->suspended_ext, FALSE);
  609. UNLOCK();
  610. }
  611. GC_API int GC_CALL GC_is_thread_suspended(GC_SUSPEND_THREAD_ID thread) {
  612. GC_thread t;
  613. int is_suspended = 0;
  614. DCL_LOCK_STATE;
  615. LOCK();
  616. t = GC_lookup_thread((pthread_t)thread);
  617. if (t != NULL && t -> suspended_ext)
  618. is_suspended = (int)TRUE;
  619. UNLOCK();
  620. return is_suspended;
  621. }
  622. # endif /* GC_ENABLE_SUSPEND_THREAD */
  623. #endif /* !GC_OPENBSD_UTHREADS && !NACL */
  624. #ifdef IA64
  625. # define IF_IA64(x) x
  626. #else
  627. # define IF_IA64(x)
  628. #endif
  629. /* We hold allocation lock. Should do exactly the right thing if the */
  630. /* world is stopped. Should not fail if it isn't. */
GC_INNER void GC_push_all_stacks(void)
{
  GC_bool found_me = FALSE;     /* sanity: our own stack must be seen */
  size_t nthreads = 0;
  int i;
  GC_thread p;
  ptr_t lo, hi;                 /* scanned stack range: [lo, hi)      */
  /* On IA64, we also need to scan the register backing store.        */
  IF_IA64(ptr_t bs_lo; ptr_t bs_hi;)
  struct GC_traced_stack_sect_s *traced_stack_sect;
  pthread_t self = pthread_self();
  word total_size = 0;

  if (!EXPECT(GC_thr_initialized, TRUE))
    GC_thr_init();
# ifdef DEBUG_THREADS
    GC_log_printf("Pushing stacks from thread %p\n", (void *)self);
# endif
  /* Iterate over every registered (unfinished) thread.               */
  for (i = 0; i < THREAD_TABLE_SZ; i++) {
    for (p = GC_threads[i]; p != 0; p = p -> next) {
      if (p -> flags & FINISHED) continue;
      ++nthreads;
      traced_stack_sect = p -> traced_stack_sect;
      if (THREAD_EQUAL(p -> id, self)) {
        /* Our own stack: take the live stack pointer directly.       */
        GC_ASSERT(!p->thread_blocked);
#       ifdef SPARC
          lo = (ptr_t)GC_save_regs_in_stack();
#       else
          lo = GC_approx_sp();
#       endif
        found_me = TRUE;
        IF_IA64(bs_hi = (ptr_t)GC_save_regs_in_stack();)
      } else {
        /* Another thread: use the sp recorded by its suspend handler */
        /* (atomic load to pair with the handler's atomic store).     */
        lo = (ptr_t)AO_load((volatile AO_t *)&p->stop_info.stack_ptr);
        IF_IA64(bs_hi = p -> backing_store_ptr;)
        if (traced_stack_sect != NULL
            && traced_stack_sect->saved_stack_ptr == lo) {
          /* If the thread has never been stopped since the recent    */
          /* GC_call_with_gc_active invocation then skip the top      */
          /* "stack section" as stack_ptr already points to.          */
          traced_stack_sect = traced_stack_sect->prev;
        }
      }
      if ((p -> flags & MAIN_THREAD) == 0) {
        hi = p -> stack_end;
        IF_IA64(bs_lo = p -> backing_store_end);
      } else {
        /* The original stack. */
        hi = GC_stackbottom;
        IF_IA64(bs_lo = BACKING_STORE_BASE;)
      }
#     ifdef DEBUG_THREADS
        GC_log_printf("Stack for thread %p = [%p,%p)\n",
                      (void *)p->id, (void *)lo, (void *)hi);
#     endif
      if (0 == lo) ABORT("GC_push_all_stacks: sp not set!");
      /* If the recorded sp lies within the thread's alternate signal */
      /* stack, scan that region instead.                             */
      if (p->altstack != NULL && (word)p->altstack <= (word)lo
          && (word)lo <= (word)p->altstack + p->altstack_size) {
        hi = p->altstack + p->altstack_size;
        /* FIXME: Need to scan the normal stack too, but how ? */
        /* FIXME: Assume stack grows down */
      }
      GC_push_all_stack_sections(lo, hi, traced_stack_sect);
#     ifdef STACK_GROWS_UP
        total_size += lo - hi;
#     else
        total_size += hi - lo; /* lo <= hi */
#     endif
#     ifdef NACL
        /* Push reg_storage as roots, this will cover the reg context. */
        GC_push_all_stack((ptr_t)p -> stop_info.reg_storage,
            (ptr_t)(p -> stop_info.reg_storage + NACL_GC_REG_STORAGE_SIZE));
        total_size += NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t);
#     endif
#     ifdef IA64
#       ifdef DEBUG_THREADS
          GC_log_printf("Reg stack for thread %p = [%p,%p)\n",
                        (void *)p->id, (void *)bs_lo, (void *)bs_hi);
#       endif
        /* FIXME: This (if p->id==self) may add an unbounded number of */
        /* entries, and hence overflow the mark stack, which is bad.   */
        GC_push_all_register_sections(bs_lo, bs_hi,
                                      THREAD_EQUAL(p -> id, self),
                                      traced_stack_sect);
        total_size += bs_hi - bs_lo; /* bs_lo <= bs_hi */
#     endif
    }
  }
  GC_VERBOSE_LOG_PRINTF("Pushed %d thread stacks\n", (int)nthreads);
  if (!found_me && !GC_in_thread_creation)
    ABORT("Collecting from unknown thread");
  GC_total_stacksize = total_size;
}
#ifdef DEBUG_THREADS
  /* There seems to be a very rare thread stopping problem.  To help  */
  /* us debug that, we save the id (and pid) of the stopping thread.  */
  pthread_t GC_stopping_thread;
  int GC_stopping_pid = 0;
#endif
/* We hold the allocation lock.  Suspend all threads that might       */
/* still be running.  Return the number of suspend signals that       */
/* were sent.                                                         */
STATIC int GC_suspend_all(GC_bool is_retry)
{
  int n_live_threads = 0;
  int i;
# ifndef NACL
    GC_thread p;
#   ifndef GC_OPENBSD_UTHREADS
      int result;
#   endif
    pthread_t self = pthread_self();

    /* Walk every bucket of the thread table and suspend (or signal)  */
    /* each registered thread other than the current one.             */
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (!THREAD_EQUAL(p -> id, self)) {
          if ((p -> flags & FINISHED) != 0) continue;
          /* A blocked thread is parked at a known safe point and is  */
          /* not signaled.                                            */
          if (p -> thread_blocked) /* Will wait */ continue;
#         ifndef GC_OPENBSD_UTHREADS
#           ifdef GC_ENABLE_SUSPEND_THREAD
              /* Suspended by the client API; already stopped.        */
              if (p -> suspended_ext) continue;
#           endif
            /* On a retry, skip threads that already acknowledged the */
            /* current stop round.                                    */
            if (is_retry && AO_load(&p->stop_info.last_stop_count) == GC_stop_count)
              continue; /* matters only if GC_retry_signals */
            n_live_threads++;
#         endif
#         ifdef DEBUG_THREADS
            GC_log_printf("Sending suspend signal to %p\n", (void *)p->id);
#         endif
#         ifdef GC_OPENBSD_UTHREADS
            {
              stack_t stack;

              GC_acquire_dirty_lock();
              if (pthread_suspend_np(p -> id) != 0)
                ABORT("pthread_suspend_np failed");
              GC_release_dirty_lock();
              if (pthread_stackseg_np(p->id, &stack))
                ABORT("pthread_stackseg_np failed");
              p -> stop_info.stack_ptr = (ptr_t)stack.ss_sp - stack.ss_size;
              if (GC_on_thread_event)
                GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED,
                                   (void *)p->id);
            }
#         else
            /* The synchronization between GC_dirty (based on         */
            /* test-and-set) and the signal-based thread suspension   */
            /* is performed in GC_stop_world because                  */
            /* GC_release_dirty_lock cannot be called before          */
            /* acknowledging the thread is really suspended.          */
            result = RAISE_SIGNAL(p, GC_sig_suspend);
            switch(result) {
            case ESRCH:
              /* Not really there anymore.  Possible?                 */
              n_live_threads--;
              break;
            case 0:
              if (GC_on_thread_event)
                GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED,
                                   (void *)(word)THREAD_SYSTEM_ID(p));
                /* Note: thread id might be truncated.                */
              break;
            default:
              ABORT_ARG1("pthread_kill failed at suspend",
                         ": errcode= %d", result);
            }
#         endif
        }
      }
    }
# else /* NACL */
#   ifndef NACL_PARK_WAIT_NANOSECONDS
#     define NACL_PARK_WAIT_NANOSECONDS (100 * 1000)
#   endif
#   define NANOS_PER_SECOND (1000UL * 1000 * 1000)
    unsigned long num_sleeps = 0;

#   ifdef DEBUG_THREADS
      GC_log_printf("pthread_stop_world: num_threads=%d\n",
                    GC_nacl_num_gc_threads - 1);
#   endif
    /* Ask every thread to park itself (via the syscall hooks), then  */
    /* poll until all of them have acknowledged.                      */
    GC_nacl_thread_parker = pthread_self();
    GC_nacl_park_threads_now = 1;

    while (1) {
      int num_threads_parked = 0;
      struct timespec ts;
      int num_used = 0;

      /* Check the 'parked' flag for each thread the GC knows about.  */
      for (i = 0; i < MAX_NACL_GC_THREADS
                  && num_used < GC_nacl_num_gc_threads; i++) {
        if (GC_nacl_thread_used[i] == 1) {
          num_used++;
          if (GC_nacl_thread_parked[i] == 1) {
            num_threads_parked++;
            if (GC_on_thread_event)
              GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED, (void *)(word)i);
          }
        }
      }
      /* -1 for the current thread.                                   */
      if (num_threads_parked >= GC_nacl_num_gc_threads - 1)
        break;
      ts.tv_sec = 0;
      ts.tv_nsec = NACL_PARK_WAIT_NANOSECONDS;
#     ifdef DEBUG_THREADS
        GC_log_printf("Sleep waiting for %d threads to park...\n",
                      GC_nacl_num_gc_threads - num_threads_parked - 1);
#     endif
      /* This requires _POSIX_TIMERS feature.                         */
      nanosleep(&ts, 0);
      /* Warn (roughly once per second of waiting) if threads do not  */
      /* park in a timely manner.                                     */
      if (++num_sleeps > NANOS_PER_SECOND / NACL_PARK_WAIT_NANOSECONDS) {
        WARN("GC appears stalled waiting for %" WARN_PRIdPTR
             " threads to park...\n",
             GC_nacl_num_gc_threads - num_threads_parked - 1);
        num_sleeps = 0;
      }
    }
# endif /* NACL */
  return n_live_threads;
}
/* Stop every mutator thread.  Caller holds the allocation lock.      */
GC_INNER void GC_stop_world(void)
{
# if !defined(GC_OPENBSD_UTHREADS) && !defined(NACL)
    int n_live_threads;
# endif
  GC_ASSERT(I_HOLD_LOCK());
# ifdef DEBUG_THREADS
    GC_stopping_thread = pthread_self();
    GC_stopping_pid = getpid();
    GC_log_printf("Stopping the world from %p\n", (void *)GC_stopping_thread);
# endif
  /* Make sure all free list construction has stopped before we start. */
  /* No new construction can start, since free list construction is    */
  /* required to acquire and release the GC lock before it starts,     */
  /* and we have the lock.                                             */
# ifdef PARALLEL_MARK
    if (GC_parallel) {
      GC_acquire_mark_lock();
      GC_ASSERT(GC_fl_builder_count == 0);
      /* We should have previously waited for it to become zero.      */
    }
# endif /* PARALLEL_MARK */
# if defined(GC_OPENBSD_UTHREADS) || defined(NACL)
    (void)GC_suspend_all(FALSE);
# else
    /* Advance the stop counter by 2: the low bit is reserved (it is  */
    /* set in last_stop_count to mark a thread as restarted, see      */
    /* GC_restart_all).                                               */
    AO_store(&GC_stop_count, (AO_t)((word)GC_stop_count + 2));
                        /* Only concurrent reads are possible.        */
#   ifdef MANUAL_VDB
      GC_acquire_dirty_lock();
      /* The write fault handler cannot be called if GC_manual_vdb   */
      /* (thus double-locking should not occur in                    */
      /* async_set_pht_entry_from_index based on test-and-set).      */
#   endif
    AO_store_release(&GC_world_is_stopped, TRUE);
    n_live_threads = GC_suspend_all(FALSE);
#ifndef UNITY_RETRY_SIGNALS
    if (GC_retry_signals)
      n_live_threads = resend_lost_signals(n_live_threads, GC_suspend_all);
    suspend_restart_barrier(n_live_threads);
#else
    /* Unity variant: the retry loop is folded into the barrier.      */
    if (GC_retry_signals)
      suspend_restart_barrier_retry(n_live_threads, GC_suspend_all);
    else
      suspend_restart_barrier(n_live_threads);
#endif
#   ifdef MANUAL_VDB
      GC_release_dirty_lock(); /* cannot be done in GC_suspend_all    */
#   endif
# endif
# ifdef PARALLEL_MARK
    if (GC_parallel)
      GC_release_mark_lock();
# endif
# ifdef DEBUG_THREADS
    GC_log_printf("World stopped from %p\n", (void *)pthread_self());
    GC_stopping_thread = 0;
# endif
}
#ifdef NACL

  /* NACL_STORE_REGS() pushes the callee-saved registers onto the     */
  /* stack, records the resulting stack pointer in                    */
  /* stop_info.stack_ptr, copies the pushed words into                */
  /* stop_info.reg_storage (so the collector can scan register        */
  /* contents), then pops the registers back off the stack.           */
# if defined(__x86_64__)
#   define NACL_STORE_REGS() \
        do { \
          __asm__ __volatile__ ("push %rbx"); \
          __asm__ __volatile__ ("push %rbp"); \
          __asm__ __volatile__ ("push %r12"); \
          __asm__ __volatile__ ("push %r13"); \
          __asm__ __volatile__ ("push %r14"); \
          __asm__ __volatile__ ("push %r15"); \
          __asm__ __volatile__ ("mov %%esp, %0" \
                    : "=m" (GC_nacl_gc_thread_self->stop_info.stack_ptr)); \
          BCOPY(GC_nacl_gc_thread_self->stop_info.stack_ptr, \
                GC_nacl_gc_thread_self->stop_info.reg_storage, \
                NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t)); \
          __asm__ __volatile__ ("naclasp $48, %r15"); \
        } while (0)
# elif defined(__i386__)
#   define NACL_STORE_REGS() \
        do { \
          __asm__ __volatile__ ("push %ebx"); \
          __asm__ __volatile__ ("push %ebp"); \
          __asm__ __volatile__ ("push %esi"); \
          __asm__ __volatile__ ("push %edi"); \
          __asm__ __volatile__ ("mov %%esp, %0" \
                    : "=m" (GC_nacl_gc_thread_self->stop_info.stack_ptr)); \
          BCOPY(GC_nacl_gc_thread_self->stop_info.stack_ptr, \
                GC_nacl_gc_thread_self->stop_info.reg_storage, \
                NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));\
          __asm__ __volatile__ ("add $16, %esp"); \
        } while (0)
# elif defined(__arm__)
#   define NACL_STORE_REGS() \
        do { \
          __asm__ __volatile__ ("push {r4-r8,r10-r12,lr}"); \
          __asm__ __volatile__ ("mov r0, %0" \
                : : "r" (&GC_nacl_gc_thread_self->stop_info.stack_ptr)); \
          __asm__ __volatile__ ("bic r0, r0, #0xc0000000"); \
          __asm__ __volatile__ ("str sp, [r0]"); \
          BCOPY(GC_nacl_gc_thread_self->stop_info.stack_ptr, \
                GC_nacl_gc_thread_self->stop_info.reg_storage, \
                NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t)); \
          __asm__ __volatile__ ("add sp, sp, #40"); \
          __asm__ __volatile__ ("bic sp, sp, #0xc0000000"); \
        } while (0)
# else
#   error TODO Please port NACL_STORE_REGS
# endif
  953. GC_API_OSCALL void nacl_pre_syscall_hook(void)
  954. {
  955. if (GC_nacl_thread_idx != -1) {
  956. NACL_STORE_REGS();
  957. GC_nacl_gc_thread_self->stop_info.stack_ptr = GC_approx_sp();
  958. GC_nacl_thread_parked[GC_nacl_thread_idx] = 1;
  959. }
  960. }
/* Park the current thread while a world-stop is in progress: store   */
/* its registers and stack pointer, then spin until the collector     */
/* clears GC_nacl_park_threads_now (done in GC_start_world).          */
GC_API_OSCALL void __nacl_suspend_thread_if_needed(void)
{
  if (GC_nacl_park_threads_now) {
    pthread_t self = pthread_self();

    /* Don't try to park the thread parker.                           */
    if (GC_nacl_thread_parker == self)
      return;

    /* This can happen when a thread is created outside of the GC     */
    /* system (wthread mostly).                                       */
    if (GC_nacl_thread_idx < 0)
      return;

    /* If it was already 'parked', we're returning from a syscall,    */
    /* so don't bother storing registers again, the GC has a set.     */
    if (!GC_nacl_thread_parked[GC_nacl_thread_idx]) {
      NACL_STORE_REGS();
      GC_nacl_gc_thread_self->stop_info.stack_ptr = GC_approx_sp();
    }
    GC_nacl_thread_parked[GC_nacl_thread_idx] = 1;

    /* Busy-wait until GC_start_world releases the world.             */
    while (GC_nacl_park_threads_now) {
      /* Just spin.                                                   */
    }
    GC_nacl_thread_parked[GC_nacl_thread_idx] = 0;

    /* Clear out the reg storage for next suspend.                    */
    BZERO(GC_nacl_gc_thread_self->stop_info.reg_storage,
          NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));
  }
}
  988. GC_API_OSCALL void nacl_post_syscall_hook(void)
  989. {
  990. /* Calling __nacl_suspend_thread_if_needed right away should */
  991. /* guarantee we don't mutate the GC set. */
  992. __nacl_suspend_thread_if_needed();
  993. if (GC_nacl_thread_idx != -1) {
  994. GC_nacl_thread_parked[GC_nacl_thread_idx] = 0;
  995. }
  996. }
/* One-time-initialization flag for the NaCl parking machinery.       */
STATIC GC_bool GC_nacl_thread_parking_inited = FALSE;
/* Serializes slot allocation in GC_nacl_thread_used/_parked and the  */
/* one-time hook installation.                                        */
STATIC pthread_mutex_t GC_nacl_thread_alloc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Subset of the NaCl IRT block-hook interface, used to install the   */
/* pre/post syscall hooks.                                            */
struct nacl_irt_blockhook {
  int (*register_block_hooks)(void (*pre)(void), void (*post)(void));
};

EXTERN_C_BEGIN
extern size_t nacl_interface_query(const char *interface_ident,
                                   void *table, size_t tablesize);
EXTERN_C_END
  1006. GC_INNER void GC_nacl_initialize_gc_thread(void)
  1007. {
  1008. int i;
  1009. static struct nacl_irt_blockhook gc_hook;
  1010. pthread_mutex_lock(&GC_nacl_thread_alloc_lock);
  1011. if (!EXPECT(GC_nacl_thread_parking_inited, TRUE)) {
  1012. BZERO(GC_nacl_thread_parked, sizeof(GC_nacl_thread_parked));
  1013. BZERO(GC_nacl_thread_used, sizeof(GC_nacl_thread_used));
  1014. /* TODO: replace with public 'register hook' function when */
  1015. /* available from glibc. */
  1016. nacl_interface_query("nacl-irt-blockhook-0.1",
  1017. &gc_hook, sizeof(gc_hook));
  1018. gc_hook.register_block_hooks(nacl_pre_syscall_hook,
  1019. nacl_post_syscall_hook);
  1020. GC_nacl_thread_parking_inited = TRUE;
  1021. }
  1022. GC_ASSERT(GC_nacl_num_gc_threads <= MAX_NACL_GC_THREADS);
  1023. for (i = 0; i < MAX_NACL_GC_THREADS; i++) {
  1024. if (GC_nacl_thread_used[i] == 0) {
  1025. GC_nacl_thread_used[i] = 1;
  1026. GC_nacl_thread_idx = i;
  1027. GC_nacl_num_gc_threads++;
  1028. break;
  1029. }
  1030. }
  1031. pthread_mutex_unlock(&GC_nacl_thread_alloc_lock);
  1032. }
  1033. GC_INNER void GC_nacl_shutdown_gc_thread(void)
  1034. {
  1035. pthread_mutex_lock(&GC_nacl_thread_alloc_lock);
  1036. GC_ASSERT(GC_nacl_thread_idx >= 0);
  1037. GC_ASSERT(GC_nacl_thread_idx < MAX_NACL_GC_THREADS);
  1038. GC_ASSERT(GC_nacl_thread_used[GC_nacl_thread_idx] != 0);
  1039. GC_nacl_thread_used[GC_nacl_thread_idx] = 0;
  1040. GC_nacl_thread_idx = -1;
  1041. GC_nacl_num_gc_threads--;
  1042. pthread_mutex_unlock(&GC_nacl_thread_alloc_lock);
  1043. }
  1044. #else /* !NACL */
/* Restart all threads that were suspended by the collector.          */
/* Return the number of restart signals that were sent.               */
STATIC int GC_restart_all(GC_bool is_retry)
{
  int n_live_threads = 0;
  int i;
  pthread_t self = pthread_self();
  GC_thread p;
# ifndef GC_OPENBSD_UTHREADS
    int result;
# endif

  for (i = 0; i < THREAD_TABLE_SZ; i++) {
    for (p = GC_threads[i]; p != NULL; p = p -> next) {
      if (!THREAD_EQUAL(p -> id, self)) {
        if ((p -> flags & FINISHED) != 0) continue;
        /* Blocked threads were never suspended (see GC_suspend_all). */
        if (p -> thread_blocked) continue;
#       ifndef GC_OPENBSD_UTHREADS
#         ifdef GC_ENABLE_SUSPEND_THREAD
            /* Client-suspended threads stay suspended.               */
            if (p -> suspended_ext) continue;
#         endif
          /* On a retry, skip threads whose handler already set the   */
          /* low ("restarted") bit of last_stop_count.                */
          if (is_retry && AO_load(&p->stop_info.last_stop_count)
                          == (AO_t)((word)GC_stop_count | 1))
            continue; /* The thread has been restarted.               */
          n_live_threads++;
#       endif
#       ifdef DEBUG_THREADS
          GC_log_printf("Sending restart signal to %p\n", (void *)p->id);
#       endif
#       ifdef GC_OPENBSD_UTHREADS
          if (pthread_resume_np(p -> id) != 0)
            ABORT("pthread_resume_np failed");
          if (GC_on_thread_event)
            GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED, (void *)p->id);
#       else
          result = RAISE_SIGNAL(p, GC_sig_thr_restart);
          switch(result) {
          case ESRCH:
            /* Not really there anymore.  Possible?                   */
            n_live_threads--;
            break;
          case 0:
            if (GC_on_thread_event)
              GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED,
                                 (void *)(word)THREAD_SYSTEM_ID(p));
            break;
          default:
            ABORT_ARG1("pthread_kill failed at resume",
                       ": errcode= %d", result);
          }
#       endif
      }
    }
  }
  return n_live_threads;
}
  1100. #endif /* !NACL */
/* Caller holds allocation lock, and has held it continuously since   */
/* the world stopped.                                                 */
GC_INNER void GC_start_world(void)
{
# ifndef NACL
    int n_live_threads;

    GC_ASSERT(I_HOLD_LOCK());
#   ifdef DEBUG_THREADS
      GC_log_printf("World starting\n");
#   endif
#   ifndef GC_OPENBSD_UTHREADS
      AO_store_release(&GC_world_is_stopped, FALSE);
      /* The updated value should now be visible to the               */
      /* signal handler (note that pthread_kill is not on             */
      /* the list of functions which synchronize memory).             */
#   endif
    n_live_threads = GC_restart_all(FALSE);
#   ifndef GC_OPENBSD_UTHREADS
#     ifndef UNITY_RETRY_SIGNALS
        if (GC_retry_signals)
          n_live_threads = resend_lost_signals(n_live_threads,
                                               GC_restart_all);
#     endif
#     ifdef GC_NETBSD_THREADS_WORKAROUND
        /* Always wait for every restart to be acknowledged.          */
        suspend_restart_barrier(n_live_threads);
#     else
        /* Otherwise wait only when signal retrying is enabled.       */
        if (GC_retry_signals)
#         ifndef UNITY_RETRY_SIGNALS
            suspend_restart_barrier(n_live_threads);
#         else
            /* Unity variant: the barrier itself retries lost signals. */
            suspend_restart_barrier_retry(n_live_threads, GC_restart_all);
#         endif
#     endif
#   else
      (void)n_live_threads;
#   endif
#   ifdef DEBUG_THREADS
      GC_log_printf("World started\n");
#   endif
# else /* NACL */
#   ifdef DEBUG_THREADS
      GC_log_printf("World starting...\n");
#   endif
    /* Releasing this flag lets every spinning thread leave           */
    /* __nacl_suspend_thread_if_needed.                               */
    GC_nacl_park_threads_now = 0;
    if (GC_on_thread_event)
      GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED, NULL);
      /* TODO: Send event for every unsuspended thread.               */
# endif
}
/* One-time initialization of the stop-the-world machinery: choose    */
/* the suspend/restart signals, create the acknowledgment semaphore,  */
/* and install the signal handlers.                                   */
GC_INNER void GC_stop_init(void)
{
# if !defined(GC_OPENBSD_UTHREADS) && !defined(NACL)
    struct sigaction act;
    char *str;

    if (SIGNAL_UNSET == GC_sig_suspend)
      GC_sig_suspend = SIG_SUSPEND;
    if (SIGNAL_UNSET == GC_sig_thr_restart)
      GC_sig_thr_restart = SIG_THR_RESTART;
    if (GC_sig_suspend == GC_sig_thr_restart)
      ABORT("Cannot use same signal for thread suspend and resume");

    if (sem_init(&GC_suspend_ack_sem, GC_SEM_INIT_PSHARED, 0) != 0)
      ABORT("sem_init failed");

    /* Assemble sa_flags from the features available on this platform. */
#   ifdef SA_RESTART
      act.sa_flags = SA_RESTART
#   else
      act.sa_flags = 0
#   endif
#   ifndef NO_SA_SIGACTION
                     | SA_SIGINFO
#   endif
        ;
    if (sigfillset(&act.sa_mask) != 0) {
      ABORT("sigfillset failed");
    }
#   ifdef GC_RTEMS_PTHREADS
      if(sigprocmask(SIG_UNBLOCK, &act.sa_mask, NULL) != 0) {
        ABORT("sigprocmask failed");
      }
#   endif
    GC_remove_allowed_signals(&act.sa_mask);
    /* GC_sig_thr_restart is set in the resulting mask.               */
    /* It is unmasked by the handler when necessary.                  */
#   ifndef NO_SA_SIGACTION
      act.sa_sigaction = GC_suspend_handler;
#   else
      act.sa_handler = GC_suspend_handler;
#   endif
    /* act.sa_restorer is deprecated and should not be initialized.   */
    if (sigaction(GC_sig_suspend, &act, NULL) != 0) {
      ABORT("Cannot set SIG_SUSPEND handler");
    }

#   ifndef NO_SA_SIGACTION
      act.sa_flags &= ~SA_SIGINFO;
#   endif
    act.sa_handler = GC_restart_handler;
    if (sigaction(GC_sig_thr_restart, &act, NULL) != 0) {
      ABORT("Cannot set SIG_THR_RESTART handler");
    }

    /* Initialize suspend_handler_mask (excluding GC_sig_thr_restart). */
    if (sigfillset(&suspend_handler_mask) != 0) ABORT("sigfillset failed");
    GC_remove_allowed_signals(&suspend_handler_mask);
    if (sigdelset(&suspend_handler_mask, GC_sig_thr_restart) != 0)
      ABORT("sigdelset failed");

    /* Override the default value of GC_retry_signals.                */
    str = GETENV("GC_RETRY_SIGNALS");
    if (str != NULL) {
      if (*str == '0' && *(str + 1) == '\0') {
        /* Do not retry if the environment variable is set to "0".    */
        GC_retry_signals = FALSE;
      } else {
        GC_retry_signals = TRUE;
      }
    }
    if (GC_retry_signals) {
      GC_COND_LOG_PRINTF(
                "Will retry suspend and restart signals if necessary\n");
    }
#   ifndef NO_SIGNALS_UNBLOCK_IN_MAIN
      /* Explicitly unblock the signals once before new threads creation. */
      GC_unblock_gc_signals();
#   endif
#   ifdef HOST_ANDROID
      GC_block_android_signal_catcher_signals();
#   endif
# endif /* !GC_OPENBSD_UTHREADS && !NACL */
}
  1226. #endif /* GC_PTHREADS && !GC_DARWIN_THREADS && !GC_WIN32_THREADS */