test.c

  1. /*
  2. * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
  3. * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
  4. * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
  5. *
  6. * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
  7. * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
  8. *
  9. * Permission is hereby granted to use or copy this program
  10. * for any purpose, provided the above notices are retained on all copies.
  11. * Permission to modify the code and to distribute modified code is granted,
  12. * provided the above notices are retained, and a notice that the code was
  13. * modified is included with the above copyright notice.
  14. */
  15. /* An incomplete test for the garbage collector. */
  16. /* Some more obscure entry points are not tested at all. */
  17. /* This must be compiled with the same flags used to build the */
  18. /* GC. It uses GC internals to allow more precise results */
  19. /* checking for some of the tests. */
  20. # ifdef HAVE_CONFIG_H
  21. # include "config.h"
  22. # endif
  23. # undef GC_BUILD
  24. #if (defined(DBG_HDRS_ALL) || defined(MAKE_BACK_GRAPH)) \
  25. && !defined(GC_DEBUG) && !defined(CPPCHECK)
  26. # define GC_DEBUG
  27. #endif
  28. #ifdef DEFAULT_VDB /* specified manually (e.g. passed to CFLAGS) */
  29. # define TEST_DEFAULT_VDB
  30. #endif
  31. #if defined(CPPCHECK) && defined(GC_PTHREADS) && !defined(_GNU_SOURCE)
  32. # define _GNU_SOURCE 1
  33. #endif
  34. #undef GC_NO_THREAD_REDIRECTS
  35. #include "gc.h"
  36. #ifndef NTHREADS /* Number of additional threads to fork. */
  37. # define NTHREADS 5 /* excludes main thread, which also runs a test. */
  38. /* Not respected by PCR test. */
  39. #endif
  40. # if defined(mips) && defined(SYSTYPE_BSD43)
  41. /* MIPS RISCOS 4 */
  42. # else
  43. # include <stdlib.h>
  44. # endif
  45. # include <stdio.h>
  46. # if defined(_WIN32_WCE) && !defined(__GNUC__)
  47. # include <winbase.h>
  48. /* # define assert ASSERT */
  49. # else
  50. # include <assert.h> /* Not normally used, but handy for debugging. */
  51. # endif
  52. # include "gc_typed.h"
  53. # include "private/gc_priv.h" /* For output, locking, MIN_WORDS, */
  54. /* some statistics and gcconfig.h. */
  55. # if defined(MSWIN32) || defined(MSWINCE)
  56. # include <windows.h>
  57. # endif
  58. #ifdef GC_PRINT_VERBOSE_STATS
  59. # define print_stats VERBOSE
  60. # define INIT_PRINT_STATS /* empty */
  61. #else
  62. /* Use own variable as GC_print_stats might not be exported. */
  63. static int print_stats = 0;
  64. # ifdef GC_READ_ENV_FILE
  65. /* GETENV uses GC internal function in this case. */
  66. # define INIT_PRINT_STATS /* empty */
  67. # else
  68. # define INIT_PRINT_STATS \
  69. { \
  70. if (0 != GETENV("GC_PRINT_VERBOSE_STATS")) \
  71. print_stats = VERBOSE; \
  72. else if (0 != GETENV("GC_PRINT_STATS")) \
  73. print_stats = 1; \
  74. }
  75. # endif
  76. #endif /* !GC_PRINT_VERBOSE_STATS */
  77. # ifdef PCR
  78. # include "th/PCR_ThCrSec.h"
  79. # include "th/PCR_Th.h"
  80. # define GC_printf printf
  81. # endif
  82. # if defined(GC_PTHREADS) && !defined(GC_WIN32_PTHREADS)
  83. # include <pthread.h>
  84. # else
  85. # define NO_TEST_HANDLE_FORK
  86. # endif
  87. # if (!defined(THREADS) || !defined(HANDLE_FORK) \
  88. || (defined(DARWIN) && defined(MPROTECT_VDB) \
  89. && !defined(NO_INCREMENTAL) && !defined(MAKE_BACK_GRAPH))) \
  90. && !defined(NO_TEST_HANDLE_FORK) && !defined(TEST_HANDLE_FORK) \
  91. && !defined(TEST_FORK_WITHOUT_ATFORK)
  92. # define NO_TEST_HANDLE_FORK
  93. # endif
  94. # ifndef NO_TEST_HANDLE_FORK
  95. # include <unistd.h>
  96. # include <sys/types.h>
  97. # include <sys/wait.h>
  98. # if defined(HANDLE_FORK) && defined(CAN_CALL_ATFORK)
  99. # define INIT_FORK_SUPPORT GC_set_handle_fork(1)
  100. /* Causes abort in GC_init on pthread_atfork failure. */
  101. # elif !defined(TEST_FORK_WITHOUT_ATFORK)
  102. # define INIT_FORK_SUPPORT GC_set_handle_fork(-1)
  103. /* Passing -1 means fork() should also be manually */
  104. /* surrounded with GC_atfork_prepare/parent/child. */
  105. # endif
  106. # endif
  107. # ifndef INIT_FORK_SUPPORT
  108. # define INIT_FORK_SUPPORT /* empty */
  109. # endif
  110. #ifdef PCR
  111. # define FINALIZER_LOCK() PCR_ThCrSec_EnterSys()
  112. # define FINALIZER_UNLOCK() PCR_ThCrSec_ExitSys()
  113. #elif defined(GC_PTHREADS)
  114. static pthread_mutex_t incr_lock = PTHREAD_MUTEX_INITIALIZER;
  115. # define FINALIZER_LOCK() pthread_mutex_lock(&incr_lock)
  116. # define FINALIZER_UNLOCK() pthread_mutex_unlock(&incr_lock)
  117. #elif defined(GC_WIN32_THREADS)
  118. static CRITICAL_SECTION incr_cs;
  119. # define FINALIZER_LOCK() EnterCriticalSection(&incr_cs)
  120. # define FINALIZER_UNLOCK() LeaveCriticalSection(&incr_cs)
  121. #else
  122. # define FINALIZER_LOCK() (void)0
  123. # define FINALIZER_UNLOCK() (void)0
  124. #endif /* !THREADS */
  125. #include <stdarg.h>
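/* GC_get_version() (from gc.h) packs the collector version as          */
/* (major << 16) | (minor << 8) | micro, which is what the macro below  */
/* compares against the version of the headers this test was built with. */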
  126. #define CHECK_GCLIB_VERSION \
  127. if (GC_get_version() != ((GC_VERSION_MAJOR<<16) \
  128. | (GC_VERSION_MINOR<<8) \
  129. | GC_VERSION_MICRO)) { \
  130. GC_printf("libgc version mismatch\n"); \
  131. exit(1); \
  132. }
  133. /* Call GC_INIT only on platforms on which we think we really need it, */
  134. /* so that we can test automatic initialization on the rest. */
  135. #if defined(TEST_EXPLICIT_GC_INIT) || defined(AIX) || defined(CYGWIN32) \
  136. || defined(DARWIN) || defined(HOST_ANDROID) \
  137. || (defined(MSWINCE) && !defined(GC_WINMAIN_REDIRECT))
  138. # define GC_OPT_INIT GC_INIT()
  139. #else
  140. # define GC_OPT_INIT /* empty */
  141. #endif
  142. #ifdef NO_CLOCK
  143. # define INIT_PERF_MEASUREMENT (void)0
  144. #else
  145. # define INIT_PERF_MEASUREMENT GC_start_performance_measurement()
  146. #endif
  147. #define GC_COND_INIT() \
  148. INIT_FORK_SUPPORT; GC_OPT_INIT; CHECK_GCLIB_VERSION; \
  149. INIT_PRINT_STATS; INIT_PERF_MEASUREMENT
  150. #define CHECK_OUT_OF_MEMORY(p) \
  151. if ((p) == NULL) { \
  152. GC_printf("Out of memory\n"); \
  153. exit(1); \
  154. }
  155. /* Define AO primitives for a single-threaded mode. */
  156. #ifndef AO_CLEAR
  157. /* AO_t not defined. */
  158. # define AO_t GC_word
  159. #endif
  160. #ifndef AO_HAVE_load_acquire
  161. static AO_t AO_load_acquire(const volatile AO_t *addr)
  162. {
  163. AO_t result;
  164. FINALIZER_LOCK();
  165. result = *addr;
  166. FINALIZER_UNLOCK();
  167. return result;
  168. }
  169. #endif
  170. #ifndef AO_HAVE_store_release
  171. /* Not a macro as new_val argument should be evaluated before the lock. */
  172. static void AO_store_release(volatile AO_t *addr, AO_t new_val)
  173. {
  174. FINALIZER_LOCK();
  175. *addr = new_val;
  176. FINALIZER_UNLOCK();
  177. }
  178. #endif
  179. #ifndef AO_HAVE_fetch_and_add1
  180. # define AO_fetch_and_add1(p) ((*(p))++)
  181. /* This is used only to update counters. */
  182. #endif
  183. /* Allocation Statistics. Synchronization is not strictly necessary. */
  184. volatile AO_t uncollectable_count = 0;
  185. volatile AO_t collectable_count = 0;
  186. volatile AO_t atomic_count = 0;
  187. volatile AO_t realloc_count = 0;
  188. volatile AO_t extra_count = 0; /* Amount of space wasted in cons node; */
  189. /* also used in gcj_cons, mktree and */
  190. /* chktree (for other purposes). */
  191. #if defined(GC_AMIGA_FASTALLOC) && defined(AMIGA)
  192. EXTERN_C_BEGIN
  193. void GC_amiga_free_all_mem(void);
  194. EXTERN_C_END
  195. void Amiga_Fail(void){GC_amiga_free_all_mem();abort();}
  196. # define FAIL Amiga_Fail()
  197. void *GC_amiga_gctest_malloc_explicitly_typed(size_t lb, GC_descr d){
  198. void *ret=GC_malloc_explicitly_typed(lb,d);
  199. if(ret==NULL){
  200. GC_gcollect();
  201. ret=GC_malloc_explicitly_typed(lb,d);
  202. if(ret==NULL){
  203. GC_printf("Out of memory, (typed allocations are not directly "
  204. "supported with the GC_AMIGA_FASTALLOC option.)\n");
  205. FAIL;
  206. }
  207. }
  208. return ret;
  209. }
  210. void *GC_amiga_gctest_calloc_explicitly_typed(size_t a,size_t lb, GC_descr d){
  211. void *ret=GC_calloc_explicitly_typed(a,lb,d);
  212. if(ret==NULL){
  213. GC_gcollect();
  214. ret=GC_calloc_explicitly_typed(a,lb,d);
  215. if(ret==NULL){
  216. GC_printf("Out of memory, (typed allocations are not directly "
  217. "supported with the GC_AMIGA_FASTALLOC option.)\n");
  218. FAIL;
  219. }
  220. }
  221. return ret;
  222. }
  223. # define GC_malloc_explicitly_typed(a,b) GC_amiga_gctest_malloc_explicitly_typed(a,b)
  224. # define GC_calloc_explicitly_typed(a,b,c) GC_amiga_gctest_calloc_explicitly_typed(a,b,c)
  225. #else /* !AMIGA_FASTALLOC */
  226. # if defined(PCR) || defined(LINT2)
  227. # define FAIL abort()
  228. # else
  229. # define FAIL ABORT("Test failed")
  230. # endif
  231. #endif /* !AMIGA_FASTALLOC */
  232. /* AT_END may be defined to exercise the interior pointer test */
  233. /* if the collector is configured with ALL_INTERIOR_POINTERS. */
  234. /* As it stands, this test should succeed with either */
  235. /* configuration. In the FIND_LEAK configuration, it should */
  236. /* find lots of leaks, since we free almost nothing. */
  237. struct SEXPR {
  238. struct SEXPR * sexpr_car;
  239. struct SEXPR * sexpr_cdr;
  240. };
  241. typedef struct SEXPR * sexpr;
  242. # define INT_TO_SEXPR(x) ((sexpr)(GC_word)(x))
  243. # define SEXPR_TO_INT(x) ((int)(GC_word)(x))
  244. # undef nil
  245. # define nil (INT_TO_SEXPR(0))
  246. # define car(x) ((x) -> sexpr_car)
  247. # define cdr(x) ((x) -> sexpr_cdr)
  248. # define is_nil(x) ((x) == nil)
  249. /* Silly implementation of Lisp cons. Intentionally wastes lots of space */
  250. /* to test collector. */
  251. # ifdef VERY_SMALL_CONFIG
  252. # define cons small_cons
  253. # else
  254. sexpr cons (sexpr x, sexpr y)
  255. {
  256. sexpr r;
  257. int *p;
  258. unsigned my_extra = (unsigned)AO_fetch_and_add1(&extra_count) % 5000;
  259. r = (sexpr)GC_MALLOC(sizeof(struct SEXPR) + my_extra);
  260. CHECK_OUT_OF_MEMORY(r);
  261. AO_fetch_and_add1(&collectable_count);
  262. for (p = (int *)r;
  263. (word)p < (word)r + my_extra + sizeof(struct SEXPR); p++) {
  264. if (*p) {
  265. GC_printf("Found nonzero at %p - allocator is broken\n",
  266. (void *)p);
  267. FAIL;
  268. }
  269. *p = (int)((13 << 12) + ((p - (int *)r) & 0xfff));
  270. }
  271. # ifdef AT_END
  272. r = (sexpr)((char *)r + (my_extra & ~7));
  273. # endif
  274. r -> sexpr_car = x;
  275. r -> sexpr_cdr = y;
  276. GC_END_STUBBORN_CHANGE(r);
  277. return(r);
  278. }
  279. # endif
  280. #include "gc_mark.h"
  281. #ifdef GC_GCJ_SUPPORT
  282. #include "gc_gcj.h"
  283. /* The following struct emulates the vtable in gcj. */
  284. /* This assumes the default value of MARK_DESCR_OFFSET. */
  285. struct fake_vtable {
  286. void * dummy; /* class pointer in real gcj. */
  287. GC_word descr;
  288. };
  289. struct fake_vtable gcj_class_struct1 = { 0, sizeof(struct SEXPR)
  290. + sizeof(struct fake_vtable *) };
  291. /* length based descriptor. */
  292. struct fake_vtable gcj_class_struct2 =
  293. { 0, ((GC_word)3 << (CPP_WORDSZ - 3)) | GC_DS_BITMAP};
  294. /* Bitmap based descriptor. */
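/* Mark procedure registered via GC_init_gcj_malloc() in run_one_test(): */
/* it pushes the sexpr_car and sexpr_cdr fields of a gcj_cons() object   */
/* (skipping the fake vtable pointer) onto the mark stack; env is 1 for  */
/* objects allocated through the debug allocator.                        */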
  295. struct GC_ms_entry * fake_gcj_mark_proc(word * addr,
  296. struct GC_ms_entry *mark_stack_ptr,
  297. struct GC_ms_entry *mark_stack_limit,
  298. word env )
  299. {
  300. sexpr x;
  301. if (1 == env) {
  302. /* Object allocated with debug allocator. */
  303. addr = (word *)GC_USR_PTR_FROM_BASE(addr);
  304. }
  305. x = (sexpr)(addr + 1); /* Skip the vtable pointer. */
  306. mark_stack_ptr = GC_MARK_AND_PUSH(
  307. (void *)(x -> sexpr_cdr), mark_stack_ptr,
  308. mark_stack_limit, (void * *)&(x -> sexpr_cdr));
  309. mark_stack_ptr = GC_MARK_AND_PUSH(
  310. (void *)(x -> sexpr_car), mark_stack_ptr,
  311. mark_stack_limit, (void * *)&(x -> sexpr_car));
  312. return(mark_stack_ptr);
  313. }
  314. #endif /* GC_GCJ_SUPPORT */
  315. sexpr small_cons (sexpr x, sexpr y)
  316. {
  317. sexpr r = GC_NEW(struct SEXPR);
  318. CHECK_OUT_OF_MEMORY(r);
  319. AO_fetch_and_add1(&collectable_count);
  320. r -> sexpr_car = x;
  321. r -> sexpr_cdr = y;
  322. GC_END_STUBBORN_CHANGE(r);
  323. return(r);
  324. }
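/* Like small_cons(), but the node is uncollectable and the cdr is      */
/* stored bit-complemented so the collector cannot trace it; see        */
/* uncollectable_ints() and UNCOLLECTABLE_CDR() below.                  */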
  325. sexpr small_cons_uncollectable (sexpr x, sexpr y)
  326. {
  327. sexpr r = (sexpr)GC_MALLOC_UNCOLLECTABLE(sizeof(struct SEXPR));
  328. CHECK_OUT_OF_MEMORY(r);
  329. AO_fetch_and_add1(&uncollectable_count);
  330. r -> sexpr_car = x;
  331. r -> sexpr_cdr = (sexpr)(~(GC_word)y);
  332. GC_END_STUBBORN_CHANGE(r);
  333. return(r);
  334. }
  335. #ifdef GC_GCJ_SUPPORT
  336. sexpr gcj_cons(sexpr x, sexpr y)
  337. {
  338. sexpr result;
  339. GC_word * r = (GC_word *)GC_GCJ_MALLOC(
  340. sizeof(struct SEXPR) + sizeof(struct fake_vtable*),
  341. (AO_fetch_and_add1(&extra_count) & 1) != 0
  342. ? &gcj_class_struct1
  343. : &gcj_class_struct2);
  344. CHECK_OUT_OF_MEMORY(r);
  345. result = (sexpr)(r + 1);
  346. result -> sexpr_car = x;
  347. result -> sexpr_cdr = y;
  348. GC_END_STUBBORN_CHANGE(r);
  349. return(result);
  350. }
  351. #endif /* GC_GCJ_SUPPORT */
  352. /* Return reverse(x) concatenated with y */
  353. sexpr reverse1(sexpr x, sexpr y)
  354. {
  355. if (is_nil(x)) {
  356. return(y);
  357. } else {
  358. return( reverse1(cdr(x), cons(car(x), y)) );
  359. }
  360. }
  361. sexpr reverse(sexpr x)
  362. {
  363. # ifdef TEST_WITH_SYSTEM_MALLOC
  364. GC_noop1(GC_HIDE_POINTER(malloc(100000)));
  365. # endif
  366. return( reverse1(x, nil) );
  367. }
  368. sexpr ints(int low, int up)
  369. {
  370. if (low > up) {
  371. return(nil);
  372. } else {
  373. return(small_cons(small_cons(INT_TO_SEXPR(low), nil), ints(low+1, up)));
  374. }
  375. }
  376. #ifdef GC_GCJ_SUPPORT
  377. /* Return reverse(x) concatenated with y */
  378. sexpr gcj_reverse1(sexpr x, sexpr y)
  379. {
  380. if (is_nil(x)) {
  381. return(y);
  382. } else {
  383. return( gcj_reverse1(cdr(x), gcj_cons(car(x), y)) );
  384. }
  385. }
  386. sexpr gcj_reverse(sexpr x)
  387. {
  388. return( gcj_reverse1(x, nil) );
  389. }
  390. sexpr gcj_ints(int low, int up)
  391. {
  392. if (low > up) {
  393. return(nil);
  394. } else {
  395. return(gcj_cons(gcj_cons(INT_TO_SEXPR(low), nil), gcj_ints(low+1, up)));
  396. }
  397. }
  398. #endif /* GC_GCJ_SUPPORT */
  399. /* To check uncollectable allocation we build lists with disguised cdr */
  400. /* pointers, and make sure they don't go away. */
  401. sexpr uncollectable_ints(int low, int up)
  402. {
  403. if (low > up) {
  404. return(nil);
  405. } else {
  406. return(small_cons_uncollectable(small_cons(INT_TO_SEXPR(low), nil),
  407. uncollectable_ints(low+1, up)));
  408. }
  409. }
  410. void check_ints(sexpr list, int low, int up)
  411. {
  412. if (is_nil(list)) {
  413. GC_printf("list is nil\n");
  414. FAIL;
  415. }
  416. if (SEXPR_TO_INT(car(car(list))) != low) {
  417. GC_printf(
  418. "List reversal produced incorrect list - collector is broken\n");
  419. FAIL;
  420. }
  421. if (low == up) {
  422. if (cdr(list) != nil) {
  423. GC_printf("List too long - collector is broken\n");
  424. FAIL;
  425. }
  426. } else {
  427. check_ints(cdr(list), low+1, up);
  428. }
  429. }
  430. # define UNCOLLECTABLE_CDR(x) (sexpr)(~(GC_word)(cdr(x)))
  431. void check_uncollectable_ints(sexpr list, int low, int up)
  432. {
  433. if (SEXPR_TO_INT(car(car(list))) != low) {
  434. GC_printf("Uncollectable list corrupted - collector is broken\n");
  435. FAIL;
  436. }
  437. if (low == up) {
  438. if (UNCOLLECTABLE_CDR(list) != nil) {
  439. GC_printf("Uncollectable list too long - collector is broken\n");
  440. FAIL;
  441. }
  442. } else {
  443. check_uncollectable_ints(UNCOLLECTABLE_CDR(list), low+1, up);
  444. }
  445. }
  446. /* Not used, but useful for debugging: */
  447. void print_int_list(sexpr x)
  448. {
  449. if (is_nil(x)) {
  450. GC_printf("NIL\n");
  451. } else {
  452. GC_printf("(%d)", SEXPR_TO_INT(car(car(x))));
  453. if (!is_nil(cdr(x))) {
  454. GC_printf(", ");
  455. print_int_list(cdr(x));
  456. } else {
  457. GC_printf("\n");
  458. }
  459. }
  460. }
  461. /* ditto: */
  462. void check_marks_int_list(sexpr x)
  463. {
  464. if (!GC_is_marked(x))
  465. GC_printf("[unm:%p]", (void *)x);
  466. else
  467. GC_printf("[mkd:%p]", (void *)x);
  468. if (is_nil(x)) {
  469. GC_printf("NIL\n");
  470. } else {
  471. if (!GC_is_marked(car(x)))
  472. GC_printf("[unm car:%p]", (void *)car(x));
  473. GC_printf("(%d)", SEXPR_TO_INT(car(car(x))));
  474. if (!is_nil(cdr(x))) {
  475. GC_printf(", ");
  476. check_marks_int_list(cdr(x));
  477. } else {
  478. GC_printf("\n");
  479. }
  480. }
  481. }
  482. /*
  483. * A tiny list reversal test to check thread creation.
  484. */
  485. #ifdef THREADS
  486. # ifdef VERY_SMALL_CONFIG
  487. # define TINY_REVERSE_UPPER_VALUE 4
  488. # else
  489. # define TINY_REVERSE_UPPER_VALUE 10
  490. # endif
  491. # if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
  492. DWORD __stdcall tiny_reverse_test(void * arg GC_ATTR_UNUSED)
  493. # else
  494. void * tiny_reverse_test(void * arg GC_ATTR_UNUSED)
  495. # endif
  496. {
  497. int i;
  498. for (i = 0; i < 5; ++i) {
  499. check_ints(reverse(reverse(ints(1, TINY_REVERSE_UPPER_VALUE))),
  500. 1, TINY_REVERSE_UPPER_VALUE);
  501. }
  502. # if defined(GC_ENABLE_SUSPEND_THREAD)
  503. /* Force collection from a thread. */
  504. GC_gcollect();
  505. # endif
  506. return 0;
  507. }
  508. # if defined(GC_PTHREADS)
  509. # if defined(GC_ENABLE_SUSPEND_THREAD)
  510. # include "javaxfc.h"
  511. # endif
  512. void fork_a_thread(void)
  513. {
  514. pthread_t t;
  515. int code;
  516. code = pthread_create(&t, NULL, tiny_reverse_test, 0);
  517. if (code != 0) {
  518. GC_printf("Small thread creation failed %d\n", code);
  519. FAIL;
  520. }
  521. # if defined(GC_ENABLE_SUSPEND_THREAD) && !defined(GC_DARWIN_THREADS) \
  522. && !defined(GC_OPENBSD_UTHREADS) && !defined(GC_WIN32_THREADS) \
  523. && !defined(NACL) && !defined(GC_OSF1_THREADS)
  524. if (GC_is_thread_suspended(t)) {
  525. GC_printf("Running thread should be not suspended\n");
  526. FAIL;
  527. }
  528. /* Thread could be running or already terminated (but not joined). */
  529. GC_suspend_thread(t);
  530. if (!GC_is_thread_suspended(t)) {
  531. GC_printf("Thread expected to be suspended\n");
  532. FAIL;
  533. }
  534. GC_suspend_thread(t); /* should be no-op */
  535. GC_resume_thread(t);
  536. if (GC_is_thread_suspended(t)) {
  537. GC_printf("Resumed thread should be not suspended\n");
  538. FAIL;
  539. }
  540. GC_resume_thread(t); /* should be no-op */
  541. # endif
  542. if ((code = pthread_join(t, 0)) != 0) {
  543. GC_printf("Small thread join failed %d\n", code);
  544. FAIL;
  545. }
  546. }
  547. # elif defined(GC_WIN32_THREADS)
  548. void fork_a_thread(void)
  549. {
  550. DWORD thread_id;
  551. HANDLE h;
  552. h = GC_CreateThread((SECURITY_ATTRIBUTES *)NULL, (word)0,
  553. tiny_reverse_test, NULL, (DWORD)0, &thread_id);
  554. /* Explicitly specify types of the */
  555. /* arguments to test the prototype. */
  556. if (h == (HANDLE)NULL) {
  557. GC_printf("Small thread creation failed %d\n",
  558. (int)GetLastError());
  559. FAIL;
  560. }
  561. if (WaitForSingleObject(h, INFINITE) != WAIT_OBJECT_0) {
  562. GC_printf("Small thread wait failed %d\n",
  563. (int)GetLastError());
  564. FAIL;
  565. }
  566. }
  567. # endif
  568. #endif
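/* Verify that GC_get_kind_and_size() agrees with GC_size() and that    */
/* GC_GENERIC_OR_SPECIAL_MALLOC() yields an object of the same kind.    */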
  569. void test_generic_malloc_or_special(void *p) {
  570. size_t size;
  571. int kind = GC_get_kind_and_size(p, &size);
  572. void *p2;
  573. if (size != GC_size(p)) {
  574. GC_printf("GC_get_kind_and_size returned size not matching GC_size\n");
  575. FAIL;
  576. }
  577. p2 = GC_GENERIC_OR_SPECIAL_MALLOC(10, kind);
  578. CHECK_OUT_OF_MEMORY(p2);
  579. if (GC_get_kind_and_size(p2, NULL) != kind) {
  580. GC_printf("GC_generic_or_special_malloc:"
  581. " unexpected kind of returned object\n");
  582. FAIL;
  583. }
  584. GC_FREE(p2);
  585. }
  586. /* Try to force a to be strangely aligned */
  587. volatile struct A_s {
  588. char dummy;
  589. AO_t aa;
  590. } A;
  591. #define a_set(p) AO_store_release(&A.aa, (AO_t)(p))
  592. #define a_get() (sexpr)AO_load_acquire(&A.aa)
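/* A.aa holds a list shared by every thread running reverse_test(); it  */
/* is touched only through a_get()/a_set(), whose acquire/release       */
/* semantics make the unlocked updates safe (see the comment inside     */
/* reverse_test_inner()).                                               */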
  593. /*
  594. * Repeatedly reverse lists built out of very different sized cons cells.
  595. * Check that we didn't lose anything.
  596. */
  597. void *GC_CALLBACK reverse_test_inner(void *data)
  598. {
  599. int i;
  600. sexpr b;
  601. sexpr c;
  602. sexpr d;
  603. sexpr e;
  604. sexpr *f, *g, *h;
  605. if (data == 0) {
  606. /* This stack frame is not guaranteed to be scanned. */
  607. return GC_call_with_gc_active(reverse_test_inner, (void*)(word)1);
  608. }
  609. # if defined(MACOS) \
  610. || (defined(UNIX_LIKE) && defined(NO_GETCONTEXT)) /* e.g. musl */
  611. /* Assume 128K stacks at least. */
  612. # define BIG 1000
  613. # elif defined(PCR)
  614. /* PCR default stack is 100K. Stack frames are up to 120 bytes. */
  615. # define BIG 700
  616. # elif defined(MSWINCE) || defined(RTEMS)
  617. /* WinCE only allows 64K stacks */
  618. # define BIG 500
  619. # elif defined(OSF1)
  620. /* OSF has limited stack space by default, and large frames. */
  621. # define BIG 200
  622. # elif defined(__MACH__) && defined(__ppc64__)
  623. # define BIG 2500
  624. # else
  625. # define BIG 4500
  626. # endif
  627. a_set(ints(1, 49));
  628. b = ints(1, 50);
  629. c = ints(1, BIG);
  630. d = uncollectable_ints(1, 100);
  631. test_generic_malloc_or_special(d);
  632. e = uncollectable_ints(1, 1);
  633. /* Check that realloc updates object descriptors correctly */
  634. AO_fetch_and_add1(&collectable_count);
  635. f = (sexpr *)GC_MALLOC(4 * sizeof(sexpr));
  636. f = (sexpr *)GC_REALLOC((void *)f, 6 * sizeof(sexpr));
  637. CHECK_OUT_OF_MEMORY(f);
  638. AO_fetch_and_add1(&realloc_count);
  639. f[5] = ints(1,17);
  640. AO_fetch_and_add1(&collectable_count);
  641. g = (sexpr *)GC_MALLOC(513 * sizeof(sexpr));
  642. test_generic_malloc_or_special(g);
  643. g = (sexpr *)GC_REALLOC((void *)g, 800 * sizeof(sexpr));
  644. CHECK_OUT_OF_MEMORY(g);
  645. AO_fetch_and_add1(&realloc_count);
  646. g[799] = ints(1,18);
  647. AO_fetch_and_add1(&collectable_count);
  648. h = (sexpr *)GC_MALLOC(1025 * sizeof(sexpr));
  649. h = (sexpr *)GC_REALLOC((void *)h, 2000 * sizeof(sexpr));
  650. CHECK_OUT_OF_MEMORY(h);
  651. AO_fetch_and_add1(&realloc_count);
  652. # ifdef GC_GCJ_SUPPORT
  653. h[1999] = gcj_ints(1,200);
  654. for (i = 0; i < 51; ++i)
  655. h[1999] = gcj_reverse(h[1999]);
  656. /* Leave it as the reversed list for now. */
  657. # else
  658. h[1999] = ints(1,200);
  659. # endif
  660. /* Try to force some collections and reuse of small list elements */
  661. for (i = 0; i < 10; i++) {
  662. (void)ints(1, BIG);
  663. }
  664. /* Superficially test interior pointer recognition on stack */
  665. c = (sexpr)((char *)c + sizeof(char *));
  666. d = (sexpr)((char *)d + sizeof(char *));
  667. GC_FREE((void *)e);
  668. check_ints(b,1,50);
  669. check_ints(a_get(),1,49);
  670. for (i = 0; i < 50; i++) {
  671. check_ints(b,1,50);
  672. b = reverse(reverse(b));
  673. }
  674. check_ints(b,1,50);
  675. check_ints(a_get(),1,49);
  676. for (i = 0; i < 60; i++) {
  677. # if (defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)) \
  678. && (NTHREADS > 0)
  679. if (i % 10 == 0) fork_a_thread();
  680. # endif
  681. /* This maintains the invariant that a always points to a list */
  682. /* of 49 integers. Thus, this is thread safe without locks, */
  683. /* assuming acquire/release barriers in a_get/set() and atomic */
  684. /* pointer assignments (otherwise, e.g., check_ints() may see */
  685. /* an uninitialized object returned by GC_MALLOC). */
  686. a_set(reverse(reverse(a_get())));
  687. # if !defined(AT_END) && !defined(THREADS)
  688. /* This is not thread safe, since realloc explicitly deallocates */
  689. a_set(GC_REALLOC(a_get(), (i & 1) != 0 ? 500 : 8200));
  690. AO_fetch_and_add1(&realloc_count);
  691. # endif
  692. }
  693. check_ints(a_get(),1,49);
  694. check_ints(b,1,50);
  695. /* Restore c and d values. */
  696. c = (sexpr)((char *)c - sizeof(char *));
  697. d = (sexpr)((char *)d - sizeof(char *));
  698. check_ints(c,1,BIG);
  699. check_uncollectable_ints(d, 1, 100);
  700. check_ints(f[5], 1,17);
  701. check_ints(g[799], 1,18);
  702. # ifdef GC_GCJ_SUPPORT
  703. h[1999] = gcj_reverse(h[1999]);
  704. # endif
  705. check_ints(h[1999], 1,200);
  706. # ifndef THREADS
  707. a_set(NULL);
  708. # endif
  709. *(sexpr volatile *)&b = 0;
  710. *(sexpr volatile *)&c = 0;
  711. return 0;
  712. }
  713. void reverse_test(void)
  714. {
  715. /* Test GC_do_blocking/GC_call_with_gc_active. */
  716. (void)GC_do_blocking(reverse_test_inner, 0);
  717. }
  718. /*
  719. * The rest of this builds balanced binary trees, checks that they don't
  720. * disappear, and tests finalization.
  721. */
  722. typedef struct treenode {
  723. int level;
  724. struct treenode * lchild;
  725. struct treenode * rchild;
  726. } tn;
  727. int finalizable_count = 0;
  728. int finalized_count = 0;
  729. int dropped_something = 0;
  730. void GC_CALLBACK finalizer(void * obj, void * client_data)
  731. {
  732. tn * t = (tn *)obj;
  733. FINALIZER_LOCK();
  734. if ((int)(GC_word)client_data != t -> level) {
  735. GC_printf("Wrong finalization data - collector is broken\n");
  736. FAIL;
  737. }
  738. finalized_count++;
  739. t -> level = -1; /* detect duplicate finalization immediately */
  740. FINALIZER_UNLOCK();
  741. }
  742. # define MAX_FINALIZED ((NTHREADS+1)*4000)
  743. # if !defined(MACOS)
  744. GC_FAR GC_word live_indicators[MAX_FINALIZED] = {0};
  745. # ifndef GC_LONG_REFS_NOT_NEEDED
  746. GC_FAR void *live_long_refs[MAX_FINALIZED] = { NULL };
  747. # endif
  748. #else
  749. /* Too big for THINK_C. Have to allocate it dynamically. */
  750. GC_word *live_indicators = 0;
  751. # ifndef GC_LONG_REFS_NOT_NEEDED
  752. # define GC_LONG_REFS_NOT_NEEDED
  753. # endif
  754. #endif
  755. int live_indicators_count = 0;
  756. tn * mktree(int n)
  757. {
  758. tn * result = GC_NEW(tn);
  759. AO_fetch_and_add1(&collectable_count);
  760. # if defined(MACOS)
  761. /* get around static data limitations. */
  762. if (!live_indicators) {
  763. live_indicators =
  764. (GC_word*)NewPtrClear(MAX_FINALIZED * sizeof(GC_word));
  765. CHECK_OUT_OF_MEMORY(live_indicators);
  766. }
  767. # endif
  768. if (n == 0) return(0);
  769. CHECK_OUT_OF_MEMORY(result);
  770. result -> level = n;
  771. result -> lchild = mktree(n-1);
  772. result -> rchild = mktree(n-1);
  773. if (AO_fetch_and_add1(&extra_count) % 17 == 0 && n >= 2) {
  774. tn * tmp;
  775. tn * left = result -> lchild;
  776. tn * right = result -> rchild;
  777. CHECK_OUT_OF_MEMORY(left);
  778. tmp = left -> rchild;
  779. CHECK_OUT_OF_MEMORY(right);
  780. left -> rchild = right -> lchild;
  781. right -> lchild = tmp;
  782. GC_END_STUBBORN_CHANGE(left);
  783. GC_END_STUBBORN_CHANGE(right);
  784. }
  785. if (AO_fetch_and_add1(&extra_count) % 119 == 0) {
  786. # ifndef GC_NO_FINALIZATION
  787. int my_index;
  788. void *new_link;
  789. # endif
  790. {
  791. FINALIZER_LOCK();
  792. /* Losing a count here causes erroneous report of failure. */
  793. finalizable_count++;
  794. # ifndef GC_NO_FINALIZATION
  795. my_index = live_indicators_count++;
  796. # endif
  797. FINALIZER_UNLOCK();
  798. }
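/* The block below exercises the finalization and weak-reference API:   */
/* GC_REGISTER_FINALIZER, GC_general_register_disappearing_link,        */
/* GC_move_disappearing_link, GC_unregister_disappearing_link, and      */
/* (unless GC_LONG_REFS_NOT_NEEDED) their GC_*_long_link counterparts.  */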
  799. # ifndef GC_NO_FINALIZATION
  800. GC_REGISTER_FINALIZER((void *)result, finalizer, (void *)(GC_word)n,
  801. (GC_finalization_proc *)0, (void * *)0);
  802. if (my_index >= MAX_FINALIZED) {
  803. GC_printf("live_indicators overflowed\n");
  804. FAIL;
  805. }
  806. live_indicators[my_index] = 13;
  807. if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
  808. (void * *)(&(live_indicators[my_index])), result) != 0) {
  809. GC_printf("GC_general_register_disappearing_link failed\n");
  810. FAIL;
  811. }
  812. if (GC_move_disappearing_link((void **)(&(live_indicators[my_index])),
  813. (void **)(&(live_indicators[my_index]))) != GC_SUCCESS) {
  814. GC_printf("GC_move_disappearing_link(link,link) failed\n");
  815. FAIL;
  816. }
  817. new_link = (void *)live_indicators[my_index];
  818. if (GC_move_disappearing_link((void **)(&(live_indicators[my_index])),
  819. &new_link) != GC_SUCCESS) {
  820. GC_printf("GC_move_disappearing_link(new_link) failed\n");
  821. FAIL;
  822. }
  823. if (GC_unregister_disappearing_link(&new_link) == 0) {
  824. GC_printf("GC_unregister_disappearing_link failed\n");
  825. FAIL;
  826. }
  827. if (GC_move_disappearing_link((void **)(&(live_indicators[my_index])),
  828. &new_link) != GC_NOT_FOUND) {
  829. GC_printf("GC_move_disappearing_link(new_link) failed 2\n");
  830. FAIL;
  831. }
  832. if (GC_GENERAL_REGISTER_DISAPPEARING_LINK(
  833. (void * *)(&(live_indicators[my_index])), result) != 0) {
  834. GC_printf("GC_general_register_disappearing_link failed 2\n");
  835. FAIL;
  836. }
  837. # ifndef GC_LONG_REFS_NOT_NEEDED
  838. if (GC_REGISTER_LONG_LINK(&live_long_refs[my_index], result) != 0) {
  839. GC_printf("GC_register_long_link failed\n");
  840. FAIL;
  841. }
  842. if (GC_move_long_link(&live_long_refs[my_index],
  843. &live_long_refs[my_index]) != GC_SUCCESS) {
  844. GC_printf("GC_move_long_link(link,link) failed\n");
  845. FAIL;
  846. }
  847. new_link = live_long_refs[my_index];
  848. if (GC_move_long_link(&live_long_refs[my_index],
  849. &new_link) != GC_SUCCESS) {
  850. GC_printf("GC_move_long_link(new_link) failed\n");
  851. FAIL;
  852. }
  853. if (GC_unregister_long_link(&new_link) == 0) {
  854. GC_printf("GC_unregister_long_link failed\n");
  855. FAIL;
  856. }
  857. if (GC_move_long_link(&live_long_refs[my_index],
  858. &new_link) != GC_NOT_FOUND) {
  859. GC_printf("GC_move_long_link(new_link) failed 2\n");
  860. FAIL;
  861. }
  862. if (GC_REGISTER_LONG_LINK(&live_long_refs[my_index], result) != 0) {
  863. GC_printf("GC_register_long_link failed 2\n");
  864. FAIL;
  865. }
  866. # endif
  867. # endif
  868. GC_reachable_here(result);
  869. }
  870. GC_END_STUBBORN_CHANGE(result);
  871. return(result);
  872. }
  873. void chktree(tn *t, int n)
  874. {
  875. if (0 == n) {
  876. if (NULL == t) /* is a leaf? */
  877. return;
  878. GC_printf("Clobbered a leaf - collector is broken\n");
  879. FAIL;
  880. }
  881. if (t -> level != n) {
  882. GC_printf("Lost a node at level %d - collector is broken\n", n);
  883. FAIL;
  884. }
  885. if (AO_fetch_and_add1(&extra_count) % 373 == 0) {
  886. (void)GC_MALLOC((unsigned)AO_fetch_and_add1(&extra_count) % 5001);
  887. AO_fetch_and_add1(&collectable_count);
  888. }
  889. chktree(t -> lchild, n-1);
  890. if (AO_fetch_and_add1(&extra_count) % 73 == 0) {
  891. (void)GC_MALLOC((unsigned)AO_fetch_and_add1(&extra_count) % 373);
  892. AO_fetch_and_add1(&collectable_count);
  893. }
  894. chktree(t -> rchild, n-1);
  895. }
  896. #if defined(GC_PTHREADS)
  897. pthread_key_t fl_key;
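/* Allocate one 8-byte object. Except in SMALL_CONFIG/GC_DEBUG builds,  */
/* each thread keeps a private free list (stored under fl_key) that is  */
/* refilled with GC_malloc_many(); per gc.h, the returned objects are   */
/* linked through their first word and accessed with GC_NEXT().         */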
  898. void * alloc8bytes(void)
  899. {
  900. # if defined(SMALL_CONFIG) || defined(GC_DEBUG)
  901. AO_fetch_and_add1(&collectable_count);
  902. return(GC_MALLOC(8));
  903. # else
  904. void ** my_free_list_ptr;
  905. void * my_free_list;
  906. my_free_list_ptr = (void **)pthread_getspecific(fl_key);
  907. if (my_free_list_ptr == 0) {
  908. my_free_list_ptr = GC_NEW_UNCOLLECTABLE(void *);
  909. CHECK_OUT_OF_MEMORY(my_free_list_ptr);
  910. AO_fetch_and_add1(&uncollectable_count);
  911. if (pthread_setspecific(fl_key, my_free_list_ptr) != 0) {
  912. GC_printf("pthread_setspecific failed\n");
  913. FAIL;
  914. }
  915. }
  916. my_free_list = *my_free_list_ptr;
  917. if (my_free_list == 0) {
  918. my_free_list = GC_malloc_many(8);
  919. CHECK_OUT_OF_MEMORY(my_free_list);
  920. }
  921. *my_free_list_ptr = GC_NEXT(my_free_list);
  922. GC_NEXT(my_free_list) = 0;
  923. GC_END_STUBBORN_CHANGE(my_free_list_ptr);
  924. AO_fetch_and_add1(&collectable_count);
  925. return(my_free_list);
  926. # endif
  927. }
  928. #else
  929. # define alloc8bytes() GC_MALLOC_ATOMIC(8)
  930. #endif
  931. #include "gc_inline.h"
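/* Briefly exercise the inline free-list allocation macros from         */
/* gc_inline.h (GC_MALLOC_WORDS, GC_MALLOC_ATOMIC_WORDS, GC_CONS), each */
/* of which allocates out of the caller-supplied tfls arrays.           */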
  932. void test_tinyfl(void)
  933. {
  934. void *results[3];
  935. void *tfls[3][GC_TINY_FREELISTS];
  936. # ifndef DONT_ADD_BYTE_AT_END
  937. if (GC_get_all_interior_pointers()) return; /* skip */
  938. # endif
  939. BZERO(tfls, sizeof(tfls));
  940. /* TODO: Improve testing of FAST_MALLOC functionality. */
  941. GC_MALLOC_WORDS(results[0], 11, tfls[0]);
  942. GC_MALLOC_ATOMIC_WORDS(results[1], 20, tfls[1]);
  943. GC_CONS(results[2], results[0], results[1], tfls[2]);
  944. }
  945. void alloc_small(int n)
  946. {
  947. int i;
  948. for (i = 0; i < n; i += 8) {
  949. if (alloc8bytes() == 0) {
  950. GC_printf("Out of memory\n");
  951. FAIL;
  952. }
  953. AO_fetch_and_add1(&atomic_count);
  954. }
  955. }
  956. # if defined(THREADS) && defined(GC_DEBUG)
  957. # ifdef VERY_SMALL_CONFIG
  958. # define TREE_HEIGHT 12
  959. # else
  960. # define TREE_HEIGHT 15
  961. # endif
  962. # else
  963. # ifdef VERY_SMALL_CONFIG
  964. # define TREE_HEIGHT 13
  965. # else
  966. # define TREE_HEIGHT 16
  967. # endif
  968. # endif
  969. void tree_test(void)
  970. {
  971. tn * root;
  972. int i;
  973. root = mktree(TREE_HEIGHT);
  974. # ifndef VERY_SMALL_CONFIG
  975. alloc_small(5000000);
  976. # endif
  977. chktree(root, TREE_HEIGHT);
  978. FINALIZER_LOCK();
  979. if (finalized_count && !dropped_something) {
  980. GC_printf("Premature finalization - collector is broken\n");
  981. FAIL;
  982. }
  983. dropped_something = 1;
  984. FINALIZER_UNLOCK();
  985. GC_noop1((word)root); /* Root needs to remain live until */
  986. /* dropped_something is set. */
  987. root = mktree(TREE_HEIGHT);
  988. chktree(root, TREE_HEIGHT);
  989. for (i = TREE_HEIGHT; i >= 0; i--) {
  990. root = mktree(i);
  991. chktree(root, i);
  992. }
  993. # ifndef VERY_SMALL_CONFIG
  994. alloc_small(5000000);
  995. # endif
  996. }
  997. unsigned n_tests = 0;
  998. const GC_word bm_huge[320 / CPP_WORDSZ] = {
  999. # if CPP_WORDSZ == 32
  1000. 0xffffffff,
  1001. 0xffffffff,
  1002. 0xffffffff,
  1003. 0xffffffff,
  1004. 0xffffffff,
  1005. # endif
  1006. (GC_word)((GC_signed_word)-1),
  1007. (GC_word)((GC_signed_word)-1),
  1008. (GC_word)((GC_signed_word)-1),
  1009. (GC_word)((GC_signed_word)-1),
  1010. ((GC_word)((GC_signed_word)-1)) >> 8 /* highest byte is zero */
  1011. };
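/* Per gc_typed.h, GC_make_descriptor(bm, len) builds a descriptor from */
/* a bitmap in which bit i set means word i of the object may hold a    */
/* pointer; bm_huge above describes a 320-word object whose final eight */
/* words (the zero high byte of the bitmap) are pointer-free.           */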
  1012. /* A very simple test of explicitly typed allocation */
  1013. void typed_test(void)
  1014. {
  1015. GC_word * old, * newP;
  1016. GC_word bm3[1] = {0};
  1017. GC_word bm2[1] = {0};
  1018. GC_word bm_large[1] = { 0xf7ff7fff };
  1019. GC_descr d1;
  1020. GC_descr d2;
  1021. GC_descr d3 = GC_make_descriptor(bm_large, 32);
  1022. GC_descr d4 = GC_make_descriptor(bm_huge, 320);
  1023. GC_word * x = (GC_word *)GC_malloc_explicitly_typed(
  1024. 320 * sizeof(GC_word) + 123, d4);
  1025. int i;
  1026. AO_fetch_and_add1(&collectable_count);
  1027. (void)GC_make_descriptor(bm_large, 32);
  1028. if (GC_get_bit(bm_huge, 32) == 0 || GC_get_bit(bm_huge, 311) == 0
  1029. || GC_get_bit(bm_huge, 319) != 0) {
  1030. GC_printf("Bad GC_get_bit() or bm_huge initialization\n");
  1031. FAIL;
  1032. }
  1033. GC_set_bit(bm3, 0);
  1034. GC_set_bit(bm3, 1);
  1035. d1 = GC_make_descriptor(bm3, 2);
  1036. GC_set_bit(bm2, 1);
  1037. d2 = GC_make_descriptor(bm2, 2);
  1038. old = 0;
  1039. for (i = 0; i < 4000; i++) {
  1040. newP = (GC_word *)GC_malloc_explicitly_typed(4 * sizeof(GC_word), d1);
  1041. CHECK_OUT_OF_MEMORY(newP);
  1042. AO_fetch_and_add1(&collectable_count);
  1043. if (newP[0] != 0 || newP[1] != 0) {
  1044. GC_printf("Bad initialization by GC_malloc_explicitly_typed\n");
  1045. FAIL;
  1046. }
  1047. newP[0] = 17;
  1048. newP[1] = (GC_word)old;
  1049. old = newP;
  1050. AO_fetch_and_add1(&collectable_count);
  1051. newP = (GC_word *)GC_malloc_explicitly_typed(4 * sizeof(GC_word), d2);
  1052. CHECK_OUT_OF_MEMORY(newP);
  1053. newP[0] = 17;
  1054. newP[1] = (GC_word)old;
  1055. GC_END_STUBBORN_CHANGE(newP);
  1056. old = newP;
  1057. AO_fetch_and_add1(&collectable_count);
  1058. newP = (GC_word*)GC_malloc_explicitly_typed(33 * sizeof(GC_word), d3);
  1059. CHECK_OUT_OF_MEMORY(newP);
  1060. newP[0] = 17;
  1061. newP[1] = (GC_word)old;
  1062. GC_END_STUBBORN_CHANGE(newP);
  1063. old = newP;
  1064. AO_fetch_and_add1(&collectable_count);
  1065. newP = (GC_word *)GC_calloc_explicitly_typed(4, 2 * sizeof(GC_word),
  1066. d1);
  1067. CHECK_OUT_OF_MEMORY(newP);
  1068. newP[0] = 17;
  1069. newP[1] = (GC_word)old;
  1070. GC_END_STUBBORN_CHANGE(newP);
  1071. old = newP;
  1072. AO_fetch_and_add1(&collectable_count);
  1073. if (i & 0xff) {
  1074. newP = (GC_word *)GC_calloc_explicitly_typed(7, 3 * sizeof(GC_word),
  1075. d2);
  1076. } else {
  1077. newP = (GC_word *)GC_calloc_explicitly_typed(1001,
  1078. 3 * sizeof(GC_word),
  1079. d2);
  1080. if (newP != NULL && (newP[0] != 0 || newP[1] != 0)) {
  1081. GC_printf("Bad initialization by GC_malloc_explicitly_typed\n");
  1082. FAIL;
  1083. }
  1084. }
  1085. CHECK_OUT_OF_MEMORY(newP);
  1086. newP[0] = 17;
  1087. newP[1] = (GC_word)old;
  1088. GC_END_STUBBORN_CHANGE(newP);
  1089. old = newP;
  1090. }
  1091. for (i = 0; i < 20000; i++) {
  1092. if (newP[0] != 17) {
  1093. GC_printf("Typed alloc failed at %d\n", i);
  1094. FAIL;
  1095. }
  1096. newP[0] = 0;
  1097. old = newP;
  1098. newP = (GC_word *)old[1];
  1099. }
  1100. GC_gcollect();
  1101. GC_noop1((word)x);
  1102. }
  1103. #ifdef DBG_HDRS_ALL
  1104. # define set_print_procs() (void)(A.dummy = 17)
  1105. #else
  1106. int fail_count = 0;
  1107. void GC_CALLBACK fail_proc1(void *x GC_ATTR_UNUSED)
  1108. {
  1109. fail_count++;
  1110. }
  1111. void set_print_procs(void)
  1112. {
  1113. /* Set these global variables just once to avoid TSan false positives. */
  1114. A.dummy = 17;
  1115. GC_is_valid_displacement_print_proc = fail_proc1;
  1116. GC_is_visible_print_proc = fail_proc1;
  1117. }
  1118. # ifdef THREADS
  1119. # define TEST_FAIL_COUNT(n) 1
  1120. # else
  1121. # define TEST_FAIL_COUNT(n) (fail_count >= (n))
  1122. # endif
  1123. #endif /* !DBG_HDRS_ALL */
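/* Check that all pointer arguments (the list is NULL-terminated) are   */
/* distinct; run_one_test() uses this to verify that function arguments */
/* stay visible to the collector across intervening GC_gcollect() calls. */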
  1124. static void uniq(void *p, ...) {
  1125. va_list a;
  1126. void *q[100];
  1127. int n = 0, i, j;
  1128. q[n++] = p;
  1129. va_start(a,p);
  1130. for (;(q[n] = va_arg(a,void *)) != NULL;n++) ;
  1131. va_end(a);
  1132. for (i=0; i<n; i++)
  1133. for (j=0; j<i; j++)
  1134. if (q[i] == q[j]) {
  1135. GC_printf(
  1136. "Apparently failed to mark from some function arguments.\n"
  1137. "Perhaps GC_push_regs was configured incorrectly?\n"
  1138. );
  1139. FAIL;
  1140. }
  1141. }
  1142. void * GC_CALLBACK inc_int_counter(void *pcounter)
  1143. {
  1144. ++(*(int *)pcounter);
  1145. return NULL;
  1146. }
  1147. void run_one_test(void)
  1148. {
  1149. # ifndef DBG_HDRS_ALL
  1150. char *x;
  1151. char **z;
  1152. char *y = (char *)(GC_word)fail_proc1;
  1153. # endif
  1154. # ifndef NO_CLOCK
  1155. CLOCK_TYPE start_time;
  1156. CLOCK_TYPE reverse_time;
  1157. unsigned long time_diff;
  1158. # endif
  1159. # ifndef NO_TEST_HANDLE_FORK
  1160. pid_t pid;
  1161. int wstatus;
  1162. # endif
  1163. # ifdef FIND_LEAK
  1164. GC_printf(
  1165. "This test program is not designed for leak detection mode\n");
  1166. GC_printf("Expect lots of problems\n");
  1167. # endif
  1168. GC_FREE(0);
  1169. # ifdef THREADS
  1170. if (!GC_thread_is_registered()) {
  1171. GC_printf("Current thread is not registered with GC\n");
  1172. FAIL;
  1173. }
  1174. # endif
  1175. test_tinyfl();
  1176. # ifndef DBG_HDRS_ALL
  1177. AO_fetch_and_add1(&collectable_count); /* 1 */
  1178. AO_fetch_and_add1(&collectable_count); /* 2 */
  1179. AO_fetch_and_add1(&collectable_count); /* 3 */
  1180. if ((GC_size(GC_malloc(7)) != 8 &&
  1181. GC_size(GC_malloc(7)) != MIN_WORDS * sizeof(GC_word))
  1182. || GC_size(GC_malloc(15)) != 16) {
  1183. GC_printf("GC_size produced unexpected results\n");
  1184. FAIL;
  1185. }
  1186. AO_fetch_and_add1(&collectable_count);
  1187. if (GC_size(GC_malloc(0)) != MIN_WORDS * sizeof(GC_word)) {
  1188. GC_printf("GC_malloc(0) failed: GC_size returns %lu\n",
  1189. (unsigned long)GC_size(GC_malloc(0)));
  1190. FAIL;
  1191. }
  1192. AO_fetch_and_add1(&uncollectable_count);
  1193. if (GC_size(GC_malloc_uncollectable(0)) != MIN_WORDS * sizeof(GC_word)) {
  1194. GC_printf("GC_malloc_uncollectable(0) failed\n");
  1195. FAIL;
  1196. }
  1197. AO_fetch_and_add1(&collectable_count);
  1198. x = (char*)GC_malloc(16);
  1199. if (GC_base(GC_PTR_ADD(x, 13)) != x) {
  1200. GC_printf("GC_base(heap ptr) produced incorrect result\n");
  1201. FAIL;
  1202. }
  1203. if (!GC_is_heap_ptr(x)) {
  1204. GC_printf("GC_is_heap_ptr(heap_ptr) produced incorrect result\n");
  1205. FAIL;
  1206. }
  1207. if (GC_is_heap_ptr(&x)) {
  1208. GC_printf("GC_is_heap_ptr(&local_var) produced incorrect result\n");
  1209. FAIL;
  1210. }
  1211. if (GC_is_heap_ptr(&fail_count) || GC_is_heap_ptr(NULL)) {
  1212. GC_printf("GC_is_heap_ptr(&global_var) produced incorrect result\n");
  1213. FAIL;
  1214. }
  1215. (void)GC_PRE_INCR(x, 0);
  1216. (void)GC_POST_INCR(x);
  1217. (void)GC_POST_DECR(x);
  1218. if (GC_base(x) != x) {
  1219. GC_printf("Bad INCR/DECR result\n");
  1220. FAIL;
  1221. }
  1222. # ifndef PCR
  1223. if (GC_base(y) != 0) {
  1224. GC_printf("GC_base(fn_ptr) produced incorrect result\n");
  1225. FAIL;
  1226. }
  1227. # endif
  1228. if (GC_same_obj(x+5, x) != x + 5) {
  1229. GC_printf("GC_same_obj produced incorrect result\n");
  1230. FAIL;
  1231. }
  1232. if (GC_is_visible(y) != y || GC_is_visible(x) != x) {
  1233. GC_printf("GC_is_visible produced incorrect result\n");
  1234. FAIL;
  1235. }
  1236. z = (char**)GC_malloc(8);
  1237. CHECK_OUT_OF_MEMORY(z);
  1238. AO_fetch_and_add1(&collectable_count);
  1239. GC_PTR_STORE(z, x);
  1240. GC_end_stubborn_change(z);
  1241. if (*z != x) {
  1242. GC_printf("GC_PTR_STORE failed: %p != %p\n", (void *)(*z), (void *)x);
  1243. FAIL;
  1244. }
  1245. if (!TEST_FAIL_COUNT(1)) {
  1246. # if !(defined(POWERPC) || defined(IA64)) || defined(M68K)
  1247. /* On POWERPCs function pointers point to a descriptor in the */
  1248. /* data segment, so there should have been no failures. */
  1249. /* The same applies to IA64. Something similar seems to */
  1250. /* be going on with NetBSD/M68K. */
  1251. GC_printf("GC_is_visible produced wrong failure indication\n");
  1252. FAIL;
  1253. # endif
  1254. }
  1255. if (GC_is_valid_displacement(y) != y
  1256. || GC_is_valid_displacement(x) != x
  1257. || GC_is_valid_displacement(x + 3) != x + 3) {
  1258. GC_printf("GC_is_valid_displacement produced incorrect result\n");
  1259. FAIL;
  1260. }
  1261. {
  1262. size_t i;
  1263. (void)GC_malloc(17);
  1264. AO_fetch_and_add1(&collectable_count);
  1265. for (i = sizeof(GC_word); i < 512; i *= 2) {
  1266. GC_word result = (GC_word) GC_memalign(i, 17);
  1267. if (result % i != 0 || result == 0 || *(int *)result != 0) FAIL;
  1268. }
  1269. }
  1270. # ifndef ALL_INTERIOR_POINTERS
  1271. # if defined(RS6000) || defined(POWERPC)
  1272. if (!TEST_FAIL_COUNT(1))
  1273. # else
  1274. if (!TEST_FAIL_COUNT(GC_get_all_interior_pointers() ? 1 : 2))
  1275. # endif
  1276. {
  1277. GC_printf(
  1278. "GC_is_valid_displacement produced wrong failure indication\n");
  1279. FAIL;
  1280. }
  1281. # endif
  1282. # endif /* DBG_HDRS_ALL */
  1283. /* Test floating point alignment */
  1284. {
  1285. double *dp = GC_NEW(double);
  1286. CHECK_OUT_OF_MEMORY(dp);
  1287. AO_fetch_and_add1(&collectable_count);
  1288. *dp = 1.0;
  1289. dp = GC_NEW(double);
  1290. CHECK_OUT_OF_MEMORY(dp);
  1291. AO_fetch_and_add1(&collectable_count);
  1292. *dp = 1.0;
  1293. }
  1294. /* Test size 0 allocation a bit more */
  1295. {
  1296. size_t i;
  1297. for (i = 0; i < 10000; ++i) {
  1298. (void)GC_MALLOC(0);
  1299. AO_fetch_and_add1(&collectable_count);
  1300. GC_FREE(GC_MALLOC(0));
  1301. (void)GC_MALLOC_ATOMIC(0);
  1302. AO_fetch_and_add1(&atomic_count);
  1303. GC_FREE(GC_MALLOC_ATOMIC(0));
  1304. test_generic_malloc_or_special(GC_malloc_atomic(1));
  1305. AO_fetch_and_add1(&atomic_count);
  1306. }
  1307. }
  1308. # ifdef GC_GCJ_SUPPORT
  1309. GC_REGISTER_DISPLACEMENT(sizeof(struct fake_vtable *));
  1310. GC_init_gcj_malloc(0, (void *)(GC_word)fake_gcj_mark_proc);
  1311. # endif
  1312. /* Make sure that fn arguments are visible to the collector. */
  1313. uniq(
  1314. GC_malloc(12), GC_malloc(12), GC_malloc(12),
  1315. (GC_gcollect(),GC_malloc(12)),
  1316. GC_malloc(12), GC_malloc(12), GC_malloc(12),
  1317. (GC_gcollect(),GC_malloc(12)),
  1318. GC_malloc(12), GC_malloc(12), GC_malloc(12),
  1319. (GC_gcollect(),GC_malloc(12)),
  1320. GC_malloc(12), GC_malloc(12), GC_malloc(12),
  1321. (GC_gcollect(),GC_malloc(12)),
  1322. GC_malloc(12), GC_malloc(12), GC_malloc(12),
  1323. (GC_gcollect(),GC_malloc(12)),
  1324. (void *)0);
  1325. /* GC_malloc(0) must return NULL or something we can deallocate. */
  1326. GC_free(GC_malloc(0));
  1327. GC_free(GC_malloc_atomic(0));
  1328. GC_free(GC_malloc(0));
  1329. GC_free(GC_malloc_atomic(0));
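/* Test fork() handling. Since INIT_FORK_SUPPORT may have passed -1 to  */
/* GC_set_handle_fork(), the fork() below is bracketed manually with    */
/* GC_atfork_prepare/GC_atfork_parent/GC_atfork_child, as described in  */
/* the comment near the top of the file.                                */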
#   ifndef NO_TEST_HANDLE_FORK
      GC_atfork_prepare();
      pid = fork();
      if (pid != 0) {
        GC_atfork_parent();
        if (pid == -1) {
          GC_printf("Process fork failed\n");
          FAIL;
        }
        if (print_stats)
          GC_log_printf("Forked child process\n");
        if (waitpid(pid, &wstatus, 0) == -1) {
          GC_printf("Wait for child process failed\n");
          FAIL;
        }
        if (!WIFEXITED(wstatus) || WEXITSTATUS(wstatus) != 0) {
          GC_printf("Child process failed, status= 0x%x\n", wstatus);
          FAIL;
        }
      } else {
        GC_atfork_child();
        if (print_stats)
          GC_log_printf("Started a child process\n");
#       ifdef THREADS
#         ifdef PARALLEL_MARK
            GC_gcollect(); /* no parallel markers */
#         endif
          GC_start_mark_threads();
#       endif
        GC_gcollect();
#       ifdef THREADS
          tiny_reverse_test(0);
          GC_gcollect();
#       endif
        if (print_stats)
          GC_log_printf("Finished a child process\n");
        exit(0);
      }
#   endif
    /* Repeated list reversal test. */
#   ifndef NO_CLOCK
      GET_TIME(start_time);
#   endif
    reverse_test();
#   ifndef NO_CLOCK
      if (print_stats) {
        GET_TIME(reverse_time);
        time_diff = MS_TIME_DIFF(reverse_time, start_time);
        GC_log_printf("-------------Finished reverse_test at time %u (%p)\n",
                      (unsigned) time_diff, (void *)&start_time);
      }
#   endif
#   ifndef DBG_HDRS_ALL
      typed_test();
#     ifndef NO_CLOCK
        if (print_stats) {
          CLOCK_TYPE typed_time;

          GET_TIME(typed_time);
          time_diff = MS_TIME_DIFF(typed_time, start_time);
          GC_log_printf("-------------Finished typed_test at time %u (%p)\n",
                        (unsigned) time_diff, (void *)&start_time);
        }
#     endif
#   endif /* DBG_HDRS_ALL */
    tree_test();
#   ifndef NO_CLOCK
      if (print_stats) {
        CLOCK_TYPE tree_time;

        GET_TIME(tree_time);
        time_diff = MS_TIME_DIFF(tree_time, start_time);
        GC_log_printf("-------------Finished tree_test at time %u (%p)\n",
                      (unsigned) time_diff, (void *)&start_time);
      }
#   endif
    /* Run reverse_test a second time, so we hopefully notice corruption. */
    reverse_test();
#   ifndef NO_CLOCK
      if (print_stats) {
        GET_TIME(reverse_time);
        time_diff = MS_TIME_DIFF(reverse_time, start_time);
        GC_log_printf(
                "-------------Finished second reverse_test at time %u (%p)\n",
                (unsigned)time_diff, (void *)&start_time);
      }
#   endif
    /* GC_allocate_ml and GC_need_to_lock are no longer exported, and  */
    /* AO_fetch_and_add1() may be unavailable to update a counter.     */
    (void)GC_call_with_alloc_lock(inc_int_counter, &n_tests);
#   ifndef NO_CLOCK
      if (print_stats)
        GC_log_printf("Finished %p\n", (void *)&start_time);
#   endif
}
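
/* Callback passed to GC_enumerate_reachable_objects_inner(): checks   */
/* that the enumerator reports a valid object base and size for every  */
/* reachable object, and bumps the counter pointed to by pcounter.     */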
void GC_CALLBACK reachable_objs_counter(void *obj, size_t size,
                                        void *pcounter)
{
  if (0 == size) {
    GC_printf("Reachable object has zero size\n");
    FAIL;
  }
  if (GC_base(obj) != obj) {
    GC_printf("Invalid reachable object base passed by enumerator: %p\n",
              obj);
    FAIL;
  }
  if (GC_size(obj) != size) {
    GC_printf("Invalid reachable object size passed by enumerator: %lu\n",
              (unsigned long)size);
    FAIL;
  }
  (*(unsigned *)pcounter)++;
}
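
/* Counts the reachable objects using the callback above.  The "inner" */
/* enumerator apparently needs to run under the allocator lock, which  */
/* is why this wrapper is invoked via GC_call_with_alloc_lock() in     */
/* check_heap_stats() below.                                           */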
void * GC_CALLBACK reachable_objs_count_enumerator(void *pcounter)
{
  GC_enumerate_reachable_objects_inner(reachable_objs_counter, pcounter);
  return NULL;
}

#define NUMBER_ROUND_UP(v, bound) ((((v) + (bound) - 1) / (bound)) * (bound))
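
/* Check the overall heap state once all tests have finished: force    */
/* finalization, count the reachable objects, print the allocation     */
/* counters, and fail if the heap grew past an empirically chosen      */
/* bound.                                                              */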
void check_heap_stats(void)
{
    size_t max_heap_sz;
    int i;
#   ifndef GC_NO_FINALIZATION
      int still_live;
#     ifndef GC_LONG_REFS_NOT_NEEDED
        int still_long_live = 0;
#     endif
#     ifdef FINALIZE_ON_DEMAND
        int late_finalize_count = 0;
#     endif
#   endif
    unsigned obj_count = 0;

    if (!GC_is_init_called()) {
      GC_printf("GC should be initialized!\n");
      FAIL;
    }
#   ifdef VERY_SMALL_CONFIG
      /* The upper bounds are a guess, which has been empirically      */
      /* adjusted.  On low-end uniprocessors with incremental GC       */
      /* these may be particularly dubious, since empirically the      */
      /* heap tends to grow largely as a result of the GC not          */
      /* getting enough cycles.                                        */
#     if CPP_WORDSZ == 64
        max_heap_sz = 4500000;
#     else
        max_heap_sz = 2800000;
#     endif
#   else
#     if CPP_WORDSZ == 64
        max_heap_sz = 25000000;
#     else
        max_heap_sz = 16000000;
#     endif
#   endif
#   ifdef GC_DEBUG
      max_heap_sz *= 2;
#     ifdef SAVE_CALL_CHAIN
        max_heap_sz *= 3;
#       ifdef SAVE_CALL_COUNT
          max_heap_sz += max_heap_sz * NFRAMES / 4;
#       endif
#     endif
#   endif
#   if defined(ADDRESS_SANITIZER) && !defined(__clang__)
      max_heap_sz = max_heap_sz * 2 - max_heap_sz / 3;
#   endif
#   ifdef MEMORY_SANITIZER
      max_heap_sz += max_heap_sz / 4;
#   endif
    max_heap_sz *= n_tests;
#   if defined(USE_MMAP) || defined(MSWIN32)
      max_heap_sz = NUMBER_ROUND_UP(max_heap_sz, 4 * 1024 * 1024);
#   endif

    /* Garbage collect repeatedly so that all inaccessible objects     */
    /* can be finalized.                                               */
    if (!GC_is_disabled())
      while (GC_collect_a_little()) { }
    for (i = 0; i < 16; i++) {
      GC_gcollect();
#     ifndef GC_NO_FINALIZATION
#       ifdef FINALIZE_ON_DEMAND
          late_finalize_count +=
#       endif
                GC_invoke_finalizers();
#     endif
    }
    if (print_stats) {
      struct GC_stack_base sb;
      int res = GC_get_stack_base(&sb);

      if (res == GC_SUCCESS) {
        GC_log_printf("Primordial thread stack bottom: %p\n", sb.mem_base);
      } else if (res == GC_UNIMPLEMENTED) {
        GC_log_printf("GC_get_stack_base() unimplemented\n");
      } else {
        GC_printf("GC_get_stack_base() failed: %d\n", res);
        FAIL;
      }
    }
    (void)GC_call_with_alloc_lock(reachable_objs_count_enumerator,
                                  &obj_count);
    GC_printf("Completed %u tests\n", n_tests);
    GC_printf("Allocated %d collectable objects\n", (int)collectable_count);
    GC_printf("Allocated %d uncollectable objects\n",
              (int)uncollectable_count);
    GC_printf("Allocated %d atomic objects\n", (int)atomic_count);
    GC_printf("Reallocated %d objects\n", (int)realloc_count);
    GC_printf("Finalized %d/%d objects - ",
              finalized_count, finalizable_count);
#   ifndef GC_NO_FINALIZATION
#     ifdef FINALIZE_ON_DEMAND
        if (finalized_count != late_finalize_count) {
          GC_printf("Demand finalization error\n");
          FAIL;
        }
#     endif
      if (finalized_count > finalizable_count
          || finalized_count < finalizable_count/2) {
        GC_printf("finalization is probably broken\n");
        FAIL;
      } else {
        GC_printf("finalization is probably ok\n");
      }
      still_live = 0;
      for (i = 0; i < MAX_FINALIZED; i++) {
        if (live_indicators[i] != 0) {
          still_live++;
        }
#       ifndef GC_LONG_REFS_NOT_NEEDED
          if (live_long_refs[i] != NULL) {
            still_long_live++;
          }
#       endif
      }
      i = finalizable_count - finalized_count - still_live;
      if (0 != i) {
        GC_printf("%d disappearing links remain and %d more objects "
                  "were not finalized\n", still_live, i);
        if (i > 10) {
          GC_printf("\tVery suspicious!\n");
        } else {
          GC_printf("\tSlightly suspicious, but probably OK\n");
        }
      }
#     ifndef GC_LONG_REFS_NOT_NEEDED
        if (0 != still_long_live) {
          GC_printf("%d 'long' links remain\n", still_long_live);
        }
#     endif
#   endif
    GC_printf("Total number of bytes allocated is %lu\n",
              (unsigned long)GC_get_total_bytes());
    GC_printf("Total memory use by allocated blocks is %lu bytes\n",
              (unsigned long)GC_get_memory_use());
    GC_printf("Final heap size is %lu bytes\n",
              (unsigned long)GC_get_heap_size());
    if (GC_get_total_bytes() < (size_t)n_tests *
#   ifdef VERY_SMALL_CONFIG
        2700000
#   else
        33500000
#   endif
        ) {
      GC_printf("Incorrect execution - missed some allocations\n");
      FAIL;
    }
    if (GC_get_heap_size() + GC_get_unmapped_bytes() > max_heap_sz) {
      GC_printf("Unexpected heap growth - collector may be broken"
                " (heapsize: %lu, expected: %lu)\n",
                (unsigned long)(GC_get_heap_size() + GC_get_unmapped_bytes()),
                (unsigned long)max_heap_sz);
      FAIL;
    }
    GC_printf("Final number of reachable objects is %u\n", obj_count);
#   ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
      /* Get global counters (just to check the functions work). */
      GC_get_heap_usage_safe(NULL, NULL, NULL, NULL, NULL);
      {
        struct GC_prof_stats_s stats;

        (void)GC_get_prof_stats(&stats, sizeof(stats));
#       ifdef THREADS
          (void)GC_get_prof_stats_unsafe(&stats, sizeof(stats));
#       endif
      }
      (void)GC_get_size_map_at(-1);
      (void)GC_get_size_map_at(1);
#   endif
#   ifdef THREADS
      GC_unregister_my_thread(); /* just to check it works (for main) */
#   endif
    GC_printf("Completed %u collections", (unsigned)GC_get_gc_no());
#   ifndef NO_CLOCK
      GC_printf(" in %lu msecs", GC_get_full_gc_total_time());
#   endif
#   ifdef PARALLEL_MARK
      GC_printf(" (using %d marker threads)", GC_get_parallel() + 1);
#   endif
    GC_printf("\n" "Collector appears to work\n");
}
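
/* Classic MacOS only: grow the application stack by lowering the      */
/* application heap limit whenever the default stack is smaller than   */
/* the requested minimum.                                              */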
#if defined(MACOS)
  void SetMinimumStack(long minSize)
  {
    if (minSize > LMGetDefltStack())
    {
      long newApplLimit = (long) GetApplLimit()
                          - (minSize - LMGetDefltStack());
      SetApplLimit((Ptr) newApplLimit);
      MaxApplZone();
    }
  }
# define cMinStackSpace (512L * 1024L)
#endif
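
/* Warning handler registered via GC_set_warn_proc(); it just echoes   */
/* the collector's warning rather than treating it as a test failure.  */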
void GC_CALLBACK warn_proc(char *msg, GC_word p)
{
    GC_printf(msg, (unsigned long)p);
    /*FAIL;*/
}
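
/* Under cppcheck, UNTESTED() references entry points that the test    */
/* does not otherwise exercise (GC_noop1 on the symbol address), so    */
/* the analysis still considers them used.                             */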
#if defined(CPPCHECK)
# include "javaxfc.h" /* for GC_finalize_all */
# define UNTESTED(sym) GC_noop1((word)&sym)
#endif

#if defined(MSWINCE) && defined(UNDER_CE)
# define WINMAIN_LPTSTR LPWSTR
#else
# define WINMAIN_LPTSTR LPSTR
#endif
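
/* Test driver for builds without thread support (also covers the      */
/* RTEMS and the non-threaded Win32/WinCE entry points).                */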
#if !defined(PCR) && !defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)

#if ((defined(MSWIN32) && !defined(__MINGW32__)) || defined(MSWINCE)) \
    && !defined(NO_WINMAIN_ENTRY)
  int APIENTRY WinMain(HINSTANCE instance GC_ATTR_UNUSED,
                       HINSTANCE prev GC_ATTR_UNUSED,
                       WINMAIN_LPTSTR cmd GC_ATTR_UNUSED,
                       int n GC_ATTR_UNUSED)
#elif defined(RTEMS)
# include <bsp.h>
# define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
# define CONFIGURE_APPLICATION_NEEDS_CONSOLE_DRIVER
# define CONFIGURE_RTEMS_INIT_TASKS_TABLE
# define CONFIGURE_MAXIMUM_TASKS 1
# define CONFIGURE_INIT
# define CONFIGURE_INIT_TASK_STACK_SIZE (64*1024)
# include <rtems/confdefs.h>
  rtems_task Init(rtems_task_argument ignord)
#else
  int main(void)
#endif
{
#   if defined(CPPCHECK) && !defined(NO_WINMAIN_ENTRY) \
       && ((defined(MSWIN32) && !defined(__MINGW32__)) || defined(MSWINCE))
      GC_noop1((GC_word)&WinMain);
#   elif defined(CPPCHECK) && defined(RTEMS)
      GC_noop1((GC_word)&Init);
#   endif
    n_tests = 0;
#   if defined(MACOS)
      /* Make sure we have lots and lots of stack space.       */
      SetMinimumStack(cMinStackSpace);
      /* Cheat and let stdio initialize toolbox for us.        */
      printf("Testing GC Macintosh port\n");
#   endif
    GC_COND_INIT();
    GC_set_warn_proc(warn_proc);
#   if !defined(GC_DISABLE_INCREMENTAL) \
       && (defined(TEST_DEFAULT_VDB) || !defined(DEFAULT_VDB))
#     if !defined(MAKE_BACK_GRAPH) && !defined(NO_INCREMENTAL) \
         && !(defined(MPROTECT_VDB) && defined(USE_MUNMAP))
        GC_enable_incremental();
#     endif
      if (GC_is_incremental_mode()) {
        GC_printf("Switched to incremental mode\n");
#       ifdef PROC_VDB
          GC_printf("Reading dirty bits from /proc\n");
#       elif defined(GWW_VDB)
          GC_printf("Using GetWriteWatch-based implementation\n");
#         ifdef MPROTECT_VDB
            GC_printf("Or emulating dirty bits with mprotect/signals\n");
#         endif
#       elif defined(MPROTECT_VDB)
          GC_printf("Emulating dirty bits with mprotect/signals\n");
#       endif /* MPROTECT_VDB && !GWW_VDB */
      }
#   endif
    set_print_procs();
    run_one_test();
    check_heap_stats();
#   ifndef MSWINCE
      fflush(stdout);
#   endif
#   if defined(CPPCHECK)
      /* Entry points we should be testing, but aren't. */
#     ifndef GC_DEBUG
        UNTESTED(GC_debug_generic_or_special_malloc);
        UNTESTED(GC_debug_register_displacement);
        UNTESTED(GC_post_incr);
        UNTESTED(GC_pre_incr);
#       ifdef GC_GCJ_SUPPORT
          UNTESTED(GC_debug_gcj_malloc);
#       endif
#     endif
#     ifdef AMIGA
#       ifdef GC_AMIGA_FASTALLOC
          UNTESTED(GC_amiga_get_mem);
#       endif
#       ifndef GC_AMIGA_ONLYFAST
          UNTESTED(GC_amiga_set_toany);
#       endif
#     endif
#     if defined(MACOS) && defined(USE_TEMPORARY_MEMORY)
        UNTESTED(GC_MacTemporaryNewPtr);
#     endif
#     if !defined(_M_AMD64) && defined(_MSC_VER)
        UNTESTED(GetFileLineFromStack);
        UNTESTED(GetModuleNameFromStack);
        UNTESTED(GetSymbolNameFromStack);
#     endif
      UNTESTED(GC_get_bytes_since_gc);
      UNTESTED(GC_get_dont_expand);
      UNTESTED(GC_get_dont_precollect);
      UNTESTED(GC_get_finalize_on_demand);
      UNTESTED(GC_get_finalizer_notifier);
      UNTESTED(GC_get_find_leak);
      UNTESTED(GC_get_force_unmap_on_gcollect);
      UNTESTED(GC_get_free_bytes);
      UNTESTED(GC_get_free_space_divisor);
      UNTESTED(GC_get_full_freq);
      UNTESTED(GC_get_java_finalization);
      UNTESTED(GC_get_max_retries);
      UNTESTED(GC_get_no_dls);
      UNTESTED(GC_get_non_gc_bytes);
      UNTESTED(GC_get_on_collection_event);
      UNTESTED(GC_get_on_heap_resize);
      UNTESTED(GC_get_pages_executable);
      UNTESTED(GC_get_push_other_roots);
      UNTESTED(GC_get_start_callback);
      UNTESTED(GC_get_stop_func);
      UNTESTED(GC_get_time_limit);
      UNTESTED(GC_get_warn_proc);
      UNTESTED(GC_is_disabled);
      UNTESTED(GC_set_dont_precollect);
      UNTESTED(GC_set_finalize_on_demand);
      UNTESTED(GC_set_finalizer_notifier);
      UNTESTED(GC_set_free_space_divisor);
      UNTESTED(GC_set_full_freq);
      UNTESTED(GC_set_java_finalization);
      UNTESTED(GC_set_max_retries);
      UNTESTED(GC_set_no_dls);
      UNTESTED(GC_set_non_gc_bytes);
      UNTESTED(GC_set_on_collection_event);
      UNTESTED(GC_set_on_heap_resize);
      UNTESTED(GC_set_oom_fn);
      UNTESTED(GC_set_pages_executable);
      UNTESTED(GC_set_push_other_roots);
      UNTESTED(GC_set_start_callback);
      UNTESTED(GC_set_stop_func);
      UNTESTED(GC_set_time_limit);
      UNTESTED(GC_malloc_explicitly_typed_ignore_off_page);
      UNTESTED(GC_debug_change_stubborn);
      UNTESTED(GC_debug_strndup);
      UNTESTED(GC_deinit);
      UNTESTED(GC_strndup);
      UNTESTED(GC_posix_memalign);
      UNTESTED(GC_new_free_list);
      UNTESTED(GC_new_kind);
      UNTESTED(GC_new_proc);
      UNTESTED(GC_clear_roots);
      UNTESTED(GC_exclude_static_roots);
      UNTESTED(GC_expand_hp);
      UNTESTED(GC_register_describe_type_fn);
      UNTESTED(GC_register_has_static_roots_callback);
#     if !defined(PCR) && !defined(SMALL_CONFIG)
        UNTESTED(GC_get_abort_func);
        UNTESTED(GC_set_abort_func);
#     endif
#     ifdef GC_GCJ_SUPPORT
        UNTESTED(GC_gcj_malloc_ignore_off_page);
#     endif
#     ifndef NO_DEBUGGING
        UNTESTED(GC_dump_regions);
        UNTESTED(GC_is_tmp_root);
        UNTESTED(GC_print_free_list);
#     endif
#     ifdef TRACE_BUF
        UNTESTED(GC_print_trace);
#     endif
#     ifndef GC_NO_FINALIZATION
        UNTESTED(GC_debug_register_finalizer_unreachable);
        UNTESTED(GC_get_await_finalize_proc);
        UNTESTED(GC_register_disappearing_link);
        UNTESTED(GC_set_await_finalize_proc);
        UNTESTED(GC_should_invoke_finalizers);
#       ifndef JAVA_FINALIZATION_NOT_NEEDED
          UNTESTED(GC_finalize_all);
#       endif
#       ifndef NO_DEBUGGING
          UNTESTED(GC_dump_finalization);
#       endif
#       ifndef GC_TOGGLE_REFS_NOT_NEEDED
          UNTESTED(GC_get_toggleref_func);
          UNTESTED(GC_set_toggleref_func);
          UNTESTED(GC_toggleref_add);
#       endif
#     endif
#     if !defined(OS2) && !defined(MACOS) && !defined(GC_ANDROID_LOG) \
         && !defined(MSWIN32) && !defined(MSWINCE)
        UNTESTED(GC_set_log_fd);
#     endif
#     ifdef THREADS
        UNTESTED(GC_allow_register_threads);
        UNTESTED(GC_get_on_thread_event);
        UNTESTED(GC_register_altstack);
        UNTESTED(GC_set_on_thread_event);
#     endif
#     ifndef REDIRECT_MALLOC_IN_HEADER
#       ifdef REDIRECT_MALLOC
#         ifndef strndup
            UNTESTED(strndup);
#         endif
#         ifndef strdup
            UNTESTED(strdup);
#         endif
#       endif
#       ifdef REDIRECT_REALLOC
          UNTESTED(realloc);
#       endif
#     endif /* !REDIRECT_MALLOC_IN_HEADER */
#     ifdef GC_REQUIRE_WCSDUP
        UNTESTED(GC_wcsdup);
        UNTESTED(GC_debug_wcsdup);
#     endif
#   endif
#   ifdef MSWIN32
      GC_win32_free_heap();
#   endif
#   ifdef RTEMS
      exit(0);
#   else
      return(0);
#   endif
}
# endif /* !GC_WIN32_THREADS && !GC_PTHREADS */
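
/* Test driver for the Win32-threads build: NTHREADS worker threads    */
/* each run run_one_test() and are joined before the heap is checked.  */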
#if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)

DWORD __stdcall thr_run_one_test(void * arg GC_ATTR_UNUSED)
{
  run_one_test();
  return 0;
}
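
/* On WinCE a dedicated thread creates a test window and pumps its     */
/* message queue, so that WM_HIBERNATE (apparently the system's        */
/* low-memory notification) can trigger GC_gcollect_and_unmap() while  */
/* the test runs.                                                      */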
#ifdef MSWINCE
HANDLE win_created_h;
HWND win_handle;

LRESULT CALLBACK window_proc(HWND hwnd, UINT uMsg, WPARAM wParam,
                             LPARAM lParam)
{
  LRESULT ret = 0;

  switch (uMsg) {
    case WM_HIBERNATE:
      GC_printf("Received WM_HIBERNATE, calling GC_gcollect\n");
      /* Force "unmap as much memory as possible" mode. */
      GC_gcollect_and_unmap();
      break;
    case WM_CLOSE:
      GC_printf("Received WM_CLOSE, closing window\n");
      DestroyWindow(hwnd);
      break;
    case WM_DESTROY:
      PostQuitMessage(0);
      break;
    default:
      ret = DefWindowProc(hwnd, uMsg, wParam, lParam);
      break;
  }
  return ret;
}

DWORD __stdcall thr_window(void * arg GC_ATTR_UNUSED)
{
  WNDCLASS win_class = {
    CS_NOCLOSE,
    window_proc,
    0,
    0,
    GetModuleHandle(NULL),
    NULL,
    NULL,
    (HBRUSH)(COLOR_APPWORKSPACE+1),
    NULL,
    TEXT("GCtestWindow")
  };
  MSG msg;

  if (!RegisterClass(&win_class))
    FAIL;

  win_handle = CreateWindowEx(
    0,
    TEXT("GCtestWindow"),
    TEXT("GCtest"),
    0,
    CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT,
    NULL,
    NULL,
    GetModuleHandle(NULL),
    NULL);

  if (win_handle == NULL)
    FAIL;

  SetEvent(win_created_h);

  ShowWindow(win_handle, SW_SHOW);
  UpdateWindow(win_handle);

  while (GetMessage(&msg, NULL, 0, 0)) {
    TranslateMessage(&msg);
    DispatchMessage(&msg);
  }
  return 0;
}
#endif

#if !defined(NO_WINMAIN_ENTRY)
  int APIENTRY WinMain(HINSTANCE instance GC_ATTR_UNUSED,
                       HINSTANCE prev GC_ATTR_UNUSED,
                       WINMAIN_LPTSTR cmd GC_ATTR_UNUSED,
                       int n GC_ATTR_UNUSED)
#else
  int main(void)
#endif
{
# if NTHREADS > 0
    HANDLE h[NTHREADS];
    int i;
# endif
# ifdef MSWINCE
    HANDLE win_thr_h;
# endif
  DWORD thread_id;

# if defined(CPPCHECK) && !defined(NO_WINMAIN_ENTRY)
    GC_noop1((GC_word)&WinMain);
# endif
# if defined(GC_DLL) && !defined(GC_NO_THREADS_DISCOVERY) \
     && !defined(MSWINCE) && !defined(THREAD_LOCAL_ALLOC) \
     && !defined(PARALLEL_MARK)
    GC_use_threads_discovery();
                /* Test with implicit thread registration if possible. */
    GC_printf("Using DllMain to track threads\n");
# endif
  GC_COND_INIT();
# if !defined(MAKE_BACK_GRAPH) && !defined(NO_INCREMENTAL)
    GC_enable_incremental();
# endif
  InitializeCriticalSection(&incr_cs);
  GC_set_warn_proc(warn_proc);
# ifdef MSWINCE
    win_created_h = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (win_created_h == (HANDLE)NULL) {
      GC_printf("Event creation failed %d\n", (int)GetLastError());
      FAIL;
    }
    win_thr_h = GC_CreateThread(NULL, 0, thr_window, 0, 0, &thread_id);
    if (win_thr_h == (HANDLE)NULL) {
      GC_printf("Thread creation failed %d\n", (int)GetLastError());
      FAIL;
    }
    if (WaitForSingleObject(win_created_h, INFINITE) != WAIT_OBJECT_0)
      FAIL;
    CloseHandle(win_created_h);
# endif
  set_print_procs();
# if NTHREADS > 0
    for (i = 0; i < NTHREADS; i++) {
      h[i] = GC_CreateThread(NULL, 0, thr_run_one_test, 0, 0, &thread_id);
      if (h[i] == (HANDLE)NULL) {
        GC_printf("Thread creation failed %d\n", (int)GetLastError());
        FAIL;
      }
    }
# endif /* NTHREADS > 0 */
  run_one_test();
# if NTHREADS > 0
    for (i = 0; i < NTHREADS; i++) {
      if (WaitForSingleObject(h[i], INFINITE) != WAIT_OBJECT_0) {
        GC_printf("Thread wait failed %d\n", (int)GetLastError());
        FAIL;
      }
    }
# endif /* NTHREADS > 0 */
# ifdef MSWINCE
    PostMessage(win_handle, WM_CLOSE, 0, 0);
    if (WaitForSingleObject(win_thr_h, INFINITE) != WAIT_OBJECT_0)
      FAIL;
# endif
  check_heap_stats();
# if defined(CPPCHECK) && defined(GC_WIN32_THREADS)
    UNTESTED(GC_ExitThread);
#   if !defined(MSWINCE) && !defined(CYGWIN32)
      UNTESTED(GC_beginthreadex);
      UNTESTED(GC_endthreadex);
#   endif
# endif
  return(0);
}
#endif /* GC_WIN32_THREADS */
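
/* Test driver for PCR (the Xerox Portable Common Runtime): two extra  */
/* PCR threads run run_one_test() alongside the calling thread.        */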
#ifdef PCR

int test(void)
{
    PCR_Th_T * th1;
    PCR_Th_T * th2;
    int code;

#   if defined(CPPCHECK)
      GC_noop1((word)&PCR_GC_Run);
      GC_noop1((word)&PCR_GC_Setup);
      GC_noop1((word)&test);
#   endif
    n_tests = 0;
    /* GC_enable_incremental(); */
    GC_set_warn_proc(warn_proc);
    set_print_procs();
    th1 = PCR_Th_Fork(run_one_test, 0);
    th2 = PCR_Th_Fork(run_one_test, 0);
    run_one_test();
    if (PCR_Th_T_Join(th1, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
        != PCR_ERes_okay || code != 0) {
        GC_printf("Thread 1 failed\n");
    }
    if (PCR_Th_T_Join(th2, &code, NIL, PCR_allSigsBlocked, PCR_waitForever)
        != PCR_ERes_okay || code != 0) {
        GC_printf("Thread 2 failed\n");
    }
    check_heap_stats();
    return(0);
}
#endif
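
/* Test driver for the POSIX-threads builds: analogous to the Win32    */
/* variant, with stack-size adjustments for some targets.              */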
#if defined(GC_PTHREADS)

void * thr_run_one_test(void * arg GC_ATTR_UNUSED)
{
    run_one_test();
    return(0);
}

#ifdef GC_DEBUG
#  define GC_free GC_debug_free
#endif

int main(void)
{
#   if NTHREADS > 0
      pthread_t th[NTHREADS];
      int i;
#   endif
    pthread_attr_t attr;
    int code;

#   ifdef GC_IRIX_THREADS
      /* Force a larger stack to be preallocated,       */
      /* since the initial one can't always grow later. */
      *((volatile char *)&code - 1024*1024) = 0;        /* Require 1 MB */
#   endif /* GC_IRIX_THREADS */
#   if defined(GC_HPUX_THREADS)
      /* Default stack size is too small, especially with the 64 bit ABI. */
      /* Increase it.                                                     */
      if (pthread_default_stacksize_np(1024*1024, 0) != 0) {
        GC_printf("pthread_default_stacksize_np failed\n");
      }
#   endif /* GC_HPUX_THREADS */
#   ifdef PTW32_STATIC_LIB
      pthread_win32_process_attach_np ();
      pthread_win32_thread_attach_np ();
#   endif
#   if defined(GC_DARWIN_THREADS) && !defined(GC_NO_THREADS_DISCOVERY) \
        && !defined(DARWIN_DONT_PARSE_STACK) && !defined(THREAD_LOCAL_ALLOC)
      /* Test with the Darwin implicit thread registration. */
      GC_use_threads_discovery();
      GC_printf("Using Darwin task-threads-based world stop and push\n");
#   endif
    GC_COND_INIT();

    if ((code = pthread_attr_init(&attr)) != 0) {
      GC_printf("pthread_attr_init failed, error=%d\n", code);
      FAIL;
    }
#   if defined(GC_IRIX_THREADS) || defined(GC_FREEBSD_THREADS) \
        || defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS) \
        || defined(GC_OPENBSD_THREADS)
      if ((code = pthread_attr_setstacksize(&attr, 1000 * 1024)) != 0) {
        GC_printf("pthread_attr_setstacksize failed, error=%d\n", code);
        FAIL;
      }
#   endif
    n_tests = 0;
#   if !defined(GC_DISABLE_INCREMENTAL) \
       && (defined(TEST_DEFAULT_VDB) || !defined(DEFAULT_VDB))
#     if !defined(REDIRECT_MALLOC) && !defined(MAKE_BACK_GRAPH) \
         && !defined(USE_PROC_FOR_LIBRARIES) && !defined(NO_INCREMENTAL) \
         && !defined(USE_MUNMAP)
        GC_enable_incremental();
#     endif
      if (GC_is_incremental_mode()) {
        GC_printf("Switched to incremental mode\n");
#       ifdef MPROTECT_VDB
          GC_printf("Emulating dirty bits with mprotect/signals\n");
#       endif
      }
#   endif
    GC_set_min_bytes_allocd(1);
    if (GC_get_min_bytes_allocd() != 1)
      FAIL;
    GC_set_rate(10);
    GC_set_max_prior_attempts(1);
    if (GC_get_rate() != 10 || GC_get_max_prior_attempts() != 1)
      FAIL;
    GC_set_warn_proc(warn_proc);
    if ((code = pthread_key_create(&fl_key, 0)) != 0) {
      GC_printf("Key creation failed %d\n", code);
      FAIL;
    }
    set_print_procs();
#   if NTHREADS > 0
      for (i = 0; i < NTHREADS; ++i) {
        if ((code = pthread_create(th+i, &attr, thr_run_one_test, 0)) != 0) {
          GC_printf("Thread %d creation failed %d\n", i, code);
          FAIL;
        }
      }
#   endif
    run_one_test();
#   if NTHREADS > 0
      for (i = 0; i < NTHREADS; ++i) {
        if ((code = pthread_join(th[i], 0)) != 0) {
          GC_printf("Thread %d failed %d\n", i, code);
          FAIL;
        }
      }
#   endif
    check_heap_stats();
    (void)fflush(stdout);
    (void)pthread_attr_destroy(&attr);
#   if defined(CPPCHECK)
      UNTESTED(GC_set_suspend_signal);
      UNTESTED(GC_set_thr_restart_signal);
#     ifndef GC_NO_DLOPEN
        UNTESTED(GC_dlopen);
#     endif
#     ifndef GC_NO_PTHREAD_CANCEL
        UNTESTED(GC_pthread_cancel);
#     endif
#     ifdef GC_HAVE_PTHREAD_EXIT
        UNTESTED(GC_pthread_exit);
#     endif
#     ifndef GC_NO_PTHREAD_SIGMASK
        UNTESTED(GC_pthread_sigmask);
#     endif
#     ifdef NO_TEST_HANDLE_FORK
        UNTESTED(GC_atfork_child);
        UNTESTED(GC_atfork_parent);
        UNTESTED(GC_atfork_prepare);
        UNTESTED(GC_set_handle_fork);
        UNTESTED(GC_start_mark_threads);
#     endif
#   endif /* CPPCHECK */
#   ifdef PTW32_STATIC_LIB
      pthread_win32_thread_detach_np ();
      pthread_win32_process_detach_np ();
#   endif
    return(0);
}
#endif /* GC_PTHREADS */