hs.patch 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135
  1. diff --git a/eval.c b/eval.c
  2. index b705302..909cd3d 100644
  3. --- a/eval.c
  4. +++ b/eval.c
  5. @@ -73,6 +73,7 @@ char *strrchr _((const char*,const char));
  6. #endif
  7. #include <time.h>
  8. +#include <sys/mman.h>
  9. #ifdef __BEOS__
  10. #include <net/socket.h>
  11. @@ -1022,7 +1023,7 @@ static struct tag *prot_tag;
  12. _tag.blkid = 0; \
  13. prot_tag = &_tag
  14. -#define PROT_NONE Qfalse /* 0 */
  15. +#define PROT_EMPTY Qfalse /* 0 */
  16. #define PROT_THREAD Qtrue /* 2 */
  17. #define PROT_FUNC INT2FIX(0) /* 1 */
  18. #define PROT_LOOP INT2FIX(1) /* 3 */
  19. @@ -1234,7 +1235,7 @@ error_print()
  20. if (NIL_P(ruby_errinfo)) return;
  21. - PUSH_TAG(PROT_NONE);
  22. + PUSH_TAG(PROT_EMPTY);
  23. if (EXEC_TAG() == 0) {
  24. errat = get_backtrace(ruby_errinfo);
  25. }
  26. @@ -1395,7 +1396,7 @@ ruby_init()
  27. /* default visibility is private at toplevel */
  28. SCOPE_SET(SCOPE_PRIVATE);
  29. - PUSH_TAG(PROT_NONE);
  30. + PUSH_TAG(PROT_EMPTY);
  31. if ((state = EXEC_TAG()) == 0) {
  32. rb_call_inits();
  33. ruby_class = rb_cObject;
  34. @@ -1529,7 +1530,7 @@ ruby_options(argc, argv)
  35. int state;
  36. Init_stack((void*)&state);
  37. - PUSH_TAG(PROT_NONE);
  38. + PUSH_TAG(PROT_EMPTY);
  39. if ((state = EXEC_TAG()) == 0) {
  40. ruby_process_options(argc, argv);
  41. }
  42. @@ -1546,7 +1547,7 @@ void rb_exec_end_proc _((void));
  43. static void
  44. ruby_finalize_0()
  45. {
  46. - PUSH_TAG(PROT_NONE);
  47. + PUSH_TAG(PROT_EMPTY);
  48. if (EXEC_TAG() == 0) {
  49. rb_trap_exit();
  50. }
  51. @@ -1584,7 +1585,7 @@ ruby_cleanup(ex)
  52. Init_stack((void *)&state);
  53. ruby_finalize_0();
  54. errs[0] = ruby_errinfo;
  55. - PUSH_TAG(PROT_NONE);
  56. + PUSH_TAG(PROT_EMPTY);
  57. PUSH_ITER(ITER_NOT);
  58. if ((state = EXEC_TAG()) == 0) {
  59. rb_thread_cleanup();
  60. @@ -1635,7 +1636,7 @@ ruby_exec_internal()
  61. {
  62. int state;
  63. - PUSH_TAG(PROT_NONE);
  64. + PUSH_TAG(PROT_EMPTY);
  65. PUSH_ITER(ITER_NOT);
  66. /* default visibility is private at toplevel */
  67. SCOPE_SET(SCOPE_PRIVATE);
  68. @@ -1857,7 +1858,7 @@ rb_eval_cmd(cmd, arg, level)
  69. }
  70. if (TYPE(cmd) != T_STRING) {
  71. PUSH_ITER(ITER_NOT);
  72. - PUSH_TAG(PROT_NONE);
  73. + PUSH_TAG(PROT_EMPTY);
  74. ruby_safe_level = level;
  75. if ((state = EXEC_TAG()) == 0) {
  76. val = rb_funcall2(cmd, rb_intern("call"), RARRAY(arg)->len, RARRAY(arg)->ptr);
  77. @@ -1879,7 +1880,7 @@ rb_eval_cmd(cmd, arg, level)
  78. ruby_safe_level = level;
  79. - PUSH_TAG(PROT_NONE);
  80. + PUSH_TAG(PROT_EMPTY);
  81. if ((state = EXEC_TAG()) == 0) {
  82. val = eval(ruby_top_self, cmd, Qnil, 0, 0);
  83. }
  84. @@ -2386,7 +2387,7 @@ is_defined(self, node, buf)
  85. val = self;
  86. if (node->nd_recv == (NODE *)1) goto check_bound;
  87. case NODE_CALL:
  88. - PUSH_TAG(PROT_NONE);
  89. + PUSH_TAG(PROT_EMPTY);
  90. if ((state = EXEC_TAG()) == 0) {
  91. val = rb_eval(self, node->nd_recv);
  92. }
  93. @@ -2488,7 +2489,7 @@ is_defined(self, node, buf)
  94. break;
  95. case NODE_COLON2:
  96. - PUSH_TAG(PROT_NONE);
  97. + PUSH_TAG(PROT_EMPTY);
  98. if ((state = EXEC_TAG()) == 0) {
  99. val = rb_eval(self, node->nd_head);
  100. }
  101. @@ -2537,7 +2538,7 @@ is_defined(self, node, buf)
  102. goto again;
  103. default:
  104. - PUSH_TAG(PROT_NONE);
  105. + PUSH_TAG(PROT_EMPTY);
  106. if ((state = EXEC_TAG()) == 0) {
  107. rb_eval(self, node);
  108. }
  109. @@ -2741,7 +2742,7 @@ call_trace_func(event, node, self, id, klass)
  110. klass = rb_iv_get(klass, "__attached__");
  111. }
  112. }
  113. - PUSH_TAG(PROT_NONE);
  114. + PUSH_TAG(PROT_EMPTY);
  115. raised = rb_thread_reset_raised(th);
  116. if ((state = EXEC_TAG()) == 0) {
  117. srcfile = rb_str_new2(ruby_sourcefile?ruby_sourcefile:"(ruby)");
  118. @@ -3304,7 +3305,7 @@ rb_eval(self, n)
  119. volatile VALUE e_info = ruby_errinfo;
  120. volatile int rescuing = 0;
  121. - PUSH_TAG(PROT_NONE);
  122. + PUSH_TAG(PROT_EMPTY);
  123. if ((state = EXEC_TAG()) == 0) {
  124. retry_entry:
  125. result = rb_eval(self, node->nd_head);
  126. @@ -3353,7 +3354,7 @@ rb_eval(self, n)
  127. break;
  128. case NODE_ENSURE:
  129. - PUSH_TAG(PROT_NONE);
  130. + PUSH_TAG(PROT_EMPTY);
  131. if ((state = EXEC_TAG()) == 0) {
  132. result = rb_eval(self, node->nd_head);
  133. }
  134. @@ -3571,7 +3572,7 @@ rb_eval(self, n)
  135. ruby_frame = &frame;
  136. PUSH_SCOPE();
  137. - PUSH_TAG(PROT_NONE);
  138. + PUSH_TAG(PROT_EMPTY);
  139. if (node->nd_rval) {
  140. saved_cref = ruby_cref;
  141. ruby_cref = (NODE*)node->nd_rval;
  142. @@ -4197,7 +4198,7 @@ module_setup(module, n)
  143. }
  144. PUSH_CREF(module);
  145. - PUSH_TAG(PROT_NONE);
  146. + PUSH_TAG(PROT_EMPTY);
  147. if ((state = EXEC_TAG()) == 0) {
  148. EXEC_EVENT_HOOK(RUBY_EVENT_CLASS, n, ruby_cbase,
  149. ruby_frame->last_func, ruby_frame->last_class);
  150. @@ -4604,7 +4605,7 @@ rb_longjmp(tag, mesg)
  151. VALUE e = ruby_errinfo;
  152. int status;
  153. - PUSH_TAG(PROT_NONE);
  154. + PUSH_TAG(PROT_EMPTY);
  155. if ((status = EXEC_TAG()) == 0) {
  156. StringValue(e);
  157. warn_printf("Exception `%s' at %s:%d - %s\n",
  158. @@ -4978,7 +4979,7 @@ rb_yield_0(val, self, klass, flags, avalue)
  159. var = block->var;
  160. if (var) {
  161. - PUSH_TAG(PROT_NONE);
  162. + PUSH_TAG(PROT_EMPTY);
  163. if ((state = EXEC_TAG()) == 0) {
  164. NODE *bvar = NULL;
  165. block_var:
  166. @@ -5051,7 +5052,7 @@ rb_yield_0(val, self, klass, flags, avalue)
  167. ruby_current_node = node;
  168. PUSH_ITER(block->iter);
  169. - PUSH_TAG(lambda ? PROT_NONE : PROT_YIELD);
  170. + PUSH_TAG(lambda ? PROT_EMPTY : PROT_YIELD);
  171. if ((state = EXEC_TAG()) == 0) {
  172. redo:
  173. if (nd_type(node) == NODE_CFUNC || nd_type(node) == NODE_IFUNC) {
  174. @@ -5464,7 +5465,7 @@ rb_rescue2(b_proc, data1, r_proc, data2, va_alist)
  175. VALUE eclass;
  176. va_list args;
  177. - PUSH_TAG(PROT_NONE);
  178. + PUSH_TAG(PROT_EMPTY);
  179. switch (state = EXEC_TAG()) {
  180. case TAG_RETRY:
  181. if (!handle) break;
  182. @@ -5522,7 +5523,7 @@ rb_protect(proc, data, state)
  183. VALUE result = Qnil; /* OK */
  184. int status;
  185. - PUSH_TAG(PROT_NONE);
  186. + PUSH_TAG(PROT_EMPTY);
  187. cont_protect = (VALUE)rb_node_newnode(NODE_MEMO, cont_protect, 0, 0);
  188. if ((status = EXEC_TAG()) == 0) {
  189. result = (*proc)(data);
  190. @@ -5550,7 +5551,7 @@ rb_ensure(b_proc, data1, e_proc, data2)
  191. volatile VALUE result = Qnil;
  192. VALUE retval;
  193. - PUSH_TAG(PROT_NONE);
  194. + PUSH_TAG(PROT_EMPTY);
  195. if ((state = EXEC_TAG()) == 0) {
  196. result = (*b_proc)(data1);
  197. }
  198. @@ -5577,7 +5578,7 @@ rb_with_disable_interrupt(proc, data)
  199. int thr_critical = rb_thread_critical;
  200. rb_thread_critical = Qtrue;
  201. - PUSH_TAG(PROT_NONE);
  202. + PUSH_TAG(PROT_EMPTY);
  203. if ((status = EXEC_TAG()) == 0) {
  204. result = (*proc)(data);
  205. }
  206. @@ -6264,7 +6265,7 @@ rb_funcall_rescue(recv, mid, n, va_alist)
  207. va_init_list(ar, n);
  208. - PUSH_TAG(PROT_NONE);
  209. + PUSH_TAG(PROT_EMPTY);
  210. if ((status = EXEC_TAG()) == 0) {
  211. result = vafuncall(recv, mid, n, &ar);
  212. }
  213. @@ -6539,7 +6540,7 @@ eval(self, src, scope, file, line)
  214. if (TYPE(ruby_class) == T_ICLASS) {
  215. ruby_class = RBASIC(ruby_class)->klass;
  216. }
  217. - PUSH_TAG(PROT_NONE);
  218. + PUSH_TAG(PROT_EMPTY);
  219. if ((state = EXEC_TAG()) == 0) {
  220. NODE *node;
  221. @@ -6698,7 +6699,7 @@ exec_under(func, under, cbase, args)
  222. mode = scope_vmode;
  223. SCOPE_SET(SCOPE_PUBLIC);
  224. - PUSH_TAG(PROT_NONE);
  225. + PUSH_TAG(PROT_EMPTY);
  226. if ((state = EXEC_TAG()) == 0) {
  227. val = (*func)(args);
  228. }
  229. @@ -7009,7 +7010,7 @@ rb_load(fname, wrap)
  230. PUSH_SCOPE();
  231. /* default visibility is private at loading toplevel */
  232. SCOPE_SET(SCOPE_PRIVATE);
  233. - PUSH_TAG(PROT_NONE);
  234. + PUSH_TAG(PROT_EMPTY);
  235. state = EXEC_TAG();
  236. last_func = ruby_frame->last_func;
  237. last_node = ruby_current_node;
  238. @@ -7068,7 +7069,7 @@ rb_load_protect(fname, wrap, state)
  239. {
  240. int status;
  241. - PUSH_TAG(PROT_NONE);
  242. + PUSH_TAG(PROT_EMPTY);
  243. if ((status = EXEC_TAG()) == 0) {
  244. rb_load(fname, wrap);
  245. }
  246. @@ -7389,7 +7390,7 @@ rb_require_safe(fname, safe)
  247. saved.node = ruby_current_node;
  248. saved.func = ruby_frame->last_func;
  249. saved.safe = ruby_safe_level;
  250. - PUSH_TAG(PROT_NONE);
  251. + PUSH_TAG(PROT_EMPTY);
  252. if ((state = EXEC_TAG()) == 0) {
  253. VALUE feature, path;
  254. long handle;
  255. @@ -8097,7 +8098,7 @@ rb_exec_end_proc()
  256. tmp_end_procs = link = ephemeral_end_procs;
  257. ephemeral_end_procs = 0;
  258. while (link) {
  259. - PUSH_TAG(PROT_NONE);
  260. + PUSH_TAG(PROT_EMPTY);
  261. if ((status = EXEC_TAG()) == 0) {
  262. ruby_safe_level = link->safe;
  263. (*link->func)(link->data);
  264. @@ -8115,7 +8116,7 @@ rb_exec_end_proc()
  265. tmp_end_procs = link = end_procs;
  266. end_procs = 0;
  267. while (link) {
  268. - PUSH_TAG(PROT_NONE);
  269. + PUSH_TAG(PROT_EMPTY);
  270. if ((status = EXEC_TAG()) == 0) {
  271. ruby_safe_level = link->safe;
  272. (*link->func)(link->data);
  273. @@ -8852,7 +8853,7 @@ proc_invoke(proc, args, self, klass)
  274. ruby_block = &_block;
  275. PUSH_ITER(ITER_CUR);
  276. ruby_frame->iter = ITER_CUR;
  277. - PUSH_TAG(pcall ? PROT_LAMBDA : PROT_NONE);
  278. + PUSH_TAG(pcall ? PROT_LAMBDA : PROT_EMPTY);
  279. state = EXEC_TAG();
  280. if (state == 0) {
  281. proc_set_safe_level(proc);
  282. @@ -10179,6 +10180,7 @@ win32_set_exception_list(p)
  283. int rb_thread_pending = 0;
  284. VALUE rb_cThread;
  285. +static unsigned int rb_thread_stack_size;
  286. extern VALUE rb_last_status;
  287. @@ -10406,12 +10408,20 @@ thread_mark(th)
  288. rb_gc_mark(th->thread);
  289. if (th->join) rb_gc_mark(th->join->thread);
  290. - rb_gc_mark(th->klass);
  291. - rb_gc_mark(th->wrapper);
  292. - rb_gc_mark((VALUE)th->cref);
  293. + if (curr_thread == th) {
  294. + rb_gc_mark(ruby_class);
  295. + rb_gc_mark(ruby_wrapper);
  296. + rb_gc_mark((VALUE)ruby_cref);
  297. + rb_gc_mark((VALUE)ruby_scope);
  298. + rb_gc_mark((VALUE)ruby_dyna_vars);
  299. + } else {
  300. + rb_gc_mark(th->klass);
  301. + rb_gc_mark(th->wrapper);
  302. + rb_gc_mark((VALUE)th->cref);
  303. + rb_gc_mark((VALUE)th->scope);
  304. + rb_gc_mark((VALUE)th->dyna_vars);
  305. + }
  306. - rb_gc_mark((VALUE)th->scope);
  307. - rb_gc_mark((VALUE)th->dyna_vars);
  308. rb_gc_mark(th->errinfo);
  309. rb_gc_mark(th->last_status);
  310. rb_gc_mark(th->last_line);
  311. @@ -10421,11 +10431,11 @@ thread_mark(th)
  312. rb_gc_mark_maybe(th->sandbox);
  313. /* mark data in copied stack */
  314. - if (th == curr_thread) return;
  315. + if (th == main_thread) return;
  316. if (th->status == THREAD_KILLED) return;
  317. if (th->stk_len == 0) return; /* stack not active, no need to mark. */
  318. - if (th->stk_ptr) {
  319. - rb_gc_mark_locations(th->stk_ptr, th->stk_ptr+th->stk_len);
  320. + if (th->stk_ptr && th != curr_thread) {
  321. + rb_gc_mark_locations(th->stk_pos, th->stk_base);
  322. #if defined(THINK_C) || defined(__human68k__)
  323. rb_gc_mark_locations(th->stk_ptr+2, th->stk_ptr+th->stk_len+2);
  324. #endif
  325. @@ -10435,24 +10445,30 @@ thread_mark(th)
  326. }
  327. #endif
  328. }
  329. - frame = th->frame;
  330. +
  331. + if (curr_thread == th)
  332. + frame = ruby_frame;
  333. + else
  334. + frame = th->frame;
  335. +
  336. while (frame && frame != top_frame) {
  337. - frame = ADJ(frame);
  338. rb_gc_mark_frame(frame);
  339. if (frame->tmp) {
  340. struct FRAME *tmp = frame->tmp;
  341. -
  342. while (tmp && tmp != top_frame) {
  343. - tmp = ADJ(tmp);
  344. rb_gc_mark_frame(tmp);
  345. tmp = tmp->prev;
  346. }
  347. }
  348. frame = frame->prev;
  349. }
  350. - block = th->block;
  351. +
  352. + if (curr_thread == th)
  353. + block = ruby_block;
  354. + else
  355. + block = th->block;
  356. +
  357. while (block) {
  358. - block = ADJ(block);
  359. rb_gc_mark_frame(&block->frame);
  360. block = block->prev;
  361. }
  362. @@ -10515,7 +10531,7 @@ static inline void
  363. stack_free(th)
  364. rb_thread_t th;
  365. {
  366. - if (th->stk_ptr) free(th->stk_ptr);
  367. + if (th->stk_ptr) munmap(th->stk_ptr, th->stk_size);
  368. th->stk_ptr = 0;
  369. #ifdef __ia64
  370. if (th->bstr_ptr) free(th->bstr_ptr);
  371. @@ -10576,35 +10592,8 @@ rb_thread_save_context(th)
  372. static VALUE tval;
  373. len = ruby_stack_length(&pos);
  374. - th->stk_len = 0;
  375. - th->stk_pos = pos;
  376. - if (len > th->stk_max) {
  377. - VALUE *ptr = realloc(th->stk_ptr, sizeof(VALUE) * len);
  378. - if (!ptr) rb_memerror();
  379. - th->stk_ptr = ptr;
  380. - th->stk_max = len;
  381. - }
  382. th->stk_len = len;
  383. - FLUSH_REGISTER_WINDOWS;
  384. - MEMCPY(th->stk_ptr, th->stk_pos, VALUE, th->stk_len);
  385. -#ifdef __ia64
  386. - th->bstr_pos = rb_gc_register_stack_start;
  387. - len = (VALUE*)rb_ia64_bsp() - th->bstr_pos;
  388. - th->bstr_len = 0;
  389. - if (len > th->bstr_max) {
  390. - VALUE *ptr = realloc(th->bstr_ptr, sizeof(VALUE) * len);
  391. - if (!ptr) rb_memerror();
  392. - th->bstr_ptr = ptr;
  393. - th->bstr_max = len;
  394. - }
  395. - th->bstr_len = len;
  396. - rb_ia64_flushrs();
  397. - MEMCPY(th->bstr_ptr, th->bstr_pos, VALUE, th->bstr_len);
  398. -#endif
  399. -#ifdef SAVE_WIN32_EXCEPTION_LIST
  400. - th->win32_exception_list = win32_get_exception_list();
  401. -#endif
  402. -
  403. + th->stk_pos = pos;
  404. th->frame = ruby_frame;
  405. th->scope = ruby_scope;
  406. ruby_scope->flags |= SCOPE_DONT_RECYCLE;
  407. @@ -10714,11 +10703,6 @@ rb_thread_restore_context_0(rb_thread_t th, int exit)
  408. #endif
  409. tmp = th;
  410. ex = exit;
  411. - FLUSH_REGISTER_WINDOWS;
  412. - MEMCPY(tmp->stk_pos, tmp->stk_ptr, VALUE, tmp->stk_len);
  413. -#ifdef __ia64
  414. - MEMCPY(tmp->bstr_pos, tmp->bstr_ptr, VALUE, tmp->bstr_len);
  415. -#endif
  416. tval = rb_lastline_get();
  417. rb_lastline_set(tmp->last_line);
  418. @@ -10809,8 +10793,8 @@ rb_thread_restore_context(th, exit)
  419. rb_thread_t th;
  420. int exit;
  421. {
  422. - if (!th->stk_ptr) rb_bug("unsaved context");
  423. - stack_extend(th, exit);
  424. + if (!th->stk_ptr && th != main_thread) rb_bug("unsaved context");
  425. + rb_thread_restore_context_0(th, exit);
  426. }
  427. static void
  428. @@ -10829,7 +10813,6 @@ rb_thread_die(th)
  429. {
  430. th->thgroup = 0;
  431. th->status = THREAD_KILLED;
  432. - stack_free(th);
  433. }
  434. static void
  435. @@ -12096,6 +12079,7 @@ rb_thread_group(thread)
  436. \
  437. th->stk_ptr = 0;\
  438. th->stk_len = 0;\
  439. + th->stk_size = 0;\
  440. th->stk_max = 0;\
  441. th->wait_for = 0;\
  442. IA64_INIT(th->bstr_ptr = 0);\
  443. @@ -12143,6 +12127,48 @@ rb_thread_alloc(klass)
  444. THREAD_ALLOC(th);
  445. th->thread = Data_Wrap_Struct(klass, thread_mark, thread_free, th);
  446. + /* if main_thread != NULL, then this is NOT the main thread, so
  447. + * we create a heap-stack
  448. + */
  449. + if (main_thread) {
  450. + /* Allocate stack, don't forget to add 1 extra word because of the MATH below */
  451. + unsigned int pagesize = getpagesize();
  452. + unsigned int total_size = rb_thread_stack_size + pagesize + sizeof(int);
  453. + void *stack_area = NULL;
  454. +
  455. + stack_area = mmap(NULL, total_size, PROT_READ | PROT_WRITE | PROT_EXEC,
  456. + MAP_PRIVATE | MAP_ANON, -1, 0);
  457. +
  458. + if (stack_area == MAP_FAILED) {
  459. + fprintf(stderr, "Thread stack allocation failed!\n");
  460. + rb_memerror();
  461. + }
  462. +
  463. + th->stk_ptr = th->stk_pos = stack_area;
  464. + th->stk_size = total_size;
  465. +
  466. + if (mprotect(th->stk_ptr, pagesize, PROT_NONE) == -1) {
  467. + fprintf(stderr, "Failed to create thread guard region: %s\n", strerror(errno));
  468. + rb_memerror();
  469. + }
  470. +
  471. + th->guard = th->stk_ptr + (pagesize/sizeof(VALUE *));
  472. +
  473. + /* point stk_base at the top of the stack */
  474. + /* ASSUMPTIONS:
  475. + * 1.) The address returned by mmap is page-aligned, and therefore "suitably aligned" for anything on this system
  476. + * 2.) Adding a value that is "aligned" for this platform should not unalign the address
  477. + * returned from mmap.
  478. + * 3.) Don't push anything on to the stack, otherwise it'll get unaligned.
  479. + * 4.) x86_64 ABI says aligned AFTER arguments have been pushed. You *must* then do a call[lq]
  480. + * or push[lq] something else on to the stack if you inted to do a ret.
  481. + */
  482. + th->stk_base = th->stk_ptr + ((total_size - sizeof(int))/sizeof(VALUE *));
  483. + th->stk_len = rb_thread_stack_size;
  484. + } else {
  485. + th->stk_ptr = th->stk_pos = rb_gc_stack_start;
  486. + }
  487. +
  488. for (vars = th->dyna_vars; vars; vars = vars->next) {
  489. if (FL_TEST(vars, DVAR_DONT_RECYCLE)) break;
  490. FL_SET(vars, DVAR_DONT_RECYCLE);
  491. @@ -12246,17 +12272,22 @@ rb_thread_stop_timer()
  492. int rb_thread_tick = THREAD_TICK;
  493. #endif
  494. +struct thread_start_args {
  495. + VALUE (*fn)();
  496. + void *arg;
  497. + rb_thread_t th;
  498. +} new_th;
  499. +
  500. +static VALUE
  501. +rb_thread_start_2();
  502. +
  503. static VALUE
  504. rb_thread_start_0(fn, arg, th)
  505. VALUE (*fn)();
  506. void *arg;
  507. rb_thread_t th;
  508. {
  509. - volatile rb_thread_t th_save = th;
  510. volatile VALUE thread = th->thread;
  511. - struct BLOCK *volatile saved_block = 0;
  512. - enum rb_thread_status status;
  513. - int state;
  514. if (OBJ_FROZEN(curr_thread->thgroup)) {
  515. rb_raise(rb_eThreadError,
  516. @@ -12284,16 +12315,41 @@ rb_thread_start_0(fn, arg, th)
  517. return thread;
  518. }
  519. - if (ruby_block) { /* should nail down higher blocks */
  520. - struct BLOCK dummy;
  521. + new_th.fn = fn;
  522. + new_th.arg = arg;
  523. + new_th.th = th;
  524. +
  525. +#if defined(__i386__)
  526. + __asm__ __volatile__ ("movl %0, %%esp\n\t"
  527. + "calll *%1\n"
  528. + :: "r" (th->stk_base),
  529. + "r" (rb_thread_start_2));
  530. +#elif defined(__x86_64__)
  531. + __asm__ __volatile__ ("movq %0, %%rsp\n\t"
  532. + "callq *%1\n"
  533. + :: "r" (th->stk_base),
  534. + "r" (rb_thread_start_2));
  535. +#else
  536. + #error unsupported architecture!
  537. +#endif
  538. + /* NOTREACHED */
  539. + return 0;
  540. +}
  541. - dummy.prev = ruby_block;
  542. - blk_copy_prev(&dummy);
  543. - saved_block = ruby_block = dummy.prev;
  544. - }
  545. - scope_dup(ruby_scope);
  546. +static VALUE
  547. +rb_thread_start_2()
  548. +{
  549. + volatile rb_thread_t th = new_th.th;
  550. + volatile rb_thread_t th_save = th;
  551. + volatile VALUE thread = th->thread;
  552. + struct BLOCK *volatile saved_block = 0;
  553. + enum rb_thread_status status;
  554. + int state;
  555. + struct tag *tag;
  556. + struct RVarmap *vars;
  557. + struct FRAME dummy_frame;
  558. - if (!th->next) {
  559. + if (!th->next) {
  560. /* merge in thread list */
  561. th->prev = curr_thread;
  562. curr_thread->next->prev = th;
  563. @@ -12301,13 +12357,27 @@ rb_thread_start_0(fn, arg, th)
  564. curr_thread->next = th;
  565. th->priority = curr_thread->priority;
  566. th->thgroup = curr_thread->thgroup;
  567. + }
  568. + curr_thread = th;
  569. +
  570. + dummy_frame = *ruby_frame;
  571. + dummy_frame.prev = top_frame;
  572. + ruby_frame = &dummy_frame;
  573. +
  574. + if (ruby_block) { /* should nail down higher blocks */
  575. + struct BLOCK dummy;
  576. +
  577. + dummy.prev = ruby_block;
  578. + blk_copy_prev(&dummy);
  579. + saved_block = ruby_block = dummy.prev;
  580. }
  581. + scope_dup(ruby_scope);
  582. +
  583. PUSH_TAG(PROT_THREAD);
  584. if ((state = EXEC_TAG()) == 0) {
  585. if (THREAD_SAVE_CONTEXT(th) == 0) {
  586. - curr_thread = th;
  587. - th->result = (*fn)(arg, th);
  588. + th->result = (*new_th.fn)(new_th.arg, th);
  589. }
  590. th = th_save;
  591. }
  592. @@ -12644,6 +12714,43 @@ rb_thread_cleanup()
  593. END_FOREACH_FROM(curr, th);
  594. }
  595. +/*
  596. + * call-seq:
  597. + * Thread.stack_size => fixnum
  598. + *
  599. + * Returns the thread stack size in bytes
  600. + */
  601. +static VALUE
  602. +rb_thread_stacksize_get()
  603. +{
  604. + return INT2FIX(rb_thread_stack_size);
  605. +}
  606. +
  607. +/*
  608. + * call-seq:
  609. + * Thread.stack_size= fixnum => Qnil
  610. + *
  611. + * Sets the global thread stacksize and returns Qnil.
  612. + */
  613. +static VALUE
  614. +rb_thread_stacksize_set(obj, val)
  615. + VALUE obj;
  616. + VALUE val;
  617. +{
  618. +
  619. + unsigned int size = FIX2UINT(val);
  620. +
  621. + /* 16byte alignment works for both x86 and x86_64 */
  622. + if (size & (~0xf)) {
  623. + size += 0x10;
  624. + size = size & (~0xf);
  625. + }
  626. +
  627. + rb_thread_stack_size = size;
  628. +
  629. + return Qnil;
  630. +}
  631. +
  632. int rb_thread_critical;
  633. @@ -13473,7 +13580,7 @@ rb_exec_recursive(func, obj, arg)
  634. int state;
  635. hash = recursive_push(hash, objid);
  636. - PUSH_TAG(PROT_NONE);
  637. + PUSH_TAG(PROT_EMPTY);
  638. if ((state = EXEC_TAG()) == 0) {
  639. result = (*func) (obj, arg, Qfalse);
  640. }
  641. @@ -13500,6 +13607,8 @@ Init_Thread()
  642. {
  643. VALUE cThGroup;
  644. + rb_thread_stack_size = (1024 * 1024);
  645. +
  646. recursive_key = rb_intern("__recursive_key__");
  647. rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);
  648. rb_cThread = rb_define_class("Thread", rb_cObject);
  649. @@ -13524,6 +13633,9 @@ Init_Thread()
  650. rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
  651. rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
  652. + rb_define_singleton_method(rb_cThread, "stack_size", rb_thread_stacksize_get, 0);
  653. + rb_define_singleton_method(rb_cThread, "stack_size=", rb_thread_stacksize_set, 1);
  654. +
  655. rb_define_method(rb_cThread, "run", rb_thread_run, 0);
  656. rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
  657. rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
  658. diff --git a/gc.c b/gc.c
  659. index a564f0b..d6d654d 100644
  660. --- a/gc.c
  661. +++ b/gc.c
  662. @@ -506,12 +506,12 @@ stack_end_address(VALUE **stack_end_p)
  663. # define STACK_END (stack_end)
  664. #endif
  665. #if STACK_GROW_DIRECTION < 0
  666. -# define STACK_LENGTH (rb_gc_stack_start - STACK_END)
  667. +# define STACK_LENGTH(start) ((start) - STACK_END)
  668. #elif STACK_GROW_DIRECTION > 0
  669. -# define STACK_LENGTH (STACK_END - rb_gc_stack_start + 1)
  670. +# define STACK_LENGTH(start) (STACK_END - (start) + 1)
  671. #else
  672. -# define STACK_LENGTH ((STACK_END < rb_gc_stack_start) ? rb_gc_stack_start - STACK_END\
  673. - : STACK_END - rb_gc_stack_start + 1)
  674. +# define STACK_LENGTH(start) ((STACK_END < (start)) ? (start) - STACK_END\
  675. + : STACK_END - (start) + 1)
  676. #endif
  677. #if STACK_GROW_DIRECTION > 0
  678. # define STACK_UPPER(x, a, b) a
  679. @@ -534,27 +534,36 @@ stack_grow_direction(addr)
  680. #define GC_WATER_MARK 512
  681. -#define CHECK_STACK(ret) do {\
  682. +#define CHECK_STACK(ret, start) do {\
  683. SET_STACK_END;\
  684. - (ret) = (STACK_LENGTH > STACK_LEVEL_MAX + GC_WATER_MARK);\
  685. + (ret) = (STACK_LENGTH(start) > STACK_LEVEL_MAX + GC_WATER_MARK);\
  686. } while (0)
  687. size_t
  688. ruby_stack_length(p)
  689. VALUE **p;
  690. {
  691. - SET_STACK_END;
  692. - if (p) *p = STACK_UPPER(STACK_END, rb_gc_stack_start, STACK_END);
  693. - return STACK_LENGTH;
  694. + SET_STACK_END;
  695. + VALUE *start;
  696. + if (rb_curr_thread == rb_main_thread) {
  697. + start = rb_gc_stack_start;
  698. + } else {
  699. + start = rb_curr_thread->stk_base;
  700. + }
  701. + if (p) *p = STACK_UPPER(STACK_END, start, STACK_END);
  702. + return STACK_LENGTH(start);
  703. }
  704. int
  705. ruby_stack_check()
  706. {
  707. - int ret;
  708. -
  709. - CHECK_STACK(ret);
  710. - return ret;
  711. + int ret;
  712. + if (rb_curr_thread == rb_main_thread) {
  713. + CHECK_STACK(ret, rb_gc_stack_start);
  714. + } else {
  715. + CHECK_STACK(ret, rb_curr_thread->stk_base);
  716. + }
  717. + return ret;
  718. }
  719. #define MARK_STACK_MAX 1024
  720. @@ -1441,10 +1450,13 @@ garbage_collect()
  721. init_mark_stack();
  722. - gc_mark((VALUE)ruby_current_node, 0);
  723. -
  724. /* mark frame stack */
  725. - for (frame = ruby_frame; frame; frame = frame->prev) {
  726. + if (rb_curr_thread == rb_main_thread)
  727. + frame = ruby_frame;
  728. + else
  729. + frame = rb_main_thread->frame;
  730. +
  731. + for (; frame; frame = frame->prev) {
  732. rb_gc_mark_frame(frame);
  733. if (frame->tmp) {
  734. struct FRAME *tmp = frame->tmp;
  735. @@ -1454,16 +1466,35 @@ garbage_collect()
  736. }
  737. }
  738. }
  739. - gc_mark((VALUE)ruby_scope, 0);
  740. - gc_mark((VALUE)ruby_dyna_vars, 0);
  741. +
  742. + if (rb_curr_thread == rb_main_thread) {
  743. + gc_mark((VALUE)ruby_current_node, 0);
  744. + gc_mark((VALUE)ruby_scope, 0);
  745. + gc_mark((VALUE)ruby_dyna_vars, 0);
  746. + } else {
  747. + gc_mark((VALUE)rb_main_thread->node, 0);
  748. + gc_mark((VALUE)rb_main_thread->scope, 0);
  749. + gc_mark((VALUE)rb_main_thread->dyna_vars, 0);
  750. +
  751. + /* scan the current thread's stack */
  752. + rb_gc_mark_locations((VALUE*)STACK_END, rb_curr_thread->stk_base);
  753. + }
  754. +
  755. if (finalizer_table) {
  756. - mark_tbl(finalizer_table, 0);
  757. + mark_tbl(finalizer_table, 0);
  758. }
  759. FLUSH_REGISTER_WINDOWS;
  760. /* This assumes that all registers are saved into the jmp_buf (and stack) */
  761. rb_setjmp(save_regs_gc_mark);
  762. mark_locations_array((VALUE*)save_regs_gc_mark, sizeof(save_regs_gc_mark) / sizeof(VALUE *));
  763. +
  764. + /* If this is not the main thread, we need to scan the C stack, so
  765. + * set STACK_END to the end of the C stack.
  766. + */
  767. + if (rb_curr_thread != rb_main_thread)
  768. + STACK_END = rb_main_thread->stk_pos;
  769. +
  770. #if STACK_GROW_DIRECTION < 0
  771. rb_gc_mark_locations((VALUE*)STACK_END, rb_gc_stack_start);
  772. #elif STACK_GROW_DIRECTION > 0
  773. @@ -1483,6 +1514,7 @@ garbage_collect()
  774. rb_gc_mark_locations((VALUE*)((char*)STACK_END + 2),
  775. (VALUE*)((char*)rb_gc_stack_start + 2));
  776. #endif
  777. +
  778. rb_gc_mark_threads();
  779. /* mark protected global variables */
  780. diff --git a/lib/logger.rb b/lib/logger.rb
  781. index 15d95fc..5a6d467 100644
  782. --- a/lib/logger.rb
  783. +++ b/lib/logger.rb
  784. @@ -170,7 +170,7 @@ require 'monitor'
  785. class Logger
  786. VERSION = "1.2.6"
  787. - id, name, rev = %w$Id$
  788. + id, name, rev = %w$Id logger.rb 1234$
  789. ProgName = "#{name.chomp(",v")}/#{rev}"
  790. class Error < RuntimeError; end
  791. diff --git a/node.h b/node.h
  792. index c209fa5..740e66a 100644
  793. --- a/node.h
  794. +++ b/node.h
  795. @@ -411,8 +411,11 @@ struct rb_thread {
  796. size_t stk_len;
  797. size_t stk_max;
  798. + size_t stk_size;
  799. VALUE *stk_ptr;
  800. VALUE *stk_pos;
  801. + VALUE *stk_base;
  802. + VALUE *guard;
  803. #ifdef __ia64
  804. size_t bstr_len;
  805. size_t bstr_max;
  806. diff --git a/signal.c b/signal.c
  807. index fb21fd3..acac6a7 100644
  808. --- a/signal.c
  809. +++ b/signal.c
  810. @@ -14,6 +14,7 @@
  811. #include "ruby.h"
  812. #include "rubysig.h"
  813. +#include "node.h"
  814. #include <signal.h>
  815. #include <stdio.h>
  816. @@ -428,15 +429,22 @@ typedef RETSIGTYPE (*sighandler_t)_((int));
  817. static sighandler_t
  818. ruby_signal(signum, handler)
  819. int signum;
  820. - sighandler_t handler;
  821. + void *handler;
  822. {
  823. struct sigaction sigact, old;
  824. rb_trap_accept_nativethreads[signum] = 0;
  825. - sigact.sa_handler = handler;
  826. + if (signum == SIGSEGV || signum == SIGBUS) {
  827. + sigact.sa_sigaction = handler;
  828. + sigact.sa_flags = (SA_ONSTACK | SA_RESETHAND | SA_SIGINFO);
  829. + } else {
  830. + sigact.sa_handler = handler;
  831. + sigact.sa_flags = 0;
  832. + }
  833. +
  834. sigemptyset(&sigact.sa_mask);
  835. - sigact.sa_flags = 0;
  836. +
  837. # ifdef SA_NOCLDWAIT
  838. if (signum == SIGCHLD && handler == SIG_IGN)
  839. sigact.sa_flags |= SA_NOCLDWAIT;
  840. @@ -599,7 +607,132 @@ sighandler(sig)
  841. }
  842. }
  843. +#include <stdio.h>
  844. +#ifdef HAVE_STDARG_PROTOTYPES
  845. +#include <stdarg.h>
  846. +#define va_init_list(a,b) va_start(a,b)
  847. +#else
  848. +#include <varargs.h>
  849. +#define va_init_list(a,b) va_start(a)
  850. +#endif
  851. +
  852. +void
  853. +#ifdef HAVE_STDARG_PROTOTYPES
  854. +sig_printf(const char *fmt, ...)
  855. +#else
  856. + sig_printf(fmt, va_alist)
  857. + const char *fmt;
  858. + va_dcl
  859. +#endif
  860. +{
  861. + char buf[BUFSIZ];
  862. + va_list args;
  863. + FILE *out = stderr;
  864. +
  865. + va_init_list(args, fmt);
  866. + vfprintf(out, fmt, args);
  867. + va_end(args);
  868. + fprintf(out, "\n");
  869. +}
  870. +
  871. +static void
  872. +dump_machine_state(uc)
  873. + ucontext_t *uc;
  874. +{
  875. + const char *dump64 =
  876. + " ----------------- Register state dump ----------------------\n"
877. + "rax = 0x%.16llx rbx = 0x%.16llx rcx = 0x%.16llx rdx = 0x%.16llx\n"
878. + "rdi = 0x%.16llx rsi = 0x%.16llx rbp = 0x%.16llx rsp = 0x%.16llx\n"
879. + "r8 = 0x%.16llx r9 = 0x%.16llx r10 = 0x%.16llx r11 = 0x%.16llx\n"
880. + "r12 = 0x%.16llx r13 = 0x%.16llx r14 = 0x%.16llx r15 = 0x%.16llx\n"
881. + "rip = 0x%.16llx rflags = 0x%.16llx cs = 0x%.16llx fs = 0x%.16llx\n"
882. + "gs = 0x%.16llx";
  883. +
  884. + const char *dump32 =
  885. + " ----------------- Register state dump -------------------\n"
  886. + "eax = 0x%.8x ebx = 0x%.8x ecx = 0x%.8x edx = 0x%.8x\n"
  887. + "edi = 0x%.8x esi = 0x%.8x ebp = 0x%.8x esp = 0x%.8x\n"
  888. + "ss = 0x%.8x eflags = 0x%.8x eip = 0x%.8x cs = 0x%.8x\n"
  889. + "ds = 0x%.8x es = 0x%.8x fs = 0x%.8x gs = 0x%.8x\n";
  890. +
  891. +#if defined(__LP64__) && defined(__APPLE__)
  892. + sig_printf(dump64, uc->uc_mcontext->__ss.__rax, uc->uc_mcontext->__ss.__rbx,
  893. + uc->uc_mcontext->__ss.__rcx, uc->uc_mcontext->__ss.__rdx, uc->uc_mcontext->__ss.__rdi,
  894. + uc->uc_mcontext->__ss.__rsi, uc->uc_mcontext->__ss.__rbp, uc->uc_mcontext->__ss.__rsp,
  895. + uc->uc_mcontext->__ss.__r8, uc->uc_mcontext->__ss.__r9, uc->uc_mcontext->__ss.__r10,
  896. + uc->uc_mcontext->__ss.__r11, uc->uc_mcontext->__ss.__r12, uc->uc_mcontext->__ss.__r13,
  897. + uc->uc_mcontext->__ss.__r14, uc->uc_mcontext->__ss.__r15, uc->uc_mcontext->__ss.__rip,
  898. + uc->uc_mcontext->__ss.__rflags, uc->uc_mcontext->__ss.__cs, uc->uc_mcontext->__ss.__fs,
  899. + uc->uc_mcontext->__ss.__gs);
  900. +#elif !defined(__LP64__) && defined(__APPLE__)
  901. + sig_printf(dump32, uc->uc_mcontext->__ss.__eax, uc->uc_mcontext->__ss.__ebx,
  902. + uc->uc_mcontext->__ss.__ecx, uc->uc_mcontext->__ss.__edx,
  903. + uc->uc_mcontext->__ss.__edi, uc->uc_mcontext->__ss.__esi,
  904. + uc->uc_mcontext->__ss.__ebp, uc->uc_mcontext->__ss.__esp,
  905. + uc->uc_mcontext->__ss.__ss, uc->uc_mcontext->__ss.__eflags,
  906. + uc->uc_mcontext->__ss.__eip, uc->uc_mcontext->__ss.__cs,
  907. + uc->uc_mcontext->__ss.__ds, uc->uc_mcontext->__ss.__es,
  908. + uc->uc_mcontext->__ss.__fs, uc->uc_mcontext->__ss.__gs);
  909. +#elif defined(__i386__)
  910. + sig_printf(dump32, uc->uc_mcontext.gregs[REG_EAX], uc->uc_mcontext.gregs[REG_EBX],
  911. + uc->uc_mcontext.gregs[REG_ECX], uc->uc_mcontext.gregs[REG_EDX],
  912. + uc->uc_mcontext.gregs[REG_EDI], uc->uc_mcontext.gregs[REG_ESI],
  913. + uc->uc_mcontext.gregs[REG_EBP], uc->uc_mcontext.gregs[REG_ESP],
  914. + uc->uc_mcontext.gregs[REG_SS], uc->uc_mcontext.gregs[REG_EFL],
915. + uc->uc_mcontext.gregs[REG_EIP], uc->uc_mcontext.gregs[REG_CS],
  916. + uc->uc_mcontext.gregs[REG_DS], uc->uc_mcontext.gregs[REG_ES],
917. + uc->uc_mcontext.gregs[REG_FS], uc->uc_mcontext.gregs[REG_GS]);
  918. +#elif defined(__x86_64__)
  919. + sig_printf(dump64, uc->uc_mcontext.gregs[REG_RAX], uc->uc_mcontext.gregs[REG_RBX],
  920. + uc->uc_mcontext.gregs[REG_RCX], uc->uc_mcontext.gregs[REG_RDX],
  921. + uc->uc_mcontext.gregs[REG_RDI], uc->uc_mcontext.gregs[REG_RSI],
  922. + uc->uc_mcontext.gregs[REG_RBP], uc->uc_mcontext.gregs[REG_RSP],
  923. + uc->uc_mcontext.gregs[REG_R8], uc->uc_mcontext.gregs[REG_R9],
  924. + uc->uc_mcontext.gregs[REG_R10], uc->uc_mcontext.gregs[REG_R11],
  925. + uc->uc_mcontext.gregs[REG_R12], uc->uc_mcontext.gregs[REG_R13],
  926. + uc->uc_mcontext.gregs[REG_R14], uc->uc_mcontext.gregs[REG_R15],
  927. + uc->uc_mcontext.gregs[REG_RIP], uc->uc_mcontext.gregs[REG_EFL],
928. + uc->uc_mcontext.gregs[REG_CSGSFS], uc->uc_mcontext.gregs[REG_CSGSFS], uc->uc_mcontext.gregs[REG_CSGSFS]);
  929. +#else
  930. +#endif
  931. +}
  932. +
  933. +static int
  934. +check_guard(caddr_t fault_addr, rb_thread_t th) {
935. + if(fault_addr <= (caddr_t)th->guard &&
936. + fault_addr >= (caddr_t)th->stk_ptr) {
  937. + return 1;
  938. + }
  939. + return 0;
  940. +}
  941. +
  942. #ifdef SIGBUS
  943. +#ifdef POSIX_SIGNAL
  944. +static void sigbus _((int, siginfo_t*, void*));
  945. +static void
  946. +sigbus(sig, ip, context)
  947. + int sig;
  948. + siginfo_t *ip;
  949. + void *context;
  950. +{
  951. +#if defined(HAVE_NATIVETHREAD) && defined(HAVE_NATIVETHREAD_KILL)
  952. + if (!is_ruby_native_thread() && !rb_trap_accept_nativethreads[sig]) {
  953. + sigsend_to_ruby_thread(sig);
  954. + return;
  955. + }
  956. +#endif
  957. +
  958. + dump_machine_state(context);
  959. + if (check_guard((caddr_t)ip->si_addr, rb_curr_thread)) {
  960. + /* we hit the guard page, print out a warning to help app developers */
  961. + rb_bug("Thread stack overflow! Try increasing it!");
  962. + } else {
  963. + rb_bug("Bus Error");
  964. + }
  965. +}
  966. +
  967. +#else /* !defined(POSIX_SIGNAL) */
  968. +
  969. static RETSIGTYPE sigbus _((int));
  970. static RETSIGTYPE
  971. sigbus(sig)
  972. @@ -615,8 +748,38 @@ sigbus(sig)
  973. rb_bug("Bus Error");
  974. }
  975. #endif
  976. +#endif
  977. +
  978. #ifdef SIGSEGV
  979. +#ifdef POSIX_SIGNAL
  980. +static void sigsegv _((int, siginfo_t*, void*));
  981. +static void
  982. +sigsegv(sig, ip, context)
  983. + int sig;
  984. + siginfo_t *ip;
  985. + void *context;
  986. +{
  987. +#if defined(HAVE_NATIVETHREAD) && defined(HAVE_NATIVETHREAD_KILL)
  988. + if (!is_ruby_native_thread() && !rb_trap_accept_nativethreads[sig]) {
  989. + sigsend_to_ruby_thread(sig);
  990. + return;
  991. + }
  992. +#endif
  993. +
  994. + extern int ruby_gc_stress;
  995. + ruby_gc_stress = 0;
  996. + dump_machine_state(context);
  997. + if (check_guard((caddr_t)ip->si_addr, rb_curr_thread)) {
  998. + /* we hit the guard page, print out a warning to help app developers */
  999. + rb_bug("Thread stack overflow! Try increasing it!");
  1000. + } else {
  1001. + rb_bug("Segmentation fault");
  1002. + }
  1003. +}
  1004. +
  1005. +#else /* !defined(POSIX_SIGNAL) */
  1006. +
  1007. static RETSIGTYPE sigsegv _((int));
  1008. static RETSIGTYPE
  1009. sigsegv(sig)
  1010. @@ -634,6 +797,7 @@ sigsegv(sig)
  1011. rb_bug("Segmentation fault");
  1012. }
  1013. #endif
  1014. +#endif
  1015. #ifdef SIGPIPE
  1016. static RETSIGTYPE sigpipe _((int));
  1017. @@ -705,7 +869,8 @@ static VALUE
  1018. trap(arg)
  1019. struct trap_arg *arg;
  1020. {
  1021. - sighandler_t func, oldfunc;
  1022. + sighandler_t oldfunc;
  1023. + void *func;
  1024. VALUE command, oldcmd;
  1025. int sig = -1;
  1026. const char *s;
  1027. @@ -952,6 +1117,20 @@ sig_list()
  1028. }
  1029. static void
  1030. +create_sigstack()
  1031. +{
  1032. + stack_t ss;
  1033. + ss.ss_size = SIGSTKSZ;
  1034. + ss.ss_sp = malloc(ss.ss_size);
  1035. + ss.ss_flags = 0;
  1036. + if (sigaltstack(&ss, NULL) < 0) {
  1037. + free(ss.ss_sp);
  1038. + fprintf(stderr, "Couldn't create signal stack! Error %d: %s\n", errno, strerror(errno));
  1039. + exit(1);
  1040. + }
  1041. +}
  1042. +
  1043. +static void
  1044. install_sighandler(signum, handler)
  1045. int signum;
  1046. sighandler_t handler;
  1047. @@ -960,7 +1139,7 @@ install_sighandler(signum, handler)
  1048. old = ruby_signal(signum, handler);
  1049. if (old != SIG_DFL) {
  1050. - ruby_signal(signum, old);
  1051. + ruby_signal(signum, old);
  1052. }
  1053. }
  1054. @@ -1089,6 +1268,8 @@ Init_signal()
  1055. rb_alias(rb_eSignal, rb_intern("signm"), rb_intern("message"));
  1056. rb_define_method(rb_eInterrupt, "initialize", interrupt_init, -1);
  1057. + create_sigstack();
  1058. +
  1059. install_sighandler(SIGINT, sighandler);
  1060. #ifdef SIGHUP
  1061. install_sighandler(SIGHUP, sighandler);