hs.patch — heap-stack threads patch for Ruby 1.8 (touches eval.c, gc.c,
lib/logger.rb, node.h, signal.c). It renames Ruby's PROT_NONE tag macro to
PROT_EMPTY (freeing the name for mman.h's PROT_NONE), moves green-thread
stacks from copied heap buffers to mmap'd regions with mprotect'd guard
pages, adds Thread.stack_size / Thread.stack_size= accessors, reworks
context save/restore to switch stacks via inline asm (i386/x86_64), and
updates GC stack scanning and the SIGSEGV/SIGBUS handlers accordingly.

NOTE(review): this copy carries an extraction artifact — every patch line
below has a leading line-number gutter ("N. ") fused into its text. Strip
that prefix from each line before attempting `git apply`; as captured, the
patch will not apply.
  1. diff --git a/eval.c b/eval.c
  2. index 4098b83..c70f270 100644
  3. --- a/eval.c
  4. +++ b/eval.c
  5. @@ -73,6 +73,7 @@ char *strrchr _((const char*,const char));
  6. #endif
  7. #include <time.h>
  8. +#include <sys/mman.h>
  9. #ifdef __BEOS__
  10. #include <net/socket.h>
  11. @@ -1025,7 +1026,7 @@ static struct tag *prot_tag;
  12. _tag.blkid = 0; \
  13. prot_tag = &_tag
  14. -#define PROT_NONE Qfalse /* 0 */
  15. +#define PROT_EMPTY Qfalse /* 0 */
  16. #define PROT_THREAD Qtrue /* 2 */
  17. #define PROT_FUNC INT2FIX(0) /* 1 */
  18. #define PROT_LOOP INT2FIX(1) /* 3 */
  19. @@ -1236,7 +1237,7 @@ error_print()
  20. if (NIL_P(ruby_errinfo)) return;
  21. - PUSH_TAG(PROT_NONE);
  22. + PUSH_TAG(PROT_EMPTY);
  23. if (EXEC_TAG() == 0) {
  24. errat = get_backtrace(ruby_errinfo);
  25. }
  26. @@ -1396,7 +1397,7 @@ ruby_init()
  27. /* default visibility is private at toplevel */
  28. SCOPE_SET(SCOPE_PRIVATE);
  29. - PUSH_TAG(PROT_NONE);
  30. + PUSH_TAG(PROT_EMPTY);
  31. if ((state = EXEC_TAG()) == 0) {
  32. rb_call_inits();
  33. ruby_class = rb_cObject;
  34. @@ -1530,7 +1531,7 @@ ruby_options(argc, argv)
  35. int state;
  36. Init_stack((void*)&state);
  37. - PUSH_TAG(PROT_NONE);
  38. + PUSH_TAG(PROT_EMPTY);
  39. if ((state = EXEC_TAG()) == 0) {
  40. ruby_process_options(argc, argv);
  41. }
  42. @@ -1547,7 +1548,7 @@ void rb_exec_end_proc _((void));
  43. static void
  44. ruby_finalize_0()
  45. {
  46. - PUSH_TAG(PROT_NONE);
  47. + PUSH_TAG(PROT_EMPTY);
  48. if (EXEC_TAG() == 0) {
  49. rb_trap_exit();
  50. }
  51. @@ -1585,7 +1586,7 @@ ruby_cleanup(ex)
  52. Init_stack((void *)&state);
  53. ruby_finalize_0();
  54. errs[0] = ruby_errinfo;
  55. - PUSH_TAG(PROT_NONE);
  56. + PUSH_TAG(PROT_EMPTY);
  57. PUSH_ITER(ITER_NOT);
  58. if ((state = EXEC_TAG()) == 0) {
  59. rb_thread_cleanup();
  60. @@ -1636,7 +1637,7 @@ ruby_exec_internal()
  61. {
  62. int state;
  63. - PUSH_TAG(PROT_NONE);
  64. + PUSH_TAG(PROT_EMPTY);
  65. PUSH_ITER(ITER_NOT);
  66. /* default visibility is private at toplevel */
  67. SCOPE_SET(SCOPE_PRIVATE);
  68. @@ -1858,7 +1859,7 @@ rb_eval_cmd(cmd, arg, level)
  69. }
  70. if (TYPE(cmd) != T_STRING) {
  71. PUSH_ITER(ITER_NOT);
  72. - PUSH_TAG(PROT_NONE);
  73. + PUSH_TAG(PROT_EMPTY);
  74. ruby_safe_level = level;
  75. if ((state = EXEC_TAG()) == 0) {
  76. val = rb_funcall2(cmd, rb_intern("call"), RARRAY(arg)->len, RARRAY(arg)->ptr);
  77. @@ -1880,7 +1881,7 @@ rb_eval_cmd(cmd, arg, level)
  78. ruby_safe_level = level;
  79. - PUSH_TAG(PROT_NONE);
  80. + PUSH_TAG(PROT_EMPTY);
  81. if ((state = EXEC_TAG()) == 0) {
  82. val = eval(ruby_top_self, cmd, Qnil, 0, 0);
  83. }
  84. @@ -2387,7 +2388,7 @@ is_defined(self, node, buf)
  85. val = self;
  86. if (node->nd_recv == (NODE *)1) goto check_bound;
  87. case NODE_CALL:
  88. - PUSH_TAG(PROT_NONE);
  89. + PUSH_TAG(PROT_EMPTY);
  90. if ((state = EXEC_TAG()) == 0) {
  91. val = rb_eval(self, node->nd_recv);
  92. }
  93. @@ -2489,7 +2490,7 @@ is_defined(self, node, buf)
  94. break;
  95. case NODE_COLON2:
  96. - PUSH_TAG(PROT_NONE);
  97. + PUSH_TAG(PROT_EMPTY);
  98. if ((state = EXEC_TAG()) == 0) {
  99. val = rb_eval(self, node->nd_head);
  100. }
  101. @@ -2538,7 +2539,7 @@ is_defined(self, node, buf)
  102. goto again;
  103. default:
  104. - PUSH_TAG(PROT_NONE);
  105. + PUSH_TAG(PROT_EMPTY);
  106. if ((state = EXEC_TAG()) == 0) {
  107. rb_eval(self, node);
  108. }
  109. @@ -2742,7 +2743,7 @@ call_trace_func(event, node, self, id, klass)
  110. klass = rb_iv_get(klass, "__attached__");
  111. }
  112. }
  113. - PUSH_TAG(PROT_NONE);
  114. + PUSH_TAG(PROT_EMPTY);
  115. raised = rb_thread_reset_raised(th);
  116. if ((state = EXEC_TAG()) == 0) {
  117. srcfile = rb_str_new2(ruby_sourcefile?ruby_sourcefile:"(ruby)");
  118. @@ -3302,7 +3303,7 @@ rb_eval(self, n)
  119. volatile VALUE e_info = ruby_errinfo;
  120. volatile int rescuing = 0;
  121. - PUSH_TAG(PROT_NONE);
  122. + PUSH_TAG(PROT_EMPTY);
  123. if ((state = EXEC_TAG()) == 0) {
  124. retry_entry:
  125. result = rb_eval(self, node->nd_head);
  126. @@ -3351,7 +3352,7 @@ rb_eval(self, n)
  127. break;
  128. case NODE_ENSURE:
  129. - PUSH_TAG(PROT_NONE);
  130. + PUSH_TAG(PROT_EMPTY);
  131. if ((state = EXEC_TAG()) == 0) {
  132. result = rb_eval(self, node->nd_head);
  133. }
  134. @@ -3569,7 +3570,7 @@ rb_eval(self, n)
  135. ruby_frame = &frame;
  136. PUSH_SCOPE();
  137. - PUSH_TAG(PROT_NONE);
  138. + PUSH_TAG(PROT_EMPTY);
  139. if (node->nd_rval) {
  140. saved_cref = ruby_cref;
  141. ruby_cref = (NODE*)node->nd_rval;
  142. @@ -4195,7 +4196,7 @@ module_setup(module, n)
  143. }
  144. PUSH_CREF(module);
  145. - PUSH_TAG(PROT_NONE);
  146. + PUSH_TAG(PROT_EMPTY);
  147. if ((state = EXEC_TAG()) == 0) {
  148. EXEC_EVENT_HOOK(RUBY_EVENT_CLASS, n, ruby_cbase,
  149. ruby_frame->last_func, ruby_frame->last_class);
  150. @@ -4602,7 +4603,7 @@ rb_longjmp(tag, mesg)
  151. VALUE e = ruby_errinfo;
  152. int status;
  153. - PUSH_TAG(PROT_NONE);
  154. + PUSH_TAG(PROT_EMPTY);
  155. if ((status = EXEC_TAG()) == 0) {
  156. StringValue(e);
  157. warn_printf("Exception `%s' at %s:%d - %s\n",
  158. @@ -4974,7 +4975,7 @@ rb_yield_0(val, self, klass, flags, avalue)
  159. node = block->body;
  160. if (block->var) {
  161. - PUSH_TAG(PROT_NONE);
  162. + PUSH_TAG(PROT_EMPTY);
  163. if ((state = EXEC_TAG()) == 0) {
  164. if (block->var == (NODE*)1) { /* no parameter || */
  165. if (lambda && RARRAY(val)->len != 0) {
  166. @@ -5032,7 +5033,7 @@ rb_yield_0(val, self, klass, flags, avalue)
  167. ruby_current_node = node;
  168. PUSH_ITER(block->iter);
  169. - PUSH_TAG(lambda ? PROT_NONE : PROT_YIELD);
  170. + PUSH_TAG(lambda ? PROT_EMPTY : PROT_YIELD);
  171. if ((state = EXEC_TAG()) == 0) {
  172. redo:
  173. if (nd_type(node) == NODE_CFUNC || nd_type(node) == NODE_IFUNC) {
  174. @@ -5430,7 +5431,7 @@ rb_rescue2(b_proc, data1, r_proc, data2, va_alist)
  175. VALUE eclass;
  176. va_list args;
  177. - PUSH_TAG(PROT_NONE);
  178. + PUSH_TAG(PROT_EMPTY);
  179. switch (state = EXEC_TAG()) {
  180. case TAG_RETRY:
  181. if (!handle) break;
  182. @@ -5488,7 +5489,7 @@ rb_protect(proc, data, state)
  183. VALUE result = Qnil; /* OK */
  184. int status;
  185. - PUSH_TAG(PROT_NONE);
  186. + PUSH_TAG(PROT_EMPTY);
  187. cont_protect = (VALUE)rb_node_newnode(NODE_MEMO, cont_protect, 0, 0);
  188. if ((status = EXEC_TAG()) == 0) {
  189. result = (*proc)(data);
  190. @@ -5516,7 +5517,7 @@ rb_ensure(b_proc, data1, e_proc, data2)
  191. volatile VALUE result = Qnil;
  192. VALUE retval;
  193. - PUSH_TAG(PROT_NONE);
  194. + PUSH_TAG(PROT_EMPTY);
  195. if ((state = EXEC_TAG()) == 0) {
  196. result = (*b_proc)(data1);
  197. }
  198. @@ -5543,7 +5544,7 @@ rb_with_disable_interrupt(proc, data)
  199. int thr_critical = rb_thread_critical;
  200. rb_thread_critical = Qtrue;
  201. - PUSH_TAG(PROT_NONE);
  202. + PUSH_TAG(PROT_EMPTY);
  203. if ((status = EXEC_TAG()) == 0) {
  204. result = (*proc)(data);
  205. }
  206. @@ -6230,7 +6231,7 @@ rb_funcall_rescue(recv, mid, n, va_alist)
  207. va_init_list(ar, n);
  208. - PUSH_TAG(PROT_NONE);
  209. + PUSH_TAG(PROT_EMPTY);
  210. if ((status = EXEC_TAG()) == 0) {
  211. result = vafuncall(recv, mid, n, &ar);
  212. }
  213. @@ -6499,7 +6500,7 @@ eval(self, src, scope, file, line)
  214. if (TYPE(ruby_class) == T_ICLASS) {
  215. ruby_class = RBASIC(ruby_class)->klass;
  216. }
  217. - PUSH_TAG(PROT_NONE);
  218. + PUSH_TAG(PROT_EMPTY);
  219. if ((state = EXEC_TAG()) == 0) {
  220. NODE *node;
  221. @@ -6658,7 +6659,7 @@ exec_under(func, under, cbase, args)
  222. mode = scope_vmode;
  223. SCOPE_SET(SCOPE_PUBLIC);
  224. - PUSH_TAG(PROT_NONE);
  225. + PUSH_TAG(PROT_EMPTY);
  226. if ((state = EXEC_TAG()) == 0) {
  227. val = (*func)(args);
  228. }
  229. @@ -6889,7 +6890,7 @@ rb_load(fname, wrap)
  230. PUSH_SCOPE();
  231. /* default visibility is private at loading toplevel */
  232. SCOPE_SET(SCOPE_PRIVATE);
  233. - PUSH_TAG(PROT_NONE);
  234. + PUSH_TAG(PROT_EMPTY);
  235. state = EXEC_TAG();
  236. last_func = ruby_frame->last_func;
  237. last_node = ruby_current_node;
  238. @@ -6948,7 +6949,7 @@ rb_load_protect(fname, wrap, state)
  239. {
  240. int status;
  241. - PUSH_TAG(PROT_NONE);
  242. + PUSH_TAG(PROT_EMPTY);
  243. if ((status = EXEC_TAG()) == 0) {
  244. rb_load(fname, wrap);
  245. }
  246. @@ -7269,7 +7270,7 @@ rb_require_safe(fname, safe)
  247. saved.node = ruby_current_node;
  248. saved.func = ruby_frame->last_func;
  249. saved.safe = ruby_safe_level;
  250. - PUSH_TAG(PROT_NONE);
  251. + PUSH_TAG(PROT_EMPTY);
  252. if ((state = EXEC_TAG()) == 0) {
  253. VALUE feature, path;
  254. long handle;
  255. @@ -7977,7 +7978,7 @@ rb_exec_end_proc()
  256. tmp_end_procs = link = ephemeral_end_procs;
  257. ephemeral_end_procs = 0;
  258. while (link) {
  259. - PUSH_TAG(PROT_NONE);
  260. + PUSH_TAG(PROT_EMPTY);
  261. if ((status = EXEC_TAG()) == 0) {
  262. ruby_safe_level = link->safe;
  263. (*link->func)(link->data);
  264. @@ -7995,7 +7996,7 @@ rb_exec_end_proc()
  265. tmp_end_procs = link = end_procs;
  266. end_procs = 0;
  267. while (link) {
  268. - PUSH_TAG(PROT_NONE);
  269. + PUSH_TAG(PROT_EMPTY);
  270. if ((status = EXEC_TAG()) == 0) {
  271. ruby_safe_level = link->safe;
  272. (*link->func)(link->data);
  273. @@ -8654,7 +8655,7 @@ proc_invoke(proc, args, self, klass)
  274. ruby_block = &_block;
  275. PUSH_ITER(ITER_CUR);
  276. ruby_frame->iter = ITER_CUR;
  277. - PUSH_TAG(pcall ? PROT_LAMBDA : PROT_NONE);
  278. + PUSH_TAG(pcall ? PROT_LAMBDA : PROT_EMPTY);
  279. state = EXEC_TAG();
  280. if (state == 0) {
  281. proc_set_safe_level(proc);
  282. @@ -9896,6 +9897,7 @@ win32_set_exception_list(p)
  283. int rb_thread_pending = 0;
  284. VALUE rb_cThread;
  285. +static unsigned int rb_thread_stack_size;
  286. extern VALUE rb_last_status;
  287. @@ -10123,12 +10125,20 @@ thread_mark(th)
  288. rb_gc_mark(th->thread);
  289. if (th->join) rb_gc_mark(th->join->thread);
  290. - rb_gc_mark(th->klass);
  291. - rb_gc_mark(th->wrapper);
  292. - rb_gc_mark((VALUE)th->cref);
  293. + if (curr_thread == th) {
  294. + rb_gc_mark(ruby_class);
  295. + rb_gc_mark(ruby_wrapper);
  296. + rb_gc_mark((VALUE)ruby_cref);
  297. + rb_gc_mark((VALUE)ruby_scope);
  298. + rb_gc_mark((VALUE)ruby_dyna_vars);
  299. + } else {
  300. + rb_gc_mark(th->klass);
  301. + rb_gc_mark(th->wrapper);
  302. + rb_gc_mark((VALUE)th->cref);
  303. + rb_gc_mark((VALUE)th->scope);
  304. + rb_gc_mark((VALUE)th->dyna_vars);
  305. + }
  306. - rb_gc_mark((VALUE)th->scope);
  307. - rb_gc_mark((VALUE)th->dyna_vars);
  308. rb_gc_mark(th->errinfo);
  309. rb_gc_mark(th->last_status);
  310. rb_gc_mark(th->last_line);
  311. @@ -10138,11 +10148,11 @@ thread_mark(th)
  312. rb_gc_mark_maybe(th->sandbox);
  313. /* mark data in copied stack */
  314. - if (th == curr_thread) return;
  315. + if (th == main_thread) return;
  316. if (th->status == THREAD_KILLED) return;
  317. if (th->stk_len == 0) return; /* stack not active, no need to mark. */
  318. - if (th->stk_ptr) {
  319. - rb_gc_mark_locations(th->stk_ptr, th->stk_ptr+th->stk_len);
  320. + if (th->stk_ptr && th != curr_thread) {
  321. + rb_gc_mark_locations(th->stk_pos, th->stk_base);
  322. #if defined(THINK_C) || defined(__human68k__)
  323. rb_gc_mark_locations(th->stk_ptr+2, th->stk_ptr+th->stk_len+2);
  324. #endif
  325. @@ -10152,24 +10162,30 @@ thread_mark(th)
  326. }
  327. #endif
  328. }
  329. - frame = th->frame;
  330. +
  331. + if (curr_thread == th)
  332. + frame = ruby_frame;
  333. + else
  334. + frame = th->frame;
  335. +
  336. while (frame && frame != top_frame) {
  337. - frame = ADJ(frame);
  338. rb_gc_mark_frame(frame);
  339. if (frame->tmp) {
  340. struct FRAME *tmp = frame->tmp;
  341. -
  342. while (tmp && tmp != top_frame) {
  343. - tmp = ADJ(tmp);
  344. rb_gc_mark_frame(tmp);
  345. tmp = tmp->prev;
  346. }
  347. }
  348. frame = frame->prev;
  349. }
  350. - block = th->block;
  351. +
  352. + if (curr_thread == th)
  353. + block = ruby_block;
  354. + else
  355. + block = th->block;
  356. +
  357. while (block) {
  358. - block = ADJ(block);
  359. rb_gc_mark_frame(&block->frame);
  360. block = block->prev;
  361. }
  362. @@ -10232,7 +10248,7 @@ static inline void
  363. stack_free(th)
  364. rb_thread_t th;
  365. {
  366. - if (th->stk_ptr) free(th->stk_ptr);
  367. + if (th->stk_ptr) munmap(th->stk_ptr, th->stk_size);
  368. th->stk_ptr = 0;
  369. #ifdef __ia64
  370. if (th->bstr_ptr) free(th->bstr_ptr);
  371. @@ -10293,35 +10309,8 @@ rb_thread_save_context(th)
  372. static VALUE tval;
  373. len = ruby_stack_length(&pos);
  374. - th->stk_len = 0;
  375. - th->stk_pos = pos;
  376. - if (len > th->stk_max) {
  377. - VALUE *ptr = realloc(th->stk_ptr, sizeof(VALUE) * len);
  378. - if (!ptr) rb_memerror();
  379. - th->stk_ptr = ptr;
  380. - th->stk_max = len;
  381. - }
  382. th->stk_len = len;
  383. - FLUSH_REGISTER_WINDOWS;
  384. - MEMCPY(th->stk_ptr, th->stk_pos, VALUE, th->stk_len);
  385. -#ifdef __ia64
  386. - th->bstr_pos = rb_gc_register_stack_start;
  387. - len = (VALUE*)rb_ia64_bsp() - th->bstr_pos;
  388. - th->bstr_len = 0;
  389. - if (len > th->bstr_max) {
  390. - VALUE *ptr = realloc(th->bstr_ptr, sizeof(VALUE) * len);
  391. - if (!ptr) rb_memerror();
  392. - th->bstr_ptr = ptr;
  393. - th->bstr_max = len;
  394. - }
  395. - th->bstr_len = len;
  396. - rb_ia64_flushrs();
  397. - MEMCPY(th->bstr_ptr, th->bstr_pos, VALUE, th->bstr_len);
  398. -#endif
  399. -#ifdef SAVE_WIN32_EXCEPTION_LIST
  400. - th->win32_exception_list = win32_get_exception_list();
  401. -#endif
  402. -
  403. + th->stk_pos = pos;
  404. th->frame = ruby_frame;
  405. th->scope = ruby_scope;
  406. ruby_scope->flags |= SCOPE_DONT_RECYCLE;
  407. @@ -10431,11 +10420,6 @@ rb_thread_restore_context_0(rb_thread_t th, int exit)
  408. #endif
  409. tmp = th;
  410. ex = exit;
  411. - FLUSH_REGISTER_WINDOWS;
  412. - MEMCPY(tmp->stk_pos, tmp->stk_ptr, VALUE, tmp->stk_len);
  413. -#ifdef __ia64
  414. - MEMCPY(tmp->bstr_pos, tmp->bstr_ptr, VALUE, tmp->bstr_len);
  415. -#endif
  416. tval = rb_lastline_get();
  417. rb_lastline_set(tmp->last_line);
  418. @@ -10526,8 +10510,8 @@ rb_thread_restore_context(th, exit)
  419. rb_thread_t th;
  420. int exit;
  421. {
  422. - if (!th->stk_ptr) rb_bug("unsaved context");
  423. - stack_extend(th, exit);
  424. + if (!th->stk_ptr && th != main_thread) rb_bug("unsaved context");
  425. + rb_thread_restore_context_0(th, exit);
  426. }
  427. static void
  428. @@ -10546,7 +10530,6 @@ rb_thread_die(th)
  429. {
  430. th->thgroup = 0;
  431. th->status = THREAD_KILLED;
  432. - stack_free(th);
  433. }
  434. static void
  435. @@ -11822,6 +11805,7 @@ rb_thread_group(thread)
  436. \
  437. th->stk_ptr = 0;\
  438. th->stk_len = 0;\
  439. + th->stk_size = 0;\
  440. th->stk_max = 0;\
  441. th->wait_for = 0;\
  442. IA64_INIT(th->bstr_ptr = 0);\
  443. @@ -11869,6 +11853,48 @@ rb_thread_alloc(klass)
  444. THREAD_ALLOC(th);
  445. th->thread = Data_Wrap_Struct(klass, thread_mark, thread_free, th);
  446. + /* if main_thread != NULL, then this is NOT the main thread, so
  447. + * we create a heap-stack
  448. + */
  449. + if (main_thread) {
  450. + /* Allocate stack, don't forget to add 1 extra word because of the MATH below */
  451. + unsigned int pagesize = getpagesize();
  452. + unsigned int total_size = rb_thread_stack_size + pagesize + sizeof(int);
  453. + void *stack_area = NULL;
  454. +
  455. + stack_area = mmap(NULL, total_size, PROT_READ | PROT_WRITE | PROT_EXEC,
  456. + MAP_PRIVATE | MAP_ANON, -1, 0);
  457. +
  458. + if (stack_area == MAP_FAILED) {
  459. + fprintf(stderr, "Thread stack allocation failed!\n");
  460. + rb_memerror();
  461. + }
  462. +
  463. + th->stk_ptr = th->stk_pos = stack_area;
  464. + th->stk_size = total_size;
  465. +
  466. + if (mprotect(th->stk_ptr, pagesize, PROT_NONE) == -1) {
  467. + fprintf(stderr, "Failed to create thread guard region: %s\n", strerror(errno));
  468. + rb_memerror();
  469. + }
  470. +
  471. + th->guard = th->stk_ptr + (pagesize/sizeof(VALUE *));
  472. +
  473. + /* point stk_base at the top of the stack */
  474. + /* ASSUMPTIONS:
  475. + * 1.) The address returned by malloc is "suitably aligned" for anything on this system
  476. + * 2.) Adding a value that is "aligned" for this platform should not unalign the address
  477. + * returned from malloc.
  478. + * 3.) Don't push anything on to the stack, otherwise it'll get unaligned.
  479. + * 4.) x86_64 ABI says aligned AFTER arguments have been pushed. You *must* then do a call[lq]
  480. + * or push[lq] something else on to the stack if you inted to do a ret.
  481. + */
  482. + th->stk_base = th->stk_ptr + ((total_size - sizeof(int))/sizeof(VALUE *));
  483. + th->stk_len = rb_thread_stack_size;
  484. + } else {
  485. + th->stk_ptr = th->stk_pos = rb_gc_stack_start;
  486. + }
  487. +
  488. for (vars = th->dyna_vars; vars; vars = vars->next) {
  489. if (FL_TEST(vars, DVAR_DONT_RECYCLE)) break;
  490. FL_SET(vars, DVAR_DONT_RECYCLE);
  491. @@ -12014,17 +12040,22 @@ rb_thread_cancel_timer()
  492. }
  493. #endif
  494. +struct thread_start_args {
  495. + VALUE (*fn)();
  496. + void *arg;
  497. + rb_thread_t th;
  498. +} new_th;
  499. +
  500. +static VALUE
  501. +rb_thread_start_2();
  502. +
  503. static VALUE
  504. rb_thread_start_0(fn, arg, th)
  505. VALUE (*fn)();
  506. void *arg;
  507. rb_thread_t th;
  508. {
  509. - volatile rb_thread_t th_save = th;
  510. volatile VALUE thread = th->thread;
  511. - struct BLOCK *volatile saved_block = 0;
  512. - enum rb_thread_status status;
  513. - int state;
  514. if (OBJ_FROZEN(curr_thread->thgroup)) {
  515. rb_raise(rb_eThreadError,
  516. @@ -12054,16 +12085,41 @@ rb_thread_start_0(fn, arg, th)
  517. return thread;
  518. }
  519. - if (ruby_block) { /* should nail down higher blocks */
  520. - struct BLOCK dummy;
  521. + new_th.fn = fn;
  522. + new_th.arg = arg;
  523. + new_th.th = th;
  524. +
  525. +#if defined(__i386__)
  526. + __asm__ __volatile__ ("movl %0, %%esp\n\t"
  527. + "calll *%1\n"
  528. + :: "r" (th->stk_base),
  529. + "r" (rb_thread_start_2));
  530. +#elif defined(__x86_64__)
  531. + __asm__ __volatile__ ("movq %0, %%rsp\n\t"
  532. + "callq *%1\n"
  533. + :: "r" (th->stk_base),
  534. + "r" (rb_thread_start_2));
  535. +#else
  536. + #error unsupported architecture!
  537. +#endif
  538. + /* NOTREACHED */
  539. + return 0;
  540. +}
  541. - dummy.prev = ruby_block;
  542. - blk_copy_prev(&dummy);
  543. - saved_block = ruby_block = dummy.prev;
  544. - }
  545. - scope_dup(ruby_scope);
  546. +static VALUE
  547. +rb_thread_start_2()
  548. +{
  549. + volatile rb_thread_t th = new_th.th;
  550. + volatile rb_thread_t th_save = th;
  551. + volatile VALUE thread = th->thread;
  552. + struct BLOCK *volatile saved_block = 0;
  553. + enum rb_thread_status status;
  554. + int state;
  555. + struct tag *tag;
  556. + struct RVarmap *vars;
  557. + struct FRAME dummy_frame;
  558. - if (!th->next) {
  559. + if (!th->next) {
  560. /* merge in thread list */
  561. th->prev = curr_thread;
  562. curr_thread->next->prev = th;
  563. @@ -12071,13 +12127,27 @@ rb_thread_start_0(fn, arg, th)
  564. curr_thread->next = th;
  565. th->priority = curr_thread->priority;
  566. th->thgroup = curr_thread->thgroup;
  567. + }
  568. + curr_thread = th;
  569. +
  570. + dummy_frame = *ruby_frame;
  571. + dummy_frame.prev = top_frame;
  572. + ruby_frame = &dummy_frame;
  573. +
  574. + if (ruby_block) { /* should nail down higher blocks */
  575. + struct BLOCK dummy;
  576. +
  577. + dummy.prev = ruby_block;
  578. + blk_copy_prev(&dummy);
  579. + saved_block = ruby_block = dummy.prev;
  580. }
  581. + scope_dup(ruby_scope);
  582. +
  583. PUSH_TAG(PROT_THREAD);
  584. if ((state = EXEC_TAG()) == 0) {
  585. if (THREAD_SAVE_CONTEXT(th) == 0) {
  586. - curr_thread = th;
  587. - th->result = (*fn)(arg, th);
  588. + th->result = (*new_th.fn)(new_th.arg, th);
  589. }
  590. th = th_save;
  591. }
  592. @@ -12414,6 +12484,43 @@ rb_thread_cleanup()
  593. END_FOREACH_FROM(curr, th);
  594. }
  595. +/*
  596. + * call-seq:
  597. + * Thread.stack_size => fixnum
  598. + *
  599. + * Returns the thread stack size in bytes
  600. + */
  601. +static VALUE
  602. +rb_thread_stacksize_get()
  603. +{
  604. + return INT2FIX(rb_thread_stack_size);
  605. +}
  606. +
  607. +/*
  608. + * call-seq:
  609. + * Thread.stack_size= fixnum => Qnil
  610. + *
  611. + * Sets the global thread stacksize and returns Qnil.
  612. + */
  613. +static VALUE
  614. +rb_thread_stacksize_set(obj, val)
  615. + VALUE obj;
  616. + VALUE val;
  617. +{
  618. +
  619. + unsigned int size = FIX2UINT(val);
  620. +
  621. + /* 16byte alignment works for both x86 and x86_64 */
  622. + if (size & (~0xf)) {
  623. + size += 0x10;
  624. + size = size & (~0xf);
  625. + }
  626. +
  627. + rb_thread_stack_size = size;
  628. +
  629. + return Qnil;
  630. +}
  631. +
  632. int rb_thread_critical;
  633. @@ -13167,6 +13274,8 @@ Init_Thread()
  634. {
  635. VALUE cThGroup;
  636. + rb_thread_stack_size = (1024 * 1024);
  637. +
  638. rb_eThreadError = rb_define_class("ThreadError", rb_eStandardError);
  639. rb_cThread = rb_define_class("Thread", rb_cObject);
  640. rb_undef_alloc_func(rb_cThread);
  641. @@ -13190,6 +13299,9 @@ Init_Thread()
  642. rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
  643. rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
  644. + rb_define_singleton_method(rb_cThread, "stack_size", rb_thread_stacksize_get, 0);
  645. + rb_define_singleton_method(rb_cThread, "stack_size=", rb_thread_stacksize_set, 1);
  646. +
  647. rb_define_method(rb_cThread, "run", rb_thread_run, 0);
  648. rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
  649. rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
  650. diff --git a/gc.c b/gc.c
  651. index 318e24c..0746834 100644
  652. --- a/gc.c
  653. +++ b/gc.c
  654. @@ -466,12 +466,12 @@ stack_end_address(VALUE **stack_end_p)
  655. # define STACK_END (stack_end)
  656. #endif
  657. #if STACK_GROW_DIRECTION < 0
  658. -# define STACK_LENGTH (rb_gc_stack_start - STACK_END)
  659. +# define STACK_LENGTH(start) ((start) - STACK_END)
  660. #elif STACK_GROW_DIRECTION > 0
  661. -# define STACK_LENGTH (STACK_END - rb_gc_stack_start + 1)
  662. +# define STACK_LENGTH(start) (STACK_END - (start) + 1)
  663. #else
  664. -# define STACK_LENGTH ((STACK_END < rb_gc_stack_start) ? rb_gc_stack_start - STACK_END\
  665. - : STACK_END - rb_gc_stack_start + 1)
  666. +# define STACK_LENGTH(start) ((STACK_END < (start)) ? (start) - STACK_END\
  667. + : STACK_END - (start) + 1)
  668. #endif
  669. #if STACK_GROW_DIRECTION > 0
  670. # define STACK_UPPER(x, a, b) a
  671. @@ -494,27 +494,36 @@ stack_grow_direction(addr)
  672. #define GC_WATER_MARK 512
  673. -#define CHECK_STACK(ret) do {\
  674. +#define CHECK_STACK(ret, start) do {\
  675. SET_STACK_END;\
  676. - (ret) = (STACK_LENGTH > STACK_LEVEL_MAX + GC_WATER_MARK);\
  677. + (ret) = (STACK_LENGTH(start) > STACK_LEVEL_MAX + GC_WATER_MARK);\
  678. } while (0)
  679. size_t
  680. ruby_stack_length(p)
  681. VALUE **p;
  682. {
  683. - SET_STACK_END;
  684. - if (p) *p = STACK_UPPER(STACK_END, rb_gc_stack_start, STACK_END);
  685. - return STACK_LENGTH;
  686. + SET_STACK_END;
  687. + VALUE *start;
  688. + if (rb_curr_thread == rb_main_thread) {
  689. + start = rb_gc_stack_start;
  690. + } else {
  691. + start = rb_curr_thread->stk_base;
  692. + }
  693. + if (p) *p = STACK_UPPER(STACK_END, start, STACK_END);
  694. + return STACK_LENGTH(start);
  695. }
  696. int
  697. ruby_stack_check()
  698. {
  699. - int ret;
  700. -
  701. - CHECK_STACK(ret);
  702. - return ret;
  703. + int ret;
  704. + if (rb_curr_thread == rb_main_thread) {
  705. + CHECK_STACK(ret, rb_gc_stack_start);
  706. + } else {
  707. + CHECK_STACK(ret, rb_curr_thread->stk_base);
  708. + }
  709. + return ret;
  710. }
  711. #define MARK_STACK_MAX 1024
  712. @@ -1404,10 +1413,13 @@ garbage_collect()
  713. init_mark_stack();
  714. - gc_mark((VALUE)ruby_current_node, 0);
  715. -
  716. /* mark frame stack */
  717. - for (frame = ruby_frame; frame; frame = frame->prev) {
  718. + if (rb_curr_thread == rb_main_thread)
  719. + frame = ruby_frame;
  720. + else
  721. + frame = rb_main_thread->frame;
  722. +
  723. + for (; frame; frame = frame->prev) {
  724. rb_gc_mark_frame(frame);
  725. if (frame->tmp) {
  726. struct FRAME *tmp = frame->tmp;
  727. @@ -1417,16 +1429,35 @@ garbage_collect()
  728. }
  729. }
  730. }
  731. - gc_mark((VALUE)ruby_scope, 0);
  732. - gc_mark((VALUE)ruby_dyna_vars, 0);
  733. +
  734. + if (rb_curr_thread == rb_main_thread) {
  735. + gc_mark((VALUE)ruby_current_node, 0);
  736. + gc_mark((VALUE)ruby_scope, 0);
  737. + gc_mark((VALUE)ruby_dyna_vars, 0);
  738. + } else {
  739. + gc_mark((VALUE)rb_main_thread->node, 0);
  740. + gc_mark((VALUE)rb_main_thread->scope, 0);
  741. + gc_mark((VALUE)rb_main_thread->dyna_vars, 0);
  742. +
  743. + /* scan the current thread's stack */
  744. + rb_gc_mark_locations((VALUE*)STACK_END, rb_curr_thread->stk_base);
  745. + }
  746. +
  747. if (finalizer_table) {
  748. - mark_tbl(finalizer_table, 0);
  749. + mark_tbl(finalizer_table, 0);
  750. }
  751. FLUSH_REGISTER_WINDOWS;
  752. /* This assumes that all registers are saved into the jmp_buf (and stack) */
  753. setjmp(save_regs_gc_mark);
  754. mark_locations_array((VALUE*)save_regs_gc_mark, sizeof(save_regs_gc_mark) / sizeof(VALUE *));
  755. +
  756. + /* If this is not the main thread, we need to scan the C stack, so
  757. + * set STACK_END to the end of the C stack.
  758. + */
  759. + if (rb_curr_thread != rb_main_thread)
  760. + STACK_END = rb_main_thread->stk_pos;
  761. +
  762. #if STACK_GROW_DIRECTION < 0
  763. rb_gc_mark_locations((VALUE*)STACK_END, rb_gc_stack_start);
  764. #elif STACK_GROW_DIRECTION > 0
  765. @@ -1446,6 +1477,7 @@ garbage_collect()
  766. rb_gc_mark_locations((VALUE*)((char*)STACK_END + 2),
  767. (VALUE*)((char*)rb_gc_stack_start + 2));
  768. #endif
  769. +
  770. rb_gc_mark_threads();
  771. /* mark protected global variables */
  772. diff --git a/lib/logger.rb b/lib/logger.rb
  773. index e9ab171..2564c33 100644
  774. --- a/lib/logger.rb
  775. +++ b/lib/logger.rb
  776. @@ -170,7 +170,7 @@ require 'monitor'
  777. class Logger
  778. VERSION = "1.2.6"
  779. - id, name, rev = %w$Id$
  780. + id, name, rev = %w$Id logger.rb 1234$
  781. ProgName = "#{name.chomp(",v")}/#{rev}"
  782. class Error < RuntimeError; end
  783. diff --git a/node.h b/node.h
  784. index 476a826..ecdc053 100644
  785. --- a/node.h
  786. +++ b/node.h
  787. @@ -411,8 +411,11 @@ struct rb_thread {
  788. size_t stk_len;
  789. size_t stk_max;
  790. + size_t stk_size;
  791. VALUE *stk_ptr;
  792. VALUE *stk_pos;
  793. + VALUE *stk_base;
  794. + VALUE *guard;
  795. #ifdef __ia64
  796. size_t bstr_len;
  797. size_t bstr_max;
  798. diff --git a/signal.c b/signal.c
  799. index b6cad9d..116ac05 100644
  800. --- a/signal.c
  801. +++ b/signal.c
  802. @@ -14,6 +14,7 @@
  803. #include "ruby.h"
  804. #include "rubysig.h"
  805. +#include "node.h"
  806. #include <signal.h>
  807. #include <stdio.h>
  808. @@ -425,15 +426,22 @@ typedef RETSIGTYPE (*sighandler_t)_((int));
  809. static sighandler_t
  810. ruby_signal(signum, handler)
  811. int signum;
  812. - sighandler_t handler;
  813. + void *handler;
  814. {
  815. struct sigaction sigact, old;
  816. rb_trap_accept_nativethreads[signum] = 0;
  817. - sigact.sa_handler = handler;
  818. + if (signum == SIGSEGV || signum == SIGBUS) {
  819. + sigact.sa_sigaction = handler;
  820. + sigact.sa_flags = (SA_ONSTACK | SA_RESETHAND | SA_SIGINFO);
  821. + } else {
  822. + sigact.sa_handler = handler;
  823. + sigact.sa_flags = 0;
  824. + }
  825. +
  826. sigemptyset(&sigact.sa_mask);
  827. - sigact.sa_flags = 0;
  828. +
  829. # ifdef SA_NOCLDWAIT
  830. if (signum == SIGCHLD && handler == SIG_IGN)
  831. sigact.sa_flags |= SA_NOCLDWAIT;
  832. @@ -596,7 +604,132 @@ sighandler(sig)
  833. }
  834. }
  835. +#include <stdio.h>
  836. +#ifdef HAVE_STDARG_PROTOTYPES
  837. +#include <stdarg.h>
  838. +#define va_init_list(a,b) va_start(a,b)
  839. +#else
  840. +#include <varargs.h>
  841. +#define va_init_list(a,b) va_start(a)
  842. +#endif
  843. +
  844. +void
  845. +#ifdef HAVE_STDARG_PROTOTYPES
  846. +sig_printf(const char *fmt, ...)
  847. +#else
  848. + sig_printf(fmt, va_alist)
  849. + const char *fmt;
  850. + va_dcl
  851. +#endif
  852. +{
  853. + char buf[BUFSIZ];
  854. + va_list args;
  855. + FILE *out = stderr;
  856. +
  857. + va_init_list(args, fmt);
  858. + vfprintf(out, fmt, args);
  859. + va_end(args);
  860. + fprintf(out, "\n");
  861. +}
  862. +
  863. +static void
  864. +dump_machine_state(uc)
  865. + ucontext_t *uc;
  866. +{
  867. + const char *dump64 =
  868. + " ----------------- Register state dump ----------------------\n"
  869. + "rax = 0x%.16llx rbx = 0x%.16llx rcx = 0x%.16llx rdx = 0x%.16llx\n"
  870. + "rdi = 0x%.16llx rsi = 0x%.16llx rbp = 0x%.16llx rsp = 0x%.16llx\n"
  871. + "r8 = 0x%.16llx r9 = 0x%.16llx r10 = 0x%.16llx r11 = 0x%.16llx\n"
  872. + "r12 = 0x%.16llx r13 = 0x%.16llx r14 = 0x%.16llx r15 = 0x%.16llx\n"
  873. + "rip = 0x%.16llx rflags = 0x%.16llx cs = 0x%.16llx fs = 0x%.16llx\n"
  874. + "gs = 0x%.16llx";
  875. +
  876. + const char *dump32 =
  877. + " ----------------- Register state dump -------------------\n"
  878. + "eax = 0x%.8x ebx = 0x%.8x ecx = 0x%.8x edx = 0x%.8x\n"
  879. + "edi = 0x%.8x esi = 0x%.8x ebp = 0x%.8x esp = 0x%.8x\n"
  880. + "ss = 0x%.8x eflags = 0x%.8x eip = 0x%.8x cs = 0x%.8x\n"
  881. + "ds = 0x%.8x es = 0x%.8x fs = 0x%.8x gs = 0x%.8x\n";
  882. +
  883. +#if defined(__LP64__) && defined(__APPLE__)
  884. + sig_printf(dump64, uc->uc_mcontext->__ss.__rax, uc->uc_mcontext->__ss.__rbx,
  885. + uc->uc_mcontext->__ss.__rcx, uc->uc_mcontext->__ss.__rdx, uc->uc_mcontext->__ss.__rdi,
  886. + uc->uc_mcontext->__ss.__rsi, uc->uc_mcontext->__ss.__rbp, uc->uc_mcontext->__ss.__rsp,
  887. + uc->uc_mcontext->__ss.__r8, uc->uc_mcontext->__ss.__r9, uc->uc_mcontext->__ss.__r10,
  888. + uc->uc_mcontext->__ss.__r11, uc->uc_mcontext->__ss.__r12, uc->uc_mcontext->__ss.__r13,
  889. + uc->uc_mcontext->__ss.__r14, uc->uc_mcontext->__ss.__r15, uc->uc_mcontext->__ss.__rip,
  890. + uc->uc_mcontext->__ss.__rflags, uc->uc_mcontext->__ss.__cs, uc->uc_mcontext->__ss.__fs,
  891. + uc->uc_mcontext->__ss.__gs);
  892. +#elif !defined(__LP64__) && defined(__APPLE__)
  893. + sig_printf(dump32, uc->uc_mcontext->__ss.__eax, uc->uc_mcontext->__ss.__ebx,
  894. + uc->uc_mcontext->__ss.__ecx, uc->uc_mcontext->__ss.__edx,
  895. + uc->uc_mcontext->__ss.__edi, uc->uc_mcontext->__ss.__esi,
  896. + uc->uc_mcontext->__ss.__ebp, uc->uc_mcontext->__ss.__esp,
  897. + uc->uc_mcontext->__ss.__ss, uc->uc_mcontext->__ss.__eflags,
  898. + uc->uc_mcontext->__ss.__eip, uc->uc_mcontext->__ss.__cs,
  899. + uc->uc_mcontext->__ss.__ds, uc->uc_mcontext->__ss.__es,
  900. + uc->uc_mcontext->__ss.__fs, uc->uc_mcontext->__ss.__gs);
  901. +#elif defined(__i386__)
  902. + sig_printf(dump32, uc->uc_mcontext.gregs[REG_EAX], uc->uc_mcontext.gregs[REG_EBX],
  903. + uc->uc_mcontext.gregs[REG_ECX], uc->uc_mcontext.gregs[REG_EDX],
  904. + uc->uc_mcontext.gregs[REG_EDI], uc->uc_mcontext.gregs[REG_ESI],
  905. + uc->uc_mcontext.gregs[REG_EBP], uc->uc_mcontext.gregs[REG_ESP],
  906. + uc->uc_mcontext.gregs[REG_SS], uc->uc_mcontext.gregs[REG_EFL],
  907. + uc->uc_mcontext.gregs[REG_EIP], uc->uc_mcontext.gregs[REG_CS],
  908. + uc->uc_mcontext.gregs[REG_DS], uc->uc_mcontext.gregs[REG_ES],
  909. + uc->uc_mcontext.gregs[REG_FS], uc->uc_mcontext.gregs[REG_GS]);
  910. +#elif defined(__x86_64__)
  911. + sig_printf(dump64, uc->uc_mcontext.gregs[REG_RAX], uc->uc_mcontext.gregs[REG_RBX],
  912. + uc->uc_mcontext.gregs[REG_RCX], uc->uc_mcontext.gregs[REG_RDX],
  913. + uc->uc_mcontext.gregs[REG_RDI], uc->uc_mcontext.gregs[REG_RSI],
  914. + uc->uc_mcontext.gregs[REG_RBP], uc->uc_mcontext.gregs[REG_RSP],
  915. + uc->uc_mcontext.gregs[REG_R8], uc->uc_mcontext.gregs[REG_R9],
  916. + uc->uc_mcontext.gregs[REG_R10], uc->uc_mcontext.gregs[REG_R11],
  917. + uc->uc_mcontext.gregs[REG_R12], uc->uc_mcontext.gregs[REG_R13],
  918. + uc->uc_mcontext.gregs[REG_R14], uc->uc_mcontext.gregs[REG_R15],
  919. + uc->uc_mcontext.gregs[REG_RIP], uc->uc_mcontext.gregs[REG_EFL],
  920. + uc->uc_mcontext.gregs[REG_CSGSFS], uc->uc_mcontext.gregs[REG_CSGSFS], uc->uc_mcontext.gregs[REG_CSGSFS]);
  921. +#else
  922. +#endif
  923. +}
  924. +
  925. +static int
  926. +check_guard(caddr_t fault_addr, rb_thread_t th) {
  927. + if(fault_addr <= (caddr_t)th->guard &&
  928. + fault_addr >= (caddr_t)th->stk_ptr) {
  929. + return 1;
  930. + }
  931. + return 0;
  932. +}
  933. +
  934. #ifdef SIGBUS
  935. +#ifdef POSIX_SIGNAL
  936. +static void sigbus _((int, siginfo_t*, void*));
  937. +static void
  938. +sigbus(sig, ip, context)
  939. + int sig;
  940. + siginfo_t *ip;
  941. + void *context;
  942. +{
  943. +#if defined(HAVE_NATIVETHREAD) && defined(HAVE_NATIVETHREAD_KILL)
  944. + if (!is_ruby_native_thread() && !rb_trap_accept_nativethreads[sig]) {
  945. + sigsend_to_ruby_thread(sig);
  946. + return;
  947. + }
  948. +#endif
  949. +
  950. + dump_machine_state(context);
  951. + if (check_guard((caddr_t)ip->si_addr, rb_curr_thread)) {
  952. + /* we hit the guard page, print out a warning to help app developers */
  953. + rb_bug("Thread stack overflow! Try increasing it!");
  954. + } else {
  955. + rb_bug("Bus Error");
  956. + }
  957. +}
  958. +
  959. +#else /* !defined(POSIX_SIGNAL) */
  960. +
  961. static RETSIGTYPE sigbus _((int));
  962. static RETSIGTYPE
  963. sigbus(sig)
  964. @@ -612,8 +745,36 @@ sigbus(sig)
  965. rb_bug("Bus Error");
  966. }
  967. #endif
  968. +#endif
  969. +
  970. #ifdef SIGSEGV
  971. +#ifdef POSIX_SIGNAL
  972. +static void sigsegv _((int, siginfo_t*, void*));
  973. +static void
  974. +sigsegv(sig, ip, context)
  975. + int sig;
  976. + siginfo_t *ip;
  977. + void *context;
  978. +{
  979. +#if defined(HAVE_NATIVETHREAD) && defined(HAVE_NATIVETHREAD_KILL)
  980. + if (!is_ruby_native_thread() && !rb_trap_accept_nativethreads[sig]) {
  981. + sigsend_to_ruby_thread(sig);
  982. + return;
  983. + }
  984. +#endif
  985. +
  986. + dump_machine_state(context);
  987. + if (check_guard((caddr_t)ip->si_addr, rb_curr_thread)) {
  988. + /* we hit the guard page, print out a warning to help app developers */
  989. + rb_bug("Thread stack overflow! Try increasing it!");
  990. + } else {
  991. + rb_bug("Segmentation fault");
  992. + }
  993. +}
  994. +
  995. +#else /* !defined(POSIX_SIGNAL) */
  996. +
  997. static RETSIGTYPE sigsegv _((int));
  998. static RETSIGTYPE
  999. sigsegv(sig)
  1000. @@ -629,6 +790,7 @@ sigsegv(sig)
  1001. rb_bug("Segmentation fault");
  1002. }
  1003. #endif
  1004. +#endif
  1005. #ifdef SIGPIPE
  1006. static RETSIGTYPE sigpipe _((int));
  1007. @@ -698,7 +860,8 @@ static VALUE
  1008. trap(arg)
  1009. struct trap_arg *arg;
  1010. {
  1011. - sighandler_t func, oldfunc;
  1012. + sighandler_t oldfunc;
  1013. + void *func;
  1014. VALUE command, oldcmd;
  1015. int sig = -1;
  1016. char *s;
  1017. @@ -945,6 +1108,20 @@ sig_list()
  1018. }
  1019. static void
  1020. +create_sigstack()
  1021. +{
  1022. + stack_t ss;
  1023. + ss.ss_size = SIGSTKSZ;
  1024. + ss.ss_sp = malloc(ss.ss_size);
  1025. + ss.ss_flags = 0;
  1026. + if (sigaltstack(&ss, NULL) < 0) {
  1027. + free(ss.ss_sp);
  1028. + fprintf(stderr, "Couldn't create signal stack! Error %d: %s\n", errno, strerror(errno));
  1029. + exit(1);
  1030. + }
  1031. +}
  1032. +
  1033. +static void
  1034. install_sighandler(signum, handler)
  1035. int signum;
  1036. sighandler_t handler;
  1037. @@ -953,7 +1130,7 @@ install_sighandler(signum, handler)
  1038. old = ruby_signal(signum, handler);
  1039. if (old != SIG_DFL) {
  1040. - ruby_signal(signum, old);
  1041. + ruby_signal(signum, old);
  1042. }
  1043. }
  1044. @@ -1080,6 +1257,8 @@ Init_signal()
  1045. rb_alias(rb_eSignal, rb_intern("signm"), rb_intern("message"));
  1046. rb_define_method(rb_eInterrupt, "initialize", interrupt_init, 1);
  1047. + create_sigstack();
  1048. +
  1049. install_sighandler(SIGINT, sighandler);
  1050. #ifdef SIGHUP
  1051. install_sighandler(SIGHUP, sighandler);