00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011 #include "ruby/ruby.h"
00012 #include "ruby/st.h"
00013 #include "ruby/encoding.h"
00014
00015 #include "gc.h"
00016 #include "vm_core.h"
00017 #include "iseq.h"
00018 #include "eval_intern.h"
00019
00020 #include "vm_insnhelper.h"
00021 #include "vm_insnhelper.c"
00022 #include "vm_exec.h"
00023 #include "vm_exec.c"
00024
00025 #include "vm_method.c"
00026 #include "vm_eval.c"
00027
00028 #include <assert.h>
00029
00030 #define BUFSIZE 0x100
00031 #define PROCDEBUG 0
00032
/* VM-related classes published to Ruby land. */
VALUE rb_cRubyVM;
VALUE rb_cThread;
VALUE rb_cEnv;
VALUE rb_mRubyVMFrozenCore;

/* Bumped by rb_vm_change_state(); used to invalidate cached VM state.
 * NOTE(review): declared as VALUE although it holds a plain counter. */
VALUE ruby_vm_global_state_version = 1;
/* Number of const_missing invocations (see rb_vm_inc_const_missing_count). */
VALUE ruby_vm_const_missing_count = 0;

/* Per-basic-operation flags: nonzero once a builtin optimized method
 * (e.g. Fixnum#+) has been redefined.  Indexed by BOP_* constants. */
char ruby_vm_redefined_flag[BOP_LAST_];

/* Currently running thread and VM (single VM per process here). */
rb_thread_t *ruby_current_thread = 0;
rb_vm_t *ruby_current_vm = 0;

static void thread_free(void *ptr);

VALUE rb_insns_name_array(void);

/* Instruction-analysis hooks (defined elsewhere when profiling is on). */
void vm_analysis_operand(int insn, int n, VALUE op);
void vm_analysis_register(int reg, int isset);
void vm_analysis_insn(int insn);
00053
/* Signal a global VM state change (e.g. method redefinition) by bumping
 * ruby_vm_global_state_version via INC_VM_STATE_VERSION(). */
void
rb_vm_change_state(void)
{
    INC_VM_STATE_VERSION();
}
00059
00060 void
00061 rb_vm_inc_const_missing_count(void)
00062 {
00063 ruby_vm_const_missing_count +=1;
00064 }
00065
00066
00067
/* Push a FINISH frame on th's control-frame stack and point its pc at
 * the special finish instruction sequence.  vm_exec() uses this frame as
 * the sentinel that terminates the interpreter loop. */
static inline VALUE
rb_vm_set_finish_env(rb_thread_t * th)
{
    vm_push_frame(th, 0, VM_FRAME_MAGIC_FINISH,
                  Qnil, th->cfp->lfp[0], 0,
                  th->cfp->sp, 0, 1);
    /* The single "finish" instruction; executing it returns from vm_exec_core. */
    th->cfp->pc = (VALUE *)&finish_insn_seq[0];
    return Qtrue;
}
00077
/* Prepare th to run a toplevel iseq: push a FINISH sentinel frame, then a
 * TOP frame executing `iseqval` with th->top_self as self.
 * Raises TypeError if the iseq is not a toplevel one. */
static void
vm_set_top_stack(rb_thread_t * th, VALUE iseqval)
{
    rb_iseq_t *iseq;
    GetISeqPtr(iseqval, iseq);

    if (iseq->type != ISEQ_TYPE_TOP) {
        rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
    }

    /* for return */
    rb_vm_set_finish_env(th);

    vm_push_frame(th, iseq, VM_FRAME_MAGIC_TOP,
                  th->top_self, 0, iseq->iseq_encoded,
                  th->cfp->sp, 0, iseq->local_size);

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00097
/* Prepare th to run `iseqval` as an eval: push a FINISH sentinel, then an
 * EVAL frame whose self/lfp/dfp come from th->base_block (the binding the
 * eval executes in).  If `cref` is given it is stored in the frame's
 * special dfp[-1] slot (the cref slot used by vm_get_cref). */
static void
vm_set_eval_stack(rb_thread_t * th, VALUE iseqval, const NODE *cref)
{
    rb_iseq_t *iseq;
    rb_block_t * const block = th->base_block;
    GetISeqPtr(iseqval, iseq);

    /* for return */
    rb_vm_set_finish_env(th);
    vm_push_frame(th, iseq, VM_FRAME_MAGIC_EVAL, block->self,
                  GC_GUARDED_PTR(block->dfp), iseq->iseq_encoded,
                  th->cfp->sp, block->lfp, iseq->local_size);

    if (cref) {
        th->cfp->dfp[-1] = (VALUE)cref;
    }

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00117
/* Prepare th to run the main script iseq.  The main script executes inside
 * TOPLEVEL_BINDING's environment (via vm_set_eval_stack), and, if it has
 * locals, the binding's env is refreshed so those locals survive in
 * TOPLEVEL_BINDING afterwards. */
static void
vm_set_main_stack(rb_thread_t *th, VALUE iseqval)
{
    VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
    rb_binding_t *bind;
    rb_iseq_t *iseq;
    rb_env_t *env;

    GetBindingPtr(toplevel_binding, bind);
    GetEnvPtr(bind->env, env);
    th->base_block = &env->block;
    vm_set_eval_stack(th, iseqval, 0);
    th->base_block = 0;

    /* save binding */
    GetISeqPtr(iseqval, iseq);
    /* NOTE(review): `bind` was already dereferenced above, so the `bind &&`
     * here is redundant; kept for safety/readability. */
    if (bind && iseq->local_size > 0) {
        bind->env = rb_vm_make_env_object(th, th->cfp);
    }

    CHECK_STACK_OVERFLOW(th->cfp, iseq->stack_max);
}
00140
00141 rb_control_frame_t *
00142 rb_vm_get_ruby_level_next_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
00143 {
00144 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00145 if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
00146 return cfp;
00147 }
00148 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00149 }
00150 return 0;
00151 }
00152
/* Like rb_vm_get_ruby_level_next_cfp, but intended to find the Ruby-level
 * *caller*: after skipping the current frame, the walk only continues past
 * frames marked VM_FRAME_FLAG_PASSED (frames a C call "passed through");
 * it stops (returns 0) at the first unmarked non-Ruby frame. */
static rb_control_frame_t *
vm_get_ruby_level_caller_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        return cfp;
    }

    cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);

    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            return cfp;
        }

        if ((cfp->flag & VM_FRAME_FLAG_PASSED) == 0) {
            break;
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    return 0;
}
00174
00175
00176
00177
00178
00179
00180
00181
00182
00183
00184
/* True when `env` (a dfp pointer) does NOT point into th's VM stack,
 * i.e. the environment has already been moved to the heap. */
#define ENV_IN_HEAP_P(th, env) \
  (!((th)->stack < (env) && (env) < ((th)->stack + (th)->stack_size)))
/* The slot one past the dfp: holds the heap env object's VALUE. */
#define ENV_VAL(env) ((env)[1])
00188
/* GC mark function for heap-allocated environments: marks the copied
 * local-variable slots, the previous (outer) env, and the embedded block's
 * self/proc/iseq. */
static void
env_mark(void * const ptr)
{
    RUBY_MARK_ENTER("env");
    if (ptr) {
        const rb_env_t * const env = ptr;

        if (env->env) {
            /* Mark the env_size copied stack slots. */
            RUBY_GC_INFO("env->env\n");
            rb_gc_mark_locations(env->env, env->env + env->env_size);
        }

        RUBY_GC_INFO("env->prev_envval\n");
        RUBY_MARK_UNLESS_NULL(env->prev_envval);
        RUBY_MARK_UNLESS_NULL(env->block.self);
        RUBY_MARK_UNLESS_NULL(env->block.proc);

        if (env->block.iseq) {
            /* block.iseq may actually be a NODE (ifunc block); mark the
             * NODE itself, otherwise mark the iseq's wrapper object. */
            if (BUILTIN_TYPE(env->block.iseq) == T_NODE) {
                RUBY_MARK_UNLESS_NULL((VALUE)env->block.iseq);
            }
            else {
                RUBY_MARK_UNLESS_NULL(env->block.iseq->self);
            }
        }
    }
    RUBY_MARK_LEAVE("env");
}
00218
00219 static void
00220 env_free(void * const ptr)
00221 {
00222 RUBY_FREE_ENTER("env");
00223 if (ptr) {
00224 const rb_env_t * const env = ptr;
00225 RUBY_FREE_UNLESS_NULL(env->env);
00226 ruby_xfree(ptr);
00227 }
00228 RUBY_FREE_LEAVE("env");
00229 }
00230
00231 static size_t
00232 env_memsize(const void *ptr)
00233 {
00234 if (ptr) {
00235 const rb_env_t * const env = ptr;
00236 size_t size = sizeof(rb_env_t);
00237 if (env->env) {
00238 size += env->env_size * sizeof(VALUE);
00239 }
00240 return size;
00241 }
00242 return 0;
00243 }
00244
/* TypedData descriptor tying the mark/free/memsize callbacks above to
 * env objects created by env_alloc(). */
static const rb_data_type_t env_data_type = {
    "VM/env",
    env_mark, env_free, env_memsize,
};
00249
/* Allocate a fresh, empty env object (pointers zeroed so a GC during
 * construction sees a consistent state).  Returns the wrapper VALUE. */
static VALUE
env_alloc(void)
{
    VALUE obj;
    rb_env_t *env;
    obj = TypedData_Make_Struct(rb_cEnv, rb_env_t, &env_data_type, env);
    env->env = 0;
    env->prev_envval = 0;
    env->block.iseq = 0;
    return obj;
}
00261
00262 static VALUE check_env_value(VALUE envval);
00263
/* Debug helper: dump an env's dfp neighborhood to stdout and recursively
 * validate the previous env chain.  Only called when PROCDEBUG/VMDEBUG
 * paths are enabled.  Always returns 1 ("valid"). */
static int
check_env(rb_env_t * const env)
{
    printf("---\n");
    printf("envptr: %p\n", (void *)&env->block.dfp[0]);
    printf("orphan: %p\n", (void *)env->block.dfp[1]);
    printf("inheap: %p\n", (void *)env->block.dfp[2]);
    printf("envval: %10p ", (void *)env->block.dfp[3]);
    dp(env->block.dfp[3]);
    printf("penvv : %10p ", (void *)env->block.dfp[4]);
    dp(env->block.dfp[4]);
    printf("lfp: %10p\n", (void *)env->block.lfp);
    printf("dfp: %10p\n", (void *)env->block.dfp);
    if (env->block.dfp[4]) {
        /* Recurse into the previous (outer) env. */
        printf(">>\n");
        check_env_value(env->block.dfp[4]);
        printf("<<\n");
    }
    return 1;
}
00284
00285 static VALUE
00286 check_env_value(VALUE envval)
00287 {
00288 rb_env_t *env;
00289 GetEnvPtr(envval, env);
00290
00291 if (check_env(env)) {
00292 return envval;
00293 }
00294 rb_bug("invalid env");
00295 return Qnil;
00296 }
00297
/* Move the environment at `envptr` (a dfp on th's stack) to the heap,
 * recursively heapifying outer environments first, up to `endptr` (the
 * frame's lfp).  Returns the heap env VALUE and rewires cfp->dfp (and
 * possibly cfp->lfp) to point into the heap copy.
 * The copied layout is: [locals.. , cref/special, envval, prev_envval]. */
static VALUE
vm_make_env_each(rb_thread_t * const th, rb_control_frame_t * const cfp,
                 VALUE *envptr, VALUE * const endptr)
{
    VALUE envval, penvval = 0;
    rb_env_t *env;
    VALUE *nenvptr;
    int i, local_size;

    if (ENV_IN_HEAP_P(th, envptr)) {
        /* Already heapified: the slot after dfp holds the env object. */
        return ENV_VAL(envptr);
    }

    if (envptr != endptr) {
        /* There is an outer (parent) environment; heapify it first. */
        VALUE *penvptr = GC_GUARDED_PTR_REF(*envptr);
        rb_control_frame_t *pcfp = cfp;

        if (ENV_IN_HEAP_P(th, penvptr)) {
            penvval = ENV_VAL(penvptr);
        }
        else {
            /* Find the control frame owning the parent dfp.
             * Frames grow downward, so older frames are at higher addresses. */
            while (pcfp->dfp != penvptr) {
                pcfp++;
                if (pcfp->dfp == 0) {
                    SDR();
                    rb_bug("invalid dfp");
                }
            }
            penvval = vm_make_env_each(th, pcfp, penvptr, endptr);
            cfp->lfp = pcfp->lfp;
            *envptr = GC_GUARDED_PTR(pcfp->dfp);
        }
    }

    /* Allocate the env object and copy the locals out of the VM stack. */
    envval = env_alloc();
    GetEnvPtr(envval, env);

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        local_size = 2;
    }
    else {
        local_size = cfp->iseq->local_size;
    }

    /* +1 for the slot at dfp itself, +2 for envval/prev_envval below. */
    env->env_size = local_size + 1 + 2;
    env->local_size = local_size;
    env->env = ALLOC_N(VALUE, env->env_size);
    env->prev_envval = penvval;

    for (i = 0; i <= local_size; i++) {
        env->env[i] = envptr[-local_size + i];
#if 0
        fprintf(stderr, "%2d ", &envptr[-local_size + i] - th->stack); dp(env->env[i]);
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            /* clear value stack for GC */
            envptr[-local_size + i] = 0;
        }
#endif
    }

    /* Leave the env object at the stack dfp slot (marks it heapified),
     * and append envval/prev_envval after the copied locals. */
    *envptr = envval;
    nenvptr = &env->env[i - 1];
    nenvptr[1] = envval;	/* frame self */
    nenvptr[2] = penvval;	/* frame prev env object */

    /* Re-point the frame at the heap copy. */
    cfp->dfp = nenvptr;
    if (envptr == endptr) {
        cfp->lfp = nenvptr;
    }

    /* Capture block information for Proc/Binding creation. */
    env->block.self = cfp->self;
    env->block.lfp = cfp->lfp;
    env->block.dfp = cfp->dfp;
    env->block.iseq = cfp->iseq;

    if (!RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
        /* Not a Ruby-level frame: drop the (non-iseq) pointer. */
        env->block.iseq = 0;
    }
    return envval;
}
00382
00383 static int
00384 collect_local_variables_in_iseq(rb_iseq_t *iseq, const VALUE ary)
00385 {
00386 int i;
00387 if (!iseq) return 0;
00388 for (i = 0; i < iseq->local_table_size; i++) {
00389 ID lid = iseq->local_table[i];
00390 if (rb_is_local_id(lid)) {
00391 rb_ary_push(ary, ID2SYM(lid));
00392 }
00393 }
00394 return 1;
00395 }
00396
00397 static int
00398 collect_local_variables_in_env(rb_env_t * env, const VALUE ary)
00399 {
00400
00401 while (collect_local_variables_in_iseq(env->block.iseq, ary),
00402 env->prev_envval) {
00403 GetEnvPtr(env->prev_envval, env);
00404 }
00405 return 0;
00406 }
00407
00408 static int
00409 vm_collect_local_variables_in_heap(rb_thread_t *th, VALUE *dfp, VALUE ary)
00410 {
00411 if (ENV_IN_HEAP_P(th, dfp)) {
00412 rb_env_t *env;
00413 GetEnvPtr(ENV_VAL(dfp), env);
00414 collect_local_variables_in_env(env, ary);
00415 return 1;
00416 }
00417 else {
00418 return 0;
00419 }
00420 }
00421
/* Public entry: heapify the environment of `cfp` (skipping a FINISH
 * sentinel frame if that is what cfp points at) and return the env
 * object.  Used when creating Procs/Bindings and when a frame's locals
 * must outlive the stack frame. */
VALUE
rb_vm_make_env_object(rb_thread_t * th, rb_control_frame_t *cfp)
{
    VALUE envval;

    if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_FINISH) {
        /* FINISH frames carry no environment; use the frame below. */
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }

    envval = vm_make_env_each(th, cfp, cfp->dfp, cfp->lfp);

    if (PROCDEBUG) {
        check_env_value(envval);
    }

    return envval;
}
00440
00441 void
00442 rb_vm_stack_to_heap(rb_thread_t * const th)
00443 {
00444 rb_control_frame_t *cfp = th->cfp;
00445 while ((cfp = rb_vm_get_ruby_level_next_cfp(th, cfp)) != 0) {
00446 rb_vm_make_env_object(th, cfp);
00447 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00448 }
00449 }
00450
00451
00452
00453 static VALUE
00454 vm_make_proc_from_block(rb_thread_t *th, rb_block_t *block)
00455 {
00456 VALUE proc = block->proc;
00457
00458 if (block->proc) {
00459 return block->proc;
00460 }
00461
00462 proc = rb_vm_make_proc(th, block, rb_cProc);
00463 block->proc = proc;
00464
00465 return proc;
00466 }
00467
/* Create a Proc (of class `klass`) from `block`.  Heapifies the block's
 * environment, and — if the owning frame itself received a block — wraps
 * that block in a Proc too (blockprocval) so it stays alive and is
 * reachable through the heap env.  `block->proc` must not be set yet. */
VALUE
rb_vm_make_proc(rb_thread_t *th, const rb_block_t *block, VALUE klass)
{
    VALUE procval, envval, blockprocval = 0;
    rb_proc_t *proc;
    rb_control_frame_t *cfp = RUBY_VM_GET_CFP_FROM_BLOCK_PTR(block);

    if (block->proc) {
        rb_bug("rb_vm_make_proc: Proc value is already created.");
    }

    if (GC_GUARDED_PTR_REF(cfp->lfp[0])) {
        /* The frame was given a block: turn it into a Proc and re-point
         * lfp[0] at the Proc-owned block so it survives heapification. */
        rb_proc_t *p;

        blockprocval = vm_make_proc_from_block(
            th, (rb_block_t *)GC_GUARDED_PTR_REF(*cfp->lfp));

        GetProcPtr(blockprocval, p);
        *cfp->lfp = GC_GUARDED_PTR(&p->block);
    }

    envval = rb_vm_make_env_object(th, cfp);

    if (PROCDEBUG) {
        check_env_value(envval);
    }
    procval = rb_proc_alloc(klass);
    GetProcPtr(procval, proc);
    proc->blockprocval = blockprocval;
    proc->block.self = block->self;
    proc->block.lfp = block->lfp;
    proc->block.dfp = block->dfp;
    proc->block.iseq = block->iseq;
    proc->block.proc = procval;
    proc->envval = envval;
    proc->safe_level = th->safe_level;

    if (VMDEBUG) {
        /* After heapification neither pointer may still point into the
         * machine stack. */
        if (th->stack < block->dfp && block->dfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->dfp");
        }
        if (th->stack < block->lfp && block->lfp < th->stack + th->stack_size) {
            rb_bug("invalid ptr: block->lfp");
        }
    }

    return procval;
}
00516
00517
00518
/* Invoke `block` from C with `argc`/`argv`, `self` as receiver, an
 * optional block argument `blockptr`, and an optional cref override.
 * Three cases: no real block (special const) -> Qnil; an ifunc/NODE
 * block -> vm_yield_with_cfunc; otherwise set up a BLOCK/LAMBDA frame
 * and run the interpreter via vm_exec. */
static inline VALUE
invoke_block_from_c(rb_thread_t *th, const rb_block_t *block,
                    VALUE self, int argc, const VALUE *argv,
                    const rb_block_t *blockptr, const NODE *cref)
{
    if (SPECIAL_CONST_P(block->iseq))
        return Qnil;
    else if (BUILTIN_TYPE(block->iseq) != T_NODE) {
        const rb_iseq_t *iseq = block->iseq;
        const rb_control_frame_t *cfp;
        rb_control_frame_t *ncfp;
        int i, opt_pc, arg_size = iseq->arg_size;
        /* Lambdas get strict argument semantics and their own frame magic. */
        int type = block_proc_is_lambda(block->proc) ?
            VM_FRAME_MAGIC_LAMBDA : VM_FRAME_MAGIC_BLOCK;

        rb_vm_set_finish_env(th);

        cfp = th->cfp;
        CHECK_STACK_OVERFLOW(cfp, argc + iseq->stack_max);

        /* Copy arguments onto the VM stack before arg setup. */
        for (i=0; i<argc; i++) {
            cfp->sp[i] = argv[i];
        }

        opt_pc = vm_yield_setup_args(th, iseq, argc, cfp->sp, blockptr,
                                     type == VM_FRAME_MAGIC_LAMBDA);

        ncfp = vm_push_frame(th, iseq, type,
                             self, GC_GUARDED_PTR(block->dfp),
                             iseq->iseq_encoded + opt_pc, cfp->sp + arg_size, block->lfp,
                             iseq->local_size - arg_size);
        /* Transfer the method entry stashed by the caller (if any). */
        ncfp->me = th->passed_me;
        th->passed_me = 0;

        if (cref) {
            th->cfp->dfp[-1] = (VALUE)cref;
        }

        return vm_exec(th);
    }
    else {
        return vm_yield_with_cfunc(th, block, self, argc, argv, blockptr);
    }
}
00563
00564 static inline const rb_block_t *
00565 check_block(rb_thread_t *th)
00566 {
00567 const rb_block_t *blockptr = GC_GUARDED_PTR_REF(th->cfp->lfp[0]);
00568
00569 if (blockptr == 0) {
00570 rb_vm_localjump_error("no block given", Qnil, 0);
00571 }
00572
00573 return blockptr;
00574 }
00575
/* Yield to the current frame's block with an explicit cref (used by
 * instance_eval and friends). */
static inline VALUE
vm_yield_with_cref(rb_thread_t *th, int argc, const VALUE *argv, const NODE *cref)
{
    const rb_block_t *blockptr = check_block(th);
    return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, cref);
}
00582
/* Yield to the current frame's block (plain `yield` from C). */
static inline VALUE
vm_yield(rb_thread_t *th, int argc, const VALUE *argv)
{
    const rb_block_t *blockptr = check_block(th);
    return invoke_block_from_c(th, blockptr, blockptr->self, argc, argv, 0, 0);
}
00589
/* Call `proc` with the given receiver/arguments/block.  For real procs
 * (not method objects) the proc's captured $SAFE level is installed for
 * the call and restored afterwards.  Non-local exits are caught by the
 * tag so the safe level is restored before re-raising via JUMP_TAG. */
VALUE
rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, VALUE self,
                  int argc, const VALUE *argv, const rb_block_t * blockptr)
{
    VALUE val = Qundef;
    int state;
    /* volatile: must survive the longjmp taken on a non-local exit. */
    volatile int stored_safe = th->safe_level;

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == 0) {
        if (!proc->is_from_method) {
            th->safe_level = proc->safe_level;
        }
        val = invoke_block_from_c(th, &proc->block, self, argc, argv, blockptr, 0);
    }
    TH_POP_TAG();

    if (!proc->is_from_method) {
        th->safe_level = stored_safe;
    }

    if (state) {
        JUMP_TAG(state);
    }
    return val;
}
00616
00617
00618
00619 static rb_control_frame_t *
00620 vm_normal_frame(rb_thread_t *th, rb_control_frame_t *cfp)
00621 {
00622 while (cfp->pc == 0) {
00623 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
00624 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
00625 return 0;
00626 }
00627 }
00628 return cfp;
00629 }
00630
/* Read special variable `key` ($~/$_ family) from the nearest normal
 * frame's lfp-attached svar storage. */
static VALUE
vm_cfp_svar_get(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key)
{
    cfp = vm_normal_frame(th, cfp);
    return lfp_svar_get(th, cfp ? cfp->lfp : 0, key);
}
00637
/* Write special variable `key` in the nearest normal frame's svar storage. */
static void
vm_cfp_svar_set(rb_thread_t *th, rb_control_frame_t *cfp, VALUE key, const VALUE val)
{
    cfp = vm_normal_frame(th, cfp);
    lfp_svar_set(th, cfp ? cfp->lfp : 0, key, val);
}
00644
/* Read special variable `key` for the current thread's current frame. */
static VALUE
vm_svar_get(VALUE key)
{
    rb_thread_t *th = GET_THREAD();
    return vm_cfp_svar_get(th, th->cfp, key);
}
00651
/* Write special variable `key` for the current thread's current frame. */
static void
vm_svar_set(VALUE key, VALUE val)
{
    rb_thread_t *th = GET_THREAD();
    vm_cfp_svar_set(th, th->cfp, key, val);
}
00658
/* $~ (last MatchData): stored at svar slot 1. */
VALUE
rb_backref_get(void)
{
    return vm_svar_get(1);
}
00664
/* Set $~ (svar slot 1). */
void
rb_backref_set(VALUE val)
{
    vm_svar_set(1, val);
}
00670
/* $_ (last read line): stored at svar slot 0. */
VALUE
rb_lastline_get(void)
{
    return vm_svar_get(0);
}
00676
/* Set $_ (svar slot 0). */
void
rb_lastline_set(VALUE val)
{
    vm_svar_set(0, val);
}
00682
00683
00684
/* Map cfp's current pc offset to a source line number using the iseq's
 * insn_info_table.  The table maps instruction positions to the line of
 * the *preceding* entry, hence the [i - 1] lookups.  Returns 0 for
 * non-Ruby frames or when pc sits at the very first table entry. */
int
rb_vm_get_sourceline(const rb_control_frame_t *cfp)
{
    int line_no = 0;
    const rb_iseq_t *iseq = cfp->iseq;

    if (RUBY_VM_NORMAL_ISEQ_P(iseq) && iseq->insn_info_size > 0) {
        rb_num_t i;
        size_t pos = cfp->pc - cfp->iseq->iseq_encoded;

        /* pc at the first recorded position: no preceding entry, keep 0. */
        if (iseq->insn_info_table[0].position == pos) goto found;
        for (i = 1; i < iseq->insn_info_size; i++) {
            if (iseq->insn_info_table[i].position == pos) {
                line_no = iseq->insn_info_table[i - 1].line_no;
                goto found;
            }
        }
        /* Past the last entry: use the final table entry's line. */
        line_no = iseq->insn_info_table[i - 1].line_no;
    }
  found:
    return line_no;
}
00707
/* Walk th's control-frame stack from the outermost frame inward, skipping
 * the first `lev` levels, and feed each Ruby or C frame to `iter` as
 * (arg, file, line_no, name).  `init` (if given) is called once before
 * iteration starts.  Returns FALSE when `lev` exceeds the stack depth.
 * NOTE: the stack grows downward, so older frames live at higher
 * addresses; +/- on cfp pointers moves between frames. */
static int
vm_backtrace_each(rb_thread_t *th, int lev, void (*init)(void *), rb_backtrace_iter_func *iter, void *arg)
{
    const rb_control_frame_t *limit_cfp = th->cfp;
    const rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
    VALUE file = Qnil;
    int line_no = 0;

    /* Skip the two sentinel frames at the very bottom of the stack. */
    cfp -= 2;
    while (lev-- >= 0) {
        if (++limit_cfp > cfp) {
            return FALSE;
        }
    }
    if (init) (*init)(arg);
    limit_cfp = RUBY_VM_NEXT_CONTROL_FRAME(limit_cfp);
    if (th->vm->progname) file = th->vm->progname;
    while (cfp > limit_cfp) {
        if (cfp->iseq != 0) {
            if (cfp->pc != 0) {
                /* Ruby-level frame: report its iseq's file/line/name. */
                rb_iseq_t *iseq = cfp->iseq;

                line_no = rb_vm_get_sourceline(cfp);
                file = iseq->filename;
                if ((*iter)(arg, file, line_no, iseq->name)) break;
            }
        }
        else if (RUBYVM_CFUNC_FRAME_P(cfp)) {
            /* C-function frame: report the method id (original id when
             * the entry was aliased). */
            ID id;
            extern VALUE ruby_engine_name;

            if (NIL_P(file)) file = ruby_engine_name;
            if (cfp->me->def)
                id = cfp->me->def->original_id;
            else
                id = cfp->me->called_id;
            if ((*iter)(arg, file, line_no, rb_id2str(id))) break;
        }
        cfp = RUBY_VM_NEXT_CONTROL_FRAME(cfp);
    }
    return TRUE;
}
00750
00751 static void
00752 vm_backtrace_alloc(void *arg)
00753 {
00754 VALUE *aryp = arg;
00755 *aryp = rb_ary_new();
00756 }
00757
/* iter callback for vm_backtrace_each: format one "file:line:in `name'"
 * entry (line omitted when 0) into the array `arg` points at.  Returns 0
 * so iteration always continues. */
static int
vm_backtrace_push(void *arg, VALUE file, int line_no, VALUE name)
{
    VALUE *aryp = arg;
    VALUE bt;

    if (line_no) {
        bt = rb_enc_sprintf(rb_enc_compatible(file, name), "%s:%d:in `%s'",
                            RSTRING_PTR(file), line_no, RSTRING_PTR(name));
    }
    else {
        bt = rb_enc_sprintf(rb_enc_compatible(file, name), "%s:in `%s'",
                            RSTRING_PTR(file), RSTRING_PTR(name));
    }
    rb_ary_push(*aryp, bt);
    return 0;
}
00775
/* Build a backtrace array for th, skipping `lev` levels.  The frames are
 * collected outermost-first, so the result is reversed before returning.
 * Returns Qnil when no array was created (lev >= 0 and too deep). */
static inline VALUE
vm_backtrace(rb_thread_t *th, int lev)
{
    VALUE ary = 0;

    if (lev < 0) {
        /* Negative lev: always return an array, even if empty. */
        ary = rb_ary_new();
    }
    vm_backtrace_each(th, lev, vm_backtrace_alloc, vm_backtrace_push, &ary);
    if (!ary) return Qnil;
    return rb_ary_reverse(ary);
}
00788
00789 const char *
00790 rb_sourcefile(void)
00791 {
00792 rb_thread_t *th = GET_THREAD();
00793 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00794
00795 if (cfp) {
00796 return RSTRING_PTR(cfp->iseq->filename);
00797 }
00798 else {
00799 return 0;
00800 }
00801 }
00802
00803 int
00804 rb_sourceline(void)
00805 {
00806 rb_thread_t *th = GET_THREAD();
00807 rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
00808
00809 if (cfp) {
00810 return rb_vm_get_sourceline(cfp);
00811 }
00812 else {
00813 return 0;
00814 }
00815 }
00816
/* Return the cref (lexical class/module chain) of the nearest Ruby-level
 * frame.  NOTE(review): cfp is dereferenced without a NULL check — this
 * presumes a Ruby-level frame always exists at the call sites. */
NODE *
rb_vm_cref(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
    return vm_get_cref(cfp->iseq, cfp->lfp, cfp->dfp);
}
00824
00825 #if 0
00826 void
00827 debug_cref(NODE *cref)
00828 {
00829 while (cref) {
00830 dp(cref->nd_clss);
00831 printf("%ld\n", cref->nd_visi);
00832 cref = cref->nd_next;
00833 }
00834 }
00835 #endif
00836
/* Return the cbase (innermost lexically enclosing class/module) of the
 * nearest Ruby-level frame.  Same unchecked-cfp caveat as rb_vm_cref. */
VALUE
rb_vm_cbase(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);

    return vm_get_cbase(cfp->iseq, cfp->lfp, cfp->dfp);
}
00845
00846
00847
/* Build (but do not raise) a LocalJumpError carrying @exit_value = value
 * and @reason = a symbol derived from the TAG_* `reason` code.
 * CONST_ID caches each interned symbol id in a function-local static. */
static VALUE
make_localjump_error(const char *mesg, VALUE value, int reason)
{
    extern VALUE rb_eLocalJumpError;
    VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
    ID id;

    switch (reason) {
      case TAG_BREAK:
        CONST_ID(id, "break");
        break;
      case TAG_REDO:
        CONST_ID(id, "redo");
        break;
      case TAG_RETRY:
        CONST_ID(id, "retry");
        break;
      case TAG_NEXT:
        CONST_ID(id, "next");
        break;
      case TAG_RETURN:
        CONST_ID(id, "return");
        break;
      default:
        CONST_ID(id, "noreason");
        break;
    }
    rb_iv_set(exc, "@exit_value", value);
    rb_iv_set(exc, "@reason", ID2SYM(id));
    return exc;
}
00879
00880 void
00881 rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
00882 {
00883 VALUE exc = make_localjump_error(mesg, value, reason);
00884 rb_exc_raise(exc);
00885 }
00886
/* Translate an escaped TAG_* state into a LocalJumpError exception object
 * (or Qnil for state 0 / unknown states).  Qundef `val` means "use the
 * value stashed in the current tag's retval". */
VALUE
rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
{
    VALUE result = Qnil;

    if (val == Qundef) {
        val = GET_THREAD()->tag->retval;
    }
    switch (state) {
      case 0:
        break;
      case TAG_RETURN:
        result = make_localjump_error("unexpected return", val, state);
        break;
      case TAG_BREAK:
        result = make_localjump_error("unexpected break", val, state);
        break;
      case TAG_NEXT:
        result = make_localjump_error("unexpected next", val, state);
        break;
      case TAG_REDO:
        result = make_localjump_error("unexpected redo", Qnil, state);
        break;
      case TAG_RETRY:
        result = make_localjump_error("retry outside of rescue clause", Qnil, state);
        break;
      default:
        break;
    }
    return result;
}
00918
00919 void
00920 rb_vm_jump_tag_but_local_jump(int state, VALUE val)
00921 {
00922 if (val != Qnil) {
00923 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, val);
00924 rb_exc_raise(exc);
00925 }
00926 JUMP_TAG(state);
00927 }
00928
NORETURN(static void vm_iter_break(rb_thread_t *th));

/* Implement `break` out of the current iterator block: throw TAG_BREAK
 * targeted at the enclosing frame's dfp.  Does not return. */
static void
vm_iter_break(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->cfp;
    VALUE *dfp = GC_GUARDED_PTR_REF(*cfp->dfp);

    th->state = TAG_BREAK;
    th->errinfo = (VALUE)NEW_THROW_OBJECT(Qnil, (VALUE)dfp, TAG_BREAK);
    TH_JUMP_TAG(th, TAG_BREAK);
}
00941
/* Public C API: break out of the current iterator block. */
void
rb_iter_break(void)
{
    vm_iter_break(GET_THREAD());
}
00947
00948
00949
00950 static st_table *vm_opt_method_table = 0;
00951
/* Called when a method entry is (re)defined: if `me` is one of the
 * registered optimized builtins (vm_opt_method_table), mark its BOP flag
 * so specialized instructions fall back to a real dispatch.
 * NOTE(review): &bop (VALUE*) is passed where st_lookup takes st_data_t*;
 * the two are the same width here — confirm on all targets. */
static void
rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me)
{
    VALUE bop;
    if (!me->def || me->def->type == VM_METHOD_TYPE_CFUNC) {
        if (st_lookup(vm_opt_method_table, (st_data_t)me, &bop)) {
            ruby_vm_redefined_flag[bop] = 1;
        }
    }
}
00962
/* Register klass#mid (which must be a C-function method) as the builtin
 * behind basic operation `bop`, so redefinition can be detected later.
 * Aborts with rb_bug if the method does not exist as a cfunc. */
static void
add_opt_method(VALUE klass, ID mid, VALUE bop)
{
    rb_method_entry_t *me;
    if (st_lookup(RCLASS_M_TBL(klass), mid, (void *)&me) && me->def &&
        me->def->type == VM_METHOD_TYPE_CFUNC) {
        st_insert(vm_opt_method_table, (st_data_t)me, (st_data_t)bop);
    }
    else {
        rb_bug("undefined optimized method: %s", rb_id2name(mid));
    }
}
00975
/* Build the table of optimized builtin methods and clear all redefinition
 * flags.  OP selects the method id and BOP flag; C registers each class
 * implementing that builtin. */
static void
vm_init_redefined_flag(void)
{
    ID mid;
    VALUE bop;

    vm_opt_method_table = st_init_numtable();

#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
#define C(k) add_opt_method(rb_c##k, mid, bop)
    OP(PLUS, PLUS), (C(Fixnum), C(Float), C(String), C(Array));
    OP(MINUS, MINUS), (C(Fixnum));
    OP(MULT, MULT), (C(Fixnum), C(Float));
    OP(DIV, DIV), (C(Fixnum), C(Float));
    OP(MOD, MOD), (C(Fixnum), C(Float));
    OP(Eq, EQ), (C(Fixnum), C(Float), C(String));
    OP(Eqq, EQQ), (C(Fixnum), C(Bignum), C(Float), C(Symbol), C(String));
    OP(LT, LT), (C(Fixnum));
    OP(LE, LE), (C(Fixnum));
    OP(LTLT, LTLT), (C(String), C(Array));
    OP(AREF, AREF), (C(Array), C(Hash));
    OP(ASET, ASET), (C(Array), C(Hash));
    OP(Length, LENGTH), (C(Array), C(String), C(Hash));
    OP(Size, SIZE), (C(Array), C(String), C(Hash));
    OP(Succ, SUCC), (C(Fixnum), C(String), C(Time));
    OP(GT, GT), (C(Fixnum));
    OP(GE, GE), (C(Fixnum));
#undef C
#undef OP
}
01006
01007
01008
#if VMDEBUG
/* Debug-only: human-readable name for a frame's VM_FRAME_MAGIC_* type. */
static const char *
vm_frametype_name(const rb_control_frame_t *cfp)
{
    switch (VM_FRAME_TYPE(cfp)) {
      case VM_FRAME_MAGIC_METHOD: return "method";
      case VM_FRAME_MAGIC_BLOCK: return "block";
      case VM_FRAME_MAGIC_CLASS: return "class";
      case VM_FRAME_MAGIC_TOP: return "top";
      case VM_FRAME_MAGIC_FINISH: return "finish";
      case VM_FRAME_MAGIC_CFUNC: return "cfunc";
      case VM_FRAME_MAGIC_PROC: return "proc";
      case VM_FRAME_MAGIC_IFUNC: return "ifunc";
      case VM_FRAME_MAGIC_EVAL: return "eval";
      case VM_FRAME_MAGIC_LAMBDA: return "lambda";
      default:
        rb_bug("unknown frame");
    }
}
#endif
01029
01030
01031
01032
01033
01034
01035
01036
01037
01038
01039
01040
01041
01042
01043
01044
01045
01046
01047
01048
01049
01050
01051
01052
01053
01054
01055
01056
01057
01058
01059
01060
01061
01062
01063
01064
01065
01066
01067
01068
01069
01070
01071
01072
01073
01074
01075
01076
01077
01078
01079
01080
01081
01082
01083
01084
01085
01086
01087
01088
01089
01090
01091
01092
01093
01094
01095
01096
01097
01098
01099
01100
01101
01102
01103
01104
01105
01106
01107
01108
01109
01110
01111
01112
01113
01114
01115
01116
01117
01118
01119
01120
01121
01122
01123
01124
01125
01126
01127
01128
01129
01130
01131
01132
/* Main interpreter driver.  Runs vm_exec_core() under a tag; when a
 * non-local transfer (raise/break/return/next/redo/retry/throw) escapes,
 * this function pops frames and searches each frame's catch table for a
 * matching entry, either resuming the loop at a continuation point or
 * pushing an ensure/rescue iseq frame.  Returns the final result value;
 * an unhandled state is re-thrown past the FINISH sentinel frame. */
static VALUE
vm_exec(rb_thread_t *th)
{
    int state;
    VALUE result, err;
    VALUE initial = 0;
    VALUE *escape_dfp = NULL;

    TH_PUSH_TAG(th);
    /* _tag is declared by TH_PUSH_TAG. */
    _tag.retval = Qnil;
    if ((state = EXEC_TAG()) == 0) {
      vm_loop_start:
        result = vm_exec_core(th, initial);
        if ((state = th->state) != 0) {
            /* A throw was requested in-loop; result carries the throw obj. */
            err = result;
            th->state = 0;
            goto exception_handler;
        }
    }
    else {
        int i;
        struct iseq_catch_table_entry *entry;
        unsigned long epc, cont_pc, cont_sp;
        VALUE catch_iseqval;
        rb_control_frame_t *cfp;
        VALUE type;

        err = th->errinfo;

      exception_handler:
        cont_pc = cont_sp = catch_iseqval = 0;

        /* Unwind past frames with no pc/iseq (C frames), firing C_RETURN
         * events for cfunc frames as they are popped. */
        while (th->cfp->pc == 0 || th->cfp->iseq == 0) {
            if (UNLIKELY(VM_FRAME_TYPE(th->cfp) == VM_FRAME_MAGIC_CFUNC)) {
                const rb_method_entry_t *me = th->cfp->me;
                EXEC_EVENT_HOOK(th, RUBY_EVENT_C_RETURN, th->cfp->self, me->called_id, me->klass);
            }
            th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);
        }

        cfp = th->cfp;
        epc = cfp->pc - cfp->iseq->iseq_encoded;

        if (state == TAG_BREAK || state == TAG_RETURN) {
            escape_dfp = GET_THROWOBJ_CATCH_POINT(err);

            if (cfp->dfp == escape_dfp) {
                /* This frame is the throw's target. */
                if (state == TAG_RETURN) {
                    if ((cfp + 1)->pc != &finish_insn_seq[0]) {
                        /* Not directly under a FINISH frame: retarget the
                         * throw at the caller and continue as a break. */
                        SET_THROWOBJ_CATCH_POINT(err, (VALUE)(cfp + 1)->dfp);
                        SET_THROWOBJ_STATE(err, state = TAG_BREAK);
                    }
                    else {
                        /* Returning through a FINISH frame: run any ensure
                         * first, otherwise produce the return value. */
                        for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                            entry = &cfp->iseq->catch_table[i];
                            if (entry->start < epc && entry->end >= epc) {
                                if (entry->type == CATCH_TYPE_ENSURE) {
                                    catch_iseqval = entry->iseq;
                                    cont_pc = entry->cont;
                                    cont_sp = entry->sp;
                                    break;
                                }
                            }
                        }
                        if (!catch_iseqval) {
                            result = GET_THROWOBJ_VAL(err);
                            th->errinfo = Qnil;
                            /* Skip this frame and its FINISH sentinel. */
                            th->cfp += 2;
                            goto finish_vme;
                        }
                    }
                    /* through */
                }
                else {
                    /* TAG_BREAK at its target: push the break value and
                     * resume the loop in this frame. */
#if OPT_STACK_CACHING
                    initial = (GET_THROWOBJ_VAL(err));
#else
                    *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
                    th->errinfo = Qnil;
                    goto vm_loop_start;
                }
            }
        }

        if (state == TAG_RAISE) {
            /* Exceptions are caught by RESCUE or ENSURE entries. */
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_RESCUE ||
                        entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }
        else if (state == TAG_RETRY) {
            /* retry: run ensure bodies on the way, or jump back to the
             * begin of the rescued region in the target frame. */
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == CATCH_TYPE_RETRY) {
                        VALUE *escape_dfp;
                        escape_dfp = GET_THROWOBJ_CATCH_POINT(err);
                        if (cfp->dfp == escape_dfp) {
                            cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
                            th->errinfo = Qnil;
                            goto vm_loop_start;
                        }
                    }
                }
            }
        }
        else if (state == TAG_BREAK && ((VALUE)escape_dfp & ~0x03) == 0) {
            /* break with a level-encoded (not dfp) target: handled via
             * the catch table like redo/next. */
            type = CATCH_TYPE_BREAK;

          search_restart_point:
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];

                if (entry->start < epc && entry->end >= epc) {
                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                    else if (entry->type == type) {
                        /* Resume at the entry's continuation point. */
                        cfp->pc = cfp->iseq->iseq_encoded + entry->cont;
                        cfp->sp = cfp->bp + entry->sp;

                        if (state != TAG_REDO) {
                            /* break/next carry a value; redo does not. */
#if OPT_STACK_CACHING
                            initial = (GET_THROWOBJ_VAL(err));
#else
                            *th->cfp->sp++ = (GET_THROWOBJ_VAL(err));
#endif
                        }
                        th->errinfo = Qnil;
                        goto vm_loop_start;
                    }
                }
            }
        }
        else if (state == TAG_REDO) {
            type = CATCH_TYPE_REDO;
            goto search_restart_point;
        }
        else if (state == TAG_NEXT) {
            type = CATCH_TYPE_NEXT;
            goto search_restart_point;
        }
        else {
            /* Any other state (e.g. targeted break/return passing through):
             * only ensure bodies run in this frame. */
            for (i = 0; i < cfp->iseq->catch_table_size; i++) {
                entry = &cfp->iseq->catch_table[i];
                if (entry->start < epc && entry->end >= epc) {

                    if (entry->type == CATCH_TYPE_ENSURE) {
                        catch_iseqval = entry->iseq;
                        cont_pc = entry->cont;
                        cont_sp = entry->sp;
                        break;
                    }
                }
            }
        }

        if (catch_iseqval != 0) {
            /* Found a rescue/ensure iseq: rewind this frame to the
             * continuation point and push the handler as a BLOCK frame
             * with the throw object (err) as its single argument. */
            rb_iseq_t *catch_iseq;

            GetISeqPtr(catch_iseqval, catch_iseq);
            cfp->sp = cfp->bp + cont_sp;
            cfp->pc = cfp->iseq->iseq_encoded + cont_pc;

            /* push the exception to the stack */
            cfp->sp[0] = err;
            vm_push_frame(th, catch_iseq, VM_FRAME_MAGIC_BLOCK,
                          cfp->self, (VALUE)cfp->dfp, catch_iseq->iseq_encoded,
                          cfp->sp + 1 /* push value */, cfp->lfp, catch_iseq->local_size - 1);

            state = 0;
            th->state = 0;
            th->errinfo = Qnil;
            goto vm_loop_start;
        }
        else {
            /* No handler in this frame: fire return/end events, pop it,
             * and keep unwinding until the FINISH sentinel. */
            switch (VM_FRAME_TYPE(th->cfp)) {
              case VM_FRAME_MAGIC_METHOD:
                EXEC_EVENT_HOOK(th, RUBY_EVENT_RETURN, th->cfp->self, 0, 0);
                break;
              case VM_FRAME_MAGIC_CLASS:
                EXEC_EVENT_HOOK(th, RUBY_EVENT_END, th->cfp->self, 0, 0);
                break;
            }

            th->cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(th->cfp);

            if (VM_FRAME_TYPE(th->cfp) != VM_FRAME_MAGIC_FINISH) {
                goto exception_handler;
            }
            else {
                /* Leave this vm_exec invocation, re-throwing to the caller. */
                vm_pop_frame(th);
                th->errinfo = err;
                TH_POP_TAG2();
                JUMP_TAG(state);
            }
        }
    }
  finish_vme:
    TH_POP_TAG();
    return result;
}
01360
01361
01362
01363 VALUE
01364 rb_iseq_eval(VALUE iseqval)
01365 {
01366 rb_thread_t *th = GET_THREAD();
01367 VALUE val;
01368 volatile VALUE tmp;
01369
01370 vm_set_top_stack(th, iseqval);
01371
01372 val = vm_exec(th);
01373 tmp = iseqval;
01374 return val;
01375 }
01376
01377 VALUE
01378 rb_iseq_eval_main(VALUE iseqval)
01379 {
01380 rb_thread_t *th = GET_THREAD();
01381 VALUE val;
01382 volatile VALUE tmp;
01383
01384 vm_set_main_stack(th, iseqval);
01385
01386 val = vm_exec(th);
01387 tmp = iseqval;
01388 return val;
01389 }
01390
01391 int
01392 rb_thread_method_id_and_class(rb_thread_t *th,
01393 ID *idp, VALUE *klassp)
01394 {
01395 rb_control_frame_t *cfp = th->cfp;
01396 rb_iseq_t *iseq = cfp->iseq;
01397 if (!iseq) {
01398 if (idp) *idp = cfp->me->def->original_id;
01399 if (klassp) *klassp = cfp->me->klass;
01400 return 1;
01401 }
01402 while (iseq) {
01403 if (RUBY_VM_IFUNC_P(iseq)) {
01404 if (idp) CONST_ID(*idp, "<ifunc>");
01405 if (klassp) *klassp = 0;
01406 return 1;
01407 }
01408 if (iseq->defined_method_id) {
01409 if (idp) *idp = iseq->defined_method_id;
01410 if (klassp) *klassp = iseq->klass;
01411 return 1;
01412 }
01413 if (iseq->local_iseq == iseq) {
01414 break;
01415 }
01416 iseq = iseq->parent_iseq;
01417 }
01418 return 0;
01419 }
01420
01421 int
01422 rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
01423 {
01424 return rb_thread_method_id_and_class(GET_THREAD(), idp, klassp);
01425 }
01426
01427 VALUE
01428 rb_thread_current_status(const rb_thread_t *th)
01429 {
01430 const rb_control_frame_t *cfp = th->cfp;
01431 VALUE str = Qnil;
01432
01433 if (cfp->iseq != 0) {
01434 if (cfp->pc != 0) {
01435 rb_iseq_t *iseq = cfp->iseq;
01436 int line_no = rb_vm_get_sourceline(cfp);
01437 char *file = RSTRING_PTR(iseq->filename);
01438 str = rb_sprintf("%s:%d:in `%s'",
01439 file, line_no, RSTRING_PTR(iseq->name));
01440 }
01441 }
01442 else if (cfp->me->def->original_id) {
01443 str = rb_sprintf("`%s#%s' (cfunc)",
01444 RSTRING_PTR(rb_class_name(cfp->me->klass)),
01445 rb_id2name(cfp->me->def->original_id));
01446 }
01447
01448 return str;
01449 }
01450
/* Call func(arg) with a dummy toplevel frame pushed around it so VM
 * bookkeeping (backtraces, frame walks) stays consistent.  The throwaway
 * iseq carries `filename` for backtrace display. */
VALUE
rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
                 const rb_block_t *blockptr, VALUE filename, VALUE filepath)
{
    rb_thread_t *th = GET_THREAD();
    const rb_control_frame_t *reg_cfp = th->cfp;
    /* volatile: keep the iseq wrapper object alive across the call for GC */
    volatile VALUE iseqval = rb_iseq_new(0, filename, filename, filepath, 0, ISEQ_TYPE_TOP);
    VALUE val;

    vm_push_frame(th, DATA_PTR(iseqval), VM_FRAME_MAGIC_TOP,
                  recv, (VALUE)blockptr, 0, reg_cfp->sp, 0, 1);

    val = (*func)(arg);

    /* pop the dummy frame before returning the result */
    vm_pop_frame(th);
    return val;
}
01468
01469
01470
01471 static int
01472 vm_mark_each_thread_func(st_data_t key, st_data_t value, st_data_t dummy)
01473 {
01474 VALUE thval = (VALUE)key;
01475 rb_gc_mark(thval);
01476 return ST_CONTINUE;
01477 }
01478
01479 static void
01480 mark_event_hooks(rb_event_hook_t *hook)
01481 {
01482 while (hook) {
01483 rb_gc_mark(hook->data);
01484 hook = hook->next;
01485 }
01486 }
01487
/* GC mark function for the VM wrapper object: marks all living threads,
 * the VM-global object slots, the loading table, event-hook data, and
 * registered trap handler commands. */
void
rb_vm_mark(void *ptr)
{
    int i;

    RUBY_MARK_ENTER("vm");
    RUBY_GC_INFO("-------------------------------------------------\n");
    if (ptr) {
        rb_vm_t *vm = ptr;
        if (vm->living_threads) {
            st_foreach(vm->living_threads, vm_mark_each_thread_func, 0);
        }
        RUBY_MARK_UNLESS_NULL(vm->thgroup_default);
        RUBY_MARK_UNLESS_NULL(vm->mark_object_ary);
        RUBY_MARK_UNLESS_NULL(vm->load_path);
        RUBY_MARK_UNLESS_NULL(vm->loaded_features);
        RUBY_MARK_UNLESS_NULL(vm->top_self);
        RUBY_MARK_UNLESS_NULL(vm->coverages);
        /* preallocated special exception objects (e.g. stack overflow) */
        rb_gc_mark_locations(vm->special_exceptions, vm->special_exceptions + ruby_special_error_count);

        if (vm->loading_table) {
            rb_mark_tbl(vm->loading_table);
        }

        mark_event_hooks(vm->event_hooks);

        /* Signal trap handlers registered via Kernel#trap */
        for (i = 0; i < RUBY_NSIG; i++) {
            if (vm->trap_list[i].cmd)
                rb_gc_mark(vm->trap_list[i].cmd);
        }
    }

    RUBY_MARK_LEAVE("vm");
}
01522
01523 #define vm_free 0
01524
/* Tear down the VM: free the main thread struct, the living-threads
 * table, the global VM lock, the rb_vm_t itself and (when per-VM object
 * spaces are enabled) the object space.  Order matters: the objspace is
 * released last because thread_free may touch it.  Returns 0. */
int
ruby_vm_destruct(void *ptr)
{
    RUBY_FREE_ENTER("vm");
    if (ptr) {
        rb_vm_t *vm = ptr;
        rb_thread_t *th = vm->main_thread;
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
        struct rb_objspace *objspace = vm->objspace;
#endif
        /* recycle the wrapper object before the struct goes away */
        rb_gc_force_recycle(vm->self);
        vm->main_thread = 0;
        if (th) {
            thread_free(th);
        }
        if (vm->living_threads) {
            st_free_table(vm->living_threads);
            vm->living_threads = 0;
        }
        rb_thread_lock_unlock(&vm->global_vm_lock);
        rb_thread_lock_destroy(&vm->global_vm_lock);
        ruby_xfree(vm);
        ruby_current_vm = 0;
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
        if (objspace) {
            rb_objspace_free(objspace);
        }
#endif
    }
    RUBY_FREE_LEAVE("vm");
    return 0;
}
01557
01558 static size_t
01559 vm_memsize(const void *ptr)
01560 {
01561 if (ptr) {
01562 const rb_vm_t *vmobj = ptr;
01563 return sizeof(rb_vm_t) + st_memsize(vmobj->living_threads);
01564 }
01565 else {
01566 return 0;
01567 }
01568 }
01569
/* TypedData bindings for the VM wrapper object.  vm_free is 0: the
 * struct is released explicitly by ruby_vm_destruct, not by the GC. */
static const rb_data_type_t vm_data_type = {
    "VM",
    rb_vm_mark, vm_free, vm_memsize,
};
01574
/* Zero-initialize the VM struct. */
static void
vm_init2(rb_vm_t *vm)
{
    MEMZERO(vm, rb_vm_t, 1);
    vm->src_encoding_index = -1;  /* -1 presumably means "no source encoding chosen yet" -- confirm */
}
01581
01582
01583
01584 #define USE_THREAD_DATA_RECYCLE 1
01585
01586 #if USE_THREAD_DATA_RECYCLE
01587 #define RECYCLE_MAX 64
01588 static VALUE *thread_recycle_stack_slot[RECYCLE_MAX];
01589 static int thread_recycle_stack_count = 0;
01590
01591 static VALUE *
01592 thread_recycle_stack(size_t size)
01593 {
01594 if (thread_recycle_stack_count) {
01595 return thread_recycle_stack_slot[--thread_recycle_stack_count];
01596 }
01597 else {
01598 return ALLOC_N(VALUE, size);
01599 }
01600 }
01601
01602 #else
01603 #define thread_recycle_stack(size) ALLOC_N(VALUE, (size))
01604 #endif
01605
/* Return a VM stack to the recycle pool, or free it when the pool is
 * full (or recycling is compiled out). */
void
rb_thread_recycle_stack_release(VALUE *stack)
{
#if USE_THREAD_DATA_RECYCLE
    if (thread_recycle_stack_count < RECYCLE_MAX) {
        thread_recycle_stack_slot[thread_recycle_stack_count++] = stack;
        return;
    }
#endif
    ruby_xfree(stack);
}
01617
01618 #ifdef USE_THREAD_RECYCLE
01619 static rb_thread_t *
01620 thread_recycle_struct(void)
01621 {
01622 void *p = ALLOC_N(rb_thread_t, 1);
01623 memset(p, 0, sizeof(rb_thread_t));
01624 return p;
01625 }
01626 #endif
01627
01628 void rb_gc_mark_machine_stack(rb_thread_t *th);
01629
/* GC mark function for a thread object.  Marks the VM stack (the value
 * area up to sp plus every control frame's references), then the
 * thread's object-valued fields, thread-local storage and, for threads
 * other than the current one, the saved machine stack and registers. */
void
rb_thread_mark(void *ptr)
{
    rb_thread_t *th = NULL;
    RUBY_MARK_ENTER("thread");
    if (ptr) {
        th = ptr;
        if (th->stack) {
            VALUE *p = th->stack;
            VALUE *sp = th->cfp->sp;
            rb_control_frame_t *cfp = th->cfp;
            rb_control_frame_t *limit_cfp = (void *)(th->stack + th->stack_size);

            /* every live value slot below the stack pointer */
            while (p < sp) {
                rb_gc_mark(*p++);
            }
            /* extra slots reserved above sp (mark_stack_len) */
            rb_gc_mark_locations(p, p + th->mark_stack_len);

            /* walk the control-frame chain up to the stack top */
            while (cfp != limit_cfp) {
                rb_iseq_t *iseq = cfp->iseq;
                rb_gc_mark(cfp->proc);
                if (iseq) {
                    rb_gc_mark(RUBY_VM_NORMAL_ISEQ_P(iseq) ? iseq->self : (VALUE)iseq);
                }
                /* flag the method entry as in-use instead of marking it */
                if (cfp->me) ((rb_method_entry_t *)cfp->me)->mark = 1;
                cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
            }
        }

        /* mark ruby objects held directly by the thread struct */
        RUBY_MARK_UNLESS_NULL(th->first_proc);
        if (th->first_proc) RUBY_MARK_UNLESS_NULL(th->first_args);

        RUBY_MARK_UNLESS_NULL(th->thgroup);
        RUBY_MARK_UNLESS_NULL(th->value);
        RUBY_MARK_UNLESS_NULL(th->errinfo);
        RUBY_MARK_UNLESS_NULL(th->thrown_errinfo);
        RUBY_MARK_UNLESS_NULL(th->local_svar);
        RUBY_MARK_UNLESS_NULL(th->top_self);
        RUBY_MARK_UNLESS_NULL(th->top_wrapper);
        RUBY_MARK_UNLESS_NULL(th->fiber);
        RUBY_MARK_UNLESS_NULL(th->root_fiber);
        RUBY_MARK_UNLESS_NULL(th->stat_insn_usage);
        RUBY_MARK_UNLESS_NULL(th->last_status);

        RUBY_MARK_UNLESS_NULL(th->locking_mutex);

        rb_mark_tbl(th->local_storage);

        /* for suspended threads, conservatively scan the saved machine
         * stack and register set */
        if (GET_THREAD() != th && th->machine_stack_start && th->machine_stack_end) {
            rb_gc_mark_machine_stack(th);
            rb_gc_mark_locations((VALUE *)&th->machine_regs,
                                 (VALUE *)(&th->machine_regs) +
                                 sizeof(th->machine_regs) / sizeof(VALUE));
        }

        mark_event_hooks(th->event_hooks);
    }

    RUBY_MARK_LEAVE("thread");
}
01691
/* Free function for a thread object.  Releases the VM stack (unless a
 * fiber owns it) and the local-storage table; the main thread's struct
 * itself is kept for ruby_vm_destruct to free. */
static void
thread_free(void *ptr)
{
    rb_thread_t *th;
    RUBY_FREE_ENTER("thread");

    if (ptr) {
        th = ptr;

        /* a root fiber shares the stack; do not free it here */
        if (!th->root_fiber) {
            RUBY_FREE_UNLESS_NULL(th->stack);
        }

        /* a dying thread must not be waiting on or holding any mutex */
        if (th->locking_mutex != Qfalse) {
            rb_bug("thread_free: locking_mutex must be NULL (%p:%ld)", (void *)th, th->locking_mutex);
        }
        if (th->keeping_mutexes != NULL) {
            rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, th->keeping_mutexes);
        }

        if (th->local_storage) {
            st_free_table(th->local_storage);
        }

#if USE_VALUE_CACHE
        {
            /* invalidate any values still sitting in the per-thread cache */
            VALUE *ptr = th->value_cache_ptr;
            while (*ptr) {
                VALUE v = *ptr;
                RBASIC(v)->flags = 0;
                RBASIC(v)->klass = 0;
                ptr++;
            }
        }
#endif

        /* the main thread struct is freed by the VM, not here */
        if (th->vm && th->vm->main_thread == th) {
            RUBY_GC_INFO("main thread\n");
        }
        else {
#ifdef USE_SIGALTSTACK
            if (th->altstack) {
                free(th->altstack);
            }
#endif
            ruby_xfree(ptr);
        }
    }
    RUBY_FREE_LEAVE("thread");
}
01742
01743 static size_t
01744 thread_memsize(const void *ptr)
01745 {
01746 if (ptr) {
01747 const rb_thread_t *th = ptr;
01748 size_t size = sizeof(rb_thread_t);
01749
01750 if (!th->root_fiber) {
01751 size += th->stack_size * sizeof(VALUE);
01752 }
01753 if (th->local_storage) {
01754 st_memsize(th->local_storage);
01755 }
01756 return size;
01757 }
01758 else {
01759 return 0;
01760 }
01761 }
01762
/* TypedData bindings for Thread objects. */
static const rb_data_type_t thread_data_type = {
    "VM/thread",
    rb_thread_mark,
    thread_free,
    thread_memsize,
};
01769
/* Allocate a Thread wrapper object.  With USE_THREAD_RECYCLE the
 * zeroed struct comes from thread_recycle_struct and is wrapped;
 * otherwise TypedData_Make_Struct allocates it inside the wrapper. */
static VALUE
thread_alloc(VALUE klass)
{
    VALUE volatile obj;
#ifdef USE_THREAD_RECYCLE
    rb_thread_t *th = thread_recycle_struct();
    obj = TypedData_Wrap_Struct(klass, &thread_data_type, th);
#else
    rb_thread_t *th;
    obj = TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
#endif
    return obj;
}
01783
/* Initialize a thread struct: allocate its VM stack, point cfp at the
 * stack top and push the initial TOP frame. */
static void
th_init2(rb_thread_t *th, VALUE self)
{
    th->self = self;

    /* allocate thread stack */
    th->stack_size = RUBY_VM_THREAD_STACK_SIZE;
    th->stack = thread_recycle_stack(th->stack_size);

    /* control frames grow downward from the stack's upper end */
    th->cfp = (void *)(th->stack + th->stack_size);

    vm_push_frame(th, 0, VM_FRAME_MAGIC_TOP, Qnil, 0, 0,
                  th->stack, 0, 1);

    th->status = THREAD_RUNNABLE;
    th->errinfo = Qnil;
    th->last_status = Qnil;

#if USE_VALUE_CACHE
    th->value_cache_ptr = &th->value_cache[0];
#endif
}
01806
/* Initialize a thread struct; currently identical to th_init2. */
static void
th_init(rb_thread_t *th, VALUE self)
{
    th_init2(th, self);
}
01812
/* Attach a freshly allocated Thread object to the current VM, giving
 * it its own stack and the shared toplevel self.  Returns self. */
static VALUE
ruby_thread_init(VALUE self)
{
    rb_thread_t *th;
    rb_vm_t *vm = GET_THREAD()->vm;
    GetThreadPtr(self, th);

    th_init(th, self);
    th->vm = vm;

    th->top_wrapper = 0;
    th->top_self = rb_vm_top_self();
    return self;
}
01827
01828 VALUE
01829 rb_thread_alloc(VALUE klass)
01830 {
01831 VALUE self = thread_alloc(klass);
01832 ruby_thread_init(self);
01833 return self;
01834 }
01835
/* Backend of `def`: register miseq as method `id` on the class taken
 * from the cref (or on obj's singleton class when is_singleton).
 * Raises TypeError when no target class exists or the receiver cannot
 * carry a singleton class; raises on frozen singleton receivers. */
static void
vm_define_method(rb_thread_t *th, VALUE obj, ID id, VALUE iseqval,
                 rb_num_t is_singleton, NODE *cref)
{
    VALUE klass = cref->nd_clss;
    int noex = (int)cref->nd_visi;
    rb_iseq_t *miseq;
    GetISeqPtr(iseqval, miseq);

    if (NIL_P(klass)) {
        rb_raise(rb_eTypeError, "no class/module to add method");
    }

    if (is_singleton) {
        /* Fixnums and Symbols have no singleton class */
        if (FIXNUM_P(obj) || SYMBOL_P(obj)) {
            rb_raise(rb_eTypeError,
                     "can't define singleton method \"%s\" for %s",
                     rb_id2name(id), rb_obj_classname(obj));
        }

        if (OBJ_FROZEN(obj)) {
            rb_error_frozen("object");
        }

        klass = rb_singleton_class(obj);
        noex = NOEX_PUBLIC;
    }

    /* bind the iseq to its defining cref, class and name */
    COPY_CREF(miseq->cref_stack, cref);
    miseq->klass = klass;
    miseq->defined_method_id = id;
    rb_add_method(klass, id, VM_METHOD_TYPE_ISEQ, miseq, noex);

    /* module_function: also install a public singleton copy */
    if (!is_singleton && noex == NOEX_MODFUNC) {
        rb_add_method(rb_singleton_class(klass), id, VM_METHOD_TYPE_ISEQ, miseq, NOEX_PUBLIC);
    }
    INC_VM_STATE_VERSION();
}
01875
/* Evaluate `expr` with the current control frame temporarily popped,
 * so that frame-relative lookups (e.g. rb_vm_cref) see the caller's
 * frame rather than the core method's own frame. */
#define REWIND_CFP(expr) do { \
    rb_thread_t *th__ = GET_THREAD(); \
    th__->cfp++; expr; th__->cfp--; \
} while (0)
01880
/* FrozenCore#core#define_method -- backend of `def name ... end`. */
static VALUE
m_core_define_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
        vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 0, rb_vm_cref());
    });
    return Qnil;
}
01889
/* FrozenCore#core#define_singleton_method -- backend of `def obj.name`. */
static VALUE
m_core_define_singleton_method(VALUE self, VALUE cbase, VALUE sym, VALUE iseqval)
{
    REWIND_CFP({
        vm_define_method(GET_THREAD(), cbase, SYM2ID(sym), iseqval, 1, rb_vm_cref());
    });
    return Qnil;
}
01898
/* FrozenCore#core#set_method_alias -- backend of `alias new old`. */
static VALUE
m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
        rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
01907
/* FrozenCore#core#set_variable_alias -- backend of `alias $new $old`. */
static VALUE
m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
{
    REWIND_CFP({
        rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
    });
    return Qnil;
}
01916
/* FrozenCore#core#undef_method -- backend of `undef name`.  Bumps the
 * VM state version so method caches are invalidated. */
static VALUE
m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
{
    REWIND_CFP({
        rb_undef(cbase, SYM2ID(sym));
        INC_VM_STATE_VERSION();
    });
    return Qnil;
}
01926
/* FrozenCore#core#set_postexe -- backend of `END { ... }`.  Wraps the
 * given iseq into a Proc (via the caller's block slot) and registers it
 * to run at interpreter exit. */
static VALUE
m_core_set_postexe(VALUE self, VALUE iseqval)
{
    REWIND_CFP({
        rb_iseq_t *blockiseq;
        rb_block_t *blockptr;
        rb_thread_t *th = GET_THREAD();
        rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(th, th->cfp);
        VALUE proc;
        extern void rb_call_end_proc(VALUE data);

        GetISeqPtr(iseqval, blockiseq);

        /* borrow the caller frame's block slot to build the Proc */
        blockptr = RUBY_VM_GET_BLOCK_PTR_IN_CFP(cfp);
        blockptr->iseq = blockiseq;
        blockptr->proc = 0;

        proc = rb_vm_make_proc(th, blockptr, rb_cProc);
        rb_set_end_proc(rb_call_end_proc, proc);
    });
    return Qnil;
}
01949
01950 extern VALUE *rb_gc_stack_start;
01951 extern size_t rb_gc_stack_maxsize;
01952 #ifdef __ia64
01953 extern VALUE *rb_gc_register_stack_start;
01954 #endif
01955
01956
01957
01958
/* RubyVM.SDR (VMDEBUG builds only): emit the VM bug report dump. */
static VALUE
sdr(void)
{
    rb_vm_bugreport();
    return Qnil;
}
01965
01966
/* RubyVM.NSDR (VMDEBUG builds only): return the native C-level
 * backtrace as an Array of Strings; empty array when backtrace(3) is
 * unavailable at build time. */
static VALUE
nsdr(void)
{
    VALUE ary = rb_ary_new();
#if HAVE_BACKTRACE
#include <execinfo.h>
#define MAX_NATIVE_TRACE 1024
    static void *trace[MAX_NATIVE_TRACE];
    int n = backtrace(trace, MAX_NATIVE_TRACE);
    char **syms = backtrace_symbols(trace, n);
    int i;

    /* backtrace_symbols mallocs its result; NULL means out of memory */
    if (syms == 0) {
        rb_memerror();
    }

    for (i=0; i<n; i++) {
        rb_ary_push(ary, rb_str_new2(syms[i]));
    }
    free(syms);
#endif
    return ary;
}
01990
01991 void
01992 Init_VM(void)
01993 {
01994 VALUE opts;
01995 VALUE klass;
01996 VALUE fcore;
01997
01998
01999 rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
02000 rb_undef_alloc_func(rb_cRubyVM);
02001 rb_undef_method(CLASS_OF(rb_cRubyVM), "new");
02002
02003
02004 fcore = rb_class_new(rb_cBasicObject);
02005 RBASIC(fcore)->flags = T_ICLASS;
02006 klass = rb_singleton_class(fcore);
02007 rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
02008 rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
02009 rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
02010 rb_define_method_id(klass, id_core_define_method, m_core_define_method, 3);
02011 rb_define_method_id(klass, id_core_define_singleton_method, m_core_define_singleton_method, 3);
02012 rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 1);
02013 rb_obj_freeze(fcore);
02014 rb_gc_register_mark_object(fcore);
02015 rb_mRubyVMFrozenCore = fcore;
02016
02017
02018 rb_cEnv = rb_define_class_under(rb_cRubyVM, "Env", rb_cObject);
02019 rb_undef_alloc_func(rb_cEnv);
02020 rb_undef_method(CLASS_OF(rb_cEnv), "new");
02021
02022
02023 rb_cThread = rb_define_class("Thread", rb_cObject);
02024 rb_undef_alloc_func(rb_cThread);
02025
02026
02027 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN", rb_hash_new());
02028 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_REGS", rb_hash_new());
02029 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_INSN_BIGRAM", rb_hash_new());
02030 rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
02031
02032 #if OPT_DIRECT_THREADED_CODE
02033 rb_ary_push(opts, rb_str_new2("direct threaded code"));
02034 #elif OPT_TOKEN_THREADED_CODE
02035 rb_ary_push(opts, rb_str_new2("token threaded code"));
02036 #elif OPT_CALL_THREADED_CODE
02037 rb_ary_push(opts, rb_str_new2("call threaded code"));
02038 #endif
02039
02040 #if OPT_BASIC_OPERATIONS
02041 rb_ary_push(opts, rb_str_new2("optimize basic operation"));
02042 #endif
02043
02044 #if OPT_STACK_CACHING
02045 rb_ary_push(opts, rb_str_new2("stack caching"));
02046 #endif
02047 #if OPT_OPERANDS_UNIFICATION
02048 rb_ary_push(opts, rb_str_new2("operands unification]"));
02049 #endif
02050 #if OPT_INSTRUCTIONS_UNIFICATION
02051 rb_ary_push(opts, rb_str_new2("instructions unification"));
02052 #endif
02053 #if OPT_INLINE_METHOD_CACHE
02054 rb_ary_push(opts, rb_str_new2("inline method cache"));
02055 #endif
02056 #if OPT_BLOCKINLINING
02057 rb_ary_push(opts, rb_str_new2("block inlining"));
02058 #endif
02059
02060
02061 rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
02062
02063
02064 #if VMDEBUG
02065 rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
02066 rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
02067 #else
02068 (void)sdr;
02069 (void)nsdr;
02070 #endif
02071
02072
02073 {
02074 rb_vm_t *vm = ruby_current_vm;
02075 rb_thread_t *th = GET_THREAD();
02076 VALUE filename = rb_str_new2("<main>");
02077 volatile VALUE iseqval = rb_iseq_new(0, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
02078 volatile VALUE th_self;
02079 rb_iseq_t *iseq;
02080
02081
02082 vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
02083
02084
02085 th_self = th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
02086 vm->main_thread = th;
02087 vm->running_thread = th;
02088 th->vm = vm;
02089 th->top_wrapper = 0;
02090 th->top_self = rb_vm_top_self();
02091 rb_thread_set_current(th);
02092
02093 vm->living_threads = st_init_numtable();
02094 st_insert(vm->living_threads, th_self, (st_data_t) th->thread_id);
02095
02096 rb_gc_register_mark_object(iseqval);
02097 GetISeqPtr(iseqval, iseq);
02098 th->cfp->iseq = iseq;
02099 th->cfp->pc = iseq->iseq_encoded;
02100 th->cfp->self = th->top_self;
02101
02102 rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
02103 }
02104 vm_init_redefined_flag();
02105 }
02106
/* Rewrite the filename of the bottom-most (main) frame's iseq so the
 * new program name shows up in backtraces. */
void
rb_vm_set_progname(VALUE filename)
{
    rb_thread_t *th = GET_VM()->main_thread;
    /* frames grow downward: the first frame pushed sits just below the
     * upper end of the stack */
    rb_control_frame_t *cfp = (void *)(th->stack + th->stack_size);
    --cfp;
    cfp->iseq->filename = filename;
}
02115
02116 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
02117 struct rb_objspace *rb_objspace_alloc(void);
02118 #endif
02119 void ruby_thread_init_stack(rb_thread_t *th);
02120
02121 extern void Init_native_thread(void);
02122
/* Bootstrap the bare VM and its main thread struct before the object
 * system exists -- hence raw malloc instead of Ruby allocation. */
void
Init_BareVM(void)
{
    /* VM bootstrap: phase 1 */
    rb_vm_t * vm = malloc(sizeof(*vm));
    rb_thread_t * th = malloc(sizeof(*th));
    if (!vm || !th) {
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    MEMZERO(th, rb_thread_t, 1);

    /* register as current thread before anything can call GET_THREAD */
    rb_thread_set_current_raw(th);

    vm_init2(vm);
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    vm->objspace = rb_objspace_alloc();
#endif
    ruby_current_vm = vm;

    Init_native_thread();
    th_init2(th, 0);
    th->vm = vm;
    ruby_thread_init_stack(th);
}
02148
02149
02150
/* #to_s of the toplevel self: always the string "main". */
static VALUE
main_to_s(VALUE obj)
{
    return rb_str_new2("main");
}
02156
02157 VALUE
02158 rb_vm_top_self(void)
02159 {
02160 return GET_VM()->top_self;
02161 }
02162
/* Create the toplevel `main` object and the VM's mark-object array. */
void
Init_top_self(void)
{
    rb_vm_t *vm = GET_VM();

    vm->top_self = rb_obj_alloc(rb_cObject);
    rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);

    /* initialize mark object array */
    vm->mark_object_ary = rb_ary_tmp_new(1);
}
02174
/* Address of the given VM's verbose flag storage. */
VALUE *
ruby_vm_verbose_ptr(rb_vm_t *vm)
{
    return &vm->verbose;
}
02180
/* Address of the given VM's debug flag storage. */
VALUE *
ruby_vm_debug_ptr(rb_vm_t *vm)
{
    return &vm->debug;
}
02186
/* Address of the current VM's verbose flag storage. */
VALUE *
rb_ruby_verbose_ptr(void)
{
    return ruby_vm_verbose_ptr(GET_VM());
}
02192
/* Address of the current VM's debug flag storage. */
VALUE *
rb_ruby_debug_ptr(void)
{
    return ruby_vm_debug_ptr(GET_VM());
}
02198