#ifndef EVAL_INTERN_H_INCLUDED
#define EVAL_INTERN_H_INCLUDED

#define PASS_PASSED_BLOCK() \
  (GET_THREAD()->passed_block = \
   GC_GUARDED_PTR_REF((rb_block_t *)GET_THREAD()->cfp->lfp[0]))

#include "ruby.h"
#include "node.h"
#include "util.h"
#include "rubysig.h"
#include "yarvcore.h"

#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#ifndef EXIT_SUCCESS
#define EXIT_SUCCESS 0
#endif
#ifndef EXIT_FAILURE
#define EXIT_FAILURE 1
#endif

#include <stdio.h>
#include <setjmp.h>

#include "st.h"
#include "dln.h"

#ifdef __APPLE__
#include <crt_externs.h>
#endif

/* Make alloca work the best possible way.  */
#ifdef __GNUC__
# ifndef atarist
#  ifndef alloca
#   define alloca __builtin_alloca
#  endif
# endif /* atarist */
#else
# ifdef HAVE_ALLOCA_H
#  include <alloca.h>
# else
#  ifdef _AIX
#pragma alloca
#  else
#   ifndef alloca /* predefined by HP cc +Olibcalls */
void *alloca();
#   endif
#  endif /* AIX */
# endif /* HAVE_ALLOCA_H */
#endif /* __GNUC__ */

#ifdef HAVE_STDARG_PROTOTYPES
#include <stdarg.h>
#define va_init_list(a,b) va_start(a,b)
#else
#include <varargs.h>
#define va_init_list(a,b) va_start(a)
#endif

#ifndef HAVE_STRING_H
char *strrchr _((const char *, const char));
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#ifdef __BEOS__
#include <net/socket.h>
#endif

#ifdef __MACOS__
#include "macruby_private.h"
#endif

#ifdef __VMS
#include "vmsruby_private.h"
#endif

#if !defined(setjmp) && defined(HAVE__SETJMP)
#define ruby_setjmp(env) _setjmp(env)
#define ruby_longjmp(env,val) _longjmp(env,val)
#else
#define ruby_setjmp(env) setjmp(env)
#define ruby_longjmp(env,val) longjmp(env,val)
#endif

#include <sys/types.h>
#include <signal.h>
#include <errno.h>

#if defined(__VMS)
#pragma nostandard
#endif

#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

/*
  Solaris sys/select.h switches select to select_large_fdset to support larger
  file descriptors if FD_SETSIZE is larger than 1024 in a 32-bit environment.
  But Ruby doesn't change FD_SETSIZE because fd_set is allocated dynamically.
  So the following definition is required to use select_large_fdset.
*/
#ifdef HAVE_SELECT_LARGE_FDSET
#define select(n, r, w, e, t) select_large_fdset(n, r, w, e, t)
#endif

#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif

#include <sys/stat.h>

/*
 * Non-local exit (tag) support: TH_PUSH_TAG/TH_POP_TAG bracket a region
 * protected by ruby_setjmp() (see TH_EXEC_TAG below); TH_JUMP_TAG performs
 * the matching ruby_longjmp() back to the most recently pushed tag.
 */
#define TH_PUSH_TAG(th) do { \
  rb_thread_t * const _th = th; \
  struct rb_vm_tag _tag; \
  _tag.tag = 0; \
  _tag.prev = _th->tag; \
  _th->tag = &_tag;

#define TH_POP_TAG() \
  _th->tag = _tag.prev; \
} while (0)

#define TH_POP_TAG2() \
  _th->tag = _tag.prev

#define PUSH_TAG(ptag) TH_PUSH_TAG(GET_THREAD())
#define POP_TAG() TH_POP_TAG()
#define POP_TAG_INIT() } while (0)

#define PUSH_THREAD_TAG() \
  PUSH_TAG(PROT_THREAD)
#define POP_THREAD_TAG() \
  POP_TAG()

#define PROT_NONE   Qfalse     /* 0 */
#define PROT_THREAD Qtrue      /* 2 */
#define PROT_FUNC   INT2FIX(0) /* 1 */
#define PROT_LOOP   INT2FIX(1) /* 3 */
#define PROT_LAMBDA INT2FIX(2) /* 5 */
#define PROT_YIELD  INT2FIX(3) /* 7 */
#define PROT_TOP    INT2FIX(4) /* 9 */

#define TH_EXEC_TAG() \
  (FLUSH_REGISTER_WINDOWS, ruby_setjmp(_th->tag->buf))

#define EXEC_TAG() \
  TH_EXEC_TAG()

#define TH_JUMP_TAG(th, st) do { \
  ruby_longjmp(th->tag->buf,(st)); \
} while (0)

#define JUMP_TAG(st) TH_JUMP_TAG(GET_THREAD(), st)

/* jump states delivered to EXEC_TAG() by JUMP_TAG() */
#define TAG_RETURN   0x1
#define TAG_BREAK    0x2
#define TAG_NEXT     0x3
#define TAG_RETRY    0x4
#define TAG_REDO     0x5
#define TAG_RAISE    0x6
#define TAG_THROW    0x7
#define TAG_FATAL    0x8
#define TAG_CONTCALL 0x9
#define TAG_THREAD   0xa
#define TAG_MASK     0xf

#define SCOPE_TEST(f) \
  (ruby_cref()->nd_visi & (f))
#define SCOPE_CHECK(f) \
  (ruby_cref()->nd_visi == (f))
#define SCOPE_SET(f) \
{ \
  ruby_cref()->nd_visi = (f); \
}

extern VALUE exception_error;
extern VALUE sysstack_error;

void rb_thread_cleanup _((void));
void rb_thread_wait_other_threads _((void));

int thread_set_raised(rb_thread_t *th);
int thread_reset_raised(rb_thread_t *th);

VALUE rb_f_eval(int argc, VALUE *argv, VALUE self);
VALUE rb_make_exception _((int argc, VALUE *argv));

NORETURN(void rb_raise_jump _((VALUE)));
NORETURN(void print_undef _((VALUE, ID)));
NORETURN(void th_localjump_error(const char *, VALUE, int));
NORETURN(void th_jump_tag_but_local_jump(int, VALUE));

VALUE th_compile(rb_thread_t *th, VALUE str, VALUE file, VALUE line);

NODE *th_get_cref(rb_thread_t *th, rb_iseq_t *iseq, rb_control_frame_t *cfp);
NODE *th_cref_push(rb_thread_t *th, VALUE, int);
NODE *th_set_special_cref(rb_thread_t *th, VALUE *lfp, NODE *cref_stack);

/* Walk back from cfp to the nearest Ruby-level control frame (one whose
   iseq is a normal instruction sequence); returns 0 if none is found. */
static rb_control_frame_t *
th_get_ruby_level_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
{
    rb_iseq_t *iseq = 0;
    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
            iseq = cfp->iseq;
            break;
        }
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
    if (!iseq) {
        return 0;
    }
    return cfp;
}

/* cref (lexical class/visibility scope) of the current Ruby-level frame */
static inline NODE *
ruby_cref()
{
    rb_thread_t *th = GET_THREAD();
    rb_control_frame_t *cfp = th_get_ruby_level_cfp(th, th->cfp);
    return th_get_cref(th, cfp->iseq, cfp);
}

VALUE th_get_cbase(rb_thread_t *th);
VALUE rb_obj_is_proc(VALUE);
void rb_vm_check_redefinition_opt_method(NODE *node);
void rb_thread_terminate_all(void);

#define ruby_cbase() th_get_cbase(GET_THREAD())

#endif /* EVAL_INTERN_H_INCLUDED */
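
/*
 * Usage sketch (illustration only): the tag macros above are typically used
 * in the pattern below, mirroring the protect/rescue code in eval.c.
 * `example_protect' and `body_that_may_jump' are hypothetical placeholders,
 * not functions defined by Ruby.
 *
 *   static VALUE
 *   example_protect(rb_thread_t *th)
 *   {
 *       volatile VALUE result = Qnil;
 *       int state;
 *
 *       TH_PUSH_TAG(th);
 *       if ((state = EXEC_TAG()) == 0) {
 *           // runs normally unless a callee exits non-locally via
 *           // JUMP_TAG(TAG_xxx), which longjmp()s back to EXEC_TAG()
 *           result = body_that_may_jump();
 *       }
 *       TH_POP_TAG();
 *
 *       if (state) {
 *           // propagate the pending TAG_* state to the enclosing tag
 *           TH_JUMP_TAG(th, state);
 *       }
 *       return result;
 *   }
 */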