summary | refs | log | tree | commit | diff
path: root/eval_intern.h
diff options
context:
space:
mode:
Diffstat (limited to 'eval_intern.h')
-rw-r--r--  eval_intern.h  405
1 files changed, 245 insertions, 160 deletions
diff --git a/eval_intern.h b/eval_intern.h
index e5680a2731..91808f1f29 100644
--- a/eval_intern.h
+++ b/eval_intern.h
@@ -1,17 +1,27 @@
-
-#ifndef EVAL_INTERN_H_INCLUDED
-#define EVAL_INTERN_H_INCLUDED
-
-#define PASS_PASSED_BLOCK() \
- (GET_THREAD()->passed_block = \
- GC_GUARDED_PTR_REF((rb_block_t *)GET_THREAD()->cfp->lfp[0]))
+#ifndef RUBY_EVAL_INTERN_H
+#define RUBY_EVAL_INTERN_H
#include "ruby/ruby.h"
-#include "ruby/node.h"
-#include "ruby/util.h"
-#include "ruby/signal.h"
#include "vm_core.h"
+static inline void
+vm_passed_block_handler_set(rb_execution_context_t *ec, VALUE block_handler)
+{
+ vm_block_handler_verify(block_handler);
+ ec->passed_block_handler = block_handler;
+}
+
+static inline void
+pass_passed_block_handler(rb_execution_context_t *ec)
+{
+ VALUE block_handler = rb_vm_frame_block_handler(ec->cfp);
+ vm_passed_block_handler_set(ec, block_handler);
+ VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_PASSED);
+}
+
+#define PASS_PASSED_BLOCK_HANDLER_EC(ec) pass_passed_block_handler(ec)
+#define PASS_PASSED_BLOCK_HANDLER() pass_passed_block_handler(GET_EC())
+
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
@@ -23,80 +33,47 @@
#endif
#include <stdio.h>
-#include <setjmp.h>
-
-#include "ruby/st.h"
-#include "dln.h"
-
-#ifdef __APPLE__
-#include <crt_externs.h>
+#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
+# include "wasm/setjmp.h"
+#else
+# include <setjmp.h>
#endif
-/* Make alloca work the best possible way. */
-#ifdef __GNUC__
-# ifndef atarist
-# ifndef alloca
-# define alloca __builtin_alloca
-# endif
-# endif /* atarist */
-#else
-# ifdef HAVE_ALLOCA_H
-# include <alloca.h>
+#ifdef __APPLE__
+# ifdef HAVE_CRT_EXTERNS_H
+# include <crt_externs.h>
# else
-# ifdef _AIX
-#pragma alloca
-# else
-# ifndef alloca /* predefined by HP cc +Olibcalls */
-void *alloca();
-# endif
-# endif /* AIX */
-# endif /* HAVE_ALLOCA_H */
-#endif /* __GNUC__ */
-
-#ifdef HAVE_STDARG_PROTOTYPES
-#include <stdarg.h>
-#define va_init_list(a,b) va_start(a,b)
-#else
-#include <varargs.h>
-#define va_init_list(a,b) va_start(a)
+# include "missing/crt_externs.h"
+# endif
#endif
#ifndef HAVE_STRING_H
-char *strrchr _((const char *, const char));
+char *strrchr(const char *, const char);
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
-#ifdef __BEOS__
+#ifdef HAVE_NET_SOCKET_H
#include <net/socket.h>
#endif
-#ifdef __MACOS__
-#include "macruby_private.h"
-#endif
-
-#ifdef __VMS
-#include "vmsruby_private.h"
-#endif
-
-#if !defined(setjmp) && defined(HAVE__SETJMP)
-#define ruby_setjmp(env) _setjmp(env)
-#define ruby_longjmp(env,val) _longjmp(env,val)
-#else
-#define ruby_setjmp(env) setjmp(env)
-#define ruby_longjmp(env,val) longjmp(env,val)
+#define ruby_setjmp(env) RUBY_SETJMP(env)
+#define ruby_longjmp(env,val) RUBY_LONGJMP((env),(val))
+#ifdef __CYGWIN__
+# ifndef _setjmp
+int _setjmp(jmp_buf);
+# endif
+# ifndef _longjmp
+NORETURN(void _longjmp(jmp_buf, int));
+# endif
#endif
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
-#if defined(__VMS)
-#pragma nostandard
-#endif
-
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
@@ -108,7 +85,8 @@ char *strrchr _((const char *, const char));
So following definition is required to use select_large_fdset.
*/
#ifdef HAVE_SELECT_LARGE_FDSET
-#define select(n, r, w, e, t) select_large_fdset(n, r, w, e, t)
+#define select(n, r, w, e, t) select_large_fdset((n), (r), (w), (e), (t))
+extern int select_large_fdset(int, fd_set *, fd_set *, fd_set *, struct timeval *);
#endif
#ifdef HAVE_SYS_PARAM_H
@@ -117,130 +95,237 @@ char *strrchr _((const char *, const char));
#include <sys/stat.h>
-#define SAVE_ROOT_JMPBUF(th, stmt) do \
- if (ruby_setjmp((th)->root_jmpbuf) == 0) { \
- stmt; \
- } \
- else { \
- rb_fiber_start(); \
- } while (0)
-
-#define TH_PUSH_TAG(th) do { \
- rb_thread_t * const _th = th; \
+#define EC_PUSH_TAG(ec) do { \
+ rb_execution_context_t * const _ec = (ec); \
struct rb_vm_tag _tag; \
- _tag.tag = 0; \
- _tag.prev = _th->tag; \
- _th->tag = &_tag;
-
-#define TH_POP_TAG() \
- _th->tag = _tag.prev; \
+ _tag.state = TAG_NONE; \
+ _tag.tag = Qundef; \
+ _tag.prev = _ec->tag; \
+ _tag.lock_rec = rb_ec_vm_lock_rec(_ec); \
+ rb_vm_tag_jmpbuf_init(&_tag.buf); \
+
+#define EC_POP_TAG() \
+ _ec->tag = _tag.prev; \
+ rb_vm_tag_jmpbuf_deinit(&_tag.buf); \
} while (0)
-#define TH_POP_TAG2() \
- _th->tag = _tag.prev
+#define EC_TMPPOP_TAG() \
+ _ec->tag = _tag.prev
-#define PUSH_TAG() TH_PUSH_TAG(GET_THREAD())
-#define POP_TAG() TH_POP_TAG()
+#define EC_REPUSH_TAG() (void)(_ec->tag = &_tag)
-#define TH_EXEC_TAG() \
- (FLUSH_REGISTER_WINDOWS, ruby_setjmp(_th->tag->buf))
+#if defined __GNUC__ && __GNUC__ == 4 && (__GNUC_MINOR__ >= 6 && __GNUC_MINOR__ <= 8) || defined __clang__
+/* This macro prevents GCC 4.6--4.8 from emitting maybe-uninitialized warnings.
+ * This macro also prevents Clang from dumping core in EC_EXEC_TAG().
+ * (I confirmed Clang 4.0.1 and 5.0.0.)
+ */
+# define VAR_FROM_MEMORY(var) __extension__(*(__typeof__(var) volatile *)&(var))
+# define VAR_INITIALIZED(var) ((var) = VAR_FROM_MEMORY(var))
+# define VAR_NOCLOBBERED(var) volatile var
+#else
+# define VAR_FROM_MEMORY(var) (var)
+# define VAR_INITIALIZED(var) ((void)&(var))
+# define VAR_NOCLOBBERED(var) var
+#endif
-#define EXEC_TAG() \
- TH_EXEC_TAG()
+static inline void
+rb_ec_vm_lock_rec_check(const rb_execution_context_t *ec, unsigned int recorded_lock_rec)
+{
+ unsigned int current_lock_rec = rb_ec_vm_lock_rec(ec);
+ if (current_lock_rec != recorded_lock_rec) {
+ rb_ec_vm_lock_rec_release(ec, recorded_lock_rec, current_lock_rec);
+ }
+}
-#define TH_JUMP_TAG(th, st) do { \
- ruby_longjmp(th->tag->buf,(st)); \
-} while (0)
+/* clear ec->tag->state, and return the value */
+static inline int
+rb_ec_tag_state(const rb_execution_context_t *ec)
+{
+ struct rb_vm_tag *tag = ec->tag;
+ enum ruby_tag_type state = tag->state;
+ tag->state = TAG_NONE;
+ rb_ec_vm_lock_rec_check(ec, tag->lock_rec);
+ RBIMPL_ASSUME(state > TAG_NONE);
+ RBIMPL_ASSUME(state <= TAG_FATAL);
+ return state;
+}
-#define JUMP_TAG(st) TH_JUMP_TAG(GET_THREAD(), st)
+NORETURN(static inline void rb_ec_tag_jump(const rb_execution_context_t *ec, enum ruby_tag_type st));
+static inline void
+rb_ec_tag_jump(const rb_execution_context_t *ec, enum ruby_tag_type st)
+{
+ RUBY_ASSERT(st > TAG_NONE && st <= TAG_FATAL, ": Invalid tag jump: %d", (int)st);
+ ec->tag->state = st;
+ ruby_longjmp(RB_VM_TAG_JMPBUF_GET(ec->tag->buf), 1);
+}
-#define TAG_RETURN 0x1
-#define TAG_BREAK 0x2
-#define TAG_NEXT 0x3
-#define TAG_RETRY 0x4
-#define TAG_REDO 0x5
-#define TAG_RAISE 0x6
-#define TAG_THROW 0x7
-#define TAG_FATAL 0x8
-#define TAG_MASK 0xf
+/*
+ setjmp() in assignment expression rhs is undefined behavior
+ [ISO/IEC 9899:1999] 7.13.1.1
+*/
+#define EC_EXEC_TAG() \
+ (UNLIKELY(ruby_setjmp(RB_VM_TAG_JMPBUF_GET(_tag.buf))) ? rb_ec_tag_state(VAR_FROM_MEMORY(_ec)) : (EC_REPUSH_TAG(), 0))
+
+#define EC_JUMP_TAG(ec, st) rb_ec_tag_jump(ec, st)
-#define NEW_THROW_OBJECT(val, pt, st) \
- ((VALUE)NEW_NODE(NODE_LIT, (val), (pt), (st)))
-#define SET_THROWOBJ_CATCH_POINT(obj, val) \
- (RNODE((obj))->u2.value = (val))
-#define SET_THROWOBJ_STATE(obj, val) \
- (RNODE((obj))->u3.value = (val))
+#define INTERNAL_EXCEPTION_P(exc) FIXNUM_P(exc)
-#define GET_THROWOBJ_VAL(obj) ((VALUE)RNODE((obj))->u1.value)
-#define GET_THROWOBJ_CATCH_POINT(obj) ((VALUE*)RNODE((obj))->u2.value)
-#define GET_THROWOBJ_STATE(obj) ((int)RNODE((obj))->u3.value)
+/* CREF operators */
-#define SCOPE_TEST(f) \
- (ruby_cref()->nd_visi & (f))
+#define CREF_FL_PUSHED_BY_EVAL IMEMO_FL_USER1
+#define CREF_FL_OMOD_SHARED IMEMO_FL_USER2
+#define CREF_FL_SINGLETON IMEMO_FL_USER3
-#define SCOPE_CHECK(f) \
- (ruby_cref()->nd_visi == (f))
+static inline int CREF_SINGLETON(const rb_cref_t *cref);
-#define SCOPE_SET(f) \
-{ \
- ruby_cref()->nd_visi = (f); \
+static inline VALUE
+CREF_CLASS(const rb_cref_t *cref)
+{
+ if (CREF_SINGLETON(cref)) {
+ return CLASS_OF(cref->klass_or_self);
+ }
+ else {
+ return cref->klass_or_self;
+ }
}
-#define CHECK_STACK_OVERFLOW(cfp, margin) do \
- if (((VALUE *)(cfp)->sp) + (margin) >= ((VALUE *)cfp)) { \
- rb_exc_raise(sysstack_error); \
- } \
-while (0)
+static inline VALUE
+CREF_CLASS_FOR_DEFINITION(const rb_cref_t *cref)
+{
+ if (CREF_SINGLETON(cref)) {
+ return rb_singleton_class(cref->klass_or_self);
+ }
+ else {
+ return cref->klass_or_self;
+ }
+}
-void rb_thread_cleanup _((void));
-void rb_thread_wait_other_threads _((void));
+static inline rb_cref_t *
+CREF_NEXT(const rb_cref_t *cref)
+{
+ return cref->next;
+}
-int thread_set_raised(rb_thread_t *th);
-int thread_reset_raised(rb_thread_t *th);
+static inline const rb_scope_visibility_t *
+CREF_SCOPE_VISI(const rb_cref_t *cref)
+{
+ return &cref->scope_visi;
+}
-VALUE rb_f_eval(int argc, VALUE *argv, VALUE self);
-VALUE rb_make_exception _((int argc, VALUE *argv));
+static inline VALUE
+CREF_REFINEMENTS(const rb_cref_t *cref)
+{
+ return cref->refinements;
+}
-NORETURN(void rb_fiber_start(void));
+static inline void
+CREF_REFINEMENTS_SET(rb_cref_t *cref, VALUE refs)
+{
+ RB_OBJ_WRITE(cref, &cref->refinements, refs);
+}
-NORETURN(void rb_raise_jump _((VALUE)));
-NORETURN(void print_undef _((VALUE, ID)));
-NORETURN(void vm_localjump_error(const char *, VALUE, int));
-NORETURN(void vm_jump_tag_but_local_jump(int, VALUE));
+static inline int
+CREF_PUSHED_BY_EVAL(const rb_cref_t *cref)
+{
+ return cref->flags & CREF_FL_PUSHED_BY_EVAL;
+}
-NODE *vm_get_cref(rb_thread_t *th, rb_iseq_t *iseq, rb_control_frame_t *cfp);
-NODE *vm_cref_push(rb_thread_t *th, VALUE, int);
-NODE *vm_set_special_cref(rb_thread_t *th, VALUE *lfp, NODE * cref_stack);
-VALUE vm_make_jump_tag_but_local_jump(int state, VALUE val);
+static inline void
+CREF_PUSHED_BY_EVAL_SET(rb_cref_t *cref)
+{
+ cref->flags |= CREF_FL_PUSHED_BY_EVAL;
+}
-static rb_control_frame_t *
-vm_get_ruby_level_cfp(rb_thread_t *th, rb_control_frame_t *cfp)
+static inline int
+CREF_SINGLETON(const rb_cref_t *cref)
{
- while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
- if (RUBY_VM_NORMAL_ISEQ_P(cfp->iseq)) {
- return cfp;
- }
- cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
- }
- return 0;
+ return cref->flags & CREF_FL_SINGLETON;
}
-static inline NODE *
-ruby_cref()
+static inline void
+CREF_SINGLETON_SET(rb_cref_t *cref)
{
- rb_thread_t *th = GET_THREAD();
- rb_control_frame_t *cfp = vm_get_ruby_level_cfp(th, th->cfp);
- return vm_get_cref(th, cfp->iseq, cfp);
+ cref->flags |= CREF_FL_SINGLETON;
}
-VALUE vm_get_cbase(rb_thread_t *th);
-VALUE rb_obj_is_proc(VALUE);
-void rb_vm_check_redefinition_opt_method(NODE *node);
-VALUE rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg, rb_block_t *blockptr, VALUE filename);
-void rb_thread_terminate_all(void);
-void rb_vm_set_eval_stack(rb_thread_t *, VALUE iseq);
-VALUE rb_vm_top_self();
+static inline int
+CREF_OMOD_SHARED(const rb_cref_t *cref)
+{
+ return cref->flags & CREF_FL_OMOD_SHARED;
+}
+
+static inline void
+CREF_OMOD_SHARED_SET(rb_cref_t *cref)
+{
+ cref->flags |= CREF_FL_OMOD_SHARED;
+}
-#define ruby_cbase() vm_get_cbase(GET_THREAD())
+static inline void
+CREF_OMOD_SHARED_UNSET(rb_cref_t *cref)
+{
+ cref->flags &= ~CREF_FL_OMOD_SHARED;
+}
+
+enum {
+ RAISED_EXCEPTION = 1,
+ RAISED_STACKOVERFLOW = 2,
+ RAISED_NOMEMORY = 4
+};
+#define rb_ec_raised_set(ec, f) ((ec)->raised_flag |= (f))
+#define rb_ec_raised_reset(ec, f) ((ec)->raised_flag &= ~(f))
+#define rb_ec_raised_p(ec, f) (((ec)->raised_flag & (f)) != 0)
+#define rb_ec_raised_clear(ec) ((ec)->raised_flag = 0)
+int rb_ec_set_raised(rb_execution_context_t *ec);
+int rb_ec_reset_raised(rb_execution_context_t *ec);
+int rb_ec_stack_check(rb_execution_context_t *ec);
+
+VALUE rb_f_eval(int argc, const VALUE *argv, VALUE self);
+VALUE rb_make_exception(int argc, const VALUE *argv);
+
+NORETURN(void rb_method_name_error(VALUE, VALUE));
+
+NORETURN(void rb_fiber_start(rb_fiber_t*));
+
+NORETURN(void rb_print_undef(VALUE, ID, rb_method_visibility_t));
+NORETURN(void rb_print_undef_str(VALUE, VALUE));
+NORETURN(void rb_print_inaccessible(VALUE, ID, rb_method_visibility_t));
+NORETURN(void rb_vm_localjump_error(const char *,VALUE, int));
+NORETURN(void rb_vm_jump_tag_but_local_jump(enum ruby_tag_type));
+
+VALUE rb_vm_make_jump_tag_but_local_jump(enum ruby_tag_type state, VALUE val);
+rb_cref_t *rb_vm_cref(void);
+rb_cref_t *rb_vm_cref_replace_with_duplicated_cref(void);
+VALUE rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg, VALUE block_handler, VALUE filename);
+VALUE rb_vm_call_cfunc_in_box(VALUE recv, VALUE (*func)(VALUE, VALUE), VALUE arg1, VALUE arg2, VALUE filename, const rb_box_t *box);
+void rb_vm_frame_flag_set_box_require(const rb_execution_context_t *ec);
+const rb_box_t *rb_vm_current_box(const rb_execution_context_t *ec);
+const rb_box_t *rb_vm_caller_box(const rb_execution_context_t *ec);
+const rb_box_t *rb_vm_loading_box(const rb_execution_context_t *ec);
+void rb_vm_set_progname(VALUE filename);
+VALUE rb_vm_cbase(void);
+
+/* vm_backtrace.c */
+#define RUBY_BACKTRACE_START 0
+#define RUBY_ALL_BACKTRACE_LINES -1
+VALUE rb_ec_backtrace_object(const rb_execution_context_t *ec);
+VALUE rb_ec_backtrace_str_ary(const rb_execution_context_t *ec, long lev, long n);
+VALUE rb_ec_backtrace_location_ary(const rb_execution_context_t *ec, long lev, long n, bool skip_internal);
+
+#ifndef CharNext /* defined as CharNext[AW] on Windows. */
+# ifdef HAVE_MBLEN
+# define CharNext(p) rb_char_next(p)
+static inline char *
+rb_char_next(const char *p)
+{
+ if (p) {
+ int len = mblen(p, RUBY_MBCHAR_MAXSIZE);
+ p += len > 0 ? len : 1;
+ }
+ return (char *)p;
+}
+# else
+# define CharNext(p) ((p) + 1)
+# endif
+#endif
-#endif /* EVAL_INTERN_H_INCLUDED */
+#endif /* RUBY_EVAL_INTERN_H */