summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--common.mk14
-rw-r--r--configure.ac10
-rw-r--r--gc.c25
-rw-r--r--iseq.c15
-rw-r--r--lib/syntax_suggest/api.rb6
-rw-r--r--prism/srcs.mk142
-rw-r--r--prism/srcs.mk.in40
-rw-r--r--proc.c11
-rw-r--r--random.c36
-rw-r--r--shape.c6
-rw-r--r--template/GNUmakefile.in3
-rw-r--r--template/Makefile.in4
-rw-r--r--test/.excludes-zjit/TestRubyOptimization.rb1
-rw-r--r--test/ruby/test_gc.rb2
-rw-r--r--test/ruby/test_keyword.rb2
-rw-r--r--test/ruby/test_shapes.rb22
-rw-r--r--test/ruby/test_zjit.rb118
-rwxr-xr-xtool/make-snapshot9
-rw-r--r--tool/prereq.status2
-rwxr-xr-xtool/sync_default_gems.rb1
-rw-r--r--win32/Makefile.sub1
-rw-r--r--yjit/src/codegen.rs4
-rw-r--r--zjit/src/backend/lir.rs6
-rw-r--r--zjit/src/codegen.rs449
-rw-r--r--zjit/src/gc.rs33
-rw-r--r--zjit/src/hir.rs16
-rw-r--r--zjit/src/invariants.rs27
-rw-r--r--zjit/src/profile.rs70
28 files changed, 777 insertions, 298 deletions
diff --git a/common.mk b/common.mk
index f4ea06c263..2a2f3b7ff3 100644
--- a/common.mk
+++ b/common.mk
@@ -205,13 +205,6 @@ $(PRISM_BUILD_DIR)/.time $(PRISM_BUILD_DIR)/util/.time:
$(Q) $(MAKEDIRS) $(@D)
@$(NULLCMD) > $@
-$(PRISM_SRCDIR)/srcs.mk: $(HAVE_BASERUBY:yes=$(PRISM_SRCDIR)/templates/template.rb) \
- $(HAVE_BASERUBY:yes=$(PRISM_SRCDIR)/generate-srcs.mk.rb)
- $(ECHO) Updating prism/srcs.mk
- $(BASERUBY) $(PRISM_SRCDIR)/generate-srcs.mk.rb > $@
-
-srcs: $(PRISM_SRCDIR)/srcs.mk
-
EXPORTOBJS = $(DLNOBJ) \
localeinit.$(OBJEXT) \
loadpath.$(OBJEXT) \
@@ -1221,7 +1214,6 @@ incs: $(INSNS) {$(VPATH)}node_name.inc {$(VPATH)}known_errors.inc \
{$(VPATH)}vm_call_iseq_optimized.inc $(srcdir)/revision.h \
$(REVISION_H) \
$(UNICODE_DATA_HEADERS) $(ENC_HEADERS) \
- $(top_srcdir)/prism/ast.h $(top_srcdir)/prism/diagnostic.h \
{$(VPATH)}id.h {$(VPATH)}probes.dmyh
insns: $(INSNS)
@@ -1310,6 +1302,11 @@ $(REVISION_H)$(yes_baseruby:yes=~disabled~):
# uncommon.mk: $(REVISION_H)
# $(MKFILES): $(REVISION_H)
+# $(common_mk_includes) is set by config.status or GNUmakefile
+common_mk__$(gnumake:yes=artifact)_ = uncommon.mk
+common_mk_$(gnumake)_artifact_ = $(MKFILES)
+$(common_mk__artifact_): $(srcdir)/common.mk $(common_mk_includes)
+
ripper_srcs: $(RIPPER_SRCS)
$(RIPPER_SRCS): $(srcdir)/parse.y $(srcdir)/defs/id.def
@@ -1982,3 +1979,4 @@ $(CROSS_COMPILING:yes=)builtin.$(OBJEXT): {$(VPATH)}mini_builtin.c
$(CROSS_COMPILING:yes=)builtin.$(OBJEXT): {$(VPATH)}miniprelude.c
!include $(srcdir)/prism/srcs.mk
+!include $(srcdir)/depend
diff --git a/configure.ac b/configure.ac
index 366ffe1e05..71f6fe69d9 100644
--- a/configure.ac
+++ b/configure.ac
@@ -4698,9 +4698,12 @@ AC_CONFIG_FILES(Makefile:template/Makefile.in, [
sed '/^MISSING/s/\$U\././g;/^VCS *=/s#@VCS@#'"$VCS"'#;/^VCSUP *=/s#@VCSUP@#'"$VCSUP"'#' Makefile
echo; test x"$EXEEXT" = x || echo 'miniruby: miniruby$(EXEEXT)'
AS_IF([test "$gnumake" != yes], [
- echo ['$(MKFILES): $(srcdir)/common.mk $(srcdir)/depend $(srcdir)/prism/srcs.mk']
- sed ['s/{\$([^(){}]*)[^{}]*}//g;/^!/d'] ${srcdir}/common.mk ${srcdir}/depend
- cat ${srcdir}/prism/srcs.mk
+ # extract NMake-style include list
+ set = `sed -n 's/^!include *//p' ${srcdir}/common.mk`
+ echo common_mk_includes "@S|@*" # generate the macro assignment
+ shift
+ common_mk_includes="`echo \"@S|@*\" | sed 's|\$(srcdir)|.|g'`"
+ (cd ${srcdir} && sed -f tool/prereq.status common.mk ${common_mk_includes})
AS_IF([test "$YJIT_SUPPORT" = yes], [
cat ${srcdir}/yjit/not_gmake.mk
echo ['$(MKFILES): ${srcdir}/yjit/not_gmake.mk']
@@ -4723,6 +4726,7 @@ AC_CONFIG_FILES(Makefile:template/Makefile.in, [
]) &&
test -z "`${MAKE-make} -f $tmpgmk info-program | grep '^PROGRAM=ruby$'`" &&
echo 'ruby: $(PROGRAM);' >> $tmpmk
+ rm -f uncommon.mk # remove stale uncommon.mk, it should be updated by GNUmakefile
test "$tmpmk" = "$tmpgmk" || rm -f "$tmpgmk"
]) && mv -f $tmpmk Makefile],
[EXEEXT='$EXEEXT' MAKE='${MAKE-make}' gnumake='$gnumake' GIT='$GIT' YJIT_SUPPORT='$YJIT_SUPPORT'])
diff --git a/gc.c b/gc.c
index 160398f9a6..c2fc681253 100644
--- a/gc.c
+++ b/gc.c
@@ -4698,19 +4698,22 @@ rb_raw_obj_info_buitin_type(char *const buff, const size_t buff_size, const VALU
APPEND_S("shared -> ");
rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
}
- else if (ARY_EMBED_P(obj)) {
- APPEND_F("[%s%s] len: %ld (embed)",
- C(ARY_EMBED_P(obj), "E"),
- C(ARY_SHARED_P(obj), "S"),
- RARRAY_LEN(obj));
- }
else {
- APPEND_F("[%s%s] len: %ld, capa:%ld ptr:%p",
- C(ARY_EMBED_P(obj), "E"),
+ APPEND_F("[%s%s%s] ",
+ C(ARY_EMBED_P(obj), "E"),
C(ARY_SHARED_P(obj), "S"),
- RARRAY_LEN(obj),
- ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
- (void *)RARRAY_CONST_PTR(obj));
+ C(ARY_SHARED_ROOT_P(obj), "R"));
+
+ if (ARY_EMBED_P(obj)) {
+ APPEND_F("len: %ld (embed)",
+ RARRAY_LEN(obj));
+ }
+ else {
+ APPEND_F("len: %ld, capa:%ld ptr:%p",
+ RARRAY_LEN(obj),
+ RARRAY(obj)->as.heap.aux.capa,
+ (void *)RARRAY_CONST_PTR(obj));
+ }
}
break;
case T_STRING: {
diff --git a/iseq.c b/iseq.c
index 4334bdd795..09346994dd 100644
--- a/iseq.c
+++ b/iseq.c
@@ -1497,9 +1497,9 @@ rb_iseq_remove_coverage_all(void)
/* define wrapper class methods (RubyVM::InstructionSequence) */
static void
-iseqw_mark(void *ptr)
+iseqw_mark_and_move(void *ptr)
{
- rb_gc_mark_movable(*(VALUE *)ptr);
+ rb_gc_mark_and_move((VALUE *)ptr);
}
static size_t
@@ -1508,20 +1508,13 @@ iseqw_memsize(const void *ptr)
return rb_iseq_memsize(*(const rb_iseq_t **)ptr);
}
-static void
-iseqw_ref_update(void *ptr)
-{
- VALUE *vptr = ptr;
- *vptr = rb_gc_location(*vptr);
-}
-
static const rb_data_type_t iseqw_data_type = {
"T_IMEMO/iseq",
{
- iseqw_mark,
+ iseqw_mark_and_move,
RUBY_TYPED_DEFAULT_FREE,
iseqw_memsize,
- iseqw_ref_update,
+ iseqw_mark_and_move,
},
0, 0, RUBY_TYPED_FREE_IMMEDIATELY|RUBY_TYPED_WB_PROTECTED
};
diff --git a/lib/syntax_suggest/api.rb b/lib/syntax_suggest/api.rb
index 46c9c8adac..0f82d8362a 100644
--- a/lib/syntax_suggest/api.rb
+++ b/lib/syntax_suggest/api.rb
@@ -146,11 +146,7 @@ module SyntaxSuggest
def self.valid_without?(without_lines:, code_lines:)
lines = code_lines - Array(without_lines).flatten
- if lines.empty?
- true
- else
- valid?(lines)
- end
+ lines.empty? || valid?(lines)
end
# SyntaxSuggest.invalid? [Private]
diff --git a/prism/srcs.mk b/prism/srcs.mk
new file mode 100644
index 0000000000..aa5c0fa2b5
--- /dev/null
+++ b/prism/srcs.mk
@@ -0,0 +1,142 @@
+PRISM_TEMPLATES_DIR = $(PRISM_SRCDIR)/templates
+PRISM_TEMPLATE = $(PRISM_TEMPLATES_DIR)/template.rb
+PRISM_CONFIG = $(PRISM_SRCDIR)/config.yml
+
+srcs uncommon.mk: prism/.srcs.mk.time
+
+prism/.srcs.mk.time:
+prism/$(HAVE_BASERUBY:yes=.srcs.mk.time): \
+ $(PRISM_SRCDIR)/templates/template.rb \
+ $(PRISM_SRCDIR)/srcs.mk.in
+ $(BASERUBY) $(tooldir)/generic_erb.rb -c -t$@ -o $(PRISM_SRCDIR)/srcs.mk $(PRISM_SRCDIR)/srcs.mk.in
+
+realclean-prism-srcs::
+ $(RM) $(PRISM_SRCDIR)/srcs.mk
+
+realclean-srcs-local:: realclean-prism-srcs
+
+main srcs: $(srcdir)/prism/api_node.c
+$(srcdir)/prism/api_node.c: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/ext/prism/api_node.c.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) ext/prism/api_node.c $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/prism/api_node.c
+
+main incs: $(srcdir)/prism/ast.h
+$(srcdir)/prism/ast.h: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/include/prism/ast.h.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) include/prism/ast.h $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/prism/ast.h
+
+main incs: $(srcdir)/prism/diagnostic.h
+$(srcdir)/prism/diagnostic.h: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/include/prism/diagnostic.h.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) include/prism/diagnostic.h $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/prism/diagnostic.h
+
+main srcs: $(srcdir)/lib/prism/compiler.rb
+$(srcdir)/lib/prism/compiler.rb: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/lib/prism/compiler.rb.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) lib/prism/compiler.rb $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/lib/prism/compiler.rb
+
+main srcs: $(srcdir)/lib/prism/dispatcher.rb
+$(srcdir)/lib/prism/dispatcher.rb: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/lib/prism/dispatcher.rb.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) lib/prism/dispatcher.rb $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/lib/prism/dispatcher.rb
+
+main srcs: $(srcdir)/lib/prism/dot_visitor.rb
+$(srcdir)/lib/prism/dot_visitor.rb: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/lib/prism/dot_visitor.rb.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) lib/prism/dot_visitor.rb $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/lib/prism/dot_visitor.rb
+
+main srcs: $(srcdir)/lib/prism/dsl.rb
+$(srcdir)/lib/prism/dsl.rb: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/lib/prism/dsl.rb.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) lib/prism/dsl.rb $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/lib/prism/dsl.rb
+
+main srcs: $(srcdir)/lib/prism/inspect_visitor.rb
+$(srcdir)/lib/prism/inspect_visitor.rb: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/lib/prism/inspect_visitor.rb.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) lib/prism/inspect_visitor.rb $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/lib/prism/inspect_visitor.rb
+
+main srcs: $(srcdir)/lib/prism/mutation_compiler.rb
+$(srcdir)/lib/prism/mutation_compiler.rb: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/lib/prism/mutation_compiler.rb.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) lib/prism/mutation_compiler.rb $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/lib/prism/mutation_compiler.rb
+
+main srcs: $(srcdir)/lib/prism/node.rb
+$(srcdir)/lib/prism/node.rb: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/lib/prism/node.rb.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) lib/prism/node.rb $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/lib/prism/node.rb
+
+main srcs: $(srcdir)/lib/prism/reflection.rb
+$(srcdir)/lib/prism/reflection.rb: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/lib/prism/reflection.rb.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) lib/prism/reflection.rb $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/lib/prism/reflection.rb
+
+main srcs: $(srcdir)/lib/prism/serialize.rb
+$(srcdir)/lib/prism/serialize.rb: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/lib/prism/serialize.rb.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) lib/prism/serialize.rb $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/lib/prism/serialize.rb
+
+main srcs: $(srcdir)/lib/prism/visitor.rb
+$(srcdir)/lib/prism/visitor.rb: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/lib/prism/visitor.rb.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) lib/prism/visitor.rb $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/lib/prism/visitor.rb
+
+main srcs: $(srcdir)/prism/diagnostic.c
+$(srcdir)/prism/diagnostic.c: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/src/diagnostic.c.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) src/diagnostic.c $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/prism/diagnostic.c
+
+main srcs: $(srcdir)/prism/node.c
+$(srcdir)/prism/node.c: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/src/node.c.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) src/node.c $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/prism/node.c
+
+main srcs: $(srcdir)/prism/prettyprint.c
+$(srcdir)/prism/prettyprint.c: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/src/prettyprint.c.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) src/prettyprint.c $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/prism/prettyprint.c
+
+main srcs: $(srcdir)/prism/serialize.c
+$(srcdir)/prism/serialize.c: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/src/serialize.c.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) src/serialize.c $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/prism/serialize.c
+
+main srcs: $(srcdir)/prism/token_type.c
+$(srcdir)/prism/token_type.c: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/src/token_type.c.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) src/token_type.c $@
+
+realclean-prism-srcs::
+ $(RM) $(srcdir)/prism/token_type.c
diff --git a/prism/srcs.mk.in b/prism/srcs.mk.in
new file mode 100644
index 0000000000..655de155d5
--- /dev/null
+++ b/prism/srcs.mk.in
@@ -0,0 +1,40 @@
+<% # -*- ruby -*-
+require_relative 'templates/template'
+
+script = File.basename(__FILE__)
+srcs = output ? File.basename(output) : script.chomp('.in')
+mk = 'uncommon.mk'
+
+# %>
+PRISM_TEMPLATES_DIR = $(PRISM_SRCDIR)/templates
+PRISM_TEMPLATE = $(PRISM_TEMPLATES_DIR)/template.rb
+PRISM_CONFIG = $(PRISM_SRCDIR)/config.yml
+
+srcs <%=%><%=mk%>: prism/.srcs.mk.time
+
+prism/.srcs.mk.time:
+prism/$(HAVE_BASERUBY:yes=.srcs.mk.time): \
+ $(PRISM_SRCDIR)/templates/template.rb \
+ $(PRISM_SRCDIR)/<%=%><%=script%>
+ $(BASERUBY) $(tooldir)/generic_erb.rb -c -t$@ -o $(PRISM_SRCDIR)/<%=%><%=srcs%> $(PRISM_SRCDIR)/<%=%><%=script%>
+
+realclean-prism-srcs::
+ $(RM) $(PRISM_SRCDIR)/<%=%><%=srcs%>
+
+realclean-srcs-local:: realclean-prism-srcs
+<% Prism::Template::TEMPLATES.map do |t|
+ /\.(?:[ch]|rb)\z/ =~ t or next
+ s = '$(srcdir)/' + t.sub(%r[\A(?:(src)|ext|include)/]) {$1 && 'prism/'}
+ s.sub!(%r[\A\$(srcdir)/prism/], '$(PRISM_SRCDIR)/')
+ target = s.end_with?('.h') ? 'incs' : 'srcs'
+# %>
+
+main <%=%><%=target%>: <%=%><%=s%>
+<%=%><%=s%>: $(PRISM_CONFIG) $(PRISM_TEMPLATE) $(PRISM_TEMPLATES_DIR)/<%=%><%=t%>.erb
+ $(Q) $(BASERUBY) $(PRISM_TEMPLATE) <%=%><%=t%> $@
+
+realclean-prism-srcs::
+ $(RM) <%=%><%=s%>
+<%
+end
+# %>
diff --git a/proc.c b/proc.c
index 68f63040b7..554ec5e237 100644
--- a/proc.c
+++ b/proc.c
@@ -716,12 +716,15 @@ rb_func_proc_dup(VALUE src_obj)
VALUE proc_obj = TypedData_Make_Struct(rb_obj_class(src_obj), cfunc_proc_t, &proc_data_type, proc);
memcpy(&proc->basic, src_proc, sizeof(rb_proc_t));
+ RB_OBJ_WRITTEN(proc_obj, Qundef, proc->basic.block.as.captured.self);
+ RB_OBJ_WRITTEN(proc_obj, Qundef, proc->basic.block.as.captured.code.val);
+ const VALUE *src_ep = src_proc->block.as.captured.ep;
VALUE *ep = *(VALUE **)&proc->basic.block.as.captured.ep = proc->env + VM_ENV_DATA_SIZE - 1;
- ep[VM_ENV_DATA_INDEX_FLAGS] = src_proc->block.as.captured.ep[VM_ENV_DATA_INDEX_FLAGS];
- ep[VM_ENV_DATA_INDEX_ME_CREF] = src_proc->block.as.captured.ep[VM_ENV_DATA_INDEX_ME_CREF];
- ep[VM_ENV_DATA_INDEX_SPECVAL] = src_proc->block.as.captured.ep[VM_ENV_DATA_INDEX_SPECVAL];
- ep[VM_ENV_DATA_INDEX_ENV] = src_proc->block.as.captured.ep[VM_ENV_DATA_INDEX_ENV];
+ ep[VM_ENV_DATA_INDEX_FLAGS] = src_ep[VM_ENV_DATA_INDEX_FLAGS];
+ ep[VM_ENV_DATA_INDEX_ME_CREF] = src_ep[VM_ENV_DATA_INDEX_ME_CREF];
+ ep[VM_ENV_DATA_INDEX_SPECVAL] = src_ep[VM_ENV_DATA_INDEX_SPECVAL];
+ RB_OBJ_WRITE(proc_obj, &ep[VM_ENV_DATA_INDEX_ENV], src_ep[VM_ENV_DATA_INDEX_ENV]);
return proc_obj;
}
diff --git a/random.c b/random.c
index 85d72057cd..9b8cec40b4 100644
--- a/random.c
+++ b/random.c
@@ -142,18 +142,21 @@ static const rb_random_interface_t random_mt_if = {
};
static rb_random_mt_t *
-rand_mt_start(rb_random_mt_t *r)
+rand_mt_start(rb_random_mt_t *r, VALUE obj)
{
if (!genrand_initialized(&r->mt)) {
r->base.seed = rand_init(&random_mt_if, &r->base, random_seed(Qundef));
+ if (obj) {
+ RB_OBJ_WRITTEN(obj, Qundef, r->base.seed);
+ }
}
return r;
}
static rb_random_t *
-rand_start(rb_random_mt_t *r)
+rand_start(rb_random_mt_t *r, VALUE obj)
{
- return &rand_mt_start(r)->base;
+ return &rand_mt_start(r, obj)->base;
}
static rb_ractor_local_key_t default_rand_key;
@@ -192,7 +195,13 @@ default_rand(void)
static rb_random_mt_t *
default_mt(void)
{
- return rand_mt_start(default_rand());
+ return rand_mt_start(default_rand(), Qfalse);
+}
+
+static rb_random_t *
+default_rand_start(void)
+{
+ return &default_mt()->base;
}
unsigned int
@@ -293,7 +302,7 @@ get_rnd(VALUE obj)
rb_random_t *ptr;
TypedData_Get_Struct(obj, rb_random_t, &rb_random_data_type, ptr);
if (RTYPEDDATA_TYPE(obj) == &random_mt_type)
- return rand_start((rb_random_mt_t *)ptr);
+ return rand_start((rb_random_mt_t *)ptr, obj);
return ptr;
}
@@ -309,11 +318,11 @@ static rb_random_t *
try_get_rnd(VALUE obj)
{
if (obj == rb_cRandom) {
- return rand_start(default_rand());
+ return default_rand_start();
}
if (!rb_typeddata_is_kind_of(obj, &rb_random_data_type)) return NULL;
if (RTYPEDDATA_TYPE(obj) == &random_mt_type)
- return rand_start(DATA_PTR(obj));
+ return rand_start(DATA_PTR(obj), obj);
rb_random_t *rnd = DATA_PTR(obj);
if (!rnd) {
rb_raise(rb_eArgError, "uninitialized random: %s",
@@ -829,6 +838,7 @@ rand_mt_copy(VALUE obj, VALUE orig)
mt = &rnd1->mt;
*rnd1 = *rnd2;
+ RB_OBJ_WRITTEN(obj, Qundef, rnd1->base.seed);
mt->next = mt->state + numberof(mt->state) - mt->left + 1;
return obj;
}
@@ -916,7 +926,7 @@ rand_mt_load(VALUE obj, VALUE dump)
}
mt->left = (unsigned int)x;
mt->next = mt->state + numberof(mt->state) - x + 1;
- rnd->base.seed = rb_to_int(seed);
+ RB_OBJ_WRITE(obj, &rnd->base.seed, rb_to_int(seed));
return obj;
}
@@ -975,7 +985,7 @@ static VALUE
rb_f_srand(int argc, VALUE *argv, VALUE obj)
{
VALUE seed, old;
- rb_random_mt_t *r = rand_mt_start(default_rand());
+ rb_random_mt_t *r = default_mt();
if (rb_check_arity(argc, 0, 1) == 0) {
seed = random_seed(obj);
@@ -1337,7 +1347,7 @@ rb_random_bytes(VALUE obj, long n)
static VALUE
random_s_bytes(VALUE obj, VALUE len)
{
- rb_random_t *rnd = rand_start(default_rand());
+ rb_random_t *rnd = default_rand_start();
return rand_bytes(&random_mt_if, rnd, NUM2LONG(rb_to_int(len)));
}
@@ -1359,7 +1369,7 @@ random_s_bytes(VALUE obj, VALUE len)
static VALUE
random_s_seed(VALUE obj)
{
- rb_random_mt_t *rnd = rand_mt_start(default_rand());
+ rb_random_mt_t *rnd = default_mt();
return rnd->base.seed;
}
@@ -1689,7 +1699,7 @@ static VALUE
rb_f_rand(int argc, VALUE *argv, VALUE obj)
{
VALUE vmax;
- rb_random_t *rnd = rand_start(default_rand());
+ rb_random_t *rnd = default_rand_start();
if (rb_check_arity(argc, 0, 1) && !NIL_P(vmax = argv[0])) {
VALUE v = rand_range(obj, rnd, vmax);
@@ -1716,7 +1726,7 @@ rb_f_rand(int argc, VALUE *argv, VALUE obj)
static VALUE
random_s_rand(int argc, VALUE *argv, VALUE obj)
{
- VALUE v = rand_random(argc, argv, Qnil, rand_start(default_rand()));
+ VALUE v = rand_random(argc, argv, Qnil, default_rand_start());
check_random_number(v, argv);
return v;
}
diff --git a/shape.c b/shape.c
index df4faf960c..d58f9f1d0c 100644
--- a/shape.c
+++ b/shape.c
@@ -369,7 +369,7 @@ RUBY_FUNC_EXPORTED shape_id_t
rb_obj_shape_id(VALUE obj)
{
if (RB_SPECIAL_CONST_P(obj)) {
- return SPECIAL_CONST_SHAPE_ID;
+ rb_bug("rb_obj_shape_id: called on a special constant");
}
if (BUILTIN_TYPE(obj) == T_CLASS || BUILTIN_TYPE(obj) == T_MODULE) {
@@ -1180,6 +1180,7 @@ rb_shape_copy_complex_ivars(VALUE dest, VALUE obj, shape_id_t src_shape_id, st_t
st_delete(table, &id, NULL);
}
rb_obj_init_too_complex(dest, table);
+ rb_gc_writebarrier_remember(dest);
}
size_t
@@ -1424,6 +1425,9 @@ rb_shape_parent(VALUE self)
static VALUE
rb_shape_debug_shape(VALUE self, VALUE obj)
{
+ if (RB_SPECIAL_CONST_P(obj)) {
+ rb_raise(rb_eArgError, "Can't get shape of special constant");
+ }
return shape_id_t_to_rb_cShape(rb_obj_shape_id(obj));
}
diff --git a/template/GNUmakefile.in b/template/GNUmakefile.in
index 22ff1078dc..452e7cdeef 100644
--- a/template/GNUmakefile.in
+++ b/template/GNUmakefile.in
@@ -27,5 +27,8 @@ override UNICODE_TABLES_DEPENDENTS = \
$(UNICODE_TABLES_DATA_FILES)))),\
force,none)
+# extract NMake-style include list
+$(eval common_mk_includes := $(shell sed -n 's/^!include *//p' $(srcdir)/common.mk))
+
-include uncommon.mk
include $(srcdir)/defs/gmake.mk
diff --git a/template/Makefile.in b/template/Makefile.in
index daecd1debe..39f702b66d 100644
--- a/template/Makefile.in
+++ b/template/Makefile.in
@@ -426,8 +426,8 @@ $(MKFILES): config.status $(srcdir)/version.h $(ABI_VERSION_HDR)
$(MAKE) -f conftest.mk | grep '^AUTO_REMAKE$$' >/dev/null 2>&1 || \
{ echo "$@ updated, restart."; exit 1; }
-uncommon.mk: $(srcdir)/common.mk $(srcdir)/depend
- sed -f $(srcdir)/tool/prereq.status $(srcdir)/common.mk $(srcdir)/depend > $@
+uncommon.mk: $(srcdir)/tool/prereq.status
+ sed -f $(srcdir)/tool/prereq.status $(srcdir)/common.mk $(common_mk_includes) > $@
.PHONY: reconfig
reconfig-args = $(srcdir)/$(CONFIGURE) $(yes_silence:yes=--silent) $(configure_args)
diff --git a/test/.excludes-zjit/TestRubyOptimization.rb b/test/.excludes-zjit/TestRubyOptimization.rb
new file mode 100644
index 0000000000..5361ab02c7
--- /dev/null
+++ b/test/.excludes-zjit/TestRubyOptimization.rb
@@ -0,0 +1 @@
+exclude(:test_side_effect_in_popped_splat, 'Test fails with ZJIT due to locals invalidation')
diff --git a/test/ruby/test_gc.rb b/test/ruby/test_gc.rb
index 5fc9ea508c..7aba333e92 100644
--- a/test/ruby/test_gc.rb
+++ b/test/ruby/test_gc.rb
@@ -447,7 +447,7 @@ class TestGc < Test::Unit::TestCase
end
def test_singleton_method_added
- assert_in_out_err([], <<-EOS, [], [], "[ruby-dev:44436]")
+ assert_in_out_err([], <<-EOS, [], [], "[ruby-dev:44436]", timeout: 30)
class BasicObject
undef singleton_method_added
def singleton_method_added(mid)
diff --git a/test/ruby/test_keyword.rb b/test/ruby/test_keyword.rb
index 4563308fa2..1e3e0e53b1 100644
--- a/test/ruby/test_keyword.rb
+++ b/test/ruby/test_keyword.rb
@@ -4033,7 +4033,7 @@ class TestKeywordArguments < Test::Unit::TestCase
tap { m }
GC.start
tap { m }
- }, bug8964
+ }, bug8964, timeout: 30
assert_normal_exit %q{
prc = Proc.new {|a: []|}
GC.stress = true
diff --git a/test/ruby/test_shapes.rb b/test/ruby/test_shapes.rb
index 77bba6421b..50b1679c18 100644
--- a/test/ruby/test_shapes.rb
+++ b/test/ruby/test_shapes.rb
@@ -1032,12 +1032,22 @@ class TestShapes < Test::Unit::TestCase
assert_shape_equal(RubyVM::Shape.root_shape, RubyVM::Shape.of([]))
end
- def test_true_has_special_const_shape_id
- assert_equal(RubyVM::Shape::SPECIAL_CONST_SHAPE_ID, RubyVM::Shape.of(true).id)
- end
-
- def test_nil_has_special_const_shape_id
- assert_equal(RubyVM::Shape::SPECIAL_CONST_SHAPE_ID, RubyVM::Shape.of(nil).id)
+ def test_raise_on_special_consts
+ assert_raise ArgumentError do
+ RubyVM::Shape.of(true)
+ end
+ assert_raise ArgumentError do
+ RubyVM::Shape.of(false)
+ end
+ assert_raise ArgumentError do
+ RubyVM::Shape.of(nil)
+ end
+ assert_raise ArgumentError do
+ RubyVM::Shape.of(0)
+ end
+ assert_raise ArgumentError do
+ RubyVM::Shape.of(:foo)
+ end
end
def test_root_shape_transition_to_special_const_on_frozen
diff --git a/test/ruby/test_zjit.rb b/test/ruby/test_zjit.rb
index d30af737c3..96ac99b6db 100644
--- a/test/ruby/test_zjit.rb
+++ b/test/ruby/test_zjit.rb
@@ -509,6 +509,116 @@ class TestZJIT < Test::Unit::TestCase
}, insns: [:opt_ge], call_threshold: 2
end
+ def test_new_hash_empty
+ assert_compiles '{}', %q{
+ def test = {}
+ test
+ }, insns: [:newhash]
+ end
+
+ def test_new_hash_nonempty
+ assert_compiles '{"key" => "value", 42 => 100}', %q{
+ def test
+ key = "key"
+ value = "value"
+ num = 42
+ result = 100
+ {key => value, num => result}
+ end
+ test
+ }, insns: [:newhash]
+ end
+
+ def test_new_hash_single_key_value
+ assert_compiles '{"key" => "value"}', %q{
+ def test = {"key" => "value"}
+ test
+ }, insns: [:newhash]
+ end
+
+ def test_new_hash_with_computation
+ assert_compiles '{"sum" => 5, "product" => 6}', %q{
+ def test(a, b)
+ {"sum" => a + b, "product" => a * b}
+ end
+ test(2, 3)
+ }, insns: [:newhash]
+ end
+
+ def test_new_hash_with_user_defined_hash_method
+ assert_runs 'true', %q{
+ class CustomKey
+ attr_reader :val
+
+ def initialize(val)
+ @val = val
+ end
+
+ def hash
+ @val.hash
+ end
+
+ def eql?(other)
+ other.is_a?(CustomKey) && @val == other.val
+ end
+ end
+
+ def test
+ key = CustomKey.new("key")
+ hash = {key => "value"}
+ hash[key] == "value"
+ end
+ test
+ }
+ end
+
+ def test_new_hash_with_user_hash_method_exception
+ assert_runs 'RuntimeError', %q{
+ class BadKey
+ def hash
+ raise "Hash method failed!"
+ end
+ end
+
+ def test
+ key = BadKey.new
+ {key => "value"}
+ end
+
+ begin
+ test
+ rescue => e
+ e.class
+ end
+ }
+ end
+
+ def test_new_hash_with_user_eql_method_exception
+ assert_runs 'RuntimeError', %q{
+ class BadKey
+ def hash
+ 42
+ end
+
+ def eql?(other)
+ raise "Eql method failed!"
+ end
+ end
+
+ def test
+ key1 = BadKey.new
+ key2 = BadKey.new
+ {key1 => "value1", key2 => "value2"}
+ end
+
+ begin
+ test
+ rescue => e
+ e.class
+ end
+ }
+ end
+
def test_opt_hash_freeze
assert_compiles '{}', <<~RUBY, insns: [:opt_hash_freeze]
def test = {}.freeze
@@ -1135,6 +1245,14 @@ class TestZJIT < Test::Unit::TestCase
}, insns: [:defined]
end
+ def test_defined_with_method_call
+ assert_compiles '["method", nil]', %q{
+ def test = return defined?("x".reverse(1)), defined?("x".reverse(1).reverse)
+
+ test
+ }, insns: [:defined]
+ end
+
def test_defined_yield
assert_compiles "nil", "defined?(yield)"
assert_compiles '[nil, nil, "yield"]', %q{
diff --git a/tool/make-snapshot b/tool/make-snapshot
index 7d4fce4f15..2b9a5006e0 100755
--- a/tool/make-snapshot
+++ b/tool/make-snapshot
@@ -480,7 +480,14 @@ def package(vcs, rev, destdir, tmp = nil)
vars["UNICODE_VERSION"] = $unicode_version if $unicode_version
args = vars.dup
mk.gsub!(/@([A-Za-z_]\w*)@/) {args.delete($1); vars[$1] || ENV[$1]}
- mk << commonmk.gsub(/\{\$([^(){}]*)[^{}]*\}/, "").gsub(/^!/, '-').sub(/^revision\.tmp::$/, '\& Makefile')
+ commonmk.gsub!(/^!(?:include \$\(srcdir\)\/(.*))?/) do
+ if inc = $1 and File.exist?(inc)
+ File.binread(inc).gsub(/^!/, '# !')
+ else
+ "#"
+ end
+ end
+ mk << commonmk.gsub(/\{\$([^(){}]*)[^{}]*\}/, "").sub(/^revision\.tmp::$/, '\& Makefile')
mk << <<-'APPEND'
update-download:: touch-unicode-files
diff --git a/tool/prereq.status b/tool/prereq.status
index da92460c8d..6aca615e90 100644
--- a/tool/prereq.status
+++ b/tool/prereq.status
@@ -42,4 +42,4 @@ s,@srcdir@,.,g
s/@[A-Za-z][A-Za-z0-9_]*@//g
s/{\$([^(){}]*)}//g
-s/^!/-/
+s/^!/#!/
diff --git a/tool/sync_default_gems.rb b/tool/sync_default_gems.rb
index 5794edaa83..029a27c829 100755
--- a/tool/sync_default_gems.rb
+++ b/tool/sync_default_gems.rb
@@ -330,6 +330,7 @@ module SyncDefaultGems
rm_rf("test/prism/snapshots")
rm("prism/extconf.rb")
+ `git checkout prism/generate-srcs.mk.rb`
when "resolv"
rm_rf(%w[lib/resolv.* ext/win32/resolv test/resolv ext/win32/lib/win32/resolv.rb])
cp_r("#{upstream}/lib/resolv.rb", "lib")
diff --git a/win32/Makefile.sub b/win32/Makefile.sub
index 664d54e5ff..1bdef106b3 100644
--- a/win32/Makefile.sub
+++ b/win32/Makefile.sub
@@ -570,7 +570,6 @@ ACTIONS_ENDGROUP = @::
ABI_VERSION_HDR = $(hdrdir)/ruby/internal/abi.h
!include $(srcdir)/common.mk
-!include $(srcdir)/depend
!ifdef SCRIPTPROGRAMS
!else if [echo>scriptbin.mk SCRIPTPROGRAMS = \]
diff --git a/yjit/src/codegen.rs b/yjit/src/codegen.rs
index 9644b948d7..44d458020d 100644
--- a/yjit/src/codegen.rs
+++ b/yjit/src/codegen.rs
@@ -3104,7 +3104,7 @@ fn gen_set_ivar(
// Get the iv index
let shape_too_complex = comptime_receiver.shape_too_complex();
- let ivar_index = if !shape_too_complex {
+ let ivar_index = if !comptime_receiver.special_const_p() && !shape_too_complex {
let shape_id = comptime_receiver.shape_id_of();
let mut ivar_index: u16 = 0;
if unsafe { rb_shape_get_iv_index(shape_id, ivar_name, &mut ivar_index) } {
@@ -3369,7 +3369,7 @@ fn gen_definedivar(
// Specialize base on compile time values
let comptime_receiver = jit.peek_at_self();
- if comptime_receiver.shape_too_complex() || asm.ctx.get_chain_depth() >= GET_IVAR_MAX_DEPTH {
+ if comptime_receiver.special_const_p() || comptime_receiver.shape_too_complex() || asm.ctx.get_chain_depth() >= GET_IVAR_MAX_DEPTH {
// Fall back to calling rb_ivar_defined
// Save the PC and SP because the callee may allocate
diff --git a/zjit/src/backend/lir.rs b/zjit/src/backend/lir.rs
index 54bef9d925..460e2719dd 100644
--- a/zjit/src/backend/lir.rs
+++ b/zjit/src/backend/lir.rs
@@ -1684,6 +1684,7 @@ impl Assembler {
}
pub fn cpop_into(&mut self, opnd: Opnd) {
+ assert!(matches!(opnd, Opnd::Reg(_)), "Destination of cpop_into must be a register, got: {opnd:?}");
self.push_insn(Insn::CPopInto(opnd));
}
@@ -1831,6 +1832,7 @@ impl Assembler {
}
pub fn lea_into(&mut self, out: Opnd, opnd: Opnd) {
+ assert!(matches!(out, Opnd::Reg(_)), "Destination of lea_into must be a register, got: {out:?}");
self.push_insn(Insn::Lea { opnd, out });
}
@@ -1856,7 +1858,7 @@ impl Assembler {
}
pub fn load_into(&mut self, dest: Opnd, opnd: Opnd) {
- assert!(matches!(dest, Opnd::Reg(_) | Opnd::VReg{..}), "Destination of load_into must be a register");
+ assert!(matches!(dest, Opnd::Reg(_)), "Destination of load_into must be a register, got: {dest:?}");
match (dest, opnd) {
(Opnd::Reg(dest), Opnd::Reg(opnd)) if dest == opnd => {}, // skip if noop
_ => self.push_insn(Insn::LoadInto { dest, opnd }),
@@ -1882,6 +1884,7 @@ impl Assembler {
}
pub fn mov(&mut self, dest: Opnd, src: Opnd) {
+ assert!(!matches!(dest, Opnd::VReg { .. }), "Destination of mov must not be Opnd::VReg, got: {dest:?}");
self.push_insn(Insn::Mov { dest, src });
}
@@ -1919,6 +1922,7 @@ impl Assembler {
}
pub fn store(&mut self, dest: Opnd, src: Opnd) {
+ assert!(!matches!(dest, Opnd::VReg { .. }), "Destination of store must not be Opnd::VReg, got: {dest:?}");
self.push_insn(Insn::Store { dest, src });
}
diff --git a/zjit/src/codegen.rs b/zjit/src/codegen.rs
index a58950ab9a..5780a26357 100644
--- a/zjit/src/codegen.rs
+++ b/zjit/src/codegen.rs
@@ -1,6 +1,6 @@
-use std::cell::Cell;
+use std::cell::{Cell, RefCell};
use std::rc::Rc;
-use std::ffi::{c_int, c_void};
+use std::ffi::{c_int, c_long, c_void};
use crate::asm::Label;
use crate::backend::current::{Reg, ALLOC_REGS};
@@ -27,7 +27,7 @@ struct JITState {
labels: Vec<Option<Target>>,
/// ISEQ calls that need to be compiled later
- iseq_calls: Vec<Rc<IseqCall>>,
+ iseq_calls: Vec<Rc<RefCell<IseqCall>>>,
/// The number of bytes allocated for basic block arguments spilled onto the C stack
c_stack_slots: usize,
@@ -46,12 +46,8 @@ impl JITState {
}
/// Retrieve the output of a given instruction that has been compiled
- fn get_opnd(&self, insn_id: InsnId) -> Option<lir::Opnd> {
- let opnd = self.opnds[insn_id.0];
- if opnd.is_none() {
- debug!("Failed to get_opnd({insn_id})");
- }
- opnd
+ fn get_opnd(&self, insn_id: InsnId) -> lir::Opnd {
+ self.opnds[insn_id.0].expect(&format!("Failed to get_opnd({insn_id})"))
}
/// Find or create a label for a given BlockId
@@ -130,13 +126,14 @@ fn gen_iseq_entry_point_body(cb: &mut CodeBlock, iseq: IseqPtr) -> Option<CodePt
};
// Stub callee ISEQs for JIT-to-JIT calls
- for iseq_call in jit.iseq_calls.into_iter() {
+ for iseq_call in jit.iseq_calls.iter() {
gen_iseq_call(cb, iseq, iseq_call)?;
}
// Remember the block address to reuse it later
let payload = get_or_create_iseq_payload(iseq);
payload.status = IseqStatus::Compiled(start_ptr);
+ payload.iseq_calls.extend(jit.iseq_calls);
append_gc_offsets(iseq, &gc_offsets);
// Return a JIT code address
@@ -144,19 +141,20 @@ fn gen_iseq_entry_point_body(cb: &mut CodeBlock, iseq: IseqPtr) -> Option<CodePt
}
/// Stub a branch for a JIT-to-JIT call
-fn gen_iseq_call(cb: &mut CodeBlock, caller_iseq: IseqPtr, iseq_call: Rc<IseqCall>) -> Option<()> {
+fn gen_iseq_call(cb: &mut CodeBlock, caller_iseq: IseqPtr, iseq_call: &Rc<RefCell<IseqCall>>) -> Option<()> {
// Compile a function stub
let Some(stub_ptr) = gen_function_stub(cb, iseq_call.clone()) else {
// Failed to compile the stub. Bail out of compiling the caller ISEQ.
debug!("Failed to compile iseq: could not compile stub: {} -> {}",
- iseq_get_location(caller_iseq, 0), iseq_get_location(iseq_call.iseq, 0));
+ iseq_get_location(caller_iseq, 0), iseq_get_location(iseq_call.borrow().iseq, 0));
return None;
};
// Update the JIT-to-JIT call to call the stub
let stub_addr = stub_ptr.raw_ptr(cb);
- iseq_call.regenerate(cb, |asm| {
- asm_comment!(asm, "call function stub: {}", iseq_get_location(iseq_call.iseq, 0));
+ let iseq = iseq_call.borrow().iseq;
+ iseq_call.borrow_mut().regenerate(cb, |asm| {
+ asm_comment!(asm, "call function stub: {}", iseq_get_location(iseq, 0));
asm.ccall(stub_addr, vec![]);
});
Some(())
@@ -209,7 +207,7 @@ fn gen_entry(cb: &mut CodeBlock, iseq: IseqPtr, function: &Function, function_pt
}
/// Compile an ISEQ into machine code
-fn gen_iseq(cb: &mut CodeBlock, iseq: IseqPtr) -> Option<(CodePtr, Vec<Rc<IseqCall>>)> {
+fn gen_iseq(cb: &mut CodeBlock, iseq: IseqPtr) -> Option<(CodePtr, Vec<Rc<RefCell<IseqCall>>>)> {
// Return an existing pointer if it's already compiled
let payload = get_or_create_iseq_payload(iseq);
match payload.status {
@@ -231,6 +229,7 @@ fn gen_iseq(cb: &mut CodeBlock, iseq: IseqPtr) -> Option<(CodePtr, Vec<Rc<IseqCa
let result = gen_function(cb, iseq, &function);
if let Some((start_ptr, gc_offsets, jit)) = result {
payload.status = IseqStatus::Compiled(start_ptr);
+ payload.iseq_calls.extend(jit.iseq_calls.clone());
append_gc_offsets(iseq, &gc_offsets);
Some((start_ptr, jit.iseq_calls))
} else {
@@ -313,18 +312,24 @@ fn gen_insn(cb: &mut CodeBlock, jit: &mut JITState, asm: &mut Assembler, functio
// Convert InsnId to lir::Opnd
macro_rules! opnd {
($insn_id:ident) => {
- jit.get_opnd($insn_id.clone())?
+ jit.get_opnd($insn_id.clone())
};
}
macro_rules! opnds {
($insn_ids:ident) => {
{
- Option::from_iter($insn_ids.iter().map(|insn_id| jit.get_opnd(*insn_id)))?
+ $insn_ids.iter().map(|insn_id| jit.get_opnd(*insn_id)).collect::<Vec<_>>()
}
};
}
+ macro_rules! no_output {
+ ($call:expr) => {
+ { let () = $call; return Some(()); }
+ };
+ }
+
if !matches!(*insn, Insn::Snapshot { .. }) {
asm_comment!(asm, "Insn: {insn_id} {insn}");
}
@@ -332,55 +337,59 @@ fn gen_insn(cb: &mut CodeBlock, jit: &mut JITState, asm: &mut Assembler, functio
let out_opnd = match insn {
Insn::Const { val: Const::Value(val) } => gen_const(*val),
Insn::NewArray { elements, state } => gen_new_array(asm, opnds!(elements), &function.frame_state(*state)),
+ Insn::NewHash { elements, state } => gen_new_hash(jit, asm, elements, &function.frame_state(*state)),
Insn::NewRange { low, high, flag, state } => gen_new_range(asm, opnd!(low), opnd!(high), *flag, &function.frame_state(*state)),
Insn::ArrayDup { val, state } => gen_array_dup(asm, opnd!(val), &function.frame_state(*state)),
Insn::StringCopy { val, chilled, state } => gen_string_copy(asm, opnd!(val), *chilled, &function.frame_state(*state)),
- Insn::StringConcat { strings, state } => gen_string_concat(jit, asm, opnds!(strings), &function.frame_state(*state))?,
- Insn::StringIntern { val, state } => gen_intern(asm, opnd!(val), &function.frame_state(*state))?,
+ // concatstrings shouldn't have 0 strings
+ // If it happens we abort the compilation for now
+ Insn::StringConcat { strings, .. } if strings.is_empty() => return None,
+ Insn::StringConcat { strings, state } => gen_string_concat(jit, asm, opnds!(strings), &function.frame_state(*state)),
+ Insn::StringIntern { val, state } => gen_intern(asm, opnd!(val), &function.frame_state(*state)),
Insn::Param { idx } => unreachable!("block.insns should not have Insn::Param({idx})"),
Insn::Snapshot { .. } => return Some(()), // we don't need to do anything for this instruction at the moment
- Insn::Jump(branch) => return gen_jump(jit, asm, branch),
- Insn::IfTrue { val, target } => return gen_if_true(jit, asm, opnd!(val), target),
- Insn::IfFalse { val, target } => return gen_if_false(jit, asm, opnd!(val), target),
- Insn::SendWithoutBlock { cd, state, self_val, args, .. } => gen_send_without_block(jit, asm, *cd, &function.frame_state(*state), opnd!(self_val), opnds!(args))?,
+ Insn::Jump(branch) => no_output!(gen_jump(jit, asm, branch)),
+ Insn::IfTrue { val, target } => no_output!(gen_if_true(jit, asm, opnd!(val), target)),
+ Insn::IfFalse { val, target } => no_output!(gen_if_false(jit, asm, opnd!(val), target)),
+ Insn::SendWithoutBlock { cd, state, self_val, args, .. } => gen_send_without_block(jit, asm, *cd, &function.frame_state(*state), opnd!(self_val), opnds!(args)),
// Give up SendWithoutBlockDirect for 6+ args since asm.ccall() doesn't support it.
Insn::SendWithoutBlockDirect { cd, state, self_val, args, .. } if args.len() + 1 > C_ARG_OPNDS.len() => // +1 for self
- gen_send_without_block(jit, asm, *cd, &function.frame_state(*state), opnd!(self_val), opnds!(args))?,
- Insn::SendWithoutBlockDirect { cme, iseq, self_val, args, state, .. } => gen_send_without_block_direct(cb, jit, asm, *cme, *iseq, opnd!(self_val), opnds!(args), &function.frame_state(*state))?,
+ gen_send_without_block(jit, asm, *cd, &function.frame_state(*state), opnd!(self_val), opnds!(args)),
+ Insn::SendWithoutBlockDirect { cme, iseq, self_val, args, state, .. } => gen_send_without_block_direct(cb, jit, asm, *cme, *iseq, opnd!(self_val), opnds!(args), &function.frame_state(*state)),
Insn::InvokeBuiltin { bf, args, state, .. } => gen_invokebuiltin(jit, asm, &function.frame_state(*state), bf, opnds!(args))?,
- Insn::Return { val } => return Some(gen_return(asm, opnd!(val))?),
- Insn::FixnumAdd { left, right, state } => gen_fixnum_add(jit, asm, opnd!(left), opnd!(right), &function.frame_state(*state))?,
- Insn::FixnumSub { left, right, state } => gen_fixnum_sub(jit, asm, opnd!(left), opnd!(right), &function.frame_state(*state))?,
- Insn::FixnumMult { left, right, state } => gen_fixnum_mult(jit, asm, opnd!(left), opnd!(right), &function.frame_state(*state))?,
- Insn::FixnumEq { left, right } => gen_fixnum_eq(asm, opnd!(left), opnd!(right))?,
- Insn::FixnumNeq { left, right } => gen_fixnum_neq(asm, opnd!(left), opnd!(right))?,
- Insn::FixnumLt { left, right } => gen_fixnum_lt(asm, opnd!(left), opnd!(right))?,
- Insn::FixnumLe { left, right } => gen_fixnum_le(asm, opnd!(left), opnd!(right))?,
- Insn::FixnumGt { left, right } => gen_fixnum_gt(asm, opnd!(left), opnd!(right))?,
- Insn::FixnumGe { left, right } => gen_fixnum_ge(asm, opnd!(left), opnd!(right))?,
- Insn::FixnumAnd { left, right } => gen_fixnum_and(asm, opnd!(left), opnd!(right))?,
- Insn::FixnumOr { left, right } => gen_fixnum_or(asm, opnd!(left), opnd!(right))?,
- Insn::IsNil { val } => gen_isnil(asm, opnd!(val))?,
- Insn::Test { val } => gen_test(asm, opnd!(val))?,
+ Insn::Return { val } => no_output!(gen_return(asm, opnd!(val))),
+ Insn::FixnumAdd { left, right, state } => gen_fixnum_add(jit, asm, opnd!(left), opnd!(right), &function.frame_state(*state)),
+ Insn::FixnumSub { left, right, state } => gen_fixnum_sub(jit, asm, opnd!(left), opnd!(right), &function.frame_state(*state)),
+ Insn::FixnumMult { left, right, state } => gen_fixnum_mult(jit, asm, opnd!(left), opnd!(right), &function.frame_state(*state)),
+ Insn::FixnumEq { left, right } => gen_fixnum_eq(asm, opnd!(left), opnd!(right)),
+ Insn::FixnumNeq { left, right } => gen_fixnum_neq(asm, opnd!(left), opnd!(right)),
+ Insn::FixnumLt { left, right } => gen_fixnum_lt(asm, opnd!(left), opnd!(right)),
+ Insn::FixnumLe { left, right } => gen_fixnum_le(asm, opnd!(left), opnd!(right)),
+ Insn::FixnumGt { left, right } => gen_fixnum_gt(asm, opnd!(left), opnd!(right)),
+ Insn::FixnumGe { left, right } => gen_fixnum_ge(asm, opnd!(left), opnd!(right)),
+ Insn::FixnumAnd { left, right } => gen_fixnum_and(asm, opnd!(left), opnd!(right)),
+ Insn::FixnumOr { left, right } => gen_fixnum_or(asm, opnd!(left), opnd!(right)),
+ Insn::IsNil { val } => gen_isnil(asm, opnd!(val)),
+ Insn::Test { val } => gen_test(asm, opnd!(val)),
Insn::GuardType { val, guard_type, state } => gen_guard_type(jit, asm, opnd!(val), *guard_type, &function.frame_state(*state))?,
- Insn::GuardBitEquals { val, expected, state } => gen_guard_bit_equals(jit, asm, opnd!(val), *expected, &function.frame_state(*state))?,
- Insn::PatchPoint { invariant, state } => return gen_patch_point(jit, asm, invariant, &function.frame_state(*state)),
- Insn::CCall { cfun, args, name: _, return_type: _, elidable: _ } => gen_ccall(asm, *cfun, opnds!(args))?,
+ Insn::GuardBitEquals { val, expected, state } => gen_guard_bit_equals(jit, asm, opnd!(val), *expected, &function.frame_state(*state)),
+ Insn::PatchPoint { invariant, state } => no_output!(gen_patch_point(jit, asm, invariant, &function.frame_state(*state))),
+ Insn::CCall { cfun, args, name: _, return_type: _, elidable: _ } => gen_ccall(asm, *cfun, opnds!(args)),
Insn::GetIvar { self_val, id, state: _ } => gen_getivar(asm, opnd!(self_val), *id),
- Insn::SetGlobal { id, val, state } => return gen_setglobal(jit, asm, *id, opnd!(val), &function.frame_state(*state)),
+ Insn::SetGlobal { id, val, state } => no_output!(gen_setglobal(jit, asm, *id, opnd!(val), &function.frame_state(*state))),
Insn::GetGlobal { id, state: _ } => gen_getglobal(asm, *id),
- &Insn::GetLocal { ep_offset, level } => gen_getlocal_with_ep(asm, ep_offset, level)?,
- &Insn::SetLocal { val, ep_offset, level } => return gen_setlocal_with_ep(asm, jit, function, val, ep_offset, level),
- Insn::GetConstantPath { ic, state } => gen_get_constant_path(jit, asm, *ic, &function.frame_state(*state))?,
- Insn::SetIvar { self_val, id, val, state: _ } => return gen_setivar(asm, opnd!(self_val), *id, opnd!(val)),
- Insn::SideExit { state, reason } => return gen_side_exit(jit, asm, reason, &function.frame_state(*state)),
+ &Insn::GetLocal { ep_offset, level } => gen_getlocal_with_ep(asm, ep_offset, level),
+ &Insn::SetLocal { val, ep_offset, level } => no_output!(gen_setlocal_with_ep(asm, opnd!(val), function.type_of(val), ep_offset, level)),
+ Insn::GetConstantPath { ic, state } => gen_get_constant_path(jit, asm, *ic, &function.frame_state(*state)),
+ Insn::SetIvar { self_val, id, val, state: _ } => no_output!(gen_setivar(asm, opnd!(self_val), *id, opnd!(val))),
+ Insn::SideExit { state, reason } => no_output!(gen_side_exit(jit, asm, reason, &function.frame_state(*state))),
Insn::PutSpecialObject { value_type } => gen_putspecialobject(asm, *value_type),
- Insn::AnyToString { val, str, state } => gen_anytostring(asm, opnd!(val), opnd!(str), &function.frame_state(*state))?,
- Insn::Defined { op_type, obj, pushval, v, state } => gen_defined(jit, asm, *op_type, *obj, *pushval, opnd!(v), &function.frame_state(*state))?,
+ Insn::AnyToString { val, str, state } => gen_anytostring(asm, opnd!(val), opnd!(str), &function.frame_state(*state)),
+ Insn::Defined { op_type, obj, pushval, v, state } => gen_defined(jit, asm, *op_type, *obj, *pushval, opnd!(v), &function.frame_state(*state)),
Insn::GetSpecialSymbol { symbol_type, state: _ } => gen_getspecial_symbol(asm, *symbol_type),
Insn::GetSpecialNumber { nth, state } => gen_getspecial_number(asm, *nth, &function.frame_state(*state)),
- &Insn::IncrCounter(counter) => return Some(gen_incr_counter(asm, counter)),
- Insn::ObjToString { val, cd, state, .. } => gen_objtostring(jit, asm, opnd!(val), *cd, &function.frame_state(*state))?,
+ &Insn::IncrCounter(counter) => no_output!(gen_incr_counter(asm, counter)),
+ Insn::ObjToString { val, cd, state, .. } => gen_objtostring(jit, asm, opnd!(val), *cd, &function.frame_state(*state)),
Insn::ArrayExtend { .. }
| Insn::ArrayMax { .. }
| Insn::ArrayPush { .. }
@@ -388,7 +397,6 @@ fn gen_insn(cb: &mut CodeBlock, jit: &mut JITState, asm: &mut Assembler, functio
| Insn::FixnumDiv { .. }
| Insn::FixnumMod { .. }
| Insn::HashDup { .. }
- | Insn::NewHash { .. }
| Insn::Send { .. }
| Insn::Throw { .. }
| Insn::ToArray { .. }
@@ -446,8 +454,8 @@ fn gen_get_ep(asm: &mut Assembler, level: u32) -> Opnd {
ep_opnd
}
-fn gen_objtostring(jit: &mut JITState, asm: &mut Assembler, val: Opnd, cd: *const rb_call_data, state: &FrameState) -> Option<Opnd> {
- gen_prepare_non_leaf_call(jit, asm, state)?;
+fn gen_objtostring(jit: &mut JITState, asm: &mut Assembler, val: Opnd, cd: *const rb_call_data, state: &FrameState) -> Opnd {
+ gen_prepare_non_leaf_call(jit, asm, state);
let iseq_opnd = Opnd::Value(jit.iseq.into());
@@ -459,12 +467,12 @@ fn gen_objtostring(jit: &mut JITState, asm: &mut Assembler, val: Opnd, cd: *cons
// Need to replicate what CALL_SIMPLE_METHOD does
asm_comment!(asm, "side-exit if rb_vm_objtostring returns Qundef");
asm.cmp(ret, Qundef.into());
- asm.je(side_exit(jit, state, ObjToStringFallback)?);
+ asm.je(side_exit(jit, state, ObjToStringFallback));
- Some(ret)
+ ret
}
-fn gen_defined(jit: &JITState, asm: &mut Assembler, op_type: usize, obj: VALUE, pushval: VALUE, tested_value: Opnd, state: &FrameState) -> Option<Opnd> {
+fn gen_defined(jit: &JITState, asm: &mut Assembler, op_type: usize, obj: VALUE, pushval: VALUE, tested_value: Opnd, state: &FrameState) -> Opnd {
match op_type as defined_type {
DEFINED_YIELD => {
// `yield` goes to the block handler stowed in the "local" iseq which is
@@ -476,21 +484,21 @@ fn gen_defined(jit: &JITState, asm: &mut Assembler, op_type: usize, obj: VALUE,
let block_handler = asm.load(Opnd::mem(64, lep, SIZEOF_VALUE_I32 * VM_ENV_DATA_INDEX_SPECVAL));
let pushval = asm.load(pushval.into());
asm.cmp(block_handler, VM_BLOCK_HANDLER_NONE.into());
- Some(asm.csel_e(Qnil.into(), pushval.into()))
+ asm.csel_e(Qnil.into(), pushval.into())
} else {
- Some(Qnil.into())
+ Qnil.into()
}
}
_ => {
// Save the PC and SP because the callee may allocate or call #respond_to?
- gen_prepare_non_leaf_call(jit, asm, state)?;
+ gen_prepare_non_leaf_call(jit, asm, state);
// TODO: Inline the cases for each op_type
// Call vm_defined(ec, reg_cfp, op_type, obj, v)
let def_result = asm_ccall!(asm, rb_vm_defined, EC, CFP, op_type.into(), obj.into(), tested_value);
asm.cmp(def_result.with_num_bits(8), 0.into());
- Some(asm.csel_ne(pushval.into(), Qnil.into()))
+ asm.csel_ne(pushval.into(), Qnil.into())
}
}
}
@@ -498,41 +506,40 @@ fn gen_defined(jit: &JITState, asm: &mut Assembler, op_type: usize, obj: VALUE,
/// Get a local variable from a higher scope or the heap. `local_ep_offset` is in number of VALUEs.
/// We generate this instruction with level=0 only when the local variable is on the heap, so we
/// can't optimize the level=0 case using the SP register.
-fn gen_getlocal_with_ep(asm: &mut Assembler, local_ep_offset: u32, level: u32) -> Option<lir::Opnd> {
+fn gen_getlocal_with_ep(asm: &mut Assembler, local_ep_offset: u32, level: u32) -> lir::Opnd {
let ep = gen_get_ep(asm, level);
- let offset = -(SIZEOF_VALUE_I32 * i32::try_from(local_ep_offset).ok()?);
- Some(asm.load(Opnd::mem(64, ep, offset)))
+ let offset = -(SIZEOF_VALUE_I32 * i32::try_from(local_ep_offset).expect(&format!("Could not convert local_ep_offset {local_ep_offset} to i32")));
+ asm.load(Opnd::mem(64, ep, offset))
}
/// Set a local variable from a higher scope or the heap. `local_ep_offset` is in number of VALUEs.
/// We generate this instruction with level=0 only when the local variable is on the heap, so we
/// can't optimize the level=0 case using the SP register.
-fn gen_setlocal_with_ep(asm: &mut Assembler, jit: &JITState, function: &Function, val: InsnId, local_ep_offset: u32, level: u32) -> Option<()> {
+fn gen_setlocal_with_ep(asm: &mut Assembler, val: Opnd, val_type: Type, local_ep_offset: u32, level: u32) {
let ep = gen_get_ep(asm, level);
// When we've proved that we're writing an immediate,
// we can skip the write barrier.
- if function.type_of(val).is_immediate() {
- let offset = -(SIZEOF_VALUE_I32 * i32::try_from(local_ep_offset).ok()?);
- asm.mov(Opnd::mem(64, ep, offset), jit.get_opnd(val)?);
+ if val_type.is_immediate() {
+ let offset = -(SIZEOF_VALUE_I32 * i32::try_from(local_ep_offset).expect(&format!("Could not convert local_ep_offset {local_ep_offset} to i32")));
+ asm.mov(Opnd::mem(64, ep, offset), val);
} else {
// We're potentially writing a reference to an IMEMO/env object,
// so take care of the write barrier with a function.
- let local_index = c_int::try_from(local_ep_offset).ok().and_then(|idx| idx.checked_mul(-1))?;
- asm_ccall!(asm, rb_vm_env_write, ep, local_index.into(), jit.get_opnd(val)?);
+ let local_index = c_int::try_from(local_ep_offset).ok().and_then(|idx| idx.checked_mul(-1)).expect(&format!("Could not turn {local_ep_offset} into a negative c_int"));
+ asm_ccall!(asm, rb_vm_env_write, ep, local_index.into(), val);
}
- Some(())
}
-fn gen_get_constant_path(jit: &JITState, asm: &mut Assembler, ic: *const iseq_inline_constant_cache, state: &FrameState) -> Option<Opnd> {
+fn gen_get_constant_path(jit: &JITState, asm: &mut Assembler, ic: *const iseq_inline_constant_cache, state: &FrameState) -> Opnd {
unsafe extern "C" {
fn rb_vm_opt_getconstant_path(ec: EcPtr, cfp: CfpPtr, ic: *const iseq_inline_constant_cache) -> VALUE;
}
// Anything could be called on const_missing
- gen_prepare_non_leaf_call(jit, asm, state)?;
+ gen_prepare_non_leaf_call(jit, asm, state);
- Some(asm_ccall!(asm, rb_vm_opt_getconstant_path, EC, CFP, Opnd::const_ptr(ic)))
+ asm_ccall!(asm, rb_vm_opt_getconstant_path, EC, CFP, Opnd::const_ptr(ic))
}
fn gen_invokebuiltin(jit: &JITState, asm: &mut Assembler, state: &FrameState, bf: &rb_builtin_function, args: Vec<Opnd>) -> Option<lir::Opnd> {
@@ -543,7 +550,7 @@ fn gen_invokebuiltin(jit: &JITState, asm: &mut Assembler, state: &FrameState, bf
}
// Anything can happen inside builtin functions
- gen_prepare_non_leaf_call(jit, asm, state)?;
+ gen_prepare_non_leaf_call(jit, asm, state);
let mut cargs = vec![EC];
cargs.extend(args);
@@ -554,13 +561,13 @@ fn gen_invokebuiltin(jit: &JITState, asm: &mut Assembler, state: &FrameState, bf
}
/// Record a patch point that should be invalidated on a given invariant
-fn gen_patch_point(jit: &mut JITState, asm: &mut Assembler, invariant: &Invariant, state: &FrameState) -> Option<()> {
+fn gen_patch_point(jit: &mut JITState, asm: &mut Assembler, invariant: &Invariant, state: &FrameState) {
let payload_ptr = get_or_create_iseq_payload_ptr(jit.iseq);
let label = asm.new_label("patch_point").unwrap_label();
let invariant = invariant.clone();
// Compile a side exit. Fill nop instructions if the last patch point is too close.
- asm.patch_point(build_side_exit(jit, state, PatchPoint(invariant), Some(label))?);
+ asm.patch_point(build_side_exit(jit, state, PatchPoint(invariant), Some(label)));
// Remember the current address as a patch point
asm.pos_marker(move |code_ptr, cb| {
@@ -583,13 +590,12 @@ fn gen_patch_point(jit: &mut JITState, asm: &mut Assembler, invariant: &Invarian
}
}
});
- Some(())
}
/// Lowering for [`Insn::CCall`]. This is a low-level raw call that doesn't know
/// anything about the callee, so handling for e.g. GC safety is dealt with elsewhere.
-fn gen_ccall(asm: &mut Assembler, cfun: *const u8, args: Vec<Opnd>) -> Option<lir::Opnd> {
- Some(asm.ccall(cfun, args))
+fn gen_ccall(asm: &mut Assembler, cfun: *const u8, args: Vec<Opnd>) -> lir::Opnd {
+ asm.ccall(cfun, args)
}
/// Emit an uncached instance variable lookup
@@ -598,9 +604,8 @@ fn gen_getivar(asm: &mut Assembler, recv: Opnd, id: ID) -> Opnd {
}
/// Emit an uncached instance variable store
-fn gen_setivar(asm: &mut Assembler, recv: Opnd, id: ID, val: Opnd) -> Option<()> {
+fn gen_setivar(asm: &mut Assembler, recv: Opnd, id: ID, val: Opnd) {
asm_ccall!(asm, rb_ivar_set, recv, id.0.into(), val);
- Some(())
}
/// Look up global variables
@@ -609,25 +614,23 @@ fn gen_getglobal(asm: &mut Assembler, id: ID) -> Opnd {
}
/// Intern a string
-fn gen_intern(asm: &mut Assembler, val: Opnd, state: &FrameState) -> Option<Opnd> {
+fn gen_intern(asm: &mut Assembler, val: Opnd, state: &FrameState) -> Opnd {
gen_prepare_call_with_gc(asm, state);
- Some(asm_ccall!(asm, rb_str_intern, val))
+ asm_ccall!(asm, rb_str_intern, val)
}
/// Set global variables
-fn gen_setglobal(jit: &mut JITState, asm: &mut Assembler, id: ID, val: Opnd, state: &FrameState) -> Option<()> {
+fn gen_setglobal(jit: &mut JITState, asm: &mut Assembler, id: ID, val: Opnd, state: &FrameState) {
// When trace_var is used, setting a global variable can cause exceptions
- gen_prepare_non_leaf_call(jit, asm, state)?;
+ gen_prepare_non_leaf_call(jit, asm, state);
asm_ccall!(asm, rb_gvar_set, id.0.into(), val);
- Some(())
}
/// Side-exit into the interpreter
-fn gen_side_exit(jit: &mut JITState, asm: &mut Assembler, reason: &SideExitReason, state: &FrameState) -> Option<()> {
- asm.jmp(side_exit(jit, state, *reason)?);
- Some(())
+fn gen_side_exit(jit: &mut JITState, asm: &mut Assembler, reason: &SideExitReason, state: &FrameState) {
+ asm.jmp(side_exit(jit, state, *reason));
}
/// Emit a special object lookup
@@ -742,11 +745,11 @@ fn gen_branch_params(jit: &mut JITState, asm: &mut Assembler, branch: &BranchEdg
match param_opnd(idx) {
Opnd::Reg(reg) => {
// If a parameter is a register, we need to parallel-move it
- moves.push((reg, jit.get_opnd(arg)?));
+ moves.push((reg, jit.get_opnd(arg)));
},
param => {
// If a parameter is memory, we set it beforehand
- asm.mov(param, jit.get_opnd(arg)?);
+ asm.mov(param, jit.get_opnd(arg));
}
}
}
@@ -795,18 +798,17 @@ fn gen_param(asm: &mut Assembler, idx: usize) -> lir::Opnd {
}
/// Compile a jump to a basic block
-fn gen_jump(jit: &mut JITState, asm: &mut Assembler, branch: &BranchEdge) -> Option<()> {
+fn gen_jump(jit: &mut JITState, asm: &mut Assembler, branch: &BranchEdge) {
// Set basic block arguments
gen_branch_params(jit, asm, branch);
// Jump to the basic block
let target = jit.get_label(asm, branch.target);
asm.jmp(target);
- Some(())
}
/// Compile a conditional branch to a basic block
-fn gen_if_true(jit: &mut JITState, asm: &mut Assembler, val: lir::Opnd, branch: &BranchEdge) -> Option<()> {
+fn gen_if_true(jit: &mut JITState, asm: &mut Assembler, val: lir::Opnd, branch: &BranchEdge) {
// If val is zero, move on to the next instruction.
let if_false = asm.new_label("if_false");
asm.test(val, val);
@@ -819,12 +821,10 @@ fn gen_if_true(jit: &mut JITState, asm: &mut Assembler, val: lir::Opnd, branch:
asm.jmp(if_true);
asm.write_label(if_false);
-
- Some(())
}
/// Compile a conditional branch to a basic block
-fn gen_if_false(jit: &mut JITState, asm: &mut Assembler, val: lir::Opnd, branch: &BranchEdge) -> Option<()> {
+fn gen_if_false(jit: &mut JITState, asm: &mut Assembler, val: lir::Opnd, branch: &BranchEdge) {
// If val is not zero, move on to the next instruction.
let if_true = asm.new_label("if_true");
asm.test(val, val);
@@ -837,8 +837,6 @@ fn gen_if_false(jit: &mut JITState, asm: &mut Assembler, val: lir::Opnd, branch:
asm.jmp(if_false);
asm.write_label(if_true);
-
- Some(())
}
/// Compile a dynamic dispatch without block
@@ -849,8 +847,8 @@ fn gen_send_without_block(
state: &FrameState,
self_val: Opnd,
args: Vec<Opnd>,
-) -> Option<lir::Opnd> {
- gen_spill_locals(jit, asm, state)?;
+) -> lir::Opnd {
+ gen_spill_locals(jit, asm, state);
// Spill the receiver and the arguments onto the stack.
// They need to be on the interpreter stack to let the interpreter access them.
// TODO: Avoid spilling operands that have been spilled before.
@@ -879,7 +877,7 @@ fn gen_send_without_block(
// TODO(max): Add a PatchPoint here that can side-exit the function if the callee messed with
// the frame's locals
- Some(ret)
+ ret
}
/// Compile a direct jump to an ISEQ call without block
@@ -892,13 +890,13 @@ fn gen_send_without_block_direct(
recv: Opnd,
args: Vec<Opnd>,
state: &FrameState,
-) -> Option<lir::Opnd> {
+) -> lir::Opnd {
// Save cfp->pc and cfp->sp for the caller frame
gen_save_pc(asm, state);
gen_save_sp(asm, state.stack().len() - args.len() - 1); // -1 for receiver
- gen_spill_locals(jit, asm, state)?;
- gen_spill_stack(jit, asm, state)?;
+ gen_spill_locals(jit, asm, state);
+ gen_spill_stack(jit, asm, state);
// Set up the new frame
// TODO: Lazily materialize caller frames on side exits or when needed
@@ -944,7 +942,7 @@ fn gen_send_without_block_direct(
let new_sp = asm.sub(SP, sp_offset.into());
asm.mov(SP, new_sp);
- Some(ret)
+ ret
}
/// Compile a string resurrection
@@ -974,7 +972,7 @@ fn gen_new_array(
) -> lir::Opnd {
gen_prepare_call_with_gc(asm, state);
- let length: ::std::os::raw::c_long = elements.len().try_into().expect("Unable to fit length of elements into c_long");
+ let length: c_long = elements.len().try_into().expect("Unable to fit length of elements into c_long");
let new_array = asm_ccall!(asm, rb_ary_new_capa, length.into());
@@ -985,6 +983,61 @@ fn gen_new_array(
new_array
}
+/// Compile a new hash instruction
+fn gen_new_hash(
+ jit: &mut JITState,
+ asm: &mut Assembler,
+ elements: &Vec<(InsnId, InsnId)>,
+ state: &FrameState,
+) -> lir::Opnd {
+ gen_prepare_non_leaf_call(jit, asm, state);
+
+ let cap: c_long = elements.len().try_into().expect("Unable to fit length of elements into c_long");
+ let new_hash = asm_ccall!(asm, rb_hash_new_with_size, lir::Opnd::Imm(cap));
+
+ if !elements.is_empty() {
+ let mut pairs = Vec::new();
+ for (key_id, val_id) in elements.iter() {
+ let key = jit.get_opnd(*key_id);
+ let val = jit.get_opnd(*val_id);
+ pairs.push(key);
+ pairs.push(val);
+ }
+
+ let n = pairs.len();
+
+ // Calculate the compile-time NATIVE_STACK_PTR offset from NATIVE_BASE_PTR
+ // At this point, frame_setup(&[], jit.c_stack_slots) has been called,
+ // which allocated aligned_stack_bytes(jit.c_stack_slots) on the stack
+ let frame_size = aligned_stack_bytes(jit.c_stack_slots);
+ let allocation_size = aligned_stack_bytes(n);
+
+ asm_comment!(asm, "allocate {} bytes on C stack for {} hash elements", allocation_size, n);
+ asm.sub_into(NATIVE_STACK_PTR, allocation_size.into());
+
+ // Calculate the total offset from NATIVE_BASE_PTR to our buffer
+ let total_offset_from_base = (frame_size + allocation_size) as i32;
+
+ for (idx, &pair_opnd) in pairs.iter().enumerate() {
+ let slot_offset = -total_offset_from_base + (idx as i32 * SIZEOF_VALUE_I32);
+ asm.mov(
+ Opnd::mem(VALUE_BITS, NATIVE_BASE_PTR, slot_offset),
+ pair_opnd
+ );
+ }
+
+ let argv = asm.lea(Opnd::mem(64, NATIVE_BASE_PTR, -total_offset_from_base));
+
+ let argc = (elements.len() * 2) as ::std::os::raw::c_long;
+ asm_ccall!(asm, rb_hash_bulk_insert, lir::Opnd::Imm(argc), argv, new_hash);
+
+ asm_comment!(asm, "restore C stack pointer");
+ asm.add_into(NATIVE_STACK_PTR, allocation_size.into());
+ }
+
+ new_hash
+}
+
/// Compile a new range instruction
fn gen_new_range(
asm: &mut Assembler,
@@ -1000,7 +1053,7 @@ fn gen_new_range(
}
/// Compile code that exits from JIT code with a return value
-fn gen_return(asm: &mut Assembler, val: lir::Opnd) -> Option<()> {
+fn gen_return(asm: &mut Assembler, val: lir::Opnd) {
// Pop the current frame (ec->cfp++)
// Note: the return PC is already in the previous CFP
asm_comment!(asm, "pop stack frame");
@@ -1015,31 +1068,28 @@ fn gen_return(asm: &mut Assembler, val: lir::Opnd) -> Option<()> {
// Return from the function
asm.frame_teardown(&[]); // matching the setup in :bb0-prologue:
asm.cret(C_RET_OPND);
- Some(())
}
/// Compile Fixnum + Fixnum
-fn gen_fixnum_add(jit: &mut JITState, asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd, state: &FrameState) -> Option<lir::Opnd> {
+fn gen_fixnum_add(jit: &mut JITState, asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd, state: &FrameState) -> lir::Opnd {
// Add left + right and test for overflow
let left_untag = asm.sub(left, Opnd::Imm(1));
let out_val = asm.add(left_untag, right);
- asm.jo(side_exit(jit, state, FixnumAddOverflow)?);
+ asm.jo(side_exit(jit, state, FixnumAddOverflow));
- Some(out_val)
+ out_val
}
/// Compile Fixnum - Fixnum
-fn gen_fixnum_sub(jit: &mut JITState, asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd, state: &FrameState) -> Option<lir::Opnd> {
+fn gen_fixnum_sub(jit: &mut JITState, asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd, state: &FrameState) -> lir::Opnd {
// Subtract left - right and test for overflow
let val_untag = asm.sub(left, right);
- asm.jo(side_exit(jit, state, FixnumSubOverflow)?);
- let out_val = asm.add(val_untag, Opnd::Imm(1));
-
- Some(out_val)
+ asm.jo(side_exit(jit, state, FixnumSubOverflow));
+ asm.add(val_untag, Opnd::Imm(1))
}
/// Compile Fixnum * Fixnum
-fn gen_fixnum_mult(jit: &mut JITState, asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd, state: &FrameState) -> Option<lir::Opnd> {
+fn gen_fixnum_mult(jit: &mut JITState, asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd, state: &FrameState) -> lir::Opnd {
// Do some bitwise gymnastics to handle tag bits
// x * y is translated to (x >> 1) * (y - 1) + 1
let left_untag = asm.rshift(left, Opnd::UImm(1));
@@ -1047,107 +1097,105 @@ fn gen_fixnum_mult(jit: &mut JITState, asm: &mut Assembler, left: lir::Opnd, rig
let out_val = asm.mul(left_untag, right_untag);
// Test for overflow
- asm.jo_mul(side_exit(jit, state, FixnumMultOverflow)?);
- let out_val = asm.add(out_val, Opnd::UImm(1));
-
- Some(out_val)
+ asm.jo_mul(side_exit(jit, state, FixnumMultOverflow));
+ asm.add(out_val, Opnd::UImm(1))
}
/// Compile Fixnum == Fixnum
-fn gen_fixnum_eq(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> Option<lir::Opnd> {
+fn gen_fixnum_eq(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> lir::Opnd {
asm.cmp(left, right);
- Some(asm.csel_e(Qtrue.into(), Qfalse.into()))
+ asm.csel_e(Qtrue.into(), Qfalse.into())
}
/// Compile Fixnum != Fixnum
-fn gen_fixnum_neq(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> Option<lir::Opnd> {
+fn gen_fixnum_neq(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> lir::Opnd {
asm.cmp(left, right);
- Some(asm.csel_ne(Qtrue.into(), Qfalse.into()))
+ asm.csel_ne(Qtrue.into(), Qfalse.into())
}
/// Compile Fixnum < Fixnum
-fn gen_fixnum_lt(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> Option<lir::Opnd> {
+fn gen_fixnum_lt(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> lir::Opnd {
asm.cmp(left, right);
- Some(asm.csel_l(Qtrue.into(), Qfalse.into()))
+ asm.csel_l(Qtrue.into(), Qfalse.into())
}
/// Compile Fixnum <= Fixnum
-fn gen_fixnum_le(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> Option<lir::Opnd> {
+fn gen_fixnum_le(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> lir::Opnd {
asm.cmp(left, right);
- Some(asm.csel_le(Qtrue.into(), Qfalse.into()))
+ asm.csel_le(Qtrue.into(), Qfalse.into())
}
/// Compile Fixnum > Fixnum
-fn gen_fixnum_gt(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> Option<lir::Opnd> {
+fn gen_fixnum_gt(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> lir::Opnd {
asm.cmp(left, right);
- Some(asm.csel_g(Qtrue.into(), Qfalse.into()))
+ asm.csel_g(Qtrue.into(), Qfalse.into())
}
/// Compile Fixnum >= Fixnum
-fn gen_fixnum_ge(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> Option<lir::Opnd> {
+fn gen_fixnum_ge(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> lir::Opnd {
asm.cmp(left, right);
- Some(asm.csel_ge(Qtrue.into(), Qfalse.into()))
+ asm.csel_ge(Qtrue.into(), Qfalse.into())
}
/// Compile Fixnum & Fixnum
-fn gen_fixnum_and(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> Option<lir::Opnd> {
- Some(asm.and(left, right))
+fn gen_fixnum_and(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> lir::Opnd {
+ asm.and(left, right)
}
/// Compile Fixnum | Fixnum
-fn gen_fixnum_or(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> Option<lir::Opnd> {
- Some(asm.or(left, right))
+fn gen_fixnum_or(asm: &mut Assembler, left: lir::Opnd, right: lir::Opnd) -> lir::Opnd {
+ asm.or(left, right)
}
// Compile val == nil
-fn gen_isnil(asm: &mut Assembler, val: lir::Opnd) -> Option<lir::Opnd> {
+fn gen_isnil(asm: &mut Assembler, val: lir::Opnd) -> lir::Opnd {
asm.cmp(val, Qnil.into());
// TODO: Implement and use setcc
- Some(asm.csel_e(Opnd::Imm(1), Opnd::Imm(0)))
+ asm.csel_e(Opnd::Imm(1), Opnd::Imm(0))
}
-fn gen_anytostring(asm: &mut Assembler, val: lir::Opnd, str: lir::Opnd, state: &FrameState) -> Option<lir::Opnd> {
+fn gen_anytostring(asm: &mut Assembler, val: lir::Opnd, str: lir::Opnd, state: &FrameState) -> lir::Opnd {
gen_prepare_call_with_gc(asm, state);
- Some(asm_ccall!(asm, rb_obj_as_string_result, str, val))
+ asm_ccall!(asm, rb_obj_as_string_result, str, val)
}
/// Evaluate if a value is truthy
/// Produces a CBool type (0 or 1)
/// In Ruby, only nil and false are falsy
/// Everything else evaluates to true
-fn gen_test(asm: &mut Assembler, val: lir::Opnd) -> Option<lir::Opnd> {
+fn gen_test(asm: &mut Assembler, val: lir::Opnd) -> lir::Opnd {
// Test if any bit (outside of the Qnil bit) is on
// See RB_TEST(), include/ruby/internal/special_consts.h
asm.test(val, Opnd::Imm(!Qnil.as_i64()));
- Some(asm.csel_e(0.into(), 1.into()))
+ asm.csel_e(0.into(), 1.into())
}
/// Compile a type check with a side exit
fn gen_guard_type(jit: &mut JITState, asm: &mut Assembler, val: lir::Opnd, guard_type: Type, state: &FrameState) -> Option<lir::Opnd> {
if guard_type.is_subtype(types::Fixnum) {
asm.test(val, Opnd::UImm(RUBY_FIXNUM_FLAG as u64));
- asm.jz(side_exit(jit, state, GuardType(guard_type))?);
+ asm.jz(side_exit(jit, state, GuardType(guard_type)));
} else if guard_type.is_subtype(types::Flonum) {
// Flonum: (val & RUBY_FLONUM_MASK) == RUBY_FLONUM_FLAG
let masked = asm.and(val, Opnd::UImm(RUBY_FLONUM_MASK as u64));
asm.cmp(masked, Opnd::UImm(RUBY_FLONUM_FLAG as u64));
- asm.jne(side_exit(jit, state, GuardType(guard_type))?);
+ asm.jne(side_exit(jit, state, GuardType(guard_type)));
} else if guard_type.is_subtype(types::StaticSymbol) {
// Static symbols have (val & 0xff) == RUBY_SYMBOL_FLAG
// Use 8-bit comparison like YJIT does
debug_assert!(val.try_num_bits(8).is_some(), "GuardType should not be used for a known constant, but val was: {val:?}");
asm.cmp(val.try_num_bits(8)?, Opnd::UImm(RUBY_SYMBOL_FLAG as u64));
- asm.jne(side_exit(jit, state, GuardType(guard_type))?);
+ asm.jne(side_exit(jit, state, GuardType(guard_type)));
} else if guard_type.is_subtype(types::NilClass) {
asm.cmp(val, Qnil.into());
- asm.jne(side_exit(jit, state, GuardType(guard_type))?);
+ asm.jne(side_exit(jit, state, GuardType(guard_type)));
} else if guard_type.is_subtype(types::TrueClass) {
asm.cmp(val, Qtrue.into());
- asm.jne(side_exit(jit, state, GuardType(guard_type))?);
+ asm.jne(side_exit(jit, state, GuardType(guard_type)));
} else if guard_type.is_subtype(types::FalseClass) {
asm.cmp(val, Qfalse.into());
- asm.jne(side_exit(jit, state, GuardType(guard_type))?);
+ asm.jne(side_exit(jit, state, GuardType(guard_type)));
} else if guard_type.is_immediate() {
// All immediate types' guard should have been handled above
panic!("unexpected immediate guard type: {guard_type}");
@@ -1162,7 +1210,7 @@ fn gen_guard_type(jit: &mut JITState, asm: &mut Assembler, val: lir::Opnd, guard
};
// Check if it's a special constant
- let side_exit = side_exit(jit, state, GuardType(guard_type))?;
+ let side_exit = side_exit(jit, state, GuardType(guard_type));
asm.test(val, (RUBY_IMMEDIATE_MASK as u64).into());
asm.jnz(side_exit.clone());
@@ -1182,14 +1230,14 @@ fn gen_guard_type(jit: &mut JITState, asm: &mut Assembler, val: lir::Opnd, guard
}
/// Compile an identity check with a side exit
-fn gen_guard_bit_equals(jit: &mut JITState, asm: &mut Assembler, val: lir::Opnd, expected: VALUE, state: &FrameState) -> Option<lir::Opnd> {
+fn gen_guard_bit_equals(jit: &mut JITState, asm: &mut Assembler, val: lir::Opnd, expected: VALUE, state: &FrameState) -> lir::Opnd {
asm.cmp(val, Opnd::Value(expected));
- asm.jnz(side_exit(jit, state, GuardBitEquals(expected))?);
- Some(val)
+ asm.jnz(side_exit(jit, state, GuardBitEquals(expected)));
+ val
}
/// Generate code that increments a counter in ZJIT stats
-fn gen_incr_counter(asm: &mut Assembler, counter: Counter) -> () {
+fn gen_incr_counter(asm: &mut Assembler, counter: Counter) {
let ptr = counter_ptr(counter);
let ptr_reg = asm.load(Opnd::const_ptr(ptr as *const u8));
let counter_opnd = Opnd::mem(64, ptr_reg, 0);
@@ -1221,30 +1269,27 @@ fn gen_save_sp(asm: &mut Assembler, stack_size: usize) {
}
/// Spill locals onto the stack.
-fn gen_spill_locals(jit: &JITState, asm: &mut Assembler, state: &FrameState) -> Option<()> {
+fn gen_spill_locals(jit: &JITState, asm: &mut Assembler, state: &FrameState) {
// TODO: Avoid spilling locals that have been spilled before and not changed.
asm_comment!(asm, "spill locals");
for (idx, &insn_id) in state.locals().enumerate() {
- asm.mov(Opnd::mem(64, SP, (-local_idx_to_ep_offset(jit.iseq, idx) - 1) * SIZEOF_VALUE_I32), jit.get_opnd(insn_id)?);
+ asm.mov(Opnd::mem(64, SP, (-local_idx_to_ep_offset(jit.iseq, idx) - 1) * SIZEOF_VALUE_I32), jit.get_opnd(insn_id));
}
- Some(())
}
/// Spill the virtual stack onto the stack.
-fn gen_spill_stack(jit: &JITState, asm: &mut Assembler, state: &FrameState) -> Option<()> {
+fn gen_spill_stack(jit: &JITState, asm: &mut Assembler, state: &FrameState) {
// This function does not call gen_save_sp() at the moment because
// gen_send_without_block_direct() spills stack slots above SP for arguments.
asm_comment!(asm, "spill stack");
for (idx, &insn_id) in state.stack().enumerate() {
- asm.mov(Opnd::mem(64, SP, idx as i32 * SIZEOF_VALUE_I32), jit.get_opnd(insn_id)?);
+ asm.mov(Opnd::mem(64, SP, idx as i32 * SIZEOF_VALUE_I32), jit.get_opnd(insn_id));
}
- Some(())
}
/// Prepare for calling a C function that may call an arbitrary method.
/// Use gen_prepare_call_with_gc() if the method is leaf but allocates objects.
-#[must_use]
-fn gen_prepare_non_leaf_call(jit: &JITState, asm: &mut Assembler, state: &FrameState) -> Option<()> {
+fn gen_prepare_non_leaf_call(jit: &JITState, asm: &mut Assembler, state: &FrameState) {
// TODO: Lazily materialize caller frames when needed
// Save PC for backtraces and allocation tracing
gen_save_pc(asm, state);
@@ -1252,11 +1297,10 @@ fn gen_prepare_non_leaf_call(jit: &JITState, asm: &mut Assembler, state: &FrameS
// Save SP and spill the virtual stack in case it raises an exception
// and the interpreter uses the stack for handling the exception
gen_save_sp(asm, state.stack().len());
- gen_spill_stack(jit, asm, state)?;
+ gen_spill_stack(jit, asm, state);
// Spill locals in case the method looks at caller Bindings
- gen_spill_locals(jit, asm, state)?;
- Some(())
+ gen_spill_locals(jit, asm, state);
}
/// Prepare for calling a C function that may allocate objects and trigger GC.
@@ -1353,20 +1397,20 @@ fn compile_iseq(iseq: IseqPtr) -> Option<Function> {
}
/// Build a Target::SideExit for non-PatchPoint instructions
-fn side_exit(jit: &mut JITState, state: &FrameState, reason: SideExitReason) -> Option<Target> {
+fn side_exit(jit: &mut JITState, state: &FrameState, reason: SideExitReason) -> Target {
build_side_exit(jit, state, reason, None)
}
/// Build a Target::SideExit out of a FrameState
-fn build_side_exit(jit: &mut JITState, state: &FrameState, reason: SideExitReason, label: Option<Label>) -> Option<Target> {
+fn build_side_exit(jit: &mut JITState, state: &FrameState, reason: SideExitReason, label: Option<Label>) -> Target {
let mut stack = Vec::new();
for &insn_id in state.stack() {
- stack.push(jit.get_opnd(insn_id)?);
+ stack.push(jit.get_opnd(insn_id));
}
let mut locals = Vec::new();
for &insn_id in state.locals() {
- locals.push(jit.get_opnd(insn_id)?);
+ locals.push(jit.get_opnd(insn_id));
}
let target = Target::SideExit {
@@ -1376,7 +1420,7 @@ fn build_side_exit(jit: &mut JITState, state: &FrameState, reason: SideExitReaso
reason,
label,
};
- Some(target)
+ target
}
/// Return true if a given ISEQ is known to escape EP to the heap on entry.
@@ -1429,9 +1473,9 @@ c_callable! {
with_vm_lock(src_loc!(), || {
// gen_push_frame() doesn't set PC and SP, so we need to set them before exit.
// function_stub_hit_body() may allocate and call gc_validate_pc(), so we always set PC.
- let iseq_call = unsafe { Rc::from_raw(iseq_call_ptr as *const IseqCall) };
+ let iseq_call = unsafe { Rc::from_raw(iseq_call_ptr as *const RefCell<IseqCall>) };
let cfp = unsafe { get_ec_cfp(ec) };
- let pc = unsafe { rb_iseq_pc_at_idx(iseq_call.iseq, 0) }; // TODO: handle opt_pc once supported
+ let pc = unsafe { rb_iseq_pc_at_idx(iseq_call.borrow().iseq, 0) }; // TODO: handle opt_pc once supported
unsafe { rb_set_cfp_pc(cfp, pc) };
unsafe { rb_set_cfp_sp(cfp, sp) };
@@ -1439,10 +1483,10 @@ c_callable! {
// TODO: Alan thinks the payload status part of this check can happen without the VM lock, since the whole
// code path can be made read-only. But you still need the check as is while holding the VM lock in any case.
let cb = ZJITState::get_code_block();
- let payload = get_or_create_iseq_payload(iseq_call.iseq);
+ let payload = get_or_create_iseq_payload(iseq_call.borrow().iseq);
if cb.has_dropped_bytes() || payload.status == IseqStatus::CantCompile {
// We'll use this Rc again, so increment the ref count decremented by from_raw.
- unsafe { Rc::increment_strong_count(iseq_call_ptr as *const IseqCall); }
+ unsafe { Rc::increment_strong_count(iseq_call_ptr as *const RefCell<IseqCall>); }
// Exit to the interpreter
return ZJITState::get_exit_trampoline().raw_ptr(cb);
@@ -1463,22 +1507,23 @@ c_callable! {
}
/// Compile an ISEQ for a function stub
-fn function_stub_hit_body(cb: &mut CodeBlock, iseq_call: &Rc<IseqCall>) -> Option<CodePtr> {
+fn function_stub_hit_body(cb: &mut CodeBlock, iseq_call: &Rc<RefCell<IseqCall>>) -> Option<CodePtr> {
// Compile the stubbed ISEQ
- let Some((code_ptr, iseq_calls)) = gen_iseq(cb, iseq_call.iseq) else {
- debug!("Failed to compile iseq: gen_iseq failed: {}", iseq_get_location(iseq_call.iseq, 0));
+ let Some((code_ptr, iseq_calls)) = gen_iseq(cb, iseq_call.borrow().iseq) else {
+ debug!("Failed to compile iseq: gen_iseq failed: {}", iseq_get_location(iseq_call.borrow().iseq, 0));
return None;
};
// Stub callee ISEQs for JIT-to-JIT calls
- for callee_iseq_call in iseq_calls.into_iter() {
- gen_iseq_call(cb, iseq_call.iseq, callee_iseq_call)?;
+ for callee_iseq_call in iseq_calls.iter() {
+ gen_iseq_call(cb, iseq_call.borrow().iseq, callee_iseq_call)?;
}
// Update the stub to call the code pointer
let code_addr = code_ptr.raw_ptr(cb);
- iseq_call.regenerate(cb, |asm| {
- asm_comment!(asm, "call compiled function: {}", iseq_get_location(iseq_call.iseq, 0));
+ let iseq = iseq_call.borrow().iseq;
+ iseq_call.borrow_mut().regenerate(cb, |asm| {
+ asm_comment!(asm, "call compiled function: {}", iseq_get_location(iseq, 0));
asm.ccall(code_addr, vec![]);
});
@@ -1486,9 +1531,9 @@ fn function_stub_hit_body(cb: &mut CodeBlock, iseq_call: &Rc<IseqCall>) -> Optio
}
/// Compile a stub for an ISEQ called by SendWithoutBlockDirect
-fn gen_function_stub(cb: &mut CodeBlock, iseq_call: Rc<IseqCall>) -> Option<CodePtr> {
+fn gen_function_stub(cb: &mut CodeBlock, iseq_call: Rc<RefCell<IseqCall>>) -> Option<CodePtr> {
let mut asm = Assembler::new();
- asm_comment!(asm, "Stub: {}", iseq_get_location(iseq_call.iseq, 0));
+ asm_comment!(asm, "Stub: {}", iseq_get_location(iseq_call.borrow().iseq, 0));
// Call function_stub_hit using the shared trampoline. See `gen_function_stub_hit_trampoline`.
// Use load_into instead of mov, which is split on arm64, to avoid clobbering ALLOC_REGS.
@@ -1550,21 +1595,14 @@ pub fn gen_exit_trampoline(cb: &mut CodeBlock) -> Option<CodePtr> {
})
}
-fn gen_string_concat(jit: &mut JITState, asm: &mut Assembler, strings: Vec<Opnd>, state: &FrameState) -> Option<Opnd> {
- let n = strings.len();
-
- // concatstrings shouldn't have 0 strings
- // If it happens we abort the compilation for now
- if n == 0 {
- return None;
- }
-
- gen_prepare_non_leaf_call(jit, asm, state)?;
+fn gen_string_concat(jit: &mut JITState, asm: &mut Assembler, strings: Vec<Opnd>, state: &FrameState) -> Opnd {
+ gen_prepare_non_leaf_call(jit, asm, state);
// Calculate the compile-time NATIVE_STACK_PTR offset from NATIVE_BASE_PTR
// At this point, frame_setup(&[], jit.c_stack_slots) has been called,
// which allocated aligned_stack_bytes(jit.c_stack_slots) on the stack
let frame_size = aligned_stack_bytes(jit.c_stack_slots);
+ let n = strings.len();
let allocation_size = aligned_stack_bytes(n);
asm_comment!(asm, "allocate {} bytes on C stack for {} strings", allocation_size, n);
@@ -1588,7 +1626,7 @@ fn gen_string_concat(jit: &mut JITState, asm: &mut Assembler, strings: Vec<Opnd>
asm_comment!(asm, "restore C stack pointer");
asm.add_into(NATIVE_STACK_PTR, allocation_size.into());
- Some(result)
+ result
}
/// Given the number of spill slots needed for a function, return the number of bytes
@@ -1602,7 +1640,7 @@ fn aligned_stack_bytes(num_slots: usize) -> usize {
impl Assembler {
/// Make a C call while marking the start and end positions for IseqCall
- fn ccall_with_iseq_call(&mut self, fptr: *const u8, opnds: Vec<Opnd>, iseq_call: &Rc<IseqCall>) -> Opnd {
+ fn ccall_with_iseq_call(&mut self, fptr: *const u8, opnds: Vec<Opnd>, iseq_call: &Rc<RefCell<IseqCall>>) -> Opnd {
// We need to create our own branch rc objects so that we can move the closure below
let start_iseq_call = iseq_call.clone();
let end_iseq_call = iseq_call.clone();
@@ -1611,10 +1649,10 @@ impl Assembler {
fptr,
opnds,
move |code_ptr, _| {
- start_iseq_call.start_addr.set(Some(code_ptr));
+ start_iseq_call.borrow_mut().start_addr.set(Some(code_ptr));
},
move |code_ptr, _| {
- end_iseq_call.end_addr.set(Some(code_ptr));
+ end_iseq_call.borrow_mut().end_addr.set(Some(code_ptr));
},
)
}
@@ -1622,9 +1660,9 @@ impl Assembler {
/// Store info about a JIT-to-JIT call
#[derive(Debug)]
-struct IseqCall {
+pub struct IseqCall {
/// Callee ISEQ that start_addr jumps to
- iseq: IseqPtr,
+ pub iseq: IseqPtr,
/// Position where the call instruction starts
start_addr: Cell<Option<CodePtr>>,
@@ -1635,12 +1673,13 @@ struct IseqCall {
impl IseqCall {
/// Allocate a new IseqCall
- fn new(iseq: IseqPtr) -> Rc<Self> {
- Rc::new(IseqCall {
+ fn new(iseq: IseqPtr) -> Rc<RefCell<Self>> {
+ let iseq_call = IseqCall {
iseq,
start_addr: Cell::new(None),
end_addr: Cell::new(None),
- })
+ };
+ Rc::new(RefCell::new(iseq_call))
}
    /// Regenerate an IseqCall with a given callback
diff --git a/zjit/src/gc.rs b/zjit/src/gc.rs
index 52a036d49e..3462b80232 100644
--- a/zjit/src/gc.rs
+++ b/zjit/src/gc.rs
@@ -1,6 +1,9 @@
// This module is responsible for marking/moving objects on GC.
+use std::cell::RefCell;
+use std::rc::Rc;
use std::{ffi::c_void, ops::Range};
+use crate::codegen::IseqCall;
use crate::{cruby::*, profile::IseqProfile, state::ZJITState, stats::with_time_stat, virtualmem::CodePtr};
use crate::stats::Counter::gc_time_ns;
@@ -15,6 +18,9 @@ pub struct IseqPayload {
/// GC offsets of the JIT code. These are the addresses of objects that need to be marked.
pub gc_offsets: Vec<CodePtr>,
+
+ /// JIT-to-JIT calls in the ISEQ. The IseqPayload's ISEQ is the caller of these calls.
+ pub iseq_calls: Vec<Rc<RefCell<IseqCall>>>,
}
impl IseqPayload {
@@ -23,6 +29,7 @@ impl IseqPayload {
status: IseqStatus::NotCompiled,
profile: IseqProfile::new(iseq_size),
gc_offsets: vec![],
+ iseq_calls: vec![],
}
}
}
@@ -112,6 +119,16 @@ pub extern "C" fn rb_zjit_iseq_update_references(payload: *mut c_void) {
with_time_stat(gc_time_ns, || iseq_update_references(payload));
}
+/// GC callback for updating object references after all object moves
+#[unsafe(no_mangle)]
+pub extern "C" fn rb_zjit_root_update_references() {
+ if !ZJITState::has_instance() {
+ return;
+ }
+ let invariants = ZJITState::get_invariants();
+ invariants.update_references();
+}
+
fn iseq_mark(payload: &IseqPayload) {
// Mark objects retained by profiling instructions
payload.profile.each_object(|object| {
@@ -135,10 +152,22 @@ fn iseq_mark(payload: &IseqPayload) {
/// This is a mirror of [iseq_mark].
fn iseq_update_references(payload: &mut IseqPayload) {
// Move objects retained by profiling instructions
- payload.profile.each_object_mut(|object| {
- *object = unsafe { rb_gc_location(*object) };
+ payload.profile.each_object_mut(|old_object| {
+ let new_object = unsafe { rb_gc_location(*old_object) };
+ if *old_object != new_object {
+ *old_object = new_object;
+ }
});
+ // Move ISEQ references in IseqCall
+ for iseq_call in payload.iseq_calls.iter_mut() {
+ let old_iseq = iseq_call.borrow().iseq;
+ let new_iseq = unsafe { rb_gc_location(VALUE(old_iseq as usize)) }.0 as IseqPtr;
+ if old_iseq != new_iseq {
+ iseq_call.borrow_mut().iseq = new_iseq;
+ }
+ }
+
// Move objects baked in JIT code
let cb = ZJITState::get_code_block();
for &offset in payload.gc_offsets.iter() {
diff --git a/zjit/src/hir.rs b/zjit/src/hir.rs
index c50c1ce985..afe358ec1d 100644
--- a/zjit/src/hir.rs
+++ b/zjit/src/hir.rs
@@ -439,6 +439,7 @@ pub enum SideExitReason {
CalleeSideExit,
ObjToStringFallback,
UnknownSpecialVariable(u64),
+ UnhandledDefinedType(usize),
}
impl std::fmt::Display for SideExitReason {
@@ -1330,7 +1331,7 @@ impl Function {
Insn::SendWithoutBlockDirect { .. } => types::BasicObject,
Insn::Send { .. } => types::BasicObject,
Insn::InvokeBuiltin { return_type, .. } => return_type.unwrap_or(types::BasicObject),
- Insn::Defined { .. } => types::BasicObject,
+ Insn::Defined { pushval, .. } => Type::from_value(*pushval).union(types::NilClass),
Insn::DefinedIvar { .. } => types::BasicObject,
Insn::GetConstantPath { .. } => types::BasicObject,
Insn::ArrayMax { .. } => types::BasicObject,
@@ -1940,12 +1941,12 @@ impl Function {
}
| &Insn::Return { val }
| &Insn::Throw { val, .. }
- | &Insn::Defined { v: val, .. }
| &Insn::Test { val }
| &Insn::SetLocal { val, .. }
| &Insn::IsNil { val } =>
worklist.push_back(val),
&Insn::SetGlobal { val, state, .. }
+ | &Insn::Defined { v: val, state, .. }
| &Insn::StringIntern { val, state }
| &Insn::StringCopy { val, state, .. }
| &Insn::GuardType { val, state, .. }
@@ -2957,6 +2958,11 @@ pub fn iseq_to_hir(iseq: *const rb_iseq_t) -> Result<Function, ParseError> {
let pushval = get_arg(pc, 2);
let v = state.stack_pop()?;
let exit_id = fun.push_insn(block, Insn::Snapshot { state: exit_state });
+ if op_type == DEFINED_METHOD.try_into().unwrap() {
+ // TODO(Shopify/ruby#703): Fix codegen for defined?(method call expr)
+ fun.push_insn(block, Insn::SideExit { state: exit_id, reason: SideExitReason::UnhandledDefinedType(op_type)});
+ break; // End the block
+ }
state.stack_push(fun.push_insn(block, Insn::Defined { op_type, obj, pushval, v, state: exit_id }));
}
YARVINSN_definedivar => {
@@ -4204,10 +4210,10 @@ mod tests {
fn test@<compiled>:2:
bb0(v0:BasicObject):
v2:NilClass = Const Value(nil)
- v4:BasicObject = Defined constant, v2
- v6:BasicObject = Defined func, v0
+ v4:StringExact|NilClass = Defined constant, v2
+ v6:StringExact|NilClass = Defined func, v0
v7:NilClass = Const Value(nil)
- v9:BasicObject = Defined global-variable, v7
+ v9:StringExact|NilClass = Defined global-variable, v7
v11:ArrayExact = NewArray v4, v6, v9
Return v11
"#]]);
diff --git a/zjit/src/invariants.rs b/zjit/src/invariants.rs
index 3f291415be..14fea76d1b 100644
--- a/zjit/src/invariants.rs
+++ b/zjit/src/invariants.rs
@@ -1,6 +1,6 @@
use std::{collections::{HashMap, HashSet}, mem};
-use crate::{backend::lir::{asm_comment, Assembler}, cruby::{rb_callable_method_entry_t, ruby_basic_operators, src_loc, with_vm_lock, IseqPtr, RedefinitionFlag, ID}, gc::IseqPayload, hir::Invariant, options::debug, state::{zjit_enabled_p, ZJITState}, virtualmem::CodePtr};
+use crate::{backend::lir::{asm_comment, Assembler}, cruby::{rb_callable_method_entry_t, rb_gc_location, ruby_basic_operators, src_loc, with_vm_lock, IseqPtr, RedefinitionFlag, ID, VALUE}, gc::IseqPayload, hir::Invariant, options::debug, state::{zjit_enabled_p, ZJITState}, virtualmem::CodePtr};
use crate::stats::with_time_stat;
use crate::stats::Counter::invalidation_time_ns;
use crate::gc::remove_gc_offsets;
@@ -56,6 +56,31 @@ pub struct Invariants {
single_ractor_patch_points: HashSet<PatchPoint>,
}
+impl Invariants {
+ /// Update object references in Invariants
+ pub fn update_references(&mut self) {
+ Self::update_iseq_references(&mut self.ep_escape_iseqs);
+ Self::update_iseq_references(&mut self.no_ep_escape_iseqs);
+ }
+
+ /// Update ISEQ references in a given HashSet<IseqPtr>
+ fn update_iseq_references(iseqs: &mut HashSet<IseqPtr>) {
+ let mut moved: Vec<IseqPtr> = Vec::with_capacity(iseqs.len());
+
+ iseqs.retain(|&old_iseq| {
+ let new_iseq = unsafe { rb_gc_location(VALUE(old_iseq as usize)) }.0 as IseqPtr;
+ if old_iseq != new_iseq {
+ moved.push(new_iseq);
+ }
+ old_iseq == new_iseq
+ });
+
+ for new_iseq in moved {
+ iseqs.insert(new_iseq);
+ }
+ }
+}
+
/// Called when a basic operator is redefined. Note that all the blocks assuming
/// the stability of different operators are invalidated together and we don't
/// do fine-grained tracking.
diff --git a/zjit/src/profile.rs b/zjit/src/profile.rs
index 7ffaea29dc..771d90cb0e 100644
--- a/zjit/src/profile.rs
+++ b/zjit/src/profile.rs
@@ -98,19 +98,32 @@ fn profile_operands(profiler: &mut Profiler, profile: &mut IseqProfile, n: usize
let obj = profiler.peek_at_stack((n - i - 1) as isize);
// TODO(max): Handle GC-hidden classes like Array, Hash, etc and make them look normal or
// drop them or something
- let ty = ProfiledType::new(obj.class_of(), obj.shape_id_of());
+ let ty = ProfiledType::new(obj);
unsafe { rb_gc_writebarrier(profiler.iseq.into(), ty.class()) };
types[i].observe(ty);
}
}
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+struct Flags(u32);
+
+impl Flags {
+ const NONE: u32 = 0;
+ const IS_IMMEDIATE: u32 = 1 << 0;
+
+ pub fn none() -> Self { Self(Self::NONE) }
+
+ pub fn immediate() -> Self { Self(Self::IS_IMMEDIATE) }
+ pub fn is_immediate(self) -> bool { (self.0 & Self::IS_IMMEDIATE) != 0 }
+}
+
/// opt_send_without_block/opt_plus/... should store:
/// * the class of the receiver, so we can do method lookup
/// * the shape of the receiver, so we can optimize ivar lookup
/// with those two pieces of information, we can also determine when an object is an immediate:
-/// * Integer + SPECIAL_CONST_SHAPE_ID == Fixnum
-/// * Float + SPECIAL_CONST_SHAPE_ID == Flonum
-/// * Symbol + SPECIAL_CONST_SHAPE_ID == StaticSymbol
+/// * Integer + IS_IMMEDIATE == Fixnum
+/// * Float + IS_IMMEDIATE == Flonum
+/// * Symbol + IS_IMMEDIATE == StaticSymbol
/// * NilClass == Nil
/// * TrueClass == True
/// * FalseClass == False
@@ -118,6 +131,7 @@ fn profile_operands(profiler: &mut Profiler, profile: &mut IseqProfile, n: usize
pub struct ProfiledType {
class: VALUE,
shape: ShapeId,
+ flags: Flags,
}
impl Default for ProfiledType {
@@ -127,12 +141,42 @@ impl Default for ProfiledType {
}
impl ProfiledType {
- fn new(class: VALUE, shape: ShapeId) -> Self {
- Self { class, shape }
+ fn new(obj: VALUE) -> Self {
+ if obj == Qfalse {
+ return Self { class: unsafe { rb_cFalseClass },
+ shape: INVALID_SHAPE_ID,
+ flags: Flags::immediate() };
+ }
+ if obj == Qtrue {
+ return Self { class: unsafe { rb_cTrueClass },
+ shape: INVALID_SHAPE_ID,
+ flags: Flags::immediate() };
+ }
+ if obj == Qnil {
+ return Self { class: unsafe { rb_cNilClass },
+ shape: INVALID_SHAPE_ID,
+ flags: Flags::immediate() };
+ }
+ if obj.fixnum_p() {
+ return Self { class: unsafe { rb_cInteger },
+ shape: INVALID_SHAPE_ID,
+ flags: Flags::immediate() };
+ }
+ if obj.flonum_p() {
+ return Self { class: unsafe { rb_cFloat },
+ shape: INVALID_SHAPE_ID,
+ flags: Flags::immediate() };
+ }
+ if obj.static_sym_p() {
+ return Self { class: unsafe { rb_cSymbol },
+ shape: INVALID_SHAPE_ID,
+ flags: Flags::immediate() };
+ }
+ Self { class: obj.class_of(), shape: obj.shape_id_of(), flags: Flags::none() }
}
pub fn empty() -> Self {
- Self { class: VALUE(0), shape: INVALID_SHAPE_ID }
+ Self { class: VALUE(0), shape: INVALID_SHAPE_ID, flags: Flags::none() }
}
pub fn is_empty(&self) -> bool {
@@ -148,27 +192,27 @@ impl ProfiledType {
}
pub fn is_fixnum(&self) -> bool {
- self.class == unsafe { rb_cInteger } && self.shape == SPECIAL_CONST_SHAPE_ID
+ self.class == unsafe { rb_cInteger } && self.flags.is_immediate()
}
pub fn is_flonum(&self) -> bool {
- self.class == unsafe { rb_cFloat } && self.shape == SPECIAL_CONST_SHAPE_ID
+ self.class == unsafe { rb_cFloat } && self.flags.is_immediate()
}
pub fn is_static_symbol(&self) -> bool {
- self.class == unsafe { rb_cSymbol } && self.shape == SPECIAL_CONST_SHAPE_ID
+ self.class == unsafe { rb_cSymbol } && self.flags.is_immediate()
}
pub fn is_nil(&self) -> bool {
- self.class == unsafe { rb_cNilClass } && self.shape == SPECIAL_CONST_SHAPE_ID
+ self.class == unsafe { rb_cNilClass } && self.flags.is_immediate()
}
pub fn is_true(&self) -> bool {
- self.class == unsafe { rb_cTrueClass } && self.shape == SPECIAL_CONST_SHAPE_ID
+ self.class == unsafe { rb_cTrueClass } && self.flags.is_immediate()
}
pub fn is_false(&self) -> bool {
- self.class == unsafe { rb_cFalseClass } && self.shape == SPECIAL_CONST_SHAPE_ID
+ self.class == unsafe { rb_cFalseClass } && self.flags.is_immediate()
}
}