
From: Andy Wingo
Subject: [Guile-commits] 01/02: Renumber instructions and bump objcode version
Date: Thu, 20 Sep 2018 05:42:43 -0400 (EDT)

wingo pushed a commit to branch master
in repository guile.

commit 4060728e0a171a17f8a7d4698602a72d093277bd
Author: Andy Wingo <address@hidden>
Date:   Thu Sep 20 11:22:15 2018 +0200

    Renumber instructions and bump objcode version
    
    * libguile/loader.h (SCM_OBJCODE_MINIMUM_MINOR_VERSION):
      (SCM_OBJCODE_MINOR_VERSION): Bump version.
    * module/system/vm/assembler.scm (*bytecode-minor-version*): Bump.
    * libguile/vm-engine.c: Rearrange opcodes to be contiguous and in a
      somewhat sensible order.
---
 libguile/loader.h              |    4 +-
 libguile/vm-engine.c           | 2671 +++++++++++++++++++---------------------
 module/system/vm/assembler.scm |    2 +-
 3 files changed, 1283 insertions(+), 1394 deletions(-)
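
The renumbering matters because the VM dispatches on the opcode value itself: with
contiguous numbering, the dispatch table is a dense array indexed directly by opcode,
with no dead entries.  The following is a minimal, self-contained sketch of that
dispatch style, not Guile's actual engine (which uses the VM_DEFINE_OP/NEXT machinery
visible in the diff below); the opcode names and handlers are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical opcodes: contiguous values 0..N-1 index the table directly. */
enum { OP_HALT = 0, OP_PUSH1 = 1, OP_ADD = 2, OP_COUNT };

static int stack[16], sp;

static int op_halt (void)  { printf ("result: %d\n", stack[sp - 1]); return 0; }
static int op_push1 (void) { stack[sp++] = 1; return 1; }
static int op_add (void)   { sp--; stack[sp - 1] += stack[sp]; return 1; }

/* Dense dispatch table: because opcodes are contiguous, opcode == index. */
static int (*const handlers[OP_COUNT]) (void) = {
  [OP_HALT] = op_halt, [OP_PUSH1] = op_push1, [OP_ADD] = op_add,
};

int
main (void)
{
  static const uint8_t code[] = { OP_PUSH1, OP_PUSH1, OP_ADD, OP_HALT };
  const uint8_t *ip = code;
  while (handlers[*ip] ())      /* a handler returns 0 to halt */
    ip++;
  return 0;                     /* prints "result: 2" */
}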

diff --git a/libguile/loader.h b/libguile/loader.h
index d9f632d..979d560 100644
--- a/libguile/loader.h
+++ b/libguile/loader.h
@@ -39,8 +39,8 @@
 
 /* Major and minor versions must be single characters. */
 #define SCM_OBJCODE_MAJOR_VERSION 4
-#define SCM_OBJCODE_MINIMUM_MINOR_VERSION 0
-#define SCM_OBJCODE_MINOR_VERSION 0
+#define SCM_OBJCODE_MINIMUM_MINOR_VERSION 1
+#define SCM_OBJCODE_MINOR_VERSION 1
 #define SCM_OBJCODE_MAJOR_VERSION_STRING        \
   SCM_CPP_STRINGIFY(SCM_OBJCODE_MAJOR_VERSION)
 #define SCM_OBJCODE_MINOR_VERSION_STRING        \
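
Because the same opcode byte now means a different instruction, bytecode compiled
before this commit must not be loaded by the new VM (and vice versa); that is what
bumping both minor-version macros enforces.  Below is a small, self-contained sketch
of that intent.  The helper name and the exact comparison are assumptions for
illustration only -- the real check lives in libguile/loader.c -- but the macro
values match the hunk above.

#include <stdio.h>

/* Values from libguile/loader.h after this commit. */
#define SCM_OBJCODE_MAJOR_VERSION 4
#define SCM_OBJCODE_MINIMUM_MINOR_VERSION 1
#define SCM_OBJCODE_MINOR_VERSION 1

/* Hypothetical helper: accept an object file only if its major version
   matches exactly and its minor version falls within the supported range. */
static int
objcode_version_ok (int major, int minor)
{
  return major == SCM_OBJCODE_MAJOR_VERSION
    && minor >= SCM_OBJCODE_MINIMUM_MINOR_VERSION
    && minor <= SCM_OBJCODE_MINOR_VERSION;
}

int
main (void)
{
  /* Objcode from before the renumbering (minor 0) is refused;
     objcode produced by the bumped assembler (minor 1) is accepted. */
  printf ("4.0 ok: %d\n", objcode_version_ok (4, 0));
  printf ("4.1 ok: %d\n", objcode_version_ok (4, 1));
  return 0;
}
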
diff --git a/libguile/vm-engine.c b/libguile/vm-engine.c
index 4daa9fe..4e0ef77 100644
--- a/libguile/vm-engine.c
+++ b/libguile/vm-engine.c
@@ -314,10 +314,6 @@ VM_NAME (scm_thread *thread)
 
   
 
-  /*
-   * Call and return
-   */
-
   /* halt _:24
    *
    * Bring the VM to a halt, returning all the values from the stack.
@@ -351,6 +347,94 @@ VM_NAME (scm_thread *thread)
       return ret;
     }
 
+  /* instrument-entry _:24 data:32
+   *
+   * Increase execution counter for this function and potentially tier
+   * up to the next JIT level.  DATA is an offset to raw profiler,
+   * recording execution counts and the next-level JIT code
+   * corresponding to this function.  Also run the apply hook.
+   */
+  VM_DEFINE_OP (1, instrument_entry, "instrument-entry", OP2 (X32, N32))
+    {
+#if ENABLE_JIT
+      if (!VP->disable_mcode)
+        {
+          struct scm_jit_function_data *data;
+
+          int32_t data_offset = ip[1];
+          data = (struct scm_jit_function_data *) (ip + data_offset);
+
+          if (data->mcode)
+            {
+              SYNC_IP ();
+              scm_jit_enter_mcode (thread, data->mcode);
+              CACHE_REGISTER ();
+              NEXT (0);
+            }
+
+          if (data->counter >= scm_jit_counter_threshold)
+            {
+              const uint8_t *mcode;
+
+              SYNC_IP ();
+              mcode = scm_jit_compute_mcode (thread, data);
+
+              if (mcode)
+                {
+                  scm_jit_enter_mcode (thread, mcode);
+                  CACHE_REGISTER ();
+                  NEXT (0);
+                }
+            }
+          else
+            data->counter += SCM_JIT_COUNTER_ENTRY_INCREMENT;
+        }
+#endif
+
+      APPLY_HOOK ();
+
+      NEXT (2);
+    }
+
+  /* instrument-loop _:24 data:32
+   *
+   * Increase execution counter for this function and potentially tier
+   * up to the next JIT level.  DATA is an offset to raw profiler,
+   * recording execution counts and the next-level JIT code
+   * corresponding to this function.
+   */
+  VM_DEFINE_OP (2, instrument_loop, "instrument-loop", OP2 (X32, N32))
+    {
+#if ENABLE_JIT
+      if (!VP->disable_mcode)
+        {
+          int32_t data_offset = ip[1];
+          struct scm_jit_function_data *data;
+
+          data = (struct scm_jit_function_data *) (ip + data_offset);
+
+          if (data->counter >= scm_jit_counter_threshold)
+            {
+              const uint8_t *mcode;
+
+              SYNC_IP ();
+              mcode = scm_jit_compute_mcode (thread, data);
+
+              if (mcode)
+                {
+                  scm_jit_enter_mcode (thread, mcode);
+                  CACHE_REGISTER ();
+                  NEXT (0);
+                }
+            }
+          else
+            data->counter += SCM_JIT_COUNTER_LOOP_INCREMENT;
+        }
+#endif
+
+      NEXT (2);
+    }
+
   /* call proc:24 _:8 nlocals:24
    *
    * Call a procedure.  PROC is the local corresponding to a procedure.
@@ -364,7 +448,7 @@ VM_NAME (scm_thread *thread)
    * stack; the precise number can be had by subtracting the address of
    * PROC from the post-call SP.
    */
-  VM_DEFINE_OP (1, call, "call", OP2 (X8_F24, X8_C24))
+  VM_DEFINE_OP (3, call, "call", OP2 (X8_F24, X8_C24))
     {
       uint32_t proc, nlocals;
       union scm_vm_stack_element *old_fp, *new_fp;
@@ -396,7 +480,7 @@ VM_NAME (scm_thread *thread)
    * the current IP.  Since PROC is not dereferenced, it may be some
    * other representation of the closure.
    */
-  VM_DEFINE_OP (2, call_label, "call-label", OP3 (X8_F24, X8_C24, L32))
+  VM_DEFINE_OP (4, call_label, "call-label", OP3 (X8_F24, X8_C24, L32))
     {
       uint32_t proc, nlocals;
       int32_t label;
@@ -426,7 +510,7 @@ VM_NAME (scm_thread *thread)
    * stack frame.  Requires that the procedure and all of the arguments
    * have already been shuffled into position.
    */
-  VM_DEFINE_OP (3, tail_call, "tail-call", OP1 (X32))
+  VM_DEFINE_OP (5, tail_call, "tail-call", OP1 (X32))
     {
       ip = CALL_INTRINSIC (get_callee_vcode, (thread));
       CACHE_SP ();
@@ -438,7 +522,7 @@ VM_NAME (scm_thread *thread)
    * Tail-call a known procedure.  As call is to call-label, tail-call
    * is to tail-call-label.
    */
-  VM_DEFINE_OP (4, tail_call_label, "tail-call-label", OP2 (X32, L32))
+  VM_DEFINE_OP (6, tail_call_label, "tail-call-label", OP2 (X32, L32))
     {
       int32_t label;
       
@@ -449,53 +533,36 @@ VM_NAME (scm_thread *thread)
       NEXT (0);
     }
 
-  /* instrument-entry _:24 data:32
+  /* return-values _:24
    *
-   * Increase execution counter for this function and potentially tier
-   * up to the next JIT level.  DATA is an offset to raw profiler,
-   * recording execution counts and the next-level JIT code
-   * corresponding to this function.  Also run the apply hook.
+   * Return all values from a call frame.
    */
-  VM_DEFINE_OP (5, instrument_entry, "instrument-entry", OP2 (X32, N32))
+  VM_DEFINE_OP (7, return_values, "return-values", OP1 (X32))
     {
+      union scm_vm_stack_element *old_fp;
+      size_t frame_size = 3;
+      uint8_t *mcode;
+
+      RETURN_HOOK ();
+
+      old_fp = VP->fp;
+      VP->fp = SCM_FRAME_DYNAMIC_LINK (old_fp);
+
 #if ENABLE_JIT
       if (!VP->disable_mcode)
         {
-          struct scm_jit_function_data *data;
-
-          int32_t data_offset = ip[1];
-          data = (struct scm_jit_function_data *) (ip + data_offset);
-
-          if (data->mcode)
+          mcode = SCM_FRAME_MACHINE_RETURN_ADDRESS (old_fp);
+          if (mcode)
             {
-              SYNC_IP ();
-              scm_jit_enter_mcode (thread, data->mcode);
+              scm_jit_enter_mcode (thread, mcode);
               CACHE_REGISTER ();
               NEXT (0);
             }
-
-          if (data->counter >= scm_jit_counter_threshold)
-            {
-              const uint8_t *mcode;
-
-              SYNC_IP ();
-              mcode = scm_jit_compute_mcode (thread, data);
-
-              if (mcode)
-                {
-                  scm_jit_enter_mcode (thread, mcode);
-                  CACHE_REGISTER ();
-                  NEXT (0);
-                }
-            }
-          else
-            data->counter += SCM_JIT_COUNTER_ENTRY_INCREMENT;
         }
 #endif
 
-      APPLY_HOOK ();
-
-      NEXT (2);
+      ip = SCM_FRAME_VIRTUAL_RETURN_ADDRESS (old_fp);
+      NEXT (0);
     }
 
   /* receive dst:12 proc:12 _:8 nlocals:24
@@ -504,7 +571,7 @@ VM_NAME (scm_thread *thread)
    * PROC, asserting that the call actually returned at least one
    * value.  Afterwards, resets the frame to NLOCALS locals.
    */
-  VM_DEFINE_OP (6, receive, "receive", DOP2 (X8_F12_F12, X8_C24))
+  VM_DEFINE_OP (8, receive, "receive", DOP2 (X8_F12_F12, X8_C24))
     {
       uint16_t dst, proc;
       uint32_t nlocals;
@@ -525,7 +592,7 @@ VM_NAME (scm_thread *thread)
    * return values equals NVALUES exactly.  After receive-values has
    * run, the values can be copied down via `mov'.
    */
-  VM_DEFINE_OP (7, receive_values, "receive-values", OP2 (X8_F24, B1_X7_C24))
+  VM_DEFINE_OP (9, receive_values, "receive-values", OP2 (X8_F24, B1_X7_C24))
     {
       uint32_t proc, nvalues;
       UNPACK_24 (op, proc);
@@ -539,442 +606,278 @@ VM_NAME (scm_thread *thread)
       NEXT (2);
     }
 
-  /* shuffle-down from:12 to:12
+  /* assert-nargs-ee expected:24
+   * assert-nargs-ge expected:24
+   * assert-nargs-le expected:24
    *
-   * Shuffle down values from FROM to TO, reducing the frame size by
-   * (FROM-TO) slots.  Part of the internal implementation of
-   * call-with-values, values, and apply.
+   * If the number of actual arguments is not ==, >=, or <= EXPECTED,
+   * respectively, signal an error.
    */
-  VM_DEFINE_OP (8, shuffle_down, "shuffle-down", OP1 (X8_F12_F12))
+  VM_DEFINE_OP (10, assert_nargs_ee, "assert-nargs-ee", OP1 (X8_C24))
     {
-      uint32_t n, from, to, nlocals;
-
-      UNPACK_12_12 (op, from, to);
-
-      VM_ASSERT (from > to, abort ());
-      nlocals = FRAME_LOCALS_COUNT ();
-
-      for (n = 0; from + n < nlocals; n++)
-        FP_SET (to + n, FP_REF (from + n));
-
-      RESET_FRAME (to + n);
-
+      uint32_t expected;
+      UNPACK_24 (op, expected);
+      VM_ASSERT (FRAME_LOCALS_COUNT () == expected,
+                 CALL_INTRINSIC (error_wrong_num_args, (thread)));
+      NEXT (1);
+    }
+  VM_DEFINE_OP (11, assert_nargs_ge, "assert-nargs-ge", OP1 (X8_C24))
+    {
+      uint32_t expected;
+      UNPACK_24 (op, expected);
+      VM_ASSERT (FRAME_LOCALS_COUNT () >= expected,
+                 CALL_INTRINSIC (error_wrong_num_args, (thread)));
+      NEXT (1);
+    }
+  VM_DEFINE_OP (12, assert_nargs_le, "assert-nargs-le", OP1 (X8_C24))
+    {
+      uint32_t expected;
+      UNPACK_24 (op, expected);
+      VM_ASSERT (FRAME_LOCALS_COUNT () <= expected,
+                 CALL_INTRINSIC (error_wrong_num_args, (thread)));
       NEXT (1);
     }
 
-  /* return-values _:24
+  /* assert-nargs-ee/locals expected:12 nlocals:12
    *
-   * Return all values from a call frame.
+   * Equivalent to a sequence of assert-nargs-ee and reserve-locals.  The
+   * number of locals reserved is EXPECTED + NLOCALS.
    */
-  VM_DEFINE_OP (9, return_values, "return-values", OP1 (X32))
+  VM_DEFINE_OP (13, assert_nargs_ee_locals, "assert-nargs-ee/locals", OP1 (X8_C12_C12))
     {
-      union scm_vm_stack_element *old_fp;
-      size_t frame_size = 3;
-      uint8_t *mcode;
-
-      RETURN_HOOK ();
+      uint16_t expected, nlocals;
+      UNPACK_12_12 (op, expected, nlocals);
+      VM_ASSERT (FRAME_LOCALS_COUNT () == expected,
+                 CALL_INTRINSIC (error_wrong_num_args, (thread)));
+      ALLOC_FRAME (expected + nlocals);
+      while (nlocals--)
+        SP_SET (nlocals, SCM_UNDEFINED);
 
-      old_fp = VP->fp;
-      VP->fp = SCM_FRAME_DYNAMIC_LINK (old_fp);
+      NEXT (1);
+    }
 
-#if ENABLE_JIT
-      if (!VP->disable_mcode)
-        {
-          mcode = SCM_FRAME_MACHINE_RETURN_ADDRESS (old_fp);
-          if (mcode)
-            {
-              scm_jit_enter_mcode (thread, mcode);
-              CACHE_REGISTER ();
-              NEXT (0);
-            }
-        }
-#endif
+  VM_DEFINE_OP (14, check_arguments, "arguments<=?", OP1 (X8_C24))
+    {
+      uint8_t compare_result;
+      uint32_t expected;
+      ptrdiff_t nargs;
 
-      ip = SCM_FRAME_VIRTUAL_RETURN_ADDRESS (old_fp);
-      NEXT (0);
-    }
+      UNPACK_24 (op, expected);
+      nargs = FRAME_LOCALS_COUNT ();
 
+      if (nargs < (ptrdiff_t) expected)
+        compare_result = SCM_F_COMPARE_LESS_THAN;
+      else if (nargs == (ptrdiff_t) expected)
+        compare_result = SCM_F_COMPARE_EQUAL;
+      else
+        compare_result = SCM_F_COMPARE_NONE;
 
-  
+      VP->compare_result = compare_result;
 
-  /*
-   * Specialized call stubs
-   */
+      NEXT (1);
+    }
 
-  /* subr-call idx:24
-   *
-   * Call a subr, passing all locals in this frame as arguments.  Return
-   * from the calling frame.  This instruction is part of the
-   * trampolines created in gsubr.c, and is not generated by the
-   * compiler.
-   */
-  VM_DEFINE_OP (10, subr_call, "subr-call", OP1 (X8_C24))
+  VM_DEFINE_OP (15, check_positional_arguments, "positional-arguments<=?", OP2 (X8_C24, X8_C24))
     {
-      SCM ret;
-      uint32_t idx;
+      uint8_t compare_result;
+      uint32_t nreq, expected;
+      ptrdiff_t nargs, npos;
 
-      UNPACK_24 (op, idx);
+      UNPACK_24 (op, nreq);
+      UNPACK_24 (ip[1], expected);
+      nargs = FRAME_LOCALS_COUNT ();
 
-      SYNC_IP ();
-      ret = scm_apply_subr (sp, idx, FRAME_LOCALS_COUNT ());
+      /* Precondition: at least NREQ arguments.  */
+      for (npos = nreq; npos < nargs && npos <= expected; npos++)
+        if (scm_is_keyword (FP_REF (npos)))
+          break;
 
-      if (SCM_UNLIKELY (scm_is_values (ret)))
-        {
-          CALL_INTRINSIC (unpack_values_object, (thread, ret));
-          CACHE_SP ();
-          NEXT (1);
-        }
+      if (npos < (ptrdiff_t) expected)
+        compare_result = SCM_F_COMPARE_LESS_THAN;
+      else if (npos == (ptrdiff_t) expected)
+        compare_result = SCM_F_COMPARE_EQUAL;
       else
-        {
-          RESET_FRAME (1);
-          SP_SET (0, ret);
-          NEXT (1);
-        }
+        compare_result = SCM_F_COMPARE_NONE;
+
+      VP->compare_result = compare_result;
+
+      NEXT (2);
     }
 
-  /* foreign-call cif-idx:12 ptr-idx:12
+  /* bind-kwargs nreq:24 flags:8 nreq-and-opt:24 _:8 ntotal:24 kw-offset:32
    *
-   * Call a foreign function.  Fetch the CIF and foreign pointer from
-   * CIF-IDX and PTR-IDX, both free variables.  Return from the calling
-   * frame.  Arguments are taken from the stack.  This instruction is
-   * part of the trampolines created by the FFI, and is not generated by
-   * the compiler.
+   * flags := allow-other-keys:1 has-rest:1 _:6
+   *
+   * Find the last positional argument, and shuffle all the rest above
+   * NTOTAL.  Initialize the intervening locals to SCM_UNDEFINED.  Then
+   * load the constant at KW-OFFSET words from the current IP, and use it
+   * to bind keyword arguments.  If HAS-REST, collect all shuffled
+   * arguments into a list, and store it in NREQ-AND-OPT.  Finally, clear
+   * the arguments that we shuffled up.
+   *
+   * A macro-mega-instruction.
    */
-  VM_DEFINE_OP (11, foreign_call, "foreign-call", OP1 (X8_C12_C12))
+  VM_DEFINE_OP (16, bind_kwargs, "bind-kwargs", OP4 (X8_C24, C8_C24, X8_C24, N32))
     {
-      uint16_t cif_idx, ptr_idx;
-      SCM closure, cif, pointer;
+      uint32_t nreq, nreq_and_opt, ntotal, npositional;
+      int32_t kw_offset;
+      scm_t_bits kw_bits;
+      SCM kw;
+      uint8_t allow_other_keys, has_rest;
 
-      UNPACK_12_12 (op, cif_idx, ptr_idx);
+      UNPACK_24 (op, nreq);
+      allow_other_keys = ip[1] & 0x1;
+      has_rest = ip[1] & 0x2;
+      UNPACK_24 (ip[1], nreq_and_opt);
+      UNPACK_24 (ip[2], ntotal);
+      kw_offset = ip[3];
+      kw_bits = (scm_t_bits) (ip + kw_offset);
+      VM_ASSERT (!(kw_bits & 0x7), abort());
+      kw = SCM_PACK (kw_bits);
 
-      closure = FP_REF (0);
-      cif = SCM_PROGRAM_FREE_VARIABLE_REF (closure, cif_idx);
-      pointer = SCM_PROGRAM_FREE_VARIABLE_REF (closure, ptr_idx);
+      /* Note that if nopt == 0 then npositional = nreq.  */
+      npositional = CALL_INTRINSIC (compute_kwargs_npositional,
+                                    (thread, nreq, nreq_and_opt - nreq));
 
       SYNC_IP ();
-      CALL_INTRINSIC (foreign_call, (thread, cif, pointer));
+      CALL_INTRINSIC (bind_kwargs,
+                      (thread, npositional, ntotal, kw, !has_rest,
+                       allow_other_keys));
       CACHE_SP ();
 
-      NEXT (1);
-    }
-
-  /* continuation-call contregs:24
-   *
-   * Return to a continuation, nonlocally.  The arguments to the
-   * continuation are taken from the stack.  CONTREGS is a free variable
-   * containing the reified continuation.  This instruction is part of
-   * the implementation of undelimited continuations, and is not
-   * generated by the compiler.
-   */
-  VM_DEFINE_OP (12, continuation_call, "continuation-call", OP1 (X8_C24))
-    {
-      SCM contregs;
-      uint32_t contregs_idx;
-
-      UNPACK_24 (op, contregs_idx);
-
-      contregs =
-        SCM_PROGRAM_FREE_VARIABLE_REF (FP_REF (0), contregs_idx);
+      if (has_rest)
+        FP_SET (nreq_and_opt, CALL_INTRINSIC (cons_rest, (thread, ntotal)));
 
-      SYNC_IP ();
-      CALL_INTRINSIC (reinstate_continuation_x, (thread, contregs));
+      RESET_FRAME (ntotal);
 
-      /* no NEXT */
-      abort ();
+      NEXT (4);
     }
 
-  /* compose-continuation cont:24
+  /* bind-rest dst:24
    *
-   * Compose a partial continuation with the current continuation.  The
-   * arguments to the continuation are taken from the stack.  CONT is a
-   * free variable containing the reified continuation.  This
-   * instruction is part of the implementation of partial continuations,
-   * and is not generated by the compiler.
+   * Collect any arguments at or above DST into a list, and store that
+   * list at DST.
    */
-  VM_DEFINE_OP (13, compose_continuation, "compose-continuation", OP1 (X8_C24))
+  VM_DEFINE_OP (17, bind_rest, "bind-rest", DOP1 (X8_F24))
     {
-      SCM vmcont;
-      uint32_t cont_idx;
-      uint8_t *mcode;
-
-      UNPACK_24 (op, cont_idx);
-      vmcont = SCM_PROGRAM_FREE_VARIABLE_REF (FP_REF (0), cont_idx);
+      uint32_t dst, nargs;
+      SCM rest = SCM_EOL;
 
-      SYNC_IP ();
-      mcode = CALL_INTRINSIC (compose_continuation, (thread, vmcont));
+      UNPACK_24 (op, dst);
+      nargs = FRAME_LOCALS_COUNT ();
 
-#if ENABLE_JIT
-      if (mcode && !VP->disable_mcode)
+      if (nargs <= dst)
         {
-          scm_jit_enter_mcode (thread, mcode);
-          CACHE_REGISTER ();
-          NEXT (0);
+          ALLOC_FRAME (dst + 1);
+          while (nargs < dst)
+            FP_SET (nargs++, SCM_UNDEFINED);
         }
       else
-#endif
         {
-          CACHE_REGISTER ();
-          NEXT (0);
-        }
-    }
-
-  /* instrument-loop _:24 data:32
-   *
-   * Increase execution counter for this function and potentially tier
-   * up to the next JIT level.  DATA is an offset to raw profiler,
-   * recording execution counts and the next-level JIT code
-   * corresponding to this function.
-   */
-  VM_DEFINE_OP (14, instrument_loop, "instrument-loop", OP2 (X32, N32))
-    {
-#if ENABLE_JIT
-      if (!VP->disable_mcode)
-        {
-          int32_t data_offset = ip[1];
-          struct scm_jit_function_data *data;
-
-          data = (struct scm_jit_function_data *) (ip + data_offset);
-
-          if (data->counter >= scm_jit_counter_threshold)
-            {
-              const uint8_t *mcode;
-
-              SYNC_IP ();
-              mcode = scm_jit_compute_mcode (thread, data);
-
-              if (mcode)
-                {
-                  scm_jit_enter_mcode (thread, mcode);
-                  CACHE_REGISTER ();
-                  NEXT (0);
-                }
-            }
-          else
-            data->counter += SCM_JIT_COUNTER_LOOP_INCREMENT;
+          SYNC_IP ();
+          rest = CALL_INTRINSIC (cons_rest, (thread, dst));
+          RESET_FRAME (dst + 1);
         }
-#endif
-
-      NEXT (2);
-    }
-
-  /* capture-continuation dst:24
-   *
-   * Capture the current continuation.  This instruction is part of the
-   * implementation of `call/cc', and is not generated by the compiler.
-   */
-  VM_DEFINE_OP (15, capture_continuation, "capture-continuation", DOP1 (X8_S24))
-    {
-      uint32_t dst;
 
-      UNPACK_24 (op, dst);
-
-      SYNC_IP ();
-      SP_SET (dst, CALL_INTRINSIC (capture_continuation, (thread)));
+      FP_SET (dst, rest);
 
       NEXT (1);
     }
 
-  /* abort _:24
-   *
-   * Abort to a prompt handler.  The tag is expected in r1, and the rest
-   * of the values in the frame are returned to the prompt handler.
-   * This corresponds to a tail application of abort-to-prompt.
-   */
-  VM_DEFINE_OP (16, abort, "abort", OP1 (X32))
-    {
-      uint8_t *mcode = NULL;
-
-      /* FIXME: Really we should capture the caller's registers.  Until
-         then, manually advance the IP so that when the prompt resumes,
-         it continues with the next instruction.  */
-      ip++;
-      SYNC_IP ();
-      mcode = CALL_INTRINSIC (abort_to_prompt, (thread, mcode));
-
-      /* If abort_to_prompt returned, that means there were no
-         intervening C frames to jump over, so we just continue
-         directly.  */
-
-      ABORT_HOOK ();
-
-#if ENABLE_JIT
-      if (mcode && !VP->disable_mcode)
-        scm_jit_enter_mcode (thread, mcode);
-#endif
-
-      CACHE_REGISTER ();
-      NEXT (0);
-    }
-
-  /* builtin-ref dst:12 idx:12
+  /* alloc-frame nlocals:24
    *
-   * Load a builtin stub by index into DST.
+   * Ensure that there is space on the stack for NLOCALS local variables,
+   * setting them all to SCM_UNDEFINED, except those nargs values that
+   * were passed as arguments and procedure.
    */
-  VM_DEFINE_OP (17, builtin_ref, "builtin-ref", DOP1 (X8_S12_C12))
+  VM_DEFINE_OP (18, alloc_frame, "alloc-frame", OP1 (X8_C24))
     {
-      uint16_t dst, idx;
+      uint32_t nlocals, nargs;
+      UNPACK_24 (op, nlocals);
 
-      UNPACK_12_12 (op, dst, idx);
-      SP_SET (dst, scm_vm_builtin_ref (idx));
+      nargs = FRAME_LOCALS_COUNT ();
+      ALLOC_FRAME (nlocals);
+      while (nlocals-- > nargs)
+        FP_SET (nlocals, SCM_UNDEFINED);
 
       NEXT (1);
     }
 
-
-  
-
-  /*
-   * Function prologues
-   */
-
-  /* throw key:12 args:12
+  /* reset-frame nlocals:24
    *
-   * Throw to KEY and ARGS.  ARGS should be a list.
+   * Like alloc-frame, but doesn't check that the stack is big enough.
+   * Used to reset the frame size to something less than the size that
+   * was previously set via alloc-frame.
    */
-  VM_DEFINE_OP (18, throw, "throw", OP1 (X8_S12_S12))
+  VM_DEFINE_OP (19, reset_frame, "reset-frame", OP1 (X8_C24))
     {
-      uint16_t a, b;
-      SCM key, args;
-
-      UNPACK_12_12 (op, a, b);
-
-      key = SP_REF (a);
-      args = SP_REF (b);
-
-      SYNC_IP ();
-      CALL_INTRINSIC (throw_, (key, args));
-
-      abort (); /* never reached */
+      uint32_t nlocals;
+      UNPACK_24 (op, nlocals);
+      RESET_FRAME (nlocals);
+      NEXT (1);
     }
 
-  /* throw/value val:24 key-subr-and-message:32
+  /* mov dst:12 src:12
    *
-   * Raise an error, indicating VAL as the bad value.
-   * KEY-SUBR-AND-MESSAGE should be a vector, where the first element is
-   * the symbol to which to throw, the second is the procedure in which
-   * to signal the error (a string) or #f, and the third is a format
-   * string for the message, with one template.
+   * Copy a value from one local slot to another.
    */
-  VM_DEFINE_OP (19, throw_value, "throw/value", OP2 (X8_S24, N32))
+  VM_DEFINE_OP (20, mov, "mov", DOP1 (X8_S12_S12))
     {
-      uint32_t a;
-      int32_t offset;
-      scm_t_bits key_subr_and_message_bits;
-      SCM val, key_subr_and_message;
-
-      UNPACK_24 (op, a);
-      val = SP_REF (a);
-
-      offset = ip[1];
-      key_subr_and_message_bits = (scm_t_bits) (ip + offset);
-      VM_ASSERT (!(key_subr_and_message_bits & 0x7), abort());
-      key_subr_and_message = SCM_PACK (key_subr_and_message_bits);
+      uint16_t dst;
+      uint16_t src;
 
-      SYNC_IP ();
-      CALL_INTRINSIC (throw_with_value, (val, key_subr_and_message));
+      UNPACK_12_12 (op, dst, src);
+      /* FIXME: The compiler currently emits "mov" for SCM, F64, U64,
+         and S64 variables.  However SCM values are the usual case, and
+         on a 32-bit machine it might be cheaper to move a SCM than to
+         move a 64-bit number.  */
+      SP_SET_SLOT (dst, SP_REF_SLOT (src));
 
-      abort (); /* never reached */
+      NEXT (1);
     }
 
-  /* throw/value+data val:24 key-subr-and-message:32
+  /* long-mov dst:24 _:8 src:24
    *
-   * Raise an error, indicating VAL as the bad value.
-   * KEY-SUBR-AND-MESSAGE should be a vector, where the first element is
-   * the symbol to which to throw, the second is the procedure in which
-   * to signal the error (a string) or #f, and the third is a format
-   * string for the message, with one template.
+   * Copy a value from one local slot to another.
    */
-  VM_DEFINE_OP (20, throw_value_and_data, "throw/value+data", OP2 (X8_S24, N32))
+  VM_DEFINE_OP (21, long_mov, "long-mov", DOP2 (X8_S24, X8_S24))
     {
-      uint32_t a;
-      int32_t offset;
-      scm_t_bits key_subr_and_message_bits;
-      SCM val, key_subr_and_message;
-
-      UNPACK_24 (op, a);
-      val = SP_REF (a);
-
-      offset = ip[1];
-      key_subr_and_message_bits = (scm_t_bits) (ip + offset);
-      VM_ASSERT (!(key_subr_and_message_bits & 0x7), abort());
-      key_subr_and_message = SCM_PACK (key_subr_and_message_bits);
-
-      SYNC_IP ();
-      CALL_INTRINSIC (throw_with_value_and_data, (val, key_subr_and_message));
+      uint32_t dst;
+      uint32_t src;
 
-      abort (); /* never reached */
-    }
+      UNPACK_24 (op, dst);
+      UNPACK_24 (ip[1], src);
+      /* FIXME: The compiler currently emits "long-mov" for SCM, F64,
+         U64, and S64 variables.  However SCM values are the usual case,
+         and on a 32-bit machine it might be cheaper to move a SCM than
+         to move a 64-bit number.  */
+      SP_SET_SLOT (dst, SP_REF_SLOT (src));
 
-  /* assert-nargs-ee expected:24
-   * assert-nargs-ge expected:24
-   * assert-nargs-le expected:24
-   *
-   * If the number of actual arguments is not ==, >=, or <= EXPECTED,
-   * respectively, signal an error.
-   */
-  VM_DEFINE_OP (21, assert_nargs_ee, "assert-nargs-ee", OP1 (X8_C24))
-    {
-      uint32_t expected;
-      UNPACK_24 (op, expected);
-      VM_ASSERT (FRAME_LOCALS_COUNT () == expected,
-                 CALL_INTRINSIC (error_wrong_num_args, (thread)));
-      NEXT (1);
-    }
-  VM_DEFINE_OP (22, assert_nargs_ge, "assert-nargs-ge", OP1 (X8_C24))
-    {
-      uint32_t expected;
-      UNPACK_24 (op, expected);
-      VM_ASSERT (FRAME_LOCALS_COUNT () >= expected,
-                 CALL_INTRINSIC (error_wrong_num_args, (thread)));
-      NEXT (1);
-    }
-  VM_DEFINE_OP (23, assert_nargs_le, "assert-nargs-le", OP1 (X8_C24))
-    {
-      uint32_t expected;
-      UNPACK_24 (op, expected);
-      VM_ASSERT (FRAME_LOCALS_COUNT () <= expected,
-                 CALL_INTRINSIC (error_wrong_num_args, (thread)));
-      NEXT (1);
+      NEXT (2);
     }
 
-  /* alloc-frame nlocals:24
+  /* long-fmov dst:24 _:8 src:24
    *
-   * Ensure that there is space on the stack for NLOCALS local variables,
-   * setting them all to SCM_UNDEFINED, except those nargs values that
-   * were passed as arguments and procedure.
+   * Copy a value from one local slot to another.  Slot indexes are
+   * relative to the FP.
    */
-  VM_DEFINE_OP (24, alloc_frame, "alloc-frame", OP1 (X8_C24))
+  VM_DEFINE_OP (22, long_fmov, "long-fmov", DOP2 (X8_F24, X8_F24))
     {
-      uint32_t nlocals, nargs;
-      UNPACK_24 (op, nlocals);
-
-      nargs = FRAME_LOCALS_COUNT ();
-      ALLOC_FRAME (nlocals);
-      while (nlocals-- > nargs)
-        FP_SET (nlocals, SCM_UNDEFINED);
+      uint32_t dst;
+      uint32_t src;
 
-      NEXT (1);
-    }
+      UNPACK_24 (op, dst);
+      UNPACK_24 (ip[1], src);
+      FP_SET (dst, FP_REF (src));
 
-  /* reset-frame nlocals:24
-   *
-   * Like alloc-frame, but doesn't check that the stack is big enough.
-   * Used to reset the frame size to something less than the size that
-   * was previously set via alloc-frame.
-   */
-  VM_DEFINE_OP (25, reset_frame, "reset-frame", OP1 (X8_C24))
-    {
-      uint32_t nlocals;
-      UNPACK_24 (op, nlocals);
-      RESET_FRAME (nlocals);
-      NEXT (1);
+      NEXT (2);
     }
 
   /* push src:24
    *
    * Push SRC onto the stack.
    */
-  VM_DEFINE_OP (26, push, "push", OP1 (X8_S24))
+  VM_DEFINE_OP (23, push, "push", OP1 (X8_S24))
     {
       uint32_t src;
       union scm_vm_stack_element val;
@@ -994,7 +897,7 @@ VM_NAME (scm_thread *thread)
    *
    * Pop the stack, storing to DST.
    */
-  VM_DEFINE_OP (27, pop, "pop", DOP1 (X8_S24))
+  VM_DEFINE_OP (24, pop, "pop", DOP1 (X8_S24))
     {
       uint32_t dst;
       union scm_vm_stack_element val;
@@ -1014,7 +917,7 @@ VM_NAME (scm_thread *thread)
    *
    * Drop some number of values from the stack.
    */
-  VM_DEFINE_OP (28, drop, "drop", OP1 (X8_C24))
+  VM_DEFINE_OP (25, drop, "drop", OP1 (X8_C24))
     {
       uint32_t count;
 
@@ -1023,470 +926,652 @@ VM_NAME (scm_thread *thread)
       NEXT (1);
     }
 
-  /* assert-nargs-ee/locals expected:12 nlocals:12
+  /* shuffle-down from:12 to:12
+   *
+   * Shuffle down values from FROM to TO, reducing the frame size by
+   * (FROM-TO) slots.  Part of the internal implementation of
+   * call-with-values, values, and apply.
+   */
+  VM_DEFINE_OP (26, shuffle_down, "shuffle-down", OP1 (X8_F12_F12))
+    {
+      uint32_t n, from, to, nlocals;
+
+      UNPACK_12_12 (op, from, to);
+
+      VM_ASSERT (from > to, abort ());
+      nlocals = FRAME_LOCALS_COUNT ();
+
+      for (n = 0; from + n < nlocals; n++)
+        FP_SET (to + n, FP_REF (from + n));
+
+      RESET_FRAME (to + n);
+
+      NEXT (1);
+    }
+
+  /* expand-apply-argument _:24
+   *
+   * Take the last local in a frame and expand it out onto the stack, as
+   * for the last argument to "apply".
+   */
+  VM_DEFINE_OP (27, expand_apply_argument, "expand-apply-argument", OP1 (X32))
+    {
+      SYNC_IP ();
+      CALL_INTRINSIC (expand_apply_argument, (thread));
+      CACHE_SP ();
+
+      NEXT (1);
+    }
+
+  /* subr-call idx:24
+   *
+   * Call a subr, passing all locals in this frame as arguments.  Return
+   * from the calling frame.  This instruction is part of the
+   * trampolines created in gsubr.c, and is not generated by the
+   * compiler.
+   */
+  VM_DEFINE_OP (28, subr_call, "subr-call", OP1 (X8_C24))
+    {
+      SCM ret;
+      uint32_t idx;
+
+      UNPACK_24 (op, idx);
+
+      SYNC_IP ();
+      ret = scm_apply_subr (sp, idx, FRAME_LOCALS_COUNT ());
+
+      if (SCM_UNLIKELY (scm_is_values (ret)))
+        {
+          CALL_INTRINSIC (unpack_values_object, (thread, ret));
+          CACHE_SP ();
+          NEXT (1);
+        }
+      else
+        {
+          RESET_FRAME (1);
+          SP_SET (0, ret);
+          NEXT (1);
+        }
+    }
+
+  /* foreign-call cif-idx:12 ptr-idx:12
    *
-   * Equivalent to a sequence of assert-nargs-ee and reserve-locals.  The
-   * number of locals reserved is EXPECTED + NLOCALS.
+   * Call a foreign function.  Fetch the CIF and foreign pointer from
+   * CIF-IDX and PTR-IDX, both free variables.  Return from the calling
+   * frame.  Arguments are taken from the stack.  This instruction is
+   * part of the trampolines created by the FFI, and is not generated by
+   * the compiler.
    */
-  VM_DEFINE_OP (29, assert_nargs_ee_locals, "assert-nargs-ee/locals", OP1 (X8_C12_C12))
+  VM_DEFINE_OP (29, foreign_call, "foreign-call", OP1 (X8_C12_C12))
     {
-      uint16_t expected, nlocals;
-      UNPACK_12_12 (op, expected, nlocals);
-      VM_ASSERT (FRAME_LOCALS_COUNT () == expected,
-                 CALL_INTRINSIC (error_wrong_num_args, (thread)));
-      ALLOC_FRAME (expected + nlocals);
-      while (nlocals--)
-        SP_SET (nlocals, SCM_UNDEFINED);
+      uint16_t cif_idx, ptr_idx;
+      SCM closure, cif, pointer;
 
-      NEXT (1);
-    }
+      UNPACK_12_12 (op, cif_idx, ptr_idx);
+
+      closure = FP_REF (0);
+      cif = SCM_PROGRAM_FREE_VARIABLE_REF (closure, cif_idx);
+      pointer = SCM_PROGRAM_FREE_VARIABLE_REF (closure, ptr_idx);
 
-  /* expand-apply-argument _:24
-   *
-   * Take the last local in a frame and expand it out onto the stack, as
-   * for the last argument to "apply".
-   */
-  VM_DEFINE_OP (30, expand_apply_argument, "expand-apply-argument", OP1 (X32))
-    {
       SYNC_IP ();
-      CALL_INTRINSIC (expand_apply_argument, (thread));
+      CALL_INTRINSIC (foreign_call, (thread, cif, pointer));
       CACHE_SP ();
 
       NEXT (1);
     }
 
-  /* bind-kwargs nreq:24 flags:8 nreq-and-opt:24 _:8 ntotal:24 kw-offset:32
-   *
-   * flags := allow-other-keys:1 has-rest:1 _:6
-   *
-   * Find the last positional argument, and shuffle all the rest above
-   * NTOTAL.  Initialize the intervening locals to SCM_UNDEFINED.  Then
-   * load the constant at KW-OFFSET words from the current IP, and use it
-   * to bind keyword arguments.  If HAS-REST, collect all shuffled
-   * arguments into a list, and store it in NREQ-AND-OPT.  Finally, clear
-   * the arguments that we shuffled up.
+  /* continuation-call contregs:24
    *
-   * A macro-mega-instruction.
+   * Return to a continuation, nonlocally.  The arguments to the
+   * continuation are taken from the stack.  CONTREGS is a free variable
+   * containing the reified continuation.  This instruction is part of
+   * the implementation of undelimited continuations, and is not
+   * generated by the compiler.
    */
-  VM_DEFINE_OP (31, bind_kwargs, "bind-kwargs", OP4 (X8_C24, C8_C24, X8_C24, N32))
+  VM_DEFINE_OP (30, continuation_call, "continuation-call", OP1 (X8_C24))
     {
-      uint32_t nreq, nreq_and_opt, ntotal, npositional;
-      int32_t kw_offset;
-      scm_t_bits kw_bits;
-      SCM kw;
-      uint8_t allow_other_keys, has_rest;
+      SCM contregs;
+      uint32_t contregs_idx;
 
-      UNPACK_24 (op, nreq);
-      allow_other_keys = ip[1] & 0x1;
-      has_rest = ip[1] & 0x2;
-      UNPACK_24 (ip[1], nreq_and_opt);
-      UNPACK_24 (ip[2], ntotal);
-      kw_offset = ip[3];
-      kw_bits = (scm_t_bits) (ip + kw_offset);
-      VM_ASSERT (!(kw_bits & 0x7), abort());
-      kw = SCM_PACK (kw_bits);
+      UNPACK_24 (op, contregs_idx);
 
-      /* Note that if nopt == 0 then npositional = nreq.  */
-      npositional = CALL_INTRINSIC (compute_kwargs_npositional,
-                                    (thread, nreq, nreq_and_opt - nreq));
+      contregs =
+        SCM_PROGRAM_FREE_VARIABLE_REF (FP_REF (0), contregs_idx);
 
       SYNC_IP ();
-      CALL_INTRINSIC (bind_kwargs,
-                      (thread, npositional, ntotal, kw, !has_rest,
-                       allow_other_keys));
-      CACHE_SP ();
-
-      if (has_rest)
-        FP_SET (nreq_and_opt, CALL_INTRINSIC (cons_rest, (thread, ntotal)));
-
-      RESET_FRAME (ntotal);
+      CALL_INTRINSIC (reinstate_continuation_x, (thread, contregs));
 
-      NEXT (4);
+      /* no NEXT */
+      abort ();
     }
 
-  /* bind-rest dst:24
+  /* compose-continuation cont:24
    *
-   * Collect any arguments at or above DST into a list, and store that
-   * list at DST.
+   * Compose a partial continuation with the current continuation.  The
+   * arguments to the continuation are taken from the stack.  CONT is a
+   * free variable containing the reified continuation.  This
+   * instruction is part of the implementation of partial continuations,
+   * and is not generated by the compiler.
    */
-  VM_DEFINE_OP (32, bind_rest, "bind-rest", DOP1 (X8_F24))
+  VM_DEFINE_OP (31, compose_continuation, "compose-continuation", OP1 (X8_C24))
     {
-      uint32_t dst, nargs;
-      SCM rest = SCM_EOL;
+      SCM vmcont;
+      uint32_t cont_idx;
+      uint8_t *mcode;
 
-      UNPACK_24 (op, dst);
-      nargs = FRAME_LOCALS_COUNT ();
+      UNPACK_24 (op, cont_idx);
+      vmcont = SCM_PROGRAM_FREE_VARIABLE_REF (FP_REF (0), cont_idx);
 
-      if (nargs <= dst)
+      SYNC_IP ();
+      mcode = CALL_INTRINSIC (compose_continuation, (thread, vmcont));
+
+#if ENABLE_JIT
+      if (mcode && !VP->disable_mcode)
         {
-          ALLOC_FRAME (dst + 1);
-          while (nargs < dst)
-            FP_SET (nargs++, SCM_UNDEFINED);
+          scm_jit_enter_mcode (thread, mcode);
+          CACHE_REGISTER ();
+          NEXT (0);
         }
       else
+#endif
         {
-          SYNC_IP ();
-          rest = CALL_INTRINSIC (cons_rest, (thread, dst));
-          RESET_FRAME (dst + 1);
+          CACHE_REGISTER ();
+          NEXT (0);
         }
-
-      FP_SET (dst, rest);
-
-      NEXT (1);
     }
 
-
-  
-
-  VM_DEFINE_OP (33, allocate_words, "allocate-words", DOP1 (X8_S12_S12))
+  /* capture-continuation dst:24
+   *
+   * Capture the current continuation.  This instruction is part of the
+   * implementation of `call/cc', and is not generated by the compiler.
+   */
+  VM_DEFINE_OP (32, capture_continuation, "capture-continuation", DOP1 (X8_S24))
     {
-      uint16_t dst, size;
+      uint32_t dst;
 
-      UNPACK_12_12 (op, dst, size);
+      UNPACK_24 (op, dst);
 
       SYNC_IP ();
+      SP_SET (dst, CALL_INTRINSIC (allocate_words, (thread, SP_REF_U64 (size))));
+      SP_SET (dst, CALL_INTRINSIC (capture_continuation, (thread)));
+
       NEXT (1);
     }
 
+  VM_DEFINE_OP (34, allocate_words_immediate, "allocate-words/immediate", DOP1 (X8_S12_C12))
+  /* abort _:24
+   *
+   * Abort to a prompt handler.  The tag is expected in r1, and the rest
+   * of the values in the frame are returned to the prompt handler.
+   * This corresponds to a tail application of abort-to-prompt.
+   */
+  VM_DEFINE_OP (33, abort, "abort", OP1 (X32))
     {
-      uint16_t dst, size;
-
-      UNPACK_12_12 (op, dst, size);
+      uint8_t *mcode = NULL;
 
+      /* FIXME: Really we should capture the caller's registers.  Until
+         then, manually advance the IP so that when the prompt resumes,
+         it continues with the next instruction.  */
+      ip++;
       SYNC_IP ();
-      SP_SET (dst, CALL_INTRINSIC (allocate_words, (thread, size)));
+      mcode = CALL_INTRINSIC (abort_to_prompt, (thread, mcode));
 
-      NEXT (1);
+      /* If abort_to_prompt returned, that means there were no
+         intervening C frames to jump over, so we just continue
+         directly.  */
+
+      ABORT_HOOK ();
+
+#if ENABLE_JIT
+      if (mcode && !VP->disable_mcode)
+        scm_jit_enter_mcode (thread, mcode);
+#endif
+
+      CACHE_REGISTER ();
+      NEXT (0);
     }
 
-  VM_DEFINE_OP (35, scm_ref, "scm-ref", DOP1 (X8_S8_S8_S8))
+  /* prompt tag:24 escape-only?:1 _:7 proc-slot:24 _:8 handler-offset:24
+   *
+   * Push a new prompt on the dynamic stack, with a tag from TAG and a
+   * handler at HANDLER-OFFSET words from the current IP.  The handler
+   * will expect a multiple-value return as if from a call with the
+   * procedure at PROC-SLOT.
+   */
+  VM_DEFINE_OP (34, prompt, "prompt", OP3 (X8_S24, B1_X7_F24, X8_L24))
     {
-      uint8_t dst, obj, idx;
-
-      UNPACK_8_8_8 (op, dst, obj, idx);
+      uint32_t tag, proc_slot;
+      int32_t offset;
+      uint8_t escape_only_p;
+      uint8_t *mra = NULL;
 
-      SP_SET (dst, SCM_CELL_OBJECT (SP_REF (obj), SP_REF_U64 (idx)));
+      UNPACK_24 (op, tag);
+      escape_only_p = ip[1] & 0x1;
+      UNPACK_24 (ip[1], proc_slot);
+      offset = ip[2];
+      offset >>= 8; /* Sign extension */
+  
+      /* Push the prompt onto the dynamic stack. */
+      SYNC_IP ();
+      CALL_INTRINSIC (push_prompt, (thread, escape_only_p, SP_REF (tag),
+                                    VP->fp - proc_slot, ip + offset, mra));
 
-      NEXT (1);
+      NEXT (3);
     }
 
-  VM_DEFINE_OP (36, scm_set, "scm-set!", OP1 (X8_S8_S8_S8))
+  /* builtin-ref dst:12 idx:12
+   *
+   * Load a builtin stub by index into DST.
+   */
+  VM_DEFINE_OP (35, builtin_ref, "builtin-ref", DOP1 (X8_S12_C12))
     {
-      uint8_t obj, idx, val;
-
-      UNPACK_8_8_8 (op, obj, idx, val);
+      uint16_t dst, idx;
 
-      SCM_SET_CELL_OBJECT (SP_REF (obj), SP_REF_U64 (idx), SP_REF (val));
+      UNPACK_12_12 (op, dst, idx);
+      SP_SET (dst, scm_vm_builtin_ref (idx));
 
       NEXT (1);
     }
 
-  VM_DEFINE_OP (37, scm_ref_tag, "scm-ref/tag", DOP1 (X8_S8_S8_C8))
+  /* throw key:12 args:12
+   *
+   * Throw to KEY and ARGS.  ARGS should be a list.
+   */
+  VM_DEFINE_OP (36, throw, "throw", OP1 (X8_S12_S12))
     {
-      uint8_t dst, obj, tag;
+      uint16_t a, b;
+      SCM key, args;
 
-      UNPACK_8_8_8 (op, dst, obj, tag);
+      UNPACK_12_12 (op, a, b);
 
-      SP_SET (dst, SCM_PACK (SCM_CELL_WORD_0 (SP_REF (obj)) - tag));
+      key = SP_REF (a);
+      args = SP_REF (b);
 
-      NEXT (1);
+      SYNC_IP ();
+      CALL_INTRINSIC (throw_, (key, args));
+
+      abort (); /* never reached */
     }
 
-  VM_DEFINE_OP (38, scm_set_tag, "scm-set!/tag", OP1 (X8_S8_C8_S8))
+  /* throw/value val:24 key-subr-and-message:32
+   *
+   * Raise an error, indicating VAL as the bad value.
+   * KEY-SUBR-AND-MESSAGE should be a vector, where the first element is
+   * the symbol to which to throw, the second is the procedure in which
+   * to signal the error (a string) or #f, and the third is a format
+   * string for the message, with one template.
+   */
+  VM_DEFINE_OP (37, throw_value, "throw/value", OP2 (X8_S24, N32))
     {
-      uint8_t obj, tag, val;
+      uint32_t a;
+      int32_t offset;
+      scm_t_bits key_subr_and_message_bits;
+      SCM val, key_subr_and_message;
 
-      UNPACK_8_8_8 (op, obj, tag, val);
+      UNPACK_24 (op, a);
+      val = SP_REF (a);
 
-      SCM_SET_CELL_WORD_0 (SP_REF (obj), SCM_UNPACK (SP_REF (val)) + tag);
+      offset = ip[1];
+      key_subr_and_message_bits = (scm_t_bits) (ip + offset);
+      VM_ASSERT (!(key_subr_and_message_bits & 0x7), abort());
+      key_subr_and_message = SCM_PACK (key_subr_and_message_bits);
+
+      SYNC_IP ();
+      CALL_INTRINSIC (throw_with_value, (val, key_subr_and_message));
 
-      NEXT (1);
+      abort (); /* never reached */
     }
 
-  VM_DEFINE_OP (39, scm_ref_immediate, "scm-ref/immediate", DOP1 (X8_S8_S8_C8))
+  /* throw/value+data val:24 key-subr-and-message:32
+   *
+   * Raise an error, indicating VAL as the bad value.
+   * KEY-SUBR-AND-MESSAGE should be a vector, where the first element is
+   * the symbol to which to throw, the second is the procedure in which
+   * to signal the error (a string) or #f, and the third is a format
+   * string for the message, with one template.
+   */
+  VM_DEFINE_OP (38, throw_value_and_data, "throw/value+data", OP2 (X8_S24, N32))
     {
-      uint8_t dst, obj, idx;
+      uint32_t a;
+      int32_t offset;
+      scm_t_bits key_subr_and_message_bits;
+      SCM val, key_subr_and_message;
 
-      UNPACK_8_8_8 (op, dst, obj, idx);
+      UNPACK_24 (op, a);
+      val = SP_REF (a);
 
-      SP_SET (dst, SCM_CELL_OBJECT (SP_REF (obj), idx));
+      offset = ip[1];
+      key_subr_and_message_bits = (scm_t_bits) (ip + offset);
+      VM_ASSERT (!(key_subr_and_message_bits & 0x7), abort());
+      key_subr_and_message = SCM_PACK (key_subr_and_message_bits);
 
-      NEXT (1);
+      SYNC_IP ();
+      CALL_INTRINSIC (throw_with_value_and_data, (val, key_subr_and_message));
+
+      abort (); /* never reached */
     }
 
-  VM_DEFINE_OP (40, scm_set_immediate, "scm-set!/immediate", OP1 (X8_S8_C8_S8))
+  /* handle-interrupts _:24
+   *
+   * Handle pending interrupts.
+   */
+  VM_DEFINE_OP (39, handle_interrupts, "handle-interrupts", OP1 (X32))
     {
-      uint8_t obj, idx, val;
+      if (SCM_LIKELY (scm_is_null
+                      (scm_atomic_ref_scm (&thread->pending_asyncs))))
+        NEXT (1);
 
-      UNPACK_8_8_8 (op, obj, idx, val);
+      if (thread->block_asyncs > 0)
+        NEXT (1);
 
-      SCM_SET_CELL_OBJECT (SP_REF (obj), idx, SP_REF (val));
+      SYNC_IP ();
+      CALL_INTRINSIC (push_interrupt_frame, (thread, 0));
+      CACHE_SP ();
+      ip = scm_vm_intrinsics.handle_interrupt_code;
 
-      NEXT (1);
+      NEXT (0);
     }
 
-  VM_DEFINE_OP (41, word_ref, "word-ref", DOP1 (X8_S8_S8_S8))
+  /* return-from-interrupt _:24
+   *
+   * Return from handling an interrupt, discarding any return values and
+   * stripping away the interrupt frame.
+   */
+  VM_DEFINE_OP (40, return_from_interrupt, "return-from-interrupt", OP1 (X32))
     {
-      uint8_t dst, obj, idx;
-
-      UNPACK_8_8_8 (op, dst, obj, idx);
+      union scm_vm_stack_element *fp = VP->fp;
 
-      SP_SET_U64 (dst, SCM_CELL_WORD (SP_REF (obj), SP_REF_U64 (idx)));
+      ip = SCM_FRAME_VIRTUAL_RETURN_ADDRESS (fp);
+      VP->fp = SCM_FRAME_DYNAMIC_LINK (fp);
+      VP->sp = sp = SCM_FRAME_PREVIOUS_SP (fp);
 
-      NEXT (1);
+      NEXT (0);
     }
 
-  VM_DEFINE_OP (42, word_set, "word-set!", OP1 (X8_S8_S8_S8))
+
+  VM_DEFINE_OP (41, call_thread, "call-thread", OP2 (X32, C32))
     {
-      uint8_t obj, idx, val;
+      scm_t_thread_intrinsic intrinsic;
 
-      UNPACK_8_8_8 (op, obj, idx, val);
+      intrinsic = intrinsics[ip[1]];
 
-      SCM_SET_CELL_WORD (SP_REF (obj), SP_REF_U64 (idx), SP_REF_U64 (val));
+      SYNC_IP ();
+      intrinsic (thread);
+      CACHE_SP ();
 
-      NEXT (1);
+      NEXT (2);
     }
 
-  VM_DEFINE_OP (43, word_ref_immediate, "word-ref/immediate", DOP1 (X8_S8_S8_C8))
+  VM_DEFINE_OP (42, call_thread_scm, "call-thread-scm", OP2 (X8_S24, C32))
     {
-      uint8_t dst, obj, idx;
+      uint32_t a;
+      scm_t_thread_scm_intrinsic intrinsic;
 
-      UNPACK_8_8_8 (op, dst, obj, idx);
+      UNPACK_24 (op, a);
+      intrinsic = intrinsics[ip[1]];
 
-      SP_SET_U64 (dst, SCM_CELL_WORD (SP_REF (obj), idx));
+      SYNC_IP ();
+      intrinsic (thread, SP_REF (a));
+      CACHE_SP ();
 
-      NEXT (1);
+      NEXT (2);
     }
 
-  VM_DEFINE_OP (44, word_set_immediate, "word-set!/immediate", OP1 (X8_S8_C8_S8))
+  VM_DEFINE_OP (43, call_thread_scm_scm, "call-thread-scm-scm", OP2 (X8_S12_S12, C32))
     {
-      uint8_t obj, idx, val;
+      uint16_t a, b;
+      scm_t_thread_scm_scm_intrinsic intrinsic;
 
-      UNPACK_8_8_8 (op, obj, idx, val);
+      UNPACK_12_12 (op, a, b);
+      intrinsic = intrinsics[ip[1]];
 
-      SCM_SET_CELL_WORD (SP_REF (obj), idx, SP_REF_U64 (val));
+      SYNC_IP ();
+      intrinsic (thread, SP_REF (a), SP_REF (b));
+      CACHE_SP ();
 
-      NEXT (1);
+      NEXT (2);
     }
 
-  VM_DEFINE_OP (45, pointer_ref_immediate, "pointer-ref/immediate", DOP1 (X8_S8_S8_C8))
+  VM_DEFINE_OP (44, call_scm_sz_u32, "call-scm-sz-u32", OP2 (X8_S8_S8_S8, C32))
     {
-      uint8_t dst, obj, idx;
+      uint8_t a, b, c;
+      scm_t_scm_sz_u32_intrinsic intrinsic;
 
-      UNPACK_8_8_8 (op, dst, obj, idx);
+      UNPACK_8_8_8 (op, a, b, c);
+      intrinsic = intrinsics[ip[1]];
 
-      SP_SET_PTR (dst, (void*) SCM_CELL_WORD (SP_REF (obj), idx));
+      SYNC_IP ();
+      intrinsic (SP_REF (a), SP_REF_U64 (b), SP_REF_U64 (c));
+      CACHE_SP ();
 
-      NEXT (1);
+      NEXT (2);
     }
 
-  VM_DEFINE_OP (46, pointer_set_immediate, "pointer-set!/immediate", OP1 (X8_S8_C8_S8))
+  VM_DEFINE_OP (45, call_scm_from_thread, "call-scm<-thread", DOP2 (X8_S24, C32))
     {
-      uint8_t obj, idx, val;
+      uint32_t dst;
+      scm_t_scm_from_thread_intrinsic intrinsic;
+      SCM res;
 
-      UNPACK_8_8_8 (op, obj, idx, val);
+      UNPACK_24 (op, dst);
+      intrinsic = intrinsics[ip[1]];
 
-      SCM_SET_CELL_WORD (SP_REF (obj), idx, (uintptr_t) SP_REF_PTR (val));
+      SYNC_IP ();
+      res = intrinsic (thread);
+      CACHE_SP ();
 
-      NEXT (1);
+      SP_SET (dst, res);
+
+      NEXT (2);
     }
 
-  VM_DEFINE_OP (47, tail_pointer_ref_immediate, "tail-pointer-ref/immediate", DOP1 (X8_S8_S8_C8))
+  VM_DEFINE_OP (46, call_s64_from_scm, "call-s64<-scm", DOP2 (X8_S12_S12, C32))
     {
-      uint8_t dst, obj, idx;
-
-      UNPACK_8_8_8 (op, dst, obj, idx);
+      uint16_t dst, src;
+      scm_t_s64_from_scm_intrinsic intrinsic;
 
-      SP_SET_PTR (dst, ((scm_t_bits *) SCM2PTR (SP_REF (obj))) + idx);
+      UNPACK_12_12 (op, dst, src);
+      intrinsic = intrinsics[ip[1]];
 
-      NEXT (1);
-    }
+      SYNC_IP ();
+#if INDIRECT_INT64_INTRINSICS
+      intrinsic (& SP_REF_S64 (dst), SP_REF (src));
+#else
+      {
+        int64_t res = intrinsic (SP_REF (src));
+        SP_SET_S64 (dst, res);
+      }
+#endif
 
-  
+      /* No CACHE_SP () after the intrinsic, as the indirect variants
+         have an out argument that points at the stack; stack relocation
+         during this kind of intrinsic is not supported!  */
 
-  /*
-   * Lexical binding instructions
-   */
+      NEXT (2);
+    }
 
-  /* mov dst:12 src:12
-   *
-   * Copy a value from one local slot to another.
-   */
-  VM_DEFINE_OP (48, mov, "mov", DOP1 (X8_S12_S12))
+  VM_DEFINE_OP (47, call_scm_from_u64, "call-scm<-u64", DOP2 (X8_S12_S12, C32))
     {
-      uint16_t dst;
-      uint16_t src;
+      uint16_t dst, src;
+      SCM res;
+      scm_t_scm_from_u64_intrinsic intrinsic;
 
       UNPACK_12_12 (op, dst, src);
-      /* FIXME: The compiler currently emits "mov" for SCM, F64, U64,
-         and S64 variables.  However SCM values are the usual case, and
-         on a 32-bit machine it might be cheaper to move a SCM than to
-         move a 64-bit number.  */
-      SP_SET_SLOT (dst, SP_REF_SLOT (src));
-
-      NEXT (1);
-    }
+      intrinsic = intrinsics[ip[1]];
 
-  /* long-mov dst:24 _:8 src:24
-   *
-   * Copy a value from one local slot to another.
-   */
-  VM_DEFINE_OP (49, long_mov, "long-mov", DOP2 (X8_S24, X8_S24))
-    {
-      uint32_t dst;
-      uint32_t src;
+      SYNC_IP ();
+#if INDIRECT_INT64_INTRINSICS
+      res = intrinsic (& SP_REF_U64 (src));
+#else
+      res = intrinsic (SP_REF_U64 (src));
+#endif
+      SP_SET (dst, res);
 
-      UNPACK_24 (op, dst);
-      UNPACK_24 (ip[1], src);
-      /* FIXME: The compiler currently emits "long-mov" for SCM, F64,
-         U64, and S64 variables.  However SCM values are the usual case,
-         and on a 32-bit machine it might be cheaper to move a SCM than
-         to move a 64-bit number.  */
-      SP_SET_SLOT (dst, SP_REF_SLOT (src));
+      /* No CACHE_SP () after the intrinsic, as the indirect variants
+         pass stack pointers directly; stack relocation during this kind
+         of intrinsic is not supported!  */
 
       NEXT (2);
     }
 
-  /* long-fmov dst:24 _:8 src:24
-   *
-   * Copy a value from one local slot to another.  Slot indexes are
-   * relative to the FP.
-   */
-  VM_DEFINE_OP (50, long_fmov, "long-fmov", DOP2 (X8_F24, X8_F24))
+  VM_DEFINE_OP (48, call_scm_from_s64, "call-scm<-s64", DOP2 (X8_S12_S12, C32))
     {
-      uint32_t dst;
-      uint32_t src;
+      uint16_t dst, src;
+      SCM res;
+      scm_t_scm_from_s64_intrinsic intrinsic;
 
-      UNPACK_24 (op, dst);
-      UNPACK_24 (ip[1], src);
-      FP_SET (dst, FP_REF (src));
+      UNPACK_12_12 (op, dst, src);
+      intrinsic = intrinsics[ip[1]];
+
+      SYNC_IP ();
+#if INDIRECT_INT64_INTRINSICS
+      res = intrinsic (& SP_REF_S64 (src));
+#else
+      res = intrinsic (SP_REF_S64 (src));
+#endif
+      CACHE_SP ();
+      SP_SET (dst, res);
 
       NEXT (2);
     }
 
-  VM_DEFINE_OP (51, call_scm_from_scm_scm, "call-scm<-scm-scm", DOP2 (X8_S8_S8_S8, C32))
+  VM_DEFINE_OP (49, call_scm_from_scm, "call-scm<-scm", DOP2 (X8_S12_S12, C32))
     {
-      uint8_t dst, a, b;
+      uint16_t dst, src;
       SCM res;
-      scm_t_scm_from_scm_scm_intrinsic intrinsic;
+      scm_t_scm_from_scm_intrinsic intrinsic;
 
-      UNPACK_8_8_8 (op, dst, a, b);
+      UNPACK_12_12 (op, dst, src);
       intrinsic = intrinsics[ip[1]];
 
       SYNC_IP ();
-      res = intrinsic (SP_REF (a), SP_REF (b));
+      res = intrinsic (SP_REF (src));
       CACHE_SP ();
       SP_SET (dst, res);
 
       NEXT (2);
     }
 
-  VM_DEFINE_OP (52, call_scm_from_scm_uimm, "call-scm<-scm-uimm", DOP2 (X8_S8_S8_C8, C32))
+  VM_DEFINE_OP (50, call_f64_from_scm, "call-f64<-scm", DOP2 (X8_S12_S12, C32))
     {
-      uint8_t dst, a, b;
-      SCM res;
-      scm_t_scm_from_scm_uimm_intrinsic intrinsic;
+      uint16_t dst, src;
+      double res;
+      scm_t_f64_from_scm_intrinsic intrinsic;
 
-      UNPACK_8_8_8 (op, dst, a, b);
+      UNPACK_12_12 (op, dst, src);
       intrinsic = intrinsics[ip[1]];
 
       SYNC_IP ();
-      res = intrinsic (SP_REF (a), b);
+      res = intrinsic (SP_REF (src));
       CACHE_SP ();
-      SP_SET (dst, res);
+      SP_SET_F64 (dst, res);
+
+      NEXT (2);
+    }
+
+  VM_DEFINE_OP (51, call_u64_from_scm, "call-u64<-scm", DOP2 (X8_S12_S12, C32))
+    {
+      uint16_t dst, src;
+      scm_t_u64_from_scm_intrinsic intrinsic;
+
+      UNPACK_12_12 (op, dst, src);
+      intrinsic = intrinsics[ip[1]];
+
+      SYNC_IP ();
+#if INDIRECT_INT64_INTRINSICS
+      intrinsic (& SP_REF_U64 (dst), SP_REF (src));
+#else
+      {
+        uint64_t res = intrinsic (SP_REF (src));
+        SP_SET_U64 (dst, res);
+      }
+#endif
+
+      /* No CACHE_SP () after the intrinsic, as the indirect variants
+         have an out argument that points at the stack; stack relocation
+         during this kind of intrinsic is not supported!  */
 
       NEXT (2);
     }
 
-  VM_DEFINE_OP (53, call_scm_sz_u32, "call-scm-sz-u32", OP2 (X8_S8_S8_S8, C32))
+  VM_DEFINE_OP (52, call_scm_from_scm_scm, "call-scm<-scm-scm", DOP2 (X8_S8_S8_S8, C32))
     {
-      uint8_t a, b, c;
-      scm_t_scm_sz_u32_intrinsic intrinsic;
+      uint8_t dst, a, b;
+      SCM res;
+      scm_t_scm_from_scm_scm_intrinsic intrinsic;
 
-      UNPACK_8_8_8 (op, a, b, c);
+      UNPACK_8_8_8 (op, dst, a, b);
       intrinsic = intrinsics[ip[1]];
 
       SYNC_IP ();
-      intrinsic (SP_REF (a), SP_REF_U64 (b), SP_REF_U64 (c));
+      res = intrinsic (SP_REF (a), SP_REF (b));
       CACHE_SP ();
+      SP_SET (dst, res);
 
       NEXT (2);
     }
 
-  VM_DEFINE_OP (54, call_scm_from_scm, "call-scm<-scm", DOP2 (X8_S12_S12, C32))
+  VM_DEFINE_OP (53, call_scm_from_scm_uimm, "call-scm<-scm-uimm", DOP2 (X8_S8_S8_C8, C32))
     {
-      uint16_t dst, src;
+      uint8_t dst, a, b;
       SCM res;
-      scm_t_scm_from_scm_intrinsic intrinsic;
+      scm_t_scm_from_scm_uimm_intrinsic intrinsic;
 
-      UNPACK_12_12 (op, dst, src);
+      UNPACK_8_8_8 (op, dst, a, b);
       intrinsic = intrinsics[ip[1]];
 
       SYNC_IP ();
-      res = intrinsic (SP_REF (src));
+      res = intrinsic (SP_REF (a), b);
       CACHE_SP ();
       SP_SET (dst, res);
 
       NEXT (2);
     }
 
-  VM_DEFINE_OP (55, call_f64_from_scm, "call-f64<-scm", DOP2 (X8_S12_S12, C32))
+  VM_DEFINE_OP (54, call_scm_from_thread_scm, "call-scm<-thread-scm", DOP2 (X8_S12_S12, C32))
     {
       uint16_t dst, src;
-      double res;
-      scm_t_f64_from_scm_intrinsic intrinsic;
+      scm_t_scm_from_thread_scm_intrinsic intrinsic;
+      SCM res;
 
       UNPACK_12_12 (op, dst, src);
       intrinsic = intrinsics[ip[1]];
 
       SYNC_IP ();
-      res = intrinsic (SP_REF (src));
+      res = intrinsic (thread, SP_REF (src));
       CACHE_SP ();
-      SP_SET_F64 (dst, res);
+
+      SP_SET (dst, res);
 
       NEXT (2);
     }
 
-  VM_DEFINE_OP (56, call_u64_from_scm, "call-u64<-scm", DOP2 (X8_S12_S12, C32))
+  VM_DEFINE_OP (55, call_scm_from_scm_u64, "call-scm<-scm-u64", DOP2 (X8_S8_S8_S8, C32))
     {
-      uint16_t dst, src;
-      scm_t_u64_from_scm_intrinsic intrinsic;
+      uint8_t dst, a, b;
+      SCM res;
+      scm_t_scm_from_scm_u64_intrinsic intrinsic;
 
-      UNPACK_12_12 (op, dst, src);
+      UNPACK_8_8_8 (op, dst, a, b);
       intrinsic = intrinsics[ip[1]];
 
       SYNC_IP ();
 #if INDIRECT_INT64_INTRINSICS
-      intrinsic (& SP_REF_U64 (dst), SP_REF (src));
+      res = intrinsic (SP_REF (a), & SP_REF_U64 (b));
 #else
-      {
-        uint64_t res = intrinsic (SP_REF (src));
-        SP_SET_U64 (dst, res);
-      }
+      res = intrinsic (SP_REF (a), SP_REF_U64 (b));
 #endif
+      CACHE_SP ();
 
-      /* No CACHE_SP () after the intrinsic, as the indirect variants
-         have an out argument that points at the stack; stack relocation
-         during this kind of intrinsic is not supported!  */
+      SP_SET (dst, res);
 
       NEXT (2);
     }
 
-
-  
-
-  /*
-   * Immediates and statically allocated non-immediates
-   */
-
   /* make-short-immediate dst:8 low-bits:16
    *
    * Make an immediate whose low bits are LOW-BITS, and whose top bits are
    * 0.
    */
-  VM_DEFINE_OP (57, make_short_immediate, "make-short-immediate", DOP1 (X8_S8_I16))
+  VM_DEFINE_OP (56, make_short_immediate, "make-short-immediate", DOP1 (X8_S8_I16))
     {
       uint8_t dst;
       scm_t_bits val;
@@ -1501,7 +1586,7 @@ VM_NAME (scm_thread *thread)
    * Make an immediate whose low bits are LOW-BITS, and whose top bits are
    * 0.
    */
-  VM_DEFINE_OP (58, make_long_immediate, "make-long-immediate", DOP2 (X8_S24, I32))
+  VM_DEFINE_OP (57, make_long_immediate, "make-long-immediate", DOP2 (X8_S24, I32))
     {
       uint32_t dst;
       scm_t_bits val;
@@ -1516,7 +1601,7 @@ VM_NAME (scm_thread *thread)
    *
    * Make an immediate with HIGH-BITS and LOW-BITS.
    */
-  VM_DEFINE_OP (59, make_long_long_immediate, "make-long-long-immediate", DOP3 (X8_S24, A32, B32))
+  VM_DEFINE_OP (58, make_long_long_immediate, "make-long-long-immediate", DOP3 (X8_S24, A32, B32))
     {
       uint32_t dst;
       scm_t_bits val;
@@ -1547,7 +1632,7 @@ VM_NAME (scm_thread *thread)
    * Whether the object is mutable or immutable depends on where it was
    * allocated by the compiler, and loaded by the loader.
    */
-  VM_DEFINE_OP (60, make_non_immediate, "make-non-immediate", DOP2 (X8_S24, N32))
+  VM_DEFINE_OP (59, make_non_immediate, "make-non-immediate", DOP2 (X8_S24, N32))
     {
       uint32_t dst;
       int32_t offset;
@@ -1566,251 +1651,257 @@ VM_NAME (scm_thread *thread)
       NEXT (2);
     }
 
-  /* static-ref dst:24 offset:32
-   *
-   * Load a SCM value into DST.  The SCM value will be fetched from
-   * memory, OFFSET 32-bit words away from the current instruction
-   * pointer.  OFFSET is a signed value.
+  /* load-label dst:24 offset:32
    *
-   * The intention is for this instruction to be used to load constants
-   * that the compiler is unable to statically allocate, like symbols.
-   * These values would be initialized when the object file loads.
+   * Load a label OFFSET words away from the current IP and write it to
+   * DST.  OFFSET is a signed 32-bit integer.
    */
-  VM_DEFINE_OP (61, static_ref, "static-ref", DOP2 (X8_S24, R32))
+  VM_DEFINE_OP (60, load_label, "load-label", DOP2 (X8_S24, L32))
     {
       uint32_t dst;
       int32_t offset;
-      uint32_t* loc;
-      uintptr_t loc_bits;
 
       UNPACK_24 (op, dst);
       offset = ip[1];
-      loc = ip + offset;
-      loc_bits = (uintptr_t) loc;
-      VM_ASSERT (ALIGNED_P (loc, SCM), abort());
 
-      SP_SET (dst, *((SCM *) loc_bits));
+      SP_SET_U64 (dst, (uintptr_t) (ip + offset));
 
       NEXT (2);
     }
 
-  /* static-set! src:24 offset:32
+  /* load-f64 dst:24 high-bits:32 low-bits:32
    *
-   * Store a SCM value into memory, OFFSET 32-bit words away from the
-   * current instruction pointer.  OFFSET is a signed value.
+   * Make a double-precision floating-point value with HIGH-BITS and
+   * LOW-BITS.
    */
-  VM_DEFINE_OP (62, static_set, "static-set!", OP2 (X8_S24, LO32))
+  VM_DEFINE_OP (61, load_f64, "load-f64", DOP3 (X8_S24, AF32, BF32))
     {
-      uint32_t src;
-      int32_t offset;
-      uint32_t* loc;
+      uint32_t dst;
+      uint64_t val;
 
-      UNPACK_24 (op, src);
-      offset = ip[1];
-      loc = ip + offset;
-      VM_ASSERT (ALIGNED_P (loc, SCM), abort());
+      UNPACK_24 (op, dst);
+      val = ip[1];
+      val <<= 32;
+      val |= ip[2];
+      SP_SET_U64 (dst, val);
+      NEXT (3);
+    }
 
-      *((SCM *) loc) = SP_REF (src);
+  /* load-u64 dst:24 high-bits:32 low-bits:32
+   *
+   * Make an unsigned 64-bit integer with HIGH-BITS and LOW-BITS.
+   */
+  VM_DEFINE_OP (62, load_u64, "load-u64", DOP3 (X8_S24, AU32, BU32))
+    {
+      uint32_t dst;
+      uint64_t val;
 
-      NEXT (2);
+      UNPACK_24 (op, dst);
+      val = ip[1];
+      val <<= 32;
+      val |= ip[2];
+      SP_SET_U64 (dst, val);
+      NEXT (3);
     }
 
-  /* static-patch! _:24 dst-offset:32 src-offset:32
+  /* load-s64 dst:24 high-bits:32 low-bits:32
    *
-   * Patch a pointer at DST-OFFSET to point to SRC-OFFSET.  Both offsets
-   * are signed 32-bit values, indicating a memory address as a number
-   * of 32-bit words away from the current instruction pointer.
+   * Make a signed 64-bit integer with HIGH-BITS and LOW-BITS.
    */
-  VM_DEFINE_OP (63, static_patch, "static-patch!", OP3 (X32, LO32, L32))
+  VM_DEFINE_OP (63, load_s64, "load-s64", DOP3 (X8_S24, AS32, BS32))
     {
-      int32_t dst_offset, src_offset;
-      void *src;
-      void** dst_loc;
+      uint32_t dst;
+      uint64_t val;
 
-      dst_offset = ip[1];
-      src_offset = ip[2];
+      UNPACK_24 (op, dst);
+      val = ip[1];
+      val <<= 32;
+      val |= ip[2];
+      SP_SET_U64 (dst, val);
+      NEXT (3);
+    }
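
The load-f64, load-u64 and load-s64 handlers above all rebuild a single
64-bit payload from their two 32-bit operand words; for load-f64 the
payload is simply the IEEE-754 bit pattern, stored into the slot as a
u64.  A standalone sketch of that recombination, with invented helper
names rather than VM code:

    #include <stdint.h>
    #include <string.h>

    /* Rebuild a 64-bit payload from two 32-bit instruction words, as
       the load-u64 / load-s64 / load-f64 handlers do.  */
    static uint64_t
    combine_halves (uint32_t high_bits, uint32_t low_bits)
    {
      uint64_t val = high_bits;
      val <<= 32;
      val |= low_bits;
      return val;
    }

    /* For load-f64, reading the payload back as a double is a bitwise
       reinterpretation of those 64 bits.  */
    static double
    bits_to_double (uint64_t bits)
    {
      double d;
      memcpy (&d, &bits, sizeof d);
      return d;
    }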
 
-      dst_loc = (void **) (ip + dst_offset);
-      src = ip + src_offset;
-      VM_ASSERT (ALIGNED_P (dst_loc, void*), abort());
+  /* current-thread dst:24
+   *
+   * Write the current thread into DST.
+   */
+  VM_DEFINE_OP (64, current_thread, "current-thread", DOP1 (X8_S24))
+    {
+      uint32_t dst;
 
-      *dst_loc = src;
+      UNPACK_24 (op, dst);
+      SP_SET (dst, thread->handle);
 
-      NEXT (3);
+      NEXT (1);
     }
 
-  
+  VM_DEFINE_OP (65, allocate_words, "allocate-words", DOP1 (X8_S12_S12))
+    {
+      uint16_t dst, size;
+
+      UNPACK_12_12 (op, dst, size);
+
+      SYNC_IP ();
+      SP_SET (dst, CALL_INTRINSIC (allocate_words, (thread, SP_REF_U64 (size))));
+      NEXT (1);
+    }
 
-  VM_DEFINE_OP (64, unused_64, NULL, NOP)
-  VM_DEFINE_OP (65, unused_65, NULL, NOP)
-  VM_DEFINE_OP (66, unused_66, NULL, NOP)
-  VM_DEFINE_OP (67, unused_67, NULL, NOP)
-  VM_DEFINE_OP (68, unused_68, NULL, NOP)
+  VM_DEFINE_OP (66, allocate_words_immediate, "allocate-words/immediate", DOP1 (X8_S12_C12))
     {
-      vm_error_bad_instruction (op);
-      abort (); /* never reached */
+      uint16_t dst, size;
+
+      UNPACK_12_12 (op, dst, size);
+
+      SYNC_IP ();
+      SP_SET (dst, CALL_INTRINSIC (allocate_words, (thread, size)));
+
+      NEXT (1);
     }
 
-  
+  VM_DEFINE_OP (67, scm_ref, "scm-ref", DOP1 (X8_S8_S8_S8))
+    {
+      uint8_t dst, obj, idx;
 
-  /*
-   * The dynamic environment
-   */
+      UNPACK_8_8_8 (op, dst, obj, idx);
 
-  /* prompt tag:24 escape-only?:1 _:7 proc-slot:24 _:8 handler-offset:24
-   *
-   * Push a new prompt on the dynamic stack, with a tag from TAG and a
-   * handler at HANDLER-OFFSET words from the current IP.  The handler
-   * will expect a multiple-value return as if from a call with the
-   * procedure at PROC-SLOT.
-   */
-  VM_DEFINE_OP (69, prompt, "prompt", OP3 (X8_S24, B1_X7_F24, X8_L24))
+      SP_SET (dst, SCM_CELL_OBJECT (SP_REF (obj), SP_REF_U64 (idx)));
+
+      NEXT (1);
+    }
+
+  VM_DEFINE_OP (68, scm_set, "scm-set!", OP1 (X8_S8_S8_S8))
     {
-      uint32_t tag, proc_slot;
-      int32_t offset;
-      uint8_t escape_only_p;
-      uint8_t *mra = NULL;
+      uint8_t obj, idx, val;
 
-      UNPACK_24 (op, tag);
-      escape_only_p = ip[1] & 0x1;
-      UNPACK_24 (ip[1], proc_slot);
-      offset = ip[2];
-      offset >>= 8; /* Sign extension */
-  
-      /* Push the prompt onto the dynamic stack. */
-      SYNC_IP ();
-      CALL_INTRINSIC (push_prompt, (thread, escape_only_p, SP_REF (tag),
-                                    VP->fp - proc_slot, ip + offset, mra));
+      UNPACK_8_8_8 (op, obj, idx, val);
 
-      NEXT (3);
+      SCM_SET_CELL_OBJECT (SP_REF (obj), SP_REF_U64 (idx), SP_REF (val));
+
+      NEXT (1);
     }
 
-  VM_DEFINE_OP (70, unused_70, NULL, NOP)
-  VM_DEFINE_OP (71, unused_71, NULL, NOP)
-  VM_DEFINE_OP (72, unused_72, NULL, NOP)
-  VM_DEFINE_OP (73, unused_73, NULL, NOP)
-  VM_DEFINE_OP (74, unused_74, NULL, NOP)
-  VM_DEFINE_OP (75, unused_75, NULL, NOP)
+  VM_DEFINE_OP (69, scm_ref_tag, "scm-ref/tag", DOP1 (X8_S8_S8_C8))
     {
-      vm_error_bad_instruction (op);
-      abort (); /* never reached */
+      uint8_t dst, obj, tag;
+
+      UNPACK_8_8_8 (op, dst, obj, tag);
+
+      SP_SET (dst, SCM_PACK (SCM_CELL_WORD_0 (SP_REF (obj)) - tag));
+
+      NEXT (1);
     }
 
-  /* load-label dst:24 offset:32
-   *
-   * Load a label OFFSET words away from the current IP and write it to
-   * DST.  OFFSET is a signed 32-bit integer.
-   */
-  VM_DEFINE_OP (76, load_label, "load-label", DOP2 (X8_S24, L32))
+  VM_DEFINE_OP (70, scm_set_tag, "scm-set!/tag", OP1 (X8_S8_C8_S8))
     {
-      uint32_t dst;
-      int32_t offset;
+      uint8_t obj, tag, val;
+
+      UNPACK_8_8_8 (op, obj, tag, val);
+
+      SCM_SET_CELL_WORD_0 (SP_REF (obj), SCM_UNPACK (SP_REF (val)) + tag);
+
+      NEXT (1);
+    }
+
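
The scm-ref/tag and scm-set!/tag pair above folds a heap object's type
tag out of and back into its first word.  A minimal model of that
arithmetic, with generic names (the real accessors are the SCM_CELL_*
macros used in the handlers):

    #include <stdint.h>

    typedef uintptr_t word;

    /* scm-ref/tag: read word 0 of a heap object with its type tag
       subtracted away.  */
    static word
    ref_word0_untagged (const word *obj, word tag)
    {
      return obj[0] - tag;
    }

    /* scm-set!/tag: store a value into word 0 with the tag re-applied.  */
    static void
    set_word0_tagged (word *obj, word val, word tag)
    {
      obj[0] = val + tag;
    }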
+  VM_DEFINE_OP (71, scm_ref_immediate, "scm-ref/immediate", DOP1 (X8_S8_S8_C8))
+    {
+      uint8_t dst, obj, idx;
+
+      UNPACK_8_8_8 (op, dst, obj, idx);
+
+      SP_SET (dst, SCM_CELL_OBJECT (SP_REF (obj), idx));
+
+      NEXT (1);
+    }
+
+  VM_DEFINE_OP (72, scm_set_immediate, "scm-set!/immediate", OP1 (X8_S8_C8_S8))
+    {
+      uint8_t obj, idx, val;
+
+      UNPACK_8_8_8 (op, obj, idx, val);
+
+      SCM_SET_CELL_OBJECT (SP_REF (obj), idx, SP_REF (val));
+
+      NEXT (1);
+    }
+
+  VM_DEFINE_OP (73, word_ref, "word-ref", DOP1 (X8_S8_S8_S8))
+    {
+      uint8_t dst, obj, idx;
+
+      UNPACK_8_8_8 (op, dst, obj, idx);
+
+      SP_SET_U64 (dst, SCM_CELL_WORD (SP_REF (obj), SP_REF_U64 (idx)));
+
+      NEXT (1);
+    }
+
+  VM_DEFINE_OP (74, word_set, "word-set!", OP1 (X8_S8_S8_S8))
+    {
+      uint8_t obj, idx, val;
 
-      UNPACK_24 (op, dst);
-      offset = ip[1];
+      UNPACK_8_8_8 (op, obj, idx, val);
 
-      SP_SET_U64 (dst, (uintptr_t) (ip + offset));
+      SCM_SET_CELL_WORD (SP_REF (obj), SP_REF_U64 (idx), SP_REF_U64 (val));
 
-      NEXT (2);
+      NEXT (1);
     }
 
-  VM_DEFINE_OP (77, call_s64_from_scm, "call-s64<-scm", DOP2 (X8_S12_S12, C32))
+  VM_DEFINE_OP (75, word_ref_immediate, "word-ref/immediate", DOP1 (X8_S8_S8_C8))
     {
-      uint16_t dst, src;
-      scm_t_s64_from_scm_intrinsic intrinsic;
-
-      UNPACK_12_12 (op, dst, src);
-      intrinsic = intrinsics[ip[1]];
+      uint8_t dst, obj, idx;
 
-      SYNC_IP ();
-#if INDIRECT_INT64_INTRINSICS
-      intrinsic (& SP_REF_S64 (dst), SP_REF (src));
-#else
-      {
-        int64_t res = intrinsic (SP_REF (src));
-        SP_SET_S64 (dst, res);
-      }
-#endif
+      UNPACK_8_8_8 (op, dst, obj, idx);
 
-      /* No CACHE_SP () after the intrinsic, as the indirect variants
-         have an out argument that points at the stack; stack relocation
-         during this kind of intrinsic is not supported!  */
+      SP_SET_U64 (dst, SCM_CELL_WORD (SP_REF (obj), idx));
 
-      NEXT (2);
+      NEXT (1);
     }
 
-  VM_DEFINE_OP (78, call_scm_from_u64, "call-scm<-u64", DOP2 (X8_S12_S12, C32))
+  VM_DEFINE_OP (76, word_set_immediate, "word-set!/immediate", OP1 (X8_S8_C8_S8))
     {
-      uint16_t dst, src;
-      SCM res;
-      scm_t_scm_from_u64_intrinsic intrinsic;
-
-      UNPACK_12_12 (op, dst, src);
-      intrinsic = intrinsics[ip[1]];
+      uint8_t obj, idx, val;
 
-      SYNC_IP ();
-#if INDIRECT_INT64_INTRINSICS
-      res = intrinsic (& SP_REF_U64 (src));
-#else
-      res = intrinsic (SP_REF_U64 (src));
-#endif
-      SP_SET (dst, res);
+      UNPACK_8_8_8 (op, obj, idx, val);
 
-      /* No CACHE_SP () after the intrinsic, as the indirect variants
-         pass stack pointers directly; stack relocation during this kind
-         of intrinsic is not supported!  */
+      SCM_SET_CELL_WORD (SP_REF (obj), idx, SP_REF_U64 (val));
 
-      NEXT (2);
+      NEXT (1);
     }
 
-  VM_DEFINE_OP (79, call_scm_from_s64, "call-scm<-s64", DOP2 (X8_S12_S12, C32))
+  VM_DEFINE_OP (77, pointer_ref_immediate, "pointer-ref/immediate", DOP1 (X8_S8_S8_C8))
     {
-      uint16_t dst, src;
-      SCM res;
-      scm_t_scm_from_s64_intrinsic intrinsic;
+      uint8_t dst, obj, idx;
 
-      UNPACK_12_12 (op, dst, src);
-      intrinsic = intrinsics[ip[1]];
+      UNPACK_8_8_8 (op, dst, obj, idx);
 
-      SYNC_IP ();
-#if INDIRECT_INT64_INTRINSICS
-      res = intrinsic (& SP_REF_S64 (src));
-#else
-      res = intrinsic (SP_REF_S64 (src));
-#endif
-      CACHE_SP ();
-      SP_SET (dst, res);
+      SP_SET_PTR (dst, (void*) SCM_CELL_WORD (SP_REF (obj), idx));
 
-      NEXT (2);
+      NEXT (1);
     }
 
-  VM_DEFINE_OP (80, unused_80, NULL, NOP)
+  VM_DEFINE_OP (78, pointer_set_immediate, "pointer-set!/immediate", OP1 (X8_S8_C8_S8))
     {
-      vm_error_bad_instruction (op);
-      abort (); /* never reached */
-    }
+      uint8_t obj, idx, val;
 
-  
+      UNPACK_8_8_8 (op, obj, idx, val);
+
+      SCM_SET_CELL_WORD (SP_REF (obj), idx, (uintptr_t) SP_REF_PTR (val));
 
-  VM_DEFINE_OP (81, tag_char, "tag-char", DOP1 (X8_S12_S12))
-    {
-      uint16_t dst, src;
-      UNPACK_12_12 (op, dst, src);
-      SP_SET (dst,
-              SCM_MAKE_ITAG8 ((scm_t_bits) (scm_t_wchar) SP_REF_U64 (src),
-                              scm_tc8_char));
       NEXT (1);
     }
 
-  VM_DEFINE_OP (82, untag_char, "untag-char", DOP1 (X8_S12_S12))
+  VM_DEFINE_OP (79, tail_pointer_ref_immediate, "tail-pointer-ref/immediate", DOP1 (X8_S8_S8_C8))
     {
-      uint16_t dst, src;
-      UNPACK_12_12 (op, dst, src);
-      SP_SET_U64 (dst, SCM_CHAR (SP_REF (src)));
+      uint8_t dst, obj, idx;
+
+      UNPACK_8_8_8 (op, dst, obj, idx);
+
+      SP_SET_PTR (dst, ((scm_t_bits *) SCM2PTR (SP_REF (obj))) + idx);
+
       NEXT (1);
     }
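
The difference between pointer-ref/immediate and
tail-pointer-ref/immediate above is easy to miss: the former loads a
raw pointer stored in field IDX, while the latter computes an interior
pointer to field IDX itself (for example, data laid out inline after
the header words).  A sketch with invented names:

    #include <stdint.h>

    typedef uintptr_t word;

    /* pointer-ref/immediate: field IDX holds a raw pointer; load it.  */
    static void *
    pointer_ref (const word *obj, unsigned idx)
    {
      return (void *) obj[idx];
    }

    /* tail-pointer-ref/immediate: take the address of field IDX itself,
       i.e. an interior pointer into the object.  */
    static word *
    tail_pointer_ref (word *obj, unsigned idx)
    {
      return obj + idx;
    }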
 
-  VM_DEFINE_OP (83, atomic_ref_scm_immediate, "atomic-scm-ref/immediate", DOP1 (X8_S8_S8_C8))
+  VM_DEFINE_OP (80, atomic_ref_scm_immediate, "atomic-scm-ref/immediate", DOP1 (X8_S8_S8_C8))
     {
       uint8_t dst, obj, offset;
       SCM *loc;
@@ -1820,7 +1911,7 @@ VM_NAME (scm_thread *thread)
       NEXT (1);
     }
 
-  VM_DEFINE_OP (84, atomic_set_scm_immediate, "atomic-scm-set!/immediate", OP1 (X8_S8_C8_S8))
+  VM_DEFINE_OP (81, atomic_set_scm_immediate, "atomic-scm-set!/immediate", OP1 (X8_S8_C8_S8))
     {
       uint8_t obj, offset, val;
       SCM *loc;
@@ -1830,7 +1921,7 @@ VM_NAME (scm_thread *thread)
       NEXT (1);
     }
 
-  VM_DEFINE_OP (85, atomic_scm_swap_immediate, "atomic-scm-swap!/immediate", DOP3 (X8_S24, X8_S24, C8_S24))
+  VM_DEFINE_OP (82, atomic_scm_swap_immediate, "atomic-scm-swap!/immediate", DOP3 (X8_S24, X8_S24, C8_S24))
     {
       uint32_t dst, obj, val;
       uint8_t offset;
@@ -1843,7 +1934,7 @@ VM_NAME (scm_thread *thread)
       NEXT (3);
     }
 
-  VM_DEFINE_OP (86, atomic_scm_compare_and_swap_immediate, "atomic-scm-compare-and-swap!/immediate", DOP4 (X8_S24, X8_S24, C8_S24, X8_S24))
+  VM_DEFINE_OP (83, atomic_scm_compare_and_swap_immediate, "atomic-scm-compare-and-swap!/immediate", DOP4 (X8_S24, X8_S24, C8_S24, X8_S24))
     {
       uint32_t dst, obj, expected, desired;
       uint8_t offset;
@@ -1860,219 +1951,117 @@ VM_NAME (scm_thread *thread)
       NEXT (4);
     }
 
-  VM_DEFINE_OP (87, call_thread_scm_scm, "call-thread-scm-scm", OP2 (X8_S12_S12, C32))
-    {
-      uint16_t a, b;
-      scm_t_thread_scm_scm_intrinsic intrinsic;
-
-      UNPACK_12_12 (op, a, b);
-      intrinsic = intrinsics[ip[1]];
-
-      SYNC_IP ();
-      intrinsic (thread, SP_REF (a), SP_REF (b));
-      CACHE_SP ();
-
-      NEXT (2);
-    }
-
-  VM_DEFINE_OP (88, call_thread, "call-thread", OP2 (X32, C32))
-    {
-      scm_t_thread_intrinsic intrinsic;
-
-      intrinsic = intrinsics[ip[1]];
-
-      SYNC_IP ();
-      intrinsic (thread);
-      CACHE_SP ();
-
-      NEXT (2);
-    }
-
-  VM_DEFINE_OP (89, call_scm_from_thread_scm, "call-scm<-thread-scm", DOP2 (X8_S12_S12, C32))
-    {
-      uint16_t dst, src;
-      scm_t_scm_from_thread_scm_intrinsic intrinsic;
-      SCM res;
-
-      UNPACK_12_12 (op, dst, src);
-      intrinsic = intrinsics[ip[1]];
-
-      SYNC_IP ();
-      res = intrinsic (thread, SP_REF (src));
-      CACHE_SP ();
-
-      SP_SET (dst, res);
-
-      NEXT (2);
-    }
-
-  VM_DEFINE_OP (90, call_thread_scm, "call-thread-scm", OP2 (X8_S24, C32))
+  /* static-ref dst:24 offset:32
+   *
+   * Load a SCM value into DST.  The SCM value will be fetched from
+   * memory, OFFSET 32-bit words away from the current instruction
+   * pointer.  OFFSET is a signed value.
+   *
+   * The intention is for this instruction to be used to load constants
+   * that the compiler is unable to statically allocate, like symbols.
+   * These values would be initialized when the object file loads.
+   */
+  VM_DEFINE_OP (84, static_ref, "static-ref", DOP2 (X8_S24, R32))
     {
-      uint32_t a;
-      scm_t_thread_scm_intrinsic intrinsic;
+      uint32_t dst;
+      int32_t offset;
+      uint32_t* loc;
+      uintptr_t loc_bits;
 
-      UNPACK_24 (op, a);
-      intrinsic = intrinsics[ip[1]];
+      UNPACK_24 (op, dst);
+      offset = ip[1];
+      loc = ip + offset;
+      loc_bits = (uintptr_t) loc;
+      VM_ASSERT (ALIGNED_P (loc, SCM), abort());
 
-      SYNC_IP ();
-      intrinsic (thread, SP_REF (a));
-      CACHE_SP ();
+      SP_SET (dst, *((SCM *) loc_bits));
 
       NEXT (2);
     }
 
-  VM_DEFINE_OP (91, call_scm_from_scm_u64, "call-scm<-scm-u64", DOP2 (X8_S8_S8_S8, C32))
+  /* static-set! src:24 offset:32
+   *
+   * Store a SCM value into memory, OFFSET 32-bit words away from the
+   * current instruction pointer.  OFFSET is a signed value.
+   */
+  VM_DEFINE_OP (85, static_set, "static-set!", OP2 (X8_S24, LO32))
     {
-      uint8_t dst, a, b;
-      SCM res;
-      scm_t_scm_from_scm_u64_intrinsic intrinsic;
-
-      UNPACK_8_8_8 (op, dst, a, b);
-      intrinsic = intrinsics[ip[1]];
+      uint32_t src;
+      int32_t offset;
+      uint32_t* loc;
 
-      SYNC_IP ();
-#if INDIRECT_INT64_INTRINSICS
-      res = intrinsic (SP_REF (a), & SP_REF_U64 (b));
-#else
-      res = intrinsic (SP_REF (a), SP_REF_U64 (b));
-#endif
-      CACHE_SP ();
+      UNPACK_24 (op, src);
+      offset = ip[1];
+      loc = ip + offset;
+      VM_ASSERT (ALIGNED_P (loc, SCM), abort());
 
-      SP_SET (dst, res);
+      *((SCM *) loc) = SP_REF (src);
 
       NEXT (2);
     }
 
-  VM_DEFINE_OP (92, call_scm_from_thread, "call-scm<-thread", DOP2 (X8_S24, C32))
+  /* static-patch! _:24 dst-offset:32 src-offset:32
+   *
+   * Patch a pointer at DST-OFFSET to point to SRC-OFFSET.  Both offsets
+   * are signed 32-bit values, indicating a memory address as a number
+   * of 32-bit words away from the current instruction pointer.
+   */
+  VM_DEFINE_OP (86, static_patch, "static-patch!", OP3 (X32, LO32, L32))
     {
-      uint32_t dst;
-      scm_t_scm_from_thread_intrinsic intrinsic;
-      SCM res;
-
-      UNPACK_24 (op, dst);
-      intrinsic = intrinsics[ip[1]];
-
-      SYNC_IP ();
-      res = intrinsic (thread);
-      CACHE_SP ();
+      int32_t dst_offset, src_offset;
+      void *src;
+      void** dst_loc;
 
-      SP_SET (dst, res);
+      dst_offset = ip[1];
+      src_offset = ip[2];
 
-      NEXT (2);
-    }
+      dst_loc = (void **) (ip + dst_offset);
+      src = ip + src_offset;
+      VM_ASSERT (ALIGNED_P (dst_loc, void*), abort());
 
-  VM_DEFINE_OP (93, unused_93, NULL, NOP)
-  VM_DEFINE_OP (94, unused_94, NULL, NOP)
-  VM_DEFINE_OP (95, unused_95, NULL, NOP)
-  VM_DEFINE_OP (96, unused_96, NULL, NOP)
-  VM_DEFINE_OP (97, unused_97, NULL, NOP)
-  VM_DEFINE_OP (98, unused_98, NULL, NOP)
-  VM_DEFINE_OP (99, unused_99, NULL, NOP)
-  VM_DEFINE_OP (100, unused_100, NULL, NOP)
-  VM_DEFINE_OP (101, unused_101, NULL, NOP)
-  VM_DEFINE_OP (102, unused_102, NULL, NOP)
-  VM_DEFINE_OP (103, unused_103, NULL, NOP)
-  VM_DEFINE_OP (104, unused_104, NULL, NOP)
-  VM_DEFINE_OP (105, unused_105, NULL, NOP)
-  VM_DEFINE_OP (106, unused_106, NULL, NOP)
-  VM_DEFINE_OP (107, unused_107, NULL, NOP)
-  VM_DEFINE_OP (108, unused_108, NULL, NOP)
-  VM_DEFINE_OP (109, unused_109, NULL, NOP)
-  VM_DEFINE_OP (110, unused_110, NULL, NOP)
-  VM_DEFINE_OP (111, unused_111, NULL, NOP)
-  VM_DEFINE_OP (112, unused_112, NULL, NOP)
-  VM_DEFINE_OP (113, unused_113, NULL, NOP)
-  VM_DEFINE_OP (114, unused_114, NULL, NOP)
-  VM_DEFINE_OP (115, unused_115, NULL, NOP)
-  VM_DEFINE_OP (116, unused_116, NULL, NOP)
-  VM_DEFINE_OP (117, unused_117, NULL, NOP)
-  VM_DEFINE_OP (118, unused_118, NULL, NOP)
-  VM_DEFINE_OP (119, unused_119, NULL, NOP)
-  VM_DEFINE_OP (120, unused_120, NULL, NOP)
-  VM_DEFINE_OP (121, unused_121, NULL, NOP)
-  VM_DEFINE_OP (122, unused_122, NULL, NOP)
-  VM_DEFINE_OP (123, unused_123, NULL, NOP)
-  VM_DEFINE_OP (124, unused_124, NULL, NOP)
-  VM_DEFINE_OP (125, unused_125, NULL, NOP)
-  VM_DEFINE_OP (126, unused_126, NULL, NOP)
-  VM_DEFINE_OP (127, unused_127, NULL, NOP)
-  VM_DEFINE_OP (128, unused_128, NULL, NOP)
-  VM_DEFINE_OP (129, unused_129, NULL, NOP)
-  VM_DEFINE_OP (130, unused_130, NULL, NOP)
-  VM_DEFINE_OP (131, unused_131, NULL, NOP)
-  VM_DEFINE_OP (132, unused_132, NULL, NOP)
-  VM_DEFINE_OP (133, unused_133, NULL, NOP)
-  VM_DEFINE_OP (134, unused_134, NULL, NOP)
-  VM_DEFINE_OP (135, unused_135, NULL, NOP)
-  VM_DEFINE_OP (136, unused_136, NULL, NOP)
-  VM_DEFINE_OP (137, unused_137, NULL, NOP)
-    {
-      vm_error_bad_instruction (op);
-      abort (); /* never reached */
-    }
+      *dst_loc = src;
 
-  /* fadd dst:8 a:8 b:8
-   *
-   * Add A to B, and place the result in DST.  The operands and the
-   * result are unboxed double-precision floating-point numbers.
-   */
-  VM_DEFINE_OP (138, fadd, "fadd", DOP1 (X8_S8_S8_S8))
-    {
-      uint8_t dst, a, b;
-      UNPACK_8_8_8 (op, dst, a, b);
-      SP_SET_F64 (dst, SP_REF_F64 (a) + SP_REF_F64 (b));
-      NEXT (1);
+      NEXT (3);
     }
 
-  /* fsub dst:8 a:8 b:8
-   *
-   * Subtract B from A, and place the result in DST.  The operands and
-   * the result are unboxed double-precision floating-point numbers.
-   */
-  VM_DEFINE_OP (139, fsub, "fsub", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (87, tag_char, "tag-char", DOP1 (X8_S12_S12))
     {
-      uint8_t dst, a, b;
-      UNPACK_8_8_8 (op, dst, a, b);
-      SP_SET_F64 (dst, SP_REF_F64 (a) - SP_REF_F64 (b));
+      uint16_t dst, src;
+      UNPACK_12_12 (op, dst, src);
+      SP_SET (dst,
+              SCM_MAKE_ITAG8 ((scm_t_bits) (scm_t_wchar) SP_REF_U64 (src),
+                              scm_tc8_char));
       NEXT (1);
     }
-
-  /* fmul dst:8 a:8 b:8
-   *
-   * Multiply A and B, and place the result in DST.  The operands and
-   * the result are unboxed double-precision floating-point numbers.
-   */
-  VM_DEFINE_OP (140, fmul, "fmul", DOP1 (X8_S8_S8_S8))
-    {
-      uint8_t dst, a, b;
-      UNPACK_8_8_8 (op, dst, a, b);
-      SP_SET_F64 (dst, SP_REF_F64 (a) * SP_REF_F64 (b));
+
+  VM_DEFINE_OP (88, untag_char, "untag-char", DOP1 (X8_S12_S12))
+    {
+      uint16_t dst, src;
+      UNPACK_12_12 (op, dst, src);
+      SP_SET_U64 (dst, SCM_CHAR (SP_REF (src)));
       NEXT (1);
     }
 
-  /* fdiv dst:8 a:8 b:8
-   *
-   * Divide A by B, and place the result in DST.  The operands and the
-   * result are unboxed double-precision floating-point numbers.
-   */
-  VM_DEFINE_OP (141, fdiv, "fdiv", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (89, tag_fixnum, "tag-fixnum", DOP1 (X8_S12_S12))
     {
-      uint8_t dst, a, b;
-      UNPACK_8_8_8 (op, dst, a, b);
-      SP_SET_F64 (dst, SP_REF_F64 (a) / SP_REF_F64 (b));
+      uint16_t dst, src;
+
+      UNPACK_12_12 (op, dst, src);
+
+      SP_SET (dst, SCM_I_MAKINUM (SP_REF_S64 (src)));
+
       NEXT (1);
     }
 
-  VM_DEFINE_OP (142, unused_142, NULL, NOP)
-  VM_DEFINE_OP (143, unused_143, NULL, NOP)
-  VM_DEFINE_OP (144, unused_144, NULL, NOP)
-  VM_DEFINE_OP (145, unused_145, NULL, NOP)
-  VM_DEFINE_OP (146, unused_146, NULL, NOP)
-  VM_DEFINE_OP (147, unused_147, NULL, NOP)
-  VM_DEFINE_OP (148, unused_148, NULL, NOP)
+  VM_DEFINE_OP (90, untag_fixnum, "untag-fixnum", DOP1 (X8_S12_S12))
     {
-      vm_error_bad_instruction (op);
-      abort (); /* never reached */
+      uint16_t dst, src;
+
+      UNPACK_12_12 (op, dst, src);
+
+      SP_SET_S64 (dst, SCM_I_INUM (SP_REF (src)));
+
+      NEXT (1);
     }
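
tag-fixnum and untag-fixnum above wrap Guile's immediate-integer
encoding: the value lives in the upper bits of the word, with a two-bit
tag in the low bits.  A rough sketch of that encoding, assuming the
fixnum tag value is 2 (scm_tc2_int); the authoritative definitions are
SCM_I_MAKINUM and SCM_I_INUM in libguile:

    #include <stdint.h>

    #define FIXNUM_TAG 2  /* assumed tag value */

    static uintptr_t
    tag_fixnum (intptr_t n)
    {
      return ((uintptr_t) n << 2) | FIXNUM_TAG;
    }

    static intptr_t
    untag_fixnum (uintptr_t bits)
    {
      /* Arithmetic right shift drops the tag and restores the sign.  */
      return (intptr_t) bits >> 2;
    }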
 
   /* uadd dst:8 a:8 b:8
@@ -2081,7 +2070,7 @@ VM_NAME (scm_thread *thread)
    * result are unboxed unsigned 64-bit integers.  Overflow will wrap
    * around.
    */
-  VM_DEFINE_OP (149, uadd, "uadd", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (91, uadd, "uadd", DOP1 (X8_S8_S8_S8))
     {
       uint8_t dst, a, b;
       UNPACK_8_8_8 (op, dst, a, b);
@@ -2095,7 +2084,7 @@ VM_NAME (scm_thread *thread)
    * the result are unboxed unsigned 64-bit integers.  Overflow will
    * wrap around.
    */
-  VM_DEFINE_OP (150, usub, "usub", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (92, usub, "usub", DOP1 (X8_S8_S8_S8))
     {
       uint8_t dst, a, b;
       UNPACK_8_8_8 (op, dst, a, b);
@@ -2109,7 +2098,7 @@ VM_NAME (scm_thread *thread)
    * the result are unboxed unsigned 64-bit integers.  Overflow will
    * wrap around.
    */
-  VM_DEFINE_OP (151, umul, "umul", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (93, umul, "umul", DOP1 (X8_S8_S8_S8))
     {
       uint8_t dst, a, b;
       UNPACK_8_8_8 (op, dst, a, b);
@@ -2123,7 +2112,7 @@ VM_NAME (scm_thread *thread)
    * value IMM and place the raw unsigned 64-bit result in DST.
    * Overflow will wrap around.
    */
-  VM_DEFINE_OP (152, uadd_immediate, "uadd/immediate", DOP1 (X8_S8_S8_C8))
+  VM_DEFINE_OP (94, uadd_immediate, "uadd/immediate", DOP1 (X8_S8_S8_C8))
     {
       uint8_t dst, src, imm;
       uint64_t x;
@@ -2140,7 +2129,7 @@ VM_NAME (scm_thread *thread)
    * value in SRC and place the raw unsigned 64-bit result in DST.
    * Overflow will wrap around.
    */
-  VM_DEFINE_OP (153, usub_immediate, "usub/immediate", DOP1 (X8_S8_S8_C8))
+  VM_DEFINE_OP (95, usub_immediate, "usub/immediate", DOP1 (X8_S8_S8_C8))
     {
       uint8_t dst, src, imm;
       uint64_t x;
@@ -2157,7 +2146,7 @@ VM_NAME (scm_thread *thread)
    * value IMM and place the raw unsigned 64-bit result in DST.
    * Overflow will wrap around.
    */
-  VM_DEFINE_OP (154, umul_immediate, "umul/immediate", DOP1 (X8_S8_S8_C8))
+  VM_DEFINE_OP (96, umul_immediate, "umul/immediate", DOP1 (X8_S8_S8_C8))
     {
       uint8_t dst, src, imm;
       uint64_t x;
@@ -2168,143 +2157,96 @@ VM_NAME (scm_thread *thread)
       NEXT (1);
     }
 
-  /* load-f64 dst:24 high-bits:32 low-bits:32
+  /* ulogand dst:8 a:8 b:8
    *
-   * Make a double-precision floating-point value with HIGH-BITS and
-   * LOW-BITS.
+   * Place the bitwise AND of the u64 values in A and B into DST.
    */
-  VM_DEFINE_OP (155, load_f64, "load-f64", DOP3 (X8_S24, AF32, BF32))
+  VM_DEFINE_OP (97, ulogand, "ulogand", DOP1 (X8_S8_S8_S8))
     {
-      uint32_t dst;
-      uint64_t val;
-
-      UNPACK_24 (op, dst);
-      val = ip[1];
-      val <<= 32;
-      val |= ip[2];
-      SP_SET_U64 (dst, val);
-      NEXT (3);
-    }
+      uint8_t dst, a, b;
 
-  /* load-u64 dst:24 high-bits:32 low-bits:32
-   *
-   * Make an unsigned 64-bit integer with HIGH-BITS and LOW-BITS.
-   */
-  VM_DEFINE_OP (156, load_u64, "load-u64", DOP3 (X8_S24, AU32, BU32))
-    {
-      uint32_t dst;
-      uint64_t val;
+      UNPACK_8_8_8 (op, dst, a, b);
 
-      UNPACK_24 (op, dst);
-      val = ip[1];
-      val <<= 32;
-      val |= ip[2];
-      SP_SET_U64 (dst, val);
-      NEXT (3);
-    }
+      SP_SET_U64 (dst, SP_REF_U64 (a) & SP_REF_U64 (b));
 
-  VM_DEFINE_OP (157, unused_157, NULL, NOP)
-  VM_DEFINE_OP (158, unused_158, NULL, NOP)
-    {
-      vm_error_bad_instruction (op);
-      abort (); /* never reached */
+      NEXT (1);
     }
 
-  /* load-s64 dst:24 high-bits:32 low-bits:32
+  /* ulogior dst:8 a:8 b:8
    *
-   * Make an unsigned 64-bit integer with HIGH-BITS and LOW-BITS.
+   * Place the bitwise inclusive OR of the u64 values in A and B into
+   * DST.
    */
-  VM_DEFINE_OP (159, load_s64, "load-s64", DOP3 (X8_S24, AS32, BS32))
+  VM_DEFINE_OP (98, ulogior, "ulogior", DOP1 (X8_S8_S8_S8))
     {
-      uint32_t dst;
-      uint64_t val;
-
-      UNPACK_24 (op, dst);
-      val = ip[1];
-      val <<= 32;
-      val |= ip[2];
-      SP_SET_U64 (dst, val);
-      NEXT (3);
-    }
+      uint8_t dst, a, b;
 
-  /* current-thread dst:24
-   *
-   * Write the current thread into DST.
-   */
-  VM_DEFINE_OP (160, current_thread, "current-thread", DOP1 (X8_S24))
-    {
-      uint32_t dst;
+      UNPACK_8_8_8 (op, dst, a, b);
 
-      UNPACK_24 (op, dst);
-      SP_SET (dst, thread->handle);
+      SP_SET_U64 (dst, SP_REF_U64 (a) | SP_REF_U64 (b));
 
       NEXT (1);
     }
 
-  VM_DEFINE_OP (161, unused_161, NULL, NOP)
-    {
-      vm_error_bad_instruction (op);
-      abort (); /* never reached */
-    }
-
-  /* ulogand dst:8 a:8 b:8
+  /* ulogsub dst:8 a:8 b:8
    *
-   * Place the bitwise AND of the u64 values in A and B into DST.
+   * Place the (A & ~B) of the u64 values A and B into DST.
    */
-  VM_DEFINE_OP (162, ulogand, "ulogand", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (99, ulogsub, "ulogsub", DOP1 (X8_S8_S8_S8))
     {
       uint8_t dst, a, b;
 
       UNPACK_8_8_8 (op, dst, a, b);
 
-      SP_SET_U64 (dst, SP_REF_U64 (a) & SP_REF_U64 (b));
+      SP_SET_U64 (dst, SP_REF_U64 (a) & ~SP_REF_U64 (b));
 
       NEXT (1);
     }
 
-  /* ulogior dst:8 a:8 b:8
+  /* ulogxor dst:8 a:8 b:8
    *
-   * Place the bitwise inclusive OR of the u64 values in A and B into
+   * Place the bitwise exclusive OR of the u64 values in A and B into
    * DST.
    */
-  VM_DEFINE_OP (163, ulogior, "ulogior", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (100, ulogxor, "ulogxor", DOP1 (X8_S8_S8_S8))
     {
       uint8_t dst, a, b;
 
       UNPACK_8_8_8 (op, dst, a, b);
 
-      SP_SET_U64 (dst, SP_REF_U64 (a) | SP_REF_U64 (b));
+      SP_SET_U64 (dst, SP_REF_U64 (a) ^ SP_REF_U64 (b));
 
       NEXT (1);
     }
 
-  /* ulogsub dst:8 a:8 b:8
+  /* ursh dst:8 a:8 b:8
    *
-   * Place the (A & ~B) of the u64 values A and B into DST.
+   * Shift the u64 value in A right by B bits, and place the result in
+   * DST.  Only the lower 6 bits of B are used.
    */
-  VM_DEFINE_OP (164, ulogsub, "ulogsub", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (101, ursh, "ursh", DOP1 (X8_S8_S8_S8))
     {
       uint8_t dst, a, b;
 
       UNPACK_8_8_8 (op, dst, a, b);
 
-      SP_SET_U64 (dst, SP_REF_U64 (a) & ~SP_REF_U64 (b));
+      SP_SET_U64 (dst, SP_REF_U64 (a) >> (SP_REF_U64 (b) & 63));
 
       NEXT (1);
     }
 
-  /* ursh dst:8 a:8 b:8
+  /* srsh dst:8 a:8 b:8
    *
-   * Shift the u64 value in A right by B bits, and place the result in
+   * Shift the s64 value in A right by B bits, and place the result in
    * DST.  Only the lower 6 bits of B are used.
    */
-  VM_DEFINE_OP (165, ursh, "ursh", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (102, srsh, "srsh", DOP1 (X8_S8_S8_S8))
     {
       uint8_t dst, a, b;
 
       UNPACK_8_8_8 (op, dst, a, b);
 
-      SP_SET_U64 (dst, SP_REF_U64 (a) >> (SP_REF_U64 (b) & 63));
+      SP_SET_S64 (dst, SCM_SRS (SP_REF_S64 (a), (SP_REF_U64 (b) & 63)));
 
       NEXT (1);
     }
@@ -2314,7 +2256,7 @@ VM_NAME (scm_thread *thread)
    * Shift the u64 value in A left by B bits, and place the result in
    * DST.  Only the lower 6 bits of B are used.
    */
-  VM_DEFINE_OP (166, ulsh, "ulsh", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (103, ulsh, "ulsh", DOP1 (X8_S8_S8_S8))
     {
       uint8_t dst, a, b;
 
@@ -2325,18 +2267,12 @@ VM_NAME (scm_thread *thread)
       NEXT (1);
     }
 
-  VM_DEFINE_OP (167, unused_167, NULL, NOP)
-    {
-      vm_error_bad_instruction (op);
-      abort (); /* never reached */
-    }
-
   /* ursh/immediate dst:8 a:8 b:8
    *
    * Shift the u64 value in A right by the immediate B bits, and place
    * the result in DST.  Only the lower 6 bits of B are used.
    */
-  VM_DEFINE_OP (168, ursh_immediate, "ursh/immediate", DOP1 (X8_S8_S8_C8))
+  VM_DEFINE_OP (104, ursh_immediate, "ursh/immediate", DOP1 (X8_S8_S8_C8))
     {
       uint8_t dst, a, b;
 
@@ -2347,12 +2283,28 @@ VM_NAME (scm_thread *thread)
       NEXT (1);
     }
 
+  /* srsh/immediate dst:8 a:8 b:8
+   *
+   * Shift the s64 value in A right by the immediate B bits, and place
+   * the result in DST.  Only the lower 6 bits of B are used.
+   */
+  VM_DEFINE_OP (105, srsh_immediate, "srsh/immediate", DOP1 (X8_S8_S8_C8))
+    {
+      uint8_t dst, a, b;
+
+      UNPACK_8_8_8 (op, dst, a, b);
+
+      SP_SET_S64 (dst, SCM_SRS (SP_REF_S64 (a), b & 63));
+
+      NEXT (1);
+    }
+
   /* ulsh/immediate dst:8 a:8 b:8
    *
    * Shift the u64 value in A left by the immediate B bits, and place
    * the result in DST.  Only the lower 6 bits of B are used.
    */
-  VM_DEFINE_OP (169, ulsh_immediate, "ulsh/immediate", DOP1 (X8_S8_S8_C8))
+  VM_DEFINE_OP (106, ulsh_immediate, "ulsh/immediate", DOP1 (X8_S8_S8_C8))
     {
       uint8_t dst, a, b;
 
@@ -2363,151 +2315,172 @@ VM_NAME (scm_thread *thread)
       NEXT (1);
     }
 
-  VM_DEFINE_OP (170, unused_170, NULL, NOP)
-  VM_DEFINE_OP (171, unused_171, NULL, NOP)
-  VM_DEFINE_OP (172, unused_172, NULL, NOP)
-  VM_DEFINE_OP (173, unused_173, NULL, NOP)
-  VM_DEFINE_OP (174, unused_174, NULL, NOP)
-  VM_DEFINE_OP (175, unused_175, NULL, NOP)
-  VM_DEFINE_OP (176, unused_176, NULL, NOP)
+  /* fadd dst:8 a:8 b:8
+   *
+   * Add A to B, and place the result in DST.  The operands and the
+   * result are unboxed double-precision floating-point numbers.
+   */
+  VM_DEFINE_OP (107, fadd, "fadd", DOP1 (X8_S8_S8_S8))
     {
-      vm_error_bad_instruction (op);
-      abort (); /* never reached */
+      uint8_t dst, a, b;
+      UNPACK_8_8_8 (op, dst, a, b);
+      SP_SET_F64 (dst, SP_REF_F64 (a) + SP_REF_F64 (b));
+      NEXT (1);
     }
 
-  /* ulogxor dst:8 a:8 b:8
+  /* fsub dst:8 a:8 b:8
    *
-   * Place the bitwise exclusive OR of the u64 values in A and B into
-   * DST.
+   * Subtract B from A, and place the result in DST.  The operands and
+   * the result are unboxed double-precision floating-point numbers.
    */
-  VM_DEFINE_OP (177, ulogxor, "ulogxor", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (108, fsub, "fsub", DOP1 (X8_S8_S8_S8))
     {
       uint8_t dst, a, b;
-
       UNPACK_8_8_8 (op, dst, a, b);
-
-      SP_SET_U64 (dst, SP_REF_U64 (a) ^ SP_REF_U64 (b));
-
+      SP_SET_F64 (dst, SP_REF_F64 (a) - SP_REF_F64 (b));
       NEXT (1);
     }
 
-  VM_DEFINE_OP (178, unused_178, NULL, NOP)
-  VM_DEFINE_OP (179, unused_179, NULL, NOP)
-  VM_DEFINE_OP (180, unused_180, NULL, NOP)
-  VM_DEFINE_OP (181, unused_181, NULL, NOP)
-  VM_DEFINE_OP (182, unused_182, NULL, NOP)
+  /* fmul dst:8 a:8 b:8
+   *
+   * Multiply A and B, and place the result in DST.  The operands and
+   * the result are unboxed double-precision floating-point numbers.
+   */
+  VM_DEFINE_OP (109, fmul, "fmul", DOP1 (X8_S8_S8_S8))
     {
-      vm_error_bad_instruction (op);
-      abort (); /* never reached */
+      uint8_t dst, a, b;
+      UNPACK_8_8_8 (op, dst, a, b);
+      SP_SET_F64 (dst, SP_REF_F64 (a) * SP_REF_F64 (b));
+      NEXT (1);
     }
 
-  /* handle-interrupts _:24
+  /* fdiv dst:8 a:8 b:8
    *
-   * Handle pending interrupts.
+   * Divide A by B, and place the result in DST.  The operands and the
+   * result are unboxed double-precision floating-point numbers.
    */
-  VM_DEFINE_OP (183, handle_interrupts, "handle-interrupts", OP1 (X32))
+  VM_DEFINE_OP (110, fdiv, "fdiv", DOP1 (X8_S8_S8_S8))
     {
-      if (SCM_LIKELY (scm_is_null
-                      (scm_atomic_ref_scm (&thread->pending_asyncs))))
-        NEXT (1);
+      uint8_t dst, a, b;
+      UNPACK_8_8_8 (op, dst, a, b);
+      SP_SET_F64 (dst, SP_REF_F64 (a) / SP_REF_F64 (b));
+      NEXT (1);
+    }
 
-      if (thread->block_asyncs > 0)
-        NEXT (1);
+  VM_DEFINE_OP (111, u64_numerically_equal, "u64=?", OP1 (X8_S12_S12))
+    {
+      uint16_t a, b;
+      uint64_t x, y;
 
-      SYNC_IP ();
-      CALL_INTRINSIC (push_interrupt_frame, (thread, 0));
-      CACHE_SP ();
-      ip = scm_vm_intrinsics.handle_interrupt_code;
+      UNPACK_12_12 (op, a, b);
+      x = SP_REF_U64 (a);
+      y = SP_REF_U64 (b);
 
-      NEXT (0);
+      VP->compare_result = x == y ? SCM_F_COMPARE_EQUAL : SCM_F_COMPARE_NONE;
+
+      NEXT (1);
     }
 
-  /* return-from-interrupt _:24
-   *
-   * Return from handling an interrupt, discarding any return values and
-   * stripping away the interrupt frame.
-   */
-  VM_DEFINE_OP (184, return_from_interrupt, "return-from-interrupt", OP1 (X32))
+  VM_DEFINE_OP (112, u64_less, "u64<?", OP1 (X8_S12_S12))
     {
-      union scm_vm_stack_element *fp = VP->fp;
+      uint16_t a, b;
+      uint64_t x, y;
 
-      ip = SCM_FRAME_VIRTUAL_RETURN_ADDRESS (fp);
-      VP->fp = SCM_FRAME_DYNAMIC_LINK (fp);
-      VP->sp = sp = SCM_FRAME_PREVIOUS_SP (fp);
+      UNPACK_12_12 (op, a, b);
+      x = SP_REF_U64 (a);
+      y = SP_REF_U64 (b);
 
-      NEXT (0);
+      VP->compare_result = x < y ? SCM_F_COMPARE_LESS_THAN : SCM_F_COMPARE_NONE;
+
+      NEXT (1);
     }
 
-  VM_DEFINE_OP (185, unused_185, NULL, NOP)
-  VM_DEFINE_OP (186, unused_186, NULL, NOP)
-  VM_DEFINE_OP (187, unused_187, NULL, NOP)
-  VM_DEFINE_OP (188, unused_188, NULL, NOP)
-  VM_DEFINE_OP (189, unused_189, NULL, NOP)
-  VM_DEFINE_OP (190, unused_190, NULL, NOP)
-  VM_DEFINE_OP (191, unused_191, NULL, NOP)
-  VM_DEFINE_OP (192, unused_192, NULL, NOP)
+  VM_DEFINE_OP (113, s64_less, "s64<?", OP1 (X8_S12_S12))
+    {
+      uint16_t a, b;
+      int64_t x, y;
+
+      UNPACK_12_12 (op, a, b);
+      x = SP_REF_S64 (a);
+      y = SP_REF_S64 (b);
+
+      VP->compare_result = x < y ? SCM_F_COMPARE_LESS_THAN : SCM_F_COMPARE_NONE;
+
+      NEXT (1);
+    }
+
+  VM_DEFINE_OP (114, s64_imm_numerically_equal, "s64-imm=?", OP1 (X8_S12_Z12))
     {
-      vm_error_bad_instruction (op);
-      abort (); /* never reached */
+      uint16_t a;
+      int64_t x, y;
+
+      a = (op >> 8) & 0xfff;
+      x = SP_REF_S64 (a);
+
+      y = ((int32_t) op) >> 20; /* Sign extension.  */
+
+      VP->compare_result = x == y ? SCM_F_COMPARE_EQUAL : SCM_F_COMPARE_NONE;
+
+      NEXT (1);
     }
 
-  VM_DEFINE_OP (193, u64_numerically_equal, "u64=?", OP1 (X8_S12_S12))
+  VM_DEFINE_OP (115, u64_imm_less, "u64-imm<?", OP1 (X8_S12_C12))
     {
-      uint16_t a, b;
+      uint16_t a;
       uint64_t x, y;
 
-      UNPACK_12_12 (op, a, b);
+      UNPACK_12_12 (op, a, y);
       x = SP_REF_U64 (a);
-      y = SP_REF_U64 (b);
 
-      VP->compare_result = x == y ? SCM_F_COMPARE_EQUAL : SCM_F_COMPARE_NONE;
+      VP->compare_result = x < y ? SCM_F_COMPARE_LESS_THAN : SCM_F_COMPARE_NONE;
 
       NEXT (1);
     }
 
-  VM_DEFINE_OP (194, u64_less, "u64<?", OP1 (X8_S12_S12))
+  VM_DEFINE_OP (116, imm_u64_less, "imm-u64<?", OP1 (X8_S12_C12))
     {
-      uint16_t a, b;
+      uint16_t a;
       uint64_t x, y;
 
-      UNPACK_12_12 (op, a, b);
-      x = SP_REF_U64 (a);
-      y = SP_REF_U64 (b);
+      UNPACK_12_12 (op, a, x);
+      y = SP_REF_U64 (a);
 
       VP->compare_result = x < y ? SCM_F_COMPARE_LESS_THAN : SCM_F_COMPARE_NONE;
 
       NEXT (1);
     }
 
-  VM_DEFINE_OP (195, s64_numerically_equal, "s64=?", OP1 (X8_S12_S12))
+  VM_DEFINE_OP (117, s64_imm_less, "s64-imm<?", OP1 (X8_S12_Z12))
     {
-      uint16_t a, b;
+      uint16_t a;
       int64_t x, y;
 
-      UNPACK_12_12 (op, a, b);
+      a = (op >> 8) & 0xfff;
       x = SP_REF_S64 (a);
-      y = SP_REF_S64 (b);
 
-      VP->compare_result = x == y ? SCM_F_COMPARE_EQUAL : SCM_F_COMPARE_NONE;
+      y = ((int32_t) op) >> 20; /* Sign extension.  */
+
+      VP->compare_result = x < y ? SCM_F_COMPARE_LESS_THAN : SCM_F_COMPARE_NONE;
 
       NEXT (1);
     }
 
-  VM_DEFINE_OP (196, s64_less, "s64<?", OP1 (X8_S12_S12))
+  VM_DEFINE_OP (118, imm_s64_less, "imm-s64<?", OP1 (X8_S12_Z12))
     {
-      uint16_t a, b;
+      uint16_t a;
       int64_t x, y;
 
-      UNPACK_12_12 (op, a, b);
-      x = SP_REF_S64 (a);
-      y = SP_REF_S64 (b);
+      a = (op >> 8) & 0xfff;
+      y = SP_REF_S64 (a);
+
+      x = ((int32_t) op) >> 20; /* Sign extension.  */
 
       VP->compare_result = x < y ? SCM_F_COMPARE_LESS_THAN : SCM_F_COMPARE_NONE;
 
       NEXT (1);
     }
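
The s64-imm=?, s64-imm<? and imm-s64<? handlers above use the Z12
operand kind: a signed 12-bit immediate packed into the top bits of the
32-bit opcode word and recovered with a sign-extending shift.  The same
extraction as a standalone sketch (invented name; like the VM code, it
assumes right-shifting a negative int32_t is arithmetic):

    #include <stdint.h>

    /* Recover the signed 12-bit immediate stored in bits 20..31 of an
       opcode word, yielding a value in [-2048, 2047].  */
    static int64_t
    unpack_z12 (uint32_t op)
    {
      return ((int32_t) op) >> 20;
    }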
 
-  VM_DEFINE_OP (197, f64_numerically_equal, "f64=?", OP1 (X8_S12_S12))
+  VM_DEFINE_OP (119, f64_numerically_equal, "f64=?", OP1 (X8_S12_S12))
     {
       uint16_t a, b;
       double x, y;
@@ -2525,7 +2498,7 @@ VM_NAME (scm_thread *thread)
       NEXT (1);
     }
 
-  VM_DEFINE_OP (198, f64_less, "f64<?", OP1 (X8_S12_S12))
+  VM_DEFINE_OP (120, f64_less, "f64<?", OP1 (X8_S12_S12))
     {
       uint16_t a, b;
       double x, y;
@@ -2545,7 +2518,7 @@ VM_NAME (scm_thread *thread)
       NEXT (1);
     }
 
-  VM_DEFINE_OP (199, numerically_equal, "=?", OP1 (X8_S12_S12))
+  VM_DEFINE_OP (121, numerically_equal, "=?", OP1 (X8_S12_S12))
     {
       uint16_t a, b;
       SCM x, y;
@@ -2563,7 +2536,7 @@ VM_NAME (scm_thread *thread)
       NEXT (1);
     }
 
-  VM_DEFINE_OP (200, less, "<?", OP1 (X8_S12_S12))
+  VM_DEFINE_OP (122, heap_numbers_equal, "heap-numbers-equal?", OP1 (X8_S12_S12))
     {
       uint16_t a, b;
       SCM x, y;
@@ -2573,60 +2546,30 @@ VM_NAME (scm_thread *thread)
       y = SP_REF (b);
 
       SYNC_IP ();
-      VP->compare_result = CALL_INTRINSIC (less_p, (x, y));
+      if (CALL_INTRINSIC (heap_numbers_equal_p, (x, y)))
+        VP->compare_result = SCM_F_COMPARE_EQUAL;
+      else
+        VP->compare_result = SCM_F_COMPARE_NONE;
       CACHE_SP ();
       NEXT (1);
     }
 
-  VM_DEFINE_OP (201, check_arguments, "arguments<=?", OP1 (X8_C24))
+  VM_DEFINE_OP (123, less, "<?", OP1 (X8_S12_S12))
     {
-      uint8_t compare_result;
-      uint32_t expected;
-      ptrdiff_t nargs;
-
-      UNPACK_24 (op, expected);
-      nargs = FRAME_LOCALS_COUNT ();
-
-      if (nargs < (ptrdiff_t) expected)
-        compare_result = SCM_F_COMPARE_LESS_THAN;
-      else if (nargs == (ptrdiff_t) expected)
-        compare_result = SCM_F_COMPARE_EQUAL;
-      else
-        compare_result = SCM_F_COMPARE_NONE;
+      uint16_t a, b;
+      SCM x, y;
 
-      VP->compare_result = compare_result;
+      UNPACK_12_12 (op, a, b);
+      x = SP_REF (a);
+      y = SP_REF (b);
 
+      SYNC_IP ();
+      VP->compare_result = CALL_INTRINSIC (less_p, (x, y));
+      CACHE_SP ();
       NEXT (1);
     }
 
-  VM_DEFINE_OP (202, check_positional_arguments, "positional-arguments<=?", OP2 (X8_C24, X8_C24))
-    {
-      uint8_t compare_result;
-      uint32_t nreq, expected;
-      ptrdiff_t nargs, npos;
-
-      UNPACK_24 (op, nreq);
-      UNPACK_24 (ip[1], expected);
-      nargs = FRAME_LOCALS_COUNT ();
-
-      /* Precondition: at least NREQ arguments.  */
-      for (npos = nreq; npos < nargs && npos <= expected; npos++)
-        if (scm_is_keyword (FP_REF (npos)))
-          break;
-
-      if (npos < (ptrdiff_t) expected)
-        compare_result = SCM_F_COMPARE_LESS_THAN;
-      else if (npos == (ptrdiff_t) expected)
-        compare_result = SCM_F_COMPARE_EQUAL;
-      else
-        compare_result = SCM_F_COMPARE_NONE;
-
-      VP->compare_result = compare_result;
-
-      NEXT (2);
-    }
-
-  VM_DEFINE_OP (203, immediate_tag_equals, "immediate-tag=?", OP2 (X8_S24, C16_C16))
+  VM_DEFINE_OP (124, immediate_tag_equals, "immediate-tag=?", OP2 (X8_S24, C16_C16))
     {
       uint32_t a;
       uint16_t mask, expected;
@@ -2644,7 +2587,7 @@ VM_NAME (scm_thread *thread)
       NEXT (2);
     }
 
-  VM_DEFINE_OP (204, heap_tag_equals, "heap-tag=?", OP2 (X8_S24, C16_C16))
+  VM_DEFINE_OP (125, heap_tag_equals, "heap-tag=?", OP2 (X8_S24, C16_C16))
     {
       uint32_t a;
       uint16_t mask, expected;
@@ -2662,7 +2605,7 @@ VM_NAME (scm_thread *thread)
       NEXT (2);
     }
 
-  VM_DEFINE_OP (205, eq, "eq?", OP1 (X8_S12_S12))
+  VM_DEFINE_OP (126, eq, "eq?", OP1 (X8_S12_S12))
     {
       uint16_t a, b;
       SCM x, y;
@@ -2684,7 +2627,7 @@ VM_NAME (scm_thread *thread)
    * Add OFFSET, a signed 24-bit number, to the current instruction
    * pointer.
    */
-  VM_DEFINE_OP (206, j, "j", OP1 (X8_L24))
+  VM_DEFINE_OP (127, j, "j", OP1 (X8_L24))
     {
       int32_t offset = op;
       offset >>= 8; /* Sign-extending shift. */
@@ -2696,7 +2639,7 @@ VM_NAME (scm_thread *thread)
    * If the flags register is equal to SCM_F_COMPARE_LESS_THAN, add
    * OFFSET, a signed 24-bit number, to the current instruction pointer.
    */
-  VM_DEFINE_OP (207, jl, "jl", OP1 (X8_L24))
+  VM_DEFINE_OP (128, jl, "jl", OP1 (X8_L24))
     {
       if (VP->compare_result == SCM_F_COMPARE_LESS_THAN)
         {
@@ -2713,7 +2656,7 @@ VM_NAME (scm_thread *thread)
    * If the flags register is equal to SCM_F_COMPARE_EQUAL, add
    * OFFSET, a signed 24-bit number, to the current instruction pointer.
    */
-  VM_DEFINE_OP (208, je, "je", OP1 (X8_L24))
+  VM_DEFINE_OP (129, je, "je", OP1 (X8_L24))
     {
       if (VP->compare_result == SCM_F_COMPARE_EQUAL)
         {
@@ -2730,7 +2673,7 @@ VM_NAME (scm_thread *thread)
    * If the flags register is not equal to SCM_F_COMPARE_LESS_THAN, add
    * OFFSET, a signed 24-bit number, to the current instruction pointer.
    */
-  VM_DEFINE_OP (209, jnl, "jnl", OP1 (X8_L24))
+  VM_DEFINE_OP (130, jnl, "jnl", OP1 (X8_L24))
     {
       if (VP->compare_result != SCM_F_COMPARE_LESS_THAN)
         {
@@ -2747,7 +2690,7 @@ VM_NAME (scm_thread *thread)
    * If the flags register is not equal to SCM_F_COMPARE_EQUAL, add
    * OFFSET, a signed 24-bit number, to the current instruction pointer.
    */
-  VM_DEFINE_OP (210, jne, "jne", OP1 (X8_L24))
+  VM_DEFINE_OP (131, jne, "jne", OP1 (X8_L24))
     {
       if (VP->compare_result != SCM_F_COMPARE_EQUAL)
         {
@@ -2768,7 +2711,7 @@ VM_NAME (scm_thread *thread)
    * SCM_F_COMPARE_UNORDERED instead of SCM_F_COMPARE_NONE if either
    * value is a NaN.  For exact numbers, "jge" is the same as "jnl".
    */
-  VM_DEFINE_OP (211, jge, "jge", OP1 (X8_L24))
+  VM_DEFINE_OP (132, jge, "jge", OP1 (X8_L24))
     {
       if (VP->compare_result == SCM_F_COMPARE_NONE)
         {
@@ -2790,7 +2733,7 @@ VM_NAME (scm_thread *thread)
    * either value is a NaN.  For exact numbers, "jnge" is the same as
    * "jl".
    */
-  VM_DEFINE_OP (212, jnge, "jnge", OP1 (X8_L24))
+  VM_DEFINE_OP (133, jnge, "jnge", OP1 (X8_L24))
     {
       if (VP->compare_result != SCM_F_COMPARE_NONE)
         {
@@ -2802,149 +2745,6 @@ VM_NAME (scm_thread *thread)
         NEXT (1);
     }
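
A note on the design above: none of the comparison instructions branch
by themselves; they only record an outcome in VP->compare_result, and
the j/jl/je/jnl/jne/jge/jnge family then turns that flag into control
flow.  A toy model of that split, outside the VM and with invented
names:

    #include <stdint.h>

    enum cmp { CMP_NONE, CMP_EQUAL, CMP_LESS_THAN };

    /* A "u64<?"-style instruction: record the outcome only.  */
    static enum cmp
    compare_u64 (uint64_t x, uint64_t y)
    {
      return x < y ? CMP_LESS_THAN : CMP_NONE;
    }

    /* A "jl"-style instruction: consume the recorded outcome.  */
    static int
    take_branch_if_less (enum cmp flags)
    {
      return flags == CMP_LESS_THAN;
    }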
 
-  VM_DEFINE_OP (213, heap_numbers_equal, "heap-numbers-equal?", OP1 (X8_S12_S12))
-    {
-      uint16_t a, b;
-      SCM x, y;
-
-      UNPACK_12_12 (op, a, b);
-      x = SP_REF (a);
-      y = SP_REF (b);
-
-      SYNC_IP ();
-      if (CALL_INTRINSIC (heap_numbers_equal_p, (x, y)))
-        VP->compare_result = SCM_F_COMPARE_EQUAL;
-      else
-        VP->compare_result = SCM_F_COMPARE_NONE;
-      CACHE_SP ();
-      NEXT (1);
-    }
-
-  VM_DEFINE_OP (214, untag_fixnum, "untag-fixnum", DOP1 (X8_S12_S12))
-    {
-      uint16_t dst, src;
-
-      UNPACK_12_12 (op, dst, src);
-
-      SP_SET_S64 (dst, SCM_I_INUM (SP_REF (src)));
-
-      NEXT (1);
-    }
-
-  VM_DEFINE_OP (215, tag_fixnum, "tag-fixnum", DOP1 (X8_S12_S12))
-    {
-      uint16_t dst, src;
-
-      UNPACK_12_12 (op, dst, src);
-
-      SP_SET (dst, SCM_I_MAKINUM (SP_REF_S64 (src)));
-
-      NEXT (1);
-    }
-
-  /* srsh dst:8 a:8 b:8
-   *
-   * Shift the s64 value in A right by B bits, and place the result in
-   * DST.  Only the lower 6 bits of B are used.
-   */
-  VM_DEFINE_OP (216, srsh, "srsh", DOP1 (X8_S8_S8_S8))
-    {
-      uint8_t dst, a, b;
-
-      UNPACK_8_8_8 (op, dst, a, b);
-
-      SP_SET_S64 (dst, SCM_SRS (SP_REF_S64 (a), (SP_REF_U64 (b) & 63)));
-
-      NEXT (1);
-    }
-
-  /* srsh/immediate dst:8 a:8 b:8
-   *
-   * Shift the s64 value in A right by the immediate B bits, and place
-   * the result in DST.  Only the lower 6 bits of B are used.
-   */
-  VM_DEFINE_OP (217, srsh_immediate, "srsh/immediate", DOP1 (X8_S8_S8_C8))
-    {
-      uint8_t dst, a, b;
-
-      UNPACK_8_8_8 (op, dst, a, b);
-
-      SP_SET_S64 (dst, SCM_SRS (SP_REF_S64 (a), b & 63));
-
-      NEXT (1);
-    }
-
-  VM_DEFINE_OP (218, s64_imm_numerically_equal, "s64-imm=?", OP1 (X8_S12_Z12))
-    {
-      uint16_t a;
-      int64_t x, y;
-
-      a = (op >> 8) & 0xfff;
-      x = SP_REF_S64 (a);
-
-      y = ((int32_t) op) >> 20; /* Sign extension.  */
-
-      VP->compare_result = x == y ? SCM_F_COMPARE_EQUAL : SCM_F_COMPARE_NONE;
-
-      NEXT (1);
-    }
-
-  VM_DEFINE_OP (219, u64_imm_less, "u64-imm<?", OP1 (X8_S12_C12))
-    {
-      uint16_t a;
-      uint64_t x, y;
-
-      UNPACK_12_12 (op, a, y);
-      x = SP_REF_U64 (a);
-
-      VP->compare_result = x < y ? SCM_F_COMPARE_LESS_THAN : SCM_F_COMPARE_NONE;
-
-      NEXT (1);
-    }
-
-  VM_DEFINE_OP (220, imm_u64_less, "imm-u64<?", OP1 (X8_S12_C12))
-    {
-      uint16_t a;
-      uint64_t x, y;
-
-      UNPACK_12_12 (op, a, x);
-      y = SP_REF_U64 (a);
-
-      VP->compare_result = x < y ? SCM_F_COMPARE_LESS_THAN : SCM_F_COMPARE_NONE;
-
-      NEXT (1);
-    }
-
-  VM_DEFINE_OP (221, s64_imm_less, "s64-imm<?", OP1 (X8_S12_Z12))
-    {
-      uint16_t a;
-      int64_t x, y;
-
-      a = (op >> 8) & 0xfff;
-      x = SP_REF_S64 (a);
-
-      y = ((int32_t) op) >> 20; /* Sign extension.  */
-
-      VP->compare_result = x < y ? SCM_F_COMPARE_LESS_THAN : SCM_F_COMPARE_NONE;
-
-      NEXT (1);
-    }
-
-  VM_DEFINE_OP (222, imm_s64_less, "imm-s64<?", OP1 (X8_S12_Z12))
-    {
-      uint16_t a;
-      int64_t x, y;
-
-      a = (op >> 8) & 0xfff;
-      y = SP_REF_S64 (a);
-
-      x = ((int32_t) op) >> 20; /* Sign extension.  */
-
-      VP->compare_result = x < y ? SCM_F_COMPARE_LESS_THAN : SCM_F_COMPARE_NONE;
-
-      NEXT (1);
-    }
-
 #define PTR_REF(type, slot)                                             \
   do {                                                                  \
     uint8_t dst, a, b;                                                  \
@@ -2973,52 +2773,141 @@ VM_NAME (scm_thread *thread)
     NEXT (1);                                                           \
   } while (0)
 
-  VM_DEFINE_OP (223, u8_ref, "u8-ref", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (134, u8_ref, "u8-ref", DOP1 (X8_S8_S8_S8))
     PTR_REF (uint8_t, U64);
-  VM_DEFINE_OP (224, u16_ref, "u16-ref", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (135, u16_ref, "u16-ref", DOP1 (X8_S8_S8_S8))
     PTR_REF (uint16_t, U64);
-  VM_DEFINE_OP (225, u32_ref, "u32-ref", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (136, u32_ref, "u32-ref", DOP1 (X8_S8_S8_S8))
     PTR_REF (uint32_t, U64);
-  VM_DEFINE_OP (226, u64_ref, "u64-ref", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (137, u64_ref, "u64-ref", DOP1 (X8_S8_S8_S8))
     PTR_REF (uint64_t, U64);
 
-  VM_DEFINE_OP (227, u8_set, "u8-set!", OP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (138, u8_set, "u8-set!", OP1 (X8_S8_S8_S8))
     PTR_SET (uint8_t, U64);
-  VM_DEFINE_OP (228, u16_set, "u16-set!", OP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (139, u16_set, "u16-set!", OP1 (X8_S8_S8_S8))
     PTR_SET (uint16_t, U64);
-  VM_DEFINE_OP (229, u32_set, "u32-set!", OP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (140, u32_set, "u32-set!", OP1 (X8_S8_S8_S8))
     PTR_SET (uint32_t, U64);
-  VM_DEFINE_OP (230, u64_set, "u64-set!", OP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (141, u64_set, "u64-set!", OP1 (X8_S8_S8_S8))
     PTR_SET (uint64_t, U64);
 
-  VM_DEFINE_OP (231, s8_ref, "s8-ref", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (142, s8_ref, "s8-ref", DOP1 (X8_S8_S8_S8))
     PTR_REF (int8_t, S64);
-  VM_DEFINE_OP (232, s16_ref, "s16-ref", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (143, s16_ref, "s16-ref", DOP1 (X8_S8_S8_S8))
     PTR_REF (int16_t, S64);
-  VM_DEFINE_OP (233, s32_ref, "s32-ref", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (144, s32_ref, "s32-ref", DOP1 (X8_S8_S8_S8))
     PTR_REF (int32_t, S64);
-  VM_DEFINE_OP (234, s64_ref, "s64-ref", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (145, s64_ref, "s64-ref", DOP1 (X8_S8_S8_S8))
     PTR_REF (int64_t, S64);
 
-  VM_DEFINE_OP (235, s8_set, "s8-set!", OP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (146, s8_set, "s8-set!", OP1 (X8_S8_S8_S8))
     PTR_SET (int8_t, S64);
-  VM_DEFINE_OP (236, s16_set, "s16-set!", OP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (147, s16_set, "s16-set!", OP1 (X8_S8_S8_S8))
     PTR_SET (int16_t, S64);
-  VM_DEFINE_OP (237, s32_set, "s32-set!", OP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (148, s32_set, "s32-set!", OP1 (X8_S8_S8_S8))
     PTR_SET (int32_t, S64);
-  VM_DEFINE_OP (238, s64_set, "s64-set!", OP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (149, s64_set, "s64-set!", OP1 (X8_S8_S8_S8))
     PTR_SET (int64_t, S64);
 
-  VM_DEFINE_OP (239, f32_ref, "f32-ref", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (150, f32_ref, "f32-ref", DOP1 (X8_S8_S8_S8))
     PTR_REF (float, F64);
-  VM_DEFINE_OP (240, f64_ref, "f64-ref", DOP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (151, f64_ref, "f64-ref", DOP1 (X8_S8_S8_S8))
     PTR_REF (double, F64);
 
-  VM_DEFINE_OP (241, f32_set, "f32-set!", OP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (152, f32_set, "f32-set!", OP1 (X8_S8_S8_S8))
     PTR_SET (float, F64);
-  VM_DEFINE_OP (242, f64_set, "f64-set!", OP1 (X8_S8_S8_S8))
+  VM_DEFINE_OP (153, f64_set, "f64-set!", OP1 (X8_S8_S8_S8))
     PTR_SET (double, F64);
 
+  VM_DEFINE_OP (154, unused_154, NULL, NOP)
+  VM_DEFINE_OP (155, unused_155, NULL, NOP)
+  VM_DEFINE_OP (156, unused_156, NULL, NOP)
+  VM_DEFINE_OP (157, unused_157, NULL, NOP)
+  VM_DEFINE_OP (158, unused_158, NULL, NOP)
+  VM_DEFINE_OP (159, unused_159, NULL, NOP)
+  VM_DEFINE_OP (160, unused_160, NULL, NOP)
+  VM_DEFINE_OP (161, unused_161, NULL, NOP)
+  VM_DEFINE_OP (162, unused_162, NULL, NOP)
+  VM_DEFINE_OP (163, unused_163, NULL, NOP)
+  VM_DEFINE_OP (164, unused_164, NULL, NOP)
+  VM_DEFINE_OP (165, unused_165, NULL, NOP)
+  VM_DEFINE_OP (166, unused_166, NULL, NOP)
+  VM_DEFINE_OP (167, unused_167, NULL, NOP)
+  VM_DEFINE_OP (168, unused_168, NULL, NOP)
+  VM_DEFINE_OP (169, unused_169, NULL, NOP)
+  VM_DEFINE_OP (170, unused_170, NULL, NOP)
+  VM_DEFINE_OP (171, unused_171, NULL, NOP)
+  VM_DEFINE_OP (172, unused_172, NULL, NOP)
+  VM_DEFINE_OP (173, unused_173, NULL, NOP)
+  VM_DEFINE_OP (174, unused_174, NULL, NOP)
+  VM_DEFINE_OP (175, unused_175, NULL, NOP)
+  VM_DEFINE_OP (176, unused_176, NULL, NOP)
+  VM_DEFINE_OP (177, unused_177, NULL, NOP)
+  VM_DEFINE_OP (178, unused_178, NULL, NOP)
+  VM_DEFINE_OP (179, unused_179, NULL, NOP)
+  VM_DEFINE_OP (180, unused_180, NULL, NOP)
+  VM_DEFINE_OP (181, unused_181, NULL, NOP)
+  VM_DEFINE_OP (182, unused_182, NULL, NOP)
+  VM_DEFINE_OP (183, unused_183, NULL, NOP)
+  VM_DEFINE_OP (184, unused_184, NULL, NOP)
+  VM_DEFINE_OP (185, unused_185, NULL, NOP)
+  VM_DEFINE_OP (186, unused_186, NULL, NOP)
+  VM_DEFINE_OP (187, unused_187, NULL, NOP)
+  VM_DEFINE_OP (188, unused_188, NULL, NOP)
+  VM_DEFINE_OP (189, unused_189, NULL, NOP)
+  VM_DEFINE_OP (190, unused_190, NULL, NOP)
+  VM_DEFINE_OP (191, unused_191, NULL, NOP)
+  VM_DEFINE_OP (192, unused_192, NULL, NOP)
+  VM_DEFINE_OP (193, unused_193, NULL, NOP)
+  VM_DEFINE_OP (194, unused_194, NULL, NOP)
+  VM_DEFINE_OP (195, unused_195, NULL, NOP)
+  VM_DEFINE_OP (196, unused_196, NULL, NOP)
+  VM_DEFINE_OP (197, unused_197, NULL, NOP)
+  VM_DEFINE_OP (198, unused_198, NULL, NOP)
+  VM_DEFINE_OP (199, unused_199, NULL, NOP)
+  VM_DEFINE_OP (200, unused_200, NULL, NOP)
+  VM_DEFINE_OP (201, unused_201, NULL, NOP)
+  VM_DEFINE_OP (202, unused_202, NULL, NOP)
+  VM_DEFINE_OP (203, unused_203, NULL, NOP)
+  VM_DEFINE_OP (204, unused_204, NULL, NOP)
+  VM_DEFINE_OP (205, unused_205, NULL, NOP)
+  VM_DEFINE_OP (206, unused_206, NULL, NOP)
+  VM_DEFINE_OP (207, unused_207, NULL, NOP)
+  VM_DEFINE_OP (208, unused_208, NULL, NOP)
+  VM_DEFINE_OP (209, unused_209, NULL, NOP)
+  VM_DEFINE_OP (210, unused_210, NULL, NOP)
+  VM_DEFINE_OP (211, unused_211, NULL, NOP)
+  VM_DEFINE_OP (212, unused_212, NULL, NOP)
+  VM_DEFINE_OP (213, unused_213, NULL, NOP)
+  VM_DEFINE_OP (214, unused_214, NULL, NOP)
+  VM_DEFINE_OP (215, unused_215, NULL, NOP)
+  VM_DEFINE_OP (216, unused_216, NULL, NOP)
+  VM_DEFINE_OP (217, unused_217, NULL, NOP)
+  VM_DEFINE_OP (218, unused_218, NULL, NOP)
+  VM_DEFINE_OP (219, unused_219, NULL, NOP)
+  VM_DEFINE_OP (220, unused_220, NULL, NOP)
+  VM_DEFINE_OP (221, unused_221, NULL, NOP)
+  VM_DEFINE_OP (222, unused_222, NULL, NOP)
+  VM_DEFINE_OP (223, unused_223, NULL, NOP)
+  VM_DEFINE_OP (224, unused_224, NULL, NOP)
+  VM_DEFINE_OP (225, unused_225, NULL, NOP)
+  VM_DEFINE_OP (226, unused_226, NULL, NOP)
+  VM_DEFINE_OP (227, unused_227, NULL, NOP)
+  VM_DEFINE_OP (228, unused_228, NULL, NOP)
+  VM_DEFINE_OP (229, unused_229, NULL, NOP)
+  VM_DEFINE_OP (230, unused_230, NULL, NOP)
+  VM_DEFINE_OP (231, unused_231, NULL, NOP)
+  VM_DEFINE_OP (232, unused_232, NULL, NOP)
+  VM_DEFINE_OP (233, unused_233, NULL, NOP)
+  VM_DEFINE_OP (234, unused_234, NULL, NOP)
+  VM_DEFINE_OP (235, unused_235, NULL, NOP)
+  VM_DEFINE_OP (236, unused_236, NULL, NOP)
+  VM_DEFINE_OP (237, unused_237, NULL, NOP)
+  VM_DEFINE_OP (238, unused_238, NULL, NOP)
+  VM_DEFINE_OP (239, unused_239, NULL, NOP)
+  VM_DEFINE_OP (240, unused_240, NULL, NOP)
+  VM_DEFINE_OP (241, unused_241, NULL, NOP)
+  VM_DEFINE_OP (242, unused_242, NULL, NOP)
   VM_DEFINE_OP (243, unused_243, NULL, NOP)
   VM_DEFINE_OP (244, unused_244, NULL, NOP)
   VM_DEFINE_OP (245, unused_245, NULL, NOP)
diff --git a/module/system/vm/assembler.scm b/module/system/vm/assembler.scm
index 58b5f95..d6d3faa 100644
--- a/module/system/vm/assembler.scm
+++ b/module/system/vm/assembler.scm
@@ -2041,7 +2041,7 @@ needed."
 
 ;; FIXME: Define these somewhere central, shared with C.
 (define *bytecode-major-version* #x0300)
-(define *bytecode-minor-version* 0)
+(define *bytecode-minor-version* 1)
 
 (define (link-dynamic-section asm text rw rw-init frame-maps)
   "Link the dynamic section for an ELF image with bytecode @var{text},


