From: Aurelien Jarno
Subject: Re: [Qemu-devel] [PATCH 09/11] target-mips: add mips16 instruction decoding
Date: Sat, 28 Nov 2009 11:17:16 +0100
User-agent: Mutt/1.5.20 (2009-06-14)
On Mon, Nov 23, 2009 at 12:50:07PM -0800, Nathan Froyd wrote:
> There's no good way to add this incrementally, so we do it all at once.
> The only changes to shared code are in handle_delay_slot. We need to
> flip ISAMode when doing a jump-and-exchange. We also need to set
> ISAMode to the low bit of the target address for jump-to-register.
This patch breaks the boot of both MIPS kernels. On little endian, the
kernel crashes with the following message:
CPU 0 Unable to handle kernel paging request at virtual address 00000000, epc == 00000000, ra == 80120004
On big endian, it fails before printing a single message on the console.
See also my other comments inline.
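
For reference, my reading of the jump-to-register handling introduced here
(the handle_delay_slot hunk further down) boils down to the following. This
is only an illustrative plain-C sketch, the helper name is made up, and it
assumes the active_tc.ISAMode field added earlier in this series:

    /* Illustration only, not code from the patch: on JR/JALR with MIPS16
     * present, the low bit of the jump target selects the ISA mode and
     * the remaining bits become the new PC. */
    static void set_pc_and_isa_mode(CPUState *env, target_ulong btarget)
    {
        env->active_tc.ISAMode = btarget & 0x1;           /* 1 = MIPS16, 0 = MIPS32 */
        env->active_tc.PC = btarget & ~(target_ulong)0x1; /* clear the mode bit */
    }

In other words a MIPS16 routine is entered whenever bit 0 of the target is
set, which is why the hunk below masks that bit off before writing cpu_PC.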
> Signed-off-by: Nathan Froyd <address@hidden>
> ---
> target-mips/translate.c | 980 ++++++++++++++++++++++++++++++++++++++++++++++-
> 1 files changed, 976 insertions(+), 4 deletions(-)
>
> diff --git a/target-mips/translate.c b/target-mips/translate.c
> index 611774b..49d2264 100644
> --- a/target-mips/translate.c
> +++ b/target-mips/translate.c
> @@ -83,6 +83,7 @@ enum {
> OPC_LH = (0x21 << 26),
> OPC_LWL = (0x22 << 26),
> OPC_LW = (0x23 << 26),
> + OPC_LWPC = OPC_LW | 0x5,
> OPC_LBU = (0x24 << 26),
> OPC_LHU = (0x25 << 26),
> OPC_LWR = (0x26 << 26),
> @@ -173,6 +174,7 @@ enum {
> /* Jumps */
> OPC_JR = 0x08 | OPC_SPECIAL, /* Also JR.HB */
> OPC_JALR = 0x09 | OPC_SPECIAL, /* Also JALR.HB */
> + OPC_JALRC = OPC_JALR | (0x5 << 6),
> /* Traps */
> OPC_TGE = 0x30 | OPC_SPECIAL,
> OPC_TGEU = 0x31 | OPC_SPECIAL,
> @@ -1074,6 +1076,19 @@ static void gen_ldst (DisasContext *ctx, uint32_t opc, int rt,
> opn = "sdr";
> break;
> #endif
> + case OPC_LWPC:
> + save_cpu_state(ctx, 1);
> + if (ctx->hflags & MIPS_HFLAG_BMASK) {
> + int branch_bytes = ctx->hflags & MIPS_HFLAG_BDS16 ? 2 : 4;
> +
> + tcg_gen_movi_tl(t1, (ctx->pc - branch_bytes) & ~(target_ulong)3);
> + } else {
> + tcg_gen_movi_tl(t1, ctx->pc & ~(target_ulong)3);
> + }
> + gen_op_addr_add(ctx, t0, t0, t1);
> + op_ldst_lw(t0, t0, ctx);
> + gen_store_gpr(t0, rt);
> + break;
> case OPC_LW:
> save_cpu_state(ctx, 0);
> op_ldst_lw(t0, t0, ctx);
> @@ -2450,11 +2465,13 @@ static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
> break;
> case OPC_J:
> case OPC_JAL:
> + case OPC_JALX:
> /* Jump to immediate */
> btgt = ((ctx->pc + insn_bytes) & (int32_t)0xF0000000) | (uint32_t)offset;
> break;
> case OPC_JR:
> case OPC_JALR:
> + case OPC_JALRC:
> /* Jump to register */
> if (offset != 0 && offset != 16) {
> /* Hint = 0 is JR/JALR, hint 16 is JR.HB/JALR.HB, the
> @@ -2517,18 +2534,29 @@ static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
> ctx->hflags |= MIPS_HFLAG_B;
> MIPS_DEBUG("j " TARGET_FMT_lx, btgt);
> break;
> + case OPC_JALX:
> + ctx->hflags |= MIPS_HFLAG_BX;
> + /* Fallthrough */
> case OPC_JAL:
> blink = 31;
> ctx->hflags |= MIPS_HFLAG_B;
> + ctx->hflags |= (ctx->isa_mode
> + ? MIPS_HFLAG_BDS16
> + : MIPS_HFLAG_BDS32);
> MIPS_DEBUG("jal " TARGET_FMT_lx, btgt);
> break;
> case OPC_JR:
> ctx->hflags |= MIPS_HFLAG_BR;
> + if (ctx->isa_mode)
> + ctx->hflags |= MIPS_HFLAG_BDS16;
> MIPS_DEBUG("jr %s", regnames[rs]);
> break;
> case OPC_JALR:
> + case OPC_JALRC:
> blink = rt;
> ctx->hflags |= MIPS_HFLAG_BR;
> + if (ctx->isa_mode)
> + ctx->hflags |= MIPS_HFLAG_BDS16;
> MIPS_DEBUG("jalr %s, %s", regnames[rt], regnames[rs]);
> break;
> default:
> @@ -2625,7 +2653,13 @@ static void gen_compute_branch (DisasContext *ctx, uint32_t opc,
>
> ctx->btarget = btgt;
> if (blink > 0) {
> - tcg_gen_movi_tl(cpu_gpr[blink], ctx->pc + 8);
> + int post_delay = insn_bytes;
> + int lowbit = ctx->isa_mode;
> +
> + if (opc != OPC_JALRC)
> + post_delay += ((ctx->hflags & MIPS_HFLAG_BDS16) ? 2 : 4);
> +
> + tcg_gen_movi_tl(cpu_gpr[blink], ctx->pc + post_delay + lowbit);
> }
>
> out:
> @@ -7558,10 +7592,17 @@ static void handle_delay_slot (CPUState *env, DisasContext *ctx,
> ctx->bstate = BS_BRANCH;
> save_cpu_state(ctx, 0);
> /* FIXME: Need to clear can_do_io. */
> - switch (hflags) {
> + switch (hflags & MIPS_HFLAG_BMASK_BASE) {
> case MIPS_HFLAG_B:
> /* unconditional branch */
> MIPS_DEBUG("unconditional branch");
> + if (hflags & MIPS_HFLAG_BX) {
> + TCGv_i32 t0 = tcg_temp_new_i32();
> +
> + tcg_gen_movi_i32(t0, 1 - ctx->isa_mode);
> + tcg_gen_st_i32(t0, cpu_env, offsetof(CPUState, active_tc.ISAMode));
> + tcg_temp_free_i32(t0);
> + }
> gen_goto_tb(ctx, 0, ctx->btarget);
> break;
> case MIPS_HFLAG_BL:
> @@ -7584,7 +7625,20 @@ static void handle_delay_slot (CPUState *env, DisasContext *ctx,
> case MIPS_HFLAG_BR:
> /* unconditional branch to register */
> MIPS_DEBUG("branch to register");
> - tcg_gen_mov_tl(cpu_PC, btarget);
> + if (env->insn_flags & ASE_MIPS16) {
> + TCGv t0 = tcg_temp_new();
> + TCGv_i32 t1 = tcg_temp_new_i32();
> +
> + tcg_gen_andi_tl(t0, btarget, 0x1);
> + tcg_gen_trunc_tl_i32(t1, t0);
> + tcg_temp_free(t0);
> + tcg_gen_st_i32(t1, cpu_env, offsetof(CPUState, active_tc.ISAMode));
> + tcg_temp_free_i32(t1);
> +
> + tcg_gen_andi_tl(cpu_PC, btarget, ~(target_ulong)0x1);
> + } else {
> + tcg_gen_mov_tl(cpu_PC, btarget);
> + }
> if (ctx->singlestep_enabled) {
> save_cpu_state(ctx, 0);
> gen_helper_0i(raise_exception, EXCP_DEBUG);
> @@ -7712,6 +7766,918 @@ enum {
> RR_RY_CNVT_SEW = 0x6,
> };
>
> +static int xlat (int r)
> +{
> + static int map[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
> +
> + return map[r];
> +}
> +
> +static void gen_mips16_save (DisasContext *ctx,
> + int xsregs, int aregs,
> + int do_ra, int do_s0, int do_s1,
> + int framesize)
> +{
> + TCGv t0 = tcg_temp_new();
> + TCGv t1 = tcg_temp_new();
> + int args, astatic;
> +
> + switch (aregs) {
> + case 0:
> + case 1:
> + case 2:
> + case 3:
> + case 11:
> + args = 0;
> + break;
> + case 4:
> + case 5:
> + case 6:
> + case 7:
> + args = 1;
> + break;
> + case 8:
> + case 9:
> + case 10:
> + args = 2;
> + break;
> + case 12:
> + case 13:
> + args = 3;
> + break;
> + case 14:
> + args = 4;
> + break;
> + default:
> + generate_exception(ctx, EXCP_RI);
> + return;
> + }
> +
> + switch (args) {
> + case 4:
> + gen_base_offset_addr(ctx, t0, 29, 12);
> + gen_load_gpr(t1, 7);
> + op_ldst_sw(t1, t0, ctx);
> + /* Fall through */
> + case 3:
> + gen_base_offset_addr(ctx, t0, 29, 8);
> + gen_load_gpr(t1, 6);
> + op_ldst_sw(t1, t0, ctx);
> + /* Fall through */
> + case 2:
> + gen_base_offset_addr(ctx, t0, 29, 4);
> + gen_load_gpr(t1, 5);
> + op_ldst_sw(t1, t0, ctx);
> + /* Fall through */
> + case 1:
> + gen_base_offset_addr(ctx, t0, 29, 0);
> + gen_load_gpr(t1, 4);
> + op_ldst_sw(t1, t0, ctx);
> + }
> +
> + gen_load_gpr(t0, 29);
> +
> +#define DECR_AND_STORE(reg) do { \
> + tcg_gen_subi_tl(t0, t0, 4); \
> + gen_load_gpr(t1, reg); \
> + op_ldst_sw(t1, t0, ctx); \
> + } while (0)
> +
> + if (do_ra) {
> + DECR_AND_STORE(31);
> + }
> +
> + switch (xsregs) {
> + case 7:
> + DECR_AND_STORE(30);
> + /* Fall through */
> + case 6:
> + DECR_AND_STORE(23);
> + /* Fall through */
> + case 5:
> + DECR_AND_STORE(22);
> + /* Fall through */
> + case 4:
> + DECR_AND_STORE(21);
> + /* Fall through */
> + case 3:
> + DECR_AND_STORE(20);
> + /* Fall through */
> + case 2:
> + DECR_AND_STORE(19);
> + /* Fall through */
> + case 1:
> + DECR_AND_STORE(18);
> + }
> +
> + if (do_s1) {
> + DECR_AND_STORE(17);
> + }
> + if (do_s0) {
> + DECR_AND_STORE(16);
> + }
> +
> + switch (aregs) {
> + case 0:
> + case 4:
> + case 8:
> + case 12:
> + case 14:
> + astatic = 0;
> + break;
> + case 1:
> + case 5:
> + case 9:
> + case 13:
> + astatic = 1;
> + break;
> + case 2:
> + case 6:
> + case 10:
> + astatic = 2;
> + break;
> + case 3:
> + case 7:
> + astatic = 3;
> + break;
> + case 11:
> + astatic = 4;
> + break;
> + default:
> + generate_exception(ctx, EXCP_RI);
> + return;
> + }
> +
> + if (astatic > 0) {
> + DECR_AND_STORE(7);
> + if (astatic > 1) {
> + DECR_AND_STORE(6);
> + if (astatic > 2) {
> + DECR_AND_STORE(5);
> + if (astatic > 3) {
> + DECR_AND_STORE(4);
> + }
> + }
> + }
> + }
> +#undef DECR_AND_STORE
> +
> + tcg_gen_subi_tl(cpu_gpr[29], cpu_gpr[29], framesize);
> + tcg_temp_free(t0);
> + tcg_temp_free(t1);
> +}
> +
> +static void gen_mips16_restore (DisasContext *ctx,
> + int xsregs, int aregs,
> + int do_ra, int do_s0, int do_s1,
> + int framesize)
> +{
> + int astatic;
> + TCGv t0 = tcg_temp_new();
> + TCGv t1 = tcg_temp_new();
> +
> + tcg_gen_addi_tl(t0, cpu_gpr[29], framesize);
> +
> +#define DECR_AND_LOAD(reg) do { \
> + tcg_gen_subi_tl(t0, t0, 4); \
> + op_ldst_lw(t1, t0, ctx); \
> + gen_store_gpr(t1, reg); \
> + } while (0)
> +
> + if (do_ra) {
> + DECR_AND_LOAD(31);
> + }
> +
> + switch (xsregs) {
> + case 7:
> + DECR_AND_LOAD(30);
> + /* Fall through */
> + case 6:
> + DECR_AND_LOAD(23);
> + /* Fall through */
> + case 5:
> + DECR_AND_LOAD(22);
> + /* Fall through */
> + case 4:
> + DECR_AND_LOAD(21);
> + /* Fall through */
> + case 3:
> + DECR_AND_LOAD(20);
> + /* Fall through */
> + case 2:
> + DECR_AND_LOAD(19);
> + /* Fall through */
> + case 1:
> + DECR_AND_LOAD(18);
> + }
> +
> + if (do_s1) {
> + DECR_AND_LOAD(17);
> + }
> + if (do_s0) {
> + DECR_AND_LOAD(16);
> + }
> +
> + switch (aregs) {
> + case 0:
> + case 4:
> + case 8:
> + case 12:
> + case 14:
> + astatic = 0;
> + break;
> + case 1:
> + case 5:
> + case 9:
> + case 13:
> + astatic = 1;
> + break;
> + case 2:
> + case 6:
> + case 10:
> + astatic = 2;
> + break;
> + case 3:
> + case 7:
> + astatic = 3;
> + break;
> + case 11:
> + astatic = 4;
> + break;
> + default:
> + generate_exception(ctx, EXCP_RI);
> + return;
> + }
> +
> + if (astatic > 0) {
> + DECR_AND_LOAD(7);
> + if (astatic > 1) {
> + DECR_AND_LOAD(6);
> + if (astatic > 2) {
> + DECR_AND_LOAD(5);
> + if (astatic > 3) {
> + DECR_AND_LOAD(4);
> + }
> + }
> + }
> + }
> +#undef DECR_AND_LOAD
> +
> + tcg_gen_addi_tl(cpu_gpr[29], cpu_gpr[29], framesize);
> + tcg_temp_free(t0);
> + tcg_temp_free(t1);
> +}
> +
> +static void gen_addiupc (DisasContext *ctx, int rx, int imm)
> +{
> + TCGv t0 = tcg_temp_new();
> +
> + tcg_gen_movi_tl(t0, ctx->pc & ~(target_ulong)3);
> + tcg_gen_addi_tl(cpu_gpr[rx], t0, imm);
> +
> + tcg_temp_free(t0);
> +}
> +
> +static int decode_extended_mips16_opc (CPUState *env, DisasContext *ctx,
> + int *is_branch)
> +{
> + int extend = lduw_code(ctx->pc + 2);
> + int op, rx, ry, funct, sa;
> + int16_t imm, offset;
> +
> + ctx->opcode = (ctx->opcode << 16) | extend;
> + op = (ctx->opcode >> 11) & 0x1f;
> + sa = (ctx->opcode >> 22) & 0x1f;
> + funct = (ctx->opcode >> 8) & 0x7;
> + rx = xlat((ctx->opcode >> 8) & 0x7);
> + ry = xlat((ctx->opcode >> 5) & 0x7);
> + offset = imm = (int16_t) (((ctx->opcode >> 16) & 0x1f) << 11
> + | ((ctx->opcode >> 21) & 0x3f) << 5
> + | (ctx->opcode & 0x1f));
> +
> + /* The extended opcodes cleverly reuse the opcodes from their 16-bit
> + counterparts. */
> + switch (op) {
> + case M16_OPC_ADDIUSP:
> + gen_arith_imm(env, ctx, OPC_ADDIU, rx, 29, imm);
> + break;
> + case M16_OPC_ADDIUPC:
> + gen_addiupc(ctx, rx, imm);
> + break;
> + case M16_OPC_B:
> + gen_compute_branch(ctx, OPC_BEQ, 4, 0, 0, offset << 1);
> + /* No delay slot, so just process as a normal instruction */
> + break;
> + case M16_OPC_BEQZ:
> + gen_compute_branch(ctx, OPC_BEQ, 4, rx, 0, offset << 1);
> + /* No delay slot, so just process as a normal instruction */
> + break;
> + case M16_OPC_BNEQZ:
> + gen_compute_branch(ctx, OPC_BNE, 4, rx, 0, offset << 1);
> + /* No delay slot, so just process as a normal instruction */
> + break;
> + case M16_OPC_SHIFT:
> + switch (ctx->opcode & 0x3) {
> + case 0x0:
> + gen_shift_imm(env, ctx, OPC_SLL, rx, ry, sa);
> + break;
> + case 0x1:
> +#if defined(TARGET_MIPS64)
> + gen_shift_imm(env, ctx, OPC_DSLL, rx, ry, sa);
Shouldn't this generate an exception on a 64-bit CPU when 64-bit
instructions are not enabled? The MIPS16e manual is not really clear about
that.
The same applies to the other 64-bit instructions below.
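
If the answer is yes, I guess reusing the existing check_mips_64() guard in
front of each 64-bit case would be enough, e.g. (untested suggestion, not
part of the patch):

    check_mips_64(ctx);  /* raises the RI exception when 64-bit insns are not enabled */
    gen_shift_imm(env, ctx, OPC_DSLL, rx, ry, sa);
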
> +#else
> + generate_exception(ctx, EXCP_RI);
> +#endif
> + break;
> + case 0x2:
> + gen_shift_imm(env, ctx, OPC_SRL, rx, ry, sa);
> + break;
> + case 0x3:
> + gen_shift_imm(env, ctx, OPC_SRA, rx, ry, sa);
> + break;
> + }
> + break;
> +#if defined(TARGET_MIPS64)
> + case M16_OPC_LD:
> + gen_ldst(ctx, OPC_LD, ry, rx, offset);
> + break;
> +#endif
> + case M16_OPC_RRIA:
> + imm = ctx->opcode & 0xf;
> + imm = imm | ((ctx->opcode >> 20) & 0x7f) << 4;
> + imm = imm | ((ctx->opcode >> 16) & 0xf) << 11;
> + imm = (int16_t) (imm << 1) >> 1;
> + if ((ctx->opcode >> 4) & 0x1) {
> +#if defined(TARGET_MIPS64)
> + gen_arith_imm(env, ctx, OPC_DADDIU, ry, rx, imm);
> +#else
> + generate_exception(ctx, EXCP_RI);
> +#endif
> + } else {
> + gen_arith_imm(env, ctx, OPC_ADDIU, ry, rx, imm);
> + }
> + break;
> + case M16_OPC_ADDIU8:
> + gen_arith_imm(env, ctx, OPC_ADDIU, rx, rx, imm);
> + break;
> + case M16_OPC_SLTI:
> + gen_slt_imm(env, OPC_SLTI, 24, rx, imm);
> + break;
> + case M16_OPC_SLTIU:
> + gen_slt_imm(env, OPC_SLTIU, 24, rx, imm);
> + break;
> + case M16_OPC_I8:
> + switch (funct) {
> + case I8_BTEQZ:
> + gen_compute_branch(ctx, OPC_BEQ, 4, 24, 0, offset << 1);
> + break;
> + case I8_BTNEZ:
> + gen_compute_branch(ctx, OPC_BNE, 4, 24, 0, offset << 1);
> + break;
> + case I8_SWRASP:
> + gen_ldst(ctx, OPC_SW, 31, 29, imm);
> + break;
> + case I8_ADJSP:
> + gen_arith_imm(env, ctx, OPC_ADDIU, 29, 29, imm);
> + break;
> + case I8_SVRS:
> + {
> + int xsregs = (ctx->opcode >> 24) & 0x7;
> + int aregs = (ctx->opcode >> 16) & 0xf;
> + int do_ra = (ctx->opcode >> 6) & 0x1;
> + int do_s0 = (ctx->opcode >> 5) & 0x1;
> + int do_s1 = (ctx->opcode >> 4) & 0x1;
> + int framesize = (((ctx->opcode >> 20) & 0xf) << 4
> + | (ctx->opcode & 0xf)) << 3;
> +
> + if (ctx->opcode & (1 << 7)) {
> + gen_mips16_save(ctx, xsregs, aregs,
> + do_ra, do_s0, do_s1,
> + framesize);
> + } else {
> + gen_mips16_restore(ctx, xsregs, aregs,
> + do_ra, do_s0, do_s1,
> + framesize);
> + }
> + }
> + break;
> + default:
> + generate_exception(ctx, EXCP_RI);
> + break;
> + }
> + break;
> + case M16_OPC_LI:
> + tcg_gen_movi_tl(cpu_gpr[rx], (uint16_t) imm);
> + break;
> + case M16_OPC_CMPI:
> + tcg_gen_xori_tl(cpu_gpr[24], cpu_gpr[rx], (uint16_t) imm);
> + break;
> +#if defined(TARGET_MIPS64)
> + case M16_OPC_SD:
> + gen_ldst(ctx, OPC_SD, ry, rx, offset);
> + break;
> +#endif
> + case M16_OPC_LB:
> + gen_ldst(ctx, OPC_LB, ry, rx, offset);
> + break;
> + case M16_OPC_LH:
> + gen_ldst(ctx, OPC_LH, ry, rx, offset);
> + break;
> + case M16_OPC_LWSP:
> + gen_ldst(ctx, OPC_LW, rx, 29, offset);
> + break;
> + case M16_OPC_LW:
> + gen_ldst(ctx, OPC_LW, ry, rx, offset);
> + break;
> + case M16_OPC_LBU:
> + gen_ldst(ctx, OPC_LBU, ry, rx, offset);
> + break;
> + case M16_OPC_LHU:
> + gen_ldst(ctx, OPC_LHU, ry, rx, offset);
> + break;
> + case M16_OPC_LWPC:
> + gen_ldst(ctx, OPC_LWPC, rx, 0, offset);
> + break;
> +#if defined(TARGET_MIPS64)
> + case M16_OPC_LWU:
> + gen_ldst(ctx, OPC_LWU, ry, rx, offset);
> + break;
> +#endif
> + case M16_OPC_SB:
> + gen_ldst(ctx, OPC_SB, ry, rx, offset);
> + break;
> + case M16_OPC_SH:
> + gen_ldst(ctx, OPC_SH, ry, rx, offset);
> + break;
> + case M16_OPC_SWSP:
> + gen_ldst(ctx, OPC_SW, rx, 29, offset);
> + break;
> + case M16_OPC_SW:
> + gen_ldst(ctx, OPC_SW, ry, rx, offset);
> + break;
> + default:
> + generate_exception(ctx, EXCP_RI);
> + break;
> + }
> +
> + return 4;
> +}
> +
> +static int decode_mips16_opc (CPUState *env, DisasContext *ctx,
> + int *is_branch)
> +{
> + int rx, ry;
> + int sa;
> + int op, cnvt_op, op1, offset;
> + int funct;
> + int n_bytes;
> +
> + op = (ctx->opcode >> 11) & 0x1f;
> + sa = (ctx->opcode >> 2) & 0x7;
> + sa = sa == 0 ? 8 : sa;
> + rx = xlat((ctx->opcode >> 8) & 0x7);
> + cnvt_op = (ctx->opcode >> 5) & 0x7;
> + ry = xlat((ctx->opcode >> 5) & 0x7);
> + op1 = offset = ctx->opcode & 0x1f;
> +
> + n_bytes = 2;
> +
> + switch (op) {
> + case M16_OPC_ADDIUSP:
> + {
> + int16_t imm = ((uint8_t) ctx->opcode) << 2;
> +
> + gen_arith_imm(env, ctx, OPC_ADDIU, rx, 29, imm);
> + }
> + break;
> + case M16_OPC_ADDIUPC:
> + gen_addiupc(ctx, rx, ((uint8_t) ctx->opcode) << 2);
> + break;
> + case M16_OPC_B:
> + offset = (ctx->opcode & 0x7ff) << 1;
> + offset = (int16_t)(offset << 4) >> 4;
> + gen_compute_branch(ctx, OPC_BEQ, 2, 0, 0, offset);
> + /* No delay slot, so just process as a normal instruction */
> + break;
> + case M16_OPC_JAL:
> + offset = lduw_code(ctx->pc + 2);
> + offset = (((ctx->opcode & 0x1f) << 21)
> + | ((ctx->opcode >> 5) & 0x1f) << 16
> + | offset) << 2;
> + op = ((ctx->opcode >> 10) & 0x1) ? OPC_JALX : OPC_JAL;
> + gen_compute_branch(ctx, op, 4, rx, ry, offset);
> + n_bytes = 4;
> + *is_branch = 1;
> + break;
> + case M16_OPC_BEQZ:
> + gen_compute_branch(ctx, OPC_BEQ, 2, rx, 0, ((int8_t)ctx->opcode) << 1);
> + /* No delay slot, so just process as a normal instruction */
> + break;
> + case M16_OPC_BNEQZ:
> + gen_compute_branch(ctx, OPC_BNE, 2, rx, 0, ((int8_t)ctx->opcode) << 1);
> + /* No delay slot, so just process as a normal instruction */
> + break;
> + case M16_OPC_SHIFT:
> + switch (ctx->opcode & 0x3) {
> + case 0x0:
> + gen_shift_imm(env, ctx, OPC_SLL, rx, ry, sa);
> + break;
> + case 0x1:
> +#if defined(TARGET_MIPS64)
> + gen_shift_imm(env, ctx, OPC_DSLL, rx, ry, sa);
> +#else
> + generate_exception(ctx, EXCP_RI);
> +#endif
> + break;
> + case 0x2:
> + gen_shift_imm(env, ctx, OPC_SRL, rx, ry, sa);
> + break;
> + case 0x3:
> + gen_shift_imm(env, ctx, OPC_SRA, rx, ry, sa);
> + break;
> + }
> + break;
> +#if defined(TARGET_MIPS64)
> + case M16_OPC_LD:
> + gen_ldst(ctx, OPC_LD, ry, rx, offset << 3);
> + break;
> +#endif
> + case M16_OPC_RRIA:
> + {
> + int16_t imm = (int8_t)((ctx->opcode & 0xf) << 4) >> 4;
> +
> + if ((ctx->opcode >> 4) & 1) {
> +#if defined(TARGET_MIPS64)
> + gen_arith_imm(env, ctx, OPC_DADDIU, ry, rx, imm);
> +#else
> + generate_exception(ctx, EXCP_RI);
> +#endif
> + } else {
> + gen_arith_imm(env, ctx, OPC_ADDIU, ry, rx, imm);
> + }
> + }
> + break;
> + case M16_OPC_ADDIU8:
> + {
> + int16_t imm = (int8_t) ctx->opcode;
> +
> + gen_arith_imm(env, ctx, OPC_ADDIU, rx, rx, imm);
> + }
> + break;
> + case M16_OPC_SLTI:
> + {
> + int16_t imm = (uint8_t) ctx->opcode;
> +
> + gen_slt_imm(env, OPC_SLTI, 24, rx, imm);
> + }
> + break;
> + case M16_OPC_SLTIU:
> + {
> + int16_t imm = (uint8_t) ctx->opcode;
> +
> + gen_slt_imm(env, OPC_SLTIU, 24, rx, imm);
> + }
> + break;
> + case M16_OPC_I8:
> + {
> + int reg32;
> +
> + funct = (ctx->opcode >> 8) & 0x7;
> + switch (funct) {
> + case I8_BTEQZ:
> + gen_compute_branch(ctx, OPC_BEQ, 2, 24, 0,
> + ((int8_t)ctx->opcode) << 1);
> + break;
> + case I8_BTNEZ:
> + gen_compute_branch(ctx, OPC_BNE, 2, 24, 0,
> + ((int8_t)ctx->opcode) << 1);
> + break;
> + case I8_SWRASP:
> + gen_ldst(ctx, OPC_SW, 31, 29, (ctx->opcode & 0xff) << 2);
> + break;
> + case I8_ADJSP:
> + gen_arith_imm(env, ctx, OPC_ADDIU, 29, 29,
> + ((int8_t)ctx->opcode) << 3);
> + break;
> + case I8_SVRS:
> + {
> + int do_ra = ctx->opcode & (1 << 6);
> + int do_s0 = ctx->opcode & (1 << 5);
> + int do_s1 = ctx->opcode & (1 << 4);
> + int framesize = ctx->opcode & 0xf;
> +
> + if (framesize == 0) {
> + framesize = 128;
> + } else {
> + framesize = framesize << 3;
> + }
> +
> + if (ctx->opcode & (1 << 7)) {
> + gen_mips16_save(ctx, 0, 0,
> + do_ra, do_s0, do_s1, framesize);
> + } else {
> + gen_mips16_restore(ctx, 0, 0,
> + do_ra, do_s0, do_s1, framesize);
> + }
> + }
> + break;
> + case I8_MOV32R:
> + {
> + int rz = xlat(ctx->opcode & 0x7);
> +
> + reg32 = (((ctx->opcode >> 3) & 0x3) << 3) |
> + ((ctx->opcode >> 5) & 0x7);
> + gen_arith(env, ctx, OPC_ADDU, reg32, rz, 0);
> + }
> + break;
> + case I8_MOVR32:
> + reg32 = ctx->opcode & 0x1f;
> + gen_arith(env, ctx, OPC_ADDU, ry, reg32, 0);
> + break;
> + default:
> + generate_exception(ctx, EXCP_RI);
> + break;
> + }
> + }
> + break;
> + case M16_OPC_LI:
> + {
> + int16_t imm = (uint8_t) ctx->opcode;
> +
> + gen_arith_imm(env, ctx, OPC_ADDIU, rx, 0, imm);
> + }
> + break;
> + case M16_OPC_CMPI:
> + {
> + int16_t imm = (uint8_t) ctx->opcode;
> +
> + gen_logic_imm(env, OPC_XORI, 24, rx, imm);
> + }
> + break;
> +#if defined(TARGET_MIPS64)
> + case M16_OPC_SD:
> + gen_ldst(ctx, OPC_SD, ry, rx, offset << 3);
> + break;
> +#endif
> + case M16_OPC_LB:
> + gen_ldst(ctx, OPC_LB, ry, rx, offset);
> + break;
> + case M16_OPC_LH:
> + gen_ldst(ctx, OPC_LH, ry, rx, offset << 1);
> + break;
> + case M16_OPC_LWSP:
> + gen_ldst(ctx, OPC_LW, rx, 29, ((uint8_t)ctx->opcode) << 2);
> + break;
> + case M16_OPC_LW:
> + gen_ldst(ctx, OPC_LW, ry, rx, offset << 2);
> + break;
> + case M16_OPC_LBU:
> + gen_ldst(ctx, OPC_LBU, ry, rx, offset);
> + break;
> + case M16_OPC_LHU:
> + gen_ldst(ctx, OPC_LHU, ry, rx, offset << 1);
> + break;
> + case M16_OPC_LWPC:
> + gen_ldst(ctx, OPC_LWPC, rx, 0, ((uint8_t)ctx->opcode) << 2);
> + break;
> +#if defined (TARGET_MIPS64)
> + case M16_OPC_LWU:
> + gen_ldst(ctx, OPC_LWU, ry, rx, offset << 2);
> + break;
> +#endif
> + case M16_OPC_SB:
> + gen_ldst(ctx, OPC_SB, ry, rx, offset);
> + break;
> + case M16_OPC_SH:
> + gen_ldst(ctx, OPC_SH, ry, rx, offset << 1);
> + break;
> + case M16_OPC_SWSP:
> + gen_ldst(ctx, OPC_SW, rx, 29, ((uint8_t)ctx->opcode) << 2);
> + break;
> + case M16_OPC_SW:
> + gen_ldst(ctx, OPC_SW, ry, rx, offset << 2);
> + break;
> + case M16_OPC_RRR:
> + {
> + int rz = xlat((ctx->opcode >> 2) & 0x7);
> + int mips32_op;
> +
> + switch (ctx->opcode & 0x3) {
> + case RRR_ADDU:
> + mips32_op = OPC_ADDU;
> + break;
> + case RRR_SUBU:
> + mips32_op = OPC_SUBU;
> + break;
> +#if defined(TARGET_MIPS64)
> + case RRR_DADDU:
> + mips32_op = OPC_DADDU;
> + break;
> + case RRR_DSUBU:
> + mips32_op = OPC_DSUBU;
> + break;
> +#endif
> + default:
> + generate_exception(ctx, EXCP_RI);
> + goto done;
> + }
> +
> + gen_arith(env, ctx, mips32_op, rz, rx, ry);
> + done:
> + ;
> + }
> + break;
> + case M16_OPC_RR:
> + switch (op1) {
> + case RR_JR:
> + {
> + int nd = (ctx->opcode >> 7) & 0x1;
> + int link = (ctx->opcode >> 6) & 0x1;
> + int ra = (ctx->opcode >> 5) & 0x1;
> +
> + if (link) {
> + op = nd ? OPC_JALRC : OPC_JALR;
> + } else {
> + op = OPC_JR;
> + }
> +
> + gen_compute_branch(ctx, op, 2, ra ? 31 : rx, 31, 0);
> + if (!nd) {
> + *is_branch = 1;
> + }
> + }
> + break;
> + case RR_SDBBP:
> + /* XXX: not clear which exception should be raised
> + * when in debug mode...
> + */
> + check_insn(env, ctx, ISA_MIPS32);
> + if (!(ctx->hflags & MIPS_HFLAG_DM)) {
> + generate_exception(ctx, EXCP_DBp);
> + } else {
> + generate_exception(ctx, EXCP_DBp);
> + }
> + break;
> + case RR_SLT:
> + gen_slt(env, OPC_SLT, 24, rx, ry);
> + break;
> + case RR_SLTU:
> + gen_slt(env, OPC_SLTU, 24, rx, ry);
> + break;
> + case RR_BREAK:
> + generate_exception(ctx, EXCP_BREAK);
> + break;
> + case RR_SLLV:
> + gen_shift(env, ctx, OPC_SLLV, ry, rx, ry);
> + break;
> + case RR_SRLV:
> + gen_shift(env, ctx, OPC_SRLV, ry, rx, ry);
> + break;
> + case RR_SRAV:
> + gen_shift(env, ctx, OPC_SRAV, ry, rx, ry);
> + break;
> +#if defined (TARGET_MIPS64)
> + case RR_DSRL:
> + gen_shift_imm(env, ctx, OPC_DSRL, ry, ry, sa);
> + break;
> +#endif
> + case RR_CMP:
> + gen_logic(env, OPC_XOR, 24, rx, ry);
> + break;
> + case RR_NEG:
> + gen_arith(env, ctx, OPC_SUBU, rx, 0, ry);
> + break;
> + case RR_AND:
> + gen_logic(env, OPC_AND, rx, rx, ry);
> + break;
> + case RR_OR:
> + gen_logic(env, OPC_OR, rx, rx, ry);
> + break;
> + case RR_XOR:
> + gen_logic(env, OPC_XOR, rx, rx, ry);
> + break;
> + case RR_NOT:
> + gen_logic(env, OPC_NOR, rx, ry, 0);
> + break;
> + case RR_MFHI:
> + gen_HILO(ctx, OPC_MFHI, rx);
> + break;
> + case RR_CNVT:
> + switch (cnvt_op) {
> + case RR_RY_CNVT_ZEB:
> + tcg_gen_ext8u_tl(cpu_gpr[rx], cpu_gpr[rx]);
> + break;
> + case RR_RY_CNVT_ZEH:
> + tcg_gen_ext16u_tl(cpu_gpr[rx], cpu_gpr[rx]);
> + break;
> + case RR_RY_CNVT_SEB:
> + tcg_gen_ext8s_tl(cpu_gpr[rx], cpu_gpr[rx]);
> + break;
> + case RR_RY_CNVT_SEH:
> + tcg_gen_ext16s_tl(cpu_gpr[rx], cpu_gpr[rx]);
> + break;
> +#if defined (TARGET_MIPS64)
> + case RR_RY_CNVT_ZEW:
> + tcg_gen_ext32u_tl(cpu_gpr[rx], cpu_gpr[rx]);
> + break;
> + case RR_RY_CNVT_SEW:
> + tcg_gen_ext32s_tl(cpu_gpr[rx], cpu_gpr[rx]);
> + break;
> +#endif
> + default:
> + generate_exception(ctx, EXCP_RI);
> + break;
> + }
> + break;
> + case RR_MFLO:
> + gen_HILO(ctx, OPC_MFLO, rx);
> + break;
> +#if defined (TARGET_MIPS64)
> + case RR_DSRA:
> + gen_shift_imm(env, ctx, OPC_DSRA, ry, ry, sa);
> + break;
> + case RR_DSLLV:
> + gen_shift(env, ctx, OPC_DSLLV, ry, rx, ry);
> + case RR_DSRLV:
> + gen_shift(env, ctx, OPC_DSRLV, ry, rx, ry);
> + break;
> + case RR_DSRAV:
> + gen_shift(env, ctx, OPC_DSRAV, ry, rx, ry);
> + break;
> +#endif
> + case RR_MULT:
> + gen_muldiv(ctx, OPC_MULT, rx, ry);
> + break;
> + case RR_MULTU:
> + gen_muldiv(ctx, OPC_MULTU, rx, ry);
> + break;
> + case RR_DIV:
> + gen_muldiv(ctx, OPC_DIV, rx, ry);
> + break;
> + case RR_DIVU:
> + gen_muldiv(ctx, OPC_DIVU, rx, ry);
> + break;
> +#if defined (TARGET_MIPS64)
> + case RR_DMULT:
> + gen_muldiv(ctx, OPC_DMULT, rx, ry);
> + break;
> + case RR_DMULTU:
> + gen_muldiv(ctx, OPC_DMULTU, rx, ry);
> + break;
> + case RR_DDIV:
> + gen_muldiv(ctx, OPC_DDIV, rx, ry);
> + break;
> + case RR_DDIVU:
> + gen_muldiv(ctx, OPC_DDIVU, rx, ry);
> + break;
> +#endif
> + default:
> + generate_exception(ctx, EXCP_RI);
> + break;
> + }
> + break;
> + case M16_OPC_EXTEND:
> + decode_extended_mips16_opc(env, ctx, is_branch);
> + n_bytes = 4;
> + break;
> +#if defined(TARGET_MIPS64)
> + case M16_OPC_I64:
> + funct = (ctx->opcode >> 8) & 0x7;
> + switch (funct) {
> + case I64_LDSP:
> + gen_ldst(ctx, OPC_LD, ry, 29, offset << 3);
> + break;
> + case I64_SDSP:
> + gen_ldst(ctx, OPC_SD, ry, 29, offset << 3);
> + break;
> + case I64_SDRASP:
> + gen_ldst(ctx, OPC_SD, 31, 29, (ctx->opcode & 0xff) << 3);
> + break;
> + case I64_DADJSP:
> + gen_arith_imm(env, ctx, OPC_DADDIU, 29, 29,
> + ((int8_t)ctx->opcode) << 3);
> + break;
> + case I64_LDPC:
> + break;
> + case I64_DADDIU:
> + gen_arith_imm(env, ctx, OPC_DADDIU, ry, ry,
> + ((int8_t)(offset << 3)) >> 3);
> + break;
> + case I64_DADDIUPC:
> + break;
> + case I64_DADDIUSP:
> + gen_arith_imm(env, ctx, OPC_DADDIU, ry, 29,
> + ((int8_t)(offset << 3)) >> 1);
> + break;
> + }
> +#endif
> + default:
> + generate_exception(ctx, EXCP_RI);
> + break;
> + }
> +
> + return n_bytes;
> +}
> +
> /* SmartMIPS extension to MIPS32 */
>
> #if defined(TARGET_MIPS64)
> @@ -8400,7 +9366,10 @@ static void decode_opc (CPUState *env, DisasContext *ctx, int *is_branch)
> #endif
> case OPC_JALX:
> check_insn(env, ctx, ASE_MIPS16);
> - /* MIPS16: Not implemented. */
> + offset = (int32_t)(ctx->opcode & 0x3FFFFFF) << 2;
> + gen_compute_branch(ctx, op, 4, rs, rt, offset);
> + *is_branch = 1;
> + break;
> case OPC_MDMX:
> check_insn(env, ctx, ASE_MDMX);
> /* MDMX: Not implemented. */
> @@ -8490,6 +9459,9 @@ gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
> ctx.opcode = ldl_code(ctx.pc);
> insn_bytes = 4;
> decode_opc(env, &ctx, &is_branch);
> + } else if (env->insn_flags & ASE_MIPS16) {
> + ctx.opcode = lduw_code(ctx.pc);
> + insn_bytes = decode_mips16_opc(env, &ctx, &is_branch);
> } else {
> generate_exception(&ctx, EXCP_RI);
> break;
> --
> 1.6.3.2
>
>
>
>
--
Aurelien Jarno GPG: 1024D/F1BCDB73
address@hidden http://www.aurel32.net