Discussion:
[Qemu-devel] [PATCH v3 03/38] target-microblaze: compute_ldst_addr: Use bool instead of int
Edgar E. Iglesias
2018-05-16 18:51:11 UTC
From: "Edgar E. Iglesias" <***@xilinx.com>

Use bool instead of int to represent flags.
No functional change.

Reviewed-by: Alistair Francis <***@wdc.com>
Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 413e683aec..46595e6336 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -845,13 +845,13 @@ static void dec_imm(DisasContext *dc)

static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
{
- unsigned int extimm = dc->tb_flags & IMM_FLAG;
- /* Should be set to one if r1 is used by loadstores. */
- int stackprot = 0;
+ bool extimm = dc->tb_flags & IMM_FLAG;
+ /* Should be set to true if r1 is used by loadstores. */
+ bool stackprot = false;

/* All load/stores use ra. */
if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
- stackprot = 1;
+ stackprot = true;
}

/* Treat the common cases first. */
@@ -864,7 +864,7 @@ static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
}

if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
- stackprot = 1;
+ stackprot = true;
}

*t = tcg_temp_new();
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:22 UTC
From: "Edgar E. Iglesias" <***@xilinx.com>

Name special registers we support.

Reviewed-by: Alistair Francis <***@wdc.com>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index a3cc1e0ef1..12cb345f64 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -105,8 +105,8 @@ static const char *regnames[] =

static const char *special_regnames[] =
{
- "rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
- "sr8", "sr9", "sr10", "sr11", "sr12", "sr13"
+ "rpc", "rmsr", "sr2", "rear", "sr4", "resr", "sr6", "rfsr",
+ "sr8", "sr9", "sr10", "rbtr", "sr12", "redr"
};

static inline void t_sync_flags(DisasContext *dc)
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:13 UTC
From: "Edgar E. Iglesias" <***@xilinx.com>

Correct special register array sizes.

Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/cpu.h | 4 ++--
target/microblaze/translate.c | 5 ++---
2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index 5be71bc320..994496515f 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -242,8 +242,8 @@ struct CPUMBState {
uint32_t bimm;

uint32_t imm;
- uint32_t regs[33];
- uint32_t sregs[24];
+ uint32_t regs[32];
+ uint32_t sregs[14];
float_status fp_status;
/* Stack protectors. Yes, it's a hw feature. */
uint32_t slr, shr;
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 46595e6336..9614f15d58 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -54,7 +54,7 @@

static TCGv env_debug;
static TCGv cpu_R[32];
-static TCGv cpu_SR[18];
+static TCGv cpu_SR[14];
static TCGv env_imm;
static TCGv env_btaken;
static TCGv env_btarget;
@@ -106,8 +106,7 @@ static const char *regnames[] =
static const char *special_regnames[] =
{
"rpc", "rmsr", "sr2", "sr3", "sr4", "sr5", "sr6", "sr7",
- "sr8", "sr9", "sr10", "sr11", "sr12", "sr13", "sr14", "sr15",
- "sr16", "sr17", "sr18"
+ "sr8", "sr9", "sr10", "sr11", "sr12", "sr13"
};

static inline void t_sync_flags(DisasContext *dc)
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:19 UTC
From: "Edgar E. Iglesias" <***@xilinx.com>

Make compute_ldst_addr always use a temp. This simplifies
the code a bit in preparation for adding support for
64-bit addresses.

No functional change.
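
The design choice is to have callers always allocate the temporary and let
compute_ldst_addr() only fill it in, rather than sometimes returning a
pointer to a global register and sometimes to a freshly allocated temp. A
minimal sketch of that shape in plain C (illustrative names, not the QEMU
code):

    #include <stdio.h>

    /* The helper writes into a caller-owned destination; it never hands
     * back shared state, so callers can modify and discard the result
     * unconditionally. */
    static void compute_addr(unsigned *dest, unsigned ra, unsigned rb)
    {
        *dest = ra + rb;            /* stand-in for tcg_gen_add_i32() */
    }

    int main(void)
    {
        unsigned addr;              /* caller-allocated "temp" */
        compute_addr(&addr, 0x1000, 0x20);
        printf("addr = 0x%x\n", addr);
        return 0;                   /* caller discards it unconditionally */
    }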

Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 111 ++++++++++++++----------------------------
1 file changed, 37 insertions(+), 74 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 2e9a286af6..3431a07b99 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -848,7 +848,7 @@ static void dec_imm(DisasContext *dc)
dc->clear_imm = 0;
}

-static inline TCGv_i32 *compute_ldst_addr(DisasContext *dc, TCGv_i32 *t)
+static inline void compute_ldst_addr(DisasContext *dc, TCGv_i32 *t)
{
bool extimm = dc->tb_flags & IMM_FLAG;
/* Should be set to true if r1 is used by loadstores. */
@@ -861,47 +861,47 @@ static inline TCGv_i32 *compute_ldst_addr(DisasContext *dc, TCGv_i32 *t)

/* Treat the common cases first. */
if (!dc->type_b) {
- /* If any of the regs is r0, return a ptr to the other. */
+ /* If any of the regs is r0, return the value of the other reg. */
if (dc->ra == 0) {
- return &cpu_R[dc->rb];
+ tcg_gen_mov_i32(*t, cpu_R[dc->rb]);
+ return;
} else if (dc->rb == 0) {
- return &cpu_R[dc->ra];
+ tcg_gen_mov_i32(*t, cpu_R[dc->ra]);
+ return;
}

if (dc->rb == 1 && dc->cpu->cfg.stackprot) {
stackprot = true;
}

- *t = tcg_temp_new_i32();
tcg_gen_add_i32(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

if (stackprot) {
gen_helper_stackprot(cpu_env, *t);
}
- return t;
+ return;
}
/* Immediate. */
if (!extimm) {
if (dc->imm == 0) {
- return &cpu_R[dc->ra];
+ tcg_gen_mov_i32(*t, cpu_R[dc->ra]);
+ return;
}
- *t = tcg_temp_new_i32();
tcg_gen_movi_i32(*t, (int32_t)((int16_t)dc->imm));
tcg_gen_add_i32(*t, cpu_R[dc->ra], *t);
} else {
- *t = tcg_temp_new_i32();
tcg_gen_add_i32(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

if (stackprot) {
gen_helper_stackprot(cpu_env, *t);
}
- return t;
+ return;
}

static void dec_load(DisasContext *dc)
{
- TCGv_i32 t, v, *addr;
+ TCGv_i32 v, addr;
unsigned int size;
bool rev = false, ex = false;
TCGMemOp mop;
@@ -928,7 +928,8 @@ static void dec_load(DisasContext *dc)
ex ? "x" : "");

t_sync_flags(dc);
- addr = compute_ldst_addr(dc, &t);
+ addr = tcg_temp_new_i32();
+ compute_ldst_addr(dc, &addr);

/*
* When doing reverse accesses we need to do two things.
@@ -947,17 +948,10 @@ static void dec_load(DisasContext *dc)
11 -> 00 */
TCGv_i32 low = tcg_temp_new_i32();

- /* Force addr into the temp. */
- if (addr != &t) {
- t = tcg_temp_new_i32();
- tcg_gen_mov_i32(t, *addr);
- addr = &t;
- }
-
- tcg_gen_andi_i32(low, t, 3);
+ tcg_gen_andi_i32(low, addr, 3);
tcg_gen_sub_i32(low, tcg_const_i32(3), low);
- tcg_gen_andi_i32(t, t, ~3);
- tcg_gen_or_i32(t, t, low);
+ tcg_gen_andi_i32(addr, addr, ~3);
+ tcg_gen_or_i32(addr, addr, low);
tcg_temp_free_i32(low);
break;
}
@@ -965,14 +959,7 @@ static void dec_load(DisasContext *dc)
case 2:
/* 00 -> 10
10 -> 00. */
- /* Force addr into the temp. */
- if (addr != &t) {
- t = tcg_temp_new_i32();
- tcg_gen_xori_i32(t, *addr, 2);
- addr = &t;
- } else {
- tcg_gen_xori_i32(t, t, 2);
- }
+ tcg_gen_xori_i32(addr, addr, 2);
break;
default:
cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
@@ -982,13 +969,7 @@ static void dec_load(DisasContext *dc)

/* lwx does not throw unaligned access errors, so force alignment */
if (ex) {
- /* Force addr into the temp. */
- if (addr != &t) {
- t = tcg_temp_new_i32();
- tcg_gen_mov_i32(t, *addr);
- addr = &t;
- }
- tcg_gen_andi_i32(t, t, ~3);
+ tcg_gen_andi_i32(addr, addr, ~3);
}

/* If we get a fault on a dslot, the jmpstate better be in sync. */
@@ -1002,16 +983,16 @@ static void dec_load(DisasContext *dc)
* address and if that succeeds we write into the destination reg.
*/
v = tcg_temp_new_i32();
- tcg_gen_qemu_ld_i32(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
+ tcg_gen_qemu_ld_i32(v, addr, cpu_mmu_index(&dc->cpu->env, false), mop);

if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
- gen_helper_memalign(cpu_env, *addr, tcg_const_i32(dc->rd),
+ gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
tcg_const_i32(0), tcg_const_i32(size - 1));
}

if (ex) {
- tcg_gen_mov_i32(env_res_addr, *addr);
+ tcg_gen_mov_i32(env_res_addr, addr);
tcg_gen_mov_i32(env_res_val, v);
}
if (dc->rd) {
@@ -1024,13 +1005,12 @@ static void dec_load(DisasContext *dc)
write_carryi(dc, 0);
}

- if (addr == &t)
- tcg_temp_free_i32(t);
+ tcg_temp_free_i32(addr);
}

static void dec_store(DisasContext *dc)
{
- TCGv_i32 t, *addr, swx_addr;
+ TCGv_i32 addr;
TCGLabel *swx_skip = NULL;
unsigned int size;
bool rev = false, ex = false;
@@ -1059,21 +1039,19 @@ static void dec_store(DisasContext *dc)
t_sync_flags(dc);
/* If we get a fault on a dslot, the jmpstate better be in sync. */
sync_jmpstate(dc);
- addr = compute_ldst_addr(dc, &t);
+ /* SWX needs a temp_local. */
+ addr = ex ? tcg_temp_local_new_i32() : tcg_temp_new_i32();
+ compute_ldst_addr(dc, &addr);

- swx_addr = tcg_temp_local_new_i32();
if (ex) { /* swx */
TCGv_i32 tval;

- /* Force addr into the swx_addr. */
- tcg_gen_mov_i32(swx_addr, *addr);
- addr = &swx_addr;
/* swx does not throw unaligned access errors, so force alignment */
- tcg_gen_andi_i32(swx_addr, swx_addr, ~3);
+ tcg_gen_andi_i32(addr, addr, ~3);

write_carryi(dc, 1);
swx_skip = gen_new_label();
- tcg_gen_brcond_i32(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
+ tcg_gen_brcond_i32(TCG_COND_NE, env_res_addr, addr, swx_skip);

/* Compare the value loaded at lwx with current contents of
the reserved location.
@@ -1081,8 +1059,8 @@ static void dec_store(DisasContext *dc)
this compare and the following write to be atomic. For user
emulation we need to add atomicity between threads. */
tval = tcg_temp_new_i32();
- tcg_gen_qemu_ld_i32(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
- MO_TEUL);
+ tcg_gen_qemu_ld_i32(tval, addr, cpu_mmu_index(&dc->cpu->env, false),
+ MO_TEUL);
tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
write_carryi(dc, 0);
tcg_temp_free_i32(tval);
@@ -1099,17 +1077,10 @@ static void dec_store(DisasContext *dc)
11 -> 00 */
TCGv_i32 low = tcg_temp_new_i32();

- /* Force addr into the temp. */
- if (addr != &t) {
- t = tcg_temp_new_i32();
- tcg_gen_mov_i32(t, *addr);
- addr = &t;
- }
-
- tcg_gen_andi_i32(low, t, 3);
+ tcg_gen_andi_i32(low, addr, 3);
tcg_gen_sub_i32(low, tcg_const_i32(3), low);
- tcg_gen_andi_i32(t, t, ~3);
- tcg_gen_or_i32(t, t, low);
+ tcg_gen_andi_i32(addr, addr, ~3);
+ tcg_gen_or_i32(addr, addr, low);
tcg_temp_free_i32(low);
break;
}
@@ -1118,20 +1089,14 @@ static void dec_store(DisasContext *dc)
/* 00 -> 10
10 -> 00. */
/* Force addr into the temp. */
- if (addr != &t) {
- t = tcg_temp_new_i32();
- tcg_gen_xori_i32(t, *addr, 2);
- addr = &t;
- } else {
- tcg_gen_xori_i32(t, t, 2);
- }
+ tcg_gen_xori_i32(addr, addr, 2);
break;
default:
cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
break;
}
}
- tcg_gen_qemu_st_i32(cpu_R[dc->rd], *addr,
+ tcg_gen_qemu_st_i32(cpu_R[dc->rd], addr,
cpu_mmu_index(&dc->cpu->env, false), mop);

/* Verify alignment if needed. */
@@ -1143,17 +1108,15 @@ static void dec_store(DisasContext *dc)
* the alignment checks in between the probe and the mem
* access.
*/
- gen_helper_memalign(cpu_env, *addr, tcg_const_i32(dc->rd),
+ gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
tcg_const_i32(1), tcg_const_i32(size - 1));
}

if (ex) {
gen_set_label(swx_skip);
}
- tcg_temp_free_i32(swx_addr);

- if (addr == &t)
- tcg_temp_free_i32(t);
+ tcg_temp_free_i32(addr);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:25 UTC
From: "Edgar E. Iglesias" <***@xilinx.com>

Use bool and extract32 to represent the to, clr and
clrset flags.

No functional change.
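
For reference, a minimal standalone sketch of what an extract32()-style
decode of these fields looks like. This is an illustrative model, not the
QEMU helper itself, and the field positions simply mirror the ones used in
the patch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Model of extract32(): take 'length' bits of 'value' starting at
     * bit 'start' and return them right-aligned. */
    static uint32_t extract32_model(uint32_t value, int start, int length)
    {
        return (value >> start) & (~0U >> (32 - length));
    }

    int main(void)
    {
        uint32_t imm = 0x4005;                              /* example immediate */
        unsigned sr = extract32_model(imm, 0, 14);          /* special reg number */
        bool to = extract32_model(imm, 14, 1);              /* 1: MTS, 0: MFS */
        bool clrset = extract32_model(imm, 15, 1) == 0;     /* msrclr/msrset form */

        printf("sr=%u to=%d clrset=%d\n", sr, to, clrset);
        return 0;
    }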

Reviewed-by: Alistair Francis <***@wdc.com>
Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index b0a76fb23b..f623617fa0 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -458,17 +458,20 @@ static void dec_msr(DisasContext *dc)
{
CPUState *cs = CPU(dc->cpu);
TCGv_i32 t0, t1;
- unsigned int sr, to, rn;
+ unsigned int sr, rn;
+ bool to, clrset;

- sr = dc->imm & ((1 << 14) - 1);
- to = dc->imm & (1 << 14);
+ sr = extract32(dc->imm, 0, 14);
+ to = extract32(dc->imm, 14, 1);
+ clrset = extract32(dc->imm, 15, 1) == 0;
dc->type_b = 1;
- if (to)
+ if (to) {
dc->cpustate_changed = 1;
+ }

/* msrclr and msrset. */
- if (!(dc->imm & (1 << 15))) {
- unsigned int clr = dc->ir & (1 << 16);
+ if (clrset) {
+ bool clr = extract32(dc->ir, 16, 1);

LOG_DIS("msr%s r%d imm=%x\n", clr ? "clr" : "set",
dc->rd, dc->imm);
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:24 UTC
From: "Edgar E. Iglesias" <***@xilinx.com>

Break out trap_illegal() to handle illegal operation traps.
We now generally stop translation of the current insn if
it's not valid.
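
As a rough standalone sketch of the pattern (illustrative code, not the QEMU
implementation): the guard both raises the trap when exceptions are enabled
and reports whether the insn was illegal, so decoders can bail out with a
single "if (trap_illegal(...)) return;":

    #include <stdbool.h>
    #include <stdio.h>

    struct ctx { bool exceptions_enabled; };

    static void raise_illegal_op(struct ctx *c)
    {
        printf("raise illegal-opcode exception\n");
    }

    /* Returns true if the insn is illegal; raises the trap if enabled. */
    static bool trap_illegal(struct ctx *c, bool cond)
    {
        if (cond && c->exceptions_enabled) {
            raise_illegal_op(c);
        }
        return cond;
    }

    static void dec_mul(struct ctx *c, bool have_hw_mul)
    {
        if (trap_illegal(c, !have_hw_mul)) {
            return;                     /* stop decoding this insn */
        }
        printf("decode mul normally\n");
    }

    int main(void)
    {
        struct ctx c = { .exceptions_enabled = true };
        dec_mul(&c, false);
        dec_mul(&c, true);
        return 0;
    }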

Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 75 ++++++++++++++++---------------------------
1 file changed, 27 insertions(+), 48 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 8f72cf39fb..b0a76fb23b 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -179,6 +179,20 @@ static void write_carryi(DisasContext *dc, bool carry)
tcg_temp_free_i32(t0);
}

+/*
+ * Returns true if the insn is an illegal operation.
+ * If exceptions are enabled, an exception is raised.
+ */
+static bool trap_illegal(DisasContext *dc, bool cond)
+{
+ if (cond && (dc->tb_flags & MSR_EE_FLAG)
+ && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ }
+ return cond;
+}
+
/*
* Returns true if the insn is illegal in userspace.
* If exceptions are enabled, an exception is raised.
@@ -344,11 +358,8 @@ static void dec_pattern(DisasContext *dc)
{
unsigned int mode;

- if ((dc->tb_flags & MSR_EE_FLAG)
- && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
- && !dc->cpu->cfg.use_pcmp_instr) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
+ return;
}

mode = dc->opcode & 3;
@@ -602,11 +613,7 @@ static void dec_mul(DisasContext *dc)
TCGv_i32 tmp;
unsigned int subcode;

- if ((dc->tb_flags & MSR_EE_FLAG)
- && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
- && !dc->cpu->cfg.use_hw_mul) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ if (trap_illegal(dc, !dc->cpu->cfg.use_hw_mul)) {
return;
}

@@ -658,10 +665,8 @@ static void dec_div(DisasContext *dc)
u = dc->imm & 2;
LOG_DIS("div\n");

- if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
- && !dc->cpu->cfg.use_div) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ if (trap_illegal(dc, !dc->cpu->cfg.use_div)) {
+ return;
}

if (u)
@@ -680,11 +685,7 @@ static void dec_barrel(DisasContext *dc)
unsigned int imm_w, imm_s;
bool s, t, e = false, i = false;

- if ((dc->tb_flags & MSR_EE_FLAG)
- && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
- && !dc->cpu->cfg.use_barrel) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ if (trap_illegal(dc, !dc->cpu->cfg.use_barrel)) {
return;
}

@@ -798,11 +799,8 @@ static void dec_bit(DisasContext *dc)
trap_userspace(dc, true);
break;
case 0xe0:
- if ((dc->tb_flags & MSR_EE_FLAG)
- && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
- && !dc->cpu->cfg.use_pcmp_instr) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ if (trap_illegal(dc, !dc->cpu->cfg.use_pcmp_instr)) {
+ return;
}
if (dc->cpu->cfg.use_pcmp_instr) {
tcg_gen_clzi_i32(cpu_R[dc->rd], cpu_R[dc->ra], 32);
@@ -921,10 +919,7 @@ static void dec_load(DisasContext *dc)
mop ^= MO_BSWAP;
}

- if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
- && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ if (trap_illegal(dc, size > 4)) {
return;
}

@@ -1031,10 +1026,7 @@ static void dec_store(DisasContext *dc)
mop ^= MO_BSWAP;
}

- if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
- && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ if (trap_illegal(dc, size > 4)) {
return;
}

@@ -1368,11 +1360,7 @@ static void dec_fpu(DisasContext *dc)
{
unsigned int fpu_insn;

- if ((dc->tb_flags & MSR_EE_FLAG)
- && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
- && !dc->cpu->cfg.use_fpu) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ if (trap_illegal(dc, !dc->cpu->cfg.use_fpu)) {
return;
}

@@ -1471,10 +1459,7 @@ static void dec_fpu(DisasContext *dc)

static void dec_null(DisasContext *dc)
{
- if ((dc->tb_flags & MSR_EE_FLAG)
- && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ if (trap_illegal(dc, true)) {
return;
}
qemu_log_mask(LOG_GUEST_ERROR, "unknown insn pc=%x opc=%x\n", dc->pc, dc->opcode);
@@ -1552,13 +1537,7 @@ static inline void decode(DisasContext *dc, uint32_t ir)
if (dc->ir)
dc->nr_nops = 0;
else {
- if ((dc->tb_flags & MSR_EE_FLAG)
- && (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
- && (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
- return;
- }
+ trap_illegal(dc, dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK);

LOG_DIS("nr_nops=%d\t", dc->nr_nops);
dc->nr_nops++;
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:33 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Add explicit handling for MMU_R_TLBX and log accesses to
invalid MMU registers. We can now remove the state for
all regs but PID, ZPR and TLBX (0 - 2).

Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/mmu.c | 7 +++++--
target/microblaze/mmu.h | 2 +-
2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c
index f4a4c339c9..231803ceea 100644
--- a/target/microblaze/mmu.c
+++ b/target/microblaze/mmu.c
@@ -211,11 +211,14 @@ uint32_t mmu_read(CPUMBState *env, uint32_t rn)
}
r = env->mmu.regs[rn];
break;
+ case MMU_R_TLBX:
+ r = env->mmu.regs[rn];
+ break;
case MMU_R_TLBSX:
qemu_log_mask(LOG_GUEST_ERROR, "TLBSX is write-only.\n");
break;
default:
- r = env->mmu.regs[rn];
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
break;
}
D(qemu_log("%s rn=%d=%x\n", __func__, rn, r));
@@ -298,7 +301,7 @@ void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
break;
}
default:
- env->mmu.regs[rn] = v;
+ qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
break;
}
}
diff --git a/target/microblaze/mmu.h b/target/microblaze/mmu.h
index 113539c6e9..624becfded 100644
--- a/target/microblaze/mmu.h
+++ b/target/microblaze/mmu.h
@@ -67,7 +67,7 @@ struct microblaze_mmu
/* We keep a separate ram for the tids to avoid the 48 bit tag width. */
uint8_t tids[TLB_ENTRIES];
/* Control flops. */
- uint32_t regs[8];
+ uint32_t regs[3];

int c_mmu;
int c_mmu_tlb_access;
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:16 UTC
From: "Edgar E. Iglesias" <***@xilinx.com>

We already have a CPU property to control if a core has
an MMU or not. Remove USE_MMU PVR checks in favor of
looking at the property.

Reviewed-by: Alistair Francis <***@wdc.com>
Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/helper.c | 12 +-----------
1 file changed, 1 insertion(+), 11 deletions(-)

diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
index 387d4aca5a..a9f4ca93e3 100644
--- a/target/microblaze/helper.c
+++ b/target/microblaze/helper.c
@@ -54,21 +54,11 @@ int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
CPUMBState *env = &cpu->env;
unsigned int hit;
- unsigned int mmu_available;
int r = 1;
int prot;

- mmu_available = 0;
- if (cpu->cfg.use_mmu) {
- mmu_available = 1;
- if ((cpu->cfg.pvr == C_PVR_FULL) &&
- (env->pvr.regs[11] & PVR11_USE_MMU) != PVR11_USE_MMU) {
- mmu_available = 0;
- }
- }
-
/* Translate if the MMU is available and enabled. */
- if (mmu_available && (env->sregs[SR_MSR] & MSR_VM)) {
+ if (cpu->cfg.use_mmu && (env->sregs[SR_MSR] & MSR_VM)) {
uint32_t vaddr, paddr;
struct microblaze_mmu_lookup lu;
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:27 UTC
From: "Edgar E. Iglesias" <***@xilinx.com>

Fix moves to FSR. Not only bit 31 is accessible.

Reviewed-by: Alistair Francis <***@wdc.com>
Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 675db78030..528450a8e2 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -533,11 +533,9 @@ static void dec_msr(DisasContext *dc)
break;
case SR_EAR:
case SR_ESR:
+ case SR_FSR:
tcg_gen_mov_i32(cpu_SR[sr], cpu_R[dc->ra]);
break;
- case 0x7:
- tcg_gen_andi_i32(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
- break;
case 0x800:
tcg_gen_st_i32(cpu_R[dc->ra],
cpu_env, offsetof(CPUMBState, slr));
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:35 UTC
From: "Edgar E. Iglesias" <***@xilinx.com>

Add a configurable output address mask, used to mimic the
configurable physical address bit width.
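
For reference, an illustrative model of what MAKE_64BIT_MASK(0, len) and the
new mask accomplish; the addr_size value below is just an assumed example
configuration, not taken from the patch:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Model of MAKE_64BIT_MASK(0, len): a mask with 'len' low bits set. */
    static uint64_t make_mask(unsigned len)
    {
        return len >= 64 ? ~UINT64_C(0) : (UINT64_C(1) << len) - 1;
    }

    int main(void)
    {
        uint64_t rpn = UINT64_C(0x123456789000); /* example translated address */
        unsigned addr_size = 36;                 /* assumed config value */
        uint64_t paddr = rpn & make_mask(addr_size);

        printf("paddr = 0x%" PRIx64 "\n", paddr); /* prints 0x456789000 */
        return 0;
    }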

Reviewed-by: Alistair Francis <***@wdc.com>
Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/cpu.c | 1 +
target/microblaze/mmu.c | 1 +
target/microblaze/mmu.h | 1 +
3 files changed, 3 insertions(+)

diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index 2b3f8fa374..d0649fdaaa 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -128,6 +128,7 @@ static void mb_cpu_reset(CPUState *s)
env->mmu.c_mmu = 3;
env->mmu.c_mmu_tlb_access = 3;
env->mmu.c_mmu_zones = 16;
+ env->mmu.c_addr_mask = MAKE_64BIT_MASK(0, cpu->cfg.addr_size);
#endif
}

diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c
index a379968618..166c79908c 100644
--- a/target/microblaze/mmu.c
+++ b/target/microblaze/mmu.c
@@ -164,6 +164,7 @@ unsigned int mmu_translate(struct microblaze_mmu *mmu,
tlb_rpn = d & TLB_RPN_MASK;

lu->vaddr = tlb_tag;
+ lu->paddr = tlb_rpn & mmu->c_addr_mask;
lu->paddr = tlb_rpn;
lu->size = tlb_size;
lu->err = ERR_HIT;
diff --git a/target/microblaze/mmu.h b/target/microblaze/mmu.h
index 1714caf82e..9fbdf38f36 100644
--- a/target/microblaze/mmu.h
+++ b/target/microblaze/mmu.h
@@ -72,6 +72,7 @@ struct microblaze_mmu
int c_mmu;
int c_mmu_tlb_access;
int c_mmu_zones;
+ uint64_t c_addr_mask; /* Mask to apply to physical addresses. */
};

struct microblaze_mmu_lookup
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:28 UTC
From: "Edgar E. Iglesias" <***@xilinx.com>

Extend special registers to 64 bits. This is in preparation for
MFSE/MTSE, moves to and from extended special registers.
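
The point of widening the backing store is that registers such as EAR can
then hold values wider than 32 bits while ordinary 32-bit moves keep using
the low word. A standalone sketch of the low/high split that the
extrl/extrh TCG ops perform (illustrative model only):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t extr_low(uint64_t v)  { return (uint32_t)v; }
    static uint32_t extr_high(uint64_t v) { return (uint32_t)(v >> 32); }

    int main(void)
    {
        uint64_t ear = UINT64_C(0x0000000A12345678); /* fault address > 32 bits */

        printf("low  word = 0x%08" PRIx32 "\n", extr_low(ear));  /* 0x12345678 */
        printf("high word = 0x%08" PRIx32 "\n", extr_high(ear)); /* 0x0000000a */
        return 0;
    }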

Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
linux-user/microblaze/cpu_loop.c | 4 +-
target/microblaze/cpu.h | 2 +-
target/microblaze/helper.c | 15 ++++--
target/microblaze/mmu.c | 3 +-
target/microblaze/op_helper.c | 9 ++--
target/microblaze/translate.c | 99 +++++++++++++++++++++-------------------
6 files changed, 72 insertions(+), 60 deletions(-)

diff --git a/linux-user/microblaze/cpu_loop.c b/linux-user/microblaze/cpu_loop.c
index 5ffb83dea2..5af12d5b21 100644
--- a/linux-user/microblaze/cpu_loop.c
+++ b/linux-user/microblaze/cpu_loop.c
@@ -105,8 +105,8 @@ void cpu_loop(CPUMBState *env)
queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
break;
default:
- printf ("Unhandled hw-exception: 0x%x\n",
- env->sregs[SR_ESR] & ESR_EC_MASK);
+ printf("Unhandled hw-exception: 0x%" PRIx64 "\n",
+ env->sregs[SR_ESR] & ESR_EC_MASK);
cpu_dump_state(cs, stderr, fprintf, 0);
exit(EXIT_FAILURE);
break;
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index 1593496997..215f42b384 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -243,7 +243,7 @@ struct CPUMBState {

uint32_t imm;
uint32_t regs[32];
- uint32_t sregs[14];
+ uint64_t sregs[14];
float_status fp_status;
/* Stack protectors. Yes, it's a hw feature. */
uint32_t slr, shr;
diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
index 261dcc74c7..985bdae8d1 100644
--- a/target/microblaze/helper.c
+++ b/target/microblaze/helper.c
@@ -143,7 +143,8 @@ void mb_cpu_do_interrupt(CPUState *cs)
env->sregs[SR_MSR] |= MSR_EIP;

qemu_log_mask(CPU_LOG_INT,
- "hw exception at pc=%x ear=%x esr=%x iflags=%x\n",
+ "hw exception at pc=%" PRIx64 " ear=%" PRIx64 " "
+ "esr=%" PRIx64 " iflags=%x\n",
env->sregs[SR_PC], env->sregs[SR_EAR],
env->sregs[SR_ESR], env->iflags);
log_cpu_state_mask(CPU_LOG_INT, cs, 0);
@@ -166,7 +167,8 @@ void mb_cpu_do_interrupt(CPUState *cs)
/* was the branch immprefixed?. */
if (env->bimm) {
qemu_log_mask(CPU_LOG_INT,
- "bimm exception at pc=%x iflags=%x\n",
+ "bimm exception at pc=%" PRIx64 " "
+ "iflags=%x\n",
env->sregs[SR_PC], env->iflags);
env->regs[17] -= 4;
log_cpu_state_mask(CPU_LOG_INT, cs, 0);
@@ -184,7 +186,8 @@ void mb_cpu_do_interrupt(CPUState *cs)
env->sregs[SR_MSR] |= MSR_EIP;

qemu_log_mask(CPU_LOG_INT,
- "exception at pc=%x ear=%x iflags=%x\n",
+ "exception at pc=%" PRIx64 " ear=%" PRIx64 " "
+ "iflags=%x\n",
env->sregs[SR_PC], env->sregs[SR_EAR], env->iflags);
log_cpu_state_mask(CPU_LOG_INT, cs, 0);
env->iflags &= ~(IMM_FLAG | D_FLAG);
@@ -221,7 +224,8 @@ void mb_cpu_do_interrupt(CPUState *cs)
}
#endif
qemu_log_mask(CPU_LOG_INT,
- "interrupt at pc=%x msr=%x %x iflags=%x\n",
+ "interrupt at pc=%" PRIx64 " msr=%" PRIx64 " %x "
+ "iflags=%x\n",
env->sregs[SR_PC], env->sregs[SR_MSR], t, env->iflags);

env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM \
@@ -239,7 +243,8 @@ void mb_cpu_do_interrupt(CPUState *cs)
assert(!(env->iflags & D_FLAG));
t = (env->sregs[SR_MSR] & (MSR_VM | MSR_UM)) << 1;
qemu_log_mask(CPU_LOG_INT,
- "break at pc=%x msr=%x %x iflags=%x\n",
+ "break at pc=%" PRIx64 " msr=%" PRIx64 " %x "
+ "iflags=%x\n",
env->sregs[SR_PC], env->sregs[SR_MSR], t, env->iflags);
log_cpu_state_mask(CPU_LOG_INT, cs, 0);
env->sregs[SR_MSR] &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c
index 9d5e6aa8a5..0019ebd18f 100644
--- a/target/microblaze/mmu.c
+++ b/target/microblaze/mmu.c
@@ -240,7 +240,8 @@ void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
i = env->mmu.regs[MMU_R_TLBX] & 0xff;
if (rn == MMU_R_TLBHI) {
if (i < 3 && !(v & TLB_VALID) && qemu_loglevel_mask(~0))
- qemu_log_mask(LOG_GUEST_ERROR, "invalidating index %x at pc=%x\n",
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "invalidating index %x at pc=%" PRIx64 "\n",
i, env->sregs[SR_PC]);
env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
mmu_flush_idx(env, i);
diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c
index f5e851e38d..4dc3aff84b 100644
--- a/target/microblaze/op_helper.c
+++ b/target/microblaze/op_helper.c
@@ -94,16 +94,17 @@ void helper_debug(CPUMBState *env)
{
int i;

- qemu_log("PC=%8.8x\n", env->sregs[SR_PC]);
- qemu_log("rmsr=%x resr=%x rear=%x debug[%x] imm=%x iflags=%x\n",
+ qemu_log("PC=%" PRIx64 "\n", env->sregs[SR_PC]);
+ qemu_log("rmsr=%" PRIx64 " resr=%" PRIx64 " rear=%" PRIx64 " "
+ "debug[%x] imm=%x iflags=%x\n",
env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
env->debug, env->imm, env->iflags);
qemu_log("btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
env->btaken, env->btarget,
(env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
(env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
- (env->sregs[SR_MSR] & MSR_EIP),
- (env->sregs[SR_MSR] & MSR_IE));
+ (bool)(env->sregs[SR_MSR] & MSR_EIP),
+ (bool)(env->sregs[SR_MSR] & MSR_IE));
for (i = 0; i < 32; i++) {
qemu_log("r%2.2d=%8.8x ", i, env->regs[i]);
if ((i + 1) % 4 == 0)
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 528450a8e2..fdbb08fb8f 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -54,7 +54,7 @@

static TCGv_i32 env_debug;
static TCGv_i32 cpu_R[32];
-static TCGv_i32 cpu_SR[14];
+static TCGv_i64 cpu_SR[14];
static TCGv_i32 env_imm;
static TCGv_i32 env_btaken;
static TCGv_i32 env_btarget;
@@ -123,7 +123,7 @@ static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
TCGv_i32 tmp = tcg_const_i32(index);

t_sync_flags(dc);
- tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
+ tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
gen_helper_raise_exception(cpu_env, tmp);
tcg_temp_free_i32(tmp);
dc->is_jmp = DISAS_UPDATE;
@@ -142,17 +142,18 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
if (use_goto_tb(dc, dest)) {
tcg_gen_goto_tb(n);
- tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
+ tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
tcg_gen_exit_tb((uintptr_t)dc->tb + n);
} else {
- tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
+ tcg_gen_movi_i64(cpu_SR[SR_PC], dest);
tcg_gen_exit_tb(0);
}
}

static void read_carry(DisasContext *dc, TCGv_i32 d)
{
- tcg_gen_shri_i32(d, cpu_SR[SR_MSR], 31);
+ tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
+ tcg_gen_shri_i32(d, d, 31);
}

/*
@@ -161,14 +162,12 @@ static void read_carry(DisasContext *dc, TCGv_i32 d)
*/
static void write_carry(DisasContext *dc, TCGv_i32 v)
{
- TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_shli_i32(t0, v, 31);
- tcg_gen_sari_i32(t0, t0, 31);
- tcg_gen_andi_i32(t0, t0, (MSR_C | MSR_CC));
- tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
- ~(MSR_C | MSR_CC));
- tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
- tcg_temp_free_i32(t0);
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(t0, v);
+ /* Deposit bit 0 into MSR_C and the alias MSR_CC. */
+ tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 2, 1);
+ tcg_gen_deposit_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0, 31, 1);
+ tcg_temp_free_i64(t0);
}

static void write_carryi(DisasContext *dc, bool carry)
@@ -187,7 +186,7 @@ static bool trap_illegal(DisasContext *dc, bool cond)
{
if (cond && (dc->tb_flags & MSR_EE_FLAG)
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
}
return cond;
@@ -203,7 +202,7 @@ static bool trap_userspace(DisasContext *dc, bool cond)
bool cond_user = cond && mem_index == MMU_USER_IDX;

if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
}
return cond_user;
@@ -438,20 +437,21 @@ static void dec_xor(DisasContext *dc)

static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
- tcg_gen_mov_i32(d, cpu_SR[SR_MSR]);
+ tcg_gen_extrl_i64_i32(d, cpu_SR[SR_MSR]);
}

static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
- TCGv_i32 t;
+ TCGv_i64 t;

- t = tcg_temp_new_i32();
+ t = tcg_temp_new_i64();
dc->cpustate_changed = 1;
/* PVR bit is not writable. */
- tcg_gen_andi_i32(t, v, ~MSR_PVR);
- tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
- tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
- tcg_temp_free(t);
+ tcg_gen_extu_i32_i64(t, v);
+ tcg_gen_andi_i64(t, t, ~MSR_PVR);
+ tcg_gen_andi_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
+ tcg_gen_or_i64(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
+ tcg_temp_free_i64(t);
}

static void dec_msr(DisasContext *dc)
@@ -501,7 +501,7 @@ static void dec_msr(DisasContext *dc)
msr_write(dc, t0);
tcg_temp_free_i32(t0);
tcg_temp_free_i32(t1);
- tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
+ tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
dc->is_jmp = DISAS_UPDATE;
return;
}
@@ -534,7 +534,7 @@ static void dec_msr(DisasContext *dc)
case SR_EAR:
case SR_ESR:
case SR_FSR:
- tcg_gen_mov_i32(cpu_SR[sr], cpu_R[dc->ra]);
+ tcg_gen_extu_i32_i64(cpu_SR[sr], cpu_R[dc->ra]);
break;
case 0x800:
tcg_gen_st_i32(cpu_R[dc->ra],
@@ -562,7 +562,7 @@ static void dec_msr(DisasContext *dc)
case SR_ESR:
case SR_FSR:
case SR_BTR:
- tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[sr]);
+ tcg_gen_extrl_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
break;
case 0x800:
tcg_gen_ld_i32(cpu_R[dc->rd],
@@ -735,7 +735,8 @@ static void dec_bit(DisasContext *dc)
t0 = tcg_temp_new_i32();

LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
- tcg_gen_andi_i32(t0, cpu_SR[SR_MSR], MSR_CC);
+ tcg_gen_extrl_i64_i32(t0, cpu_SR[SR_MSR]);
+ tcg_gen_andi_i32(t0, t0, MSR_CC);
write_carry(dc, cpu_R[dc->ra]);
if (dc->rd) {
tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
@@ -966,7 +967,7 @@ static void dec_load(DisasContext *dc)
tcg_gen_qemu_ld_i32(v, addr, cpu_mmu_index(&dc->cpu->env, false), mop);

if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
- tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
+ tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
gen_helper_memalign(cpu_env, addr, tcg_const_i32(dc->rd),
tcg_const_i32(0), tcg_const_i32(size - 1));
}
@@ -1078,7 +1079,7 @@ static void dec_store(DisasContext *dc)

/* Verify alignment if needed. */
if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
- tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
+ tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
/* FIXME: if the alignment is wrong, we should restore the value
* in memory. One possible way to achieve this is to probe
* the MMU prior to the memaccess, thay way we could put
@@ -1124,13 +1125,13 @@ static inline void eval_cc(DisasContext *dc, unsigned int cc,
}
}

-static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
+static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i64 pc_false)
{
TCGLabel *l1 = gen_new_label();
/* Conditional jmp. */
- tcg_gen_mov_i32(cpu_SR[SR_PC], pc_false);
+ tcg_gen_mov_i64(cpu_SR[SR_PC], pc_false);
tcg_gen_brcondi_i32(TCG_COND_EQ, env_btaken, 0, l1);
- tcg_gen_mov_i32(cpu_SR[SR_PC], pc_true);
+ tcg_gen_extu_i32_i64(cpu_SR[SR_PC], pc_true);
gen_set_label(l1);
}

@@ -1187,7 +1188,7 @@ static void dec_br(DisasContext *dc)
tcg_gen_st_i32(tmp_1, cpu_env,
-offsetof(MicroBlazeCPU, env)
+offsetof(CPUState, halted));
- tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
+ tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc + 4);
gen_helper_raise_exception(cpu_env, tmp_hlt);
tcg_temp_free_i32(tmp_hlt);
tcg_temp_free_i32(tmp_1);
@@ -1246,8 +1247,9 @@ static inline void do_rti(DisasContext *dc)
TCGv_i32 t0, t1;
t0 = tcg_temp_new_i32();
t1 = tcg_temp_new_i32();
- tcg_gen_shri_i32(t0, cpu_SR[SR_MSR], 1);
- tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_IE);
+ tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
+ tcg_gen_shri_i32(t0, t1, 1);
+ tcg_gen_ori_i32(t1, t1, MSR_IE);
tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
@@ -1263,7 +1265,8 @@ static inline void do_rtb(DisasContext *dc)
TCGv_i32 t0, t1;
t0 = tcg_temp_new_i32();
t1 = tcg_temp_new_i32();
- tcg_gen_andi_i32(t1, cpu_SR[SR_MSR], ~MSR_BIP);
+ tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
+ tcg_gen_andi_i32(t1, t1, ~MSR_BIP);
tcg_gen_shri_i32(t0, t1, 1);
tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

@@ -1281,7 +1284,8 @@ static inline void do_rte(DisasContext *dc)
t0 = tcg_temp_new_i32();
t1 = tcg_temp_new_i32();

- tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_EE);
+ tcg_gen_extrl_i64_i32(t1, cpu_SR[SR_MSR]);
+ tcg_gen_ori_i32(t1, t1, MSR_EE);
tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
tcg_gen_shri_i32(t0, t1, 1);
tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
@@ -1331,7 +1335,7 @@ static void dec_rts(DisasContext *dc)
static int dec_check_fpuv2(DisasContext *dc)
{
if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_FPU);
+ tcg_gen_movi_i64(cpu_SR[SR_ESR], ESR_EC_FPU);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
}
return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
@@ -1596,7 +1600,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)

#if SIM_COMPAT
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
- tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
+ tcg_gen_movi_i64(cpu_SR[SR_PC], dc->pc);
gen_helper_debug();
}
#endif
@@ -1638,7 +1642,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
dc->tb_flags &= ~D_FLAG;
/* If it is a direct jump, try direct chaining. */
if (dc->jmp == JMP_INDIRECT) {
- eval_cond_jmp(dc, env_btarget, tcg_const_i32(dc->pc));
+ eval_cond_jmp(dc, env_btarget, tcg_const_i64(dc->pc));
dc->is_jmp = DISAS_JUMP;
} else if (dc->jmp == JMP_DIRECT) {
t_sync_flags(dc);
@@ -1671,7 +1675,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
if (dc->tb_flags & D_FLAG) {
dc->is_jmp = DISAS_UPDATE;
- tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
+ tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
sync_jmpstate(dc);
} else
npc = dc->jmp_pc;
@@ -1683,7 +1687,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
if (dc->is_jmp == DISAS_NEXT
&& (dc->cpustate_changed || org_flags != dc->tb_flags)) {
dc->is_jmp = DISAS_UPDATE;
- tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
+ tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
}
t_sync_flags(dc);

@@ -1691,7 +1695,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

if (dc->is_jmp != DISAS_JUMP) {
- tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
+ tcg_gen_movi_i64(cpu_SR[SR_PC], npc);
}
gen_helper_raise_exception(cpu_env, tmp);
tcg_temp_free_i32(tmp);
@@ -1741,17 +1745,18 @@ void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
if (!env || !f)
return;

- cpu_fprintf(f, "IN: PC=%x %s\n",
+ cpu_fprintf(f, "IN: PC=%" PRIx64 " %s\n",
env->sregs[SR_PC], lookup_symbol(env->sregs[SR_PC]));
- cpu_fprintf(f, "rmsr=%x resr=%x rear=%x debug=%x imm=%x iflags=%x fsr=%x\n",
+ cpu_fprintf(f, "rmsr=%" PRIx64 " resr=%" PRIx64 " rear=%" PRIx64 " "
+ "debug=%x imm=%x iflags=%x fsr=%" PRIx64 "\n",
env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
env->btaken, env->btarget,
(env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
(env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
- (env->sregs[SR_MSR] & MSR_EIP),
- (env->sregs[SR_MSR] & MSR_IE));
+ (bool)(env->sregs[SR_MSR] & MSR_EIP),
+ (bool)(env->sregs[SR_MSR] & MSR_IE));

for (i = 0; i < 32; i++) {
cpu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
@@ -1792,7 +1797,7 @@ void mb_tcg_init(void)
regnames[i]);
}
for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
- cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
+ cpu_SR[i] = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUMBState, sregs[i]),
special_regnames[i]);
}
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:31 UTC
From: "Edgar E. Iglesias" <***@xilinx.com>

Implement MFSE EAR to enable access to the upper part of EAR.
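
A rough standalone sketch of the instruction's effect (an illustrative
model; the real patch does this with tcg_gen_extrh_i64_i32 on the 64-bit
EAR backing store):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* MFS reads the low 32 bits of EAR; MFSE reads the upper 32 bits. */
    static uint32_t read_ear(uint64_t ear, int extended)
    {
        return extended ? (uint32_t)(ear >> 32) : (uint32_t)ear;
    }

    int main(void)
    {
        uint64_t ear = UINT64_C(0x0000000312340000);

        printf("mfs  rX, rear -> 0x%08" PRIx32 "\n", read_ear(ear, 0));
        printf("mfse rX, rear -> 0x%08" PRIx32 "\n", read_ear(ear, 1));
        return 0;
    }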

Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 76f09e3f7e..03a0289858 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -459,7 +459,7 @@ static void dec_msr(DisasContext *dc)
CPUState *cs = CPU(dc->cpu);
TCGv_i32 t0, t1;
unsigned int sr, rn;
- bool to, clrset;
+ bool to, clrset, extended;

sr = extract32(dc->imm, 0, 14);
to = extract32(dc->imm, 14, 1);
@@ -467,6 +467,9 @@ static void dec_msr(DisasContext *dc)
dc->type_b = 1;
if (to) {
dc->cpustate_changed = 1;
+ extended = extract32(dc->imm, 24, 1);
+ } else {
+ extended = extract32(dc->imm, 19, 1);
}

/* msrclr and msrset. */
@@ -559,6 +562,10 @@ static void dec_msr(DisasContext *dc)
msr_read(dc, cpu_R[dc->rd]);
break;
case SR_EAR:
+ if (extended) {
+ tcg_gen_extrh_i64_i32(cpu_R[dc->rd], cpu_SR[sr]);
+ break;
+ }
case SR_ESR:
case SR_FSR:
case SR_BTR:
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:20 UTC
From: "Edgar E. Iglesias" <***@xilinx.com>

Reviewed-by: Alistair Francis <***@wdc.com>
Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 3431a07b99..5ef978e897 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -848,7 +848,7 @@ static void dec_imm(DisasContext *dc)
dc->clear_imm = 0;
}

-static inline void compute_ldst_addr(DisasContext *dc, TCGv_i32 *t)
+static inline void compute_ldst_addr(DisasContext *dc, TCGv_i32 t)
{
bool extimm = dc->tb_flags & IMM_FLAG;
/* Should be set to true if r1 is used by loadstores. */
@@ -863,10 +863,10 @@ static inline void compute_ldst_addr(DisasContext *dc, TCGv_i32 *t)
if (!dc->type_b) {
/* If any of the regs is r0, return the value of the other reg. */
if (dc->ra == 0) {
- tcg_gen_mov_i32(*t, cpu_R[dc->rb]);
+ tcg_gen_mov_i32(t, cpu_R[dc->rb]);
return;
} else if (dc->rb == 0) {
- tcg_gen_mov_i32(*t, cpu_R[dc->ra]);
+ tcg_gen_mov_i32(t, cpu_R[dc->ra]);
return;
}

@@ -874,27 +874,27 @@ static inline void compute_ldst_addr(DisasContext *dc, TCGv_i32 *t)
stackprot = true;
}

- tcg_gen_add_i32(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
+ tcg_gen_add_i32(t, cpu_R[dc->ra], cpu_R[dc->rb]);

if (stackprot) {
- gen_helper_stackprot(cpu_env, *t);
+ gen_helper_stackprot(cpu_env, t);
}
return;
}
/* Immediate. */
if (!extimm) {
if (dc->imm == 0) {
- tcg_gen_mov_i32(*t, cpu_R[dc->ra]);
+ tcg_gen_mov_i32(t, cpu_R[dc->ra]);
return;
}
- tcg_gen_movi_i32(*t, (int32_t)((int16_t)dc->imm));
- tcg_gen_add_i32(*t, cpu_R[dc->ra], *t);
+ tcg_gen_movi_i32(t, (int32_t)((int16_t)dc->imm));
+ tcg_gen_add_i32(t, cpu_R[dc->ra], t);
} else {
- tcg_gen_add_i32(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+ tcg_gen_add_i32(t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

if (stackprot) {
- gen_helper_stackprot(cpu_env, *t);
+ gen_helper_stackprot(cpu_env, t);
}
return;
}
@@ -929,7 +929,7 @@ static void dec_load(DisasContext *dc)

t_sync_flags(dc);
addr = tcg_temp_new_i32();
- compute_ldst_addr(dc, &addr);
+ compute_ldst_addr(dc, addr);

/*
* When doing reverse accesses we need to do two things.
@@ -1041,7 +1041,7 @@ static void dec_store(DisasContext *dc)
sync_jmpstate(dc);
/* SWX needs a temp_local. */
addr = ex ? tcg_temp_local_new_i32() : tcg_temp_new_i32();
- compute_ldst_addr(dc, &addr);
+ compute_ldst_addr(dc, addr);

if (ex) { /* swx */
TCGv_i32 tval;
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:41 UTC
From: "Edgar E. Iglesias" <***@xilinx.com>

Use a table-based conversion to map condition codes between
the MicroBlaze ISA encoding and TCG.
No functional change.
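
The same table-based dispatch idea in a compile-and-run form (designated
initializers indexed by the ISA encoding; the enum values here are
illustrative, not the actual MicroBlaze or TCG constants):

    #include <stdio.h>

    enum { CC_EQ = 2, CC_NE = 3, CC_LT = 4, CC_LE = 5, CC_GE = 6, CC_GT = 7 };

    /* One table lookup replaces a switch with one arm per condition. */
    static const char *cc_name[] = {
        [CC_EQ] = "==", [CC_NE] = "!=", [CC_LT] = "<",
        [CC_LE] = "<=", [CC_GE] = ">=", [CC_GT] = ">",
    };

    int main(void)
    {
        for (int cc = CC_EQ; cc <= CC_GT; cc++) {
            printf("cc %d -> %s\n", cc, cc_name[cc]);
        }
        return 0;
    }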

Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 41 ++++++++++++++++++++---------------------
1 file changed, 20 insertions(+), 21 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 44395cf189..ed0b6fa881 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -1145,28 +1145,27 @@ static void dec_store(DisasContext *dc)
static inline void eval_cc(DisasContext *dc, unsigned int cc,
TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
+ static const int mb_to_tcg_cc[] = {
+ [CC_EQ] = TCG_COND_EQ,
+ [CC_NE] = TCG_COND_NE,
+ [CC_LT] = TCG_COND_LT,
+ [CC_LE] = TCG_COND_LE,
+ [CC_GE] = TCG_COND_GE,
+ [CC_GT] = TCG_COND_GT,
+ };
+
switch (cc) {
- case CC_EQ:
- tcg_gen_setcond_i32(TCG_COND_EQ, d, a, b);
- break;
- case CC_NE:
- tcg_gen_setcond_i32(TCG_COND_NE, d, a, b);
- break;
- case CC_LT:
- tcg_gen_setcond_i32(TCG_COND_LT, d, a, b);
- break;
- case CC_LE:
- tcg_gen_setcond_i32(TCG_COND_LE, d, a, b);
- break;
- case CC_GE:
- tcg_gen_setcond_i32(TCG_COND_GE, d, a, b);
- break;
- case CC_GT:
- tcg_gen_setcond_i32(TCG_COND_GT, d, a, b);
- break;
- default:
- cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
- break;
+ case CC_EQ:
+ case CC_NE:
+ case CC_LT:
+ case CC_LE:
+ case CC_GE:
+ case CC_GT:
+ tcg_gen_setcond_i32(mb_to_tcg_cc[cc], d, a, b);
+ break;
+ default:
+ cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
+ break;
}
}
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:37 UTC
From: "Edgar E. Iglesias" <***@xilinx.com>

Add support for extended access to TLBLO's upper 32 bits.
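
A standalone sketch of the 32-bit-halves access pattern that
extract64()/deposit64() give us here; these are illustrative models of the
helpers, not the QEMU implementations, and 'ext' selects which half of the
64-bit TLB entry is touched:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t deposit64_model(uint64_t v, int start, int len, uint64_t field)
    {
        uint64_t mask = (~UINT64_C(0) >> (64 - len)) << start;
        return (v & ~mask) | ((field << start) & mask);
    }

    static uint32_t extract64_model(uint64_t v, int start, int len)
    {
        return (uint32_t)((v >> start) & (~UINT64_C(0) >> (64 - len)));
    }

    int main(void)
    {
        uint64_t tlblo = 0;
        int ext = 1;                            /* extended (upper half) access */

        tlblo = deposit64_model(tlblo, ext * 32, 32, 0xABCD);   /* upper half */
        tlblo = deposit64_model(tlblo, 0, 32, 0x12345678);      /* lower half */

        printf("entry = 0x%016" PRIx64 "\n", tlblo);
        printf("high  = 0x%08" PRIx32 "\n", extract64_model(tlblo, 32, 32));
        printf("low   = 0x%08" PRIx32 "\n", extract64_model(tlblo, 0, 32));
        return 0;
    }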

Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/helper.h | 4 ++--
target/microblaze/mmu.c | 18 ++++++++++++++----
target/microblaze/mmu.h | 4 ++--
target/microblaze/op_helper.c | 8 ++++----
target/microblaze/translate.c | 19 +++++++++++++------
5 files changed, 35 insertions(+), 18 deletions(-)

diff --git a/target/microblaze/helper.h b/target/microblaze/helper.h
index ce70353936..2f8bdea22b 100644
--- a/target/microblaze/helper.h
+++ b/target/microblaze/helper.h
@@ -25,8 +25,8 @@ DEF_HELPER_3(fcmp_ge, i32, env, i32, i32)

DEF_HELPER_FLAGS_2(pcmpbf, TCG_CALL_NO_RWG_SE, i32, i32, i32)
#if !defined(CONFIG_USER_ONLY)
-DEF_HELPER_2(mmu_read, i32, env, i32)
-DEF_HELPER_3(mmu_write, void, env, i32, i32)
+DEF_HELPER_3(mmu_read, i32, env, i32, i32)
+DEF_HELPER_4(mmu_write, void, env, i32, i32, i32)
#endif

DEF_HELPER_5(memalign, void, env, tl, i32, i32, i32)
diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c
index 166c79908c..9ecffb2c9c 100644
--- a/target/microblaze/mmu.c
+++ b/target/microblaze/mmu.c
@@ -180,7 +180,7 @@ done:
}

/* Writes/reads to the MMU's special regs end up here. */
-uint32_t mmu_read(CPUMBState *env, uint32_t rn)
+uint32_t mmu_read(CPUMBState *env, bool ext, uint32_t rn)
{
unsigned int i;
uint32_t r = 0;
@@ -189,6 +189,10 @@ uint32_t mmu_read(CPUMBState *env, uint32_t rn)
qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
return 0;
}
+ if (ext && rn != MMU_R_TLBLO) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
+ return 0;
+ }

switch (rn) {
/* Reads to HI/LO trig reads from the mmu rams. */
@@ -200,7 +204,7 @@ uint32_t mmu_read(CPUMBState *env, uint32_t rn)
}

i = env->mmu.regs[MMU_R_TLBX] & 0xff;
- r = env->mmu.rams[rn & 1][i];
+ r = extract64(env->mmu.rams[rn & 1][i], ext * 32, 32);
if (rn == MMU_R_TLBHI)
env->mmu.regs[MMU_R_PID] = env->mmu.tids[i];
break;
@@ -226,9 +230,10 @@ uint32_t mmu_read(CPUMBState *env, uint32_t rn)
return r;
}

-void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
+void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v)
{
MicroBlazeCPU *cpu = mb_env_get_cpu(env);
+ uint64_t tmp64;
unsigned int i;
D(qemu_log("%s rn=%d=%x old=%x\n", __func__, rn, v, env->mmu.regs[rn]));

@@ -236,6 +241,10 @@ void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
return;
}
+ if (ext && rn != MMU_R_TLBLO) {
+ qemu_log_mask(LOG_GUEST_ERROR, "Extended access only to TLBLO.\n");
+ return;
+ }

switch (rn) {
/* Writes to HI/LO trig writes to the mmu rams. */
@@ -250,7 +259,8 @@ void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
env->mmu.tids[i] = env->mmu.regs[MMU_R_PID] & 0xff;
mmu_flush_idx(env, i);
}
- env->mmu.rams[rn & 1][i] = v;
+ tmp64 = env->mmu.rams[rn & 1][i];
+ env->mmu.rams[rn & 1][i] = deposit64(tmp64, ext * 32, 32, v);

D(qemu_log("%s ram[%d][%d]=%x\n", __func__, rn & 1, i, v));
break;
diff --git a/target/microblaze/mmu.h b/target/microblaze/mmu.h
index 9fbdf38f36..a4272b6356 100644
--- a/target/microblaze/mmu.h
+++ b/target/microblaze/mmu.h
@@ -90,6 +90,6 @@ struct microblaze_mmu_lookup
unsigned int mmu_translate(struct microblaze_mmu *mmu,
struct microblaze_mmu_lookup *lu,
target_ulong vaddr, int rw, int mmu_idx);
-uint32_t mmu_read(CPUMBState *env, uint32_t rn);
-void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v);
+uint32_t mmu_read(CPUMBState *env, bool ea, uint32_t rn);
+void mmu_write(CPUMBState *env, bool ea, uint32_t rn, uint32_t v);
void mmu_init(struct microblaze_mmu *mmu);
diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c
index 4dc3aff84b..ddc1f71d62 100644
--- a/target/microblaze/op_helper.c
+++ b/target/microblaze/op_helper.c
@@ -476,14 +476,14 @@ void helper_stackprot(CPUMBState *env, target_ulong addr)

#if !defined(CONFIG_USER_ONLY)
/* Writes/reads to the MMU's special regs end up here. */
-uint32_t helper_mmu_read(CPUMBState *env, uint32_t rn)
+uint32_t helper_mmu_read(CPUMBState *env, uint32_t ext, uint32_t rn)
{
- return mmu_read(env, rn);
+ return mmu_read(env, ext, rn);
}

-void helper_mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
+void helper_mmu_write(CPUMBState *env, uint32_t ext, uint32_t rn, uint32_t v)
{
- mmu_write(env, rn, v);
+ mmu_write(env, ext, rn, v);
}

void mb_cpu_unassigned_access(CPUState *cs, hwaddr addr,
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index cf1b87c09e..39c4d0654e 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -459,7 +459,7 @@ static void dec_msr(DisasContext *dc)
CPUState *cs = CPU(dc->cpu);
TCGv_i32 t0, t1;
unsigned int sr, rn;
- bool to, clrset, extended;
+ bool to, clrset, extended = false;

sr = extract32(dc->imm, 0, 14);
to = extract32(dc->imm, 14, 1);
@@ -467,9 +467,14 @@ static void dec_msr(DisasContext *dc)
dc->type_b = 1;
if (to) {
dc->cpustate_changed = 1;
- extended = extract32(dc->imm, 24, 1);
- } else {
- extended = extract32(dc->imm, 19, 1);
+ }
+
+ /* Extended MSRs are only available if addr_size > 32. */
+ if (dc->cpu->cfg.addr_size > 32) {
+ /* The E-bit is encoded differently for To/From MSR. */
+ static const unsigned int e_bit[] = { 19, 24 };
+
+ extended = extract32(dc->imm, e_bit[to], 1);
}

/* msrclr and msrset. */
@@ -516,17 +521,19 @@ static void dec_msr(DisasContext *dc)
#if !defined(CONFIG_USER_ONLY)
/* Catch read/writes to the mmu block. */
if ((sr & ~0xff) == 0x1000) {
+ TCGv_i32 tmp_ext = tcg_const_i32(extended);
TCGv_i32 tmp_sr;

sr &= 7;
tmp_sr = tcg_const_i32(sr);
LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
if (to) {
- gen_helper_mmu_write(cpu_env, tmp_sr, cpu_R[dc->ra]);
+ gen_helper_mmu_write(cpu_env, tmp_ext, tmp_sr, cpu_R[dc->ra]);
} else {
- gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_sr);
+ gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_ext, tmp_sr);
}
tcg_temp_free_i32(tmp_sr);
+ tcg_temp_free_i32(tmp_ext);
return;
}
#endif
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:10 UTC
From: "Edgar E. Iglesias" <***@xilinx.com>

Use bool instead of unsigned int to represent flags.
Also, use extract32 instead of open coding the bit extract.

No functional change.

Reviewed-by: Alistair Francis <***@wdc.com>
Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index a8a5eaebec..413e683aec 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -1027,14 +1027,15 @@ static void dec_store(DisasContext *dc)
{
TCGv t, *addr, swx_addr;
TCGLabel *swx_skip = NULL;
- unsigned int size, rev = 0, ex = 0;
+ unsigned int size;
+ bool rev = false, ex = false;
TCGMemOp mop;

mop = dc->opcode & 3;
size = 1 << mop;
if (!dc->type_b) {
- rev = (dc->ir >> 9) & 1;
- ex = (dc->ir >> 10) & 1;
+ rev = extract32(dc->ir, 9, 1);
+ ex = extract32(dc->ir, 10, 1);
}
mop |= MO_TE;
if (rev) {
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:40 UTC
From: "Edgar E. Iglesias" <***@xilinx.com>

Clean up the debug log messages:
* Avoid long 80+ character lines.
* Remove the D() macro and use qemu_log_mask.
* Remove logs that are not very useful.

Suggested-by: Alistair Francis <***@wdc.com>
Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/mmu.c | 39 +++++++++++++++++++--------------------
1 file changed, 19 insertions(+), 20 deletions(-)

diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c
index 9ecffb2c9c..f4ceaea520 100644
--- a/target/microblaze/mmu.c
+++ b/target/microblaze/mmu.c
@@ -22,8 +22,6 @@
#include "cpu.h"
#include "exec/exec-all.h"

-#define D(x)
-
static unsigned int tlb_decode_size(unsigned int f)
{
static const unsigned int sizes[] = {
@@ -90,25 +88,20 @@ unsigned int mmu_translate(struct microblaze_mmu *mmu,

/* Lookup and decode. */
t = mmu->rams[RAM_TAG][i];
- D(qemu_log("TLB %d valid=%" PRId64 "\n", i, t & TLB_VALID));
if (t & TLB_VALID) {
tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
if (tlb_size < TARGET_PAGE_SIZE) {
- qemu_log("%d pages not supported\n", tlb_size);
+ qemu_log_mask(LOG_UNIMP, "%d pages not supported\n", tlb_size);
abort();
}

mask = ~((uint64_t)tlb_size - 1);
tlb_tag = t & TLB_EPN_MASK;
if ((vaddr & mask) != (tlb_tag & mask)) {
- D(qemu_log("TLB %d vaddr=%" PRIx64 " != tag=%" PRIx64 "\n",
- i, vaddr & mask, tlb_tag & mask));
continue;
}
if (mmu->tids[i]
&& ((mmu->regs[MMU_R_PID] & 0xff) != mmu->tids[i])) {
- D(qemu_log("TLB %d pid=%x != tid=%x\n",
- i, mmu->regs[MMU_R_PID], mmu->tids[i]));
continue;
}

@@ -123,7 +116,8 @@ unsigned int mmu_translate(struct microblaze_mmu *mmu,
t0 &= 0x3;

if (tlb_zsel > mmu->c_mmu_zones) {
- qemu_log_mask(LOG_GUEST_ERROR, "tlb zone select out of range! %d\n", tlb_zsel);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "tlb zone select out of range! %d\n", tlb_zsel);
t0 = 1; /* Ignore. */
}

@@ -174,8 +168,9 @@ unsigned int mmu_translate(struct microblaze_mmu *mmu,
}
}
done:
- D(qemu_log("MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
- vaddr, rw, tlb_wr, tlb_ex, hit));
+ qemu_log_mask(CPU_LOG_MMU,
+ "MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
+ vaddr, rw, tlb_wr, tlb_ex, hit);
return hit;
}

@@ -199,7 +194,8 @@ uint32_t mmu_read(CPUMBState *env, bool ext, uint32_t rn)
case MMU_R_TLBLO:
case MMU_R_TLBHI:
if (!(env->mmu.c_mmu_tlb_access & 1)) {
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Invalid access to MMU reg %d\n", rn);
return 0;
}

@@ -211,7 +207,8 @@ uint32_t mmu_read(CPUMBState *env, bool ext, uint32_t rn)
case MMU_R_PID:
case MMU_R_ZPR:
if (!(env->mmu.c_mmu_tlb_access & 1)) {
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Invalid access to MMU reg %d\n", rn);
return 0;
}
r = env->mmu.regs[rn];
@@ -226,7 +223,7 @@ uint32_t mmu_read(CPUMBState *env, bool ext, uint32_t rn)
qemu_log_mask(LOG_GUEST_ERROR, "Invalid MMU register %d.\n", rn);
break;
}
- D(qemu_log("%s rn=%d=%x\n", __func__, rn, r));
+ qemu_log_mask(CPU_LOG_MMU, "%s rn=%d=%x\n", __func__, rn, r);
return r;
}

@@ -235,7 +232,8 @@ void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v)
MicroBlazeCPU *cpu = mb_env_get_cpu(env);
uint64_t tmp64;
unsigned int i;
- D(qemu_log("%s rn=%d=%x old=%x\n", __func__, rn, v, env->mmu.regs[rn]));
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s rn=%d=%x old=%x\n", __func__, rn, v, env->mmu.regs[rn]);

if (env->mmu.c_mmu < 2 || !env->mmu.c_mmu_tlb_access) {
qemu_log_mask(LOG_GUEST_ERROR, "MMU access on MMU-less system\n");
@@ -261,12 +259,11 @@ void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v)
}
tmp64 = env->mmu.rams[rn & 1][i];
env->mmu.rams[rn & 1][i] = deposit64(tmp64, ext * 32, 32, v);
-
- D(qemu_log("%s ram[%d][%d]=%x\n", __func__, rn & 1, i, v));
break;
case MMU_R_ZPR:
if (env->mmu.c_mmu_tlb_access <= 1) {
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Invalid access to MMU reg %d\n", rn);
return;
}

@@ -279,7 +276,8 @@ void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v)
break;
case MMU_R_PID:
if (env->mmu.c_mmu_tlb_access <= 1) {
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Invalid access to MMU reg %d\n", rn);
return;
}

@@ -298,7 +296,8 @@ void mmu_write(CPUMBState *env, bool ext, uint32_t rn, uint32_t v)
int hit;

if (env->mmu.c_mmu_tlb_access <= 1) {
- qemu_log_mask(LOG_GUEST_ERROR, "Invalid access to MMU reg %d\n", rn);
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "Invalid access to MMU reg %d\n", rn);
return;
}
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:26 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Reuse more code when decoding register numbers.

No functional changes.
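
The 0x2000 ... 0x200c label below relies on case-range syntax, a GNU C
extension (also accepted by clang) rather than ISO C. A minimal
standalone sketch; pvr_index() is purely illustrative and not part of
the patch:

    #include <assert.h>

    /* Case ranges ("low ... high") collapse the thirteen PVR cases
       into a single label; the real decoder is dec_msr() below. */
    static int pvr_index(unsigned int sr)
    {
        switch (sr) {
        case 0x2000 ... 0x200c:   /* PVR0 through PVR12 */
            return sr & 0xf;
        default:
            return -1;
        }
    }

    int main(void)
    {
        assert(pvr_index(0x2003) == 3);
        assert(pvr_index(0x200d) == -1);
        return 0;
    }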

Reviewed-by: Alistair Francis <***@wdc.com>
Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 38 +++++++++-----------------------------
1 file changed, 9 insertions(+), 29 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index f623617fa0..675db78030 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -531,11 +531,9 @@ static void dec_msr(DisasContext *dc)
case 1:
msr_write(dc, cpu_R[dc->ra]);
break;
- case 0x3:
- tcg_gen_mov_i32(cpu_SR[SR_EAR], cpu_R[dc->ra]);
- break;
- case 0x5:
- tcg_gen_mov_i32(cpu_SR[SR_ESR], cpu_R[dc->ra]);
+ case SR_EAR:
+ case SR_ESR:
+ tcg_gen_mov_i32(cpu_SR[sr], cpu_R[dc->ra]);
break;
case 0x7:
tcg_gen_andi_i32(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
@@ -562,17 +560,11 @@ static void dec_msr(DisasContext *dc)
case 1:
msr_read(dc, cpu_R[dc->rd]);
break;
- case 0x3:
- tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_EAR]);
- break;
- case 0x5:
- tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_ESR]);
- break;
- case 0x7:
- tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_FSR]);
- break;
- case 0xb:
- tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_BTR]);
+ case SR_EAR:
+ case SR_ESR:
+ case SR_FSR:
+ case SR_BTR:
+ tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[sr]);
break;
case 0x800:
tcg_gen_ld_i32(cpu_R[dc->rd],
@@ -582,19 +574,7 @@ static void dec_msr(DisasContext *dc)
tcg_gen_ld_i32(cpu_R[dc->rd],
cpu_env, offsetof(CPUMBState, shr));
break;
- case 0x2000:
- case 0x2001:
- case 0x2002:
- case 0x2003:
- case 0x2004:
- case 0x2005:
- case 0x2006:
- case 0x2007:
- case 0x2008:
- case 0x2009:
- case 0x200a:
- case 0x200b:
- case 0x200c:
+ case 0x2000 ... 0x200c:
rn = sr & 0xf;
tcg_gen_ld_i32(cpu_R[dc->rd],
cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:43 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Convert env_btarget to i64.
No functional change.
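
Since env_btarget becomes 64 bits wide, PC-relative targets are now
computed in 64-bit arithmetic and masked back with UINT32_MAX, which
preserves the 32-bit wraparound. A tiny plain-C sketch of that masking
(the values are made up for illustration):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* pc + offset may carry into bits [63:32] of the i64 temp... */
        uint64_t btarget = (uint64_t)0xFFFFFFF0u + 0x20u;
        /* ...so the translator masks it back down, as the
           tcg_gen_andi_i64(..., UINT32_MAX) calls in the patch do. */
        btarget &= UINT32_MAX;
        assert(btarget == 0x10);
        return 0;
    }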

Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/cpu.h | 2 +-
target/microblaze/op_helper.c | 2 +-
target/microblaze/translate.c | 36 +++++++++++++++++++++++-------------
3 files changed, 25 insertions(+), 15 deletions(-)

diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index e62c456ccf..e38580cd7f 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -239,7 +239,7 @@ typedef struct CPUMBState CPUMBState;
struct CPUMBState {
uint32_t debug;
uint32_t btaken;
- uint32_t btarget;
+ uint64_t btarget;
uint32_t bimm;

uint32_t imm;
diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c
index ddc1f71d62..7cdbbcccae 100644
--- a/target/microblaze/op_helper.c
+++ b/target/microblaze/op_helper.c
@@ -99,7 +99,7 @@ void helper_debug(CPUMBState *env)
"debug[%x] imm=%x iflags=%x\n",
env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
env->debug, env->imm, env->iflags);
- qemu_log("btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
+ qemu_log("btaken=%d btarget=%" PRIx64 " mode=%s(saved=%s) eip=%d ie=%d\n",
env->btaken, env->btarget,
(env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
(env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index a35683c8c9..a846797d9c 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -57,7 +57,7 @@ static TCGv_i32 cpu_R[32];
static TCGv_i64 cpu_SR[14];
static TCGv_i32 env_imm;
static TCGv_i32 env_btaken;
-static TCGv_i32 env_btarget;
+static TCGv_i64 env_btarget;
static TCGv_i32 env_iflags;
static TCGv env_res_addr;
static TCGv_i32 env_res_val;
@@ -831,7 +831,7 @@ static inline void sync_jmpstate(DisasContext *dc)
tcg_gen_movi_i32(env_btaken, 1);
}
dc->jmp = JMP_INDIRECT;
- tcg_gen_movi_i32(env_btarget, dc->jmp_pc);
+ tcg_gen_movi_i64(env_btarget, dc->jmp_pc);
}
}

@@ -1169,13 +1169,13 @@ static inline void eval_cc(DisasContext *dc, unsigned int cc,
}
}

-static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i64 pc_false)
+static void eval_cond_jmp(DisasContext *dc, TCGv_i64 pc_true, TCGv_i64 pc_false)
{
TCGLabel *l1 = gen_new_label();
/* Conditional jmp. */
tcg_gen_mov_i64(cpu_SR[SR_PC], pc_false);
tcg_gen_brcondi_i32(TCG_COND_EQ, env_btaken, 0, l1);
- tcg_gen_extu_i32_i64(cpu_SR[SR_PC], pc_true);
+ tcg_gen_mov_i64(cpu_SR[SR_PC], pc_true);
gen_set_label(l1);
}

@@ -1199,13 +1199,14 @@ static void dec_bcc(DisasContext *dc)
if (dec_alu_op_b_is_small_imm(dc)) {
int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */

- tcg_gen_movi_i32(env_btarget, dc->pc + offset);
+ tcg_gen_movi_i64(env_btarget, dc->pc + offset);
dc->jmp = JMP_DIRECT_CC;
dc->jmp_pc = dc->pc + offset;
} else {
dc->jmp = JMP_INDIRECT;
- tcg_gen_movi_i32(env_btarget, dc->pc);
- tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
+ tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
+ tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
+ tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
}
eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
}
@@ -1262,7 +1263,7 @@ static void dec_br(DisasContext *dc)
dc->jmp = JMP_INDIRECT;
if (abs) {
tcg_gen_movi_i32(env_btaken, 1);
- tcg_gen_mov_i32(env_btarget, *(dec_alu_op_b(dc)));
+ tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
if (link && !dslot) {
if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
t_gen_raise_exception(dc, EXCP_BREAK);
@@ -1280,8 +1281,9 @@ static void dec_br(DisasContext *dc)
dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
} else {
tcg_gen_movi_i32(env_btaken, 1);
- tcg_gen_movi_i32(env_btarget, dc->pc);
- tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
+ tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
+ tcg_gen_addi_i64(env_btarget, env_btarget, dc->pc);
+ tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
}
}
}
@@ -1345,6 +1347,7 @@ static inline void do_rte(DisasContext *dc)
static void dec_rts(DisasContext *dc)
{
unsigned int b_bit, i_bit, e_bit;
+ TCGv_i64 tmp64;

i_bit = dc->ir & (1 << 21);
b_bit = dc->ir & (1 << 22);
@@ -1373,7 +1376,13 @@ static void dec_rts(DisasContext *dc)

dc->jmp = JMP_INDIRECT;
tcg_gen_movi_i32(env_btaken, 1);
- tcg_gen_add_i32(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(env_btarget, *(dec_alu_op_b(dc)));
+ tcg_gen_extu_i32_i64(tmp64, cpu_R[dc->ra]);
+ tcg_gen_add_i64(env_btarget, env_btarget, tmp64);
+ tcg_gen_andi_i64(env_btarget, env_btarget, UINT32_MAX);
+ tcg_temp_free_i64(tmp64);
}

static int dec_check_fpuv2(DisasContext *dc)
@@ -1795,7 +1804,8 @@ void mb_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
"debug=%x imm=%x iflags=%x fsr=%" PRIx64 "\n",
env->sregs[SR_MSR], env->sregs[SR_ESR], env->sregs[SR_EAR],
env->debug, env->imm, env->iflags, env->sregs[SR_FSR]);
- cpu_fprintf(f, "btaken=%d btarget=%x mode=%s(saved=%s) eip=%d ie=%d\n",
+ cpu_fprintf(f, "btaken=%d btarget=%" PRIx64 " mode=%s(saved=%s) "
+ "eip=%d ie=%d\n",
env->btaken, env->btarget,
(env->sregs[SR_MSR] & MSR_UM) ? "user" : "kernel",
(env->sregs[SR_MSR] & MSR_UMS) ? "user" : "kernel",
@@ -1823,7 +1833,7 @@ void mb_tcg_init(void)
env_imm = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMBState, imm),
"imm");
- env_btarget = tcg_global_mem_new_i32(cpu_env,
+ env_btarget = tcg_global_mem_new_i64(cpu_env,
offsetof(CPUMBState, btarget),
"btarget");
env_btaken = tcg_global_mem_new_i32(cpu_env,
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:18 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Bypass MMU translation when the MMU index MMU_NOMMU_IDX is used.

Reviewed-by: Alistair Francis <***@wdc.com>
Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/helper.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
index a9f4ca93e3..261dcc74c7 100644
--- a/target/microblaze/helper.c
+++ b/target/microblaze/helper.c
@@ -58,7 +58,8 @@ int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int prot;

/* Translate if the MMU is available and enabled. */
- if (cpu->cfg.use_mmu && (env->sregs[SR_MSR] & MSR_VM)) {
+ if (cpu->cfg.use_mmu && (env->sregs[SR_MSR] & MSR_VM)
+ && mmu_idx != MMU_NOMMU_IDX) {
uint32_t vaddr, paddr;
struct microblaze_mmu_lookup lu;
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:12 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Today, when running QEMU in linux-user mode or with boards that don't
select a specific CPU version, we treat it as an invalid version
and log a message.

Instead, if no specific version was selected, fall back to our
latest CPU version.

Reviewed-by: Alistair Francis <***@wdc.com>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/cpu.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index 4dc1404800..06476f6efc 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -72,6 +72,9 @@ static const struct {
{NULL, 0},
};

+/* If no specific version gets selected, default to the following. */
+#define DEFAULT_CPU_VERSION "10.0"
+
static void mb_cpu_set_pc(CPUState *cs, vaddr value)
{
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
@@ -141,6 +144,7 @@ static void mb_cpu_realizefn(DeviceState *dev, Error **errp)
MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
CPUMBState *env = &cpu->env;
uint8_t version_code = 0;
+ const char *version;
int i = 0;
Error *local_err = NULL;

@@ -162,8 +166,9 @@ static void mb_cpu_realizefn(DeviceState *dev, Error **errp)
| PVR2_FPU_EXC_MASK \
| 0;

- for (i = 0; mb_cpu_lookup[i].name && cpu->cfg.version; i++) {
- if (strcmp(mb_cpu_lookup[i].name, cpu->cfg.version) == 0) {
+ version = cpu->cfg.version ? cpu->cfg.version : DEFAULT_CPU_VERSION;
+ for (i = 0; mb_cpu_lookup[i].name && version; i++) {
+ if (strcmp(mb_cpu_lookup[i].name, version) == 0) {
version_code = mb_cpu_lookup[i].version_id;
break;
}
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:38 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Allow address sizes between 32 and 64 bits.

Reviewed-by: Alistair Francis <***@wdc.com>
Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/cpu.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index d0649fdaaa..8c1f850ab1 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -155,9 +155,8 @@ static void mb_cpu_realizefn(DeviceState *dev, Error **errp)
return;
}

- if (cpu->cfg.addr_size != 32) {
- error_setg(errp, "addr-size %d is out of range. "
- "Only 32bit is supported.",
+ if (cpu->cfg.addr_size < 32 || cpu->cfg.addr_size > 64) {
+ error_setg(errp, "addr-size %d is out of range (32 - 64)",
cpu->cfg.addr_size);
return;
}
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:34 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Prepare for 64-bit addresses.
This makes no functional difference as the upper parts of
the 64-bit addresses are not yet reachable.
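
For reference, a standalone sketch of what the widened masks evaluate
to, assuming MAKE_64BIT_MASK(shift, length) has its usual definition
from include/qemu/bitops.h:

    #include <assert.h>

    /* Same construction as QEMU's MAKE_64BIT_MASK(shift, length). */
    #define MAKE_64BIT_MASK(shift, length) \
        (((~0ULL) >> (64 - (length))) << (shift))

    int main(void)
    {
        /* TLB_EPN_MASK and TLB_RPN_MASK now cover bits [63:10] instead
           of the old 32-bit 0xFFFFFC00, which only covered [31:10]. */
        assert(MAKE_64BIT_MASK(10, 64 - 10) == 0xFFFFFFFFFFFFFC00ULL);
        return 0;
    }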

Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/mmu.c | 14 +++++++-------
target/microblaze/mmu.h | 6 +++---
2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c
index 231803ceea..a379968618 100644
--- a/target/microblaze/mmu.c
+++ b/target/microblaze/mmu.c
@@ -81,16 +81,16 @@ unsigned int mmu_translate(struct microblaze_mmu *mmu,
{
unsigned int i, hit = 0;
unsigned int tlb_ex = 0, tlb_wr = 0, tlb_zsel;
- unsigned int tlb_size;
- uint32_t tlb_tag, tlb_rpn, mask, t0;
+ uint64_t tlb_tag, tlb_rpn, mask;
+ uint32_t tlb_size, t0;

lu->err = ERR_MISS;
for (i = 0; i < ARRAY_SIZE(mmu->rams[RAM_TAG]); i++) {
- uint32_t t, d;
+ uint64_t t, d;

/* Lookup and decode. */
t = mmu->rams[RAM_TAG][i];
- D(qemu_log("TLB %d valid=%d\n", i, t & TLB_VALID));
+ D(qemu_log("TLB %d valid=%" PRId64 "\n", i, t & TLB_VALID));
if (t & TLB_VALID) {
tlb_size = tlb_decode_size((t & TLB_PAGESZ_MASK) >> 7);
if (tlb_size < TARGET_PAGE_SIZE) {
@@ -98,10 +98,10 @@ unsigned int mmu_translate(struct microblaze_mmu *mmu,
abort();
}

- mask = ~(tlb_size - 1);
+ mask = ~((uint64_t)tlb_size - 1);
tlb_tag = t & TLB_EPN_MASK;
if ((vaddr & mask) != (tlb_tag & mask)) {
- D(qemu_log("TLB %d vaddr=%x != tag=%x\n",
+ D(qemu_log("TLB %d vaddr=%" PRIx64 " != tag=%" PRIx64 "\n",
i, vaddr & mask, tlb_tag & mask));
continue;
}
@@ -173,7 +173,7 @@ unsigned int mmu_translate(struct microblaze_mmu *mmu,
}
}
done:
- D(qemu_log("MMU vaddr=%x rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
+ D(qemu_log("MMU vaddr=%" PRIx64 " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n",
vaddr, rw, tlb_wr, tlb_ex, hit));
return hit;
}
diff --git a/target/microblaze/mmu.h b/target/microblaze/mmu.h
index 624becfded..1714caf82e 100644
--- a/target/microblaze/mmu.h
+++ b/target/microblaze/mmu.h
@@ -28,7 +28,7 @@
#define RAM_TAG 0

/* Tag portion */
-#define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */
+#define TLB_EPN_MASK MAKE_64BIT_MASK(10, 64 - 10)
#define TLB_PAGESZ_MASK 0x00000380
#define TLB_PAGESZ(x) (((x) & 0x7) << 7)
#define PAGESZ_1K 0
@@ -42,7 +42,7 @@
#define TLB_VALID 0x00000040 /* Entry is valid */

/* Data portion */
-#define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */
+#define TLB_RPN_MASK MAKE_64BIT_MASK(10, 64 - 10)
#define TLB_PERM_MASK 0x00000300
#define TLB_EX 0x00000200 /* Instruction execution allowed */
#define TLB_WR 0x00000100 /* Writes permitted */
@@ -63,7 +63,7 @@
struct microblaze_mmu
{
/* Data and tag brams. */
- uint32_t rams[2][TLB_ENTRIES];
+ uint64_t rams[2][TLB_ENTRIES];
/* We keep a separate ram for the tids to avoid the 48 bit tag width. */
uint8_t tids[TLB_ENTRIES];
/* Control flops. */
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:14 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Correct the PVR array size; there are 13 PVR registers.

Reviewed-by: Alistair Francis <***@wdc.com>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/cpu.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index 994496515f..2304c24b7d 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -277,7 +277,7 @@ struct CPUMBState {
/* These fields are preserved on reset. */

struct {
- uint32_t regs[16];
+ uint32_t regs[13];
} pvr;
};
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:39 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Simplify address computation using tcg_gen_addi_i32().
tcg_gen_addi_i32() already optimizes the case when the
immediate is zero.

No functional change.

Suggested-by: Richard Henderson <***@linaro.org>
Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 7 +------
1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 39c4d0654e..44395cf189 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -899,12 +899,7 @@ static inline void compute_ldst_addr(DisasContext *dc, bool ea, TCGv t)
/* Immediate. */
t32 = tcg_temp_new_i32();
if (!extimm) {
- if (dc->imm == 0) {
- tcg_gen_mov_i32(t32, cpu_R[dc->ra]);
- } else {
- tcg_gen_movi_i32(t32, (int32_t)((int16_t)dc->imm));
- tcg_gen_add_i32(t32, cpu_R[dc->ra], t32);
- }
+ tcg_gen_addi_i32(t32, cpu_R[dc->ra], (int16_t)dc->imm);
} else {
tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:21 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Use TCGv for load/store addresses, allowing for future
computation of 64-bit load/store addresses.

No functional change.

Acked-by: Alistair Francis <***@wdc.com>
Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/cpu.h | 2 +-
target/microblaze/helper.h | 4 +--
target/microblaze/op_helper.c | 11 +++---
target/microblaze/translate.c | 78 ++++++++++++++++++++++++-------------------
4 files changed, 53 insertions(+), 42 deletions(-)

diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index 2304c24b7d..1593496997 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -250,7 +250,7 @@ struct CPUMBState {

/* lwx/swx reserved address */
#define RES_ADDR_NONE 0xffffffff /* Use 0xffffffff to indicate no reservation */
- uint32_t res_addr;
+ target_ulong res_addr;
uint32_t res_val;

/* Internal flags. */
diff --git a/target/microblaze/helper.h b/target/microblaze/helper.h
index 71a6c0858d..ce70353936 100644
--- a/target/microblaze/helper.h
+++ b/target/microblaze/helper.h
@@ -29,8 +29,8 @@ DEF_HELPER_2(mmu_read, i32, env, i32)
DEF_HELPER_3(mmu_write, void, env, i32, i32)
#endif

-DEF_HELPER_5(memalign, void, env, i32, i32, i32, i32)
-DEF_HELPER_2(stackprot, void, env, i32)
+DEF_HELPER_5(memalign, void, env, tl, i32, i32, i32)
+DEF_HELPER_2(stackprot, void, env, tl)

DEF_HELPER_2(get, i32, i32, i32)
DEF_HELPER_3(put, void, i32, i32, i32)
diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c
index 1b4fe796e7..f5e851e38d 100644
--- a/target/microblaze/op_helper.c
+++ b/target/microblaze/op_helper.c
@@ -439,12 +439,14 @@ uint32_t helper_pcmpbf(uint32_t a, uint32_t b)
return 0;
}

-void helper_memalign(CPUMBState *env, uint32_t addr, uint32_t dr, uint32_t wr,
+void helper_memalign(CPUMBState *env, target_ulong addr,
+ uint32_t dr, uint32_t wr,
uint32_t mask)
{
if (addr & mask) {
qemu_log_mask(CPU_LOG_INT,
- "unaligned access addr=%x mask=%x, wr=%d dr=r%d\n",
+ "unaligned access addr=" TARGET_FMT_lx
+ " mask=%x, wr=%d dr=r%d\n",
addr, mask, wr, dr);
env->sregs[SR_EAR] = addr;
env->sregs[SR_ESR] = ESR_EC_UNALIGNED_DATA | (wr << 10) \
@@ -459,10 +461,11 @@ void helper_memalign(CPUMBState *env, uint32_t addr, uint32_t dr, uint32_t wr,
}
}

-void helper_stackprot(CPUMBState *env, uint32_t addr)
+void helper_stackprot(CPUMBState *env, target_ulong addr)
{
if (addr < env->slr || addr > env->shr) {
- qemu_log_mask(CPU_LOG_INT, "Stack protector violation at %x %x %x\n",
+ qemu_log_mask(CPU_LOG_INT, "Stack protector violation at "
+ TARGET_FMT_lx " %x %x\n",
addr, env->slr, env->shr);
env->sregs[SR_EAR] = addr;
env->sregs[SR_ESR] = ESR_EC_STACKPROT;
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 5ef978e897..a3cc1e0ef1 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -59,7 +59,7 @@ static TCGv_i32 env_imm;
static TCGv_i32 env_btaken;
static TCGv_i32 env_btarget;
static TCGv_i32 env_iflags;
-static TCGv_i32 env_res_addr;
+static TCGv env_res_addr;
static TCGv_i32 env_res_val;

#include "exec/gen-icount.h"
@@ -848,11 +848,12 @@ static void dec_imm(DisasContext *dc)
dc->clear_imm = 0;
}

-static inline void compute_ldst_addr(DisasContext *dc, TCGv_i32 t)
+static inline void compute_ldst_addr(DisasContext *dc, TCGv t)
{
bool extimm = dc->tb_flags & IMM_FLAG;
/* Should be set to true if r1 is used by loadstores. */
bool stackprot = false;
+ TCGv_i32 t32;

/* All load/stores use ra. */
if (dc->ra == 1 && dc->cpu->cfg.stackprot) {
@@ -863,10 +864,10 @@ static inline void compute_ldst_addr(DisasContext *dc, TCGv_i32 t)
if (!dc->type_b) {
/* If any of the regs is r0, return the value of the other reg. */
if (dc->ra == 0) {
- tcg_gen_mov_i32(t, cpu_R[dc->rb]);
+ tcg_gen_extu_i32_tl(t, cpu_R[dc->rb]);
return;
} else if (dc->rb == 0) {
- tcg_gen_mov_i32(t, cpu_R[dc->ra]);
+ tcg_gen_extu_i32_tl(t, cpu_R[dc->ra]);
return;
}

@@ -874,7 +875,10 @@ static inline void compute_ldst_addr(DisasContext *dc, TCGv_i32 t)
stackprot = true;
}

- tcg_gen_add_i32(t, cpu_R[dc->ra], cpu_R[dc->rb]);
+ t32 = tcg_temp_new_i32();
+ tcg_gen_add_i32(t32, cpu_R[dc->ra], cpu_R[dc->rb]);
+ tcg_gen_extu_i32_tl(t, t32);
+ tcg_temp_free_i32(t32);

if (stackprot) {
gen_helper_stackprot(cpu_env, t);
@@ -882,16 +886,19 @@ static inline void compute_ldst_addr(DisasContext *dc, TCGv_i32 t)
return;
}
/* Immediate. */
+ t32 = tcg_temp_new_i32();
if (!extimm) {
if (dc->imm == 0) {
- tcg_gen_mov_i32(t, cpu_R[dc->ra]);
- return;
+ tcg_gen_mov_i32(t32, cpu_R[dc->ra]);
+ } else {
+ tcg_gen_movi_i32(t32, (int32_t)((int16_t)dc->imm));
+ tcg_gen_add_i32(t32, cpu_R[dc->ra], t32);
}
- tcg_gen_movi_i32(t, (int32_t)((int16_t)dc->imm));
- tcg_gen_add_i32(t, cpu_R[dc->ra], t);
} else {
- tcg_gen_add_i32(t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+ tcg_gen_add_i32(t32, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}
+ tcg_gen_extu_i32_tl(t, t32);
+ tcg_temp_free_i32(t32);

if (stackprot) {
gen_helper_stackprot(cpu_env, t);
@@ -901,7 +908,8 @@ static inline void compute_ldst_addr(DisasContext *dc, TCGv_i32 t)

static void dec_load(DisasContext *dc)
{
- TCGv_i32 v, addr;
+ TCGv_i32 v;
+ TCGv addr;
unsigned int size;
bool rev = false, ex = false;
TCGMemOp mop;
@@ -928,7 +936,7 @@ static void dec_load(DisasContext *dc)
ex ? "x" : "");

t_sync_flags(dc);
- addr = tcg_temp_new_i32();
+ addr = tcg_temp_new();
compute_ldst_addr(dc, addr);

/*
@@ -946,20 +954,20 @@ static void dec_load(DisasContext *dc)
01 -> 10
10 -> 10
11 -> 00 */
- TCGv_i32 low = tcg_temp_new_i32();
+ TCGv low = tcg_temp_new();

- tcg_gen_andi_i32(low, addr, 3);
- tcg_gen_sub_i32(low, tcg_const_i32(3), low);
- tcg_gen_andi_i32(addr, addr, ~3);
- tcg_gen_or_i32(addr, addr, low);
- tcg_temp_free_i32(low);
+ tcg_gen_andi_tl(low, addr, 3);
+ tcg_gen_sub_tl(low, tcg_const_tl(3), low);
+ tcg_gen_andi_tl(addr, addr, ~3);
+ tcg_gen_or_tl(addr, addr, low);
+ tcg_temp_free(low);
break;
}

case 2:
/* 00 -> 10
10 -> 00. */
- tcg_gen_xori_i32(addr, addr, 2);
+ tcg_gen_xori_tl(addr, addr, 2);
break;
default:
cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
@@ -969,7 +977,7 @@ static void dec_load(DisasContext *dc)

/* lwx does not throw unaligned access errors, so force alignment */
if (ex) {
- tcg_gen_andi_i32(addr, addr, ~3);
+ tcg_gen_andi_tl(addr, addr, ~3);
}

/* If we get a fault on a dslot, the jmpstate better be in sync. */
@@ -992,7 +1000,7 @@ static void dec_load(DisasContext *dc)
}

if (ex) {
- tcg_gen_mov_i32(env_res_addr, addr);
+ tcg_gen_mov_tl(env_res_addr, addr);
tcg_gen_mov_i32(env_res_val, v);
}
if (dc->rd) {
@@ -1005,12 +1013,12 @@ static void dec_load(DisasContext *dc)
write_carryi(dc, 0);
}

- tcg_temp_free_i32(addr);
+ tcg_temp_free(addr);
}

static void dec_store(DisasContext *dc)
{
- TCGv_i32 addr;
+ TCGv addr;
TCGLabel *swx_skip = NULL;
unsigned int size;
bool rev = false, ex = false;
@@ -1040,18 +1048,18 @@ static void dec_store(DisasContext *dc)
/* If we get a fault on a dslot, the jmpstate better be in sync. */
sync_jmpstate(dc);
/* SWX needs a temp_local. */
- addr = ex ? tcg_temp_local_new_i32() : tcg_temp_new_i32();
+ addr = ex ? tcg_temp_local_new() : tcg_temp_new();
compute_ldst_addr(dc, addr);

if (ex) { /* swx */
TCGv_i32 tval;

/* swx does not throw unaligned access errors, so force alignment */
- tcg_gen_andi_i32(addr, addr, ~3);
+ tcg_gen_andi_tl(addr, addr, ~3);

write_carryi(dc, 1);
swx_skip = gen_new_label();
- tcg_gen_brcond_i32(TCG_COND_NE, env_res_addr, addr, swx_skip);
+ tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, addr, swx_skip);

/* Compare the value loaded at lwx with current contents of
the reserved location.
@@ -1075,13 +1083,13 @@ static void dec_store(DisasContext *dc)
01 -> 10
10 -> 10
11 -> 00 */
- TCGv_i32 low = tcg_temp_new_i32();
+ TCGv low = tcg_temp_new();

- tcg_gen_andi_i32(low, addr, 3);
- tcg_gen_sub_i32(low, tcg_const_i32(3), low);
- tcg_gen_andi_i32(addr, addr, ~3);
- tcg_gen_or_i32(addr, addr, low);
- tcg_temp_free_i32(low);
+ tcg_gen_andi_tl(low, addr, 3);
+ tcg_gen_sub_tl(low, tcg_const_tl(3), low);
+ tcg_gen_andi_tl(addr, addr, ~3);
+ tcg_gen_or_tl(addr, addr, low);
+ tcg_temp_free(low);
break;
}

@@ -1089,7 +1097,7 @@ static void dec_store(DisasContext *dc)
/* 00 -> 10
10 -> 00. */
/* Force addr into the temp. */
- tcg_gen_xori_i32(addr, addr, 2);
+ tcg_gen_xori_tl(addr, addr, 2);
break;
default:
cpu_abort(CPU(dc->cpu), "Invalid reverse size\n");
@@ -1116,7 +1124,7 @@ static void dec_store(DisasContext *dc)
gen_set_label(swx_skip);
}

- tcg_temp_free_i32(addr);
+ tcg_temp_free(addr);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
@@ -1834,7 +1842,7 @@ void mb_tcg_init(void)
env_btaken = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMBState, btaken),
"btaken");
- env_res_addr = tcg_global_mem_new_i32(cpu_env,
+ env_res_addr = tcg_global_mem_new(cpu_env,
offsetof(CPUMBState, res_addr),
"res_addr");
env_res_val = tcg_global_mem_new_i32(cpu_env,
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:32 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Add R_TBLX_MISS MASK and SHIFT macros.
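
A short sketch of how the new macros read next to the old 0x80000000
literal; tlbx_miss() is only for illustration and is not part of the
patch:

    #include <assert.h>
    #include <stdint.h>

    /* Mirrors the definitions added to mmu.h. */
    #define R_TBLX_MISS_SHIFT 31
    #define R_TBLX_MISS_MASK  (1U << R_TBLX_MISS_SHIFT)

    /* Extract the miss bit from an MMU_R_TLBX value. */
    static unsigned int tlbx_miss(uint32_t tlbx)
    {
        return (tlbx & R_TBLX_MISS_MASK) >> R_TBLX_MISS_SHIFT;
    }

    int main(void)
    {
        assert(tlbx_miss(0x80000005u) == 1);
        assert(tlbx_miss(0x00000005u) == 0);
        return 0;
    }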

Reviewed-by: Alistair Francis <***@wdc.com>
Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/mmu.c | 5 +++--
target/microblaze/mmu.h | 4 ++++
2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c
index 0019ebd18f..f4a4c339c9 100644
--- a/target/microblaze/mmu.c
+++ b/target/microblaze/mmu.c
@@ -292,8 +292,9 @@ void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
v & TLB_EPN_MASK, 0, cpu_mmu_index(env, false));
if (hit) {
env->mmu.regs[MMU_R_TLBX] = lu.idx;
- } else
- env->mmu.regs[MMU_R_TLBX] |= 0x80000000;
+ } else {
+ env->mmu.regs[MMU_R_TLBX] |= R_TBLX_MISS_MASK;
+ }
break;
}
default:
diff --git a/target/microblaze/mmu.h b/target/microblaze/mmu.h
index 3b7a9983d5..113539c6e9 100644
--- a/target/microblaze/mmu.h
+++ b/target/microblaze/mmu.h
@@ -54,6 +54,10 @@
#define TLB_M 0x00000002 /* Memory is coherent */
#define TLB_G 0x00000001 /* Memory is guarded from prefetch */

+/* TLBX */
+#define R_TBLX_MISS_SHIFT 31
+#define R_TBLX_MISS_MASK (1U << R_TBLX_MISS_SHIFT)
+
#define TLB_ENTRIES 64

struct microblaze_mmu
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:23 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Break out trap_userspace() to avoid open-coding it everywhere.
For privileged insns, we now always stop translation of the
current insn for cores without exceptions.

Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 76 +++++++++++++++----------------------------
1 file changed, 27 insertions(+), 49 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 12cb345f64..8f72cf39fb 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -179,6 +179,22 @@ static void write_carryi(DisasContext *dc, bool carry)
tcg_temp_free_i32(t0);
}

+/*
+ * Returns true if the insn is illegal in userspace.
+ * If exceptions are enabled, an exception is raised.
+ */
+static bool trap_userspace(DisasContext *dc, bool cond)
+{
+ int mem_index = cpu_mmu_index(&dc->cpu->env, false);
+ bool cond_user = cond && mem_index == MMU_USER_IDX;
+
+ if (cond_user && (dc->tb_flags & MSR_EE_FLAG)) {
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ }
+ return cond_user;
+}
+
/* True if ALU operand b is a small immediate that may deserve
faster treatment. */
static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
@@ -432,7 +448,6 @@ static void dec_msr(DisasContext *dc)
CPUState *cs = CPU(dc->cpu);
TCGv_i32 t0, t1;
unsigned int sr, to, rn;
- int mem_index = cpu_mmu_index(&dc->cpu->env, false);

sr = dc->imm & ((1 << 14) - 1);
to = dc->imm & (1 << 14);
@@ -452,10 +467,7 @@ static void dec_msr(DisasContext *dc)
return;
}

- if ((dc->tb_flags & MSR_EE_FLAG)
- && mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ if (trap_userspace(dc, dc->imm != 4 && dc->imm != 0)) {
return;
}

@@ -480,13 +492,8 @@ static void dec_msr(DisasContext *dc)
return;
}

- if (to) {
- if ((dc->tb_flags & MSR_EE_FLAG)
- && mem_index == MMU_USER_IDX) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
- return;
- }
+ if (trap_userspace(dc, to)) {
+ return;
}

#if !defined(CONFIG_USER_ONLY)
@@ -738,7 +745,6 @@ static void dec_bit(DisasContext *dc)
CPUState *cs = CPU(dc->cpu);
TCGv_i32 t0;
unsigned int op;
- int mem_index = cpu_mmu_index(&dc->cpu->env, false);

op = dc->ir & ((1 << 9) - 1);
switch (op) {
@@ -784,22 +790,12 @@ static void dec_bit(DisasContext *dc)
case 0x76:
/* wdc. */
LOG_DIS("wdc r%d\n", dc->ra);
- if ((dc->tb_flags & MSR_EE_FLAG)
- && mem_index == MMU_USER_IDX) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
- return;
- }
+ trap_userspace(dc, true);
break;
case 0x68:
/* wic. */
LOG_DIS("wic r%d\n", dc->ra);
- if ((dc->tb_flags & MSR_EE_FLAG)
- && mem_index == MMU_USER_IDX) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
- return;
- }
+ trap_userspace(dc, true);
break;
case 0xe0:
if ((dc->tb_flags & MSR_EE_FLAG)
@@ -1199,7 +1195,6 @@ static void dec_bcc(DisasContext *dc)
static void dec_br(DisasContext *dc)
{
unsigned int dslot, link, abs, mbar;
- int mem_index = cpu_mmu_index(&dc->cpu->env, false);

dslot = dc->ir & (1 << 20);
abs = dc->ir & (1 << 19);
@@ -1254,9 +1249,7 @@ static void dec_br(DisasContext *dc)
if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
t_gen_raise_exception(dc, EXCP_BREAK);
if (dc->imm == 0) {
- if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ if (trap_userspace(dc, true)) {
return;
}

@@ -1331,12 +1324,15 @@ static inline void do_rte(DisasContext *dc)
static void dec_rts(DisasContext *dc)
{
unsigned int b_bit, i_bit, e_bit;
- int mem_index = cpu_mmu_index(&dc->cpu->env, false);

i_bit = dc->ir & (1 << 21);
b_bit = dc->ir & (1 << 22);
e_bit = dc->ir & (1 << 23);

+ if (trap_userspace(dc, i_bit || b_bit || e_bit)) {
+ return;
+ }
+
dc->delayed_branch = 2;
dc->tb_flags |= D_FLAG;
tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
@@ -1344,27 +1340,12 @@ static void dec_rts(DisasContext *dc)

if (i_bit) {
LOG_DIS("rtid ir=%x\n", dc->ir);
- if ((dc->tb_flags & MSR_EE_FLAG)
- && mem_index == MMU_USER_IDX) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
- }
dc->tb_flags |= DRTI_FLAG;
} else if (b_bit) {
LOG_DIS("rtbd ir=%x\n", dc->ir);
- if ((dc->tb_flags & MSR_EE_FLAG)
- && mem_index == MMU_USER_IDX) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
- }
dc->tb_flags |= DRTB_FLAG;
} else if (e_bit) {
LOG_DIS("rted ir=%x\n", dc->ir);
- if ((dc->tb_flags & MSR_EE_FLAG)
- && mem_index == MMU_USER_IDX) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
- }
dc->tb_flags |= DRTE_FLAG;
} else
LOG_DIS("rts ir=%x\n", dc->ir);
@@ -1503,16 +1484,13 @@ static void dec_null(DisasContext *dc)
/* Insns connected to FSL or AXI stream attached devices. */
static void dec_stream(DisasContext *dc)
{
- int mem_index = cpu_mmu_index(&dc->cpu->env, false);
TCGv_i32 t_id, t_ctrl;
int ctrl;

LOG_DIS("%s%s imm=%x\n", dc->rd ? "get" : "put",
dc->type_b ? "" : "d", dc->imm);

- if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
- tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
- t_gen_raise_exception(dc, EXCP_HW_EXCP);
+ if (trap_userspace(dc, true)) {
return;
}
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:09 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Use bool instead of unsigned int to represent flags.
No functional change.

Reviewed-by: Alistair Francis <***@wdc.com>
Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 0872dc9ded..a8a5eaebec 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -897,14 +897,15 @@ static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
static void dec_load(DisasContext *dc)
{
TCGv t, v, *addr;
- unsigned int size, rev = 0, ex = 0;
+ unsigned int size;
+ bool rev = false, ex = false;
TCGMemOp mop;

mop = dc->opcode & 3;
size = 1 << mop;
if (!dc->type_b) {
- rev = (dc->ir >> 9) & 1;
- ex = (dc->ir >> 10) & 1;
+ rev = extract32(dc->ir, 9, 1);
+ ex = extract32(dc->ir, 10, 1);
}
mop |= MO_TE;
if (rev) {
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:46 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Consolidate the MMU-enabled checks into cpu_mmu_index().
No functional changes.

Suggested-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/cpu.h | 4 +++-
target/microblaze/helper.c | 6 +++---
2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index c77ca2d8f9..3c4e0ba80a 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -360,8 +360,10 @@ int cpu_mb_signal_handler(int host_signum, void *pinfo,

static inline int cpu_mmu_index (CPUMBState *env, bool ifetch)
{
+ MicroBlazeCPU *cpu = mb_env_get_cpu(env);
+
/* Are we in nommu mode?. */
- if (!(env->sregs[SR_MSR] & MSR_VM)) {
+ if (!(env->sregs[SR_MSR] & MSR_VM) || !cpu->cfg.use_mmu) {
return MMU_NOMMU_IDX;
}

diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
index 985bdae8d1..bc753793ec 100644
--- a/target/microblaze/helper.c
+++ b/target/microblaze/helper.c
@@ -58,8 +58,7 @@ int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,
int prot;

/* Translate if the MMU is available and enabled. */
- if (cpu->cfg.use_mmu && (env->sregs[SR_MSR] & MSR_VM)
- && mmu_idx != MMU_NOMMU_IDX) {
+ if (mmu_idx != MMU_NOMMU_IDX) {
uint32_t vaddr, paddr;
struct microblaze_mmu_lookup lu;

@@ -270,9 +269,10 @@ hwaddr mb_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
CPUMBState *env = &cpu->env;
target_ulong vaddr, paddr = 0;
struct microblaze_mmu_lookup lu;
+ int mmu_idx = cpu_mmu_index(env, false);
unsigned int hit;

- if (env->sregs[SR_MSR] & MSR_VM) {
+ if (mmu_idx != MMU_NOMMU_IDX) {
hit = mmu_translate(&env->mmu, &lu, addr, 0, 0);
if (hit) {
vaddr = addr & TARGET_PAGE_MASK;
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:45 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Fix up the indentation of cpu_mmu_index() in preparation for
future edits.
No functional changes.

Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/cpu.h | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index e38580cd7f..c77ca2d8f9 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -360,13 +360,15 @@ int cpu_mb_signal_handler(int host_signum, void *pinfo,

static inline int cpu_mmu_index (CPUMBState *env, bool ifetch)
{
- /* Are we in nommu mode?. */
- if (!(env->sregs[SR_MSR] & MSR_VM))
- return MMU_NOMMU_IDX;
-
- if (env->sregs[SR_MSR] & MSR_UM)
- return MMU_USER_IDX;
- return MMU_KERNEL_IDX;
+ /* Are we in nommu mode?. */
+ if (!(env->sregs[SR_MSR] & MSR_VM)) {
+ return MMU_NOMMU_IDX;
+ }
+
+ if (env->sregs[SR_MSR] & MSR_UM) {
+ return MMU_USER_IDX;
+ }
+ return MMU_KERNEL_IDX;
}

int mb_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size, int rw,
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:15 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Tighten up TCGv_i32 vs TCGv type usage. Avoid using TCGv when
TCGv_i32 should be used.

This is in preparation for adding 64-bit addressing support.
No functional change.

Reviewed-by: Alistair Francis <***@wdc.com>
Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/helper.c | 2 +-
target/microblaze/translate.c | 581 +++++++++++++++++++++---------------------
2 files changed, 295 insertions(+), 288 deletions(-)

diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c
index fac6ee9263..387d4aca5a 100644
--- a/target/microblaze/helper.c
+++ b/target/microblaze/helper.c
@@ -69,7 +69,7 @@ int mb_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size, int rw,

/* Translate if the MMU is available and enabled. */
if (mmu_available && (env->sregs[SR_MSR] & MSR_VM)) {
- target_ulong vaddr, paddr;
+ uint32_t vaddr, paddr;
struct microblaze_mmu_lookup lu;

hit = mmu_translate(&env->mmu, &lu, address, rw, mmu_idx);
diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 9614f15d58..2e9a286af6 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -52,22 +52,22 @@
#define DISAS_UPDATE DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

-static TCGv env_debug;
-static TCGv cpu_R[32];
-static TCGv cpu_SR[14];
-static TCGv env_imm;
-static TCGv env_btaken;
-static TCGv env_btarget;
-static TCGv env_iflags;
-static TCGv env_res_addr;
-static TCGv env_res_val;
+static TCGv_i32 env_debug;
+static TCGv_i32 cpu_R[32];
+static TCGv_i32 cpu_SR[14];
+static TCGv_i32 env_imm;
+static TCGv_i32 env_btaken;
+static TCGv_i32 env_btarget;
+static TCGv_i32 env_iflags;
+static TCGv_i32 env_res_addr;
+static TCGv_i32 env_res_val;

#include "exec/gen-icount.h"

/* This is the state at translation time. */
typedef struct DisasContext {
MicroBlazeCPU *cpu;
- target_ulong pc;
+ uint32_t pc;

/* Decoder. */
int type_b;
@@ -113,7 +113,7 @@ static inline void t_sync_flags(DisasContext *dc)
{
/* Synch the tb dependent flags between translator and runtime. */
if (dc->tb_flags != dc->synced_flags) {
- tcg_gen_movi_tl(env_iflags, dc->tb_flags);
+ tcg_gen_movi_i32(env_iflags, dc->tb_flags);
dc->synced_flags = dc->tb_flags;
}
}
@@ -123,7 +123,7 @@ static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
TCGv_i32 tmp = tcg_const_i32(index);

t_sync_flags(dc);
- tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
+ tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
gen_helper_raise_exception(cpu_env, tmp);
tcg_temp_free_i32(tmp);
dc->is_jmp = DISAS_UPDATE;
@@ -142,41 +142,41 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
if (use_goto_tb(dc, dest)) {
tcg_gen_goto_tb(n);
- tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
+ tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
tcg_gen_exit_tb((uintptr_t)dc->tb + n);
} else {
- tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
+ tcg_gen_movi_i32(cpu_SR[SR_PC], dest);
tcg_gen_exit_tb(0);
}
}

-static void read_carry(DisasContext *dc, TCGv d)
+static void read_carry(DisasContext *dc, TCGv_i32 d)
{
- tcg_gen_shri_tl(d, cpu_SR[SR_MSR], 31);
+ tcg_gen_shri_i32(d, cpu_SR[SR_MSR], 31);
}

/*
* write_carry sets the carry bits in MSR based on bit 0 of v.
* v[31:1] are ignored.
*/
-static void write_carry(DisasContext *dc, TCGv v)
+static void write_carry(DisasContext *dc, TCGv_i32 v)
{
- TCGv t0 = tcg_temp_new();
- tcg_gen_shli_tl(t0, v, 31);
- tcg_gen_sari_tl(t0, t0, 31);
- tcg_gen_andi_tl(t0, t0, (MSR_C | MSR_CC));
- tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_shli_i32(t0, v, 31);
+ tcg_gen_sari_i32(t0, t0, 31);
+ tcg_gen_andi_i32(t0, t0, (MSR_C | MSR_CC));
+ tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR],
~(MSR_C | MSR_CC));
- tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
- tcg_temp_free(t0);
+ tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t0);
+ tcg_temp_free_i32(t0);
}

static void write_carryi(DisasContext *dc, bool carry)
{
- TCGv t0 = tcg_temp_new();
- tcg_gen_movi_tl(t0, carry);
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_movi_i32(t0, carry);
write_carry(dc, t0);
- tcg_temp_free(t0);
+ tcg_temp_free_i32(t0);
}

/* True if ALU operand b is a small immediate that may deserve
@@ -187,13 +187,13 @@ static inline int dec_alu_op_b_is_small_imm(DisasContext *dc)
return dc->type_b && !(dc->tb_flags & IMM_FLAG);
}

-static inline TCGv *dec_alu_op_b(DisasContext *dc)
+static inline TCGv_i32 *dec_alu_op_b(DisasContext *dc)
{
if (dc->type_b) {
if (dc->tb_flags & IMM_FLAG)
- tcg_gen_ori_tl(env_imm, env_imm, dc->imm);
+ tcg_gen_ori_i32(env_imm, env_imm, dc->imm);
else
- tcg_gen_movi_tl(env_imm, (int32_t)((int16_t)dc->imm));
+ tcg_gen_movi_i32(env_imm, (int32_t)((int16_t)dc->imm));
return &env_imm;
} else
return &cpu_R[dc->rb];
@@ -202,7 +202,7 @@ static inline TCGv *dec_alu_op_b(DisasContext *dc)
static void dec_add(DisasContext *dc)
{
unsigned int k, c;
- TCGv cf;
+ TCGv_i32 cf;

k = dc->opcode & 4;
c = dc->opcode & 2;
@@ -216,15 +216,15 @@ static void dec_add(DisasContext *dc)
/* k - keep carry, no need to update MSR. */
/* If rd == r0, it's a nop. */
if (dc->rd) {
- tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+ tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));

if (c) {
/* c - Add carry into the result. */
- cf = tcg_temp_new();
+ cf = tcg_temp_new_i32();

read_carry(dc, cf);
- tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
- tcg_temp_free(cf);
+ tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
+ tcg_temp_free_i32(cf);
}
}
return;
@@ -232,31 +232,31 @@ static void dec_add(DisasContext *dc)

/* From now on, we can assume k is zero. So we need to update MSR. */
/* Extract carry. */
- cf = tcg_temp_new();
+ cf = tcg_temp_new_i32();
if (c) {
read_carry(dc, cf);
} else {
- tcg_gen_movi_tl(cf, 0);
+ tcg_gen_movi_i32(cf, 0);
}

if (dc->rd) {
- TCGv ncf = tcg_temp_new();
+ TCGv_i32 ncf = tcg_temp_new_i32();
gen_helper_carry(ncf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
- tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
- tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
+ tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+ tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
write_carry(dc, ncf);
- tcg_temp_free(ncf);
+ tcg_temp_free_i32(ncf);
} else {
gen_helper_carry(cf, cpu_R[dc->ra], *(dec_alu_op_b(dc)), cf);
write_carry(dc, cf);
}
- tcg_temp_free(cf);
+ tcg_temp_free_i32(cf);
}

static void dec_sub(DisasContext *dc)
{
unsigned int u, cmp, k, c;
- TCGv cf, na;
+ TCGv_i32 cf, na;

u = dc->imm & 2;
k = dc->opcode & 4;
@@ -282,15 +282,15 @@ static void dec_sub(DisasContext *dc)
/* k - keep carry, no need to update MSR. */
/* If rd == r0, it's a nop. */
if (dc->rd) {
- tcg_gen_sub_tl(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);
+ tcg_gen_sub_i32(cpu_R[dc->rd], *(dec_alu_op_b(dc)), cpu_R[dc->ra]);

if (c) {
/* c - Add carry into the result. */
- cf = tcg_temp_new();
+ cf = tcg_temp_new_i32();

read_carry(dc, cf);
- tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
- tcg_temp_free(cf);
+ tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
+ tcg_temp_free_i32(cf);
}
}
return;
@@ -298,30 +298,30 @@ static void dec_sub(DisasContext *dc)

/* From now on, we can assume k is zero. So we need to update MSR. */
/* Extract carry. And complement a into na. */
- cf = tcg_temp_new();
- na = tcg_temp_new();
+ cf = tcg_temp_new_i32();
+ na = tcg_temp_new_i32();
if (c) {
read_carry(dc, cf);
} else {
- tcg_gen_movi_tl(cf, 1);
+ tcg_gen_movi_i32(cf, 1);
}

/* d = b + ~a + c. carry defaults to 1. */
- tcg_gen_not_tl(na, cpu_R[dc->ra]);
+ tcg_gen_not_i32(na, cpu_R[dc->ra]);

if (dc->rd) {
- TCGv ncf = tcg_temp_new();
+ TCGv_i32 ncf = tcg_temp_new_i32();
gen_helper_carry(ncf, na, *(dec_alu_op_b(dc)), cf);
- tcg_gen_add_tl(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
- tcg_gen_add_tl(cpu_R[dc->rd], cpu_R[dc->rd], cf);
+ tcg_gen_add_i32(cpu_R[dc->rd], na, *(dec_alu_op_b(dc)));
+ tcg_gen_add_i32(cpu_R[dc->rd], cpu_R[dc->rd], cf);
write_carry(dc, ncf);
- tcg_temp_free(ncf);
+ tcg_temp_free_i32(ncf);
} else {
gen_helper_carry(cf, na, *(dec_alu_op_b(dc)), cf);
write_carry(dc, cf);
}
- tcg_temp_free(cf);
- tcg_temp_free(na);
+ tcg_temp_free_i32(cf);
+ tcg_temp_free_i32(na);
}

static void dec_pattern(DisasContext *dc)
@@ -331,7 +331,7 @@ static void dec_pattern(DisasContext *dc)
if ((dc->tb_flags & MSR_EE_FLAG)
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
&& !dc->cpu->cfg.use_pcmp_instr) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
}

@@ -346,14 +346,14 @@ static void dec_pattern(DisasContext *dc)
case 2:
LOG_DIS("pcmpeq r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
if (dc->rd) {
- tcg_gen_setcond_tl(TCG_COND_EQ, cpu_R[dc->rd],
+ tcg_gen_setcond_i32(TCG_COND_EQ, cpu_R[dc->rd],
cpu_R[dc->ra], cpu_R[dc->rb]);
}
break;
case 3:
LOG_DIS("pcmpne r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
if (dc->rd) {
- tcg_gen_setcond_tl(TCG_COND_NE, cpu_R[dc->rd],
+ tcg_gen_setcond_i32(TCG_COND_NE, cpu_R[dc->rd],
cpu_R[dc->ra], cpu_R[dc->rb]);
}
break;
@@ -380,9 +380,9 @@ static void dec_and(DisasContext *dc)
return;

if (not) {
- tcg_gen_andc_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+ tcg_gen_andc_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
} else
- tcg_gen_and_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+ tcg_gen_and_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_or(DisasContext *dc)
@@ -394,7 +394,7 @@ static void dec_or(DisasContext *dc)

LOG_DIS("or r%d r%d r%d imm=%x\n", dc->rd, dc->ra, dc->rb, dc->imm);
if (dc->rd)
- tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+ tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static void dec_xor(DisasContext *dc)
@@ -406,31 +406,31 @@ static void dec_xor(DisasContext *dc)

LOG_DIS("xor r%d\n", dc->rd);
if (dc->rd)
- tcg_gen_xor_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+ tcg_gen_xor_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

-static inline void msr_read(DisasContext *dc, TCGv d)
+static inline void msr_read(DisasContext *dc, TCGv_i32 d)
{
- tcg_gen_mov_tl(d, cpu_SR[SR_MSR]);
+ tcg_gen_mov_i32(d, cpu_SR[SR_MSR]);
}

-static inline void msr_write(DisasContext *dc, TCGv v)
+static inline void msr_write(DisasContext *dc, TCGv_i32 v)
{
- TCGv t;
+ TCGv_i32 t;

- t = tcg_temp_new();
+ t = tcg_temp_new_i32();
dc->cpustate_changed = 1;
/* PVR bit is not writable. */
- tcg_gen_andi_tl(t, v, ~MSR_PVR);
- tcg_gen_andi_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
- tcg_gen_or_tl(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
+ tcg_gen_andi_i32(t, v, ~MSR_PVR);
+ tcg_gen_andi_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], MSR_PVR);
+ tcg_gen_or_i32(cpu_SR[SR_MSR], cpu_SR[SR_MSR], t);
tcg_temp_free(t);
}

static void dec_msr(DisasContext *dc)
{
CPUState *cs = CPU(dc->cpu);
- TCGv t0, t1;
+ TCGv_i32 t0, t1;
unsigned int sr, to, rn;
int mem_index = cpu_mmu_index(&dc->cpu->env, false);

@@ -454,7 +454,7 @@ static void dec_msr(DisasContext *dc)

if ((dc->tb_flags & MSR_EE_FLAG)
&& mem_index == MMU_USER_IDX && (dc->imm != 4 && dc->imm != 0)) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
}
@@ -462,20 +462,20 @@ static void dec_msr(DisasContext *dc)
if (dc->rd)
msr_read(dc, cpu_R[dc->rd]);

- t0 = tcg_temp_new();
- t1 = tcg_temp_new();
+ t0 = tcg_temp_new_i32();
+ t1 = tcg_temp_new_i32();
msr_read(dc, t0);
- tcg_gen_mov_tl(t1, *(dec_alu_op_b(dc)));
+ tcg_gen_mov_i32(t1, *(dec_alu_op_b(dc)));

if (clr) {
- tcg_gen_not_tl(t1, t1);
- tcg_gen_and_tl(t0, t0, t1);
+ tcg_gen_not_i32(t1, t1);
+ tcg_gen_and_i32(t0, t0, t1);
} else
- tcg_gen_or_tl(t0, t0, t1);
+ tcg_gen_or_i32(t0, t0, t1);
msr_write(dc, t0);
- tcg_temp_free(t0);
- tcg_temp_free(t1);
- tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
+ tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
dc->is_jmp = DISAS_UPDATE;
return;
}
@@ -483,7 +483,7 @@ static void dec_msr(DisasContext *dc)
if (to) {
if ((dc->tb_flags & MSR_EE_FLAG)
&& mem_index == MMU_USER_IDX) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
}
@@ -495,9 +495,9 @@ static void dec_msr(DisasContext *dc)
sr &= 7;
LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
if (to)
- gen_helper_mmu_write(cpu_env, tcg_const_tl(sr), cpu_R[dc->ra]);
+ gen_helper_mmu_write(cpu_env, tcg_const_i32(sr), cpu_R[dc->ra]);
else
- gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_tl(sr));
+ gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_i32(sr));
return;
}
#endif
@@ -511,19 +511,21 @@ static void dec_msr(DisasContext *dc)
msr_write(dc, cpu_R[dc->ra]);
break;
case 0x3:
- tcg_gen_mov_tl(cpu_SR[SR_EAR], cpu_R[dc->ra]);
+ tcg_gen_mov_i32(cpu_SR[SR_EAR], cpu_R[dc->ra]);
break;
case 0x5:
- tcg_gen_mov_tl(cpu_SR[SR_ESR], cpu_R[dc->ra]);
+ tcg_gen_mov_i32(cpu_SR[SR_ESR], cpu_R[dc->ra]);
break;
case 0x7:
- tcg_gen_andi_tl(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
+ tcg_gen_andi_i32(cpu_SR[SR_FSR], cpu_R[dc->ra], 31);
break;
case 0x800:
- tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, slr));
+ tcg_gen_st_i32(cpu_R[dc->ra],
+ cpu_env, offsetof(CPUMBState, slr));
break;
case 0x802:
- tcg_gen_st_tl(cpu_R[dc->ra], cpu_env, offsetof(CPUMBState, shr));
+ tcg_gen_st_i32(cpu_R[dc->ra],
+ cpu_env, offsetof(CPUMBState, shr));
break;
default:
cpu_abort(CPU(dc->cpu), "unknown mts reg %x\n", sr);
@@ -534,28 +536,30 @@ static void dec_msr(DisasContext *dc)

switch (sr) {
case 0:
- tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
+ tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);
break;
case 1:
msr_read(dc, cpu_R[dc->rd]);
break;
case 0x3:
- tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_EAR]);
+ tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_EAR]);
break;
case 0x5:
- tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_ESR]);
+ tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_ESR]);
break;
case 0x7:
- tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_FSR]);
+ tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_FSR]);
break;
case 0xb:
- tcg_gen_mov_tl(cpu_R[dc->rd], cpu_SR[SR_BTR]);
+ tcg_gen_mov_i32(cpu_R[dc->rd], cpu_SR[SR_BTR]);
break;
case 0x800:
- tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, slr));
+ tcg_gen_ld_i32(cpu_R[dc->rd],
+ cpu_env, offsetof(CPUMBState, slr));
break;
case 0x802:
- tcg_gen_ld_tl(cpu_R[dc->rd], cpu_env, offsetof(CPUMBState, shr));
+ tcg_gen_ld_i32(cpu_R[dc->rd],
+ cpu_env, offsetof(CPUMBState, shr));
break;
case 0x2000:
case 0x2001:
@@ -571,7 +575,7 @@ static void dec_msr(DisasContext *dc)
case 0x200b:
case 0x200c:
rn = sr & 0xf;
- tcg_gen_ld_tl(cpu_R[dc->rd],
+ tcg_gen_ld_i32(cpu_R[dc->rd],
cpu_env, offsetof(CPUMBState, pvr.regs[rn]));
break;
default:
@@ -581,20 +585,20 @@ static void dec_msr(DisasContext *dc)
}

if (dc->rd == 0) {
- tcg_gen_movi_tl(cpu_R[0], 0);
+ tcg_gen_movi_i32(cpu_R[0], 0);
}
}

/* Multiplier unit. */
static void dec_mul(DisasContext *dc)
{
- TCGv tmp;
+ TCGv_i32 tmp;
unsigned int subcode;

if ((dc->tb_flags & MSR_EE_FLAG)
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
&& !dc->cpu->cfg.use_hw_mul) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
}
@@ -603,7 +607,7 @@ static void dec_mul(DisasContext *dc)

if (dc->type_b) {
LOG_DIS("muli r%d r%d %x\n", dc->rd, dc->ra, dc->imm);
- tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+ tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], *(dec_alu_op_b(dc)));
return;
}

@@ -612,29 +616,31 @@ static void dec_mul(DisasContext *dc)
/* nop??? */
}

- tmp = tcg_temp_new();
+ tmp = tcg_temp_new_i32();
switch (subcode) {
case 0:
LOG_DIS("mul r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
- tcg_gen_mul_tl(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ tcg_gen_mul_i32(cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
break;
case 1:
LOG_DIS("mulh r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
- tcg_gen_muls2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ tcg_gen_muls2_i32(tmp, cpu_R[dc->rd],
+ cpu_R[dc->ra], cpu_R[dc->rb]);
break;
case 2:
LOG_DIS("mulhsu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
- tcg_gen_mulsu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ tcg_gen_mulsu2_i32(tmp, cpu_R[dc->rd],
+ cpu_R[dc->ra], cpu_R[dc->rb]);
break;
case 3:
LOG_DIS("mulhu r%d r%d r%d\n", dc->rd, dc->ra, dc->rb);
- tcg_gen_mulu2_tl(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
+ tcg_gen_mulu2_i32(tmp, cpu_R[dc->rd], cpu_R[dc->ra], cpu_R[dc->rb]);
break;
default:
cpu_abort(CPU(dc->cpu), "unknown MUL insn %x\n", subcode);
break;
}
- tcg_temp_free(tmp);
+ tcg_temp_free_i32(tmp);
}

/* Div unit. */
@@ -647,7 +653,7 @@ static void dec_div(DisasContext *dc)

if ((dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
&& !dc->cpu->cfg.use_div) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
}

@@ -658,19 +664,19 @@ static void dec_div(DisasContext *dc)
gen_helper_divs(cpu_R[dc->rd], cpu_env, *(dec_alu_op_b(dc)),
cpu_R[dc->ra]);
if (!dc->rd)
- tcg_gen_movi_tl(cpu_R[dc->rd], 0);
+ tcg_gen_movi_i32(cpu_R[dc->rd], 0);
}

static void dec_barrel(DisasContext *dc)
{
- TCGv t0;
+ TCGv_i32 t0;
unsigned int imm_w, imm_s;
bool s, t, e = false, i = false;

if ((dc->tb_flags & MSR_EE_FLAG)
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
&& !dc->cpu->cfg.use_barrel) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
}
@@ -709,28 +715,28 @@ static void dec_barrel(DisasContext *dc)
imm_s, width);
}
} else {
- t0 = tcg_temp_new();
+ t0 = tcg_temp_new_i32();

- tcg_gen_mov_tl(t0, *(dec_alu_op_b(dc)));
- tcg_gen_andi_tl(t0, t0, 31);
+ tcg_gen_mov_i32(t0, *(dec_alu_op_b(dc)));
+ tcg_gen_andi_i32(t0, t0, 31);

if (s) {
- tcg_gen_shl_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
+ tcg_gen_shl_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
} else {
if (t) {
- tcg_gen_sar_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
+ tcg_gen_sar_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
} else {
- tcg_gen_shr_tl(cpu_R[dc->rd], cpu_R[dc->ra], t0);
+ tcg_gen_shr_i32(cpu_R[dc->rd], cpu_R[dc->ra], t0);
}
}
- tcg_temp_free(t0);
+ tcg_temp_free_i32(t0);
}
}

static void dec_bit(DisasContext *dc)
{
CPUState *cs = CPU(dc->cpu);
- TCGv t0;
+ TCGv_i32 t0;
unsigned int op;
int mem_index = cpu_mmu_index(&dc->cpu->env, false);

@@ -738,16 +744,16 @@ static void dec_bit(DisasContext *dc)
switch (op) {
case 0x21:
/* src. */
- t0 = tcg_temp_new();
+ t0 = tcg_temp_new_i32();

LOG_DIS("src r%d r%d\n", dc->rd, dc->ra);
- tcg_gen_andi_tl(t0, cpu_SR[SR_MSR], MSR_CC);
+ tcg_gen_andi_i32(t0, cpu_SR[SR_MSR], MSR_CC);
write_carry(dc, cpu_R[dc->ra]);
if (dc->rd) {
- tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
- tcg_gen_or_tl(cpu_R[dc->rd], cpu_R[dc->rd], t0);
+ tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
+ tcg_gen_or_i32(cpu_R[dc->rd], cpu_R[dc->rd], t0);
}
- tcg_temp_free(t0);
+ tcg_temp_free_i32(t0);
break;

case 0x1:
@@ -759,9 +765,9 @@ static void dec_bit(DisasContext *dc)
write_carry(dc, cpu_R[dc->ra]);
if (dc->rd) {
if (op == 0x41)
- tcg_gen_shri_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
+ tcg_gen_shri_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
else
- tcg_gen_sari_tl(cpu_R[dc->rd], cpu_R[dc->ra], 1);
+ tcg_gen_sari_i32(cpu_R[dc->rd], cpu_R[dc->ra], 1);
}
break;
case 0x60:
@@ -780,7 +786,7 @@ static void dec_bit(DisasContext *dc)
LOG_DIS("wdc r%d\n", dc->ra);
if ((dc->tb_flags & MSR_EE_FLAG)
&& mem_index == MMU_USER_IDX) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
}
@@ -790,7 +796,7 @@ static void dec_bit(DisasContext *dc)
LOG_DIS("wic r%d\n", dc->ra);
if ((dc->tb_flags & MSR_EE_FLAG)
&& mem_index == MMU_USER_IDX) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
}
@@ -799,7 +805,7 @@ static void dec_bit(DisasContext *dc)
if ((dc->tb_flags & MSR_EE_FLAG)
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
&& !dc->cpu->cfg.use_pcmp_instr) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
}
if (dc->cpu->cfg.use_pcmp_instr) {
@@ -827,22 +833,22 @@ static inline void sync_jmpstate(DisasContext *dc)
{
if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
if (dc->jmp == JMP_DIRECT) {
- tcg_gen_movi_tl(env_btaken, 1);
+ tcg_gen_movi_i32(env_btaken, 1);
}
dc->jmp = JMP_INDIRECT;
- tcg_gen_movi_tl(env_btarget, dc->jmp_pc);
+ tcg_gen_movi_i32(env_btarget, dc->jmp_pc);
}
}

static void dec_imm(DisasContext *dc)
{
LOG_DIS("imm %x\n", dc->imm << 16);
- tcg_gen_movi_tl(env_imm, (dc->imm << 16));
+ tcg_gen_movi_i32(env_imm, (dc->imm << 16));
dc->tb_flags |= IMM_FLAG;
dc->clear_imm = 0;
}

-static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
+static inline TCGv_i32 *compute_ldst_addr(DisasContext *dc, TCGv_i32 *t)
{
bool extimm = dc->tb_flags & IMM_FLAG;
/* Should be set to true if r1 is used by loadstores. */
@@ -866,8 +872,8 @@ static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
stackprot = true;
}

- *t = tcg_temp_new();
- tcg_gen_add_tl(*t, cpu_R[dc->ra], cpu_R[dc->rb]);
+ *t = tcg_temp_new_i32();
+ tcg_gen_add_i32(*t, cpu_R[dc->ra], cpu_R[dc->rb]);

if (stackprot) {
gen_helper_stackprot(cpu_env, *t);
@@ -879,12 +885,12 @@ static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)
if (dc->imm == 0) {
return &cpu_R[dc->ra];
}
- *t = tcg_temp_new();
- tcg_gen_movi_tl(*t, (int32_t)((int16_t)dc->imm));
- tcg_gen_add_tl(*t, cpu_R[dc->ra], *t);
+ *t = tcg_temp_new_i32();
+ tcg_gen_movi_i32(*t, (int32_t)((int16_t)dc->imm));
+ tcg_gen_add_i32(*t, cpu_R[dc->ra], *t);
} else {
- *t = tcg_temp_new();
- tcg_gen_add_tl(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+ *t = tcg_temp_new_i32();
+ tcg_gen_add_i32(*t, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

if (stackprot) {
@@ -895,7 +901,7 @@ static inline TCGv *compute_ldst_addr(DisasContext *dc, TCGv *t)

static void dec_load(DisasContext *dc)
{
- TCGv t, v, *addr;
+ TCGv_i32 t, v, *addr;
unsigned int size;
bool rev = false, ex = false;
TCGMemOp mop;
@@ -913,7 +919,7 @@ static void dec_load(DisasContext *dc)

if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
}
@@ -939,20 +945,20 @@ static void dec_load(DisasContext *dc)
01 -> 10
10 -> 10
11 -> 00 */
- TCGv low = tcg_temp_new();
+ TCGv_i32 low = tcg_temp_new_i32();

/* Force addr into the temp. */
if (addr != &t) {
- t = tcg_temp_new();
- tcg_gen_mov_tl(t, *addr);
+ t = tcg_temp_new_i32();
+ tcg_gen_mov_i32(t, *addr);
addr = &t;
}

- tcg_gen_andi_tl(low, t, 3);
- tcg_gen_sub_tl(low, tcg_const_tl(3), low);
- tcg_gen_andi_tl(t, t, ~3);
- tcg_gen_or_tl(t, t, low);
- tcg_temp_free(low);
+ tcg_gen_andi_i32(low, t, 3);
+ tcg_gen_sub_i32(low, tcg_const_i32(3), low);
+ tcg_gen_andi_i32(t, t, ~3);
+ tcg_gen_or_i32(t, t, low);
+ tcg_temp_free_i32(low);
break;
}

@@ -961,11 +967,11 @@ static void dec_load(DisasContext *dc)
10 -> 00. */
/* Force addr into the temp. */
if (addr != &t) {
- t = tcg_temp_new();
- tcg_gen_xori_tl(t, *addr, 2);
+ t = tcg_temp_new_i32();
+ tcg_gen_xori_i32(t, *addr, 2);
addr = &t;
} else {
- tcg_gen_xori_tl(t, t, 2);
+ tcg_gen_xori_i32(t, t, 2);
}
break;
default:
@@ -978,11 +984,11 @@ static void dec_load(DisasContext *dc)
if (ex) {
/* Force addr into the temp. */
if (addr != &t) {
- t = tcg_temp_new();
- tcg_gen_mov_tl(t, *addr);
+ t = tcg_temp_new_i32();
+ tcg_gen_mov_i32(t, *addr);
addr = &t;
}
- tcg_gen_andi_tl(t, t, ~3);
+ tcg_gen_andi_i32(t, t, ~3);
}

/* If we get a fault on a dslot, the jmpstate better be in sync. */
@@ -995,23 +1001,23 @@ static void dec_load(DisasContext *dc)
* into v. If the load succeeds, we verify alignment of the
* address and if that succeeds we write into the destination reg.
*/
- v = tcg_temp_new();
- tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
+ v = tcg_temp_new_i32();
+ tcg_gen_qemu_ld_i32(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
- tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
- gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
- tcg_const_tl(0), tcg_const_tl(size - 1));
+ tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
+ gen_helper_memalign(cpu_env, *addr, tcg_const_i32(dc->rd),
+ tcg_const_i32(0), tcg_const_i32(size - 1));
}

if (ex) {
- tcg_gen_mov_tl(env_res_addr, *addr);
- tcg_gen_mov_tl(env_res_val, v);
+ tcg_gen_mov_i32(env_res_addr, *addr);
+ tcg_gen_mov_i32(env_res_val, v);
}
if (dc->rd) {
- tcg_gen_mov_tl(cpu_R[dc->rd], v);
+ tcg_gen_mov_i32(cpu_R[dc->rd], v);
}
- tcg_temp_free(v);
+ tcg_temp_free_i32(v);

if (ex) { /* lwx */
/* no support for AXI exclusive so always clear C */
@@ -1019,12 +1025,12 @@ static void dec_load(DisasContext *dc)
}

if (addr == &t)
- tcg_temp_free(t);
+ tcg_temp_free_i32(t);
}

static void dec_store(DisasContext *dc)
{
- TCGv t, *addr, swx_addr;
+ TCGv_i32 t, *addr, swx_addr;
TCGLabel *swx_skip = NULL;
unsigned int size;
bool rev = false, ex = false;
@@ -1043,7 +1049,7 @@ static void dec_store(DisasContext *dc)

if (size > 4 && (dc->tb_flags & MSR_EE_FLAG)
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
}
@@ -1055,31 +1061,31 @@ static void dec_store(DisasContext *dc)
sync_jmpstate(dc);
addr = compute_ldst_addr(dc, &t);

- swx_addr = tcg_temp_local_new();
+ swx_addr = tcg_temp_local_new_i32();
if (ex) { /* swx */
- TCGv tval;
+ TCGv_i32 tval;

/* Force addr into the swx_addr. */
- tcg_gen_mov_tl(swx_addr, *addr);
+ tcg_gen_mov_i32(swx_addr, *addr);
addr = &swx_addr;
/* swx does not throw unaligned access errors, so force alignment */
- tcg_gen_andi_tl(swx_addr, swx_addr, ~3);
+ tcg_gen_andi_i32(swx_addr, swx_addr, ~3);

write_carryi(dc, 1);
swx_skip = gen_new_label();
- tcg_gen_brcond_tl(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);
+ tcg_gen_brcond_i32(TCG_COND_NE, env_res_addr, swx_addr, swx_skip);

/* Compare the value loaded at lwx with current contents of
the reserved location.
FIXME: This only works for system emulation where we can expect
this compare and the following write to be atomic. For user
emulation we need to add atomicity between threads. */
- tval = tcg_temp_new();
- tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
+ tval = tcg_temp_new_i32();
+ tcg_gen_qemu_ld_i32(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
MO_TEUL);
- tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
+ tcg_gen_brcond_i32(TCG_COND_NE, env_res_val, tval, swx_skip);
write_carryi(dc, 0);
- tcg_temp_free(tval);
+ tcg_temp_free_i32(tval);
}

if (rev && size != 4) {
@@ -1091,20 +1097,20 @@ static void dec_store(DisasContext *dc)
01 -> 10
10 -> 10
11 -> 00 */
- TCGv low = tcg_temp_new();
+ TCGv_i32 low = tcg_temp_new_i32();

/* Force addr into the temp. */
if (addr != &t) {
- t = tcg_temp_new();
- tcg_gen_mov_tl(t, *addr);
+ t = tcg_temp_new_i32();
+ tcg_gen_mov_i32(t, *addr);
addr = &t;
}

- tcg_gen_andi_tl(low, t, 3);
- tcg_gen_sub_tl(low, tcg_const_tl(3), low);
- tcg_gen_andi_tl(t, t, ~3);
- tcg_gen_or_tl(t, t, low);
- tcg_temp_free(low);
+ tcg_gen_andi_i32(low, t, 3);
+ tcg_gen_sub_i32(low, tcg_const_i32(3), low);
+ tcg_gen_andi_i32(t, t, ~3);
+ tcg_gen_or_i32(t, t, low);
+ tcg_temp_free_i32(low);
break;
}

@@ -1113,11 +1119,11 @@ static void dec_store(DisasContext *dc)
10 -> 00. */
/* Force addr into the temp. */
if (addr != &t) {
- t = tcg_temp_new();
- tcg_gen_xori_tl(t, *addr, 2);
+ t = tcg_temp_new_i32();
+ tcg_gen_xori_i32(t, *addr, 2);
addr = &t;
} else {
- tcg_gen_xori_tl(t, t, 2);
+ tcg_gen_xori_i32(t, t, 2);
}
break;
default:
@@ -1125,51 +1131,52 @@ static void dec_store(DisasContext *dc)
break;
}
}
- tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);
+ tcg_gen_qemu_st_i32(cpu_R[dc->rd], *addr,
+ cpu_mmu_index(&dc->cpu->env, false), mop);

/* Verify alignment if needed. */
if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
- tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
+ tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
/* FIXME: if the alignment is wrong, we should restore the value
* in memory. One possible way to achieve this is to probe
* the MMU prior to the memaccess, thay way we could put
* the alignment checks in between the probe and the mem
* access.
*/
- gen_helper_memalign(cpu_env, *addr, tcg_const_tl(dc->rd),
- tcg_const_tl(1), tcg_const_tl(size - 1));
+ gen_helper_memalign(cpu_env, *addr, tcg_const_i32(dc->rd),
+ tcg_const_i32(1), tcg_const_i32(size - 1));
}

if (ex) {
gen_set_label(swx_skip);
}
- tcg_temp_free(swx_addr);
+ tcg_temp_free_i32(swx_addr);

if (addr == &t)
- tcg_temp_free(t);
+ tcg_temp_free_i32(t);
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
- TCGv d, TCGv a, TCGv b)
+ TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
switch (cc) {
case CC_EQ:
- tcg_gen_setcond_tl(TCG_COND_EQ, d, a, b);
+ tcg_gen_setcond_i32(TCG_COND_EQ, d, a, b);
break;
case CC_NE:
- tcg_gen_setcond_tl(TCG_COND_NE, d, a, b);
+ tcg_gen_setcond_i32(TCG_COND_NE, d, a, b);
break;
case CC_LT:
- tcg_gen_setcond_tl(TCG_COND_LT, d, a, b);
+ tcg_gen_setcond_i32(TCG_COND_LT, d, a, b);
break;
case CC_LE:
- tcg_gen_setcond_tl(TCG_COND_LE, d, a, b);
+ tcg_gen_setcond_i32(TCG_COND_LE, d, a, b);
break;
case CC_GE:
- tcg_gen_setcond_tl(TCG_COND_GE, d, a, b);
+ tcg_gen_setcond_i32(TCG_COND_GE, d, a, b);
break;
case CC_GT:
- tcg_gen_setcond_tl(TCG_COND_GT, d, a, b);
+ tcg_gen_setcond_i32(TCG_COND_GT, d, a, b);
break;
default:
cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
@@ -1177,13 +1184,13 @@ static inline void eval_cc(DisasContext *dc, unsigned int cc,
}
}

-static void eval_cond_jmp(DisasContext *dc, TCGv pc_true, TCGv pc_false)
+static void eval_cond_jmp(DisasContext *dc, TCGv_i32 pc_true, TCGv_i32 pc_false)
{
TCGLabel *l1 = gen_new_label();
/* Conditional jmp. */
- tcg_gen_mov_tl(cpu_SR[SR_PC], pc_false);
- tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, l1);
- tcg_gen_mov_tl(cpu_SR[SR_PC], pc_true);
+ tcg_gen_mov_i32(cpu_SR[SR_PC], pc_false);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, env_btaken, 0, l1);
+ tcg_gen_mov_i32(cpu_SR[SR_PC], pc_true);
gen_set_label(l1);
}

@@ -1200,22 +1207,22 @@ static void dec_bcc(DisasContext *dc)
if (dslot) {
dc->delayed_branch = 2;
dc->tb_flags |= D_FLAG;
- tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
+ tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
cpu_env, offsetof(CPUMBState, bimm));
}

if (dec_alu_op_b_is_small_imm(dc)) {
int32_t offset = (int32_t)((int16_t)dc->imm); /* sign-extend. */

- tcg_gen_movi_tl(env_btarget, dc->pc + offset);
+ tcg_gen_movi_i32(env_btarget, dc->pc + offset);
dc->jmp = JMP_DIRECT_CC;
dc->jmp_pc = dc->pc + offset;
} else {
dc->jmp = JMP_INDIRECT;
- tcg_gen_movi_tl(env_btarget, dc->pc);
- tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
+ tcg_gen_movi_i32(env_btarget, dc->pc);
+ tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
}
- eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_tl(0));
+ eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_i32(0));
}

static void dec_br(DisasContext *dc)
@@ -1241,7 +1248,7 @@ static void dec_br(DisasContext *dc)
tcg_gen_st_i32(tmp_1, cpu_env,
-offsetof(MicroBlazeCPU, env)
+offsetof(CPUState, halted));
- tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc + 4);
+ tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc + 4);
gen_helper_raise_exception(cpu_env, tmp_hlt);
tcg_temp_free_i32(tmp_hlt);
tcg_temp_free_i32(tmp_1);
@@ -1262,22 +1269,22 @@ static void dec_br(DisasContext *dc)
if (dslot) {
dc->delayed_branch = 2;
dc->tb_flags |= D_FLAG;
- tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
+ tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
cpu_env, offsetof(CPUMBState, bimm));
}
if (link && dc->rd)
- tcg_gen_movi_tl(cpu_R[dc->rd], dc->pc);
+ tcg_gen_movi_i32(cpu_R[dc->rd], dc->pc);

dc->jmp = JMP_INDIRECT;
if (abs) {
- tcg_gen_movi_tl(env_btaken, 1);
- tcg_gen_mov_tl(env_btarget, *(dec_alu_op_b(dc)));
+ tcg_gen_movi_i32(env_btaken, 1);
+ tcg_gen_mov_i32(env_btarget, *(dec_alu_op_b(dc)));
if (link && !dslot) {
if (!(dc->tb_flags & IMM_FLAG) && (dc->imm == 8 || dc->imm == 0x18))
t_gen_raise_exception(dc, EXCP_BREAK);
if (dc->imm == 0) {
if ((dc->tb_flags & MSR_EE_FLAG) && mem_index == MMU_USER_IDX) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
}
@@ -1290,63 +1297,63 @@ static void dec_br(DisasContext *dc)
dc->jmp = JMP_DIRECT;
dc->jmp_pc = dc->pc + (int32_t)((int16_t)dc->imm);
} else {
- tcg_gen_movi_tl(env_btaken, 1);
- tcg_gen_movi_tl(env_btarget, dc->pc);
- tcg_gen_add_tl(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
+ tcg_gen_movi_i32(env_btaken, 1);
+ tcg_gen_movi_i32(env_btarget, dc->pc);
+ tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
}
}
}

static inline void do_rti(DisasContext *dc)
{
- TCGv t0, t1;
- t0 = tcg_temp_new();
- t1 = tcg_temp_new();
- tcg_gen_shri_tl(t0, cpu_SR[SR_MSR], 1);
- tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_IE);
- tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
-
- tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
- tcg_gen_or_tl(t1, t1, t0);
+ TCGv_i32 t0, t1;
+ t0 = tcg_temp_new_i32();
+ t1 = tcg_temp_new_i32();
+ tcg_gen_shri_i32(t0, cpu_SR[SR_MSR], 1);
+ tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_IE);
+ tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
+
+ tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
+ tcg_gen_or_i32(t1, t1, t0);
msr_write(dc, t1);
- tcg_temp_free(t1);
- tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t0);
dc->tb_flags &= ~DRTI_FLAG;
}

static inline void do_rtb(DisasContext *dc)
{
- TCGv t0, t1;
- t0 = tcg_temp_new();
- t1 = tcg_temp_new();
- tcg_gen_andi_tl(t1, cpu_SR[SR_MSR], ~MSR_BIP);
- tcg_gen_shri_tl(t0, t1, 1);
- tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
-
- tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
- tcg_gen_or_tl(t1, t1, t0);
+ TCGv_i32 t0, t1;
+ t0 = tcg_temp_new_i32();
+ t1 = tcg_temp_new_i32();
+ tcg_gen_andi_i32(t1, cpu_SR[SR_MSR], ~MSR_BIP);
+ tcg_gen_shri_i32(t0, t1, 1);
+ tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));
+
+ tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
+ tcg_gen_or_i32(t1, t1, t0);
msr_write(dc, t1);
- tcg_temp_free(t1);
- tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t0);
dc->tb_flags &= ~DRTB_FLAG;
}

static inline void do_rte(DisasContext *dc)
{
- TCGv t0, t1;
- t0 = tcg_temp_new();
- t1 = tcg_temp_new();
+ TCGv_i32 t0, t1;
+ t0 = tcg_temp_new_i32();
+ t1 = tcg_temp_new_i32();

- tcg_gen_ori_tl(t1, cpu_SR[SR_MSR], MSR_EE);
- tcg_gen_andi_tl(t1, t1, ~MSR_EIP);
- tcg_gen_shri_tl(t0, t1, 1);
- tcg_gen_andi_tl(t0, t0, (MSR_VM | MSR_UM));
+ tcg_gen_ori_i32(t1, cpu_SR[SR_MSR], MSR_EE);
+ tcg_gen_andi_i32(t1, t1, ~MSR_EIP);
+ tcg_gen_shri_i32(t0, t1, 1);
+ tcg_gen_andi_i32(t0, t0, (MSR_VM | MSR_UM));

- tcg_gen_andi_tl(t1, t1, ~(MSR_VM | MSR_UM));
- tcg_gen_or_tl(t1, t1, t0);
+ tcg_gen_andi_i32(t1, t1, ~(MSR_VM | MSR_UM));
+ tcg_gen_or_i32(t1, t1, t0);
msr_write(dc, t1);
- tcg_temp_free(t1);
- tcg_temp_free(t0);
+ tcg_temp_free_i32(t1);
+ tcg_temp_free_i32(t0);
dc->tb_flags &= ~DRTE_FLAG;
}

@@ -1361,14 +1368,14 @@ static void dec_rts(DisasContext *dc)

dc->delayed_branch = 2;
dc->tb_flags |= D_FLAG;
- tcg_gen_st_tl(tcg_const_tl(dc->type_b && (dc->tb_flags & IMM_FLAG)),
+ tcg_gen_st_i32(tcg_const_i32(dc->type_b && (dc->tb_flags & IMM_FLAG)),
cpu_env, offsetof(CPUMBState, bimm));

if (i_bit) {
LOG_DIS("rtid ir=%x\n", dc->ir);
if ((dc->tb_flags & MSR_EE_FLAG)
&& mem_index == MMU_USER_IDX) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
}
dc->tb_flags |= DRTI_FLAG;
@@ -1376,7 +1383,7 @@ static void dec_rts(DisasContext *dc)
LOG_DIS("rtbd ir=%x\n", dc->ir);
if ((dc->tb_flags & MSR_EE_FLAG)
&& mem_index == MMU_USER_IDX) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
}
dc->tb_flags |= DRTB_FLAG;
@@ -1384,7 +1391,7 @@ static void dec_rts(DisasContext *dc)
LOG_DIS("rted ir=%x\n", dc->ir);
if ((dc->tb_flags & MSR_EE_FLAG)
&& mem_index == MMU_USER_IDX) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
}
dc->tb_flags |= DRTE_FLAG;
@@ -1392,14 +1399,14 @@ static void dec_rts(DisasContext *dc)
LOG_DIS("rts ir=%x\n", dc->ir);

dc->jmp = JMP_INDIRECT;
- tcg_gen_movi_tl(env_btaken, 1);
- tcg_gen_add_tl(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
+ tcg_gen_movi_i32(env_btaken, 1);
+ tcg_gen_add_i32(env_btarget, cpu_R[dc->ra], *(dec_alu_op_b(dc)));
}

static int dec_check_fpuv2(DisasContext *dc)
{
if ((dc->cpu->cfg.use_fpu != 2) && (dc->tb_flags & MSR_EE_FLAG)) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_FPU);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_FPU);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
}
return (dc->cpu->cfg.use_fpu == 2) ? 0 : PVR2_USE_FPU2_MASK;
@@ -1412,7 +1419,7 @@ static void dec_fpu(DisasContext *dc)
if ((dc->tb_flags & MSR_EE_FLAG)
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
&& !dc->cpu->cfg.use_fpu) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
}
@@ -1514,7 +1521,7 @@ static void dec_null(DisasContext *dc)
{
if ((dc->tb_flags & MSR_EE_FLAG)
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
}
@@ -1533,29 +1540,29 @@ static void dec_stream(DisasContext *dc)
dc->type_b ? "" : "d", dc->imm);

if ((dc->tb_flags & MSR_EE_FLAG) && (mem_index == MMU_USER_IDX)) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_PRIVINSN);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
}

- t_id = tcg_temp_new();
+ t_id = tcg_temp_new_i32();
if (dc->type_b) {
- tcg_gen_movi_tl(t_id, dc->imm & 0xf);
+ tcg_gen_movi_i32(t_id, dc->imm & 0xf);
ctrl = dc->imm >> 10;
} else {
- tcg_gen_andi_tl(t_id, cpu_R[dc->rb], 0xf);
+ tcg_gen_andi_i32(t_id, cpu_R[dc->rb], 0xf);
ctrl = dc->imm >> 5;
}

- t_ctrl = tcg_const_tl(ctrl);
+ t_ctrl = tcg_const_i32(ctrl);

if (dc->rd == 0) {
gen_helper_put(t_id, t_ctrl, cpu_R[dc->ra]);
} else {
gen_helper_get(cpu_R[dc->rd], t_id, t_ctrl);
}
- tcg_temp_free(t_id);
- tcg_temp_free(t_ctrl);
+ tcg_temp_free_i32(t_id);
+ tcg_temp_free_i32(t_ctrl);
}

static struct decoder_info {
@@ -1599,7 +1606,7 @@ static inline void decode(DisasContext *dc, uint32_t ir)
if ((dc->tb_flags & MSR_EE_FLAG)
&& (dc->cpu->env.pvr.regs[2] & PVR2_ILL_OPCODE_EXC_MASK)
&& (dc->cpu->env.pvr.regs[2] & PVR2_OPCODE_0x0_ILL_MASK)) {
- tcg_gen_movi_tl(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
+ tcg_gen_movi_i32(cpu_SR[SR_ESR], ESR_EC_ILLEGAL_OP);
t_gen_raise_exception(dc, EXCP_HW_EXCP);
return;
}
@@ -1637,7 +1644,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
struct DisasContext ctx;
struct DisasContext *dc = &ctx;
uint32_t page_start, org_flags;
- target_ulong npc;
+ uint32_t npc;
int num_insns;
int max_insns;

@@ -1680,7 +1687,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)

#if SIM_COMPAT
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
- tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
+ tcg_gen_movi_i32(cpu_SR[SR_PC], dc->pc);
gen_helper_debug();
}
#endif
@@ -1722,7 +1729,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
dc->tb_flags &= ~D_FLAG;
/* If it is a direct jump, try direct chaining. */
if (dc->jmp == JMP_INDIRECT) {
- eval_cond_jmp(dc, env_btarget, tcg_const_tl(dc->pc));
+ eval_cond_jmp(dc, env_btarget, tcg_const_i32(dc->pc));
dc->is_jmp = DISAS_JUMP;
} else if (dc->jmp == JMP_DIRECT) {
t_sync_flags(dc);
@@ -1732,7 +1739,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
TCGLabel *l1 = gen_new_label();
t_sync_flags(dc);
/* Conditional jmp. */
- tcg_gen_brcondi_tl(TCG_COND_NE, env_btaken, 0, l1);
+ tcg_gen_brcondi_i32(TCG_COND_NE, env_btaken, 0, l1);
gen_goto_tb(dc, 1, dc->pc);
gen_set_label(l1);
gen_goto_tb(dc, 0, dc->jmp_pc);
@@ -1755,7 +1762,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
if (dc->jmp == JMP_DIRECT || dc->jmp == JMP_DIRECT_CC) {
if (dc->tb_flags & D_FLAG) {
dc->is_jmp = DISAS_UPDATE;
- tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
+ tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
sync_jmpstate(dc);
} else
npc = dc->jmp_pc;
@@ -1767,7 +1774,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
if (dc->is_jmp == DISAS_NEXT
&& (dc->cpustate_changed || org_flags != dc->tb_flags)) {
dc->is_jmp = DISAS_UPDATE;
- tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
+ tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
}
t_sync_flags(dc);

@@ -1775,7 +1782,7 @@ void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb)
TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG);

if (dc->is_jmp != DISAS_JUMP) {
- tcg_gen_movi_tl(cpu_SR[SR_PC], npc);
+ tcg_gen_movi_i32(cpu_SR[SR_PC], npc);
}
gen_helper_raise_exception(cpu_env, tmp);
tcg_temp_free_i32(tmp);
@@ -1849,34 +1856,34 @@ void mb_tcg_init(void)
{
int i;

- env_debug = tcg_global_mem_new(cpu_env,
+ env_debug = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMBState, debug),
"debug0");
- env_iflags = tcg_global_mem_new(cpu_env,
+ env_iflags = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMBState, iflags),
"iflags");
- env_imm = tcg_global_mem_new(cpu_env,
+ env_imm = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMBState, imm),
"imm");
- env_btarget = tcg_global_mem_new(cpu_env,
+ env_btarget = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMBState, btarget),
"btarget");
- env_btaken = tcg_global_mem_new(cpu_env,
+ env_btaken = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMBState, btaken),
"btaken");
- env_res_addr = tcg_global_mem_new(cpu_env,
+ env_res_addr = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMBState, res_addr),
"res_addr");
- env_res_val = tcg_global_mem_new(cpu_env,
+ env_res_val = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMBState, res_val),
"res_val");
for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
- cpu_R[i] = tcg_global_mem_new(cpu_env,
+ cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMBState, regs[i]),
regnames[i]);
}
for (i = 0; i < ARRAY_SIZE(cpu_SR); i++) {
- cpu_SR[i] = tcg_global_mem_new(cpu_env,
+ cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUMBState, sregs[i]),
special_regnames[i]);
}
--
2.14.1
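
A note on the conversion above for readers less familiar with TCG's typed API: the *_tl generators operate on target_long-sized values, while the *_i32 forms are fixed at 32 bits, which matches MicroBlaze's 32-bit GPRs and special registers regardless of how TARGET_LONG_BITS is configured later in the series. A minimal sketch of the explicit-width pattern, using only generic TCG calls; the names are illustrative and not taken from the patch:

    /* Hypothetical illustration: add two 32-bit guest registers through a
     * 32-bit temporary using explicitly sized TCG ops.  reg_d, reg_a and
     * reg_b stand in for entries of cpu_R[]. */
    static void gen_add32_sketch(TCGv_i32 reg_d, TCGv_i32 reg_a, TCGv_i32 reg_b)
    {
        TCGv_i32 tmp = tcg_temp_new_i32();   /* 32-bit temp, independent of TARGET_LONG_BITS */

        tcg_gen_add_i32(tmp, reg_a, reg_b);  /* 32-bit addition */
        tcg_gen_mov_i32(reg_d, tmp);         /* write the result back */
        tcg_temp_free_i32(tmp);              /* TCG temps must be freed explicitly */
    }
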
Edgar E. Iglesias
2018-05-16 18:51:17 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Conditionalize the setting of PVR11_USE_MMU on the use_mmu
CPU property; otherwise we may incorrectly advertise an
MMU via the PVR registers when the core in fact has none.

Reviewed-by: Alistair Francis <***@wdc.com>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/cpu.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c
index 06476f6efc..6fdf0fd223 100644
--- a/target/microblaze/cpu.c
+++ b/target/microblaze/cpu.c
@@ -201,7 +201,8 @@ static void mb_cpu_realizefn(DeviceState *dev, Error **errp)
PVR5_DCACHE_WRITEBACK_MASK : 0;

env->pvr.regs[10] = 0x0c000000; /* Default to spartan 3a dsp family. */
- env->pvr.regs[11] = PVR11_USE_MMU | (16 << 17);
+ env->pvr.regs[11] = (cpu->cfg.use_mmu ? PVR11_USE_MMU : 0) |
+ (16 << 17);

mcc->parent_realize(dev, errp);
}
--
2.14.1
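
On the expression in that hunk: the conditional operator binds more loosely than |, so the MMU flag selection needs its own parentheses to keep the other field intact. A minimal sketch of the intended composition, assuming the same field values as the patch; the helper name is invented for illustration:

    /* Hypothetical illustration: only advertise the MMU bit when the CPU
     * is configured with an MMU; the remaining field is always set. */
    static uint32_t compose_pvr11_sketch(bool use_mmu)
    {
        uint32_t pvr11 = use_mmu ? PVR11_USE_MMU : 0;  /* conditional MMU flag */

        pvr11 |= 16 << 17;   /* unconditional field, value as in the patch */
        return pvr11;
    }
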
Edgar E. Iglesias
2018-05-16 18:51:36 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Plug a TCG temp leak in dec_msr()'s MMU special-register path.

Reported-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index 03a0289858..cf1b87c09e 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -516,12 +516,17 @@ static void dec_msr(DisasContext *dc)
#if !defined(CONFIG_USER_ONLY)
/* Catch read/writes to the mmu block. */
if ((sr & ~0xff) == 0x1000) {
+ TCGv_i32 tmp_sr;
+
sr &= 7;
+ tmp_sr = tcg_const_i32(sr);
LOG_DIS("m%ss sr%d r%d imm=%x\n", to ? "t" : "f", sr, dc->ra, dc->imm);
- if (to)
- gen_helper_mmu_write(cpu_env, tcg_const_i32(sr), cpu_R[dc->ra]);
- else
- gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tcg_const_i32(sr));
+ if (to) {
+ gen_helper_mmu_write(cpu_env, tmp_sr, cpu_R[dc->ra]);
+ } else {
+ gen_helper_mmu_read(cpu_R[dc->rd], cpu_env, tmp_sr);
+ }
+ tcg_temp_free_i32(tmp_sr);
return;
}
#endif
--
2.14.1
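
For context on the leak: a temporary created inline as a call argument, e.g. tcg_const_i32(sr), leaves no handle to pass to tcg_temp_free_i32(). A minimal before/after sketch of the pattern, reusing the names from the hunk above:

    /* Hypothetical illustration of the leak being plugged. */

    /* Before: the constant temp has no handle and is never freed. */
    gen_helper_mmu_write(cpu_env, tcg_const_i32(sr), cpu_R[dc->ra]);

    /* After: keep a handle so the mandatory free can happen. */
    TCGv_i32 tmp_sr = tcg_const_i32(sr);
    gen_helper_mmu_write(cpu_env, tmp_sr, cpu_R[dc->ra]);
    tcg_temp_free_i32(tmp_sr);
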
Edgar E. Iglesias
2018-05-16 18:51:44 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Clean up eval_cond_jmp() to use tcg_gen_movcond_i64().
No functional change.

Suggested-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index a846797d9c..78c2855ff0 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -1171,12 +1171,16 @@ static inline void eval_cc(DisasContext *dc, unsigned int cc,

static void eval_cond_jmp(DisasContext *dc, TCGv_i64 pc_true, TCGv_i64 pc_false)
{
- TCGLabel *l1 = gen_new_label();
- /* Conditional jmp. */
- tcg_gen_mov_i64(cpu_SR[SR_PC], pc_false);
- tcg_gen_brcondi_i32(TCG_COND_EQ, env_btaken, 0, l1);
- tcg_gen_mov_i64(cpu_SR[SR_PC], pc_true);
- gen_set_label(l1);
+ TCGv_i64 tmp_btaken = tcg_temp_new_i64();
+ TCGv_i64 tmp_zero = tcg_const_i64(0);
+
+ tcg_gen_extu_i32_i64(tmp_btaken, env_btaken);
+ tcg_gen_movcond_i64(TCG_COND_NE, cpu_SR[SR_PC],
+ tmp_btaken, tmp_zero,
+ pc_true, pc_false);
+
+ tcg_temp_free_i64(tmp_btaken);
+ tcg_temp_free_i64(tmp_zero);
}

static void dec_bcc(DisasContext *dc)
--
2.14.1
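
For reference, tcg_gen_movcond_i64(cond, dest, c1, c2, v1, v2) stores v1 into dest when "c1 cond c2" holds and v2 otherwise, so the two PC writes and the branch label collapse into one branchless select. A minimal sketch of the idiom with invented value names, widening a 32-bit flag as the patch does:

    /* Hypothetical illustration: dest = (flag32 != 0) ? a : b, without a branch. */
    TCGv_i64 flag64 = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);

    tcg_gen_extu_i32_i64(flag64, flag32);          /* zero-extend the 32-bit flag */
    tcg_gen_movcond_i64(TCG_COND_NE, dest, flag64, zero, a, b);

    tcg_temp_free_i64(flag64);
    tcg_temp_free_i64(zero);
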
Edgar E. Iglesias
2018-05-16 18:51:29 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Set up MicroBlaze builds for 64-bit addressing.
No functional change since the translator does not yet
emit 64-bit addresses.

Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
configure | 1 +
target/microblaze/cpu.h | 6 +++---
2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/configure b/configure
index 59f91ab3f9..5626499e99 100755
--- a/configure
+++ b/configure
@@ -6844,6 +6844,7 @@ case "$target_name" in
microblaze|microblazeel)
TARGET_ARCH=microblaze
bflt="yes"
+ echo "TARGET_ABI32=y" >> $config_target_mak
;;
mips|mipsel)
TARGET_ARCH=mips
diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h
index 215f42b384..b631b7dc4c 100644
--- a/target/microblaze/cpu.h
+++ b/target/microblaze/cpu.h
@@ -23,7 +23,7 @@
#include "qemu-common.h"
#include "cpu-qom.h"

-#define TARGET_LONG_BITS 32
+#define TARGET_LONG_BITS 64

#define CPUArchState struct CPUMBState

@@ -340,8 +340,8 @@ int cpu_mb_signal_handler(int host_signum, void *pinfo,
/* FIXME: MB uses variable pages down to 1K but linux only uses 4k. */
#define TARGET_PAGE_BITS 12

-#define TARGET_PHYS_ADDR_SPACE_BITS 32
-#define TARGET_VIRT_ADDR_SPACE_BITS 32
+#define TARGET_PHYS_ADDR_SPACE_BITS 64
+#define TARGET_VIRT_ADDR_SPACE_BITS 64

#define CPU_RESOLVING_TYPE TYPE_MICROBLAZE_CPU
--
2.14.1
Edgar E. Iglesias
2018-05-16 18:51:42 UTC
Permalink
From: "Edgar E. Iglesias" <***@xilinx.com>

Remove the b argument from eval_cc(), as it is always set to zero.
No functional change.

Reviewed-by: Richard Henderson <***@linaro.org>
Signed-off-by: Edgar E. Iglesias <***@xilinx.com>
---
target/microblaze/translate.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c
index ed0b6fa881..a35683c8c9 100644
--- a/target/microblaze/translate.c
+++ b/target/microblaze/translate.c
@@ -1143,7 +1143,7 @@ static void dec_store(DisasContext *dc)
}

static inline void eval_cc(DisasContext *dc, unsigned int cc,
- TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+ TCGv_i32 d, TCGv_i32 a)
{
static const int mb_to_tcg_cc[] = {
[CC_EQ] = TCG_COND_EQ,
@@ -1161,7 +1161,7 @@ static inline void eval_cc(DisasContext *dc, unsigned int cc,
case CC_LE:
case CC_GE:
case CC_GT:
- tcg_gen_setcond_i32(mb_to_tcg_cc[cc], d, a, b);
+ tcg_gen_setcondi_i32(mb_to_tcg_cc[cc], d, a, 0);
break;
default:
cpu_abort(CPU(dc->cpu), "Unknown condition code %x.\n", cc);
@@ -1207,7 +1207,7 @@ static void dec_bcc(DisasContext *dc)
tcg_gen_movi_i32(env_btarget, dc->pc);
tcg_gen_add_i32(env_btarget, env_btarget, *(dec_alu_op_b(dc)));
}
- eval_cc(dc, cc, env_btaken, cpu_R[dc->ra], tcg_const_i32(0));
+ eval_cc(dc, cc, env_btaken, cpu_R[dc->ra]);
}

static void dec_br(DisasContext *dc)
--
2.14.1
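
tcg_gen_setcondi_i32() is the immediate form of tcg_gen_setcond_i32(): it sets the destination to 1 when "arg cond imm" holds and to 0 otherwise, so the caller no longer needs to materialize a constant zero. A minimal sketch with invented names:

    /* Hypothetical illustration: flag = (value < 0), written two ways. */

    /* With an explicit constant value: */
    TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_setcond_i32(TCG_COND_LT, flag, value, zero);
    tcg_temp_free_i32(zero);

    /* With the immediate form used after this patch: */
    tcg_gen_setcondi_i32(TCG_COND_LT, flag, value, 0);
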