author    Chris Dearman <chris@mips.com>    2012-08-03 14:35:52 -0700
committer Raghu Gandham <raghu@mips.com>    2012-08-13 17:12:26 -0700
commit    55ff318b4e5382074e2049c996cb6df1041aff1b (patch)
tree      2bf906056a890d5ec97faf2a1f53db1083a0be12 /target-mips/op_helper.c
parent    1f1e03b62e23620975b215f77f745744426764e9 (diff)
[MIPS] Speed up software refill handler
Emulating each instruction of the software refill handler has a significant
impact on the overall performance of QEMU because of the overhead of emulating
the various CP0 instructions needed to accurately reflect the machine state.
Running the software TLB refill handler takes the equivalent of thousands of
machine cycles. This patch implements a pseudo hardware TLB refill handler
that significantly reduces the cost of refilling the TLB, bringing it more in
line with what would be observed on a real target.

Signed-off-by: Steven Hill <sjhill@mips.com>
Signed-off-by: Chris Dearman <chris@mips.com>
Signed-off-by: Yajin <yajin@mips.com.cn>
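[Editor's note] For orientation, here is a minimal sketch of how a pseudo hardware
refill path like this can be wired into TLB-miss handling. The page_table_walk()
helper and the do_pseudo_hw_refill() wrapper are invented for illustration and are
not part of this patch; only r4k_helper_ptw_tlbrefill() below is.

/* Sketch only: page_table_walk() is a hypothetical helper that walks the
 * guest page table and, on success, loads CP0_EntryHi/EntryLo0/EntryLo1
 * with the even/odd PTE pair covering bad_vaddr. */
static int do_pseudo_hw_refill(CPUState *env, target_ulong bad_vaddr)
{
    if (!page_table_walk(env, bad_vaddr)) {
        /* No valid mapping: take the architected TLB refill exception and
         * let the guest's software handler run as before. */
        return 0;
    }
    /* Mapping found: write it into a random TLB slot directly, the way a
     * hardware page table walker would, without executing guest code. */
    r4k_helper_ptw_tlbrefill(env);
    return 1;
}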
Diffstat (limited to 'target-mips/op_helper.c')
-rw-r--r--  target-mips/op_helper.c  152
1 file changed, 115 insertions(+), 37 deletions(-)
diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index a0c64f4..783a1a9 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -1498,20 +1498,69 @@ target_ulong helper_yield(target_ulong arg1)
}
#ifndef CONFIG_USER_ONLY
-/* TLB management */
-void cpu_mips_tlb_flush (CPUState *env, int flush_global)
+static void inline r4k_invalidate_tlb_shadow (CPUState *env, int idx)
{
- /* Flush qemu's TLB and discard all shadowed entries. */
- tlb_flush (env, flush_global);
- env->tlb->tlb_in_use = env->tlb->nb_tlb;
+ r4k_tlb_t *tlb;
+ uint8_t ASID = env->CP0_EntryHi & 0xFF;
+
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ /* The qemu TLB is flushed when the ASID changes, so no need to
+ flush these entries again. */
+ if (tlb->G == 0 && tlb->ASID != ASID) {
+ return;
+ }
}
-static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
+static void inline r4k_invalidate_tlb (CPUState *env, int idx)
{
- /* Discard entries from env->tlb[first] onwards. */
- while (env->tlb->tlb_in_use > first) {
- r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
+ r4k_tlb_t *tlb;
+ target_ulong addr;
+ target_ulong end;
+ uint8_t ASID = env->CP0_EntryHi & 0xFF;
+ target_ulong mask;
+
+ tlb = &env->tlb->mmu.r4k.tlb[idx];
+ /* The qemu TLB is flushed when the ASID changes, so no need to
+ flush these entries again. */
+ if (tlb->G == 0 && tlb->ASID != ASID) {
+ return;
+ }
+
+ /* 1k pages are not supported. */
+ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ if (tlb->V0) {
+ addr = tlb->VPN & ~mask;
+#if defined(TARGET_MIPS64)
+ if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
+ addr |= 0x3FFFFF0000000000ULL;
+ }
+#endif
+ end = addr | (mask >> 1);
+ while (addr < end) {
+ tlb_flush_page (env, addr);
+ addr += TARGET_PAGE_SIZE;
+ }
}
+ if (tlb->V1) {
+ addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
+#if defined(TARGET_MIPS64)
+ if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
+ addr |= 0x3FFFFF0000000000ULL;
+ }
+#endif
+ end = addr | mask;
+ while (addr - 1 < end) {
+ tlb_flush_page (env, addr);
+ addr += TARGET_PAGE_SIZE;
+ }
+ }
+}
+
+/* TLB management */
+void cpu_mips_tlb_flush (CPUState *env, int flush_global)
+{
+ /* Flush qemu's TLB and discard all shadowed entries. */
+ tlb_flush (env, flush_global);
}
static void r4k_fill_tlb (int idx)
@@ -1537,26 +1586,65 @@ static void r4k_fill_tlb (int idx)
tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}
+void r4k_helper_ptw_tlbrefill(CPUState *target_env)
+{
+ CPUState *saved_env;
+
+ /* Save current 'env' value */
+ saved_env = env;
+ env = target_env;
+
+ /* Do TLB load on behalf of Page Table Walk */
+ int r = cpu_mips_get_random(env);
+ r4k_invalidate_tlb_shadow(env, r);
+ r4k_fill_tlb(r);
+
+ /* Restore 'env' value */
+ env = saved_env;
+}
+
void r4k_helper_tlbwi (void)
{
- int idx;
+ r4k_tlb_t *tlb;
+ target_ulong tag;
+ target_ulong VPN;
+ target_ulong mask;
- idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
+ /* If tlbwi is just upgrading access permissions on the current entry,
+ * we do not need to flush the TLB hash table.
+ */
+ tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];
+ mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
+ tag = env->CP0_EntryHi & ~mask;
+ VPN = tlb->VPN & ~mask;
+ if (VPN == tag)
+ {
+ if (tlb->ASID == (env->CP0_EntryHi & 0xFF))
+ {
+ tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
+ tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
+ tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
+ tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
+ tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
+ tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
+ tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
+ tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
+ return;
+ }
+ }
- /* Discard cached TLB entries. We could avoid doing this if the
- tlbwi is just upgrading access permissions on the current entry;
- that might be a further win. */
- r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
+ /* Flush the whole TLB cache. */
+ cpu_mips_tlb_flush (env, 1);
- r4k_invalidate_tlb(env, idx, 0);
- r4k_fill_tlb(idx);
+ r4k_invalidate_tlb(env, env->CP0_Index % env->tlb->nb_tlb);
+ r4k_fill_tlb(env->CP0_Index % env->tlb->nb_tlb);
}
void r4k_helper_tlbwr (void)
{
int r = cpu_mips_get_random(env);
- r4k_invalidate_tlb(env, r, 1);
+ r4k_invalidate_tlb_shadow(env, r);
r4k_fill_tlb(r);
}
@@ -1568,6 +1656,8 @@ void r4k_helper_tlbp (void)
target_ulong VPN;
uint8_t ASID;
int i;
+ target_ulong addr;
+ target_ulong end;
ASID = env->CP0_EntryHi & 0xFF;
for (i = 0; i < env->tlb->nb_tlb; i++) {
@@ -1577,27 +1667,16 @@ void r4k_helper_tlbp (void)
tag = env->CP0_EntryHi & ~mask;
VPN = tlb->VPN & ~mask;
/* Check ASID, virtual page number & size */
- if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
+ if (unlikely((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag)) {
/* TLB match */
env->CP0_Index = i;
break;
}
}
if (i == env->tlb->nb_tlb) {
- /* No match. Discard any shadow entries, if any of them match. */
- for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
- tlb = &env->tlb->mmu.r4k.tlb[i];
- /* 1k pages are not supported. */
- mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
- tag = env->CP0_EntryHi & ~mask;
- VPN = tlb->VPN & ~mask;
- /* Check ASID, virtual page number & size */
- if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
- r4k_mips_tlb_flush_extra (env, i);
- break;
- }
- }
-
+ /* No match. Discard any shadow entries, if any of them match. */
+ int index = ((env->CP0_EntryHi>>5)&0x1ff00) | ASID;
+ index |= (env->CP0_EntryHi>>13)&0x20000;
env->CP0_Index |= 0x80000000;
}
}
@@ -1606,17 +1685,16 @@ void r4k_helper_tlbr (void)
{
r4k_tlb_t *tlb;
uint8_t ASID;
- int idx;
ASID = env->CP0_EntryHi & 0xFF;
- idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
- tlb = &env->tlb->mmu.r4k.tlb[idx];
+ tlb = &env->tlb->mmu.r4k.tlb[env->CP0_Index % env->tlb->nb_tlb];
/* If this will change the current ASID, flush qemu's TLB. */
if (ASID != tlb->ASID)
cpu_mips_tlb_flush (env, 1);
- r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
+ /* Flush the whole TLB cache. */
+ cpu_mips_tlb_flush (env, 1);
env->CP0_EntryHi = tlb->VPN | tlb->ASID;
env->CP0_PageMask = tlb->PageMask;