From 006a851b10a395955c153a145ad8241494d43688 Mon Sep 17 00:00:00 2001
From: "Steven J. Hill"
Date: Tue, 26 Jun 2012 04:11:03 +0000
Subject: MIPS: Add support for the 1074K core.

Signed-off-by: Steven J. Hill
---
 arch/mips/mm/c-r4k.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)
(limited to 'arch/mips/mm')

diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index f092c26..4c32ede 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -786,6 +786,25 @@ static inline void rm7k_erratum31(void)
 	}
 }
 
+static inline void alias_74k_erratum(struct cpuinfo_mips *c)
+{
+	/*
+	 * Early versions of the 74K do not update the cache tags on a
+	 * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
+	 * aliases. In this case it is better to treat the cache as always
+	 * having aliases.
+	 */
+	if ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(2, 4, 0))
+		c->dcache.flags |= MIPS_CACHE_VTAG;
+	if ((c->processor_id & 0xff) == PRID_REV_ENCODE_332(2, 4, 0))
+		write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+	if (((c->processor_id & 0xff00) == PRID_IMP_1074K) &&
+	    ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(1, 1, 0))) {
+		c->dcache.flags |= MIPS_CACHE_VTAG;
+		write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+	}
+}
+
 static char *way_string[] __cpuinitdata = { NULL, "direct mapped",
 	"2-way", "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
 };
@@ -1056,6 +1075,8 @@ static void __cpuinit probe_pcache(void)
 	case CPU_34K:
 	case CPU_74K:
 	case CPU_1004K:
+		if (c->cputype == CPU_74K)
+			alias_74k_erratum(c);
 		if ((read_c0_config7() & (1 << 16))) {
 			/* effectively physically indexed dcache,
 			   thus no virtual aliases. */
--
cgit v1.1
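
[Note: the following is a standalone C sketch of the revision test above, not part of the patch. It assumes PRID_REV_ENCODE_332() packs a "version.revision.patch" triple into the low PRID byte as 3-3-2 bit fields; the PRID value in main() is purely illustrative.]

#include <stdio.h>

/* assumed 3-3-2 packing of "ver.rev.patch" into the low PRID byte */
#define REV_ENCODE_332(ver, rev, patch) \
	(((ver) << 5) | ((rev) << 2) | (patch))

/* mirrors the first check in alias_74k_erratum(): treat the D-cache as
   always aliasing on 74K revisions up to and including 2.4.0 */
static int treat_dcache_as_aliasing(unsigned int processor_id)
{
	return (processor_id & 0xff) <= REV_ENCODE_332(2, 4, 0);
}

int main(void)
{
	/* made-up upper PRID bits, revision 2.3.1 in the low byte */
	unsigned int prid = 0x12345600 | REV_ENCODE_332(2, 3, 1);

	printf("aliasing workaround needed: %d\n",
	       treat_dcache_as_aliasing(prid));
	return 0;
}
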
From 625c0a21700bdb90844d926a1508a17a77e369c9 Mon Sep 17 00:00:00 2001
From: "Steven J. Hill"
Date: Tue, 28 Aug 2012 23:20:08 -0500
Subject: MIPS: Avoid pipeline stalls on some MIPS32R2 cores.

The architecture specification says that an EHB instruction is needed
to avoid a hazard when writing TLB entries. However, some cores do not
have this hazard, and thus the EHB instruction causes a costly pipeline
stall. Detect these cores and do not use the EHB instruction.

Signed-off-by: Steven J. Hill
---
 arch/mips/mm/tlbex.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)
(limited to 'arch/mips/mm')

diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 03eb0ef..22ba108 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -449,8 +449,20 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	}
 
 	if (cpu_has_mips_r2) {
-		if (cpu_has_mips_r2_exec_hazard)
+		/*
+		 * The architecture spec says an ehb is required here,
+		 * but a number of cores do not have the hazard and
+		 * using an ehb causes an expensive pipeline stall.
+		 */
+		switch (current_cpu_type()) {
+		case CPU_M14KC:
+		case CPU_74K:
+			break;
+
+		default:
 			uasm_i_ehb(p);
+			break;
+		}
 		tlbw(p);
 		return;
 	}
--
cgit v1.1

From e6de1a09a2f6a6825341e8463866553b77848ed6 Mon Sep 17 00:00:00 2001
From: "Steven J. Hill"
Date: Thu, 12 Jul 2012 17:21:31 +0000
Subject: MIPS: uasm: Add INS and EXT instructions.

These are MIPS32R2 instructions for merging and extracting bit fields
from one GPR into another.

Signed-off-by: Steven J. Hill
---
 arch/mips/mm/uasm.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)
(limited to 'arch/mips/mm')

diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 64a28e8..39b8910 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -63,11 +63,12 @@ enum opcode {
 	insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm,
 	insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
 	insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret,
-	insn_j, insn_jal, insn_jr, insn_ld, insn_ldx, insn_ll, insn_lld,
-	insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mtc0, insn_or, insn_ori,
-	insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll,
-	insn_sra, insn_srl, insn_subu, insn_sw, insn_syscall, insn_tlbp,
-	insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
+	insn_ext, insn_ins, insn_j, insn_jal, insn_jr, insn_ld, insn_ldx,
+	insn_ll, insn_lld, insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mtc0,
+	insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd,
+	insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
+	insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor,
+	insn_xori,
 };
 
 struct insn {
@@ -115,6 +116,9 @@ static struct insn insn_table[] __uasminitdata = {
 	{ insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
 	{ insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
 	{ insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
+	{ insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
+	{ insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
+	{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
 	{ insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
 	{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
 	{ insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
@@ -341,6 +345,13 @@ Ip_u2u1msbu3(op) \
 } \
 UASM_EXPORT_SYMBOL(uasm_i##op);
 
+#define I_u2u1msbdu3(op) \
+Ip_u2u1msbu3(op) \
+{ \
+	build_insn(buf, insn##op, b, a, d-1, c); \
+} \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
 #define I_u1u2(op) \
 Ip_u1u2(op) \
 { \
@@ -394,6 +405,8 @@ I_u2u1u3(_drotr)
 I_u2u1u3(_drotr32)
 I_u3u1u2(_dsubu)
 I_0(_eret)
+I_u2u1msbdu3(_ext)
+I_u2u1msbu3(_ins)
 I_u1(_j)
 I_u1(_jal)
 I_u1(_jr)
--
cgit v1.1
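
[Note: as a reading aid for the TLB handler changes that follow, here is a small C model, not taken from the patch, of what the MIPS32R2 EXT and INS instructions emitted by uasm_i_ext()/uasm_i_ins() compute: EXT zero-extends a size-bit field taken at bit "pos" of rs into rt, and INS deposits the low size bits of rs into rt at bit "pos", leaving rt's other bits intact. The "d-1" in the I_u2u1msbdu3() macro reflects that EXT encodes the field width as msbd = size-1, whereas INS (handled by the existing I_u2u1msbu3() macro) encodes msb = pos+size-1.]

#include <stdint.h>

/* C model of EXT rt, rs, pos, size (requires 0 < size and pos + size <= 32) */
static uint32_t mips_ext(uint32_t rs, unsigned int pos, unsigned int size)
{
	return (rs >> pos) & (uint32_t)(((uint64_t)1 << size) - 1);
}

/* C model of INS rt, rs, pos, size: merge a field of rs into rt */
static uint32_t mips_ins(uint32_t rt, uint32_t rs, unsigned int pos,
			 unsigned int size)
{
	uint32_t mask = (uint32_t)((((uint64_t)1 << size) - 1) << pos);

	return (rt & ~mask) | ((rs << pos) & mask);
}
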
From ff401e52100dcdc85e572d1ad376d3307b3fe28e Mon Sep 17 00:00:00 2001
From: "Steven J. Hill"
Date: Tue, 28 Aug 2012 23:20:39 -0500
Subject: MIPS: Optimise TLB handlers for MIPS32/64 R2 cores.

The EXT and INS instructions can be used to decrease code size and thus
speed up TLB handlers on MIPS32R2 and MIPS64R2 cores.

Signed-off-by: Steven J. Hill
---
 arch/mips/mm/tlbex.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
(limited to 'arch/mips/mm')

diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 22ba108..70a7008 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -933,6 +933,13 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 #endif
 	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
 	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
+
+	if (cpu_has_mips_r2) {
+		uasm_i_ext(p, tmp, tmp, PGDIR_SHIFT, (32 - PGDIR_SHIFT));
+		uasm_i_ins(p, ptr, tmp, PGD_T_LOG2, (32 - PGDIR_SHIFT));
+		return;
+	}
+
 	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
 	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
 	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
@@ -968,6 +975,15 @@ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
 static void __cpuinit
 build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
+	if (cpu_has_mips_r2) {
+		/* PTE ptr offset is obtained from BadVAddr */
+		UASM_i_MFC0(p, tmp, C0_BADVADDR);
+		UASM_i_LW(p, ptr, 0, ptr);
+		uasm_i_ext(p, tmp, tmp, PAGE_SHIFT+1, PGDIR_SHIFT-PAGE_SHIFT-1);
+		uasm_i_ins(p, ptr, tmp, PTE_T_LOG2+1, PGDIR_SHIFT-PAGE_SHIFT-1);
+		return;
+	}
+
 	/*
 	 * Bug workaround for the Nevada. It seems as if under certain
 	 * circumstances the move from cp0_context might produce a
--
cgit v1.1
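
[Note: to see why the EXT/INS form in build_get_pgde32() saves an instruction, here is a user-space sketch, not kernel code, of the two address calculations; the PGDIR_SHIFT and PGD_T_LOG2 values are illustrative. The INS variant can overwrite the index field of the base pointer instead of adding it only because the PGD table's alignment guarantees those bits of the base address are already zero.]

#include <assert.h>
#include <stdint.h>

#define PGDIR_SHIFT	22	/* illustrative: 4KB pages, two-level tables */
#define PGD_T_LOG2	2	/* illustrative: log2(sizeof(pgd_t)) */

/* pre-R2 sequence: srl / sll / addu */
static uint32_t pgd_entry_legacy(uint32_t pgd_base, uint32_t badvaddr)
{
	uint32_t idx = badvaddr >> PGDIR_SHIFT;		/* pgd index */

	return pgd_base + (idx << PGD_T_LOG2);		/* base + byte offset */
}

/* R2 sequence: ext extracts the index, ins deposits the scaled offset */
static uint32_t pgd_entry_r2(uint32_t pgd_base, uint32_t badvaddr)
{
	uint32_t idx  = (badvaddr >> PGDIR_SHIFT) &
			((1u << (32 - PGDIR_SHIFT)) - 1);		/* ext */
	uint32_t mask = ((1u << (32 - PGDIR_SHIFT)) - 1) << PGD_T_LOG2;

	return (pgd_base & ~mask) | ((idx << PGD_T_LOG2) & mask);	/* ins */
}

int main(void)
{
	uint32_t pgd_base = 0x81000000;	/* illustrative, suitably aligned */
	uint32_t badvaddr = 0x7f432120;

	assert(pgd_entry_legacy(pgd_base, badvaddr) ==
	       pgd_entry_r2(pgd_base, badvaddr));
	return 0;
}
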