author	Russell King <rmk+kernel@arm.linux.org.uk>	2010-09-04 10:47:48 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-10-04 20:23:36 +0100
commit	f00ec48fadf5e37e7889f14cff900aa70d18b644 (patch)
tree	421cbce97167a78532aa825624f380caade3c0d2 /arch/arm/mm
parent	067173526c3bbc2eaeefcf6b7b2a9d998b9e8042 (diff)
ARM: Allow SMP kernels to boot on UP systems
UP systems do not implement all the instructions that SMP systems have, so in order to boot a SMP kernel on a UP system, we need to rewrite parts of the kernel.

Do this using an 'alternatives' scheme, where the kernel code and data is modified prior to initialization to replace the SMP instructions, thereby rendering the problematical code ineffectual. We use the linker to generate a list of 32-bit word locations and their replacement values, and run through these replacements when we detect a UP system.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
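The ALT_SMP()/ALT_UP() pairs used throughout this diff come from asm/assembler.h, which lies outside arch/arm/mm and so is not shown here. A minimal sketch of how such an alternatives scheme can be expressed; the section name and numeric label below are illustrative, not taken from this diff:

/* Sketch: ALT_SMP() places the SMP instruction in the normal text
 * and marks its address; ALT_UP() records that address plus the UP
 * replacement word in a side table for boot code to walk. */
#define ALT_SMP(instr...)			\
9998:	instr

#define ALT_UP(instr...)			\
	.pushsection ".alt.smp.init", "a"	;\
	.long	9998b				;\
	instr					;\
	.popsection

The linker script then gathers every .alt.smp.init entry into one contiguous table of (address, replacement) pairs; on a UP system the boot code rewrites each recorded location with its UP word before the patched text goes live.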
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/cache-v7.S	14
-rw-r--r--	arch/arm/mm/mmu.c	46
-rw-r--r--	arch/arm/mm/proc-v6.S	43
-rw-r--r--	arch/arm/mm/proc-v7.S	41
-rw-r--r--	arch/arm/mm/tlb-v7.S	33
5 files changed, 92 insertions, 85 deletions
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 37c8157..e8ea1a0 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -91,11 +91,8 @@ ENTRY(v7_flush_kern_cache_all)
THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
bl v7_flush_dcache_all
mov r0, #0
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable
-#else
- mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
-#endif
+ ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
+ ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
mov pc, lr
@@ -171,11 +168,8 @@ ENTRY(v7_coherent_user_range)
cmp r0, r1
blo 1b
mov r0, #0
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable
-#else
- mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
-#endif
+ ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable
+ ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB
dsb
isb
mov pc, lr
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6a3a2d0..e233581 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -310,9 +310,8 @@ static void __init build_mem_type_table(void)
cachepolicy = CPOLICY_WRITEBACK;
ecc_mask = 0;
}
-#ifdef CONFIG_SMP
- cachepolicy = CPOLICY_WRITEALLOC;
-#endif
+ if (is_smp())
+ cachepolicy = CPOLICY_WRITEALLOC;
/*
* Strip out features not present on earlier architectures.
@@ -406,13 +405,11 @@ static void __init build_mem_type_table(void)
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
-#ifndef CONFIG_SMP
/*
* Only use write-through for non-SMP systems
*/
- if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+ if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
-#endif
/*
* Enable CPU-specific coherency if supported.
@@ -436,22 +433,23 @@ static void __init build_mem_type_table(void)
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
-#ifdef CONFIG_SMP
- /*
- * Mark memory with the "shared" attribute for SMP systems
- */
- user_pgprot |= L_PTE_SHARED;
- kern_pgprot |= L_PTE_SHARED;
- vecs_pgprot |= L_PTE_SHARED;
- mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
- mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
- mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
- mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
-#endif
+ if (is_smp()) {
+ /*
+ * Mark memory with the "shared" attribute
+ * for SMP systems
+ */
+ user_pgprot |= L_PTE_SHARED;
+ kern_pgprot |= L_PTE_SHARED;
+ vecs_pgprot |= L_PTE_SHARED;
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+ }
}
/*
@@ -829,8 +827,7 @@ static void __init sanity_check_meminfo(void)
* rather difficult.
*/
reason = "with VIPT aliasing cache";
-#ifdef CONFIG_SMP
- } else if (tlb_ops_need_broadcast()) {
+ } else if (is_smp() && tlb_ops_need_broadcast()) {
/*
* kmap_high needs to occasionally flush TLB entries,
* however, if the TLB entries need to be broadcast
@@ -840,7 +837,6 @@ static void __init sanity_check_meminfo(void)
* (must not be called with irqs off)
*/
reason = "without hardware TLB ops broadcasting";
-#endif
}
if (reason) {
printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
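The is_smp() helper replacing the CONFIG_SMP conditionals above is introduced by this patch in asm/smp_plat.h, outside this diffstat. A plausible sketch of its shape, where the CONFIG_SMP_ON_UP option and the smp_on_up flag (written by the boot-time fixup) are assumptions, not visible in this diff:

/* Sketch: constant-fold the answer where the config fixes it;
 * otherwise consult a flag written during the boot-time UP fixup. */
static inline bool is_smp(void)
{
#ifndef CONFIG_SMP
	return false;			/* UP-only kernel */
#elif defined(CONFIG_SMP_ON_UP)
	extern unsigned int smp_on_up;	/* assumed: set during boot */
	return !!smp_on_up;
#else
	return true;			/* SMP-only kernel */
#endif
}

Since both #ifdef arms collapse to a compile-time constant, the compiler still eliminates the dead branch in single-configuration builds, so converting #ifdef CONFIG_SMP to if (is_smp()) costs nothing there.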
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 22aac85..b95662d 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -30,13 +30,10 @@
#define TTB_RGN_WT (2 << 3)
#define TTB_RGN_WB (3 << 3)
-#ifndef CONFIG_SMP
-#define TTB_FLAGS TTB_RGN_WBWA
-#define PMD_FLAGS PMD_SECT_WB
-#else
-#define TTB_FLAGS TTB_RGN_WBWA|TTB_S
-#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S
-#endif
+#define TTB_FLAGS_UP TTB_RGN_WBWA
+#define PMD_FLAGS_UP PMD_SECT_WB
+#define TTB_FLAGS_SMP TTB_RGN_WBWA|TTB_S
+#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
ENTRY(cpu_v6_proc_init)
mov pc, lr
@@ -97,7 +94,8 @@ ENTRY(cpu_v6_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
- orr r0, r0, #TTB_FLAGS
+ ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
+ ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
@@ -156,9 +154,11 @@ cpu_pj4_name:
*/
__v6_setup:
#ifdef CONFIG_SMP
- mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode
+ ALT_SMP(mrc p15, 0, r0, c1, c0, 1) @ Enable SMP/nAMP mode
+ ALT_UP(nop)
orr r0, r0, #0x20
- mcr p15, 0, r0, c1, c0, 1
+ ALT_SMP(mcr p15, 0, r0, c1, c0, 1)
+ ALT_UP(nop)
#endif
mov r0, #0
@@ -169,7 +169,8 @@ __v6_setup:
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r0, c2, c0, 2 @ TTB control register
- orr r4, r4, #TTB_FLAGS
+ ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
+ ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
#endif /* CONFIG_MMU */
adr r5, v6_crval
@@ -225,10 +226,16 @@ cpu_elf_name:
__v6_proc_info:
.long 0x0007b000
.long 0x0007f000
- .long PMD_TYPE_SECT | \
+ ALT_SMP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_SMP)
+ ALT_UP(.long \
+ PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \
- PMD_FLAGS
+ PMD_FLAGS_UP)
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
@@ -249,10 +256,16 @@ __v6_proc_info:
__pj4_v6_proc_info:
.long 0x560f5810
.long 0xff0ffff0
- .long PMD_TYPE_SECT | \
+ ALT_SMP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_SMP)
+ ALT_UP(.long \
+ PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \
- PMD_FLAGS
+ PMD_FLAGS_UP)
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 7563ff0..df422fe 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -30,15 +30,13 @@
#define TTB_IRGN_WT ((1 << 0) | (0 << 6))
#define TTB_IRGN_WB ((1 << 0) | (1 << 6))
-#ifndef CONFIG_SMP
/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
-#define TTB_FLAGS TTB_IRGN_WB|TTB_RGN_OC_WB
-#define PMD_FLAGS PMD_SECT_WB
-#else
+#define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB
+#define PMD_FLAGS_UP PMD_SECT_WB
+
/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
-#define TTB_FLAGS TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
-#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S
-#endif
+#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
+#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
ENTRY(cpu_v7_proc_init)
mov pc, lr
@@ -105,7 +103,8 @@ ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
- orr r0, r0, #TTB_FLAGS
+ ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
+ ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
#ifdef CONFIG_ARM_ERRATA_430973
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
@@ -188,7 +187,8 @@ cpu_v7_name:
*/
__v7_ca9mp_setup:
#ifdef CONFIG_SMP
- mrc p15, 0, r0, c1, c0, 1
+ ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
+ ALT_UP(mov r0, #(1 << 6)) @ fake it for UP
tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting
@@ -262,7 +262,8 @@ __v7_setup:
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r10, c2, c0, 2 @ TTB control register
- orr r4, r4, #TTB_FLAGS
+ ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
+ ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
mov r10, #0x1f @ domains 0, 1 = manager
mcr p15, 0, r10, c3, c0, 0 @ load domain access register
@@ -354,10 +355,16 @@ cpu_elf_name:
__v7_ca9mp_proc_info:
.long 0x410fc090 @ Required ID value
.long 0xff0ffff0 @ Mask for ID
- .long PMD_TYPE_SECT | \
+ ALT_SMP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_SMP)
+ ALT_UP(.long \
+ PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \
- PMD_FLAGS
+ PMD_FLAGS_UP)
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
@@ -380,10 +387,16 @@ __v7_ca9mp_proc_info:
__v7_proc_info:
.long 0x000f0000 @ Required ID value
.long 0x000f0000 @ Mask for ID
- .long PMD_TYPE_SECT | \
+ ALT_SMP(.long \
+ PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS_SMP)
+ ALT_UP(.long \
+ PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \
- PMD_FLAGS
+ PMD_FLAGS_UP)
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index f3f288a..53cd5b4 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -13,6 +13,7 @@
*/
#include <linux/init.h>
#include <linux/linkage.h>
+#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
@@ -41,20 +42,15 @@ ENTRY(v7wbi_flush_user_tlb_range)
orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
mov r1, r1, lsl #PAGE_SHIFT
1:
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable)
-#else
- mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate U MVA
-#endif
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
+ ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
+
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov ip, #0
-#ifdef CONFIG_SMP
- mcr p15, 0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
-#else
- mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
-#endif
+ ALT_SMP(mcr p15, 0, ip, c7, c1, 6) @ flush BTAC/BTB Inner Shareable
+ ALT_UP(mcr p15, 0, ip, c7, c5, 6) @ flush BTAC/BTB
dsb
mov pc, lr
ENDPROC(v7wbi_flush_user_tlb_range)
@@ -74,20 +70,14 @@ ENTRY(v7wbi_flush_kern_tlb_range)
mov r0, r0, lsl #PAGE_SHIFT
mov r1, r1, lsl #PAGE_SHIFT
1:
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable)
-#else
- mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate U MVA
-#endif
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
+ ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
mov r2, #0
-#ifdef CONFIG_SMP
- mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
-#else
- mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
-#endif
+ ALT_SMP(mcr p15, 0, r2, c7, c1, 6) @ flush BTAC/BTB Inner Shareable
+ ALT_UP(mcr p15, 0, r2, c7, c5, 6) @ flush BTAC/BTB
dsb
isb
mov pc, lr
@@ -99,5 +89,6 @@ ENDPROC(v7wbi_flush_kern_tlb_range)
ENTRY(v7wbi_tlb_fns)
.long v7wbi_flush_user_tlb_range
.long v7wbi_flush_kern_tlb_range
- .long v7wbi_tlb_flags
+ ALT_SMP(.long v7wbi_tlb_flags_smp)
+ ALT_UP(.long v7wbi_tlb_flags_up)
.size v7wbi_tlb_fns, . - v7wbi_tlb_fns
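For completeness: the UP fixup itself lives under arch/arm/kernel and is outside this diffstat. Conceptually it just walks the table emitted by ALT_UP(); a sketch in C with illustrative symbol names (the real pass runs in early assembly, before the patched text is ever executed):

/* Sketch: each table entry is a pair, the address to patch followed
 * by the replacement word. On a UP system, overwrite every recorded
 * SMP instruction with its UP variant. */
extern unsigned long __smpalt_begin[], __smpalt_end[];

static void __init fixup_smp_on_up(void)
{
	unsigned long *entry;

	for (entry = __smpalt_begin; entry < __smpalt_end; entry += 2) {
		unsigned long *insn = (unsigned long *)entry[0];
		*insn = entry[1];	/* install the UP word */
	}
	/* The I-cache must be flushed afterwards so the CPU executes
	 * the patched text rather than stale SMP instructions. */
}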