author		Thomas Gleixner <tglx@linutronix.de>	2008-01-30 13:34:09 +0100
committer	Ingo Molnar <mingo@elte.hu>		2008-01-30 13:34:09 +0100
commit		0879750f5d75dee0546316b7b0e83fb6cd258ad7 (patch)
tree		76190c94a6e3abc12964fd6a4835e0a1f5019fe5 /arch/x86/mm/pageattr.c
parent		86f03989d99cfa2e1216cdd7aa996852236909cf (diff)
x86: cpa cleanup the 64-bit alias math
Cleanup the address calculations, which are necessary to identify the
high/low alias mappings of the kernel on 64 bit machines. Instead of
calling __pa/__va back and forth, calculate the physical address once
and base the other calculations on it. Add understandable constants so
we can use the already available within() helper. Also add comments,
which help mere mortals to understand what this code does.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
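For context, within() is the small range-check helper already available in
arch/x86/mm/pageattr.c which the new HIGH_MAP_* constants make usable for the
alias checks below. A minimal sketch of its shape (the in-tree definition may
differ in detail):

	/* true if addr lies in the half-open range [start, end) */
	static inline int
	within(unsigned long addr, unsigned long start, unsigned long end)
	{
		return addr >= start && addr < end;
	}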
Diffstat (limited to 'arch/x86/mm/pageattr.c')
-rw-r--r--	arch/x86/mm/pageattr.c	56
1 file changed, 30 insertions(+), 26 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 532a40b..ec07c18 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -305,49 +305,53 @@ repeat:
  * Modules and drivers should use the set_memory_* APIs instead.
  */
+#define HIGH_MAP_START	__START_KERNEL_map
+#define HIGH_MAP_END	(__START_KERNEL_map + KERNEL_TEXT_SIZE)
+
 static int
 change_page_attr_addr(unsigned long address, pgprot_t mask_set,
-		      pgprot_t mask_clr)
+		      pgprot_t mask_clr)
 {
-	int err = 0, kernel_map = 0;
-	unsigned long pfn;
+	unsigned long phys_addr = __pa(address);
+	unsigned long pfn = phys_addr >> PAGE_SHIFT;
+	int err;
 #ifdef CONFIG_X86_64
-	if (address >= __START_KERNEL_map &&
-	    address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
-
-		address = (unsigned long)__va(__pa((void *)address));
-		kernel_map = 1;
-	}
+	/*
+	 * If we are inside the high mapped kernel range, then we
+	 * fixup the low mapping first. __va() returns the virtual
+	 * address in the linear mapping:
+	 */
+	if (within(address, HIGH_MAP_START, HIGH_MAP_END))
+		address = (unsigned long) __va(phys_addr);
 #endif
-	pfn = __pa(address) >> PAGE_SHIFT;
-
-	if (!kernel_map || 1) {
-		err = __change_page_attr(address, pfn, mask_set, mask_clr);
-		if (err)
-			return err;
-	}
+	err = __change_page_attr(address, pfn, mask_set, mask_clr);
+	if (err)
+		return err;
 #ifdef CONFIG_X86_64
 	/*
-	 * Handle kernel mapping too which aliases part of
-	 * lowmem:
+	 * If the physical address is inside the kernel map, we need
+	 * to touch the high mapped kernel as well:
 	 */
-	if (__pa(address) < KERNEL_TEXT_SIZE) {
-		unsigned long addr2;
-
-		addr2 = __pa(address) + __START_KERNEL_map - phys_base;
+	if (within(phys_addr, 0, KERNEL_TEXT_SIZE)) {
+		/*
+		 * Calc the high mapping address. See __phys_addr()
+		 * for the non obvious details.
+		 */
+		address = phys_addr + HIGH_MAP_START - phys_base;
 		/* Make sure the kernel mappings stay executable */
 		pgprot_val(mask_clr) |= _PAGE_NX;
+
 		/*
-		 * Our high aliases are imprecise, so do not propagate
-		 * failures back to users:
+		 * Our high aliases are imprecise, because we check
+		 * everything between 0 and KERNEL_TEXT_SIZE, so do
+		 * not propagate lookup failures back to users:
 		 */
-		__change_page_attr(addr2, pfn, mask_set, mask_clr);
+		__change_page_attr(address, pfn, mask_set, mask_clr);
 	}
 #endif
-
	return err;
 }
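
As a side note on the arithmetic above: on 64-bit, __phys_addr() translates a
high-mapping virtual address V as phys = V - __START_KERNEL_map + phys_base,
so the patch simply inverts that to reach the high alias of a physical
address. A hypothetical helper, purely for illustration (not part of the
patch):

	/* Illustration only: high (kernel text) alias of a physical address,
	 * the inverse of the 64-bit __phys_addr() translation. */
	static unsigned long high_alias(unsigned long phys_addr)
	{
		return phys_addr + HIGH_MAP_START - phys_base;
	}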