Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/fault.c           |   7
-rw-r--r--  arch/powerpc/mm/hash_native_64.c  |   7
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c   |  14
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c     |  42
-rw-r--r--  arch/powerpc/mm/imalloc.c         |   2
-rw-r--r--  arch/powerpc/mm/init_32.c         |   5
-rw-r--r--  arch/powerpc/mm/lmb.c             |   4
-rw-r--r--  arch/powerpc/mm/mem.c             |  27
-rw-r--r--  arch/powerpc/mm/numa.c            | 139
-rw-r--r--  arch/powerpc/mm/pgtable_32.c      |   5
-rw-r--r--  arch/powerpc/mm/pgtable_64.c      |   2
-rw-r--r--  arch/powerpc/mm/slb.c             |  16
-rw-r--r--  arch/powerpc/mm/slb_low.S         |   6
-rw-r--r--  arch/powerpc/mm/stab.c            |  16
-rw-r--r--  arch/powerpc/mm/tlb_64.c          |   2
15 files changed, 211 insertions(+), 83 deletions(-)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 93d4fbf..a4815d3 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -81,7 +81,8 @@ static int store_updates_sp(struct pt_regs *regs)
 }
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
-static void do_dabr(struct pt_regs *regs, unsigned long error_code)
+static void do_dabr(struct pt_regs *regs, unsigned long address,
+		    unsigned long error_code)
 {
 	siginfo_t info;
 
@@ -99,7 +100,7 @@ static void do_dabr(struct pt_regs *regs, unsigned long error_code)
 	info.si_signo = SIGTRAP;
 	info.si_errno = 0;
 	info.si_code = TRAP_HWBKPT;
-	info.si_addr = (void __user *)regs->nip;
+	info.si_addr = (void __user *)address;
 	force_sig_info(SIGTRAP, &info, current);
 }
 #endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
@@ -159,7 +160,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 	if (error_code & DSISR_DABRMATCH) {
 		/* DABR match */
-		do_dabr(regs, error_code);
+		do_dabr(regs, address, error_code);
 		return 0;
 	}
 #endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index d96bcfe..33654d1 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -403,12 +403,17 @@ static void native_hpte_clear(void)
 		 */
 		hpte_v = hptep->v;
 
+		/*
+		 * Call __tlbie() here rather than tlbie() since we
+		 * already hold the native_tlbie_lock.
+		 */
 		if (hpte_v & HPTE_V_VALID) {
 			hptep->v = 0;
-			tlbie(slot2va(hpte_v, slot), MMU_PAGE_4K, 0);
+			__tlbie(slot2va(hpte_v, slot), MMU_PAGE_4K);
 		}
 	}
 
+	asm volatile("eieio; tlbsync; ptesync":::"memory");
 	spin_unlock(&native_tlbie_lock);
 	local_irq_restore(flags);
 }
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index a606504..e9d589e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -88,6 +88,7 @@ static unsigned long _SDR1;
 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 
 hpte_t *htab_address;
+unsigned long htab_size_bytes;
 unsigned long htab_hash_mask;
 int mmu_linear_psize = MMU_PAGE_4K;
 int mmu_virtual_psize = MMU_PAGE_4K;
@@ -168,7 +169,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 #ifdef CONFIG_PPC_ISERIES
 		if (_machine == PLATFORM_ISERIES_LPAR)
 			ret = iSeries_hpte_insert(hpteg, va,
-						  virt_to_abs(paddr),
+						  __pa(vaddr),
 						  tmp_mode,
 						  HPTE_V_BOLTED,
 						  psize);
@@ -368,7 +369,7 @@ static unsigned long __init htab_get_table_size(void)
 	unsigned long mem_size, rnd_mem_size, pteg_count;
 
 	/* If hash size isn't already provided by the platform, we try to
-	 * retreive it from the device-tree. If it's not there neither, we
+	 * retrieve it from the device-tree. If it's not there neither, we
 	 * calculate it now based on the total RAM size
 	 */
 	if (ppc64_pft_size == 0)
@@ -399,7 +400,7 @@ void create_section_mapping(unsigned long start, unsigned long end)
 
 void __init htab_initialize(void)
 {
-	unsigned long table, htab_size_bytes;
+	unsigned long table;
 	unsigned long pteg_count;
 	unsigned long mode_rw;
 	unsigned long base = 0, size = 0;
@@ -456,7 +457,7 @@ void __init htab_initialize(void)
 	/* create bolted the linear mapping in the hash table */
 	for (i=0; i < lmb.memory.cnt; i++) {
-		base = lmb.memory.region[i].base + KERNELBASE;
+		base = (unsigned long)__va(lmb.memory.region[i].base);
 		size = lmb.memory.region[i].size;
 
 		DBG("creating mapping for region: %lx : %lx\n",
 		    base, size);
@@ -498,8 +499,8 @@ void __init htab_initialize(void)
 		 * for either 4K or 16MB pages.
 		 */
 		if (tce_alloc_start) {
-			tce_alloc_start += KERNELBASE;
-			tce_alloc_end += KERNELBASE;
+			tce_alloc_start = (unsigned long)__va(tce_alloc_start);
+			tce_alloc_end = (unsigned long)__va(tce_alloc_end);
 
 			if (base + size >= tce_alloc_start)
 				tce_alloc_start = base + size + 1;
@@ -644,6 +645,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	DBG_LOW(" -> rc=%d\n", rc);
 	return rc;
 }
+EXPORT_SYMBOL_GPL(hash_page);
 
 void hash_preload(struct mm_struct *mm, unsigned long ea,
 		  unsigned long access, unsigned long trap)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 54131b8..b51bb28 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -549,6 +549,17 @@ fail:
 	return addr;
 }
 
+static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
+{
+	struct vm_area_struct *vma;
+
+	vma = find_vma(current->mm, addr);
+	if (!vma || ((addr + len) <= vma->vm_start))
+		return 0;
+
+	return -ENOMEM;
+}
+
 static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
 {
 	unsigned long addr = 0;
@@ -618,15 +629,28 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
 		return -EINVAL;
 
+	/* Paranoia, caller should have dealt with this */
+	BUG_ON((addr + len) < addr);
+
 	if (test_thread_flag(TIF_32BIT)) {
+		/* Paranoia, caller should have dealt with this */
+		BUG_ON((addr + len) > 0x100000000UL);
+
 		curareas = current->mm->context.low_htlb_areas;
 
-		/* First see if we can do the mapping in the existing
-		 * low areas */
+		/* First see if we can use the hint address */
+		if (addr && (htlb_check_hinted_area(addr, len) == 0)) {
+			areamask = LOW_ESID_MASK(addr, len);
+			if (open_low_hpage_areas(current->mm, areamask) == 0)
+				return addr;
+		}
+
+		/* Next see if we can map in the existing low areas */
 		addr = htlb_get_low_area(len, curareas);
 		if (addr != -ENOMEM)
 			return addr;
 
+		/* Finally go looking for areas to open */
 		lastshift = 0;
 		for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
 		     ! lastshift; areamask >>=1) {
@@ -641,12 +665,22 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	} else {
 		curareas = current->mm->context.high_htlb_areas;
 
-		/* First see if we can do the mapping in the existing
-		 * high areas */
+		/* First see if we can use the hint address */
+		/* We discourage 64-bit processes from doing hugepage
+		 * mappings below 4GB (must use MAP_FIXED) */
+		if ((addr >= 0x100000000UL)
+		    && (htlb_check_hinted_area(addr, len) == 0)) {
+			areamask = HTLB_AREA_MASK(addr, len);
+			if (open_high_hpage_areas(current->mm, areamask) == 0)
+				return addr;
+		}
+
+		/* Next see if we can map in the existing high areas */
 		addr = htlb_get_high_area(len, curareas);
 		if (addr != -ENOMEM)
 			return addr;
 
+		/* Finally go looking for areas to open */
 		lastshift = 0;
 		for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
 		     ! lastshift; areamask >>=1) {
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
index f9587bc..8b0c132 100644
--- a/arch/powerpc/mm/imalloc.c
+++ b/arch/powerpc/mm/imalloc.c
@@ -107,6 +107,7 @@ static int im_region_status(unsigned long v_addr, unsigned long size,
 		if (v_addr < (unsigned long) tmp->addr + tmp->size)
 			break;
 
+	*vm = NULL;
 	if (tmp) {
 		if (im_region_overlaps(v_addr, size, tmp))
 			return IM_REGION_OVERLAP;
@@ -127,7 +128,6 @@ static int im_region_status(unsigned long v_addr, unsigned long size,
 		}
 	}
 
-	*vm = NULL;
 	return IM_REGION_UNUSED;
 }
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 7d4b8b5..7d0d75c 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -188,6 +188,11 @@ void __init MMU_init(void)
 
 	if (ppc_md.progress)
 		ppc_md.progress("MMU:exit", 0x211);
+
+	/* From now on, btext is no longer BAT mapped if it was at all */
+#ifdef CONFIG_BOOTX_TEXT
+	btext_unmap();
+#endif
 }
 
 /* This is only called until mem_init is done. */
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index 9584608..bbe3eac 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -197,6 +197,8 @@ long __init lmb_reserve(unsigned long base, unsigned long size)
 {
 	struct lmb_region *_rgn = &(lmb.reserved);
 
+	BUG_ON(0 == size);
+
 	return lmb_add_region(_rgn, base, size);
 }
 
@@ -227,6 +229,8 @@ unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
 	long i, j;
 	unsigned long base = 0;
 
+	BUG_ON(0 == size);
+
 #ifdef CONFIG_PPC32
 	/* On 32-bit, make sure we allocate lowmem */
 	if (max_addr == LMB_ALLOC_ANYWHERE)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index ed6ed2e..550517c 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -114,19 +114,18 @@ void online_page(struct page *page)
 	num_physpages++;
 }
 
-/*
- * This works only for the non-NUMA case. Later, we'll need a lookup
- * to convert from real physical addresses to nid, that doesn't use
- * pfn_to_nid().
- */
 int __devinit add_memory(u64 start, u64 size)
 {
-	struct pglist_data *pgdata = NODE_DATA(0);
+	struct pglist_data *pgdata;
 	struct zone *zone;
+	int nid;
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
-	start += KERNELBASE;
+	nid = hot_add_scn_to_nid(start);
+	pgdata = NODE_DATA(nid);
+
+	start = __va(start);
 	create_section_mapping(start, start + size);
 
 	/* this should work for most non-highmem platforms */
@@ -436,17 +435,12 @@ void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 {
 	clear_page(page);
 
-	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-		return;
 	/*
 	 * We shouldnt have to do this, but some versions of glibc
 	 * require it (ld.so assumes zero filled pages are icache clean)
 	 * - Anton
 	 */
-
-	/* avoid an atomic op if possible */
-	if (test_bit(PG_arch_1, &pg->flags))
-		clear_bit(PG_arch_1, &pg->flags);
+	flush_dcache_page(pg);
 }
 EXPORT_SYMBOL(clear_user_page);
 
@@ -470,12 +464,7 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
 		return;
 #endif
 
-	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-		return;
-
-	/* avoid an atomic op if possible */
-	if (test_bit(PG_arch_1, &pg->flags))
-		clear_bit(PG_arch_1, &pg->flags);
+	flush_dcache_page(pg);
 }
 
 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index ba7a305..2863a91 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -37,6 +37,7 @@ EXPORT_SYMBOL(node_data);
 
 static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
 static int min_common_depth;
+static int n_mem_addr_cells, n_mem_size_cells;
 
 /*
  * We need somewhere to store start/end/node for each region until we have
@@ -254,32 +255,20 @@ static int __init find_min_common_depth(void)
 	return depth;
 }
 
-static int __init get_mem_addr_cells(void)
+static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
 {
 	struct device_node *memory = NULL;
-	int rc;
 
 	memory = of_find_node_by_type(memory, "memory");
 	if (!memory)
-		return 0; /* it won't matter */
+		panic("numa.c: No memory nodes found!");
 
-	rc = prom_n_addr_cells(memory);
-	return rc;
+	*n_addr_cells = prom_n_addr_cells(memory);
+	*n_size_cells = prom_n_size_cells(memory);
+	of_node_put(memory);
 }
 
-static int __init get_mem_size_cells(void)
-{
-	struct device_node *memory = NULL;
-	int rc;
-
-	memory = of_find_node_by_type(memory, "memory");
-	if (!memory)
-		return 0; /* it won't matter */
-	rc = prom_n_size_cells(memory);
-	return rc;
-}
-
-static unsigned long __init read_n_cells(int n, unsigned int **buf)
+static unsigned long __devinit read_n_cells(int n, unsigned int **buf)
 {
 	unsigned long result = 0;
 
@@ -386,7 +375,6 @@ static int __init parse_numa_properties(void)
 {
 	struct device_node *cpu = NULL;
 	struct device_node *memory = NULL;
-	int addr_cells, size_cells;
 	int max_domain;
 	unsigned long i;
 
@@ -425,8 +413,7 @@ static int __init parse_numa_properties(void)
 		}
 	}
 
-	addr_cells = get_mem_addr_cells();
-	size_cells = get_mem_size_cells();
+	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
 	memory = NULL;
 	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
 		unsigned long start;
@@ -436,15 +423,21 @@ static int __init parse_numa_properties(void)
 		unsigned int *memcell_buf;
 		unsigned int len;
 
-		memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
+		memcell_buf = (unsigned int *)get_property(memory,
+			"linux,usable-memory", &len);
+		if (!memcell_buf || len <= 0)
+			memcell_buf =
+				(unsigned int *)get_property(memory, "reg",
+					&len);
 		if (!memcell_buf || len <= 0)
 			continue;
 
-		ranges = memory->n_addrs;
+		/* ranges in cell */
+		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
 new_range:
 		/* these are order-sensitive, and modify the buffer pointer */
-		start = read_n_cells(addr_cells, &memcell_buf);
-		size = read_n_cells(size_cells, &memcell_buf);
+		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
+		size = read_n_cells(n_mem_size_cells, &memcell_buf);
 
 		numa_domain = of_node_numa_domain(memory);
 
@@ -497,7 +490,41 @@ static void __init setup_nonnuma(void)
 	node_set_online(0);
 }
 
-static void __init dump_numa_topology(void)
+void __init dump_numa_cpu_topology(void)
+{
+	unsigned int node;
+	unsigned int cpu, count;
+
+	if (min_common_depth == -1 || !numa_enabled)
+		return;
+
+	for_each_online_node(node) {
+		printk(KERN_INFO "Node %d CPUs:", node);
+
+		count = 0;
+		/*
+		 * If we used a CPU iterator here we would miss printing
+		 * the holes in the cpumap.
+		 */
+		for (cpu = 0; cpu < NR_CPUS; cpu++) {
+			if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
+				if (count == 0)
+					printk(" %u", cpu);
+				++count;
+			} else {
+				if (count > 1)
+					printk("-%u", cpu - 1);
+				count = 0;
+			}
+		}
+
+		if (count > 1)
+			printk("-%u", NR_CPUS - 1);
+		printk("\n");
+	}
+}
+
+static void __init dump_numa_memory_topology(void)
 {
 	unsigned int node;
 	unsigned int count;
@@ -529,7 +556,6 @@ static void __init dump_numa_topology(void)
 			printk("-0x%lx", i);
 		printk("\n");
 	}
-	return;
 }
 
 /*
@@ -591,7 +617,7 @@ void __init do_init_bootmem(void)
 	if (parse_numa_properties())
 		setup_nonnuma();
 	else
-		dump_numa_topology();
+		dump_numa_memory_topology();
 
 	register_cpu_notifier(&ppc64_numa_nb);
 
@@ -730,3 +756,60 @@ static int __init early_numa(char *p)
 	return 0;
 }
 early_param("numa", early_numa);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+/*
+ * Find the node associated with a hot added memory section. Section
+ * corresponds to a SPARSEMEM section, not an LMB. It is assumed that
+ * sections are fully contained within a single LMB.
+ */
+int hot_add_scn_to_nid(unsigned long scn_addr)
+{
+	struct device_node *memory = NULL;
+	nodemask_t nodes;
+	int numa_domain = 0;
+
+	if (!numa_enabled || (min_common_depth < 0))
+		return numa_domain;
+
+	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
+		unsigned long start, size;
+		int ranges;
+		unsigned int *memcell_buf;
+		unsigned int len;
+
+		memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
+		if (!memcell_buf || len <= 0)
+			continue;
+
+		/* ranges in cell */
+		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
+ha_new_range:
+		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
+		size = read_n_cells(n_mem_size_cells, &memcell_buf);
+		numa_domain = of_node_numa_domain(memory);
+
+		/* Domains not present at boot default to 0 */
+		if (!node_online(numa_domain))
+			numa_domain = any_online_node(NODE_MASK_ALL);
+
+		if ((scn_addr >= start) && (scn_addr < (start + size))) {
+			of_node_put(memory);
+			goto got_numa_domain;
+		}
+
+		if (--ranges)		/* process all ranges in cell */
+			goto ha_new_range;
+	}
+	BUG();	/* section address should be found above */
+
+	/* Temporary code to ensure that returned node is not empty */
+got_numa_domain:
+	nodes_setall(nodes);
+	while (NODE_DATA(numa_domain)->node_spanned_pages == 0) {
+		node_clear(numa_domain, nodes);
+		numa_domain = any_online_node(nodes);
+	}
+	return numa_domain;
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index f4e5ac1..d296eb6 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -37,6 +37,7 @@ unsigned long ioremap_base;
 unsigned long ioremap_bot;
+EXPORT_SYMBOL(ioremap_bot);	/* aka VMALLOC_END */
 int io_bat_index;
 
 #if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
@@ -153,6 +154,7 @@ ioremap64(unsigned long long addr, unsigned long size)
 {
 	return __ioremap(addr, size, _PAGE_NO_CACHE);
 }
+EXPORT_SYMBOL(ioremap64);
 
 void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
@@ -162,6 +164,7 @@ ioremap(phys_addr_t addr, unsigned long size)
 	return ioremap64(addr64, size);
 }
 #endif /* CONFIG_PHYS_64BIT */
+EXPORT_SYMBOL(ioremap);
 
 void __iomem *
 __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
@@ -247,6 +250,7 @@ __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 out:
 	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
 }
+EXPORT_SYMBOL(__ioremap);
 
 void iounmap(volatile void __iomem *addr)
 {
@@ -259,6 +263,7 @@ void iounmap(volatile void __iomem *addr)
 	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
 		vunmap((void *) (PAGE_MASK & (unsigned long)addr));
 }
+EXPORT_SYMBOL(iounmap);
 
 void __iomem *ioport_map(unsigned long port, unsigned int len)
 {
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 2ffca63..7b278d83 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -174,7 +174,7 @@ void __iomem * __ioremap(unsigned long addr, unsigned long size,
 	pa = addr & PAGE_MASK;
 	size = PAGE_ALIGN(addr + size) - pa;
 
-	if (size == 0)
+	if ((size == 0) || (pa == 0))
 		return NULL;
 
 	if (mem_init_done) {
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 60e852f..ffc8ed4 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -75,7 +75,7 @@ static void slb_flush_and_rebolt(void)
 	vflags = SLB_VSID_KERNEL | virtual_llp;
 
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
+	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
 		ksp_esid_data &= ~SLB_ESID_V;
 
 	/* We need to do this all in asm, so we're sure we don't touch
@@ -87,8 +87,8 @@ static void slb_flush_and_rebolt(void)
 		     /* Slot 2 - kernel stack */
 		     "slbmte %2,%3\n"
 		     "isync"
-		     :: "r"(mk_vsid_data(VMALLOCBASE, vflags)),
-		        "r"(mk_esid_data(VMALLOCBASE, 1)),
+		     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
+		        "r"(mk_esid_data(VMALLOC_START, 1)),
 		        "r"(mk_vsid_data(ksp_esid_data, lflags)),
 		        "r"(ksp_esid_data)
 		     : "memory");
@@ -134,14 +134,14 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	else
 		unmapped_base = TASK_UNMAPPED_BASE_USER64;
 
-	if (pc >= KERNELBASE)
+	if (is_kernel_addr(pc))
 		return;
 	slb_allocate(pc);
 
 	if (GET_ESID(pc) == GET_ESID(stack))
 		return;
 
-	if (stack >= KERNELBASE)
+	if (is_kernel_addr(stack))
 		return;
 	slb_allocate(stack);
 
@@ -149,7 +149,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
 		return;
 
-	if (unmapped_base >= KERNELBASE)
+	if (is_kernel_addr(unmapped_base))
 		return;
 	slb_allocate(unmapped_base);
 }
@@ -213,10 +213,10 @@ void slb_initialize(void)
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_slbe(KERNELBASE, lflags, 0);
+	create_slbe(PAGE_OFFSET, lflags, 0);
 
 	/* VMALLOC space has 4K pages always for now */
-	create_slbe(VMALLOCBASE, vflags, 1);
+	create_slbe(VMALLOC_START, vflags, 1);
 
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment. By the time it goes
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 950ffc5..d1acee3 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -37,9 +37,9 @@ _GLOBAL(slb_allocate_realmode)
 
 	srdi	r9,r3,60		/* get region */
 	srdi	r10,r3,28		/* get esid */
-	cmpldi	cr7,r9,0xc		/* cmp KERNELBASE for later use */
+	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */
 
-	/* r3 = address, r10 = esid, cr7 = <>KERNELBASE */
+	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
 	blt	cr7,0f			/* user or kernel? */
 
 	/* kernel address: proto-VSID = ESID */
@@ -166,7 +166,7 @@ _GLOBAL(slb_allocate_user)
 /*
  * Finish loading of an SLB entry and return
  *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <>KERNELBASE
+ * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
 slb_finish_load:
 	ASM_VSID_SCRAMBLE(r10,r9)
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 51e7951..82e4951 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -40,7 +40,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 	unsigned long entry, group, old_esid, castout_entry, i;
 	unsigned int global_entry;
 	struct stab_entry *ste, *castout_ste;
-	unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;
+	unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET;
 
 	vsid_data = vsid << STE_VSID_SHIFT;
 	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
@@ -83,7 +83,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
 		}
 
 		/* Dont cast out the first kernel segment */
-		if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
+		if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET)
 			break;
 
 		castout_entry = (castout_entry + 1) & 0xf;
@@ -122,7 +122,7 @@ static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
 	unsigned long offset;
 
 	/* Kernel or user address? */
-	if (ea >= KERNELBASE) {
+	if (is_kernel_addr(ea)) {
 		vsid = get_kernel_vsid(ea);
 	} else {
 		if ((ea >= TASK_SIZE_USER64) || (! mm))
@@ -133,7 +133,7 @@ static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
 
 	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);
 
-	if (ea < KERNELBASE) {
+	if (!is_kernel_addr(ea)) {
 		offset = __get_cpu_var(stab_cache_ptr);
 		if (offset < NR_STAB_CACHE_ENTRIES)
 			__get_cpu_var(stab_cache[offset++]) = stab_entry;
@@ -190,7 +190,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
 		     entry++, ste++) {
 			unsigned long ea;
 			ea = ste->esid_data & ESID_MASK;
-			if (ea < KERNELBASE) {
+			if (!is_kernel_addr(ea)) {
 				ste->esid_data = 0;
 			}
 		}
@@ -251,7 +251,7 @@ void stabs_alloc(void)
 			panic("Unable to allocate segment table for CPU %d.\n",
 			      cpu);
 
-		newstab += KERNELBASE;
+		newstab = (unsigned long)__va(newstab);
 
 		memset((void *)newstab, 0, HW_PAGE_SIZE);
 
@@ -270,11 +270,11 @@ void stabs_alloc(void)
  */
 void stab_initialize(unsigned long stab)
 {
-	unsigned long vsid = get_kernel_vsid(KERNELBASE);
+	unsigned long vsid = get_kernel_vsid(PAGE_OFFSET);
 	unsigned long stabreal;
 
 	asm volatile("isync; slbia; isync":::"memory");
-	make_ste(stab, GET_ESID(KERNELBASE), vsid);
+	make_ste(stab, GET_ESID(PAGE_OFFSET), vsid);
 
 	/* Order update */
 	asm volatile("sync":::"memory");
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index 859d29a..bb3afb6 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -168,7 +168,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
 		batch->mm = mm;
 		batch->psize = psize;
 	}
-	if (addr < KERNELBASE) {
+	if (!is_kernel_addr(addr)) {
 		vsid = get_vsid(mm->context.id, addr);
 		WARN_ON(vsid == 0);
 	} else