Diffstat (limited to 'arch/sparc64/mm/init.c')
-rw-r--r--  arch/sparc64/mm/init.c  441
1 file changed, 206 insertions(+), 235 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 9f6ca62..0d2e967 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -21,6 +21,7 @@
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
+#include <linux/sort.h>
#include <asm/head.h>
#include <asm/system.h>
@@ -41,7 +42,72 @@
extern void device_scan(void);
-struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+#define MAX_BANKS 32
+
+static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
+static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
+static int pavail_ents __initdata;
+static int pavail_rescan_ents __initdata;
+
+static int cmp_p64(const void *a, const void *b)
+{
+ const struct linux_prom64_registers *x = a, *y = b;
+
+ if (x->phys_addr > y->phys_addr)
+ return 1;
+ if (x->phys_addr < y->phys_addr)
+ return -1;
+ return 0;
+}
+
+static void __init read_obp_memory(const char *property,
+ struct linux_prom64_registers *regs,
+ int *num_ents)
+{
+ int node = prom_finddevice("/memory");
+ int prop_size = prom_getproplen(node, property);
+ int ents, ret, i;
+
+ ents = prop_size / sizeof(struct linux_prom64_registers);
+ if (ents > MAX_BANKS) {
+ prom_printf("The machine has more %s property entries than "
+ "this kernel can support (%d).\n",
+ property, MAX_BANKS);
+ prom_halt();
+ }
+
+ ret = prom_getproperty(node, property, (char *) regs, prop_size);
+ if (ret == -1) {
+ prom_printf("Couldn't get %s property from /memory.\n");
+ prom_halt();
+ }
+
+ *num_ents = ents;
+
+ /* Sanitize what we got from the firmware, by page aligning
+ * everything.
+ */
+ for (i = 0; i < ents; i++) {
+ unsigned long base, size;
+
+ base = regs[i].phys_addr;
+ size = regs[i].reg_size;
+
+ size &= PAGE_MASK;
+ if (base & ~PAGE_MASK) {
+ unsigned long new_base = PAGE_ALIGN(base);
+
+ size -= new_base - base;
+ if ((long) size < 0L)
+ size = 0UL;
+ base = new_base;
+ }
+ regs[i].phys_addr = base;
+ regs[i].reg_size = size;
+ }
+ sort(regs, ents, sizeof(struct linux_prom64_registers),
+ cmp_p64, NULL);
+}
unsigned long *sparc64_valid_addr_bitmap __read_mostly;
@@ -67,6 +133,12 @@ extern unsigned int sparc_ramdisk_size;
struct page *mem_map_zero __read_mostly;
+unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
+
+unsigned long sparc64_kern_pri_context __read_mostly;
+unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
+unsigned long sparc64_kern_sec_context __read_mostly;
+
int bigkernel = 0;
/* XXX Tune this... */
@@ -296,6 +368,7 @@ struct linux_prom_translation {
unsigned long data;
};
static struct linux_prom_translation prom_trans[512] __initdata;
+static unsigned int prom_trans_ents __initdata;
extern unsigned long prom_boot_page;
extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
@@ -309,57 +382,7 @@ unsigned long kern_locked_tte_data;
unsigned long prom_pmd_phys __read_mostly;
unsigned int swapper_pgd_zero __read_mostly;
-/* Allocate power-of-2 aligned chunks from the end of the
- * kernel image. Return physical address.
- */
-static inline unsigned long early_alloc_phys(unsigned long size)
-{
- unsigned long base;
-
- BUILD_BUG_ON(size & (size - 1));
-
- kern_size = (kern_size + (size - 1)) & ~(size - 1);
- base = kern_base + kern_size;
- kern_size += size;
-
- return base;
-}
-
-static inline unsigned long load_phys32(unsigned long pa)
-{
- unsigned long val;
-
- __asm__ __volatile__("lduwa [%1] %2, %0"
- : "=&r" (val)
- : "r" (pa), "i" (ASI_PHYS_USE_EC));
-
- return val;
-}
-
-static inline unsigned long load_phys64(unsigned long pa)
-{
- unsigned long val;
-
- __asm__ __volatile__("ldxa [%1] %2, %0"
- : "=&r" (val)
- : "r" (pa), "i" (ASI_PHYS_USE_EC));
-
- return val;
-}
-
-static inline void store_phys32(unsigned long pa, unsigned long val)
-{
- __asm__ __volatile__("stwa %0, [%1] %2"
- : /* no outputs */
- : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
-}
-
-static inline void store_phys64(unsigned long pa, unsigned long val)
-{
- __asm__ __volatile__("stxa %0, [%1] %2"
- : /* no outputs */
- : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
-}
+static pmd_t *prompmd __read_mostly;
#define BASE_PAGE_SIZE 8192
@@ -369,34 +392,28 @@ static inline void store_phys64(unsigned long pa, unsigned long val)
*/
unsigned long prom_virt_to_phys(unsigned long promva, int *error)
{
- unsigned long pmd_phys = (prom_pmd_phys +
- ((promva >> 23) & 0x7ff) * sizeof(pmd_t));
- unsigned long pte_phys;
- pmd_t pmd_ent;
- pte_t pte_ent;
+ pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
+ pte_t *ptep;
unsigned long base;
- pmd_val(pmd_ent) = load_phys32(pmd_phys);
- if (pmd_none(pmd_ent)) {
+ if (pmd_none(*pmdp)) {
if (error)
*error = 1;
return 0;
}
-
- pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
- pte_phys += ((promva >> 13) & 0x3ff) * sizeof(pte_t);
- pte_val(pte_ent) = load_phys64(pte_phys);
- if (!pte_present(pte_ent)) {
+ ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
+ if (!pte_present(*ptep)) {
if (error)
*error = 1;
return 0;
}
if (error) {
*error = 0;
- return pte_val(pte_ent);
+ return pte_val(*ptep);
}
- base = pte_val(pte_ent) & _PAGE_PADDR;
- return (base + (promva & (BASE_PAGE_SIZE - 1)));
+ base = pte_val(*ptep) & _PAGE_PADDR;
+
+ return base + (promva & (BASE_PAGE_SIZE - 1));
}
/* The obp translations are saved based on 8k pagesize, since obp can
@@ -409,25 +426,20 @@ static void __init build_obp_range(unsigned long start, unsigned long end, unsig
unsigned long vaddr;
for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) {
- unsigned long val, pte_phys, pmd_phys;
- pmd_t pmd_ent;
- int i;
-
- pmd_phys = (prom_pmd_phys +
- (((vaddr >> 23) & 0x7ff) * sizeof(pmd_t)));
- pmd_val(pmd_ent) = load_phys32(pmd_phys);
- if (pmd_none(pmd_ent)) {
- pte_phys = early_alloc_phys(BASE_PAGE_SIZE);
-
- for (i = 0; i < BASE_PAGE_SIZE / sizeof(pte_t); i++)
- store_phys64(pte_phys+i*sizeof(pte_t),0);
+ unsigned long val;
+ pmd_t *pmd;
+ pte_t *pte;
- pmd_val(pmd_ent) = pte_phys >> 11UL;
- store_phys32(pmd_phys, pmd_val(pmd_ent));
+ pmd = prompmd + ((vaddr >> 23) & 0x7ff);
+ if (pmd_none(*pmd)) {
+ pte = __alloc_bootmem(BASE_PAGE_SIZE, BASE_PAGE_SIZE,
+ PAGE_SIZE);
+ if (!pte)
+ prom_halt();
+ memset(pte, 0, BASE_PAGE_SIZE);
+ pmd_set(pmd, pte);
}
-
- pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
- pte_phys += (((vaddr >> 13) & 0x3ff) * sizeof(pte_t));
+ pte = (pte_t *) __pmd_page(*pmd) + ((vaddr >> 13) & 0x3ff);
val = data;
@@ -435,7 +447,8 @@ static void __init build_obp_range(unsigned long start, unsigned long end, unsig
if (tlb_type == spitfire)
val &= ~0x0003fe0000000000UL;
- store_phys64(pte_phys, val | _PAGE_MODIFIED);
+ set_pte_at(&init_mm, vaddr, pte,
+ __pte(val | _PAGE_MODIFIED));
data += BASE_PAGE_SIZE;
}
@@ -448,13 +461,17 @@ static inline int in_obp_range(unsigned long vaddr)
}
#define OBP_PMD_SIZE 2048
-static void __init build_obp_pgtable(int prom_trans_ents)
+static void __init build_obp_pgtable(void)
{
unsigned long i;
- prom_pmd_phys = early_alloc_phys(OBP_PMD_SIZE);
- for (i = 0; i < OBP_PMD_SIZE; i += 4)
- store_phys32(prom_pmd_phys + i, 0);
+ prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, PAGE_SIZE);
+ if (!prompmd)
+ prom_halt();
+
+ memset(prompmd, 0, OBP_PMD_SIZE);
+
+ prom_pmd_phys = __pa(prompmd);
for (i = 0; i < prom_trans_ents; i++) {
unsigned long start, end;
@@ -474,7 +491,7 @@ static void __init build_obp_pgtable(int prom_trans_ents)
/* Read OBP translations property into 'prom_trans[]'.
* Return the number of entries.
*/
-static int __init read_obp_translations(void)
+static void __init read_obp_translations(void)
{
int n, node;
@@ -495,8 +512,10 @@ static int __init read_obp_translations(void)
prom_printf("prom_mappings: Couldn't get property.\n");
prom_halt();
}
+
n = n / sizeof(struct linux_prom_translation);
- return n;
+
+ prom_trans_ents = n;
}
static void __init remap_kernel(void)
@@ -516,28 +535,38 @@ static void __init remap_kernel(void)
prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
if (bigkernel) {
- prom_dtlb_load(tlb_ent - 1,
+ tlb_ent -= 1;
+ prom_dtlb_load(tlb_ent,
tte_data + 0x400000,
tte_vaddr + 0x400000);
- prom_itlb_load(tlb_ent - 1,
+ prom_itlb_load(tlb_ent,
tte_data + 0x400000,
tte_vaddr + 0x400000);
}
+ sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
+ if (tlb_type == cheetah_plus) {
+ sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
+ CTX_CHEETAH_PLUS_NUC);
+ sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
+ sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
+ }
}
-static void __init inherit_prom_mappings(void)
-{
- int n;
- n = read_obp_translations();
- build_obp_pgtable(n);
+static void __init inherit_prom_mappings_pre(void)
+{
+ read_obp_translations();
/* Now fixup OBP's idea about where we really are mapped. */
prom_printf("Remapping the kernel... ");
remap_kernel();
prom_printf("done.\n");
+}
+static void __init inherit_prom_mappings_post(void)
+{
+ build_obp_pgtable();
register_prom_callbacks();
}
@@ -722,8 +751,8 @@ void inherit_locked_prom_mappings(int save_p)
}
}
if (tlb_type == spitfire) {
- int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
- for (i = 0; i < high; i++) {
+ int high = sparc64_highest_unlocked_tlb_ent;
+ for (i = 0; i <= high; i++) {
unsigned long data;
/* Spitfire Errata #32 workaround */
@@ -811,9 +840,9 @@ void inherit_locked_prom_mappings(int save_p)
}
}
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
- int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;
+ int high = sparc64_highest_unlocked_tlb_ent;
- for (i = 0; i < high; i++) {
+ for (i = 0; i <= high; i++) {
unsigned long data;
data = cheetah_get_ldtlb_data(i);
@@ -1206,14 +1235,14 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
int i;
#ifdef CONFIG_DEBUG_BOOTMEM
- prom_printf("bootmem_init: Scan sp_banks, ");
+ prom_printf("bootmem_init: Scan pavail, ");
#endif
bytes_avail = 0UL;
- for (i = 0; sp_banks[i].num_bytes != 0; i++) {
- end_of_phys_memory = sp_banks[i].base_addr +
- sp_banks[i].num_bytes;
- bytes_avail += sp_banks[i].num_bytes;
+ for (i = 0; i < pavail_ents; i++) {
+ end_of_phys_memory = pavail[i].phys_addr +
+ pavail[i].reg_size;
+ bytes_avail += pavail[i].reg_size;
if (cmdline_memory_size) {
if (bytes_avail > cmdline_memory_size) {
unsigned long slack = bytes_avail - cmdline_memory_size;
@@ -1221,12 +1250,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
bytes_avail -= slack;
end_of_phys_memory -= slack;
- sp_banks[i].num_bytes -= slack;
- if (sp_banks[i].num_bytes == 0) {
- sp_banks[i].base_addr = 0xdeadbeef;
+ pavail[i].reg_size -= slack;
+ if ((long)pavail[i].reg_size <= 0L) {
+ pavail[i].phys_addr = 0xdeadbeefUL;
+ pavail[i].reg_size = 0UL;
+ pavail_ents = i;
} else {
- sp_banks[i+1].num_bytes = 0;
- sp_banks[i+1].base_addr = 0xdeadbeef;
+ pavail[i+1].reg_size = 0UL;
+ pavail[i+1].phys_addr = 0xdeadbeefUL;
+ pavail_ents = i + 1;
}
break;
}
@@ -1280,12 +1312,12 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
/* Now register the available physical memory with the
* allocator.
*/
- for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+ for (i = 0; i < pavail_ents; i++) {
#ifdef CONFIG_DEBUG_BOOTMEM
- prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
- i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
+ prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
+ i, pavail[i].phys_addr, pavail[i].reg_size);
#endif
- free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
+ free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -1334,7 +1366,7 @@ static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend,
unsigned long alloc_bytes = 0UL;
if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
- prom_printf("kernel_map: Unaligned sp_banks[%lx:%lx]\n",
+ prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
vstart, vend);
prom_halt();
}
@@ -1381,23 +1413,24 @@ static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend,
return alloc_bytes;
}
-extern struct linux_mlist_p1275 *prom_ptot_ptr;
+static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
+static int pall_ents __initdata;
+
extern unsigned int kvmap_linear_patch[1];
static void __init kernel_physical_mapping_init(void)
{
- struct linux_mlist_p1275 *p = prom_ptot_ptr;
- unsigned long mem_alloced = 0UL;
+ unsigned long i, mem_alloced = 0UL;
+
+ read_obp_memory("reg", &pall[0], &pall_ents);
- while (p) {
+ for (i = 0; i < pall_ents; i++) {
unsigned long phys_start, phys_end;
- phys_start = p->start_adr;
- phys_end = phys_start + p->num_bytes;
+ phys_start = pall[i].phys_addr;
+ phys_end = phys_start + pall[i].reg_size;
mem_alloced += kernel_map_range(phys_start, phys_end,
PAGE_KERNEL);
-
- p = p->theres_more;
}
printk("Allocated %ld bytes for kernel page tables.\n",
@@ -1425,6 +1458,18 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
}
#endif
+unsigned long __init find_ecache_flush_span(unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < pavail_ents; i++) {
+ if (pavail[i].reg_size >= size)
+ return pavail[i].phys_addr;
+ }
+
+ return ~0UL;
+}
+
/* paging_init() sets up the page tables */
extern void cheetah_ecache_flush_init(void);
@@ -1435,7 +1480,19 @@ pgd_t swapper_pg_dir[2048];
void __init paging_init(void)
{
unsigned long end_pfn, pages_avail, shift;
- unsigned long real_end;
+ unsigned long real_end, i;
+
+ /* Find available physical memory... */
+ read_obp_memory("available", &pavail[0], &pavail_ents);
+
+ phys_base = 0xffffffffffffffffUL;
+ for (i = 0; i < pavail_ents; i++)
+ phys_base = min(phys_base, pavail[i].phys_addr);
+
+ pfn_base = phys_base >> PAGE_SHIFT;
+
+ kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+ kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
set_bit(0, mmu_context_bmap);
@@ -1462,8 +1519,7 @@ void __init paging_init(void)
swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
- /* Inherit non-locked OBP mappings. */
- inherit_prom_mappings();
+ inherit_prom_mappings_pre();
/* Ok, we can use our TLB miss and window trap handlers safely.
* We need to do a quick peek here to see if we are on StarFire
@@ -1474,15 +1530,23 @@ void __init paging_init(void)
extern void setup_tba(int);
setup_tba(this_is_starfire);
}
-
- inherit_locked_prom_mappings(1);
-
__flush_tlb_all();
+ /* Everything from this point forward, until we are done with
+ * inherit_prom_mappings_post(), must complete successfully
+ * without calling into the firmware. The firmware page tables
+ * have not been built, but we are running on the Linux kernel's
+ * trap table.
+ */
+
/* Setup bootmem... */
pages_avail = 0;
last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
+ inherit_prom_mappings_post();
+
+ inherit_locked_prom_mappings(1);
+
#ifdef CONFIG_DEBUG_PAGEALLOC
kernel_physical_mapping_init();
#endif
@@ -1507,128 +1571,35 @@ void __init paging_init(void)
device_scan();
}
-/* Ok, it seems that the prom can allocate some more memory chunks
- * as a side effect of some prom calls we perform during the
- * boot sequence. My most likely theory is that it is from the
- * prom_set_traptable() call, and OBP is allocating a scratchpad
- * for saving client program register state etc.
- */
-static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
-{
- int swapi = 0;
- int i, mitr;
- unsigned long tmpaddr, tmpsize;
- unsigned long lowest;
-
- for (i = 0; thislist[i].theres_more != 0; i++) {
- lowest = thislist[i].start_adr;
- for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
- if (thislist[mitr].start_adr < lowest) {
- lowest = thislist[mitr].start_adr;
- swapi = mitr;
- }
- if (lowest == thislist[i].start_adr)
- continue;
- tmpaddr = thislist[swapi].start_adr;
- tmpsize = thislist[swapi].num_bytes;
- for (mitr = swapi; mitr > i; mitr--) {
- thislist[mitr].start_adr = thislist[mitr-1].start_adr;
- thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
- }
- thislist[i].start_adr = tmpaddr;
- thislist[i].num_bytes = tmpsize;
- }
-}
-
-void __init rescan_sp_banks(void)
-{
- struct linux_prom64_registers memlist[64];
- struct linux_mlist_p1275 avail[64], *mlist;
- unsigned long bytes, base_paddr;
- int num_regs, node = prom_finddevice("/memory");
- int i;
-
- num_regs = prom_getproperty(node, "available",
- (char *) memlist, sizeof(memlist));
- num_regs = (num_regs / sizeof(struct linux_prom64_registers));
- for (i = 0; i < num_regs; i++) {
- avail[i].start_adr = memlist[i].phys_addr;
- avail[i].num_bytes = memlist[i].reg_size;
- avail[i].theres_more = &avail[i + 1];
- }
- avail[i - 1].theres_more = NULL;
- sort_memlist(avail);
-
- mlist = &avail[0];
- i = 0;
- bytes = mlist->num_bytes;
- base_paddr = mlist->start_adr;
-
- sp_banks[0].base_addr = base_paddr;
- sp_banks[0].num_bytes = bytes;
-
- while (mlist->theres_more != NULL){
- i++;
- mlist = mlist->theres_more;
- bytes = mlist->num_bytes;
- if (i >= SPARC_PHYS_BANKS-1) {
- printk ("The machine has more banks than "
- "this kernel can support\n"
- "Increase the SPARC_PHYS_BANKS "
- "setting (currently %d)\n",
- SPARC_PHYS_BANKS);
- i = SPARC_PHYS_BANKS-1;
- break;
- }
-
- sp_banks[i].base_addr = mlist->start_adr;
- sp_banks[i].num_bytes = mlist->num_bytes;
- }
-
- i++;
- sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
- sp_banks[i].num_bytes = 0;
-
- for (i = 0; sp_banks[i].num_bytes != 0; i++)
- sp_banks[i].num_bytes &= PAGE_MASK;
-}
-
static void __init taint_real_pages(void)
{
- struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
int i;
- for (i = 0; i < SPARC_PHYS_BANKS; i++) {
- saved_sp_banks[i].base_addr =
- sp_banks[i].base_addr;
- saved_sp_banks[i].num_bytes =
- sp_banks[i].num_bytes;
- }
-
- rescan_sp_banks();
+ read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
- /* Find changes discovered in the sp_bank rescan and
+ /* Find changes discovered in the physmem available rescan and
* reserve the lost portions in the bootmem maps.
*/
- for (i = 0; saved_sp_banks[i].num_bytes; i++) {
+ for (i = 0; i < pavail_ents; i++) {
unsigned long old_start, old_end;
- old_start = saved_sp_banks[i].base_addr;
+ old_start = pavail[i].phys_addr;
old_end = old_start +
- saved_sp_banks[i].num_bytes;
+ pavail[i].reg_size;
while (old_start < old_end) {
int n;
- for (n = 0; sp_banks[n].num_bytes; n++) {
+ for (n = 0; n < pavail_rescan_ents; n++) {
unsigned long new_start, new_end;
- new_start = sp_banks[n].base_addr;
- new_end = new_start + sp_banks[n].num_bytes;
+ new_start = pavail_rescan[n].phys_addr;
+ new_end = new_start +
+ pavail_rescan[n].reg_size;
if (new_start <= old_start &&
new_end >= (old_start + PAGE_SIZE)) {
- set_bit (old_start >> 22,
- sparc64_valid_addr_bitmap);
+ set_bit(old_start >> 22,
+ sparc64_valid_addr_bitmap);
goto do_next_page;
}
}