author     Stephen Rothwell <sfr@canb.auug.org.au>    2005-10-17 11:50:32 +1000
committer  Stephen Rothwell <sfr@canb.auug.org.au>    2005-10-17 11:50:32 +1000
commit     7dffb72028bfd909ac51a1546d182de2df4d2426
tree       c465c35642872973543f710f8aa06b955b84f7e5   /arch
parent     cf764855620aa1aa5b134687ca18b841ac9be4c7
ppc32: use L1_CACHE_SHIFT/L1_CACHE_BYTES
instead of L1_CACHE_LINE_SIZE and LG_L1_CACHE_LINE_SIZE
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
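For context: the replacement names are the generic kernel macros, where L1_CACHE_SHIFT is the log2 of the L1 line size and L1_CACHE_BYTES is (1 << L1_CACHE_SHIFT), so every LG_L1_CACHE_LINE_SIZE / L1_CACHE_LINE_SIZE use maps over one-for-one. The C sketch below illustrates the cache-line-walking arithmetic the converted assembly routines follow (align the start down, round the length up, step one line at a time); the helper name and the fallback values are illustrative, not taken from the kernel sources.

```c
#include <stddef.h>
#include <stdint.h>

/* Illustrative fallbacks only; the kernel gets these from asm/cache.h,
 * where L1_CACHE_BYTES is defined as (1 << L1_CACHE_SHIFT). */
#ifndef L1_CACHE_SHIFT
#define L1_CACHE_SHIFT 5
#endif
#ifndef L1_CACHE_BYTES
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#endif

/*
 * Walk [start, stop) one cache line at a time -- the same arithmetic the
 * flush/clean/invalidate routines in this patch do with andc/subf/add/srwi.
 * (Hypothetical helper for illustration; not part of the patch.)
 */
void walk_cache_lines(uintptr_t start, uintptr_t stop,
                      void (*line_op)(uintptr_t line))
{
    uintptr_t addr  = start & ~(uintptr_t)(L1_CACHE_BYTES - 1); /* align down */
    size_t  nlines  = (stop - addr + L1_CACHE_BYTES - 1) >> L1_CACHE_SHIFT;

    while (nlines--) {
        line_op(addr);              /* e.g. dcbst/dcbf/icbi on this line */
        addr += L1_CACHE_BYTES;
    }
}
```

The two C users in the diff (katana.c and mv64x60.c) use the byte value directly: L1_CACHE_BYTES >> 2 converts the line size into the 32-bit-word units that the PCI cache-line-size config register expects.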
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/head_32.S            |  2
-rw-r--r--  arch/powerpc/kernel/misc_32.S            | 58
-rw-r--r--  arch/powerpc/lib/copy_32.S               | 24
-rw-r--r--  arch/powerpc/platforms/powermac/sleep.S  |  4
-rw-r--r--  arch/ppc/kernel/cpu_setup_6xx.S          |  4
-rw-r--r--  arch/ppc/kernel/cpu_setup_power4.S       |  4
-rw-r--r--  arch/ppc/kernel/head.S                   |  2
-rw-r--r--  arch/ppc/kernel/l2cr.S                   |  2
-rw-r--r--  arch/ppc/kernel/misc.S                   | 58
-rw-r--r--  arch/ppc/lib/string.S                    | 24
-rw-r--r--  arch/ppc/platforms/katana.c              |  2
-rw-r--r--  arch/ppc/platforms/pmac_sleep.S          |  4
-rw-r--r--  arch/ppc/syslib/mv64x60.c                |  2

13 files changed, 95 insertions(+), 95 deletions(-)
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 108e78e..d9b063f 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -837,7 +837,7 @@ relocate_kernel: copy_and_flush: addi r5,r5,-4 addi r6,r6,-4 -4: li r0,L1_CACHE_LINE_SIZE/4 +4: li r0,L1_CACHE_BYTES/4 mtctr r0 3: addi r6,r6,4 /* copy a cache line */ lwzx r0,r6,r4 diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 2727410..0b0e908 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -496,21 +496,21 @@ _GLOBAL(flush_icache_range) BEGIN_FTR_SECTION blr /* for 601, do nothing */ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) - li r5,L1_CACHE_LINE_SIZE-1 + li r5,L1_CACHE_BYTES-1 andc r3,r3,r5 subf r4,r3,r4 add r4,r4,r5 - srwi. r4,r4,LG_L1_CACHE_LINE_SIZE + srwi. r4,r4,L1_CACHE_SHIFT beqlr mtctr r4 mr r6,r3 1: dcbst 0,r3 - addi r3,r3,L1_CACHE_LINE_SIZE + addi r3,r3,L1_CACHE_BYTES bdnz 1b sync /* wait for dcbst's to get to ram */ mtctr r4 2: icbi 0,r6 - addi r6,r6,L1_CACHE_LINE_SIZE + addi r6,r6,L1_CACHE_BYTES bdnz 2b sync /* additional sync needed on g4 */ isync @@ -523,16 +523,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) * clean_dcache_range(unsigned long start, unsigned long stop) */ _GLOBAL(clean_dcache_range) - li r5,L1_CACHE_LINE_SIZE-1 + li r5,L1_CACHE_BYTES-1 andc r3,r3,r5 subf r4,r3,r4 add r4,r4,r5 - srwi. r4,r4,LG_L1_CACHE_LINE_SIZE + srwi. r4,r4,L1_CACHE_SHIFT beqlr mtctr r4 1: dcbst 0,r3 - addi r3,r3,L1_CACHE_LINE_SIZE + addi r3,r3,L1_CACHE_BYTES bdnz 1b sync /* wait for dcbst's to get to ram */ blr @@ -544,16 +544,16 @@ _GLOBAL(clean_dcache_range) * flush_dcache_range(unsigned long start, unsigned long stop) */ _GLOBAL(flush_dcache_range) - li r5,L1_CACHE_LINE_SIZE-1 + li r5,L1_CACHE_BYTES-1 andc r3,r3,r5 subf r4,r3,r4 add r4,r4,r5 - srwi. r4,r4,LG_L1_CACHE_LINE_SIZE + srwi. r4,r4,L1_CACHE_SHIFT beqlr mtctr r4 1: dcbf 0,r3 - addi r3,r3,L1_CACHE_LINE_SIZE + addi r3,r3,L1_CACHE_BYTES bdnz 1b sync /* wait for dcbst's to get to ram */ blr @@ -566,16 +566,16 @@ _GLOBAL(flush_dcache_range) * invalidate_dcache_range(unsigned long start, unsigned long stop) */ _GLOBAL(invalidate_dcache_range) - li r5,L1_CACHE_LINE_SIZE-1 + li r5,L1_CACHE_BYTES-1 andc r3,r3,r5 subf r4,r3,r4 add r4,r4,r5 - srwi. r4,r4,LG_L1_CACHE_LINE_SIZE + srwi. 
r4,r4,L1_CACHE_SHIFT beqlr mtctr r4 1: dcbi 0,r3 - addi r3,r3,L1_CACHE_LINE_SIZE + addi r3,r3,L1_CACHE_BYTES bdnz 1b sync /* wait for dcbi's to get to ram */ blr @@ -596,7 +596,7 @@ _GLOBAL(flush_dcache_all) mtctr r4 lis r5, KERNELBASE@h 1: lwz r3, 0(r5) /* Load one word from every line */ - addi r5, r5, L1_CACHE_LINE_SIZE + addi r5, r5, L1_CACHE_BYTES bdnz 1b blr #endif /* CONFIG_NOT_COHERENT_CACHE */ @@ -614,16 +614,16 @@ BEGIN_FTR_SECTION blr /* for 601, do nothing */ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) rlwinm r3,r3,0,0,19 /* Get page base address */ - li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */ + li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */ mtctr r4 mr r6,r3 0: dcbst 0,r3 /* Write line to ram */ - addi r3,r3,L1_CACHE_LINE_SIZE + addi r3,r3,L1_CACHE_BYTES bdnz 0b sync mtctr r4 1: icbi 0,r6 - addi r6,r6,L1_CACHE_LINE_SIZE + addi r6,r6,L1_CACHE_BYTES bdnz 1b sync isync @@ -646,16 +646,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) mtmsr r0 isync rlwinm r3,r3,0,0,19 /* Get page base address */ - li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */ + li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */ mtctr r4 mr r6,r3 0: dcbst 0,r3 /* Write line to ram */ - addi r3,r3,L1_CACHE_LINE_SIZE + addi r3,r3,L1_CACHE_BYTES bdnz 0b sync mtctr r4 1: icbi 0,r6 - addi r6,r6,L1_CACHE_LINE_SIZE + addi r6,r6,L1_CACHE_BYTES bdnz 1b sync mtmsr r10 /* restore DR */ @@ -670,7 +670,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) * void clear_pages(void *page, int order) ; */ _GLOBAL(clear_pages) - li r0,4096/L1_CACHE_LINE_SIZE + li r0,4096/L1_CACHE_BYTES slw r0,r0,r4 mtctr r0 #ifdef CONFIG_8xx @@ -682,7 +682,7 @@ _GLOBAL(clear_pages) #else 1: dcbz 0,r3 #endif - addi r3,r3,L1_CACHE_LINE_SIZE + addi r3,r3,L1_CACHE_BYTES bdnz 1b blr @@ -708,7 +708,7 @@ _GLOBAL(copy_page) #ifdef CONFIG_8xx /* don't use prefetch on 8xx */ - li r0,4096/L1_CACHE_LINE_SIZE + li r0,4096/L1_CACHE_BYTES mtctr r0 1: COPY_16_BYTES bdnz 1b @@ -722,13 +722,13 @@ _GLOBAL(copy_page) li r11,4 mtctr r0 11: dcbt r11,r4 - addi r11,r11,L1_CACHE_LINE_SIZE + addi r11,r11,L1_CACHE_BYTES bdnz 11b #else /* MAX_COPY_PREFETCH == 1 */ dcbt r5,r4 - li r11,L1_CACHE_LINE_SIZE+4 + li r11,L1_CACHE_BYTES+4 #endif /* MAX_COPY_PREFETCH */ - li r0,4096/L1_CACHE_LINE_SIZE - MAX_COPY_PREFETCH + li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH crclr 4*cr0+eq 2: mtctr r0 @@ -736,12 +736,12 @@ _GLOBAL(copy_page) dcbt r11,r4 dcbz r5,r3 COPY_16_BYTES -#if L1_CACHE_LINE_SIZE >= 32 +#if L1_CACHE_BYTES >= 32 COPY_16_BYTES -#if L1_CACHE_LINE_SIZE >= 64 +#if L1_CACHE_BYTES >= 64 COPY_16_BYTES COPY_16_BYTES -#if L1_CACHE_LINE_SIZE >= 128 +#if L1_CACHE_BYTES >= 128 COPY_16_BYTES COPY_16_BYTES COPY_16_BYTES diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S index 420a912..bee5141 100644 --- a/arch/powerpc/lib/copy_32.S +++ b/arch/powerpc/lib/copy_32.S @@ -66,9 +66,9 @@ .stabs "copy32.S",N_SO,0,0,0f 0: -CACHELINE_BYTES = L1_CACHE_LINE_SIZE -LG_CACHELINE_BYTES = LG_L1_CACHE_LINE_SIZE -CACHELINE_MASK = (L1_CACHE_LINE_SIZE-1) +CACHELINE_BYTES = L1_CACHE_BYTES +LG_CACHELINE_BYTES = L1_CACHE_SHIFT +CACHELINE_MASK = (L1_CACHE_BYTES-1) /* * Use dcbz on the complete cache lines in the destination @@ -205,12 +205,12 @@ _GLOBAL(cacheable_memcpy) dcbz r11,r6 #endif COPY_16_BYTES -#if L1_CACHE_LINE_SIZE >= 32 +#if L1_CACHE_BYTES >= 32 COPY_16_BYTES -#if L1_CACHE_LINE_SIZE >= 64 +#if L1_CACHE_BYTES >= 64 COPY_16_BYTES COPY_16_BYTES -#if L1_CACHE_LINE_SIZE >= 128 +#if L1_CACHE_BYTES >= 128 COPY_16_BYTES COPY_16_BYTES 
COPY_16_BYTES @@ -399,12 +399,12 @@ _GLOBAL(__copy_tofrom_user) .text /* the main body of the cacheline loop */ COPY_16_BYTES_WITHEX(0) -#if L1_CACHE_LINE_SIZE >= 32 +#if L1_CACHE_BYTES >= 32 COPY_16_BYTES_WITHEX(1) -#if L1_CACHE_LINE_SIZE >= 64 +#if L1_CACHE_BYTES >= 64 COPY_16_BYTES_WITHEX(2) COPY_16_BYTES_WITHEX(3) -#if L1_CACHE_LINE_SIZE >= 128 +#if L1_CACHE_BYTES >= 128 COPY_16_BYTES_WITHEX(4) COPY_16_BYTES_WITHEX(5) COPY_16_BYTES_WITHEX(6) @@ -458,12 +458,12 @@ _GLOBAL(__copy_tofrom_user) * 104f (if in read part) or 105f (if in write part), after updating r5 */ COPY_16_BYTES_EXCODE(0) -#if L1_CACHE_LINE_SIZE >= 32 +#if L1_CACHE_BYTES >= 32 COPY_16_BYTES_EXCODE(1) -#if L1_CACHE_LINE_SIZE >= 64 +#if L1_CACHE_BYTES >= 64 COPY_16_BYTES_EXCODE(2) COPY_16_BYTES_EXCODE(3) -#if L1_CACHE_LINE_SIZE >= 128 +#if L1_CACHE_BYTES >= 128 COPY_16_BYTES_EXCODE(4) COPY_16_BYTES_EXCODE(5) COPY_16_BYTES_EXCODE(6) diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S index 88419c7..22b113d 100644 --- a/arch/powerpc/platforms/powermac/sleep.S +++ b/arch/powerpc/platforms/powermac/sleep.S @@ -387,10 +387,10 @@ turn_on_mmu: #endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */ .section .data - .balign L1_CACHE_LINE_SIZE + .balign L1_CACHE_BYTES sleep_storage: .long 0 - .balign L1_CACHE_LINE_SIZE, 0 + .balign L1_CACHE_BYTES, 0 #endif /* CONFIG_6xx */ .section .text diff --git a/arch/ppc/kernel/cpu_setup_6xx.S b/arch/ppc/kernel/cpu_setup_6xx.S index a5333c0..55ed771 100644 --- a/arch/ppc/kernel/cpu_setup_6xx.S +++ b/arch/ppc/kernel/cpu_setup_6xx.S @@ -290,10 +290,10 @@ _GLOBAL(__init_fpu_registers) #define CS_SIZE 32 .data - .balign L1_CACHE_LINE_SIZE + .balign L1_CACHE_BYTES cpu_state_storage: .space CS_SIZE - .balign L1_CACHE_LINE_SIZE,0 + .balign L1_CACHE_BYTES,0 .text /* Called in normal context to backup CPU 0 state. This diff --git a/arch/ppc/kernel/cpu_setup_power4.S b/arch/ppc/kernel/cpu_setup_power4.S index 0abb5f2..d7bfd60 100644 --- a/arch/ppc/kernel/cpu_setup_power4.S +++ b/arch/ppc/kernel/cpu_setup_power4.S @@ -86,10 +86,10 @@ _GLOBAL(__setup_cpu_ppc970) #define CS_SIZE 32 .data - .balign L1_CACHE_LINE_SIZE + .balign L1_CACHE_BYTES cpu_state_storage: .space CS_SIZE - .balign L1_CACHE_LINE_SIZE,0 + .balign L1_CACHE_BYTES,0 .text /* Called in normal context to backup CPU 0 state. This diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S index 5b43987..c5a890d 100644 --- a/arch/ppc/kernel/head.S +++ b/arch/ppc/kernel/head.S @@ -916,7 +916,7 @@ relocate_kernel: copy_and_flush: addi r5,r5,-4 addi r6,r6,-4 -4: li r0,L1_CACHE_LINE_SIZE/4 +4: li r0,L1_CACHE_BYTES/4 mtctr r0 3: addi r6,r6,4 /* copy a cache line */ lwzx r0,r6,r4 diff --git a/arch/ppc/kernel/l2cr.S b/arch/ppc/kernel/l2cr.S index 8611152..d7f4e98 100644 --- a/arch/ppc/kernel/l2cr.S +++ b/arch/ppc/kernel/l2cr.S @@ -203,7 +203,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450) * L1 icache */ b 20f - .balign L1_CACHE_LINE_SIZE + .balign L1_CACHE_BYTES 22: sync mtspr SPRN_L2CR,r3 diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S index 2b9a162..2350f3e 100644 --- a/arch/ppc/kernel/misc.S +++ b/arch/ppc/kernel/misc.S @@ -498,21 +498,21 @@ _GLOBAL(flush_icache_range) BEGIN_FTR_SECTION blr /* for 601, do nothing */ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) - li r5,L1_CACHE_LINE_SIZE-1 + li r5,L1_CACHE_BYTES-1 andc r3,r3,r5 subf r4,r3,r4 add r4,r4,r5 - srwi. r4,r4,LG_L1_CACHE_LINE_SIZE + srwi. 
r4,r4,L1_CACHE_SHIFT beqlr mtctr r4 mr r6,r3 1: dcbst 0,r3 - addi r3,r3,L1_CACHE_LINE_SIZE + addi r3,r3,L1_CACHE_BYTES bdnz 1b sync /* wait for dcbst's to get to ram */ mtctr r4 2: icbi 0,r6 - addi r6,r6,L1_CACHE_LINE_SIZE + addi r6,r6,L1_CACHE_BYTES bdnz 2b sync /* additional sync needed on g4 */ isync @@ -525,16 +525,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) * clean_dcache_range(unsigned long start, unsigned long stop) */ _GLOBAL(clean_dcache_range) - li r5,L1_CACHE_LINE_SIZE-1 + li r5,L1_CACHE_BYTES-1 andc r3,r3,r5 subf r4,r3,r4 add r4,r4,r5 - srwi. r4,r4,LG_L1_CACHE_LINE_SIZE + srwi. r4,r4,L1_CACHE_SHIFT beqlr mtctr r4 1: dcbst 0,r3 - addi r3,r3,L1_CACHE_LINE_SIZE + addi r3,r3,L1_CACHE_BYTES bdnz 1b sync /* wait for dcbst's to get to ram */ blr @@ -546,16 +546,16 @@ _GLOBAL(clean_dcache_range) * flush_dcache_range(unsigned long start, unsigned long stop) */ _GLOBAL(flush_dcache_range) - li r5,L1_CACHE_LINE_SIZE-1 + li r5,L1_CACHE_BYTES-1 andc r3,r3,r5 subf r4,r3,r4 add r4,r4,r5 - srwi. r4,r4,LG_L1_CACHE_LINE_SIZE + srwi. r4,r4,L1_CACHE_SHIFT beqlr mtctr r4 1: dcbf 0,r3 - addi r3,r3,L1_CACHE_LINE_SIZE + addi r3,r3,L1_CACHE_BYTES bdnz 1b sync /* wait for dcbst's to get to ram */ blr @@ -568,16 +568,16 @@ _GLOBAL(flush_dcache_range) * invalidate_dcache_range(unsigned long start, unsigned long stop) */ _GLOBAL(invalidate_dcache_range) - li r5,L1_CACHE_LINE_SIZE-1 + li r5,L1_CACHE_BYTES-1 andc r3,r3,r5 subf r4,r3,r4 add r4,r4,r5 - srwi. r4,r4,LG_L1_CACHE_LINE_SIZE + srwi. r4,r4,L1_CACHE_SHIFT beqlr mtctr r4 1: dcbi 0,r3 - addi r3,r3,L1_CACHE_LINE_SIZE + addi r3,r3,L1_CACHE_BYTES bdnz 1b sync /* wait for dcbi's to get to ram */ blr @@ -598,7 +598,7 @@ _GLOBAL(flush_dcache_all) mtctr r4 lis r5, KERNELBASE@h 1: lwz r3, 0(r5) /* Load one word from every line */ - addi r5, r5, L1_CACHE_LINE_SIZE + addi r5, r5, L1_CACHE_BYTES bdnz 1b blr #endif /* CONFIG_NOT_COHERENT_CACHE */ @@ -616,16 +616,16 @@ BEGIN_FTR_SECTION blr /* for 601, do nothing */ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) rlwinm r3,r3,0,0,19 /* Get page base address */ - li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */ + li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */ mtctr r4 mr r6,r3 0: dcbst 0,r3 /* Write line to ram */ - addi r3,r3,L1_CACHE_LINE_SIZE + addi r3,r3,L1_CACHE_BYTES bdnz 0b sync mtctr r4 1: icbi 0,r6 - addi r6,r6,L1_CACHE_LINE_SIZE + addi r6,r6,L1_CACHE_BYTES bdnz 1b sync isync @@ -648,16 +648,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) mtmsr r0 isync rlwinm r3,r3,0,0,19 /* Get page base address */ - li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */ + li r4,4096/L1_CACHE_BYTES /* Number of lines in a page */ mtctr r4 mr r6,r3 0: dcbst 0,r3 /* Write line to ram */ - addi r3,r3,L1_CACHE_LINE_SIZE + addi r3,r3,L1_CACHE_BYTES bdnz 0b sync mtctr r4 1: icbi 0,r6 - addi r6,r6,L1_CACHE_LINE_SIZE + addi r6,r6,L1_CACHE_BYTES bdnz 1b sync mtmsr r10 /* restore DR */ @@ -672,7 +672,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE) * void clear_pages(void *page, int order) ; */ _GLOBAL(clear_pages) - li r0,4096/L1_CACHE_LINE_SIZE + li r0,4096/L1_CACHE_BYTES slw r0,r0,r4 mtctr r0 #ifdef CONFIG_8xx @@ -684,7 +684,7 @@ _GLOBAL(clear_pages) #else 1: dcbz 0,r3 #endif - addi r3,r3,L1_CACHE_LINE_SIZE + addi r3,r3,L1_CACHE_BYTES bdnz 1b blr @@ -710,7 +710,7 @@ _GLOBAL(copy_page) #ifdef CONFIG_8xx /* don't use prefetch on 8xx */ - li r0,4096/L1_CACHE_LINE_SIZE + li r0,4096/L1_CACHE_BYTES mtctr r0 1: COPY_16_BYTES bdnz 1b @@ -724,13 +724,13 @@ _GLOBAL(copy_page) li r11,4 mtctr r0 11: 
dcbt r11,r4 - addi r11,r11,L1_CACHE_LINE_SIZE + addi r11,r11,L1_CACHE_BYTES bdnz 11b #else /* MAX_COPY_PREFETCH == 1 */ dcbt r5,r4 - li r11,L1_CACHE_LINE_SIZE+4 + li r11,L1_CACHE_BYTES+4 #endif /* MAX_COPY_PREFETCH */ - li r0,4096/L1_CACHE_LINE_SIZE - MAX_COPY_PREFETCH + li r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH crclr 4*cr0+eq 2: mtctr r0 @@ -738,12 +738,12 @@ _GLOBAL(copy_page) dcbt r11,r4 dcbz r5,r3 COPY_16_BYTES -#if L1_CACHE_LINE_SIZE >= 32 +#if L1_CACHE_BYTES >= 32 COPY_16_BYTES -#if L1_CACHE_LINE_SIZE >= 64 +#if L1_CACHE_BYTES >= 64 COPY_16_BYTES COPY_16_BYTES -#if L1_CACHE_LINE_SIZE >= 128 +#if L1_CACHE_BYTES >= 128 COPY_16_BYTES COPY_16_BYTES COPY_16_BYTES diff --git a/arch/ppc/lib/string.S b/arch/ppc/lib/string.S index 36c9b97..2e258c4 100644 --- a/arch/ppc/lib/string.S +++ b/arch/ppc/lib/string.S @@ -65,9 +65,9 @@ .stabs "arch/ppc/lib/",N_SO,0,0,0f .stabs "string.S",N_SO,0,0,0f -CACHELINE_BYTES = L1_CACHE_LINE_SIZE -LG_CACHELINE_BYTES = LG_L1_CACHE_LINE_SIZE -CACHELINE_MASK = (L1_CACHE_LINE_SIZE-1) +CACHELINE_BYTES = L1_CACHE_BYTES +LG_CACHELINE_BYTES = L1_CACHE_SHIFT +CACHELINE_MASK = (L1_CACHE_BYTES-1) _GLOBAL(strcpy) addi r5,r3,-1 @@ -265,12 +265,12 @@ _GLOBAL(cacheable_memcpy) dcbz r11,r6 #endif COPY_16_BYTES -#if L1_CACHE_LINE_SIZE >= 32 +#if L1_CACHE_BYTES >= 32 COPY_16_BYTES -#if L1_CACHE_LINE_SIZE >= 64 +#if L1_CACHE_BYTES >= 64 COPY_16_BYTES COPY_16_BYTES -#if L1_CACHE_LINE_SIZE >= 128 +#if L1_CACHE_BYTES >= 128 COPY_16_BYTES COPY_16_BYTES COPY_16_BYTES @@ -485,12 +485,12 @@ _GLOBAL(__copy_tofrom_user) .text /* the main body of the cacheline loop */ COPY_16_BYTES_WITHEX(0) -#if L1_CACHE_LINE_SIZE >= 32 +#if L1_CACHE_BYTES >= 32 COPY_16_BYTES_WITHEX(1) -#if L1_CACHE_LINE_SIZE >= 64 +#if L1_CACHE_BYTES >= 64 COPY_16_BYTES_WITHEX(2) COPY_16_BYTES_WITHEX(3) -#if L1_CACHE_LINE_SIZE >= 128 +#if L1_CACHE_BYTES >= 128 COPY_16_BYTES_WITHEX(4) COPY_16_BYTES_WITHEX(5) COPY_16_BYTES_WITHEX(6) @@ -544,12 +544,12 @@ _GLOBAL(__copy_tofrom_user) * 104f (if in read part) or 105f (if in write part), after updating r5 */ COPY_16_BYTES_EXCODE(0) -#if L1_CACHE_LINE_SIZE >= 32 +#if L1_CACHE_BYTES >= 32 COPY_16_BYTES_EXCODE(1) -#if L1_CACHE_LINE_SIZE >= 64 +#if L1_CACHE_BYTES >= 64 COPY_16_BYTES_EXCODE(2) COPY_16_BYTES_EXCODE(3) -#if L1_CACHE_LINE_SIZE >= 128 +#if L1_CACHE_BYTES >= 128 COPY_16_BYTES_EXCODE(4) COPY_16_BYTES_EXCODE(5) COPY_16_BYTES_EXCODE(6) diff --git a/arch/ppc/platforms/katana.c b/arch/ppc/platforms/katana.c index 3eb611e..a301c5a 100644 --- a/arch/ppc/platforms/katana.c +++ b/arch/ppc/platforms/katana.c @@ -521,7 +521,7 @@ katana_fixup_resources(struct pci_dev *dev) { u16 v16; - pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, L1_CACHE_LINE_SIZE>>2); + pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, L1_CACHE_BYTES>>2); pci_read_config_word(dev, PCI_COMMAND, &v16); v16 |= PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK; diff --git a/arch/ppc/platforms/pmac_sleep.S b/arch/ppc/platforms/pmac_sleep.S index 88419c7..22b113d 100644 --- a/arch/ppc/platforms/pmac_sleep.S +++ b/arch/ppc/platforms/pmac_sleep.S @@ -387,10 +387,10 @@ turn_on_mmu: #endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */ .section .data - .balign L1_CACHE_LINE_SIZE + .balign L1_CACHE_BYTES sleep_storage: .long 0 - .balign L1_CACHE_LINE_SIZE, 0 + .balign L1_CACHE_BYTES, 0 #endif /* CONFIG_6xx */ .section .text diff --git a/arch/ppc/syslib/mv64x60.c b/arch/ppc/syslib/mv64x60.c index 4849850..a781c50 100644 --- a/arch/ppc/syslib/mv64x60.c +++ b/arch/ppc/syslib/mv64x60.c @@ -1304,7 +1304,7 @@ 
mv64x60_config_pci_params(struct pci_controller *hose, early_write_config_word(hose, 0, devfn, PCI_COMMAND, u16_val); /* Set latency timer, cache line size, clear BIST */ - u16_val = (pi->latency_timer << 8) | (L1_CACHE_LINE_SIZE >> 2); + u16_val = (pi->latency_timer << 8) | (L1_CACHE_BYTES >> 2); early_write_config_word(hose, 0, devfn, PCI_CACHE_LINE_SIZE, u16_val); mv64x60_pci_exclude_bridge = save_exclude; |