author    Paul Mundt <lethal@linux-sh.org>    2009-09-08 16:23:08 +0900
committer Paul Mundt <lethal@linux-sh.org>    2009-09-08 16:23:08 +0900
commit    a2494b9b5fb702becaf8d8e3138f7a1a0d3c537e (patch)
tree      53e7670594825b2c558a9ca7b993670b259a7374 /arch/sh/lib/copy_page.S
parent    6e4154d4c2dd3d7e61d19ddd2527322ce34c2f5a (diff)
sh: Kill off dcache writeback from copy_page().
Now that the cache purging is handled manually by all copy_page() callers, we can kill off the dcache writeback done by copy_page() itself. This optimizes the non-aliasing case.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
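Besides dropping the ocbwb writeback, the first hunk below also stops loading PAGE_SIZE from the .Lpsz literal pool entry and instead builds the constant from an 8-bit immediate plus two shifts. A minimal C sketch of that arithmetic, assuming the common 4 KiB page size (PAGE_SIZE itself is configuration dependent on sh):

/* Sketch of the inline PAGE_SIZE construction used in the first hunk.
 * Assumes PAGE_SIZE == 4096; the real value comes from the kernel config. */
#include <assert.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long r0 = PAGE_SIZE >> 10;	/* mov #(PAGE_SIZE >> 10), r0 -> 4, fits the 8-bit immediate */

	r0 <<= 8;				/* shll8 r0 -> 1024 */
	r0 <<= 2;				/* shll2 r0 -> 4096 */

	assert(r0 == PAGE_SIZE);		/* the two shifts undo the >> 10 */
	return 0;
}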
Diffstat (limited to 'arch/sh/lib/copy_page.S')
-rw-r--r--  arch/sh/lib/copy_page.S | 11
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/arch/sh/lib/copy_page.S b/arch/sh/lib/copy_page.S
index 43de7e8..9d7b8bc 100644
--- a/arch/sh/lib/copy_page.S
+++ b/arch/sh/lib/copy_page.S
@@ -30,7 +30,9 @@ ENTRY(copy_page)
mov r4,r10
mov r5,r11
mov r5,r8
- mov.l .Lpsz,r0
+ mov #(PAGE_SIZE >> 10), r0
+ shll8 r0
+ shll2 r0
add r0,r8
!
1: mov.l @r11+,r0
@@ -43,7 +45,6 @@ ENTRY(copy_page)
mov.l @r11+,r7
#if defined(CONFIG_CPU_SH4)
movca.l r0,@r10
- mov r10,r0
#else
mov.l r0,@r10
#endif
@@ -55,9 +56,6 @@ ENTRY(copy_page)
mov.l r3,@-r10
mov.l r2,@-r10
mov.l r1,@-r10
-#if defined(CONFIG_CPU_SH4)
- ocbwb @r0
-#endif
cmp/eq r11,r8
bf/s 1b
add #28,r10
@@ -68,9 +66,6 @@ ENTRY(copy_page)
rts
nop
- .balign 4
-.Lpsz: .long PAGE_SIZE
-
/*
* __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
* Return the number of bytes NOT copied