author | Nick Piggin <npiggin@suse.de> | 2008-07-25 19:45:26 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-26 12:00:06 -0700
commit | bc40d73c950146725e9e768e856a416ec8949065 (patch)
tree | 7d12351ddeb6a281ff7a9b5db543067c710d1859 /fs
parent | f5dd33c494a427b1d1a3b574de5c9e511c888864 (diff)
splice: use get_user_pages_fast
Use get_user_pages_fast in splice. This reverts some mmap_sem batching
there; however, the biggest problem with mmap_sem tends to be hold times
blocking out other threads rather than cacheline bouncing. Further, on
architectures that implement get_user_pages_fast without locks, mmap_sem
can be avoided completely anyway.
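
For context before the diff: a minimal sketch of the two pinning patterns this patch trades between. The helper names below are illustrative, not from the patch; the get_user_pages() and get_user_pages_fast() signatures are the 2008-era ones that appear in the diff itself.

```c
#include <linux/mm.h>
#include <linux/sched.h>

/* Old pattern: the caller takes mmap_sem for reading around
 * get_user_pages(), and must avoid faulting (which would need to
 * re-take mmap_sem) while holding it.
 */
static int pin_pages_locked(unsigned long base, int npages,
			    struct page **pages)
{
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, base, npages,
			     0 /* write */, 0 /* force */, pages, NULL);
	up_read(&current->mm->mmap_sem);
	return ret;
}

/* New pattern: get_user_pages_fast() handles any locking itself; on
 * architectures with a lockless fast path it pins the pages without
 * touching mmap_sem at all.
 */
static int pin_pages_fast(unsigned long base, int npages,
			  struct page **pages)
{
	return get_user_pages_fast(base, npages, 0 /* write */, pages);
}
```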
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Dave Kleikamp <shaggy@austin.ibm.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Cc: Zach Brown <zach.brown@oracle.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Reviewed-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/splice.c | 41
1 file changed, 3 insertions, 38 deletions
```diff
diff --git a/fs/splice.c b/fs/splice.c
index 3994421..47dc1a4 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1161,36 +1161,6 @@ static long do_splice(struct file *in, loff_t __user *off_in,
 }

 /*
- * Do a copy-from-user while holding the mmap_semaphore for reading, in a
- * manner safe from deadlocking with simultaneous mmap() (grabbing mmap_sem
- * for writing) and page faulting on the user memory pointed to by src.
- * This assumes that we will very rarely hit the partial != 0 path, or this
- * will not be a win.
- */
-static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n)
-{
-	int partial;
-
-	if (!access_ok(VERIFY_READ, src, n))
-		return -EFAULT;
-
-	pagefault_disable();
-	partial = __copy_from_user_inatomic(dst, src, n);
-	pagefault_enable();
-
-	/*
-	 * Didn't copy everything, drop the mmap_sem and do a faulting copy
-	 */
-	if (unlikely(partial)) {
-		up_read(&current->mm->mmap_sem);
-		partial = copy_from_user(dst, src, n);
-		down_read(&current->mm->mmap_sem);
-	}
-
-	return partial;
-}
-
-/*
  * Map an iov into an array of pages and offset/length tupples. With the
  * partial_page structure, we can map several non-contiguous ranges into
  * our ones pages[] map instead of splitting that operation into pieces.
@@ -1203,8 +1173,6 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 {
 	int buffers = 0, error = 0;

-	down_read(&current->mm->mmap_sem);
-
 	while (nr_vecs) {
 		unsigned long off, npages;
 		struct iovec entry;
@@ -1213,7 +1181,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		int i;

 		error = -EFAULT;
-		if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry)))
+		if (copy_from_user(&entry, iov, sizeof(entry)))
 			break;

 		base = entry.iov_base;
@@ -1247,9 +1215,8 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		if (npages > PIPE_BUFFERS - buffers)
 			npages = PIPE_BUFFERS - buffers;

-		error = get_user_pages(current, current->mm,
-				       (unsigned long) base, npages, 0, 0,
-				       &pages[buffers], NULL);
+		error = get_user_pages_fast((unsigned long)base, npages,
+					0, &pages[buffers]);

 		if (unlikely(error <= 0))
 			break;
@@ -1288,8 +1255,6 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		iov++;
 	}

-	up_read(&current->mm->mmap_sem);
-
 	if (buffers)
 		return buffers;

```
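To see why the patch can delete copy_from_user_mmap_sem() entirely: once mmap_sem is no longer held across the loop, a plain copy_from_user() may fault freely, and pinning reduces to a single call. Below is a condensed, hedged sketch of the post-patch shape; the helper name pin_iov_segment and its bounds handling are illustrative, not from the patch, while the calls use the 2008-era signatures shown in the diff above.

```c
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/uio.h>

/* Illustrative helper (not in the patch): pin the pages backing one
 * user iovec for reading, the way get_iovec_page_array() does after
 * this change. Returns the number of pages pinned, or -EFAULT.
 */
static int pin_iov_segment(const struct iovec __user *uiov,
			   struct page **pages, int max_pages)
{
	struct iovec entry;
	unsigned long base, off, npages;

	/* Safe now: no lock is held, so a faulting copy cannot deadlock
	 * against a concurrent mmap() taking mmap_sem for writing.
	 */
	if (copy_from_user(&entry, uiov, sizeof(entry)))
		return -EFAULT;

	base = (unsigned long)entry.iov_base;
	off = base & ~PAGE_MASK;
	npages = (off + entry.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (npages > max_pages)
		npages = max_pages;

	/* Pins the pages; takes mmap_sem internally only if the lockless
	 * walk fails or the architecture provides no fast path.
	 */
	return get_user_pages_fast(base, npages, 0 /* write */, pages);
}
```

On architectures without a fast-path implementation, the generic fallback of this era simply wraps get_user_pages() in mmap_sem internally, so caller-visible behavior is unchanged; the win comes where the lockless walk succeeds.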