path: root/fs/nfs
author    Trond Myklebust <Trond.Myklebust@netapp.com>    2007-04-20 16:12:50 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-04-20 22:56:30 -0700
commit    2b82f190c81bf1524447c021df4e9ce8ef379bd5 (patch)
tree      3f4ebb8c0379715fe38897474856bdbab5750250 /fs/nfs
parent    612c9384fd0486686699f7d49b774f0c7a79c511 (diff)
NFS: Fix race in nfs_set_page_dirty
Protect nfs_set_page_dirty() against races with nfs_inode_add_request.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
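In short, the patch makes nfs_inode_add_request() and nfs_inode_remove_request() cooperate with the dirtying path through the request's PG_NEED_FLUSH flag, and makes nfs_set_page_dirty() do its request lookup under the inode's req_lock. The sketch below summarizes just that handshake; the helper names are invented for illustration and the radix-tree and refcount bookkeeping around them in write.c is omitted, so read it as a summary of the hunks rather than kernel-ready code.

/*
 * Illustrative sketch of the PG_NEED_FLUSH handshake added by this patch.
 * Both helpers correspond to code that runs with nfsi->req_lock held, the
 * same lock nfs_set_page_dirty() now takes (last hunk below).
 */

/* Corresponds to the new lines in nfs_inode_add_request(). */
static void sketch_add_request_dirty(struct nfs_page *req)
{
	/* A page dirtied before the request existed must still be flushed:
	 * carry the dirty state into the request's flags. */
	if (PageDirty(req->wb_page))
		set_bit(PG_NEED_FLUSH, &req->wb_flags);
}

/* Corresponds to the new lines in nfs_inode_remove_request(). */
static void sketch_remove_request_dirty(struct nfs_page *req)
{
	/* If the request is torn down while it still needs flushing, put
	 * the dirty bit back on the page so the data is not lost. */
	if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
		__set_page_dirty_nobuffers(req->wb_page);
}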
Diffstat (limited to 'fs/nfs')
-rw-r--r--  fs/nfs/write.c  17
1 file changed, 14 insertions, 3 deletions
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index ce5b4a9..7975589 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -388,6 +388,8 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 	}
 	SetPagePrivate(req->wb_page);
 	set_page_private(req->wb_page, (unsigned long)req);
+	if (PageDirty(req->wb_page))
+		set_bit(PG_NEED_FLUSH, &req->wb_flags);
 	nfsi->npages++;
 	atomic_inc(&req->wb_count);
 	return 0;
@@ -407,6 +409,8 @@ static void nfs_inode_remove_request(struct nfs_page *req)
 	set_page_private(req->wb_page, 0);
 	ClearPagePrivate(req->wb_page);
 	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
+	if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
+		__set_page_dirty_nobuffers(req->wb_page);
 	nfsi->npages--;
 	if (!nfsi->npages) {
 		spin_unlock(&nfsi->req_lock);
@@ -1527,15 +1531,22 @@ int nfs_wb_page(struct inode *inode, struct page* page)
 int nfs_set_page_dirty(struct page *page)
 {
+	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
 	struct nfs_page *req;
+	int ret;
-	req = nfs_page_find_request(page);
+	spin_lock(req_lock);
+	req = nfs_page_find_request_locked(page);
 	if (req != NULL) {
 		/* Mark any existing write requests for flushing */
-		set_bit(PG_NEED_FLUSH, &req->wb_flags);
+		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
+		spin_unlock(req_lock);
 		nfs_release_request(req);
+		return ret;
 	}
-	return __set_page_dirty_nobuffers(page);
+	ret = __set_page_dirty_nobuffers(page);
+	spin_unlock(req_lock);
+	return ret;
 }
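For reference, nfs_set_page_dirty() as it reads after the last hunk is applied, reassembled from the context and "+" lines above (blank lines and surrounding code are not visible in the hunk, so the exact formatting is approximate):

int nfs_set_page_dirty(struct page *page)
{
	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
	struct nfs_page *req;
	int ret;

	/* Take the inode's req_lock so a concurrent nfs_inode_add_request()
	 * cannot slip in between the lookup and the flag update. */
	spin_lock(req_lock);
	req = nfs_page_find_request_locked(page);
	if (req != NULL) {
		/* Mark any existing write requests for flushing */
		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
		spin_unlock(req_lock);
		nfs_release_request(req);
		return ret;
	}
	ret = __set_page_dirty_nobuffers(page);
	spin_unlock(req_lock);
	return ret;
}

Note that when a request already exists the function now returns without calling __set_page_dirty_nobuffers(); the PG_NEED_FLUSH flag on the request carries the dirty state instead, and nfs_inode_remove_request() (second hunk) re-dirties the page if that flag is still set when the request is removed.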