author     Chao Yu <chao2.yu@samsung.com>          2014-03-29 15:30:40 +0800
committer  Andreas Blaesius <skate4life@gmx.de>    2016-06-05 21:21:25 +0200
commit     67573b9442373dc9d0038e60e8236b2af0e3a39e (patch)
tree       8a09c73f55dd89bf38fee4d8748f1af6631a0451 /fs
parent     d711b7ac530fba9b320daac68182b8708cb48851 (diff)
f2fs: fix error path when fail to read inline data
We should unlock the page in the ->readpage() path, and should also unlock and release the page in the error path of ->write_begin(), to avoid a deadlock or a memory leak. So let's add release code to fix the problem when we fail to read inline data.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
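As a reading aid, a minimal sketch of the caller-side rule the fix enforces, assuming the page-cache helpers of this kernel generation; the helper name example_prepare_page() is hypothetical, the f2fs_* calls come from fs/f2fs and are shown only to mirror the patch, and the body is simplified from the patched ->write_begin() path rather than being the actual f2fs code:

        #include <linux/fs.h>
        #include <linux/pagemap.h>

        /* Hypothetical helper: the page returned by grab_cache_page_write_begin()
         * is locked and holds a page-cache reference, so a failed read must leave
         * it unlocked (done inside f2fs_read_inline_data() by this patch) and the
         * caller must drop the reference. */
        static int example_prepare_page(struct inode *inode, struct address_space *mapping,
                                        pgoff_t index, unsigned flags)
        {
                struct page *page;
                int err = 0;

                page = grab_cache_page_write_begin(mapping, index, flags);
                if (!page)
                        return -ENOMEM;
                /* from here on: page is locked, reference held */

                if (f2fs_has_inline_data(inode)) {
                        err = f2fs_read_inline_data(inode, page); /* unlocks the page on failure */
                        if (err) {
                                page_cache_release(page); /* drop the reference so the page is not leaked */
                                return err;
                        }
                }

                lock_page(page); /* would deadlock if the failed read had left the page locked */
                /* real code continues: check PageUptodate(), copy user data, ... */
                return err;
        }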
Diffstat (limited to 'fs')
-rw-r--r--  fs/f2fs/data.c    | 14
-rw-r--r--  fs/f2fs/inline.c  |  4
2 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index ea58787..f451e71 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -947,13 +947,19 @@ inline_data:
         if (dn.data_blkaddr == NEW_ADDR) {
                 zero_user_segment(page, 0, PAGE_CACHE_SIZE);
         } else {
-                if (f2fs_has_inline_data(inode))
+                if (f2fs_has_inline_data(inode)) {
                         err = f2fs_read_inline_data(inode, page);
-                else
+                        if (err) {
+                                page_cache_release(page);
+                                return err;
+                        }
+                } else {
                         err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
                                                         READ_SYNC);
-                if (err)
-                        return err;
+                        if (err)
+                                return err;
+                }
+
                 lock_page(page);
                 if (unlikely(!PageUptodate(page))) {
                         f2fs_put_page(page, 1);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 31ee5b1..383db1f 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -45,8 +45,10 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
         }
 
         ipage = get_node_page(sbi, inode->i_ino);
-        if (IS_ERR(ipage))
+        if (IS_ERR(ipage)) {
+                unlock_page(page);
                 return PTR_ERR(ipage);
+        }
 
         zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
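Seen from the callee side, the inline.c hunk brings f2fs_read_inline_data() in line with the contract the block-I/O read path already follows: on error the page must come back unlocked, so the caller's later lock_page()/PageUptodate() sequence (visible in the data.c hunk above) neither deadlocks nor leaks. A short annotated sketch of that contract, illustrative rather than the full function:

        /* callee side: the page arrives locked; on error it must go back unlocked */
        ipage = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(ipage)) {
                unlock_page(page);      /* never hand a still-locked page back on error */
                return PTR_ERR(ipage);
        }
        /* on success the existing code fills the page from the inline area,
         * marks it up to date and unlocks it before returning 0, which is what
         * the caller expects after a buffered read. */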