path: root/fs/f2fs/f2fs.h
author		Jaegeuk Kim <jaegeuk.kim@samsung.com>	2012-11-22 16:21:29 +0900
committer	Ziyan <jaraidaniel@gmail.com>	2015-05-02 14:34:10 +0200
commit		fa3c8c2de86d310cecafc70831893ab99bed572a (patch)
tree		b8a1d20c978df905a2562b21a98cb628f5ae378c /fs/f2fs/f2fs.h
parent		f45d8b4560030b28f56e2caa6211830eb4ad7d6b (diff)
f2fs: introduce a new global lock scheme
In the previous version, f2fs uses global locks according to the usage types, such as directory operations, block allocation, block write, and so on. Reference the following lock types in f2fs.h.

enum lock_type {
	RENAME,		/* for renaming operations */
	DENTRY_OPS,	/* for directory operations */
	DATA_WRITE,	/* for data write */
	DATA_NEW,	/* for data allocation */
	DATA_TRUNC,	/* for data truncate */
	NODE_NEW,	/* for node allocation */
	NODE_TRUNC,	/* for node truncate */
	NODE_WRITE,	/* for node write */
	NR_LOCK_TYPE,
};

In that case, we lose performance in a multi-threading environment, since every type of operation must be conducted one at a time.

In order to address the problem, let's share the locks globally with a mutex array regardless of type, so that users can grab a mutex and perform their jobs in parallel as much as possible.

For this, I propose a new global lock scheme as follows.

0. Data structure
 - f2fs_sb_info -> mutex_lock[NR_GLOBAL_LOCKS]
 - f2fs_sb_info -> node_write

1. mutex_lock_op(sbi)
 - try to get an available lock from the array.
 - returns the index of the obtained lock.

2. mutex_unlock_op(sbi, index of the lock)
 - unlock the given index of the lock.

3. mutex_lock_all(sbi)
 - grab all the locks in the array before the checkpoint.

4. mutex_unlock_all(sbi)
 - release all the locks in the array after the checkpoint.

5. block_operations()
 - call mutex_lock_all()
 - sync_dirty_dir_inodes()
 - grab node_write
 - sync_node_pages()

Note that the pairs mutex_lock_op()/mutex_unlock_op() and mutex_lock_all()/mutex_unlock_all() should be used together.

Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>

Conflicts:
	fs/f2fs/inode.c

Change-Id: I90bc64d67e77bb38a9eabfbdb3fbf46b5e065365
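For illustration only (this is not part of the commit), a minimal sketch of how callers are expected to pair these helpers. The function names f2fs_do_some_op() and checkpoint_block_sketch() are hypothetical; the real users are the f2fs file-system operations and block_operations() described above.

/* Sketch only: hypothetical callers of the new global lock scheme. */

static int f2fs_do_some_op(struct f2fs_sb_info *sbi)
{
	int ilock;

	/* grab one free slot of sbi->fs_lock[], waiting round-robin if none is free */
	ilock = mutex_lock_op(sbi);

	/* ... perform the directory/data/node operation here ... */

	/* release exactly the slot index returned above */
	mutex_unlock_op(sbi, ilock);
	return 0;
}

static void checkpoint_block_sketch(struct f2fs_sb_info *sbi)
{
	/* checkpoint side: take every slot so that no FS operation can run */
	mutex_lock_all(sbi);

	/* ... flush dirty dir inodes, take sbi->node_write, sync node pages ... */

	/* let FS operations resume once the checkpoint is done */
	mutex_unlock_all(sbi);
}

The point of the scheme is that any free slot is as good as any other, so unrelated operations no longer serialize behind a per-type mutex; only the checkpoint needs exclusive access, which it gets by taking the whole array.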
Diffstat (limited to 'fs/f2fs/f2fs.h')
-rw-r--r--	fs/f2fs/f2fs.h	63
1 file changed, 40 insertions(+), 23 deletions(-)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 02276bb..7e56c59 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -309,23 +309,12 @@ enum count_type {
};
/*
- * FS_LOCK nesting subclasses for the lock validator:
- *
- * The locking order between these classes is
- * RENAME -> DENTRY_OPS -> DATA_WRITE -> DATA_NEW
- * -> DATA_TRUNC -> NODE_WRITE -> NODE_NEW -> NODE_TRUNC
+ * Uses as sbi->fs_lock[NR_GLOBAL_LOCKS].
+ * The checkpoint procedure blocks all the locks in this fs_lock array.
+ * Some FS operations grab free locks, and if there is no free lock,
+ * then wait to grab a lock in a round-robin manner.
*/
-enum lock_type {
- RENAME, /* for renaming operations */
- DENTRY_OPS, /* for directory operations */
- DATA_WRITE, /* for data write */
- DATA_NEW, /* for data allocation */
- DATA_TRUNC, /* for data truncate */
- NODE_NEW, /* for node allocation */
- NODE_TRUNC, /* for node truncate */
- NODE_WRITE, /* for node write */
- NR_LOCK_TYPE,
-};
+#define NR_GLOBAL_LOCKS 8
/*
* The below are the page types of bios used in submti_bio().
@@ -365,10 +354,11 @@ struct f2fs_sb_info {
/* for checkpoint */
struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
struct inode *meta_inode; /* cache meta blocks */
- struct mutex cp_mutex; /* for checkpoint procedure */
- struct mutex fs_lock[NR_LOCK_TYPE]; /* for blocking FS operations */
- struct mutex write_inode; /* mutex for write inode */
+ struct mutex cp_mutex; /* checkpoint procedure lock */
+ struct mutex fs_lock[NR_GLOBAL_LOCKS]; /* blocking FS operations */
+ struct mutex node_write; /* locking node writes */
struct mutex writepages; /* mutex for writepages() */
+ unsigned char next_lock_num; /* round-robin global locks */
int por_doing; /* recovery is doing or not */
/* for orphan inode management */
@@ -503,14 +493,40 @@ static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
-static inline void mutex_lock_op(struct f2fs_sb_info *sbi, enum lock_type t)
+static inline void mutex_lock_all(struct f2fs_sb_info *sbi)
+{
+	int i = 0;
+	for (; i < NR_GLOBAL_LOCKS; i++)
+		mutex_lock(&sbi->fs_lock[i]);
+}
+
+static inline void mutex_unlock_all(struct f2fs_sb_info *sbi)
{
-	mutex_lock_nested(&sbi->fs_lock[t], t);
+	int i = 0;
+	for (; i < NR_GLOBAL_LOCKS; i++)
+		mutex_unlock(&sbi->fs_lock[i]);
}
-static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, enum lock_type t)
+static inline int mutex_lock_op(struct f2fs_sb_info *sbi)
{
-	mutex_unlock(&sbi->fs_lock[t]);
+	unsigned char next_lock = sbi->next_lock_num % NR_GLOBAL_LOCKS;
+	int i = 0;
+
+	for (; i < NR_GLOBAL_LOCKS; i++)
+		if (mutex_trylock(&sbi->fs_lock[i]))
+			return i;
+
+	mutex_lock(&sbi->fs_lock[next_lock]);
+	sbi->next_lock_num++;
+	return next_lock;
+}
+
+static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, int ilock)
+{
+	if (ilock < 0)
+		return;
+	BUG_ON(ilock >= NR_GLOBAL_LOCKS);
+	mutex_unlock(&sbi->fs_lock[ilock]);
}
/*
@@ -879,6 +895,7 @@ long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);
void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget(struct super_block *, unsigned long);
void update_inode(struct inode *, struct page *);
+int update_inode_page(struct inode *);
int f2fs_write_inode(struct inode *, struct writeback_control *);
void f2fs_evict_inode(struct inode *);