author    Eric Dumazet <eric.dumazet@gmail.com>    2010-10-23 11:18:01 -0400
committer Al Viro <viro@zeniv.linux.org.uk>        2010-10-25 21:26:11 -0400
commit    f991bd2e14210fb93d722cb23e54991de20e8a3d
tree      60643fb9d828c388d963ceb26504dce9890eabbb
parent    7de9c6ee3ecffd99e1628e81a5ea5468f7581a1f
fs: introduce a per-cpu last_ino allocator
new_inode() dirties a contended cache line to get increasing inode numbers. This limits performance on workloads that cause significant parallel inode allocation.

Solve this problem by using a per_cpu variable, fed from the shared last_ino in batches of 1024 allocations. This reduces contention on the shared last_ino and gives the same spread of inode numbers as before (i.e. the same wraparound after 2^32 allocations).

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
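To make the batching idea concrete outside the kernel, here is a minimal user-space C11 sketch (an illustration, not part of the patch): a _Thread_local counter stands in for the DEFINE_PER_CPU variable, and a C11 atomic plays the role of shared_last_ino. Only one allocation in every LAST_INO_BATCH touches the shared counter.

#include <stdatomic.h>
#include <stdio.h>

#define LAST_INO_BATCH 1024

static _Thread_local unsigned int last_ino;	/* stands in for the per-cpu variable */
static atomic_uint shared_last_ino;		/* the one contended counter */

static unsigned int get_next_ino(void)
{
	unsigned int res = last_ino;

	/* Refill from the shared counter once the local batch is exhausted. */
	if ((res & (LAST_INO_BATCH - 1)) == 0)
		res = atomic_fetch_add(&shared_last_ino, LAST_INO_BATCH);

	last_ino = ++res;
	return res;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("ino %u\n", get_next_ino());
	return 0;
}

In the kernel version below, get_cpu_var()/put_cpu_var() additionally disable and re-enable preemption so the per-cpu copy cannot be switched out mid-update; a plain _Thread_local variable needs no such protection.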
Diffstat (limited to 'fs')
-rw-r--r--  fs/inode.c  45
1 file changed, 38 insertions(+), 7 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index 05ea293..46a3e12 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -717,6 +717,43 @@ repeat:
return NULL;
}
+/*
+ * Each cpu owns a range of LAST_INO_BATCH numbers.
+ * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
+ * to renew the exhausted range.
+ *
+ * This does not significantly increase overflow rate because every CPU can
+ * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
+ * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
+ * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
+ * overflow rate by 2x, which does not seem too significant.
+ *
+ * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
+ * error if st_ino won't fit in target struct field. Use 32bit counter
+ * here to attempt to avoid that.
+ */
+#define LAST_INO_BATCH 1024
+static DEFINE_PER_CPU(unsigned int, last_ino);
+
+static unsigned int get_next_ino(void)
+{
+	unsigned int *p = &get_cpu_var(last_ino);
+	unsigned int res = *p;
+
+#ifdef CONFIG_SMP
+	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
+		static atomic_t shared_last_ino;
+		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
+
+		res = next - LAST_INO_BATCH;
+	}
+#endif
+
+	*p = ++res;
+	put_cpu_var(last_ino);
+	return res;
+}
+
 /**
  * new_inode - obtain an inode
  * @sb: superblock
@@ -731,12 +768,6 @@ repeat:
 */
 struct inode *new_inode(struct super_block *sb)
 {
-	/*
-	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
-	 * error if st_ino won't fit in target struct field. Use 32bit counter
-	 * here to attempt to avoid that.
-	 */
-	static unsigned int last_ino;
 	struct inode *inode;
 
 	spin_lock_prefetch(&inode_lock);
@@ -745,7 +776,7 @@ struct inode *new_inode(struct super_block *sb)
 	if (inode) {
 		spin_lock(&inode_lock);
 		__inode_sb_list_add(inode);
-		inode->i_ino = ++last_ino;
+		inode->i_ino = get_next_ino();
 		inode->i_state = 0;
 		spin_unlock(&inode_lock);
 	}
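As a sanity check on the wastage estimate in the new comment: with NR_CPUS = 4096 and LAST_INO_BATCH = 1024, at most 4096 * 1023 = 4,190,208 inode numbers are skipped, which is indeed about 0.1% of the 2^32 space. A throwaway calculation (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned long long waste = 4096ULL * (1024 - 1);	/* NR_CPUS * (LAST_INO_BATCH-1) */

	/* Prints: 4190208 wasted = 0.098% of 2^32 */
	printf("%llu wasted = %.3f%% of 2^32\n",
	       waste, 100.0 * waste / 4294967296.0);
	return 0;
}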