author    Al Viro <viro@zeniv.linux.org.uk>  2009-06-24 02:05:18 -0400
committer Al Viro <viro@zeniv.linux.org.uk>  2009-06-24 08:15:25 -0400
commit    c63e09ecccb50f930e899d7005edc5411ee86d4f (patch)
tree      aafa5169cde98a2bb8327fcc948c76dba0746322 /fs
parent    7e325d3a6b117c7288bfc0755410e9d9d2b71326 (diff)
Make allocation of anon devices cheaper
Standard trick - add a new variable (start) such that for each n < start, n is known to be busy. Allocation can skip checking everything in [0..start) and, if it returns n, we can set start to n + 1. Freeing below start sets start to what we'd just freed.

Of course, it still sucks if we do something like
        free 0
        allocate
        allocate
in a loop - still O(n^2) time. However, on saner loads it improves things a lot, and the whole thing is not worth the trouble of switching to something with better worst-case behaviour.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
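To make the trick concrete, here is a minimal user-space sketch of the same idea, separate from the kernel patch below. It is only an illustration under invented assumptions: the flat bitmap, the fixed table size MAX_IDS, and the names hint_alloc()/hint_free() are made up for this example and are not the kernel's IDA API, which uses a tree rather than a linear scan.

/*
 * Sketch of the "start hint" allocation trick (assumed names and sizes).
 * Invariant: every id below 'start' is known to be busy.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_IDS 1024

static bool busy[MAX_IDS];      /* busy[n] == true means id n is allocated */
static int start;               /* every n < start is known to be busy */

/* Allocate the lowest free id, skipping the known-busy prefix. */
static int hint_alloc(void)
{
        int n;

        for (n = start; n < MAX_IDS; n++) {
                if (!busy[n]) {
                        busy[n] = true;
                        start = n + 1;  /* n and everything below are now busy */
                        return n;
                }
        }
        return -1;              /* out of ids */
}

/* Free an id; a free below 'start' invalidates the hint, so pull it down. */
static void hint_free(int n)
{
        busy[n] = false;
        if (n < start)
                start = n;
}

int main(void)
{
        int a = hint_alloc();   /* 0 */
        int b = hint_alloc();   /* 1 */
        int c = hint_alloc();   /* 2, found without rescanning 0..1 */

        hint_free(b);           /* start drops back to 1 */
        printf("a=%d b=%d c=%d next=%d\n", a, b, c, hint_alloc()); /* next == 1 */
        return 0;
}

The pathological case from the message corresponds to looping over hint_free(0) plus two hint_alloc() calls: the first allocation after the free takes id 0 cheaply, but the second still has to walk the whole busy prefix, so the worst case stays quadratic; on allocate-mostly loads the scan starts right past the busy prefix and is effectively constant.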
Diffstat (limited to 'fs')
-rw-r--r--  fs/super.c  7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/fs/super.c b/fs/super.c
index d40d53a..808ffd5 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -608,6 +608,7 @@ void emergency_remount(void)
 
 static DEFINE_IDA(unnamed_dev_ida);
 static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
+static int unnamed_dev_start = 0; /* don't bother trying below it */
 
 int set_anon_super(struct super_block *s, void *data)
 {
@@ -618,7 +619,8 @@ int set_anon_super(struct super_block *s, void *data)
         if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
                 return -ENOMEM;
         spin_lock(&unnamed_dev_lock);
-        error = ida_get_new(&unnamed_dev_ida, &dev);
+        error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
+        unnamed_dev_start = dev + 1;
         spin_unlock(&unnamed_dev_lock);
         if (error == -EAGAIN)
                 /* We raced and lost with another CPU. */
@@ -629,6 +631,7 @@ int set_anon_super(struct super_block *s, void *data)
         if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
                 spin_lock(&unnamed_dev_lock);
                 ida_remove(&unnamed_dev_ida, dev);
+                unnamed_dev_start = dev;
                 spin_unlock(&unnamed_dev_lock);
                 return -EMFILE;
         }
@@ -645,6 +648,8 @@ void kill_anon_super(struct super_block *sb)
         generic_shutdown_super(sb);
         spin_lock(&unnamed_dev_lock);
         ida_remove(&unnamed_dev_ida, slot);
+        if (slot < unnamed_dev_start)
+                unnamed_dev_start = slot;
         spin_unlock(&unnamed_dev_lock);
 }