From 6362e4d4eda61efb04ac1cdae32e48ac6d90b701 Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Thu, 23 Mar 2006 03:00:17 -0800
Subject: [PATCH] Fix oops in invalidate_dquots()

When quota is being turned off we assumed that all references to dquots
had already been dropped. That need not be true: inodes being deleted are
not on the superblock's inodes list, and hence we need not reach them when
removing quota references from inodes. So invalidate_dquots() has to wait
for all users of dquots (as quota is already marked as turned off, no new
references can be acquired and so this is bound to happen rather early).
When we do this, we can also remove the iprune_sem locking, as it was
protecting us against exactly the same problem when freeing inodes from
the icache.

Signed-off-by: Jan Kara
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/dquot.c | 52 +++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 39 insertions(+), 13 deletions(-)

diff --git a/fs/dquot.c b/fs/dquot.c
index 1966c89..9376a43 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -118,8 +118,7 @@
  * spinlock to internal buffers before writing.
  *
  * Lock ordering (including related VFS locks) is the following:
- *   i_mutex > dqonoff_sem > iprune_sem > journal_lock > dqptr_sem >
- *   > dquot->dq_lock > dqio_sem
+ *   i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > dqio_sem
  * i_mutex on quota files is special (it's below dqio_sem)
  */
 
@@ -407,23 +406,49 @@ out_dqlock:
 
 /* Invalidate all dquots on the list. Note that this function is called after
  * quota is disabled and pointers from inodes removed so there cannot be new
- * quota users. Also because we hold dqonoff_sem there can be no quota users
- * for this sb+type at all. */
+ * quota users. There can still be some users of quotas due to inodes being
+ * just deleted or pruned by prune_icache() (those are not attached to any
+ * list). We have to wait for such users.
+ */
 static void invalidate_dquots(struct super_block *sb, int type)
 {
         struct dquot *dquot, *tmp;
 
+restart:
         spin_lock(&dq_list_lock);
         list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
                 if (dquot->dq_sb != sb)
                         continue;
                 if (dquot->dq_type != type)
                         continue;
-#ifdef __DQUOT_PARANOIA
-                if (atomic_read(&dquot->dq_count))
-                        BUG();
-#endif
-                /* Quota now has no users and it has been written on last dqput() */
+                /* Wait for dquot users */
+                if (atomic_read(&dquot->dq_count)) {
+                        DEFINE_WAIT(wait);
+
+                        atomic_inc(&dquot->dq_count);
+                        prepare_to_wait(&dquot->dq_wait_unused, &wait,
+                                        TASK_UNINTERRUPTIBLE);
+                        spin_unlock(&dq_list_lock);
+                        /* Once dqput() wakes us up, we know it's time to free
+                         * the dquot.
+                         * IMPORTANT: we rely on the fact that there is always
+                         * at most one process waiting for dquot to free.
+                         * Otherwise dq_count would be > 1 and we would never
+                         * wake up.
+                         */
+                        if (atomic_read(&dquot->dq_count) > 1)
+                                schedule();
+                        finish_wait(&dquot->dq_wait_unused, &wait);
+                        dqput(dquot);
+                        /* At this moment the dquot need not exist (it could
+                         * be reclaimed by prune_dqcache()). Hence we must
+                         * restart. */
+                        goto restart;
+                }
+                /*
+                 * Quota now has no users and it has been written on last
+                 * dqput()
+                 */
                 remove_dquot_hash(dquot);
                 remove_free_dquot(dquot);
                 remove_inuse(dquot);
@@ -540,6 +565,10 @@ we_slept:
         if (atomic_read(&dquot->dq_count) > 1) {
                 /* We have more than one user... nothing to do */
                 atomic_dec(&dquot->dq_count);
+                /* Releasing dquot during quotaoff phase? */
+                if (!sb_has_quota_enabled(dquot->dq_sb, dquot->dq_type) &&
+                    atomic_read(&dquot->dq_count) == 1)
+                        wake_up(&dquot->dq_wait_unused);
                 spin_unlock(&dq_list_lock);
                 return;
         }
@@ -581,6 +610,7 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
         INIT_LIST_HEAD(&dquot->dq_inuse);
         INIT_HLIST_NODE(&dquot->dq_hash);
         INIT_LIST_HEAD(&dquot->dq_dirty);
+        init_waitqueue_head(&dquot->dq_wait_unused);
         dquot->dq_sb = sb;
         dquot->dq_type = type;
         atomic_set(&dquot->dq_count, 1);
@@ -732,13 +762,9 @@ static void drop_dquot_ref(struct super_block *sb, int type)
 {
         LIST_HEAD(tofree_head);
 
-        /* We need to be guarded against prune_icache to reach all the
-         * inodes - otherwise some can be on the local list of prune_icache */
-        down(&iprune_sem);
         down_write(&sb_dqopt(sb)->dqptr_sem);
         remove_dquot_ref(sb, type, &tofree_head);
         up_write(&sb_dqopt(sb)->dqptr_sem);
-        up(&iprune_sem);
         put_dquot_list(&tofree_head);
 }
 
--
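For readers following the locking logic, the sketch below is a minimal
userspace analogue (POSIX threads, not kernel code, and not part of the
patch) of the "take a reference, then wait until you are the last user"
handshake that invalidate_dquots() and dqput() implement with dq_count and
dq_wait_unused above. All names here (struct ref_obj, ref_put,
ref_wait_unused) are hypothetical; a condition variable stands in for the
kernel waitqueue and a plain mutex for dq_list_lock.

#include <pthread.h>

struct ref_obj {
        int count;                      /* plays the role of dq_count */
        pthread_mutex_t lock;           /* plays the role of dq_list_lock */
        pthread_cond_t wait_unused;     /* plays the role of dq_wait_unused */
};

/* Drop one reference; wake the invalidator once it is the only user left. */
void ref_put(struct ref_obj *o)
{
        pthread_mutex_lock(&o->lock);
        if (--o->count == 1)
                pthread_cond_signal(&o->wait_unused);
        pthread_mutex_unlock(&o->lock);
}

/*
 * Invalidation side: take an extra reference so the object cannot vanish
 * under us, wait until every other user has dropped theirs, then drop our
 * own reference. On return the caller is the sole owner and may free it.
 */
void ref_wait_unused(struct ref_obj *o)
{
        pthread_mutex_lock(&o->lock);
        o->count++;                     /* like atomic_inc(&dquot->dq_count) */
        while (o->count > 1)            /* other users still hold references */
                pthread_cond_wait(&o->wait_unused, &o->lock);
        o->count--;                     /* like the final dqput() */
        pthread_mutex_unlock(&o->lock);
}

A ref_obj would be initialized with count = 1, PTHREAD_MUTEX_INITIALIZER and
PTHREAD_COND_INITIALIZER. Re-checking the count in a while loop is the
portable choice for this sketch; the kernel version instead relies on the
invariant, spelled out in the comment in invalidate_dquots(), that at most
one process ever waits on dq_wait_unused, so a single schedule() suffices.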