From fc0e38dae645f65424d1fb5d2a938aab8ce48a58 Mon Sep 17 00:00:00 2001
From: Steven Whitehouse
Date: Wed, 9 Mar 2011 10:58:04 +0000
Subject: GFS2: Fix glock deallocation race

This patch fixes a race in deallocating glocks which was introduced
in the RCU glock patch. We need to ensure that the glock count is
kept correct even in the case that there is a race to add a new
glock into the hash table. Also, to avoid having to wait for an RCU
grace period, the glock counter can be decremented before call_rcu()
is called.

Signed-off-by: Steven Whitehouse
---
 fs/gfs2/glock.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

(limited to 'fs/gfs2/glock.c')

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index ddc3e1e..3f45a14 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -103,16 +103,21 @@ static inline void spin_unlock_bucket(unsigned int hash)
 	__bit_spin_unlock(0, (unsigned long *)bl);
 }
 
-void gfs2_glock_free(struct rcu_head *rcu)
+static void gfs2_glock_dealloc(struct rcu_head *rcu)
 {
 	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
-	struct gfs2_sbd *sdp = gl->gl_sbd;
 
 	if (gl->gl_ops->go_flags & GLOF_ASPACE)
 		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
 	else
 		kmem_cache_free(gfs2_glock_cachep, gl);
+}
+
+void gfs2_glock_free(struct gfs2_glock *gl)
+{
+	struct gfs2_sbd *sdp = gl->gl_sbd;
+	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
 		wake_up(&sdp->sd_glock_wait);
 }
 
@@ -760,6 +765,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	if (tmp) {
 		spin_unlock_bucket(hash);
 		kmem_cache_free(cachep, gl);
+		atomic_dec(&sdp->sd_glock_disposal);
 		gl = tmp;
 	} else {
 		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
--
cgit v1.1
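
A minimal userspace sketch of the counting discipline this patch enforces,
assuming a single-threaded simulation in place of real RCU: a deferred-free
list stands in for call_rcu(), and disposal_count stands in for
sd_glock_disposal. All names here (glock_get, glock_free, dealloc_cb,
disposal_count) are hypothetical illustrations, not the GFS2 code itself.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct glock {
	struct glock *next;		/* deferred-free list linkage */
	int number;
};

static atomic_int disposal_count;	/* glocks not yet handed off for freeing */
static struct glock *deferred;		/* stand-in for the RCU callback queue */

/* Runs "after a grace period": only frees memory, touches no counters. */
static void dealloc_cb(struct glock *gl)
{
	free(gl);
}

/*
 * Mirrors the patched gfs2_glock_free(): the counter is dropped as soon
 * as the free is queued, so a waiter does not have to sit out the
 * grace period itself.
 */
static void glock_free(struct glock *gl)
{
	gl->next = deferred;
	deferred = gl;			/* "call_rcu()" */
	atomic_fetch_sub(&disposal_count, 1);
}

/*
 * Mirrors the race in gfs2_glock_get(): if another thread already
 * published the same glock, the loser frees its never-published copy
 * directly and must still undo the counter increment it made at
 * allocation time -- the one-line hunk this patch adds.
 */
static struct glock *glock_get(int number, struct glock **existing)
{
	struct glock *gl = malloc(sizeof(*gl));

	gl->number = number;
	atomic_fetch_add(&disposal_count, 1);

	if (*existing) {		/* lost the insertion race */
		free(gl);
		atomic_fetch_sub(&disposal_count, 1);
		return *existing;
	}
	*existing = gl;			/* won: publish */
	return gl;
}

int main(void)
{
	struct glock *slot = NULL;
	struct glock *a = glock_get(1, &slot);	/* wins */
	struct glock *b = glock_get(1, &slot);	/* loses the race */

	printf("same object: %d\n", a == b);
	glock_free(a);
	printf("outstanding after free: %d\n",
	       atomic_load(&disposal_count));	/* 0: waiter can proceed */

	/* Simulated grace period has elapsed; run the deferred frees. */
	while (deferred) {
		struct glock *gl = deferred;
		deferred = gl->next;
		dealloc_cb(gl);
	}
	return 0;
}

The two points mirrored from the patch: the loser of the insertion race
must decrement the counter when it discards its unpublished glock, and the
counter is decremented when the deferred free is queued rather than inside
the RCU callback, since per the commit message the waiter only needs to
know the glock has been disposed of, not that its memory has been returned.
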