author      Steven Whitehouse <swhiteho@redhat.com>    2010-11-30 15:49:31 +0000
committer   Steven Whitehouse <swhiteho@redhat.com>    2010-11-30 15:49:31 +0000
commit      47a25380e37f44db7202093ca92e4af569c34f55 (patch)
tree        db3e6dba3859c5562b9a86f6d4059519fa7a1c52
parent      e06dfc492870e1d380f02722cde084b724dc197b (diff)
GFS2: Merge glock state fields into a bitfield
We can only merge the fields into a bitfield if the locking
rules for them are the same. In this case gl_spin covers all
of the fields (write side) but a couple of them are used
with GLF_LOCK as the read side lock, which should be ok
since we know that the field in question won't be changing
at the time.
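
(Editorial aside, not part of the original commit message: the reason the locking rules must match is that adjacent bitfield members share a single storage unit, so writing any one of them is a read-modify-write of the whole unit. The following minimal userspace sketch uses hypothetical names and a pthread mutex standing in for gl_spin; it is not GFS2 code, it only shows the rule the patch relies on: every writer of any packed member takes the same lock.)

/*
 * Toy illustration, not GFS2 code: "state" and "target" share one
 * storage unit, so writing either member is a read-modify-write of
 * the whole unit.  If one writer updated its member without the lock,
 * it could silently undo a concurrent locked update of the other.
 */
#include <pthread.h>
#include <stdio.h>

struct toy_glock {
	pthread_mutex_t lock;	/* stands in for gl_spin (write side) */
	unsigned int state:2,	/* stands in for gl_state */
		     target:2;	/* stands in for gl_target */
};

static void set_state(struct toy_glock *gl, unsigned int s)
{
	pthread_mutex_lock(&gl->lock);	/* all writers take the same lock */
	gl->state = s;
	pthread_mutex_unlock(&gl->lock);
}

static void set_target(struct toy_glock *gl, unsigned int t)
{
	pthread_mutex_lock(&gl->lock);	/* same lock, same rule */
	gl->target = t;
	pthread_mutex_unlock(&gl->lock);
}

int main(void)
{
	struct toy_glock gl = { .lock = PTHREAD_MUTEX_INITIALIZER };

	set_state(&gl, 1);
	set_target(&gl, 2);
	printf("state=%u target=%u\n", gl.state, gl.target);
	return 0;
}
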
The gl_req setting has to be done earlier (in glock.c) in order
to place it under gl_spin. The gl_reply setting also has to be
brought under gl_spin in order to comply with the new rules.
This saves 4*sizeof(unsigned int) per glock.
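
(Editorial aside: the figure can be sanity-checked directly. The five merged fields (gl_state, gl_target, gl_demote_state, gl_req, gl_reply) need 2+2+2+2+8 = 16 bits, so they fit in one unsigned int instead of occupying five, a saving of four. The standalone check below uses hypothetical struct names and assumes a typical 4-byte unsigned int.)

/*
 * Standalone size check, hypothetical struct names: five separate
 * unsigned ints versus one unsigned int carrying a 2+2+2+2+8 = 16-bit
 * bitfield.  With a 4-byte unsigned int the difference is 16 bytes,
 * i.e. 4*sizeof(unsigned int).
 */
#include <stdio.h>

struct glock_before {		/* separate fields, as before the patch */
	unsigned int state, target, reply, req, demote_state;
};

struct glock_after {		/* merged into one bitfield word */
	unsigned int state:2, target:2, demote_state:2, req:2, reply:8;
};

int main(void)
{
	printf("before=%zu after=%zu saved=%zu\n",
	       sizeof(struct glock_before), sizeof(struct glock_after),
	       sizeof(struct glock_before) - sizeof(struct glock_after));
	return 0;
}
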
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Bob Peterson <rpeterso@redhat.com>
-rw-r--r--   fs/gfs2/glock.c    |  9
-rw-r--r--   fs/gfs2/incore.h   | 12
-rw-r--r--   fs/gfs2/lock_dlm.c |  1
3 files changed, 14 insertions(+), 8 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 2dd1d72..08a8beb 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -567,6 +567,7 @@ __acquires(&gl->gl_spin)
 		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 		do_error(gl, 0); /* Fail queued try locks */
 	}
+	gl->gl_req = target;
 	spin_unlock(&gl->gl_spin);
 	if (glops->go_xmote_th)
 		glops->go_xmote_th(gl);
@@ -1353,24 +1354,28 @@ static int gfs2_should_freeze(const struct gfs2_glock *gl)
  * @gl: Pointer to the glock
  * @ret: The return value from the dlm
  *
+ * The gl_reply field is under the gl_spin lock so that it is ok
+ * to use a bitfield shared with other glock state fields.
  */
 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 {
 	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
+	spin_lock(&gl->gl_spin);
 	gl->gl_reply = ret;
 	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
-		spin_lock(&gl->gl_spin);
 		if (gfs2_should_freeze(gl)) {
 			set_bit(GLF_FROZEN, &gl->gl_flags);
 			spin_unlock(&gl->gl_spin);
 			return;
 		}
-		spin_unlock(&gl->gl_spin);
 	}
+
+	spin_unlock(&gl->gl_spin);
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+	smp_wmb();
 	gfs2_glock_hold(gl);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 		gfs2_glock_put(gl);
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 764fbb4..8d3d2b4 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -207,12 +207,14 @@ struct gfs2_glock {
 	spinlock_t gl_spin;
-	unsigned int gl_state;
-	unsigned int gl_target;
-	unsigned int gl_reply;
+	/* State fields protected by gl_spin */
+	unsigned int gl_state:2,	/* Current state */
+		     gl_target:2,	/* Target state */
+		     gl_demote_state:2,	/* State requested by remote node */
+		     gl_req:2,		/* State in last dlm request */
+		     gl_reply:8;	/* Last reply from the dlm */
+
 	unsigned int gl_hash;
-	unsigned int gl_req;
-	unsigned int gl_demote_state; /* state requested by remote node */
 	unsigned long gl_demote_time; /* time of first demote request */
 	struct list_head gl_holders;
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index f40ce34..6e493ae 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -153,7 +153,6 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
 	int req;
 	u32 lkf;
-	gl->gl_req = req_state;
 	req = make_mode(req_state);
 	lkf = make_flags(gl->gl_lksb.sb_lkid, flags, req);