Diffstat (limited to 'fs/nfs/nfs4state.c')
 fs/nfs/nfs4state.c | 181 +++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 135 insertions(+), 46 deletions(-)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index afe587d..2d5a6a2 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -264,13 +264,16 @@ nfs4_alloc_state_owner(void)
{
struct nfs4_state_owner *sp;
- sp = kmalloc(sizeof(*sp),GFP_KERNEL);
+ sp = kzalloc(sizeof(*sp),GFP_KERNEL);
if (!sp)
return NULL;
- init_MUTEX(&sp->so_sema);
- sp->so_seqid = 0; /* arbitrary */
+ spin_lock_init(&sp->so_lock);
INIT_LIST_HEAD(&sp->so_states);
INIT_LIST_HEAD(&sp->so_delegations);
+ rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
+ sp->so_seqid.sequence = &sp->so_sequence;
+ spin_lock_init(&sp->so_sequence.lock);
+ INIT_LIST_HEAD(&sp->so_sequence.list);
atomic_set(&sp->so_count, 1);
return sp;
}
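
For reference while reading the later hunks, here is a minimal sketch of the data layout these initializations assume. The field names are inferred only from how they are used in this patch; the authoritative definitions live in the NFS client headers, which are not part of this diff.

struct rpc_sequence {
        struct rpc_wait_queue   wait;   /* tasks waiting for their turn to send */
        spinlock_t              lock;   /* guards the list below */
        struct list_head        list;   /* nfs_seqid's queued in submission order */
};

struct nfs_seqid_counter {
        struct rpc_sequence     *sequence;      /* per-owner ordering queue */
        int                     flags;
        u32                     counter;        /* seqid value sent on the wire */
};

struct nfs_seqid {
        struct list_head        list;           /* link into rpc_sequence.list */
        struct nfs_seqid_counter *sequence;     /* counter this seqid advances */
};
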
@@ -359,7 +362,6 @@ nfs4_alloc_open_state(void)
memset(state->stateid.data, 0, sizeof(state->stateid.data));
atomic_set(&state->count, 1);
INIT_LIST_HEAD(&state->lock_states);
- init_MUTEX(&state->lock_sema);
spin_lock_init(&state->state_lock);
return state;
}
@@ -437,21 +439,23 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
if (state)
goto out;
new = nfs4_alloc_open_state();
+ spin_lock(&owner->so_lock);
spin_lock(&inode->i_lock);
state = __nfs4_find_state_byowner(inode, owner);
if (state == NULL && new != NULL) {
state = new;
- /* Caller *must* be holding owner->so_sem */
- /* Note: The reclaim code dictates that we add stateless
- * and read-only stateids to the end of the list */
- list_add_tail(&state->open_states, &owner->so_states);
state->owner = owner;
atomic_inc(&owner->so_count);
list_add(&state->inode_states, &nfsi->open_states);
state->inode = igrab(inode);
spin_unlock(&inode->i_lock);
+ /* Note: The reclaim code dictates that we add stateless
+ * and read-only stateids to the end of the list */
+ list_add_tail(&state->open_states, &owner->so_states);
+ spin_unlock(&owner->so_lock);
} else {
spin_unlock(&inode->i_lock);
+ spin_unlock(&owner->so_lock);
if (new)
nfs4_free_open_state(new);
}
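
The hunk above establishes the lock ordering the rest of the patch relies on: owner->so_lock is taken before inode->i_lock, and so_lock now protects the owner's so_states list in place of so_sema. A minimal sketch of the nesting convention, with a hypothetical helper name used purely for illustration:

/* Hypothetical helper; shows only the lock nesting introduced above. */
static void example_open_state_update(struct nfs4_state_owner *owner,
                                      struct inode *inode)
{
        spin_lock(&owner->so_lock);     /* protects owner->so_states */
        spin_lock(&inode->i_lock);      /* protects the per-inode open_states list */
        /* ... move or unlink the nfs4_state on both lists ... */
        spin_unlock(&inode->i_lock);
        spin_unlock(&owner->so_lock);
}
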
@@ -461,19 +465,21 @@ out:
/*
* Beware! Caller must be holding exactly one
- * reference to clp->cl_sem and owner->so_sema!
+ * reference to clp->cl_sem!
*/
void nfs4_put_open_state(struct nfs4_state *state)
{
struct inode *inode = state->inode;
struct nfs4_state_owner *owner = state->owner;
- if (!atomic_dec_and_lock(&state->count, &inode->i_lock))
+ if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
return;
+ spin_lock(&inode->i_lock);
if (!list_empty(&state->inode_states))
list_del(&state->inode_states);
- spin_unlock(&inode->i_lock);
list_del(&state->open_states);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&owner->so_lock);
iput(inode);
BUG_ON (state->state != 0);
nfs4_free_open_state(state);
@@ -481,20 +487,17 @@ void nfs4_put_open_state(struct nfs4_state *state)
}
/*
- * Beware! Caller must be holding no references to clp->cl_sem!
- * of owner->so_sema!
+ * Close the current file.
*/
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
struct inode *inode = state->inode;
struct nfs4_state_owner *owner = state->owner;
- struct nfs4_client *clp = owner->so_client;
int newstate;
atomic_inc(&owner->so_count);
- down_read(&clp->cl_sem);
- down(&owner->so_sema);
/* Protect against nfs4_find_state() */
+ spin_lock(&owner->so_lock);
spin_lock(&inode->i_lock);
if (mode & FMODE_READ)
state->nreaders--;
@@ -507,6 +510,7 @@ void nfs4_close_state(struct nfs4_state *state, mode_t mode)
list_move_tail(&state->open_states, &owner->so_states);
}
spin_unlock(&inode->i_lock);
+ spin_unlock(&owner->so_lock);
newstate = 0;
if (state->state != 0) {
if (state->nreaders)
@@ -515,14 +519,16 @@ void nfs4_close_state(struct nfs4_state *state, mode_t mode)
newstate |= FMODE_WRITE;
if (state->state == newstate)
goto out;
- if (nfs4_do_close(inode, state, newstate) == -EINPROGRESS)
+ if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
+ state->state = newstate;
+ goto out;
+ }
+ if (nfs4_do_close(inode, state, newstate) == 0)
return;
}
out:
nfs4_put_open_state(state);
- up(&owner->so_sema);
nfs4_put_state_owner(owner);
- up_read(&clp->cl_sem);
}
/*
@@ -546,19 +552,16 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
* Return a compatible lock_state. If no initialized lock_state structure
* exists, return an uninitialized one.
*
- * The caller must be holding state->lock_sema
*/
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
struct nfs4_lock_state *lsp;
struct nfs4_client *clp = state->owner->so_client;
- lsp = kmalloc(sizeof(*lsp), GFP_KERNEL);
+ lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
if (lsp == NULL)
return NULL;
- lsp->ls_flags = 0;
- lsp->ls_seqid = 0; /* arbitrary */
- memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data));
+ lsp->ls_seqid.sequence = &state->owner->so_sequence;
atomic_set(&lsp->ls_count, 1);
lsp->ls_owner = fl_owner;
spin_lock(&clp->cl_lock);
@@ -572,7 +575,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
* Return a compatible lock_state. If no initialized lock_state structure
* exists, return an uninitialized one.
*
- * The caller must be holding state->lock_sema and clp->cl_sem
+ * The caller must be holding clp->cl_sem
*/
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
@@ -605,7 +608,7 @@ static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_
* Release reference to lock_state, and free it if we see that
* it is no longer in use
*/
-static void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
+void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
struct nfs4_state *state;
@@ -673,29 +676,94 @@ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t f
nfs4_put_lock_state(lsp);
}
-/*
-* Called with state->lock_sema and clp->cl_sem held.
-*/
-void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
+struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
- if (status == NFS_OK || seqid_mutating_err(-status))
- lsp->ls_seqid++;
+ struct nfs_seqid *new;
+
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (new != NULL) {
+ new->sequence = counter;
+ INIT_LIST_HEAD(&new->list);
+ }
+ return new;
+}
+
+void nfs_free_seqid(struct nfs_seqid *seqid)
+{
+ struct rpc_sequence *sequence = seqid->sequence->sequence;
+
+ if (!list_empty(&seqid->list)) {
+ spin_lock(&sequence->lock);
+ list_del(&seqid->list);
+ spin_unlock(&sequence->lock);
+ }
+ rpc_wake_up_next(&sequence->wait);
+ kfree(seqid);
}
/*
-* Called with sp->so_sema and clp->cl_sem held.
-*
-* Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
-* failed with a seqid incrementing error -
-* see comments nfs_fs.h:seqid_mutating_error()
-*/
-void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp)
-{
- if (status == NFS_OK || seqid_mutating_err(-status))
- sp->so_seqid++;
- /* If the server returns BAD_SEQID, unhash state_owner here */
- if (status == -NFS4ERR_BAD_SEQID)
+ * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
+ * failed with a seqid-incrementing error -
+ * see the comments in nfs_fs.h:seqid_mutating_error()
+ */
+static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
+{
+ switch (status) {
+ case 0:
+ break;
+ case -NFS4ERR_BAD_SEQID:
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_BADXDR:
+ case -NFS4ERR_RESOURCE:
+ case -NFS4ERR_NOFILEHANDLE:
+ /* Non-seqid mutating errors */
+ return;
+ };
+ /*
+ * Note: no locking needed as we are guaranteed to be first
+ * on the sequence list
+ */
+ seqid->sequence->counter++;
+}
+
+void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
+{
+ if (status == -NFS4ERR_BAD_SEQID) {
+ struct nfs4_state_owner *sp = container_of(seqid->sequence,
+ struct nfs4_state_owner, so_seqid);
nfs4_drop_state_owner(sp);
+ }
+ return nfs_increment_seqid(status, seqid);
+}
+
+/*
+ * Increment the seqid if the LOCK/LOCKU succeeded, or
+ * failed with a seqid-incrementing error -
+ * see the comments in nfs_fs.h:seqid_mutating_error()
+ */
+void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
+{
+ return nfs_increment_seqid(status, seqid);
+}
+
+int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
+{
+ struct rpc_sequence *sequence = seqid->sequence->sequence;
+ int status = 0;
+
+ if (sequence->list.next == &seqid->list)
+ goto out;
+ spin_lock(&sequence->lock);
+ if (!list_empty(&sequence->list)) {
+ rpc_sleep_on(&sequence->wait, task, NULL, NULL);
+ status = -EAGAIN;
+ } else
+ list_add(&seqid->list, &sequence->list);
+ spin_unlock(&sequence->lock);
+out:
+ return status;
}
static int reclaimer(void *);
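
Taken together, the helpers added above serialise the seqid-bearing calls issued on behalf of one state owner. Below is a sketch of the intended life cycle of an nfs_seqid from a caller's point of view; the function is hypothetical, error handling is trimmed, and the actual RPC encode/transmit step is elided.

/* Hypothetical caller; illustrates the intended use of the new helpers. */
static int example_open_call(struct nfs4_state_owner *sp, struct rpc_task *task)
{
        struct nfs_seqid *seqid;
        int status;

        seqid = nfs_alloc_seqid(&sp->so_seqid);
        if (seqid == NULL)
                return -ENOMEM;
        /* Wait for our turn on the owner's sequence.  On -EAGAIN the task
         * has been put to sleep and will be woken from nfs_free_seqid()
         * when the call currently in flight completes; it then retries
         * with the same seqid. */
        status = nfs_wait_on_sequence(seqid, task);
        if (status == -EAGAIN)
                return status;
        /* ... encode seqid->sequence->counter into the OPEN, transmit,
         * and decode the server's reply status ... */
        status = 0;     /* stand-in for the decoded reply status */
        /* Advance the counter on success or on a seqid-mutating error. */
        nfs_increment_open_seqid(status, seqid);
        nfs_free_seqid(seqid);  /* drops our slot and wakes the next waiter */
        return status;
}
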
@@ -791,8 +859,6 @@ static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct n
if (state->state == 0)
continue;
status = ops->recover_open(sp, state);
- list_for_each_entry(lock, &state->lock_states, ls_locks)
- lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
if (status >= 0) {
status = nfs4_reclaim_locks(ops, state);
if (status < 0)
@@ -831,6 +897,28 @@ out_err:
return status;
}
+static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
+{
+ struct nfs4_state_owner *sp;
+ struct nfs4_state *state;
+ struct nfs4_lock_state *lock;
+
+ /* Reset all sequence ids to zero */
+ list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
+ sp->so_seqid.counter = 0;
+ sp->so_seqid.flags = 0;
+ spin_lock(&sp->so_lock);
+ list_for_each_entry(state, &sp->so_states, open_states) {
+ list_for_each_entry(lock, &state->lock_states, ls_locks) {
+ lock->ls_seqid.counter = 0;
+ lock->ls_seqid.flags = 0;
+ lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
+ }
+ }
+ spin_unlock(&sp->so_lock);
+ }
+}
+
static int reclaimer(void *ptr)
{
struct reclaimer_args *args = (struct reclaimer_args *)ptr;
@@ -864,6 +952,7 @@ restart_loop:
default:
ops = &nfs4_network_partition_recovery_ops;
};
+ nfs4_state_mark_reclaim(clp);
status = __nfs4_init_client(clp);
if (status)
goto out_error;