NFSv4: Don't add a new lock on an interrupted wait for LOCK
If the wait for a LOCK operation is interrupted, and then the file is closed, the locks cleanup code will assume that no new locks will be added to the inode after it has completed. We already have a mechanism to detect if there was a signal, so let's use that to avoid recreating the local lock once the RPC completes. Also skip re-sending the LOCK operation for the various error cases if we were signaled.

Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
[Trond: Fix inverted test of locks_lock_inode_wait()]
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
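The race being fixed is easier to see outside the kernel. Below is a minimal userspace sketch of the idea, not the kernel code: a completion handler checks a cancelled flag before recording a lock, so a waiter that was interrupted can never have a lock materialize behind its back. All names here (struct lock_request, lock_done, and so on) are invented for illustration; only the cancelled-flag pattern mirrors data->cancelled in the patch.

/*
 * Illustration only -- not kernel code. A waiter that gives up on an
 * in-flight request sets "cancelled"; the completion handler must then
 * refuse to record the result, otherwise a lock appears on a file the
 * caller may already have closed.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct lock_request {
	pthread_mutex_t mtx;
	pthread_cond_t  cond;
	bool done;       /* the (simulated) LOCK reply has arrived */
	bool cancelled;  /* the waiter was interrupted and gave up */
};

/* Completion side: plays the role of nfs4_lock_done(). */
static void *lock_done(void *arg)
{
	struct lock_request *req = arg;

	sleep(1);  /* the reply arrives after the waiter has given up */
	pthread_mutex_lock(&req->mtx);
	if (!req->cancelled)
		puts("recording local lock");
	else
		puts("request was cancelled: not recording a local lock");
	req->done = true;
	pthread_cond_signal(&req->cond);
	pthread_mutex_unlock(&req->mtx);
	return NULL;
}

int main(void)
{
	struct lock_request req = {
		.mtx  = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, lock_done, &req);

	/* Waiter side: pretend a signal interrupted the wait. */
	pthread_mutex_lock(&req.mtx);
	req.cancelled = true;  /* models data->cancelled in the patch */
	while (!req.done)      /* still reap the reply before exiting */
		pthread_cond_wait(&req.cond, &req.mtx);
	pthread_mutex_unlock(&req.mtx);

	pthread_join(t, NULL);
	return 0;
}

Built with -pthread, this takes the "not recording" branch; with the cancelled check removed, the lock would be recorded after the waiter has already moved on, which is exactly the state the locks cleanup code cannot handle.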
This commit is contained in:
parent cf61eb2686
commit a3cf9bca2a

1 changed file with 14 additions and 10 deletions
fs/nfs/nfs4proc.c

@@ -6417,32 +6417,36 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
 	case 0:
 		renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
 				data->timestamp);
-		if (data->arg.new_lock) {
+		if (data->arg.new_lock && !data->cancelled) {
 			data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
-			if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) {
-				rpc_restart_call_prepare(task);
+			if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0)
 				break;
-			}
 		}
+
 		if (data->arg.new_lock_owner != 0) {
 			nfs_confirm_seqid(&lsp->ls_seqid, 0);
 			nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
 			set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
-		} else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
-			rpc_restart_call_prepare(task);
+			goto out_done;
+		} else if (nfs4_update_lock_stateid(lsp, &data->res.stateid))
+			goto out_done;
+
 		break;
 	case -NFS4ERR_BAD_STATEID:
 	case -NFS4ERR_OLD_STATEID:
 	case -NFS4ERR_STALE_STATEID:
 	case -NFS4ERR_EXPIRED:
 		if (data->arg.new_lock_owner != 0) {
-			if (!nfs4_stateid_match(&data->arg.open_stateid,
+			if (nfs4_stateid_match(&data->arg.open_stateid,
 						&lsp->ls_state->open_stateid))
-				rpc_restart_call_prepare(task);
-		} else if (!nfs4_stateid_match(&data->arg.lock_stateid,
+				goto out_done;
+		} else if (nfs4_stateid_match(&data->arg.lock_stateid,
 						&lsp->ls_stateid))
-			rpc_restart_call_prepare(task);
+			goto out_done;
 	}
+	if (!data->cancelled)
+		rpc_restart_call_prepare(task);
+out_done:
 	dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
 }
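The restructuring above also centralizes the retry: every success path now jumps to out_done, and every failure path falls out of the switch to a single rpc_restart_call_prepare() call gated on data->cancelled. A toy C program, with names of our own and none of the kernel's types, shows the shape of that control flow:

#include <stdbool.h>
#include <stdio.h>

/* Illustration only: one retry site after the switch, skipped both by
 * successful completions (goto out_done) and by cancelled requests. */
static void complete(int status, bool cancelled)
{
	switch (status) {
	case 0:
		puts("applying result");
		goto out_done;  /* success: never reach the retry site */
	default:
		break;          /* every error falls through */
	}
	if (!cancelled)         /* the single, gated retry site */
		puts("retrying");
out_done:
	puts("done");
}

int main(void)
{
	complete(0, false);   /* applying result / done */
	complete(-1, false);  /* retrying / done */
	complete(-1, true);   /* done: signaled, so no retry */
	return 0;
}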