Merge branch 'nfsv41_cb'
* nfsv41_cb:
  NFSv4.x: Fix NFS4ERR_RETRY_UNCACHED_REP in nfs4_callback_sequence
  NFSv4.x: Allow multiple callbacks in flight
  NFSv4.x: Fix wraparound issues when validating the callback sequence id
  NFSv4.x: Enforce the ca_maxresponsesize_cached on the back channel
  NFSv4.x: CB_SEQUENCE should return NFS4ERR_DELAY if still executing
  NFSv4.x: Remove hard coded slotids in callback channel
commit cc1f9000ea
6 changed files with 99 additions and 53 deletions
@@ -37,10 +37,11 @@ enum nfs4_callback_opnum {
         OP_CB_ILLEGAL = 10044,
 };
 
+struct nfs4_slot;
 struct cb_process_state {
         __be32                  drc_status;
         struct nfs_client       *clp;
-        u32                     slotid;
+        struct nfs4_slot        *slot;
         u32                     minorversion;
         struct net              *net;
 };

@@ -354,47 +354,38 @@ out:
  * a single outstanding callback request at a time.
  */
 static __be32
-validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
+validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
+                const struct cb_sequenceargs * args)
 {
-        struct nfs4_slot *slot;
-
-        dprintk("%s enter. slotid %u seqid %u\n",
-                __func__, args->csa_slotid, args->csa_sequenceid);
-
-        if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
-                return htonl(NFS4ERR_BADSLOT);
+        dprintk("%s enter. slotid %u seqid %u, slot table seqid: %u\n",
+                __func__, args->csa_slotid, args->csa_sequenceid, slot->seq_nr);
 
-        slot = tbl->slots + args->csa_slotid;
-        dprintk("%s slot table seqid: %u\n", __func__, slot->seq_nr);
-
-        /* Normal */
-        if (likely(args->csa_sequenceid == slot->seq_nr + 1))
-                goto out_ok;
+        if (args->csa_slotid > tbl->server_highest_slotid)
+                return htonl(NFS4ERR_BADSLOT);
 
         /* Replay */
         if (args->csa_sequenceid == slot->seq_nr) {
                 dprintk("%s seqid %u is a replay\n",
                         __func__, args->csa_sequenceid);
+                if (nfs4_test_locked_slot(tbl, slot->slot_nr))
+                        return htonl(NFS4ERR_DELAY);
                 /* Signal process_op to set this error on next op */
                 if (args->csa_cachethis == 0)
                         return htonl(NFS4ERR_RETRY_UNCACHED_REP);
 
-                /* The ca_maxresponsesize_cached is 0 with no DRC */
-                else if (args->csa_cachethis == 1)
-                        return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
+                /* Liar! We never allowed you to set csa_cachethis != 0 */
+                return htonl(NFS4ERR_SEQ_FALSE_RETRY);
         }
 
         /* Wraparound */
-        if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) {
-                slot->seq_nr = 1;
-                goto out_ok;
-        }
+        if (unlikely(slot->seq_nr == 0xFFFFFFFFU)) {
+                if (args->csa_sequenceid == 1)
+                        return htonl(NFS4_OK);
+        } else if (likely(args->csa_sequenceid == slot->seq_nr + 1))
+                return htonl(NFS4_OK);
 
         /* Misordered request */
         return htonl(NFS4ERR_SEQ_MISORDERED);
-out_ok:
-        tbl->highest_used_slotid = args->csa_slotid;
-        return htonl(NFS4_OK);
 }
 
 /*

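Editor's note: the rewritten wraparound test above treats a slot whose seq_nr has reached 0xFFFFFFFF as being followed by sequence id 1, since both the old and the new code take 1 (not 0) as the first valid sequence id on a slot. A minimal user-space sketch of the accept/replay/misordered decision using plain uint32_t arithmetic; the helper name and enum below are illustrative stand-ins, not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: models the CB_SEQUENCE seqid check, not the kernel function. */
    enum seq_verdict { SEQ_OK, SEQ_REPLAY, SEQ_MISORDERED };

    static enum seq_verdict check_seqid(uint32_t slot_seq_nr, uint32_t csa_sequenceid)
    {
        if (csa_sequenceid == slot_seq_nr)
            return SEQ_REPLAY;                  /* same seqid: replay of the last request */
        if (slot_seq_nr == 0xFFFFFFFFU) {
            if (csa_sequenceid == 1)
                return SEQ_OK;                  /* wraparound: 0xFFFFFFFF is followed by 1, not 0 */
        } else if (csa_sequenceid == slot_seq_nr + 1)
            return SEQ_OK;                      /* normal case: exactly one more than last seen */
        return SEQ_MISORDERED;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               check_seqid(5, 6),               /* SEQ_OK */
               check_seqid(0xFFFFFFFFU, 1),     /* SEQ_OK after wraparound */
               check_seqid(0xFFFFFFFFU, 0));    /* SEQ_MISORDERED: seqid 0 is never valid */
        return 0;
    }
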
@@ -473,6 +464,12 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
         tbl = &clp->cl_session->bc_slot_table;
-        slot = tbl->slots + args->csa_slotid;
+
+        /* Set up res before grabbing the spinlock */
+        memcpy(&res->csr_sessionid, &args->csa_sessionid,
+               sizeof(res->csr_sessionid));
+        res->csr_sequenceid = args->csa_sequenceid;
+        res->csr_slotid = args->csa_slotid;
 
         spin_lock(&tbl->slot_tbl_lock);
         /* state manager is resetting the session */
         if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {

@@ -485,18 +482,26 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
                 goto out_unlock;
         }
 
-        memcpy(&res->csr_sessionid, &args->csa_sessionid,
-               sizeof(res->csr_sessionid));
-        res->csr_sequenceid = args->csa_sequenceid;
-        res->csr_slotid = args->csa_slotid;
-        res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
-        res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
-
-        status = validate_seqid(tbl, args);
-        if (status)
+        status = htonl(NFS4ERR_BADSLOT);
+        slot = nfs4_lookup_slot(tbl, args->csa_slotid);
+        if (IS_ERR(slot))
                 goto out_unlock;
 
-        cps->slotid = args->csa_slotid;
+        res->csr_highestslotid = tbl->server_highest_slotid;
+        res->csr_target_highestslotid = tbl->target_highest_slotid;
+
+        status = validate_seqid(tbl, slot, args);
+        if (status)
+                goto out_unlock;
+        if (!nfs4_try_to_lock_slot(tbl, slot)) {
+                status = htonl(NFS4ERR_DELAY);
+                goto out_unlock;
+        }
+        cps->slot = slot;
+
+        /* The ca_maxresponsesize_cached is 0 with no DRC */
+        if (args->csa_cachethis != 0)
+                return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
 
         /*
          * Check for pending referring calls.  If a match is found, a

@@ -513,7 +518,7 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
          * If CB_SEQUENCE returns an error, then the state of the slot
          * (sequence ID, cached reply) MUST NOT change.
          */
-        slot->seq_nr++;
+        slot->seq_nr = args->csa_sequenceid;
 out_unlock:
         spin_unlock(&tbl->slot_tbl_lock);
 

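Editor's note: taken together, the CB_SEQUENCE path above now looks up the slot, validates the sequence id, and only then tries to lock the slot, replying NFS4ERR_DELAY while an earlier callback on the same slot is still executing; the slot is released again once the compound has been processed. A user-space sketch of that lock/record/release discipline using a plain bitmask; the types and names are simplified stand-ins, not the kernel's:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative model of the back-channel slot table: one bit per slot. */
    struct bc_slot_table {
        uint64_t used_slots;    /* bit i set => slot i has a callback in flight */
        uint32_t seq_nr[16];    /* last sequence id seen on each slot */
    };

    /* Returns false (i.e. "reply NFS4ERR_DELAY") if the slot is still busy. */
    static bool bc_try_lock_slot(struct bc_slot_table *tbl, uint32_t slotid)
    {
        if (tbl->used_slots & (1ULL << slotid))
            return false;
        tbl->used_slots |= 1ULL << slotid;
        return true;
    }

    static void bc_free_slot(struct bc_slot_table *tbl, uint32_t slotid)
    {
        tbl->used_slots &= ~(1ULL << slotid);
    }

    int main(void)
    {
        struct bc_slot_table tbl = { 0 };
        uint32_t slotid = 0, seqid = 1;

        if (!bc_try_lock_slot(&tbl, slotid))
            return 1;                       /* previous callback still running on this slot */
        tbl.seq_nr[slotid] = seqid;         /* record the seqid, as the new code does */
        /* ... process the callback compound ... */
        bc_free_slot(&tbl, slotid);         /* release, as nfs4_cb_free_slot() does */
        printf("slot %u released, last seqid %u\n", slotid, tbl.seq_nr[slotid]);
        return 0;
    }
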
@@ -752,7 +752,8 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
         return htonl(NFS_OK);
 }
 
-static void nfs4_callback_free_slot(struct nfs4_session *session)
+static void nfs4_callback_free_slot(struct nfs4_session *session,
+                struct nfs4_slot *slot)
 {
         struct nfs4_slot_table *tbl = &session->bc_slot_table;
 

@@ -761,15 +762,17 @@ static void nfs4_callback_free_slot(struct nfs4_session *session)
          * Let the state manager know callback processing done.
          * A single slot, so highest used slotid is either 0 or -1
          */
-        tbl->highest_used_slotid = NFS4_NO_SLOT;
+        nfs4_free_slot(tbl, slot);
         nfs4_slot_tbl_drain_complete(tbl);
         spin_unlock(&tbl->slot_tbl_lock);
 }
 
 static void nfs4_cb_free_slot(struct cb_process_state *cps)
 {
-        if (cps->slotid != NFS4_NO_SLOT)
-                nfs4_callback_free_slot(cps->clp->cl_session);
+        if (cps->slot) {
+                nfs4_callback_free_slot(cps->clp->cl_session, cps->slot);
+                cps->slot = NULL;
+        }
 }
 
 #else /* CONFIG_NFS_V4_1 */

@@ -893,7 +896,6 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
         struct cb_process_state cps = {
                 .drc_status = 0,
                 .clp = NULL,
-                .slotid = NFS4_NO_SLOT,
                 .net = SVC_NET(rqstp),
         };
         unsigned int nops = 0;

@@ -7319,7 +7319,7 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
         args->bc_attrs.max_resp_sz = PAGE_SIZE;
         args->bc_attrs.max_resp_sz_cached = 0;
         args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
-        args->bc_attrs.max_reqs = 1;
+        args->bc_attrs.max_reqs = NFS41_BC_MAX_CALLBACKS;
 
         dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
                 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",

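Editor's note: the back-channel attributes above keep max_resp_sz_cached at 0 while raising max_reqs to NFS41_BC_MAX_CALLBACKS. Since no reply cache is offered, a CB_SEQUENCE that asks for caching (csa_cachethis != 0) has to be refused, which is what the NFS4ERR_REP_TOO_BIG_TO_CACHE check earlier in nfs4_callback_sequence enforces. A minimal user-space sketch of that policy decision; the function name and test values are illustrative, not kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative: may the client honor csa_cachethis, given the
     * ca_maxresponsesize_cached it advertised at CREATE_SESSION? */
    static bool may_cache_reply(unsigned int max_resp_sz_cached, bool csa_cachethis)
    {
        if (!csa_cachethis)
            return true;                    /* server did not ask for caching */
        return max_resp_sz_cached > 0;      /* 0 => no DRC => must refuse */
    }

    int main(void)
    {
        /* The back channel advertises max_resp_sz_cached = 0, so a request with
         * csa_cachethis set draws NFS4ERR_REP_TOO_BIG_TO_CACHE. */
        printf("%s\n", may_cache_reply(0, true) ? "cache" : "NFS4ERR_REP_TOO_BIG_TO_CACHE");
        return 0;
    }
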
@@ -135,6 +135,43 @@ static struct nfs4_slot *nfs4_find_or_create_slot(struct nfs4_slot_table *tbl,
         return ERR_PTR(-ENOMEM);
 }
 
+static void nfs4_lock_slot(struct nfs4_slot_table *tbl,
+                struct nfs4_slot *slot)
+{
+        u32 slotid = slot->slot_nr;
+
+        __set_bit(slotid, tbl->used_slots);
+        if (slotid > tbl->highest_used_slotid ||
+                        tbl->highest_used_slotid == NFS4_NO_SLOT)
+                tbl->highest_used_slotid = slotid;
+        slot->generation = tbl->generation;
+}
+
+/*
+ * nfs4_try_to_lock_slot - Given a slot try to allocate it
+ *
+ * Note: must be called with the slot_tbl_lock held.
+ */
+bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
+{
+        if (nfs4_test_locked_slot(tbl, slot->slot_nr))
+                return false;
+        nfs4_lock_slot(tbl, slot);
+        return true;
+}
+
+/*
+ * nfs4_lookup_slot - Find a slot but don't allocate it
+ *
+ * Note: must be called with the slot_tbl_lock held.
+ */
+struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid)
+{
+        if (slotid <= tbl->max_slotid)
+                return nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
+        return ERR_PTR(-E2BIG);
+}
+
 /*
  * nfs4_alloc_slot - efficiently look for a free slot
  *

@@ -153,18 +190,11 @@ struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl)
                 __func__, tbl->used_slots[0], tbl->highest_used_slotid,
                 tbl->max_slotid + 1);
         slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slotid + 1);
-        if (slotid > tbl->max_slotid)
-                goto out;
-        ret = nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
-        if (IS_ERR(ret))
-                goto out;
-        __set_bit(slotid, tbl->used_slots);
-        if (slotid > tbl->highest_used_slotid ||
-                        tbl->highest_used_slotid == NFS4_NO_SLOT)
-                tbl->highest_used_slotid = slotid;
-        ret->generation = tbl->generation;
-
-out:
+        if (slotid <= tbl->max_slotid) {
+                ret = nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
+                if (!IS_ERR(ret))
+                        nfs4_lock_slot(tbl, ret);
+        }
         dprintk("<-- %s used_slots=%04lx highest_used=%u slotid=%u\n",
                 __func__, tbl->used_slots[0], tbl->highest_used_slotid,
                 !IS_ERR(ret) ? ret->slot_nr : NFS4_NO_SLOT);

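Editor's note: the reworked nfs4_alloc_slot() above now shares its bit-marking step with the back channel through nfs4_lock_slot(). A stand-alone sketch of the "find the first zero bit, then mark it used" allocation it performs, with a plain unsigned long standing in for the kernel bitmap; all names here are illustrative, not the kernel's API:

    #include <limits.h>
    #include <stdio.h>

    #define MAX_SLOTID 15U
    #define NO_SLOT UINT_MAX

    /* Illustrative stand-in for a first-zero-bit search on a single word. */
    static unsigned int first_zero_bit(unsigned long word, unsigned int nbits)
    {
        for (unsigned int i = 0; i < nbits; i++)
            if (!(word & (1UL << i)))
                return i;
        return nbits;
    }

    /* Model of the allocation: pick the lowest free slotid and mark it used. */
    static unsigned int alloc_slotid(unsigned long *used_slots)
    {
        unsigned int slotid = first_zero_bit(*used_slots, MAX_SLOTID + 1);

        if (slotid > MAX_SLOTID)
            return NO_SLOT;                 /* table full: caller must wait */
        *used_slots |= 1UL << slotid;       /* the nfs4_lock_slot() step */
        return slotid;
    }

    int main(void)
    {
        unsigned long used = 0;

        printf("first alloc: %u\n", alloc_slotid(&used));   /* 0 */
        printf("second alloc: %u\n", alloc_slotid(&used));  /* 1 */
        return 0;
    }
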
@@ -77,6 +77,8 @@ extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
                 unsigned int max_reqs, const char *queue);
 extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
 extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
+extern struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid);
+extern bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
 extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
 extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
 bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,

@@ -88,6 +90,12 @@ static inline bool nfs4_slot_tbl_draining(struct nfs4_slot_table *tbl)
         return !!test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
 }
 
+static inline bool nfs4_test_locked_slot(const struct nfs4_slot_table *tbl,
+                u32 slotid)
+{
+        return !!test_bit(slotid, tbl->used_slots);
+}
+
 #if defined(CONFIG_NFS_V4_1)
 extern void nfs41_set_target_slotid(struct nfs4_slot_table *tbl,
                 u32 target_highest_slotid);