Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
commit 9b152d53b7

4 changed files with 31 additions and 26 deletions
@@ -312,7 +312,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
         int ret, length, hdr_len, copy_offset;
         int rmpp_active = 0;

-        if (count < sizeof (struct ib_user_mad))
+        if (count < sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)
                 return -EINVAL;

         length = count - sizeof (struct ib_user_mad);
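The user_mad hunk above tightens the minimum accepted write() size: a submission must now be large enough to hold the userspace MAD header plus a full RMPP header before it is parsed. A minimal standalone sketch of the new size check follows; the two constants are illustrative stand-ins for the kernel's sizeof (struct ib_user_mad) and IB_MGMT_RMPP_HDR, not the real definitions.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins; the real values come from the kernel headers. */
#define SIZEOF_IB_USER_MAD 64
#define IB_MGMT_RMPP_HDR   36

/* Mirrors the tightened check in ib_umad_write(). */
static int write_size_ok(size_t count)
{
        return count >= SIZEOF_IB_USER_MAD + IB_MGMT_RMPP_HDR;
}

int main(void)
{
        printf("%d\n", write_size_ok(64));  /* 0: would have passed the old check */
        printf("%d\n", write_size_ok(256)); /* 1: large enough for both headers */
        return 0;
}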
@@ -730,14 +730,15 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
         }

         if (attr_mask & IB_QP_ACCESS_FLAGS) {
+                qp_context->params2 |=
+                        cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
+                                    MTHCA_QP_BIT_RWE : 0);
+
                 /*
-                 * Only enable RDMA/atomics if we have responder
-                 * resources set to a non-zero value.
+                 * Only enable RDMA reads and atomics if we have
+                 * responder resources set to a non-zero value.
                  */
                 if (qp->resp_depth) {
-                        qp_context->params2 |=
-                                cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
-                                            MTHCA_QP_BIT_RWE : 0);
                         qp_context->params2 |=
                                 cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
                                             MTHCA_QP_BIT_RRE : 0);
@@ -759,22 +760,19 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                 if (qp->resp_depth && !attr->max_dest_rd_atomic) {
                         /*
                          * Lowering our responder resources to zero.
-                         * Turn off RDMA/atomics as responder.
-                         * (RWE/RRE/RAE in params2 already zero)
+                         * Turn off RDMA reads and atomics as responder.
+                         * (RRE/RAE in params2 already zero)
                          */
-                        qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
-                                                                MTHCA_QP_OPTPAR_RRE |
+                        qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
                                                                 MTHCA_QP_OPTPAR_RAE);
                 }

                 if (!qp->resp_depth && attr->max_dest_rd_atomic) {
                         /*
                          * Increasing our responder resources from
-                         * zero. Turn on RDMA/atomics as appropriate.
+                         * zero. Turn on RDMA reads and atomics as
+                         * appropriate.
                          */
-                        qp_context->params2 |=
-                                cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ?
-                                            MTHCA_QP_BIT_RWE : 0);
                         qp_context->params2 |=
                                 cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
                                             MTHCA_QP_BIT_RRE : 0);
@@ -782,8 +780,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                                 cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
                                             MTHCA_QP_BIT_RAE : 0);

-                        qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
-                                                                MTHCA_QP_OPTPAR_RRE |
+                        qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRE |
                                                                 MTHCA_QP_OPTPAR_RAE);
                 }

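Taken together, the mthca_modify_qp hunks above change the access-flag policy: remote-write enable (RWE) now tracks attr->qp_access_flags directly, while RDMA reads and atomics (RRE/RAE) remain gated on the QP having non-zero responder resources, and the optional-parameter mask no longer touches RWE on resp_depth transitions. A minimal userspace sketch of that policy follows; the QP_BIT_* values are placeholders, not the real MTHCA_QP_BIT_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Standard IB access-flag values; the QP bit positions below are placeholders. */
#define IB_ACCESS_REMOTE_WRITE  (1 << 1)
#define IB_ACCESS_REMOTE_READ   (1 << 2)
#define IB_ACCESS_REMOTE_ATOMIC (1 << 3)

#define QP_BIT_RWE (1 << 4)
#define QP_BIT_RRE (1 << 5)
#define QP_BIT_RAE (1 << 6)

/* New policy: RWE follows the requested flags; reads and atomics additionally
 * require responder resources (resp_depth) to be non-zero. */
static uint32_t params2_bits(int access_flags, int resp_depth)
{
        uint32_t bits = 0;

        if (access_flags & IB_ACCESS_REMOTE_WRITE)
                bits |= QP_BIT_RWE;

        if (resp_depth) {
                if (access_flags & IB_ACCESS_REMOTE_READ)
                        bits |= QP_BIT_RRE;
                if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
                        bits |= QP_BIT_RAE;
        }
        return bits;
}

int main(void)
{
        int flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;

        printf("resp_depth=0 -> 0x%x\n", params2_bits(flags, 0)); /* only RWE */
        printf("resp_depth=4 -> 0x%x\n", params2_bits(flags, 4)); /* RWE | RRE */
        return 0;
}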
@@ -921,10 +918,12 @@ static void mthca_adjust_qp_caps(struct mthca_dev *dev,
         else
                 qp->max_inline_data = max_data_size - MTHCA_INLINE_HEADER_SIZE;

-        qp->sq.max_gs = max_data_size / sizeof (struct mthca_data_seg);
-        qp->rq.max_gs = (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
-                         sizeof (struct mthca_next_seg)) /
-                        sizeof (struct mthca_data_seg);
+        qp->sq.max_gs = min_t(int, dev->limits.max_sg,
+                              max_data_size / sizeof (struct mthca_data_seg));
+        qp->rq.max_gs = min_t(int, dev->limits.max_sg,
+                              (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
+                               sizeof (struct mthca_next_seg)) /
+                              sizeof (struct mthca_data_seg));
 }

 /*
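The mthca_adjust_qp_caps hunk clamps the advertised scatter/gather entry counts to the device limit (dev->limits.max_sg) instead of reporting whatever the WQE size alone would allow. A small standalone example of the clamping arithmetic, with made-up numbers in place of real device limits:

#include <stdio.h>

/* Plain-int stand-in for the kernel's min_t(int, a, b). */
static int min_int(int a, int b)
{
        return a < b ? a : b;
}

int main(void)
{
        int max_sg        = 30;   /* hypothetical HCA scatter/gather limit  */
        int max_data_size = 960;  /* bytes available for data segs in a WQE */
        int data_seg_size = 16;   /* sizeof (struct mthca_data_seg)         */

        int old_sq_max_gs = max_data_size / data_seg_size;          /* 60 */
        int new_sq_max_gs = min_int(max_sg,
                                    max_data_size / data_seg_size); /* 30 */

        printf("old sq.max_gs = %d, new sq.max_gs = %d\n",
               old_sq_max_gs, new_sq_max_gs);
        return 0;
}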
@@ -802,13 +802,21 @@ static int srp_post_recv(struct srp_target_port *target)

 /*
  * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
+ * req_lim and tx_head. Lock cannot be dropped between call here and
+ * call to __srp_post_send().
  */
 static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
 {
         if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
                 return NULL;

+        if (unlikely(target->req_lim < 1)) {
+                if (printk_ratelimit())
+                        printk(KERN_DEBUG PFX "Target has req_lim %d\n",
+                               target->req_lim);
+                return NULL;
+        }
+
         return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
 }

@@ -823,11 +831,6 @@ static int __srp_post_send(struct srp_target_port *target,
         struct ib_send_wr wr, *bad_wr;
         int ret = 0;

-        if (target->req_lim < 1) {
-                printk(KERN_ERR PFX "Target has req_lim %d\n", target->req_lim);
-                return -EAGAIN;
-        }
-
         list.addr = iu->dma;
         list.length = len;
         list.lkey = target->srp_host->mr->lkey;
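The two SRP hunks above move the request-limit (credit) check out of __srp_post_send() and into __srp_get_tx_iu(), so it runs under the same host_lock hold that protects req_lim and tx_head, and they demote the message to a rate-limited KERN_DEBUG printk. A tiny userspace model of the resulting allocation rule follows; SRP_SQ_SIZE and the field names mirror the diff, everything else is illustrative.

#include <stdio.h>

#define SRP_SQ_SIZE 64

struct srp_target_model {
        int req_lim;   /* request credits granted by the target */
        int tx_head;
        int tx_tail;
};

/* Mirrors the new __srp_get_tx_iu() policy: refuse an IU when the send
 * ring is full OR when no credits remain (the check formerly done later
 * in __srp_post_send()). Returns 1 when an IU may be handed out. */
static int can_get_tx_iu(const struct srp_target_model *t)
{
        if (t->tx_head - t->tx_tail >= SRP_SQ_SIZE)
                return 0;
        if (t->req_lim < 1)
                return 0;
        return 1;
}

int main(void)
{
        struct srp_target_model t = { .req_lim = 0, .tx_head = 3, .tx_tail = 3 };

        printf("%d\n", can_get_tx_iu(&t)); /* 0: out of credits, caught early */
        t.req_lim = 2;
        printf("%d\n", can_get_tx_iu(&t)); /* 1: ring has room and credits left */
        return 0;
}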
@@ -1417,6 +1420,8 @@ static ssize_t srp_create_target(struct class_device *class_dev,
         if (!target_host)
                 return -ENOMEM;

+        target_host->max_lun = SRP_MAX_LUN;
+
         target = host_to_target(target_host);
         memset(target, 0, sizeof *target);

@@ -54,6 +54,7 @@ enum {
         SRP_PORT_REDIRECT = 1,
         SRP_DLID_REDIRECT = 2,

+        SRP_MAX_LUN = 512,
         SRP_MAX_IU_LEN = 256,

         SRP_RQ_SHIFT = 6,