Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull infiniband changes from Roland Dreier:
 "Second batch of changes for the 3.7 merge window:

  - Late-breaking fix for IPoIB on mlx4 SR-IOV VFs.
  - Fix for IPoIB build breakage with CONFIG_INFINIBAND_IPOIB_CM=n (the
    new netlink config changes are to blame).
  - Make sure retry count values are in range in RDMA CM.
  - A few nes hardware driver fixes and cleanups.
  - Have the iSER initiator use more than one interrupt vector if
    available."

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/cma: Check that retry count values are in range
  IB/iser: Add more RX CQs to scale out processing of SCSI responses
  RDMA/nes: Bump the version number of nes driver
  RDMA/nes: Remove unused module parameter "send_first"
  RDMA/nes: Remove unnecessary if-else statement
  RDMA/nes: Add missing break to switch
  mlx4_core: Adjust flow steering attach wrapper so that IB works on SR-IOV VFs
  IPoIB: Fix build with CONFIG_INFINIBAND_IPOIB_CM=n
commit ca4da6948b
10 changed files with 146 additions and 99 deletions
drivers/infiniband/core/cma.c

@@ -2648,8 +2648,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	req.responder_resources = conn_param->responder_resources;
 	req.initiator_depth = conn_param->initiator_depth;
 	req.flow_control = conn_param->flow_control;
-	req.retry_count = conn_param->retry_count;
-	req.rnr_retry_count = conn_param->rnr_retry_count;
+	req.retry_count = min_t(u8, 7, conn_param->retry_count);
+	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
 	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
 	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
 	req.max_cm_retries = CMA_MAX_CM_RETRIES;

@@ -2770,7 +2770,7 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
 	rep.initiator_depth = conn_param->initiator_depth;
 	rep.failover_accepted = 0;
 	rep.flow_control = conn_param->flow_control;
-	rep.rnr_retry_count = conn_param->rnr_retry_count;
+	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
 	rep.srq = id_priv->srq ? 1 : 0;

 	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
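The clamp matters because the IB CM encodes retry_count and rnr_retry_count in 3-bit wire fields, so any userspace-supplied value above 7 would be truncated on the wire; min_t(u8, 7, ...) caps the value instead. A standalone sketch of the same clamp (clamp_retry() is a hypothetical stand-in for the min_t() expression, not a kernel helper):

#include <stdint.h>
#include <stdio.h>

/* IB CM retry counters are 3-bit wire fields, so 7 is the largest
 * legal value; anything larger must be capped before sending. */
static uint8_t clamp_retry(uint8_t requested)
{
	return requested > 7 ? 7 : requested;
}

int main(void)
{
	/* A userspace-supplied value of 15 would otherwise alias on the wire. */
	printf("retry_count 15 -> %u\n", clamp_retry(15));
	printf("retry_count 3  -> %u\n", clamp_retry(3));
	return 0;
}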
drivers/infiniband/hw/nes/nes.c

@@ -79,11 +79,6 @@ int disable_mpa_crc = 0;
 module_param(disable_mpa_crc, int, 0644);
 MODULE_PARM_DESC(disable_mpa_crc, "Disable checking of MPA CRC");

-unsigned int send_first = 0;
-module_param(send_first, int, 0644);
-MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection");
-
-
 unsigned int nes_drv_opt = NES_DRV_OPT_DISABLE_INT_MOD | NES_DRV_OPT_ENABLE_PAU;
 module_param(nes_drv_opt, int, 0644);
 MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters");
drivers/infiniband/hw/nes/nes.h

@@ -57,7 +57,7 @@
 #define QUEUE_DISCONNECTS

 #define DRV_NAME    "iw_nes"
-#define DRV_VERSION "1.5.0.0"
+#define DRV_VERSION "1.5.0.1"
 #define PFX         DRV_NAME ": "

 /*

@@ -172,7 +172,6 @@ extern int interrupt_mod_interval;
 extern int nes_if_count;
 extern int mpa_version;
 extern int disable_mpa_crc;
-extern unsigned int send_first;
 extern unsigned int nes_drv_opt;
 extern unsigned int nes_debug_level;
 extern unsigned int wqm_quanta;
drivers/infiniband/hw/nes/nes_verbs.c

@@ -3006,6 +3006,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		switch (nesqp->hw_iwarp_state) {
 		case NES_AEQE_IWARP_STATE_CLOSING:
 			next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+			break;
 		case NES_AEQE_IWARP_STATE_TERMINATE:
 			next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
 			break;

@@ -3068,18 +3069,9 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		}

 		nesqp->ibqp_state = attr->qp_state;
-		if (((nesqp->iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) ==
-				(u32)NES_CQP_QP_IWARP_STATE_RTS) &&
-				((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) >
-				(u32)NES_CQP_QP_IWARP_STATE_RTS)) {
-			nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
-			nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
-					nesqp->iwarp_state);
-		} else {
-			nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
-			nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
-					nesqp->iwarp_state);
-		}
+		nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
+		nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
+				nesqp->iwarp_state);
 	}

 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
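The one-line break fix addresses a classic switch fallthrough: without it, a QP in the CLOSING state fell through into the TERMINATE case and next_iwarp_state was silently overwritten. A standalone sketch of the hazard (enum and function names are illustrative, not taken from the driver):

#include <stdio.h>

enum hw_state { STATE_CLOSING, STATE_TERMINATE };

/* Without a break after the CLOSING case, execution falls into the
 * TERMINATE case and overwrites the state that was just chosen. */
static int next_state(enum hw_state hw, int add_break)
{
	int next = -1;

	switch (hw) {
	case STATE_CLOSING:
		next = STATE_CLOSING;
		if (add_break)
			break;		/* the added break */
		/* fallthrough: the original bug */
	case STATE_TERMINATE:
		next = STATE_TERMINATE;
		break;
	}
	return next;
}

int main(void)
{
	printf("CLOSING without break -> %d (wrongly TERMINATE)\n",
	       next_state(STATE_CLOSING, 0));
	printf("CLOSING with break    -> %d (stays CLOSING)\n",
	       next_state(STATE_CLOSING, 1));
	return 0;
}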
drivers/infiniband/ulp/ipoib/ipoib.h

@@ -535,14 +535,14 @@ void ipoib_drain_cq(struct net_device *dev);
 void ipoib_set_ethtool_ops(struct net_device *dev);
 int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);

-#ifdef CONFIG_INFINIBAND_IPOIB_CM
-
 #define IPOIB_FLAGS_RC		0x80
 #define IPOIB_FLAGS_UC		0x40

 /* We don't support UC connections at the moment */
 #define IPOIB_CM_SUPPORTED(ha)   (ha[0] & (IPOIB_FLAGS_RC))

+#ifdef CONFIG_INFINIBAND_IPOIB_CM
+
 extern int ipoib_max_conn_qp;

 static inline int ipoib_cm_admin_enabled(struct net_device *dev)
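The header shuffle works because IPOIB_CM_SUPPORTED() is now referenced from code that is built even when connected mode is compiled out (ipoib_set_mode() in ipoib_main.c, below), so the macro has to live outside the #ifdef. A standalone sketch of the pattern (CONFIG_CM and CM_SUPPORTED() are illustrative stand-ins for the real names):

#include <stdio.h>

#define IPOIB_FLAGS_RC 0x80

/* Visible regardless of the feature flag, like IPOIB_CM_SUPPORTED(): */
#define CM_SUPPORTED(ha0) ((ha0) & IPOIB_FLAGS_RC)

#ifdef CONFIG_CM
/* CM-only declarations would go here, like ipoib_max_conn_qp. */
#endif

int main(void)
{
	unsigned char ha0 = 0x80;

	/* Compiles and runs even when CONFIG_CM is not defined. */
	printf("CM supported: %s\n", CM_SUPPORTED(ha0) ? "yes" : "no");
	return 0;
}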
drivers/infiniband/ulp/ipoib/ipoib_cm.c

@@ -1448,37 +1448,6 @@ static ssize_t show_mode(struct device *d, struct device_attribute *attr,
 	return sprintf(buf, "datagram\n");
 }

-int ipoib_set_mode(struct net_device *dev, const char *buf)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-
-	/* flush paths if we switch modes so that connections are restarted */
-	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
-		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
-		ipoib_warn(priv, "enabling connected mode "
-			   "will cause multicast packet drops\n");
-		netdev_update_features(dev);
-		rtnl_unlock();
-		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
-
-		ipoib_flush_paths(dev);
-		rtnl_lock();
-		return 0;
-	}
-
-	if (!strcmp(buf, "datagram\n")) {
-		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
-		netdev_update_features(dev);
-		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
-		rtnl_unlock();
-		ipoib_flush_paths(dev);
-		rtnl_lock();
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
 static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
drivers/infiniband/ulp/ipoib/ipoib_main.c

@@ -215,6 +215,37 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }

+int ipoib_set_mode(struct net_device *dev, const char *buf)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+	/* flush paths if we switch modes so that connections are restarted */
+	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
+		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+		ipoib_warn(priv, "enabling connected mode "
+			   "will cause multicast packet drops\n");
+		netdev_update_features(dev);
+		rtnl_unlock();
+		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
+
+		ipoib_flush_paths(dev);
+		rtnl_lock();
+		return 0;
+	}
+
+	if (!strcmp(buf, "datagram\n")) {
+		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+		netdev_update_features(dev);
+		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+		rtnl_unlock();
+		ipoib_flush_paths(dev);
+		rtnl_lock();
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
drivers/infiniband/ulp/iser/iscsi_iser.h

@@ -177,6 +177,7 @@ struct iser_data_buf {

 /* fwd declarations */
 struct iser_device;
+struct iser_cq_desc;
 struct iscsi_iser_conn;
 struct iscsi_iser_task;
 struct iscsi_endpoint;

@@ -226,16 +227,21 @@ struct iser_rx_desc {
 	char		             pad[ISER_RX_PAD_SIZE];
 } __attribute__((packed));

+#define ISER_MAX_CQ 4
+
 struct iser_device {
 	struct ib_device             *ib_device;
 	struct ib_pd	             *pd;
-	struct ib_cq	             *rx_cq;
-	struct ib_cq	             *tx_cq;
+	struct ib_cq	             *rx_cq[ISER_MAX_CQ];
+	struct ib_cq	             *tx_cq[ISER_MAX_CQ];
 	struct ib_mr	             *mr;
-	struct tasklet_struct	     cq_tasklet;
+	struct tasklet_struct	     cq_tasklet[ISER_MAX_CQ];
 	struct ib_event_handler      event_handler;
 	struct list_head             ig_list; /* entry in ig devices list */
 	int                          refcount;
+	int                          cq_active_qps[ISER_MAX_CQ];
+	int                          cqs_used;
+	struct iser_cq_desc	     *cq_desc;
 };

 struct iser_conn {

@@ -287,6 +293,11 @@ struct iser_page_vec {
 	int data_size;
 };

+struct iser_cq_desc {
+	struct iser_device           *device;
+	int                          cq_index;
+};
+
 struct iser_global {
 	struct mutex      device_list_mutex;/*                   */
 	struct list_head  device_list;	     /* all iSER devices */
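The new iser_cq_desc is what makes multiple CQs workable with a single callback: each CQ is created with a pointer to its own descriptor as cq_context, so the handler can recover both the owning device and which CQ fired. A standalone sketch of the idea (simplified types standing in for the kernel structs):

#include <stdio.h>

#define MAX_CQ 4

struct device_ctx {
	const char *name;
	int cq_active_qps[MAX_CQ];
};

struct cq_desc {
	struct device_ctx *device;
	int cq_index;
};

/* Mirrors the shape of iser_cq_callback(): the opaque context handed
 * back is a per-CQ descriptor rather than the device itself. */
static void cq_callback(void *cq_context)
{
	struct cq_desc *desc = cq_context;

	printf("completion on %s, CQ %d\n", desc->device->name, desc->cq_index);
}

int main(void)
{
	struct device_ctx dev = { .name = "mlx4_0" };
	struct cq_desc descs[MAX_CQ];

	for (int i = 0; i < MAX_CQ; i++) {
		descs[i] = (struct cq_desc){ .device = &dev, .cq_index = i };
		cq_callback(&descs[i]);	/* as if each CQ fired once */
	}
	return 0;
}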
drivers/infiniband/ulp/iser/iser_verbs.c

@@ -70,32 +70,50 @@ static void iser_event_handler(struct ib_event_handler *handler,
  */
 static int iser_create_device_ib_res(struct iser_device *device)
 {
+	int i, j;
+	struct iser_cq_desc *cq_desc;
+
+	device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
+	iser_err("using %d CQs, device %s supports %d vectors\n", device->cqs_used,
+		 device->ib_device->name, device->ib_device->num_comp_vectors);
+
+	device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
+				  GFP_KERNEL);
+	if (device->cq_desc == NULL)
+		goto cq_desc_err;
+	cq_desc = device->cq_desc;
+
 	device->pd = ib_alloc_pd(device->ib_device);
 	if (IS_ERR(device->pd))
 		goto pd_err;

-	device->rx_cq = ib_create_cq(device->ib_device,
-				     iser_cq_callback,
-				     iser_cq_event_callback,
-				     (void *)device,
-				     ISER_MAX_RX_CQ_LEN, 0);
-	if (IS_ERR(device->rx_cq))
-		goto rx_cq_err;
+	for (i = 0; i < device->cqs_used; i++) {
+		cq_desc[i].device   = device;
+		cq_desc[i].cq_index = i;

-	device->tx_cq = ib_create_cq(device->ib_device,
-				     NULL, iser_cq_event_callback,
-				     (void *)device,
-				     ISER_MAX_TX_CQ_LEN, 0);
+		device->rx_cq[i] = ib_create_cq(device->ib_device,
+					  iser_cq_callback,
+					  iser_cq_event_callback,
+					  (void *)&cq_desc[i],
+					  ISER_MAX_RX_CQ_LEN, i);
+		if (IS_ERR(device->rx_cq[i]))
+			goto cq_err;

-	if (IS_ERR(device->tx_cq))
-		goto tx_cq_err;
+		device->tx_cq[i] = ib_create_cq(device->ib_device,
+					  NULL, iser_cq_event_callback,
+					  (void *)&cq_desc[i],
+					  ISER_MAX_TX_CQ_LEN, i);

-	if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
-		goto cq_arm_err;
+		if (IS_ERR(device->tx_cq[i]))
+			goto cq_err;

-	tasklet_init(&device->cq_tasklet,
-		     iser_cq_tasklet_fn,
-		     (unsigned long)device);
+		if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
+			goto cq_err;
+
+		tasklet_init(&device->cq_tasklet[i],
+			     iser_cq_tasklet_fn,
+			     (unsigned long)&cq_desc[i]);
+	}

 	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
 				   IB_ACCESS_REMOTE_WRITE |

@@ -113,14 +131,19 @@ static int iser_create_device_ib_res(struct iser_device *device)
 handler_err:
 	ib_dereg_mr(device->mr);
 dma_mr_err:
-	tasklet_kill(&device->cq_tasklet);
-cq_arm_err:
-	ib_destroy_cq(device->tx_cq);
-tx_cq_err:
-	ib_destroy_cq(device->rx_cq);
-rx_cq_err:
+	for (j = 0; j < device->cqs_used; j++)
+		tasklet_kill(&device->cq_tasklet[j]);
+cq_err:
+	for (j = 0; j < i; j++) {
+		if (device->tx_cq[j])
+			ib_destroy_cq(device->tx_cq[j]);
+		if (device->rx_cq[j])
+			ib_destroy_cq(device->rx_cq[j]);
+	}
 	ib_dealloc_pd(device->pd);
 pd_err:
+	kfree(device->cq_desc);
+cq_desc_err:
 	iser_err("failed to allocate an IB resource\n");
 	return -1;
 }

@@ -131,18 +154,24 @@ pd_err:
  */
 static void iser_free_device_ib_res(struct iser_device *device)
 {
+	int i;
 	BUG_ON(device->mr == NULL);

-	tasklet_kill(&device->cq_tasklet);
+	for (i = 0; i < device->cqs_used; i++) {
+		tasklet_kill(&device->cq_tasklet[i]);
+		(void)ib_destroy_cq(device->tx_cq[i]);
+		(void)ib_destroy_cq(device->rx_cq[i]);
+		device->tx_cq[i] = NULL;
+		device->rx_cq[i] = NULL;
+	}
+
 	(void)ib_unregister_event_handler(&device->event_handler);
 	(void)ib_dereg_mr(device->mr);
-	(void)ib_destroy_cq(device->tx_cq);
-	(void)ib_destroy_cq(device->rx_cq);
 	(void)ib_dealloc_pd(device->pd);

+	kfree(device->cq_desc);
+
 	device->mr = NULL;
-	device->tx_cq = NULL;
-	device->rx_cq = NULL;
 	device->pd = NULL;
 }

@@ -157,6 +186,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
 	struct ib_qp_init_attr	init_attr;
 	int			req_err, resp_err, ret = -ENOMEM;
 	struct ib_fmr_pool_param params;
+	int index, min_index = 0;

 	BUG_ON(ib_conn->device == NULL);

@@ -220,10 +250,20 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)

 	memset(&init_attr, 0, sizeof init_attr);

+	mutex_lock(&ig.connlist_mutex);
+	/* select the CQ with the minimal number of usages */
+	for (index = 0; index < device->cqs_used; index++)
+		if (device->cq_active_qps[index] <
+		    device->cq_active_qps[min_index])
+			min_index = index;
+	device->cq_active_qps[min_index]++;
+	mutex_unlock(&ig.connlist_mutex);
+	iser_err("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+
 	init_attr.event_handler = iser_qp_event_callback;
 	init_attr.qp_context	= (void *)ib_conn;
-	init_attr.send_cq	= device->tx_cq;
-	init_attr.recv_cq	= device->rx_cq;
+	init_attr.send_cq	= device->tx_cq[min_index];
+	init_attr.recv_cq	= device->rx_cq[min_index];
 	init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
 	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
 	init_attr.cap.max_send_sge = 2;

@@ -252,6 +292,7 @@ out_err:
  */
 static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
 {
+	int cq_index;
 	BUG_ON(ib_conn == NULL);

 	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",

@@ -262,9 +303,12 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
 	if (ib_conn->fmr_pool != NULL)
 		ib_destroy_fmr_pool(ib_conn->fmr_pool);

-	if (ib_conn->qp != NULL)
-		rdma_destroy_qp(ib_conn->cma_id);
+	if (ib_conn->qp != NULL) {
+		cq_index = ((struct iser_cq_desc *)ib_conn->qp->recv_cq->cq_context)->cq_index;
+		ib_conn->device->cq_active_qps[cq_index]--;

+		rdma_destroy_qp(ib_conn->cma_id);
+	}
 	/* if cma handler context, the caller acts s.t the cma destroy the id */
 	if (ib_conn->cma_id != NULL && can_destroy_id)
 		rdma_destroy_id(ib_conn->cma_id);

@@ -791,9 +835,9 @@ static void iser_handle_comp_error(struct iser_tx_desc *desc,
 	}
 }

-static int iser_drain_tx_cq(struct iser_device  *device)
+static int iser_drain_tx_cq(struct iser_device  *device, int cq_index)
 {
-	struct ib_cq  *cq = device->tx_cq;
+	struct ib_cq  *cq = device->tx_cq[cq_index];
 	struct ib_wc  wc;
 	struct iser_tx_desc *tx_desc;
 	struct iser_conn *ib_conn;

@@ -822,8 +866,10 @@ static int iser_drain_tx_cq(struct iser_device  *device)

 static void iser_cq_tasklet_fn(unsigned long data)
 {
-	struct iser_device  *device = (struct iser_device *)data;
-	struct ib_cq	     *cq = device->rx_cq;
+	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)data;
+	struct iser_device  *device = cq_desc->device;
+	int cq_index = cq_desc->cq_index;
+	struct ib_cq	     *cq = device->rx_cq[cq_index];
 	struct ib_wc	     wc;
 	struct iser_rx_desc *desc;
 	unsigned long	     xfer_len;

@@ -851,19 +897,21 @@ static void iser_cq_tasklet_fn(unsigned long data)
 		}
 		completed_rx++;
 		if (!(completed_rx & 63))
-			completed_tx += iser_drain_tx_cq(device);
+			completed_tx += iser_drain_tx_cq(device, cq_index);
 	}
 	/* #warning "it is assumed here that arming CQ only once its empty" *
 	 * " would not cause interrupts to be missed"                       */
 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

-	completed_tx += iser_drain_tx_cq(device);
+	completed_tx += iser_drain_tx_cq(device, cq_index);
 	iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
 }

 static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
 {
-	struct iser_device  *device = (struct iser_device *)cq_context;
+	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)cq_context;
+	struct iser_device  *device = cq_desc->device;
+	int cq_index = cq_desc->cq_index;

-	tasklet_schedule(&device->cq_tasklet);
+	tasklet_schedule(&device->cq_tasklet[cq_index]);
 }
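With one CQ per completion vector, new connections are spread by attaching each QP to the CQ that currently has the fewest active QPs, with the per-CQ counter bumped under the connlist mutex. A standalone sketch of that least-loaded pick (counter values are made up for illustration):

#include <stdio.h>

#define MAX_CQ 4

/* Scan the per-CQ QP counters and return the index of the minimum,
 * mirroring the selection loop added to iser_create_ib_conn_res(). */
static int pick_min_cq(const int active_qps[], int cqs_used)
{
	int index, min_index = 0;

	for (index = 0; index < cqs_used; index++)
		if (active_qps[index] < active_qps[min_index])
			min_index = index;
	return min_index;
}

int main(void)
{
	int active_qps[MAX_CQ] = { 3, 1, 2, 1 };
	int chosen = pick_min_cq(active_qps, MAX_CQ);

	/* The real code increments this under ig.connlist_mutex. */
	active_qps[chosen]++;
	printf("new QP goes to CQ %d\n", chosen);
	return 0;
}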
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c

@@ -3094,6 +3094,8 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 		if (validate_eth_header_mac(slave, rule_header, rlist))
 			return -EINVAL;
 		break;
+	case MLX4_NET_TRANS_RULE_ID_IB:
+		break;
 	case MLX4_NET_TRANS_RULE_ID_IPV4:
 	case MLX4_NET_TRANS_RULE_ID_TCP:
 	case MLX4_NET_TRANS_RULE_ID_UDP: