NTB: Use DMA Engine to Transmit and Receive
Allocate and use a DMA engine channel to transmit and receive data over
NTB. If none is allocated, fall back to using the CPU to transfer data.

Signed-off-by: Jon Mason <jon.mason@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
parent ac477afb04
commit 282a2feeb9

3 changed files with 298 additions and 50 deletions
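In outline, the patch teaches the NTB transport to hand bulk copies to a dmaengine memcpy channel and to fall back to the CPU whenever no channel exists, the transfer is too small, the alignment is wrong, or descriptor setup fails. A condensed sketch of that pattern, worth reading before the diff itself (the helper my_copy() and its callback argument are hypothetical; the dmaengine calls are the real client API of this kernel generation):

/* Illustrative only -- not part of the commit. */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>

static void my_copy(struct dma_chan *chan, void *dest, void *src, size_t len,
		    dma_async_tx_callback done, void *ctx)
{
	struct dma_device *device;
	struct dma_async_tx_descriptor *txd;
	dma_addr_t s, d;

	if (!chan)
		goto cpu;		/* no channel was found at setup */

	device = chan->device;
	d = dma_map_single(device->dev, dest, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, d))
		goto cpu;

	s = dma_map_single(device->dev, src, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, s))
		goto unmap_dest;

	/* ask the engine to unmap both buffers once the copy completes */
	txd = device->device_prep_dma_memcpy(chan, d, s, len,
					     DMA_COMPL_DEST_UNMAP_SINGLE |
					     DMA_COMPL_SRC_UNMAP_SINGLE |
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto unmap_src;

	txd->callback = done;		/* completion runs the callback */
	txd->callback_param = ctx;
	if (dma_submit_error(dmaengine_submit(txd)))
		goto unmap_src;

	dma_async_issue_pending(chan);	/* kick the hardware */
	return;

unmap_src:
	dma_unmap_single(device->dev, s, len, DMA_TO_DEVICE);
unmap_dest:
	dma_unmap_single(device->dev, d, len, DMA_FROM_DEVICE);
cpu:
	memcpy(dest, src, len);		/* CPU fallback */
	done(ctx);
}

The diff applies this on both directions of the transport: TX copies into the peer's memory window, RX copies out of the local one.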
drivers/ntb/ntb_hw.c
@@ -349,6 +349,23 @@ int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
 	return 0;
 }
 
+/**
+ * ntb_get_mw_base() - get addr for the NTB memory window
+ * @ndev: pointer to ntb_device instance
+ * @mw: memory window number
+ *
+ * This function provides the base address of the memory window specified.
+ *
+ * RETURNS: address, or NULL on error.
+ */
+resource_size_t ntb_get_mw_base(struct ntb_device *ndev, unsigned int mw)
+{
+	if (mw >= ntb_max_mw(ndev))
+		return 0;
+
+	return pci_resource_start(ndev->pdev, MW_TO_BAR(mw));
+}
+
 /**
  * ntb_get_mw_vbase() - get virtual addr for the NTB memory window
  * @ndev: pointer to ntb_device instance
drivers/ntb/ntb_hw.h
@@ -240,6 +240,7 @@ int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
 int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
 int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
 int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
+resource_size_t ntb_get_mw_base(struct ntb_device *ndev, unsigned int mw);
 void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw);
 u64 ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw);
 void ntb_ring_sdb(struct ntb_device *ndev, unsigned int idx);
drivers/ntb/ntb_transport.c
@@ -47,6 +47,7 @@
  */
 #include <linux/debugfs.h>
 #include <linux/delay.h>
+#include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
 #include <linux/export.h>
@@ -68,6 +69,10 @@ static unsigned char max_num_clients;
 module_param(max_num_clients, byte, 0644);
 MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
 
+static unsigned int copy_bytes = 1024;
+module_param(copy_bytes, uint, 0644);
+MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
+
 struct ntb_queue_entry {
 	/* ntb_queue list reference */
 	struct list_head entry;
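Programming a descriptor, mapping buffers, and taking a completion interrupt carry a fixed cost, so copies shorter than copy_bytes stay on the CPU; the threshold is tunable at module load. A trivial sketch of the gate (hypothetical helper; the real checks sit inline in ntb_async_rx() and ntb_async_tx() further down):

/* Illustrative only -- making the offload policy explicit. */
#include <linux/dmaengine.h>

static unsigned int copy_bytes = 1024;	/* mirrors the module parameter */

static bool use_dma_engine(struct dma_chan *chan, size_t len)
{
	return chan && len >= copy_bytes;
}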
@@ -76,6 +81,13 @@ struct ntb_queue_entry {
 	void *buf;
 	unsigned int len;
 	unsigned int flags;
+
+	struct ntb_transport_qp *qp;
+	union {
+		struct ntb_payload_header __iomem *tx_hdr;
+		struct ntb_payload_header *rx_hdr;
+	};
+	unsigned int index;
 };
 
 struct ntb_rx_info {
@@ -86,6 +98,7 @@ struct ntb_transport_qp {
 	struct ntb_transport *transport;
 	struct ntb_device *ndev;
 	void *cb_data;
+	struct dma_chan *dma_chan;
 
 	bool client_ready;
 	bool qp_link;
@@ -99,6 +112,7 @@ struct ntb_transport_qp {
 	struct list_head tx_free_q;
 	spinlock_t ntb_tx_free_q_lock;
 	void __iomem *tx_mw;
+	dma_addr_t tx_mw_phys;
 	unsigned int tx_index;
 	unsigned int tx_max_entry;
 	unsigned int tx_max_frame;
@@ -114,6 +128,7 @@ struct ntb_transport_qp {
 	unsigned int rx_index;
 	unsigned int rx_max_entry;
 	unsigned int rx_max_frame;
+	dma_cookie_t last_cookie;
 
 	void (*event_handler) (void *data, int status);
 	struct delayed_work link_work;
@@ -129,9 +144,14 @@ struct ntb_transport_qp {
 	u64 rx_err_no_buf;
 	u64 rx_err_oflow;
 	u64 rx_err_ver;
+	u64 rx_memcpy;
+	u64 rx_async;
 	u64 tx_bytes;
 	u64 tx_pkts;
 	u64 tx_ring_full;
+	u64 tx_err_no_buf;
+	u64 tx_memcpy;
+	u64 tx_async;
 };
 
 struct ntb_transport_mw {
@@ -381,7 +401,7 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
 	char *buf;
 	ssize_t ret, out_offset, out_count;
 
-	out_count = 600;
+	out_count = 1000;
 
 	buf = kmalloc(out_count, GFP_KERNEL);
 	if (!buf)
@@ -395,6 +415,10 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
 			       "rx_bytes - \t%llu\n", qp->rx_bytes);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "rx_pkts - \t%llu\n", qp->rx_pkts);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_async - \t%llu\n", qp->rx_async);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
@@ -414,8 +438,14 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
 			       "tx_bytes - \t%llu\n", qp->tx_bytes);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "tx_pkts - \t%llu\n", qp->tx_pkts);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_async - \t%llu\n", qp->tx_async);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
 			       "tx_mw - \t%p\n", qp->tx_mw);
 	out_offset += snprintf(buf + out_offset, out_count - out_offset,
@@ -488,11 +518,11 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
 		num_qps_mw = nt->max_qps / mw_max;
 
 	rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
-	qp->remote_rx_info = nt->mw[mw_num].virt_addr +
-			     (qp_num / mw_max * rx_size);
+	qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
 	rx_size -= sizeof(struct ntb_rx_info);
 
-	qp->rx_buff = qp->remote_rx_info + 1;
+	qp->remote_rx_info = qp->rx_buff + rx_size;
 
 	/* Due to housekeeping, there must be atleast 2 buffs */
 	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
 	qp->rx_max_entry = rx_size / qp->rx_max_frame;
@@ -796,12 +826,13 @@ static void ntb_qp_link_work(struct work_struct *work)
 			      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
 }
 
-static void ntb_transport_init_queue(struct ntb_transport *nt,
+static int ntb_transport_init_queue(struct ntb_transport *nt,
 				     unsigned int qp_num)
 {
 	struct ntb_transport_qp *qp;
 	unsigned int num_qps_mw, tx_size;
 	u8 mw_num, mw_max;
+	u64 qp_offset;
 
 	mw_max = ntb_max_mw(nt->ndev);
 	mw_num = QP_TO_MW(nt->ndev, qp_num);
@@ -820,11 +851,18 @@ static void ntb_transport_init_queue(struct ntb_transport *nt,
 		num_qps_mw = nt->max_qps / mw_max;
 
 	tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
-	qp->rx_info = ntb_get_mw_vbase(nt->ndev, mw_num) +
-		      (qp_num / mw_max * tx_size);
-	tx_size -= sizeof(struct ntb_rx_info);
+	qp_offset = qp_num / mw_max * tx_size;
+	qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
+	if (!qp->tx_mw)
+		return -EINVAL;
+
+	qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
+	if (!qp->tx_mw_phys)
+		return -EINVAL;
+
+	tx_size -= sizeof(struct ntb_rx_info);
+	qp->rx_info = qp->tx_mw + tx_size;
 
-	qp->tx_mw = qp->rx_info + 1;
 	/* Due to housekeeping, there must be atleast 2 buffs */
 	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
 	qp->tx_max_entry = tx_size / qp->tx_max_frame;
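The hunk above also reshapes each queue's slice of the memory window: the ntb_rx_info bookkeeping header moves from the front of the slice to its end, so the payload area now starts exactly at qp_offset, and the same offset is recorded against both the ioremap'ed virtual base (used by CPU copies) and the BAR physical base (the address a DMA engine must be handed). A toy recomputation of that layout, in plain C with invented bases and a single memory window assumed:

/* Illustrative only -- not driver code. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t vbase = 0xffffc90010000000ull;	/* assumed ioremap base */
	uint64_t pbase = 0xf0000000ull;		/* assumed BAR base */
	unsigned int hdr_size = 4;		/* sizeof(struct ntb_rx_info) */
	unsigned int tx_size = (1u << 20) / 4;	/* 1 MB window, 4 queues */
	unsigned int qp_num = 1;
	uint64_t qp_offset = (uint64_t)qp_num * tx_size;

	tx_size -= hdr_size;			/* header lives at the end */
	printf("tx_mw      = %#llx\n", (unsigned long long)(vbase + qp_offset));
	printf("tx_mw_phys = %#llx\n", (unsigned long long)(pbase + qp_offset));
	printf("rx_info    = %#llx\n",
	       (unsigned long long)(vbase + qp_offset + tx_size));
	return 0;
}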
@@ -851,6 +889,8 @@ static void ntb_transport_init_queue(struct ntb_transport *nt,
 	INIT_LIST_HEAD(&qp->rx_pend_q);
 	INIT_LIST_HEAD(&qp->rx_free_q);
 	INIT_LIST_HEAD(&qp->tx_free_q);
+
+	return 0;
 }
 
 int ntb_transport_init(struct pci_dev *pdev)
@@ -889,8 +929,11 @@ int ntb_transport_init(struct pci_dev *pdev)
 
 	nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
 
-	for (i = 0; i < nt->max_qps; i++)
-		ntb_transport_init_queue(nt, i);
+	for (i = 0; i < nt->max_qps; i++) {
+		rc = ntb_transport_init_queue(nt, i);
+		if (rc)
+			goto err3;
+	}
 
 	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
 	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);
@@ -956,13 +999,19 @@ void ntb_transport_free(void *transport)
 	kfree(nt);
 }
 
-static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
-			     struct ntb_queue_entry *entry, void *offset)
+static void ntb_rx_copy_callback(void *data)
 {
+	struct ntb_queue_entry *entry = data;
+	struct ntb_transport_qp *qp = entry->qp;
 	void *cb_data = entry->cb_data;
 	unsigned int len = entry->len;
+	struct ntb_payload_header *hdr = entry->rx_hdr;
 
-	memcpy(entry->buf, offset, entry->len);
+	/* Ensure that the data is fully copied out before clearing the flag */
+	wmb();
+	hdr->flags = 0;
 
+	iowrite32(entry->index, &qp->rx_info->entry);
+
 	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
 
@@ -970,6 +1019,86 @@ static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
 		qp->rx_handler(qp, qp->cb_data, cb_data, len);
 }
 
+static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
+{
+	void *buf = entry->buf;
+	size_t len = entry->len;
+
+	memcpy(buf, offset, len);
+
+	ntb_rx_copy_callback(entry);
+}
+
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
+			 size_t len)
+{
+	struct dma_async_tx_descriptor *txd;
+	struct ntb_transport_qp *qp = entry->qp;
+	struct dma_chan *chan = qp->dma_chan;
+	struct dma_device *device;
+	size_t pay_off, buff_off;
+	dma_addr_t src, dest;
+	dma_cookie_t cookie;
+	void *buf = entry->buf;
+	unsigned long flags;
+
+	entry->len = len;
+
+	if (!chan)
+		goto err;
+
+	if (len < copy_bytes)
+		goto err1;
+
+	device = chan->device;
+	pay_off = (size_t) offset & ~PAGE_MASK;
+	buff_off = (size_t) buf & ~PAGE_MASK;
+
+	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
+		goto err1;
+
+	dest = dma_map_single(device->dev, buf, len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(device->dev, dest))
+		goto err1;
+
+	src = dma_map_single(device->dev, offset, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(device->dev, src))
+		goto err2;
+
+	flags = DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SRC_UNMAP_SINGLE |
+		DMA_PREP_INTERRUPT;
+	txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+	if (!txd)
+		goto err3;
+
+	txd->callback = ntb_rx_copy_callback;
+	txd->callback_param = entry;
+
+	cookie = dmaengine_submit(txd);
+	if (dma_submit_error(cookie))
+		goto err3;
+
+	qp->last_cookie = cookie;
+
+	qp->rx_async++;
+
+	return;
+
+err3:
+	dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
+err2:
+	dma_unmap_single(device->dev, dest, len, DMA_FROM_DEVICE);
+err1:
+	/* If the callbacks come out of order, the writing of the index to the
+	 * last completed will be out of order.  This may result in the
+	 * receive stalling forever.
+	 */
+	dma_sync_wait(chan, qp->last_cookie);
+err:
+	ntb_memcpy_rx(entry, offset);
+	qp->rx_memcpy++;
+}
+
 static int ntb_process_rxc(struct ntb_transport_qp *qp)
 {
 	struct ntb_payload_header *hdr;
@@ -1008,41 +1137,45 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
 	if (hdr->flags & LINK_DOWN_FLAG) {
 		ntb_qp_link_down(qp);
 
-		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
-			     &qp->rx_pend_q);
-		goto out;
+		goto err;
 	}
 
 	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
 		"rx offset %u, ver %u - %d payload received, buf size %d\n",
 		qp->rx_index, hdr->ver, hdr->len, entry->len);
 
-	if (hdr->len <= entry->len) {
-		entry->len = hdr->len;
-		ntb_rx_copy_task(qp, entry, offset);
-	} else {
-		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
-			     &qp->rx_pend_q);
+	qp->rx_bytes += hdr->len;
+	qp->rx_pkts++;
 
+	if (hdr->len > entry->len) {
 		qp->rx_err_oflow++;
 		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
 			"RX overflow! Wanted %d got %d\n",
 			hdr->len, entry->len);
+
+		goto err;
 	}
 
-	qp->rx_bytes += hdr->len;
-	qp->rx_pkts++;
+	entry->index = qp->rx_index;
+	entry->rx_hdr = hdr;
+
+	ntb_async_rx(entry, offset, hdr->len);
 
 out:
+	qp->rx_index++;
+	qp->rx_index %= qp->rx_max_entry;
+
+	return 0;
+
+err:
+	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+		     &qp->rx_pend_q);
 	/* Ensure that the data is fully copied out before clearing the flag */
 	wmb();
 	hdr->flags = 0;
 	iowrite32(qp->rx_index, &qp->rx_info->entry);
 
-	qp->rx_index++;
-	qp->rx_index %= qp->rx_max_entry;
-
-	return 0;
+	goto out;
 }
 
 static void ntb_transport_rx(unsigned long data)
@@ -1058,6 +1191,9 @@ static void ntb_transport_rx(unsigned long data)
 		if (rc)
 			break;
 	}
+
+	if (qp->dma_chan)
+		dma_async_issue_pending(qp->dma_chan);
 }
 
 static void ntb_transport_rxc_db(void *data, int db_num)
@@ -1070,19 +1206,13 @@ static void ntb_transport_rxc_db(void *data, int db_num)
 	tasklet_schedule(&qp->rx_work);
 }
 
-static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
-			     struct ntb_queue_entry *entry,
-			     void __iomem *offset)
+static void ntb_tx_copy_callback(void *data)
 {
-	struct ntb_payload_header __iomem *hdr;
-
-	memcpy_toio(offset, entry->buf, entry->len);
-
-	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
-	iowrite32(entry->len, &hdr->len);
-	iowrite32((u32) qp->tx_pkts, &hdr->ver);
+	struct ntb_queue_entry *entry = data;
+	struct ntb_transport_qp *qp = entry->qp;
+	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
 
-	/* Ensure that the data is fully copied out before setting the flag */
+	/* Ensure that the data is fully copied out before setting the flags */
 	wmb();
 	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
 
@@ -1103,15 +1233,81 @@ static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
 	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
 }
 
+static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
+{
+	memcpy_toio(offset, entry->buf, entry->len);
+
+	ntb_tx_copy_callback(entry);
+}
+
+static void ntb_async_tx(struct ntb_transport_qp *qp,
+			 struct ntb_queue_entry *entry)
+{
+	struct ntb_payload_header __iomem *hdr;
+	struct dma_async_tx_descriptor *txd;
+	struct dma_chan *chan = qp->dma_chan;
+	struct dma_device *device;
+	size_t dest_off, buff_off;
+	dma_addr_t src, dest;
+	dma_cookie_t cookie;
+	void __iomem *offset;
+	size_t len = entry->len;
+	void *buf = entry->buf;
+	unsigned long flags;
+
+	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
+	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
+	entry->tx_hdr = hdr;
+
+	iowrite32(entry->len, &hdr->len);
+	iowrite32((u32) qp->tx_pkts, &hdr->ver);
+
+	if (!chan)
+		goto err;
+
+	if (len < copy_bytes)
+		goto err;
+
+	device = chan->device;
+	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
+	buff_off = (size_t) buf & ~PAGE_MASK;
+	dest_off = (size_t) dest & ~PAGE_MASK;
+
+	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
+		goto err;
+
+	src = dma_map_single(device->dev, buf, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(device->dev, src))
+		goto err;
+
+	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_PREP_INTERRUPT;
+	txd = device->device_prep_dma_memcpy(chan, dest, src, len, flags);
+	if (!txd)
+		goto err1;
+
+	txd->callback = ntb_tx_copy_callback;
+	txd->callback_param = entry;
+
+	cookie = dmaengine_submit(txd);
+	if (dma_submit_error(cookie))
+		goto err1;
+
+	dma_async_issue_pending(chan);
+	qp->tx_async++;
+
+	return;
+err1:
+	dma_unmap_single(device->dev, src, len, DMA_TO_DEVICE);
+err:
+	ntb_memcpy_tx(entry, offset);
+	qp->tx_memcpy++;
+}
+
 static int ntb_process_tx(struct ntb_transport_qp *qp,
 			  struct ntb_queue_entry *entry)
 {
-	void __iomem *offset;
-
-	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
-
-	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %u, entry len %d flags %x buff %p\n",
-		qp->tx_pkts, offset, qp->tx_index, entry->len, entry->flags,
+	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - tx %u, entry len %d flags %x buff %p\n",
+		qp->tx_pkts, qp->tx_index, entry->len, entry->flags,
 		entry->buf);
 	if (qp->tx_index == qp->remote_rx_info->entry) {
 		qp->tx_ring_full++;
@@ -1127,7 +1323,7 @@ static int ntb_process_tx(struct ntb_transport_qp *qp,
 		return 0;
 	}
 
-	ntb_tx_copy_task(qp, entry, offset);
+	ntb_async_tx(qp, entry);
 
 	qp->tx_index++;
 	qp->tx_index %= qp->tx_max_entry;
@@ -1213,11 +1409,18 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
 	qp->tx_handler = handlers->tx_handler;
 	qp->event_handler = handlers->event_handler;
 
+	qp->dma_chan = dma_find_channel(DMA_MEMCPY);
+	if (!qp->dma_chan)
+		dev_info(&pdev->dev, "Unable to allocate DMA channel, using CPU instead\n");
+	else
+		dmaengine_get();
+
 	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
 		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
 		if (!entry)
 			goto err1;
 
+		entry->qp = qp;
 		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
 			     &qp->rx_free_q);
 	}
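For context, this is roughly how a transport client wires up these entry points (ntb_netdev is the in-tree example; the handler bodies, my_priv, and error handling below are placeholders, and <linux/ntb.h> is assumed to be the header exposing the client API in this era):

/* Illustrative client skeleton -- not part of the patch. */
#include <linux/ntb.h>

static void my_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
			  void *data, int len)
{
	/* 'data' is a buffer we posted with ntb_transport_rx_enqueue();
	 * consume 'len' bytes here, then typically re-post the buffer */
}

static void my_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
			  void *data, int len)
{
	/* the TX buffer may be freed or reused once this runs */
}

static void my_event_handler(void *data, int status)
{
	/* the queue's logical link went up or down */
}

static const struct ntb_queue_handlers my_handlers = {
	.rx_handler	= my_rx_handler,
	.tx_handler	= my_tx_handler,
	.event_handler	= my_event_handler,
};

static int my_client_probe(struct pci_dev *pdev, void *my_priv)
{
	struct ntb_transport_qp *qp;

	qp = ntb_transport_create_queue(my_priv, pdev, &my_handlers);
	if (!qp)
		return -EIO;

	ntb_transport_link_up(qp);	/* tell the peer we are ready */
	return 0;
}

Note that the rx_handler now fires from the DMA completion callback as well as from the CPU-copy path, so clients see identical behavior regardless of which engine did the copy.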
@@ -1227,6 +1430,7 @@ ntb_transport_create_queue(void *data, struct pci_dev *pdev,
 		if (!entry)
 			goto err2;
 
+		entry->qp = qp;
 		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
 			     &qp->tx_free_q);
 	}
@@ -1272,11 +1476,26 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 
 	pdev = ntb_query_pdev(qp->ndev);
 
-	cancel_delayed_work_sync(&qp->link_work);
+	if (qp->dma_chan) {
+		struct dma_chan *chan = qp->dma_chan;
+		/* Putting the dma_chan to NULL will force any new traffic to be
+		 * processed by the CPU instead of the DMA engine
+		 */
+		qp->dma_chan = NULL;
+
+		/* Try to be nice and wait for any queued DMA engine
+		 * transactions to process before smashing it with a rock
+		 */
+		dma_sync_wait(chan, qp->last_cookie);
+		dmaengine_terminate_all(chan);
+		dmaengine_put();
+	}
 
 	ntb_unregister_db_callback(qp->ndev, qp->qp_num);
 	tasklet_disable(&qp->rx_work);
 
+	cancel_delayed_work_sync(&qp->link_work);
+
 	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
 		kfree(entry);
 
@@ -1382,8 +1601,10 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
 		return -EINVAL;
 
 	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
-	if (!entry)
+	if (!entry) {
+		qp->tx_err_no_buf++;
 		return -ENOMEM;
+	}
 
 	entry->cb_data = cb;
 	entry->buf = data;
@@ -1499,9 +1720,18 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
  */
 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
 {
+	unsigned int max;
+
 	if (!qp)
 		return 0;
 
-	return qp->tx_max_frame - sizeof(struct ntb_payload_header);
+	if (!qp->dma_chan)
+		return qp->tx_max_frame - sizeof(struct ntb_payload_header);
+
+	/* If DMA engine usage is possible, try to find the max size for that */
+	max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
+	max -= max % (1 << qp->dma_chan->device->copy_align);
+
+	return max;
 }
 EXPORT_SYMBOL_GPL(ntb_transport_max_size);
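Since a channel's copy_align can now shave bytes off the usable payload, a client should size its buffers from ntb_transport_max_size() rather than assuming frame size minus header. A hypothetical receive-buffer post, continuing the client sketch above:

/* Illustrative only -- the buffer doubles as the callback cookie. */
#include <linux/slab.h>

static int my_post_rx_buf(struct ntb_transport_qp *qp)
{
	unsigned int len = ntb_transport_max_size(qp);
	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	return ntb_transport_rx_enqueue(qp, buf, buf, len);
}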