Merge branch 'topic/err_reporting' into for-linus

Signed-off-by: Vinod Koul <vinod.koul@intel.com>

Conflicts:
	drivers/dma/cppi41.c

commit 11bfedff55
41 changed files with 597 additions and 286 deletions
@@ -282,6 +282,17 @@ supported.
       that is supposed to push the current
       transaction descriptor to a pending queue, waiting
       for issue_pending to be called.
+   - In this structure the function pointer callback_result can be
+     initialized in order for the submitter to be notified that a
+     transaction has completed. In the earlier code the function pointer
+     callback has been used. However it does not provide any status to the
+     transaction and will be deprecated. The result structure defined as
+     dmaengine_result that is passed in to callback_result has two fields:
+       + result: This provides the transfer result defined by
+         dmaengine_tx_result. Either success or some error
+         condition.
+       + residue: Provides the residue bytes of the transfer for those that
+         support residue.

   * device_issue_pending
     - Takes the first transaction descriptor in the pending queue,
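To make the submitter-side contract described above concrete, a minimal client sketch might look like the following (my_request, my_memcpy_done and my_issue_memcpy are invented names, not part of this series): it registers callback_result instead of the legacy callback and reads status and residue from the dmaengine_result it is handed.

#include <linux/dmaengine.h>
#include <linux/completion.h>

/* Hypothetical per-transfer context owned by the client. */
struct my_request {
	struct completion done;
	enum dmaengine_tx_result status;
	u32 residue;
};

/* New-style completion handler: receives a result instead of a bare call. */
static void my_memcpy_done(void *param, const struct dmaengine_result *result)
{
	struct my_request *req = param;

	req->status = result->result;	/* DMA_TRANS_NOERROR on success */
	req->residue = result->residue;	/* leftover bytes, if supported */
	complete(&req->done);
}

static int my_issue_memcpy(struct dma_chan *chan, dma_addr_t dst,
			   dma_addr_t src, size_t len, struct my_request *req)
{
	struct dma_async_tx_descriptor *txd;

	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						    DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	init_completion(&req->done);
	txd->callback_result = my_memcpy_done;	/* preferred over ->callback */
	txd->callback_param = req;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}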
@ -473,15 +473,11 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
|
|||
/* for cyclic transfers,
|
||||
* no need to replay callback function while stopping */
|
||||
if (!atc_chan_is_cyclic(atchan)) {
|
||||
dma_async_tx_callback callback = txd->callback;
|
||||
void *param = txd->callback_param;
|
||||
|
||||
/*
|
||||
* The API requires that no submissions are done from a
|
||||
* callback, so we don't need to drop the lock here
|
||||
*/
|
||||
if (callback)
|
||||
callback(param);
|
||||
dmaengine_desc_get_callback_invoke(txd, NULL);
|
||||
}
|
||||
|
||||
dma_run_dependencies(txd);
|
||||
|
@ -598,15 +594,12 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
|
|||
{
|
||||
struct at_desc *first = atc_first_active(atchan);
|
||||
struct dma_async_tx_descriptor *txd = &first->txd;
|
||||
dma_async_tx_callback callback = txd->callback;
|
||||
void *param = txd->callback_param;
|
||||
|
||||
dev_vdbg(chan2dev(&atchan->chan_common),
|
||||
"new cyclic period llp 0x%08x\n",
|
||||
channel_readl(atchan, DSCR));
|
||||
|
||||
if (callback)
|
||||
callback(param);
|
||||
dmaengine_desc_get_callback_invoke(txd, NULL);
|
||||
}
|
||||
|
||||
/*-- IRQ & Tasklet ---------------------------------------------------*/
|
||||
|
|
|
@ -1572,8 +1572,8 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
|
|||
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
|
||||
txd = &desc->tx_dma_desc;
|
||||
|
||||
if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
|
||||
txd->callback(txd->callback_param);
|
||||
if (txd->flags & DMA_PREP_INTERRUPT)
|
||||
dmaengine_desc_get_callback_invoke(txd, NULL);
|
||||
}
|
||||
|
||||
static void at_xdmac_tasklet(unsigned long data)
|
||||
|
@ -1616,8 +1616,8 @@ static void at_xdmac_tasklet(unsigned long data)
|
|||
|
||||
if (!at_xdmac_chan_is_cyclic(atchan)) {
|
||||
dma_cookie_complete(txd);
|
||||
if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
|
||||
txd->callback(txd->callback_param);
|
||||
if (txd->flags & DMA_PREP_INTERRUPT)
|
||||
dmaengine_desc_get_callback_invoke(txd, NULL);
|
||||
}
|
||||
|
||||
dma_run_dependencies(txd);
|
||||
|
|
|
@ -1875,8 +1875,7 @@ static void dma_tasklet(unsigned long data)
|
|||
struct coh901318_chan *cohc = (struct coh901318_chan *) data;
|
||||
struct coh901318_desc *cohd_fin;
|
||||
unsigned long flags;
|
||||
dma_async_tx_callback callback;
|
||||
void *callback_param;
|
||||
struct dmaengine_desc_callback cb;
|
||||
|
||||
dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
|
||||
" nbr_active_done %ld\n", __func__,
|
||||
|
@ -1891,8 +1890,7 @@ static void dma_tasklet(unsigned long data)
|
|||
goto err;
|
||||
|
||||
/* locate callback to client */
|
||||
callback = cohd_fin->desc.callback;
|
||||
callback_param = cohd_fin->desc.callback_param;
|
||||
dmaengine_desc_get_callback(&cohd_fin->desc, &cb);
|
||||
|
||||
/* sign this job as completed on the channel */
|
||||
dma_cookie_complete(&cohd_fin->desc);
|
||||
|
@ -1907,8 +1905,7 @@ static void dma_tasklet(unsigned long data)
|
|||
spin_unlock_irqrestore(&cohc->lock, flags);
|
||||
|
||||
/* Call the callback when we're done */
|
||||
if (callback)
|
||||
callback(callback_param);
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
|
||||
spin_lock_irqsave(&cohc->lock, flags);
|
||||
|
||||
|
|
|
@@ -336,7 +336,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 
 		c->residue = pd_trans_len(c->desc->pd6) - len;
 		dma_cookie_complete(&c->txd);
-		c->txd.callback(c->txd.callback_param);
+		dmaengine_desc_get_callback_invoke(&c->txd, NULL);
 
 		/* Paired with cppi41_dma_issue_pending */
 		pm_runtime_mark_last_busy(cdd->ddev.dev);
@@ -86,4 +86,88 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
 	state->residue = residue;
 }
 
+struct dmaengine_desc_callback {
+	dma_async_tx_callback callback;
+	dma_async_tx_callback_result callback_result;
+	void *callback_param;
+};
+
+/**
+ * dmaengine_desc_get_callback - get the passed in callback function
+ * @tx: tx descriptor
+ * @cb: temp struct to hold the callback info
+ *
+ * Fill the passed in cb struct with what's available in the passed in
+ * tx descriptor struct
+ * No locking is required.
+ */
+static inline void
+dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
+			    struct dmaengine_desc_callback *cb)
+{
+	cb->callback = tx->callback;
+	cb->callback_result = tx->callback_result;
+	cb->callback_param = tx->callback_param;
+}
+
+/**
+ * dmaengine_desc_callback_invoke - call the callback function in cb struct
+ * @cb: temp struct that is holding the callback info
+ * @result: transaction result
+ *
+ * Call the callback function provided in the cb struct with the parameter
+ * in the cb struct.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
+			       const struct dmaengine_result *result)
+{
+	struct dmaengine_result dummy_result = {
+		.result = DMA_TRANS_NOERROR,
+		.residue = 0
+	};
+
+	if (cb->callback_result) {
+		if (!result)
+			result = &dummy_result;
+		cb->callback_result(cb->callback_param, result);
+	} else if (cb->callback) {
+		cb->callback(cb->callback_param);
+	}
+}
+
+/**
+ * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
+ *					then immediately call the callback.
+ * @tx: dma async tx descriptor
+ * @result: transaction result
+ *
+ * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
+ * in a single function since no work is necessary in between for the driver.
+ * Locking is dependent on the driver.
+ */
+static inline void
+dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
+				   const struct dmaengine_result *result)
+{
+	struct dmaengine_desc_callback cb;
+
+	dmaengine_desc_get_callback(tx, &cb);
+	dmaengine_desc_callback_invoke(&cb, result);
+}
+
+/**
+ * dmaengine_desc_callback_valid - verify the callback is valid in cb
+ * @cb: callback info struct
+ *
+ * Return a bool that verifies whether callback in cb is valid or not.
+ * No locking is required.
+ */
+static inline bool
+dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
+{
+	return (cb->callback) ? true : false;
+}
+
 #endif
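The driver conversions that follow all repeat the same provider-side pattern these helpers enable; a rough sketch, with my_chan, my_desc and my_chan_complete_one as invented placeholders: snapshot the callback under the channel lock with dmaengine_desc_get_callback(), drop the lock, then call dmaengine_desc_callback_invoke(), which prefers callback_result, falls back to the legacy callback, and substitutes a DMA_TRANS_NOERROR result when NULL is passed.

#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/list.h>

#include "dmaengine.h"		/* the helpers added above */

/* Invented driver-private types, used only for this sketch. */
struct my_desc {
	struct dma_async_tx_descriptor txd;
	struct list_head node;
};

struct my_chan {
	struct dma_chan chan;
	spinlock_t lock;
	struct list_head completed;
};

static void my_chan_complete_one(struct my_chan *mc, struct my_desc *d)
{
	struct dmaengine_desc_callback cb;
	unsigned long flags;

	spin_lock_irqsave(&mc->lock, flags);
	dma_cookie_complete(&d->txd);
	/* Copy the callback info while the descriptor is still locked. */
	dmaengine_desc_get_callback(&d->txd, &cb);
	list_del(&d->node);
	spin_unlock_irqrestore(&mc->lock, flags);

	/* Safe to call unconditionally; it checks both callback flavours. */
	dmaengine_desc_callback_invoke(&cb, NULL);
}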
@ -270,20 +270,19 @@ static void
|
|||
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
|
||||
bool callback_required)
|
||||
{
|
||||
dma_async_tx_callback callback = NULL;
|
||||
void *param = NULL;
|
||||
struct dma_async_tx_descriptor *txd = &desc->txd;
|
||||
struct dw_desc *child;
|
||||
unsigned long flags;
|
||||
struct dmaengine_desc_callback cb;
|
||||
|
||||
dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
|
||||
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
dma_cookie_complete(txd);
|
||||
if (callback_required) {
|
||||
callback = txd->callback;
|
||||
param = txd->callback_param;
|
||||
}
|
||||
if (callback_required)
|
||||
dmaengine_desc_get_callback(txd, &cb);
|
||||
else
|
||||
memset(&cb, 0, sizeof(cb));
|
||||
|
||||
/* async_tx_ack */
|
||||
list_for_each_entry(child, &desc->tx_list, desc_node)
|
||||
|
@ -292,8 +291,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
|
|||
dwc_desc_put(dwc, desc);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
|
||||
if (callback)
|
||||
callback(param);
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
}
|
||||
|
||||
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
|
||||
|
|
|
@ -737,10 +737,10 @@ static void ep93xx_dma_tasklet(unsigned long data)
|
|||
{
|
||||
struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
|
||||
struct ep93xx_dma_desc *desc, *d;
|
||||
dma_async_tx_callback callback = NULL;
|
||||
void *callback_param = NULL;
|
||||
struct dmaengine_desc_callback cb;
|
||||
LIST_HEAD(list);
|
||||
|
||||
memset(&cb, 0, sizeof(cb));
|
||||
spin_lock_irq(&edmac->lock);
|
||||
/*
|
||||
* If dma_terminate_all() was called before we get to run, the active
|
||||
|
@ -755,8 +755,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
|
|||
dma_cookie_complete(&desc->txd);
|
||||
list_splice_init(&edmac->active, &list);
|
||||
}
|
||||
callback = desc->txd.callback;
|
||||
callback_param = desc->txd.callback_param;
|
||||
dmaengine_desc_get_callback(&desc->txd, &cb);
|
||||
}
|
||||
spin_unlock_irq(&edmac->lock);
|
||||
|
||||
|
@ -769,8 +768,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
|
|||
ep93xx_dma_desc_put(edmac, desc);
|
||||
}
|
||||
|
||||
if (callback)
|
||||
callback(callback_param);
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
}
|
||||
|
||||
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
|
||||
|
|
|
@ -134,16 +134,8 @@ static void fsl_re_issue_pending(struct dma_chan *chan)
|
|||
|
||||
static void fsl_re_desc_done(struct fsl_re_desc *desc)
|
||||
{
|
||||
dma_async_tx_callback callback;
|
||||
void *callback_param;
|
||||
|
||||
dma_cookie_complete(&desc->async_tx);
|
||||
|
||||
callback = desc->async_tx.callback;
|
||||
callback_param = desc->async_tx.callback_param;
|
||||
if (callback)
|
||||
callback(callback_param);
|
||||
|
||||
dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
|
||||
dma_descriptor_unmap(&desc->async_tx);
|
||||
}
|
||||
|
||||
|
|
|
@ -517,11 +517,7 @@ static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
|
|||
ret = txd->cookie;
|
||||
|
||||
/* Run the link descriptor callback function */
|
||||
if (txd->callback) {
|
||||
chan_dbg(chan, "LD %p callback\n", desc);
|
||||
txd->callback(txd->callback_param);
|
||||
}
|
||||
|
||||
dmaengine_desc_get_callback_invoke(txd, NULL);
|
||||
dma_descriptor_unmap(txd);
|
||||
}
|
||||
|
||||
|
|
|
@ -663,9 +663,7 @@ static void imxdma_tasklet(unsigned long data)
|
|||
out:
|
||||
spin_unlock_irqrestore(&imxdma->lock, flags);
|
||||
|
||||
if (desc->desc.callback)
|
||||
desc->desc.callback(desc->desc.callback_param);
|
||||
|
||||
dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
|
||||
}
|
||||
|
||||
static int imxdma_terminate_all(struct dma_chan *chan)
|
||||
|
|
|
@ -650,8 +650,7 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
|
|||
|
||||
static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
|
||||
{
|
||||
if (sdmac->desc.callback)
|
||||
sdmac->desc.callback(sdmac->desc.callback_param);
|
||||
dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
|
||||
}
|
||||
|
||||
static void sdma_update_channel_loop(struct sdma_channel *sdmac)
|
||||
|
@ -701,8 +700,8 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
|
|||
sdmac->status = DMA_COMPLETE;
|
||||
|
||||
dma_cookie_complete(&sdmac->desc);
|
||||
if (sdmac->desc.callback)
|
||||
sdmac->desc.callback(sdmac->desc.callback_param);
|
||||
|
||||
dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
|
||||
}
|
||||
|
||||
static void sdma_tasklet(unsigned long data)
|
||||
|
|
|
@ -38,8 +38,54 @@
|
|||
|
||||
#include "../dmaengine.h"
|
||||
|
||||
static char *chanerr_str[] = {
|
||||
"DMA Transfer Destination Address Error",
|
||||
"Next Descriptor Address Error",
|
||||
"Descriptor Error",
|
||||
"Chan Address Value Error",
|
||||
"CHANCMD Error",
|
||||
"Chipset Uncorrectable Data Integrity Error",
|
||||
"DMA Uncorrectable Data Integrity Error",
|
||||
"Read Data Error",
|
||||
"Write Data Error",
|
||||
"Descriptor Control Error",
|
||||
"Descriptor Transfer Size Error",
|
||||
"Completion Address Error",
|
||||
"Interrupt Configuration Error",
|
||||
"Super extended descriptor Address Error",
|
||||
"Unaffiliated Error",
|
||||
"CRC or XOR P Error",
|
||||
"XOR Q Error",
|
||||
"Descriptor Count Error",
|
||||
"DIF All F detect Error",
|
||||
"Guard Tag verification Error",
|
||||
"Application Tag verification Error",
|
||||
"Reference Tag verification Error",
|
||||
"Bundle Bit Error",
|
||||
"Result DIF All F detect Error",
|
||||
"Result Guard Tag verification Error",
|
||||
"Result Application Tag verification Error",
|
||||
"Result Reference Tag verification Error",
|
||||
NULL
|
||||
};
|
||||
|
||||
static void ioat_eh(struct ioatdma_chan *ioat_chan);
|
||||
|
||||
static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
if ((chanerr >> i) & 1) {
|
||||
if (chanerr_str[i]) {
|
||||
dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
|
||||
i, chanerr_str[i]);
|
||||
} else
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ioat_dma_do_interrupt - handler used for single vector interrupt mode
|
||||
* @irq: interrupt id
|
||||
|
@ -568,12 +614,14 @@ static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
|
|||
|
||||
tx = &desc->txd;
|
||||
if (tx->cookie) {
|
||||
struct dmaengine_result res;
|
||||
|
||||
dma_cookie_complete(tx);
|
||||
dma_descriptor_unmap(tx);
|
||||
if (tx->callback) {
|
||||
tx->callback(tx->callback_param);
|
||||
tx->callback = NULL;
|
||||
}
|
||||
res.result = DMA_TRANS_NOERROR;
|
||||
dmaengine_desc_get_callback_invoke(tx, NULL);
|
||||
tx->callback = NULL;
|
||||
tx->callback_result = NULL;
|
||||
}
|
||||
|
||||
if (tx->phys == phys_complete)
|
||||
|
@ -622,7 +670,8 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
|
|||
if (is_ioat_halted(*ioat_chan->completion)) {
|
||||
u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
|
||||
|
||||
if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
|
||||
if (chanerr &
|
||||
(IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
|
||||
mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
|
||||
ioat_eh(ioat_chan);
|
||||
}
|
||||
|
@ -652,6 +701,61 @@ static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
|
|||
__ioat_restart_chan(ioat_chan);
|
||||
}
|
||||
|
||||
|
||||
static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
|
||||
{
|
||||
struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
|
||||
struct ioat_ring_ent *desc;
|
||||
u16 active;
|
||||
int idx = ioat_chan->tail, i;
|
||||
|
||||
/*
|
||||
* We assume that the failed descriptor has been processed.
|
||||
* Now we are just returning all the remaining submitted
|
||||
* descriptors to abort.
|
||||
*/
|
||||
active = ioat_ring_active(ioat_chan);
|
||||
|
||||
/* we skip the failed descriptor that tail points to */
|
||||
for (i = 1; i < active; i++) {
|
||||
struct dma_async_tx_descriptor *tx;
|
||||
|
||||
smp_read_barrier_depends();
|
||||
prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
|
||||
desc = ioat_get_ring_ent(ioat_chan, idx + i);
|
||||
|
||||
tx = &desc->txd;
|
||||
if (tx->cookie) {
|
||||
struct dmaengine_result res;
|
||||
|
||||
dma_cookie_complete(tx);
|
||||
dma_descriptor_unmap(tx);
|
||||
res.result = DMA_TRANS_ABORTED;
|
||||
dmaengine_desc_get_callback_invoke(tx, &res);
|
||||
tx->callback = NULL;
|
||||
tx->callback_result = NULL;
|
||||
}
|
||||
|
||||
/* skip extended descriptors */
|
||||
if (desc_has_ext(desc)) {
|
||||
WARN_ON(i + 1 >= active);
|
||||
i++;
|
||||
}
|
||||
|
||||
/* cleanup super extended descriptors */
|
||||
if (desc->sed) {
|
||||
ioat_free_sed(ioat_dma, desc->sed);
|
||||
desc->sed = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
smp_mb(); /* finish all descriptor reads before incrementing tail */
|
||||
ioat_chan->tail = idx + active;
|
||||
|
||||
desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
|
||||
ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
|
||||
}
|
||||
|
||||
static void ioat_eh(struct ioatdma_chan *ioat_chan)
|
||||
{
|
||||
struct pci_dev *pdev = to_pdev(ioat_chan);
|
||||
|
@ -662,6 +766,8 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan)
|
|||
u32 err_handled = 0;
|
||||
u32 chanerr_int;
|
||||
u32 chanerr;
|
||||
bool abort = false;
|
||||
struct dmaengine_result res;
|
||||
|
||||
/* cleanup so tail points to descriptor that caused the error */
|
||||
if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
|
||||
|
@ -697,30 +803,55 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan)
|
|||
break;
|
||||
}
|
||||
|
||||
if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
|
||||
if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
|
||||
res.result = DMA_TRANS_READ_FAILED;
|
||||
err_handled |= IOAT_CHANERR_READ_DATA_ERR;
|
||||
} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
|
||||
res.result = DMA_TRANS_WRITE_FAILED;
|
||||
err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
|
||||
}
|
||||
|
||||
abort = true;
|
||||
} else
|
||||
res.result = DMA_TRANS_NOERROR;
|
||||
|
||||
/* fault on unhandled error or spurious halt */
|
||||
if (chanerr ^ err_handled || chanerr == 0) {
|
||||
dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
|
||||
__func__, chanerr, err_handled);
|
||||
dev_err(to_dev(ioat_chan), "Errors handled:\n");
|
||||
ioat_print_chanerrs(ioat_chan, err_handled);
|
||||
dev_err(to_dev(ioat_chan), "Errors not handled:\n");
|
||||
ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));
|
||||
|
||||
BUG();
|
||||
} else { /* cleanup the faulty descriptor */
|
||||
tx = &desc->txd;
|
||||
if (tx->cookie) {
|
||||
dma_cookie_complete(tx);
|
||||
dma_descriptor_unmap(tx);
|
||||
if (tx->callback) {
|
||||
tx->callback(tx->callback_param);
|
||||
tx->callback = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
|
||||
pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
|
||||
/* cleanup the faulty descriptor since we are continuing */
|
||||
tx = &desc->txd;
|
||||
if (tx->cookie) {
|
||||
dma_cookie_complete(tx);
|
||||
dma_descriptor_unmap(tx);
|
||||
dmaengine_desc_get_callback_invoke(tx, &res);
|
||||
tx->callback = NULL;
|
||||
tx->callback_result = NULL;
|
||||
}
|
||||
|
||||
/* mark faulting descriptor as complete */
|
||||
*ioat_chan->completion = desc->txd.phys;
|
||||
|
||||
spin_lock_bh(&ioat_chan->prep_lock);
|
||||
/* we need abort all descriptors */
|
||||
if (abort) {
|
||||
ioat_abort_descs(ioat_chan);
|
||||
/* clean up the channel, we could be in weird state */
|
||||
ioat_reset_hw(ioat_chan);
|
||||
}
|
||||
|
||||
writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
|
||||
pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
|
||||
|
||||
ioat_restart_channel(ioat_chan);
|
||||
spin_unlock_bh(&ioat_chan->prep_lock);
|
||||
}
|
||||
|
@ -753,10 +884,28 @@ void ioat_timer_event(unsigned long data)
|
|||
chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
|
||||
dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
|
||||
__func__, chanerr);
|
||||
if (test_bit(IOAT_RUN, &ioat_chan->state))
|
||||
BUG_ON(is_ioat_bug(chanerr));
|
||||
else /* we never got off the ground */
|
||||
return;
|
||||
dev_err(to_dev(ioat_chan), "Errors:\n");
|
||||
ioat_print_chanerrs(ioat_chan, chanerr);
|
||||
|
||||
if (test_bit(IOAT_RUN, &ioat_chan->state)) {
|
||||
spin_lock_bh(&ioat_chan->cleanup_lock);
|
||||
spin_lock_bh(&ioat_chan->prep_lock);
|
||||
set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
|
||||
spin_unlock_bh(&ioat_chan->prep_lock);
|
||||
|
||||
ioat_abort_descs(ioat_chan);
|
||||
dev_warn(to_dev(ioat_chan), "Reset channel...\n");
|
||||
ioat_reset_hw(ioat_chan);
|
||||
dev_warn(to_dev(ioat_chan), "Restart channel...\n");
|
||||
ioat_restart_channel(ioat_chan);
|
||||
|
||||
spin_lock_bh(&ioat_chan->prep_lock);
|
||||
clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
|
||||
spin_unlock_bh(&ioat_chan->prep_lock);
|
||||
spin_unlock_bh(&ioat_chan->cleanup_lock);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_bh(&ioat_chan->cleanup_lock);
|
||||
|
@ -780,14 +929,26 @@ void ioat_timer_event(unsigned long data)
|
|||
u32 chanerr;
|
||||
|
||||
chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
|
||||
dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
|
||||
dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
|
||||
status, chanerr);
|
||||
dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
|
||||
ioat_ring_active(ioat_chan));
|
||||
dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
|
||||
status, chanerr);
|
||||
dev_err(to_dev(ioat_chan), "Errors:\n");
|
||||
ioat_print_chanerrs(ioat_chan, chanerr);
|
||||
|
||||
dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
|
||||
ioat_ring_active(ioat_chan));
|
||||
|
||||
spin_lock_bh(&ioat_chan->prep_lock);
|
||||
set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
|
||||
spin_unlock_bh(&ioat_chan->prep_lock);
|
||||
|
||||
ioat_abort_descs(ioat_chan);
|
||||
dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
|
||||
ioat_reset_hw(ioat_chan);
|
||||
dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
|
||||
ioat_restart_channel(ioat_chan);
|
||||
|
||||
spin_lock_bh(&ioat_chan->prep_lock);
|
||||
clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
|
||||
spin_unlock_bh(&ioat_chan->prep_lock);
|
||||
spin_unlock_bh(&ioat_chan->cleanup_lock);
|
||||
return;
|
||||
|
|
|
@ -240,6 +240,8 @@
|
|||
#define IOAT_CHANERR_DESCRIPTOR_COUNT_ERR 0x40000
|
||||
|
||||
#define IOAT_CHANERR_HANDLE_MASK (IOAT_CHANERR_XOR_P_OR_CRC_ERR | IOAT_CHANERR_XOR_Q_ERR)
|
||||
#define IOAT_CHANERR_RECOVER_MASK (IOAT_CHANERR_READ_DATA_ERR | \
|
||||
IOAT_CHANERR_WRITE_DATA_ERR)
|
||||
|
||||
#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */
|
||||
|
||||
|
|
|
@ -71,8 +71,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
|
|||
/* call the callback (must not sleep or submit new
|
||||
* operations to this channel)
|
||||
*/
|
||||
if (tx->callback)
|
||||
tx->callback(tx->callback_param);
|
||||
dmaengine_desc_get_callback_invoke(tx, NULL);
|
||||
|
||||
dma_descriptor_unmap(tx);
|
||||
if (desc->group_head)
|
||||
|
|
|
@ -1160,11 +1160,10 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
|
|||
struct scatterlist **sg, *sgnext, *sgnew = NULL;
|
||||
/* Next transfer descriptor */
|
||||
struct idmac_tx_desc *desc, *descnew;
|
||||
dma_async_tx_callback callback;
|
||||
void *callback_param;
|
||||
bool done = false;
|
||||
u32 ready0, ready1, curbuf, err;
|
||||
unsigned long flags;
|
||||
struct dmaengine_desc_callback cb;
|
||||
|
||||
/* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
|
||||
|
||||
|
@ -1278,12 +1277,12 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
|
|||
|
||||
if (likely(sgnew) &&
|
||||
ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
|
||||
callback = descnew->txd.callback;
|
||||
callback_param = descnew->txd.callback_param;
|
||||
dmaengine_desc_get_callback(&descnew->txd, &cb);
|
||||
|
||||
list_del_init(&descnew->list);
|
||||
spin_unlock(&ichan->lock);
|
||||
if (callback)
|
||||
callback(callback_param);
|
||||
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
spin_lock(&ichan->lock);
|
||||
}
|
||||
|
||||
|
@ -1292,13 +1291,12 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
|
|||
if (done)
|
||||
dma_cookie_complete(&desc->txd);
|
||||
|
||||
callback = desc->txd.callback;
|
||||
callback_param = desc->txd.callback_param;
|
||||
dmaengine_desc_get_callback(&desc->txd, &cb);
|
||||
|
||||
spin_unlock(&ichan->lock);
|
||||
|
||||
if (done && (desc->txd.flags & DMA_PREP_INTERRUPT) && callback)
|
||||
callback(callback_param);
|
||||
if (done && (desc->txd.flags & DMA_PREP_INTERRUPT))
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
|
|
@ -104,10 +104,8 @@ static void mic_dma_cleanup(struct mic_dma_chan *ch)
|
|||
tx = &ch->tx_array[last_tail];
|
||||
if (tx->cookie) {
|
||||
dma_cookie_complete(tx);
|
||||
if (tx->callback) {
|
||||
tx->callback(tx->callback_param);
|
||||
tx->callback = NULL;
|
||||
}
|
||||
dmaengine_desc_get_callback_invoke(tx, NULL);
|
||||
tx->callback = NULL;
|
||||
}
|
||||
last_tail = mic_dma_hw_ring_inc(last_tail);
|
||||
}
|
||||
|
|
|
@ -864,19 +864,15 @@ static void dma_do_tasklet(unsigned long data)
|
|||
struct mmp_pdma_desc_sw *desc, *_desc;
|
||||
LIST_HEAD(chain_cleanup);
|
||||
unsigned long flags;
|
||||
struct dmaengine_desc_callback cb;
|
||||
|
||||
if (chan->cyclic_first) {
|
||||
dma_async_tx_callback cb = NULL;
|
||||
void *cb_data = NULL;
|
||||
|
||||
spin_lock_irqsave(&chan->desc_lock, flags);
|
||||
desc = chan->cyclic_first;
|
||||
cb = desc->async_tx.callback;
|
||||
cb_data = desc->async_tx.callback_param;
|
||||
dmaengine_desc_get_callback(&desc->async_tx, &cb);
|
||||
spin_unlock_irqrestore(&chan->desc_lock, flags);
|
||||
|
||||
if (cb)
|
||||
cb(cb_data);
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
|
||||
return;
|
||||
}
|
||||
|
@ -921,8 +917,8 @@ static void dma_do_tasklet(unsigned long data)
|
|||
/* Remove from the list of transactions */
|
||||
list_del(&desc->node);
|
||||
/* Run the link descriptor callback function */
|
||||
if (txd->callback)
|
||||
txd->callback(txd->callback_param);
|
||||
dmaengine_desc_get_callback(txd, &cb);
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
|
||||
dma_pool_free(chan->desc_pool, desc, txd->phys);
|
||||
}
|
||||
|
|
|
@ -349,9 +349,7 @@ static void dma_do_tasklet(unsigned long data)
|
|||
{
|
||||
struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data;
|
||||
|
||||
if (tdmac->desc.callback)
|
||||
tdmac->desc.callback(tdmac->desc.callback_param);
|
||||
|
||||
dmaengine_desc_get_callback_invoke(&tdmac->desc, NULL);
|
||||
}
|
||||
|
||||
static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
|
||||
|
|
|
@ -411,8 +411,7 @@ static void mpc_dma_process_completed(struct mpc_dma *mdma)
|
|||
list_for_each_entry(mdesc, &list, node) {
|
||||
desc = &mdesc->desc;
|
||||
|
||||
if (desc->callback)
|
||||
desc->callback(desc->callback_param);
|
||||
dmaengine_desc_get_callback_invoke(desc, NULL);
|
||||
|
||||
last_cookie = desc->cookie;
|
||||
dma_run_dependencies(desc);
|
||||
|
|
|
@ -209,10 +209,7 @@ mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
|
|||
/* call the callback (must not sleep or submit new
|
||||
* operations to this channel)
|
||||
*/
|
||||
if (desc->async_tx.callback)
|
||||
desc->async_tx.callback(
|
||||
desc->async_tx.callback_param);
|
||||
|
||||
dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
|
||||
dma_descriptor_unmap(&desc->async_tx);
|
||||
}
|
||||
|
||||
|
|
|
@ -326,8 +326,7 @@ static void mxs_dma_tasklet(unsigned long data)
|
|||
{
|
||||
struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;
|
||||
|
||||
if (mxs_chan->desc.callback)
|
||||
mxs_chan->desc.callback(mxs_chan->desc.callback_param);
|
||||
dmaengine_desc_get_callback_invoke(&mxs_chan->desc, NULL);
|
||||
}
|
||||
|
||||
static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
|
||||
|
|
|
@ -1102,8 +1102,7 @@ static void nbpf_chan_tasklet(unsigned long data)
|
|||
{
|
||||
struct nbpf_channel *chan = (struct nbpf_channel *)data;
|
||||
struct nbpf_desc *desc, *tmp;
|
||||
dma_async_tx_callback callback;
|
||||
void *param;
|
||||
struct dmaengine_desc_callback cb;
|
||||
|
||||
while (!list_empty(&chan->done)) {
|
||||
bool found = false, must_put, recycling = false;
|
||||
|
@ -1151,14 +1150,12 @@ static void nbpf_chan_tasklet(unsigned long data)
|
|||
must_put = false;
|
||||
}
|
||||
|
||||
callback = desc->async_tx.callback;
|
||||
param = desc->async_tx.callback_param;
|
||||
dmaengine_desc_get_callback(&desc->async_tx, &cb);
|
||||
|
||||
/* ack and callback completed descriptor */
|
||||
spin_unlock_irq(&chan->lock);
|
||||
|
||||
if (callback)
|
||||
callback(param);
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
|
||||
if (must_put)
|
||||
nbpf_desc_put(desc);
|
||||
|
|
|
@ -357,14 +357,13 @@ static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
|
|||
struct pch_dma_desc *desc)
|
||||
{
|
||||
struct dma_async_tx_descriptor *txd = &desc->txd;
|
||||
dma_async_tx_callback callback = txd->callback;
|
||||
void *param = txd->callback_param;
|
||||
struct dmaengine_desc_callback cb;
|
||||
|
||||
dmaengine_desc_get_callback(txd, &cb);
|
||||
list_splice_init(&desc->tx_list, &pd_chan->free_list);
|
||||
list_move(&desc->desc_node, &pd_chan->free_list);
|
||||
|
||||
if (callback)
|
||||
callback(param);
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
}
|
||||
|
||||
static void pdc_complete_all(struct pch_dma_chan *pd_chan)
|
||||
|
|
|
@ -2039,14 +2039,12 @@ static void pl330_tasklet(unsigned long data)
|
|||
}
|
||||
|
||||
while (!list_empty(&pch->completed_list)) {
|
||||
dma_async_tx_callback callback;
|
||||
void *callback_param;
|
||||
struct dmaengine_desc_callback cb;
|
||||
|
||||
desc = list_first_entry(&pch->completed_list,
|
||||
struct dma_pl330_desc, node);
|
||||
|
||||
callback = desc->txd.callback;
|
||||
callback_param = desc->txd.callback_param;
|
||||
dmaengine_desc_get_callback(&desc->txd, &cb);
|
||||
|
||||
if (pch->cyclic) {
|
||||
desc->status = PREP;
|
||||
|
@ -2064,9 +2062,9 @@ static void pl330_tasklet(unsigned long data)
|
|||
|
||||
dma_descriptor_unmap(&desc->txd);
|
||||
|
||||
if (callback) {
|
||||
if (dmaengine_desc_callback_valid(&cb)) {
|
||||
spin_unlock_irqrestore(&pch->lock, flags);
|
||||
callback(callback_param);
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
spin_lock_irqsave(&pch->lock, flags);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1485,10 +1485,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
|
|||
/* call the callback (must not sleep or submit new
|
||||
* operations to this channel)
|
||||
*/
|
||||
if (desc->async_tx.callback)
|
||||
desc->async_tx.callback(
|
||||
desc->async_tx.callback_param);
|
||||
|
||||
dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
|
||||
dma_descriptor_unmap(&desc->async_tx);
|
||||
}
|
||||
|
||||
|
|
|
@ -111,6 +111,7 @@ static void hidma_process_completed(struct hidma_chan *mchan)
|
|||
struct dma_async_tx_descriptor *desc;
|
||||
dma_cookie_t last_cookie;
|
||||
struct hidma_desc *mdesc;
|
||||
struct hidma_desc *next;
|
||||
unsigned long irqflags;
|
||||
struct list_head list;
|
||||
|
||||
|
@ -122,28 +123,36 @@ static void hidma_process_completed(struct hidma_chan *mchan)
|
|||
spin_unlock_irqrestore(&mchan->lock, irqflags);
|
||||
|
||||
/* Execute callbacks and run dependencies */
|
||||
list_for_each_entry(mdesc, &list, node) {
|
||||
list_for_each_entry_safe(mdesc, next, &list, node) {
|
||||
enum dma_status llstat;
|
||||
struct dmaengine_desc_callback cb;
|
||||
struct dmaengine_result result;
|
||||
|
||||
desc = &mdesc->desc;
|
||||
last_cookie = desc->cookie;
|
||||
|
||||
spin_lock_irqsave(&mchan->lock, irqflags);
|
||||
dma_cookie_complete(desc);
|
||||
spin_unlock_irqrestore(&mchan->lock, irqflags);
|
||||
|
||||
llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
|
||||
if (desc->callback && (llstat == DMA_COMPLETE))
|
||||
desc->callback(desc->callback_param);
|
||||
dmaengine_desc_get_callback(desc, &cb);
|
||||
|
||||
last_cookie = desc->cookie;
|
||||
dma_run_dependencies(desc);
|
||||
|
||||
spin_lock_irqsave(&mchan->lock, irqflags);
|
||||
list_move(&mdesc->node, &mchan->free);
|
||||
|
||||
if (llstat == DMA_COMPLETE) {
|
||||
mchan->last_success = last_cookie;
|
||||
result.result = DMA_TRANS_NOERROR;
|
||||
} else
|
||||
result.result = DMA_TRANS_ABORTED;
|
||||
|
||||
spin_unlock_irqrestore(&mchan->lock, irqflags);
|
||||
|
||||
dmaengine_desc_callback_invoke(&cb, &result);
|
||||
}
|
||||
|
||||
/* Free descriptors */
|
||||
spin_lock_irqsave(&mchan->lock, irqflags);
|
||||
list_splice_tail_init(&list, &mchan->free);
|
||||
spin_unlock_irqrestore(&mchan->lock, irqflags);
|
||||
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -238,6 +247,19 @@ static void hidma_issue_pending(struct dma_chan *dmach)
|
|||
hidma_ll_start(dmadev->lldev);
|
||||
}
|
||||
|
||||
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
|
||||
dma_cookie_t last_success, dma_cookie_t last_used)
|
||||
{
|
||||
if (last_success <= last_used) {
|
||||
if ((cookie <= last_success) || (cookie > last_used))
|
||||
return true;
|
||||
} else {
|
||||
if ((cookie <= last_success) && (cookie > last_used))
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static enum dma_status hidma_tx_status(struct dma_chan *dmach,
|
||||
dma_cookie_t cookie,
|
||||
struct dma_tx_state *txstate)
|
||||
|
@ -246,8 +268,13 @@ static enum dma_status hidma_tx_status(struct dma_chan *dmach,
|
|||
enum dma_status ret;
|
||||
|
||||
ret = dma_cookie_status(dmach, cookie, txstate);
|
||||
if (ret == DMA_COMPLETE)
|
||||
return ret;
|
||||
if (ret == DMA_COMPLETE) {
|
||||
bool is_success;
|
||||
|
||||
is_success = hidma_txn_is_success(cookie, mchan->last_success,
|
||||
dmach->cookie);
|
||||
return is_success ? ret : DMA_ERROR;
|
||||
}
|
||||
|
||||
if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
|
||||
unsigned long flags;
|
||||
|
@ -398,6 +425,7 @@ static int hidma_terminate_channel(struct dma_chan *chan)
|
|||
hidma_process_completed(mchan);
|
||||
|
||||
spin_lock_irqsave(&mchan->lock, irqflags);
|
||||
mchan->last_success = 0;
|
||||
list_splice_init(&mchan->active, &list);
|
||||
list_splice_init(&mchan->prepared, &list);
|
||||
list_splice_init(&mchan->completed, &list);
|
||||
|
@ -413,14 +441,9 @@ static int hidma_terminate_channel(struct dma_chan *chan)
|
|||
/* return all user requests */
|
||||
list_for_each_entry_safe(mdesc, tmp, &list, node) {
|
||||
struct dma_async_tx_descriptor *txd = &mdesc->desc;
|
||||
dma_async_tx_callback callback = mdesc->desc.callback;
|
||||
void *param = mdesc->desc.callback_param;
|
||||
|
||||
dma_descriptor_unmap(txd);
|
||||
|
||||
if (callback)
|
||||
callback(param);
|
||||
|
||||
dmaengine_desc_get_callback_invoke(txd, NULL);
|
||||
dma_run_dependencies(txd);
|
||||
|
||||
/* move myself to free_list */
|
||||
|
|
|
@ -72,7 +72,6 @@ struct hidma_lldev {
|
|||
|
||||
u32 tre_write_offset; /* TRE write location */
|
||||
struct tasklet_struct task; /* task delivering notifications */
|
||||
struct tasklet_struct rst_task; /* task to reset HW */
|
||||
DECLARE_KFIFO_PTR(handoff_fifo,
|
||||
struct hidma_tre *); /* pending TREs FIFO */
|
||||
};
|
||||
|
@ -89,6 +88,7 @@ struct hidma_chan {
|
|||
bool allocated;
|
||||
char dbg_name[16];
|
||||
u32 dma_sig;
|
||||
dma_cookie_t last_success;
|
||||
|
||||
/*
|
||||
* active descriptor on this channel
|
||||
|
|
|
@ -380,27 +380,6 @@ static int hidma_ll_reset(struct hidma_lldev *lldev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Abort all transactions and perform a reset.
|
||||
*/
|
||||
static void hidma_ll_abort(unsigned long arg)
|
||||
{
|
||||
struct hidma_lldev *lldev = (struct hidma_lldev *)arg;
|
||||
u8 err_code = HIDMA_EVRE_STATUS_ERROR;
|
||||
u8 err_info = 0xFF;
|
||||
int rc;
|
||||
|
||||
hidma_cleanup_pending_tre(lldev, err_info, err_code);
|
||||
|
||||
/* reset the channel for recovery */
|
||||
rc = hidma_ll_setup(lldev);
|
||||
if (rc) {
|
||||
dev_err(lldev->dev, "channel reinitialize failed after error\n");
|
||||
return;
|
||||
}
|
||||
writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
|
||||
}
|
||||
|
||||
/*
|
||||
* The interrupt handler for HIDMA will try to consume as many pending
|
||||
* EVRE from the event queue as possible. Each EVRE has an associated
|
||||
|
@ -454,13 +433,18 @@ irqreturn_t hidma_ll_inthandler(int chirq, void *arg)
|
|||
|
||||
while (cause) {
|
||||
if (cause & HIDMA_ERR_INT_MASK) {
|
||||
dev_err(lldev->dev, "error 0x%x, resetting...\n",
|
||||
dev_err(lldev->dev, "error 0x%x, disabling...\n",
|
||||
cause);
|
||||
|
||||
/* Clear out pending interrupts */
|
||||
writel(cause, lldev->evca + HIDMA_EVCA_IRQ_CLR_REG);
|
||||
|
||||
tasklet_schedule(&lldev->rst_task);
|
||||
/* No further submissions. */
|
||||
hidma_ll_disable(lldev);
|
||||
|
||||
/* Driver completes the txn and intimates the client.*/
|
||||
hidma_cleanup_pending_tre(lldev, 0xFF,
|
||||
HIDMA_EVRE_STATUS_ERROR);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@ -808,7 +792,6 @@ struct hidma_lldev *hidma_ll_init(struct device *dev, u32 nr_tres,
|
|||
return NULL;
|
||||
|
||||
spin_lock_init(&lldev->lock);
|
||||
tasklet_init(&lldev->rst_task, hidma_ll_abort, (unsigned long)lldev);
|
||||
tasklet_init(&lldev->task, hidma_ll_tre_complete, (unsigned long)lldev);
|
||||
lldev->initialized = 1;
|
||||
writel(ENABLE_IRQS, lldev->evca + HIDMA_EVCA_IRQ_EN_REG);
|
||||
|
@ -831,7 +814,6 @@ int hidma_ll_uninit(struct hidma_lldev *lldev)
|
|||
|
||||
required_bytes = sizeof(struct hidma_tre) * lldev->nr_tres;
|
||||
tasklet_kill(&lldev->task);
|
||||
tasklet_kill(&lldev->rst_task);
|
||||
memset(lldev->trepool, 0, required_bytes);
|
||||
lldev->trepool = NULL;
|
||||
lldev->pending_tre_count = 0;
|
||||
|
|
|
@ -1389,21 +1389,18 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
|
|||
{
|
||||
struct rcar_dmac_chan *chan = dev;
|
||||
struct rcar_dmac_desc *desc;
|
||||
struct dmaengine_desc_callback cb;
|
||||
|
||||
spin_lock_irq(&chan->lock);
|
||||
|
||||
/* For cyclic transfers notify the user after every chunk. */
|
||||
if (chan->desc.running && chan->desc.running->cyclic) {
|
||||
dma_async_tx_callback callback;
|
||||
void *callback_param;
|
||||
|
||||
desc = chan->desc.running;
|
||||
callback = desc->async_tx.callback;
|
||||
callback_param = desc->async_tx.callback_param;
|
||||
dmaengine_desc_get_callback(&desc->async_tx, &cb);
|
||||
|
||||
if (callback) {
|
||||
if (dmaengine_desc_callback_valid(&cb)) {
|
||||
spin_unlock_irq(&chan->lock);
|
||||
callback(callback_param);
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
spin_lock_irq(&chan->lock);
|
||||
}
|
||||
}
|
||||
|
@ -1418,14 +1415,15 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
|
|||
dma_cookie_complete(&desc->async_tx);
|
||||
list_del(&desc->node);
|
||||
|
||||
if (desc->async_tx.callback) {
|
||||
dmaengine_desc_get_callback(&desc->async_tx, &cb);
|
||||
if (dmaengine_desc_callback_valid(&cb)) {
|
||||
spin_unlock_irq(&chan->lock);
|
||||
/*
|
||||
* We own the only reference to this descriptor, we can
|
||||
* safely dereference it without holding the channel
|
||||
* lock.
|
||||
*/
|
||||
desc->async_tx.callback(desc->async_tx.callback_param);
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
spin_lock_irq(&chan->lock);
|
||||
}
|
||||
|
||||
|
|
|
@ -330,10 +330,11 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
|
|||
bool head_acked = false;
|
||||
dma_cookie_t cookie = 0;
|
||||
dma_async_tx_callback callback = NULL;
|
||||
void *param = NULL;
|
||||
struct dmaengine_desc_callback cb;
|
||||
unsigned long flags;
|
||||
LIST_HEAD(cyclic_list);
|
||||
|
||||
memset(&cb, 0, sizeof(cb));
|
||||
spin_lock_irqsave(&schan->chan_lock, flags);
|
||||
list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
|
||||
struct dma_async_tx_descriptor *tx = &desc->async_tx;
|
||||
|
@ -367,8 +368,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
|
|||
/* Call callback on the last chunk */
|
||||
if (desc->mark == DESC_COMPLETED && tx->callback) {
|
||||
desc->mark = DESC_WAITING;
|
||||
dmaengine_desc_get_callback(tx, &cb);
|
||||
callback = tx->callback;
|
||||
param = tx->callback_param;
|
||||
dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
|
||||
tx->cookie, tx, schan->id);
|
||||
BUG_ON(desc->chunks != 1);
|
||||
|
@ -430,8 +431,7 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
|
|||
|
||||
spin_unlock_irqrestore(&schan->chan_lock, flags);
|
||||
|
||||
if (callback)
|
||||
callback(param);
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
|
||||
return callback;
|
||||
}
|
||||
|
@ -885,9 +885,9 @@ bool shdma_reset(struct shdma_dev *sdev)
|
|||
/* Complete all */
|
||||
list_for_each_entry(sdesc, &dl, node) {
|
||||
struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
|
||||
|
||||
sdesc->mark = DESC_IDLE;
|
||||
if (tx->callback)
|
||||
tx->callback(tx->callback_param);
|
||||
dmaengine_desc_get_callback_invoke(tx, NULL);
|
||||
}
|
||||
|
||||
spin_lock(&schan->chan_lock);
|
||||
|
|
|
@ -360,9 +360,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
|
|||
list_for_each_entry(sdesc, &list, node) {
|
||||
desc = &sdesc->desc;
|
||||
|
||||
if (desc->callback)
|
||||
desc->callback(desc->callback_param);
|
||||
|
||||
dmaengine_desc_get_callback_invoke(desc, NULL);
|
||||
last_cookie = desc->cookie;
|
||||
dma_run_dependencies(desc);
|
||||
}
|
||||
|
@ -388,8 +386,7 @@ static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
|
|||
|
||||
desc = &sdesc->desc;
|
||||
while (happened_cyclic != schan->completed_cyclic) {
|
||||
if (desc->callback)
|
||||
desc->callback(desc->callback_param);
|
||||
dmaengine_desc_get_callback_invoke(desc, NULL);
|
||||
schan->completed_cyclic++;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1570,8 +1570,7 @@ static void dma_tasklet(unsigned long data)
|
|||
struct d40_desc *d40d;
|
||||
unsigned long flags;
|
||||
bool callback_active;
|
||||
dma_async_tx_callback callback;
|
||||
void *callback_param;
|
||||
struct dmaengine_desc_callback cb;
|
||||
|
||||
spin_lock_irqsave(&d40c->lock, flags);
|
||||
|
||||
|
@ -1598,8 +1597,7 @@ static void dma_tasklet(unsigned long data)
|
|||
|
||||
/* Callback to client */
|
||||
callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
|
||||
callback = d40d->txd.callback;
|
||||
callback_param = d40d->txd.callback_param;
|
||||
dmaengine_desc_get_callback(&d40d->txd, &cb);
|
||||
|
||||
if (!d40d->cyclic) {
|
||||
if (async_tx_test_ack(&d40d->txd)) {
|
||||
|
@ -1620,8 +1618,8 @@ static void dma_tasklet(unsigned long data)
|
|||
|
||||
spin_unlock_irqrestore(&d40c->lock, flags);
|
||||
|
||||
if (callback_active && callback)
|
||||
callback(callback_param);
|
||||
if (callback_active)
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
|
||||
return;
|
||||
|
||||
|
|
|
@ -655,8 +655,7 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
|
|||
static void tegra_dma_tasklet(unsigned long data)
|
||||
{
|
||||
struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
|
||||
dma_async_tx_callback callback = NULL;
|
||||
void *callback_param = NULL;
|
||||
struct dmaengine_desc_callback cb;
|
||||
struct tegra_dma_desc *dma_desc;
|
||||
unsigned long flags;
|
||||
int cb_count;
|
||||
|
@ -666,13 +665,12 @@ static void tegra_dma_tasklet(unsigned long data)
|
|||
dma_desc = list_first_entry(&tdc->cb_desc,
|
||||
typeof(*dma_desc), cb_node);
|
||||
list_del(&dma_desc->cb_node);
|
||||
callback = dma_desc->txd.callback;
|
||||
callback_param = dma_desc->txd.callback_param;
|
||||
dmaengine_desc_get_callback(&dma_desc->txd, &cb);
|
||||
cb_count = dma_desc->cb_count;
|
||||
dma_desc->cb_count = 0;
|
||||
spin_unlock_irqrestore(&tdc->lock, flags);
|
||||
while (cb_count-- && callback)
|
||||
callback(callback_param);
|
||||
while (cb_count--)
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
spin_lock_irqsave(&tdc->lock, flags);
|
||||
}
|
||||
spin_unlock_irqrestore(&tdc->lock, flags);
|
||||
|
|
|
@ -226,8 +226,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan)
|
|||
|
||||
static void __td_finish(struct timb_dma_chan *td_chan)
|
||||
{
|
||||
dma_async_tx_callback callback;
|
||||
void *param;
|
||||
struct dmaengine_desc_callback cb;
|
||||
struct dma_async_tx_descriptor *txd;
|
||||
struct timb_dma_desc *td_desc;
|
||||
|
||||
|
@ -252,8 +251,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
|
|||
dma_cookie_complete(txd);
|
||||
td_chan->ongoing = false;
|
||||
|
||||
callback = txd->callback;
|
||||
param = txd->callback_param;
|
||||
dmaengine_desc_get_callback(txd, &cb);
|
||||
|
||||
list_move(&td_desc->desc_node, &td_chan->free_list);
|
||||
|
||||
|
@ -262,8 +260,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
|
|||
* The API requires that no submissions are done from a
|
||||
* callback, so we don't need to drop the lock here
|
||||
*/
|
||||
if (callback)
|
||||
callback(param);
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
}
|
||||
|
||||
static u32 __td_ier_mask(struct timb_dma *td)
|
||||
|
|
|
@ -403,16 +403,14 @@ static void
|
|||
txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
|
||||
struct txx9dmac_desc *desc)
|
||||
{
|
||||
dma_async_tx_callback callback;
|
||||
void *param;
|
||||
struct dmaengine_desc_callback cb;
|
||||
struct dma_async_tx_descriptor *txd = &desc->txd;
|
||||
|
||||
dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
|
||||
txd->cookie, desc);
|
||||
|
||||
dma_cookie_complete(txd);
|
||||
callback = txd->callback;
|
||||
param = txd->callback_param;
|
||||
dmaengine_desc_get_callback(txd, &cb);
|
||||
|
||||
txx9dmac_sync_desc_for_cpu(dc, desc);
|
||||
list_splice_init(&desc->tx_list, &dc->free_list);
|
||||
|
@ -423,8 +421,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
|
|||
* The API requires that no submissions are done from a
|
||||
* callback, so we don't need to drop the lock here
|
||||
*/
|
||||
if (callback)
|
||||
callback(param);
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
dma_run_dependencies(txd);
|
||||
}
|
||||
|
||||
|
|
|
@ -87,8 +87,7 @@ static void vchan_complete(unsigned long arg)
|
|||
{
|
||||
struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
|
||||
struct virt_dma_desc *vd;
|
||||
dma_async_tx_callback cb = NULL;
|
||||
void *cb_data = NULL;
|
||||
struct dmaengine_desc_callback cb;
|
||||
LIST_HEAD(head);
|
||||
|
||||
spin_lock_irq(&vc->lock);
|
||||
|
@ -96,18 +95,17 @@ static void vchan_complete(unsigned long arg)
|
|||
vd = vc->cyclic;
|
||||
if (vd) {
|
||||
vc->cyclic = NULL;
|
||||
cb = vd->tx.callback;
|
||||
cb_data = vd->tx.callback_param;
|
||||
dmaengine_desc_get_callback(&vd->tx, &cb);
|
||||
} else {
|
||||
memset(&cb, 0, sizeof(cb));
|
||||
}
|
||||
spin_unlock_irq(&vc->lock);
|
||||
|
||||
if (cb)
|
||||
cb(cb_data);
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
|
||||
while (!list_empty(&head)) {
|
||||
vd = list_first_entry(&head, struct virt_dma_desc, node);
|
||||
cb = vd->tx.callback;
|
||||
cb_data = vd->tx.callback_param;
|
||||
dmaengine_desc_get_callback(&vd->tx, &cb);
|
||||
|
||||
list_del(&vd->node);
|
||||
if (dmaengine_desc_test_reuse(&vd->tx))
|
||||
|
@ -115,8 +113,7 @@ static void vchan_complete(unsigned long arg)
|
|||
else
|
||||
vc->desc_free(vd);
|
||||
|
||||
if (cb)
|
||||
cb(cb_data);
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -608,8 +608,7 @@ static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
|
|||
dma_cookie_complete(tx);
|
||||
|
||||
/* Run the link descriptor callback function */
|
||||
if (tx->callback)
|
||||
tx->callback(tx->callback_param);
|
||||
dmaengine_desc_get_callback_invoke(tx, NULL);
|
||||
|
||||
dma_descriptor_unmap(tx);
|
||||
|
||||
|
|
|
@ -755,8 +755,7 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
|
|||
spin_lock_irqsave(&chan->lock, flags);
|
||||
|
||||
list_for_each_entry_safe(desc, next, &chan->done_list, node) {
|
||||
dma_async_tx_callback callback;
|
||||
void *callback_param;
|
||||
struct dmaengine_desc_callback cb;
|
||||
|
||||
if (desc->cyclic) {
|
||||
xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
|
||||
|
@ -767,11 +766,10 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
|
|||
list_del(&desc->node);
|
||||
|
||||
/* Run the link descriptor callback function */
|
||||
callback = desc->async_tx.callback;
|
||||
callback_param = desc->async_tx.callback_param;
|
||||
if (callback) {
|
||||
dmaengine_desc_get_callback(&desc->async_tx, &cb);
|
||||
if (dmaengine_desc_callback_valid(&cb)) {
|
||||
spin_unlock_irqrestore(&chan->lock, flags);
|
||||
callback(callback_param);
|
||||
dmaengine_desc_callback_invoke(&cb, NULL);
|
||||
spin_lock_irqsave(&chan->lock, flags);
|
||||
}
|
||||
|
||||
|
|
|
@ -102,13 +102,16 @@ struct ntb_queue_entry {
|
|||
void *buf;
|
||||
unsigned int len;
|
||||
unsigned int flags;
|
||||
int retries;
|
||||
int errors;
|
||||
unsigned int tx_index;
|
||||
unsigned int rx_index;
|
||||
|
||||
struct ntb_transport_qp *qp;
|
||||
union {
|
||||
struct ntb_payload_header __iomem *tx_hdr;
|
||||
struct ntb_payload_header *rx_hdr;
|
||||
};
|
||||
unsigned int index;
|
||||
};
|
||||
|
||||
struct ntb_rx_info {
|
||||
|
@ -259,6 +262,12 @@ enum {
|
|||
static void ntb_transport_rxc_db(unsigned long data);
|
||||
static const struct ntb_ctx_ops ntb_transport_ops;
|
||||
static struct ntb_client ntb_transport_client;
|
||||
static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
|
||||
struct ntb_queue_entry *entry);
|
||||
static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
|
||||
static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset);
|
||||
static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);
|
||||
|
||||
|
||||
static int ntb_transport_bus_match(struct device *dev,
|
||||
struct device_driver *drv)
|
||||
|
@ -1229,7 +1238,7 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp)
|
|||
break;
|
||||
|
||||
entry->rx_hdr->flags = 0;
|
||||
iowrite32(entry->index, &qp->rx_info->entry);
|
||||
iowrite32(entry->rx_index, &qp->rx_info->entry);
|
||||
|
||||
cb_data = entry->cb_data;
|
||||
len = entry->len;
|
||||
|
@ -1247,10 +1256,36 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp)
|
|||
spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
|
||||
}
|
||||
|
||||
static void ntb_rx_copy_callback(void *data)
|
||||
static void ntb_rx_copy_callback(void *data,
|
||||
const struct dmaengine_result *res)
|
||||
{
|
||||
struct ntb_queue_entry *entry = data;
|
||||
|
||||
/* we need to check DMA results if we are using DMA */
|
||||
if (res) {
|
||||
enum dmaengine_tx_result dma_err = res->result;
|
||||
|
||||
switch (dma_err) {
|
||||
case DMA_TRANS_READ_FAILED:
|
||||
case DMA_TRANS_WRITE_FAILED:
|
||||
entry->errors++;
|
||||
case DMA_TRANS_ABORTED:
|
||||
{
|
||||
struct ntb_transport_qp *qp = entry->qp;
|
||||
void *offset = qp->rx_buff + qp->rx_max_frame *
|
||||
qp->rx_index;
|
||||
|
||||
ntb_memcpy_rx(entry, offset);
|
||||
qp->rx_memcpy++;
|
||||
return;
|
||||
}
|
||||
|
||||
case DMA_TRANS_NOERROR:
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
entry->flags |= DESC_DONE_FLAG;
|
||||
|
||||
ntb_complete_rxc(entry->qp);
|
||||
|
@ -1266,10 +1301,10 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
|
|||
/* Ensure that the data is fully copied out before clearing the flag */
|
||||
wmb();
|
||||
|
||||
ntb_rx_copy_callback(entry);
|
||||
ntb_rx_copy_callback(entry, NULL);
|
||||
}
|
||||
|
||||
static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
|
||||
static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
|
||||
{
|
||||
struct dma_async_tx_descriptor *txd;
|
||||
struct ntb_transport_qp *qp = entry->qp;
|
||||
|
@ -1282,13 +1317,6 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
|
|||
int retries = 0;
|
||||
|
||||
len = entry->len;
|
||||
|
||||
if (!chan)
|
||||
goto err;
|
||||
|
||||
if (len < copy_bytes)
|
||||
goto err;
|
||||
|
||||
device = chan->device;
|
||||
pay_off = (size_t)offset & ~PAGE_MASK;
|
||||
buff_off = (size_t)buf & ~PAGE_MASK;
|
||||
|
@ -1316,7 +1344,8 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
|
|||
unmap->from_cnt = 1;
|
||||
|
||||
for (retries = 0; retries < DMA_RETRIES; retries++) {
|
||||
txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
|
||||
txd = device->device_prep_dma_memcpy(chan,
|
||||
unmap->addr[1],
|
||||
unmap->addr[0], len,
|
||||
DMA_PREP_INTERRUPT);
|
||||
if (txd)
|
||||
|
@ -1331,7 +1360,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
|
|||
goto err_get_unmap;
|
||||
}
|
||||
|
||||
txd->callback = ntb_rx_copy_callback;
|
||||
txd->callback_result = ntb_rx_copy_callback;
|
||||
txd->callback_param = entry;
|
||||
dma_set_unmap(txd, unmap);
|
||||
|
||||
|
@ -1345,12 +1374,37 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
|
|||
|
||||
qp->rx_async++;
|
||||
|
||||
return;
|
||||
return 0;
|
||||
|
||||
err_set_unmap:
|
||||
dmaengine_unmap_put(unmap);
|
||||
err_get_unmap:
|
||||
dmaengine_unmap_put(unmap);
|
||||
err:
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
|
||||
{
|
||||
struct ntb_transport_qp *qp = entry->qp;
|
||||
struct dma_chan *chan = qp->rx_dma_chan;
|
||||
int res;
|
||||
|
||||
if (!chan)
|
||||
goto err;
|
||||
|
||||
if (entry->len < copy_bytes)
|
||||
goto err;
|
||||
|
||||
res = ntb_async_rx_submit(entry, offset);
|
||||
if (res < 0)
|
||||
goto err;
|
||||
|
||||
if (!entry->retries)
|
||||
qp->rx_async++;
|
||||
|
||||
return;
|
||||
|
||||
err:
|
||||
ntb_memcpy_rx(entry, offset);
|
||||
qp->rx_memcpy++;
|
||||
|
@ -1397,7 +1451,7 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
|
|||
}
|
||||
|
||||
entry->rx_hdr = hdr;
|
||||
entry->index = qp->rx_index;
|
||||
entry->rx_index = qp->rx_index;
|
||||
|
||||
if (hdr->len > entry->len) {
|
||||
dev_dbg(&qp->ndev->pdev->dev,
|
||||
|
@ -1467,12 +1521,39 @@ static void ntb_transport_rxc_db(unsigned long data)
|
|||
}
|
||||
}
|
||||
|
||||
static void ntb_tx_copy_callback(void *data)
|
||||
static void ntb_tx_copy_callback(void *data,
|
||||
const struct dmaengine_result *res)
|
||||
{
|
||||
struct ntb_queue_entry *entry = data;
|
||||
struct ntb_transport_qp *qp = entry->qp;
|
||||
struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
|
||||
|
||||
/* we need to check DMA results if we are using DMA */
|
||||
if (res) {
|
||||
enum dmaengine_tx_result dma_err = res->result;
|
||||
|
||||
switch (dma_err) {
|
||||
case DMA_TRANS_READ_FAILED:
|
||||
case DMA_TRANS_WRITE_FAILED:
|
||||
entry->errors++;
|
||||
case DMA_TRANS_ABORTED:
|
||||
{
|
||||
void __iomem *offset =
|
||||
qp->tx_mw + qp->tx_max_frame *
|
||||
entry->tx_index;
|
||||
|
||||
/* resubmit via CPU */
|
||||
ntb_memcpy_tx(entry, offset);
|
||||
qp->tx_memcpy++;
|
||||
return;
|
||||
}
|
||||
|
||||
case DMA_TRANS_NOERROR:
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
|
||||
|
||||
ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
|
||||
|
@ -1507,40 +1588,25 @@ static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
|
|||
/* Ensure that the data is fully copied out before setting the flags */
|
||||
wmb();
|
||||
|
||||
ntb_tx_copy_callback(entry);
|
||||
ntb_tx_copy_callback(entry, NULL);
|
||||
}
|
||||
|
||||
static void ntb_async_tx(struct ntb_transport_qp *qp,
|
||||
struct ntb_queue_entry *entry)
|
||||
static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
|
||||
struct ntb_queue_entry *entry)
|
||||
{
|
||||
struct ntb_payload_header __iomem *hdr;
|
||||
struct dma_async_tx_descriptor *txd;
|
||||
struct dma_chan *chan = qp->tx_dma_chan;
|
||||
struct dma_device *device;
|
||||
size_t len = entry->len;
|
||||
void *buf = entry->buf;
|
||||
size_t dest_off, buff_off;
|
||||
struct dmaengine_unmap_data *unmap;
|
||||
dma_addr_t dest;
|
||||
dma_cookie_t cookie;
|
||||
void __iomem *offset;
|
||||
size_t len = entry->len;
|
||||
void *buf = entry->buf;
|
||||
int retries = 0;
|
||||
|
||||
offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
|
||||
hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
|
||||
entry->tx_hdr = hdr;
|
||||
|
||||
iowrite32(entry->len, &hdr->len);
|
||||
iowrite32((u32)qp->tx_pkts, &hdr->ver);
|
||||
|
||||
if (!chan)
|
||||
goto err;
|
||||
|
||||
if (len < copy_bytes)
|
||||
goto err;
|
||||
|
||||
device = chan->device;
|
||||
dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
|
||||
dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
|
||||
buff_off = (size_t)buf & ~PAGE_MASK;
|
||||
dest_off = (size_t)dest & ~PAGE_MASK;
|
||||
|
||||
|
@ -1560,8 +1626,9 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
|
|||
unmap->to_cnt = 1;
|
||||
|
||||
for (retries = 0; retries < DMA_RETRIES; retries++) {
|
||||
txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0],
|
||||
len, DMA_PREP_INTERRUPT);
|
||||
txd = device->device_prep_dma_memcpy(chan, dest,
|
||||
unmap->addr[0], len,
|
||||
DMA_PREP_INTERRUPT);
|
||||
if (txd)
|
||||
break;
|
||||
|
||||
|
@ -1574,7 +1641,7 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
|
|||
goto err_get_unmap;
|
||||
}
|
||||
|
||||
txd->callback = ntb_tx_copy_callback;
|
||||
txd->callback_result = ntb_tx_copy_callback;
|
||||
txd->callback_param = entry;
|
||||
dma_set_unmap(txd, unmap);
|
||||
|
||||
|
@ -1585,13 +1652,47 @@ static void ntb_async_tx(struct ntb_transport_qp *qp,
|
|||
dmaengine_unmap_put(unmap);
|
||||
|
||||
dma_async_issue_pending(chan);
|
||||
qp->tx_async++;
|
||||
|
||||
return;
|
||||
return 0;
|
||||
err_set_unmap:
|
||||
dmaengine_unmap_put(unmap);
|
||||
err_get_unmap:
|
||||
dmaengine_unmap_put(unmap);
|
||||
err:
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
static void ntb_async_tx(struct ntb_transport_qp *qp,
|
||||
struct ntb_queue_entry *entry)
|
||||
{
|
||||
struct ntb_payload_header __iomem *hdr;
|
||||
struct dma_chan *chan = qp->tx_dma_chan;
|
||||
void __iomem *offset;
|
||||
int res;
|
||||
|
||||
entry->tx_index = qp->tx_index;
|
||||
offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
|
||||
hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
|
||||
entry->tx_hdr = hdr;
|
||||
|
||||
iowrite32(entry->len, &hdr->len);
|
||||
iowrite32((u32)qp->tx_pkts, &hdr->ver);
|
||||
|
||||
if (!chan)
|
||||
goto err;
|
||||
|
||||
if (entry->len < copy_bytes)
|
||||
goto err;
|
||||
|
||||
res = ntb_async_tx_submit(qp, entry);
|
||||
if (res < 0)
|
||||
goto err;
|
||||
|
||||
if (!entry->retries)
|
||||
qp->tx_async++;
|
||||
|
||||
return;
|
||||
|
||||
err:
|
||||
ntb_memcpy_tx(entry, offset);
|
||||
qp->tx_memcpy++;
|
||||
|
@ -1928,6 +2029,9 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
|
|||
entry->buf = data;
|
||||
entry->len = len;
|
||||
entry->flags = 0;
|
||||
entry->retries = 0;
|
||||
entry->errors = 0;
|
||||
entry->rx_index = 0;
|
||||
|
||||
ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
|
||||
|
||||
|
@ -1970,6 +2074,9 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
|
|||
entry->buf = data;
|
||||
entry->len = len;
|
||||
entry->flags = 0;
|
||||
entry->errors = 0;
|
||||
entry->retries = 0;
|
||||
entry->tx_index = 0;
|
||||
|
||||
rc = ntb_process_tx(qp, entry);
|
||||
if (rc)
|
||||
|
|
|
@@ -441,6 +441,21 @@ typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
 
+enum dmaengine_tx_result {
+	DMA_TRANS_NOERROR = 0,		/* SUCCESS */
+	DMA_TRANS_READ_FAILED,		/* Source DMA read failed */
+	DMA_TRANS_WRITE_FAILED,		/* Destination DMA write failed */
+	DMA_TRANS_ABORTED,		/* Op never submitted / aborted */
+};
+
+struct dmaengine_result {
+	enum dmaengine_tx_result result;
+	u32 residue;
+};
+
+typedef void (*dma_async_tx_callback_result)(void *dma_async_param,
+				const struct dmaengine_result *result);
+
 struct dmaengine_unmap_data {
 	u8 map_cnt;
 	u8 to_cnt;

@@ -478,6 +493,7 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	int (*desc_free)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
+	dma_async_tx_callback_result callback_result;
 	void *callback_param;
 	struct dmaengine_unmap_data *unmap;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
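As a rough illustration of the error path these new types enable on the provider side (mirroring the ioat changes above), a hypothetical helper might report a failed transfer like this; my_complete_with_error and its arguments are invented for the sketch:

#include <linux/dmaengine.h>

#include "dmaengine.h"		/* dmaengine_desc_get_callback_invoke() */

static void my_complete_with_error(struct dma_async_tx_descriptor *txd,
				   bool read_failed, u32 bytes_left)
{
	struct dmaengine_result res;

	res.result = read_failed ? DMA_TRANS_READ_FAILED
				 : DMA_TRANS_WRITE_FAILED;
	res.residue = bytes_left;

	dma_cookie_complete(txd);
	dma_descriptor_unmap(txd);
	dmaengine_desc_get_callback_invoke(txd, &res);
	/* Don't report the same descriptor twice. */
	txd->callback = NULL;
	txd->callback_result = NULL;
}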