Merge branch 'for-linus' into next

Conflicts:
        drivers/dma/edma.c

Moved the memory leak fix post merge

Signed-off-by: Vinod Koul <vinod.koul@intel.com>

commit b967aecf17
12 changed files with 254 additions and 135 deletions

@@ -28,7 +28,7 @@ The three cells in order are:
 dependent:
 - bit 7-0: peripheral identifier for the hardware handshaking interface. The
   identifier can be different for tx and rx.
-- bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 1 for ASAP.
+- bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 2 for ASAP.
 
 Example:
 

@@ -2133,8 +2133,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
         writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
         writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
 
-        ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
-                          DRIVER_NAME, pl08x);
+        ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
         if (ret) {
                 dev_err(&adev->dev, "%s failed to request interrupt %d\n",
                         __func__, adev->irq[0]);

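A note on this and the matching IRQF_DISABLED removals in the coh901318, k3dma, mmp_pdma, mmp_tdma and shdma hunks further down: the flag had long been a no-op, since the kernel already runs handlers with interrupts disabled, so callers simply pass 0 unless a real flag such as IRQF_SHARED is needed. A minimal sketch of the resulting pattern, with a placeholder handler, name and cookie that are not taken from any of these drivers:

    #include <linux/interrupt.h>

    static irqreturn_t my_irq_handler(int irq, void *data)
    {
            /* acknowledge the hardware, then report whether the IRQ was ours */
            return IRQ_HANDLED;
    }

    /* flags argument is plain 0 now that IRQF_DISABLED is meaningless */
    ret = request_irq(irq, my_irq_handler, 0, "my-driver", my_cookie);
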
@@ -2694,7 +2694,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
         if (irq < 0)
                 return irq;
 
-        err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED,
+        err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0,
                                "coh901318", base);
         if (err)
                 return err;

@@ -674,14 +674,14 @@ static void cleanup_chans(struct cppi41_dd *cdd)
         }
 }
 
-static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
 {
         struct cppi41_channel *cchan;
         int i;
         int ret;
         u32 n_chans;
 
-        ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels",
+        ret = of_property_read_u32(dev->of_node, "#dma-channels",
                         &n_chans);
         if (ret)
                 return ret;

@@ -719,7 +719,7 @@ err:
         return -ENOMEM;
 }
 
-static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
 {
         unsigned int mem_decs;
         int i;

@@ -731,7 +731,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
                 cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
                 cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));
 
-                dma_free_coherent(&pdev->dev, mem_decs, cdd->cd,
+                dma_free_coherent(dev, mem_decs, cdd->cd,
                                 cdd->descs_phys);
         }
 }

@@ -741,19 +741,19 @@ static void disable_sched(struct cppi41_dd *cdd)
         cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
 }
 
-static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd)
+static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
 {
         disable_sched(cdd);
 
-        purge_descs(pdev, cdd);
+        purge_descs(dev, cdd);
 
         cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
         cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
-        dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
+        dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
                         cdd->scratch_phys);
 }
 
-static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int init_descs(struct device *dev, struct cppi41_dd *cdd)
 {
         unsigned int desc_size;
         unsigned int mem_decs;

@@ -777,7 +777,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
                 reg |= ilog2(ALLOC_DECS_NUM) - 5;
 
                 BUILD_BUG_ON(DESCS_AREAS != 1);
-                cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs,
+                cdd->cd = dma_alloc_coherent(dev, mem_decs,
                                 &cdd->descs_phys, GFP_KERNEL);
                 if (!cdd->cd)
                         return -ENOMEM;

@@ -813,12 +813,12 @@ static void init_sched(struct cppi41_dd *cdd)
         cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
 }
 
-static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
 {
         int ret;
 
         BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
-        cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE,
+        cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
                         &cdd->scratch_phys, GFP_KERNEL);
         if (!cdd->qmgr_scratch)
                 return -ENOMEM;

@@ -827,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
         cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
         cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
 
-        ret = init_descs(pdev, cdd);
+        ret = init_descs(dev, cdd);
         if (ret)
                 goto err_td;
 

@@ -835,7 +835,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
         init_sched(cdd);
         return 0;
 err_td:
-        deinit_cpii41(pdev, cdd);
+        deinit_cppi41(dev, cdd);
         return ret;
 }
 

@@ -914,11 +914,11 @@ static const struct of_device_id cppi41_dma_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
 
-static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
+static const struct cppi_glue_infos *get_glue_info(struct device *dev)
 {
         const struct of_device_id *of_id;
 
-        of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node);
+        of_id = of_match_node(cppi41_dma_ids, dev->of_node);
         if (!of_id)
                 return NULL;
         return of_id->data;

@@ -927,11 +927,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
 static int cppi41_dma_probe(struct platform_device *pdev)
 {
         struct cppi41_dd *cdd;
+        struct device *dev = &pdev->dev;
         const struct cppi_glue_infos *glue_info;
         int irq;
         int ret;
 
-        glue_info = get_glue_info(pdev);
+        glue_info = get_glue_info(dev);
         if (!glue_info)
                 return -EINVAL;
 

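The cppi41 hunks above and below all follow one pattern: helpers that used to take a struct platform_device now take a plain struct device, and probe caches dev = &pdev->dev once. A hypothetical helper written in the same style (the helper name and property string are illustrative only, not part of this patch):

    static int my_helper(struct device *dev, struct cppi41_dd *cdd)
    {
            u32 val;

            /* OF data now comes from dev->of_node rather than pdev->dev.of_node */
            return of_property_read_u32(dev->of_node, "some-property", &val);
    }

    /* in cppi41_dma_probe(): */
    struct device *dev = &pdev->dev;

    ret = my_helper(dev, cdd);
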
@@ -946,14 +947,14 @@ static int cppi41_dma_probe(struct platform_device *pdev)
         cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
         cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
         cdd->ddev.device_control = cppi41_dma_control;
-        cdd->ddev.dev = &pdev->dev;
+        cdd->ddev.dev = dev;
         INIT_LIST_HEAD(&cdd->ddev.channels);
         cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
 
-        cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0);
-        cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1);
-        cdd->sched_mem = of_iomap(pdev->dev.of_node, 2);
-        cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3);
+        cdd->usbss_mem = of_iomap(dev->of_node, 0);
+        cdd->ctrl_mem = of_iomap(dev->of_node, 1);
+        cdd->sched_mem = of_iomap(dev->of_node, 2);
+        cdd->qmgr_mem = of_iomap(dev->of_node, 3);
 
         if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
                         !cdd->qmgr_mem) {

@@ -961,8 +962,8 @@ static int cppi41_dma_probe(struct platform_device *pdev)
                 goto err_remap;
         }
 
-        pm_runtime_enable(&pdev->dev);
-        ret = pm_runtime_get_sync(&pdev->dev);
+        pm_runtime_enable(dev);
+        ret = pm_runtime_get_sync(dev);
         if (ret)
                 goto err_get_sync;
 

@@ -970,22 +971,22 @@ static int cppi41_dma_probe(struct platform_device *pdev)
         cdd->queues_tx = glue_info->queues_tx;
         cdd->td_queue = glue_info->td_queue;
 
-        ret = init_cppi41(pdev, cdd);
+        ret = init_cppi41(dev, cdd);
         if (ret)
                 goto err_init_cppi;
 
-        ret = cppi41_add_chans(pdev, cdd);
+        ret = cppi41_add_chans(dev, cdd);
         if (ret)
                 goto err_chans;
 
-        irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+        irq = irq_of_parse_and_map(dev->of_node, 0);
         if (!irq)
                 goto err_irq;
 
         cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
 
         ret = request_irq(irq, glue_info->isr, IRQF_SHARED,
-                        dev_name(&pdev->dev), cdd);
+                        dev_name(dev), cdd);
         if (ret)
                 goto err_irq;
         cdd->irq = irq;

@@ -994,7 +995,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
         if (ret)
                 goto err_dma_reg;
 
-        ret = of_dma_controller_register(pdev->dev.of_node,
+        ret = of_dma_controller_register(dev->of_node,
                         cppi41_dma_xlate, &cpp41_dma_info);
         if (ret)
                 goto err_of;

@@ -1009,11 +1010,11 @@ err_irq:
         cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
         cleanup_chans(cdd);
 err_chans:
-        deinit_cpii41(pdev, cdd);
+        deinit_cppi41(dev, cdd);
 err_init_cppi:
-        pm_runtime_put(&pdev->dev);
+        pm_runtime_put(dev);
 err_get_sync:
-        pm_runtime_disable(&pdev->dev);
+        pm_runtime_disable(dev);
         iounmap(cdd->usbss_mem);
         iounmap(cdd->ctrl_mem);
         iounmap(cdd->sched_mem);

@@ -1033,7 +1034,7 @@ static int cppi41_dma_remove(struct platform_device *pdev)
         cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
         free_irq(cdd->irq, cdd);
         cleanup_chans(cdd);
-        deinit_cpii41(pdev, cdd);
+        deinit_cppi41(&pdev->dev, cdd);
         iounmap(cdd->usbss_mem);
         iounmap(cdd->ctrl_mem);
         iounmap(cdd->sched_mem);

@@ -1044,12 +1045,41 @@ static int cppi41_dma_remove(struct platform_device *pdev)
         return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int cppi41_suspend(struct device *dev)
+{
+        struct cppi41_dd *cdd = dev_get_drvdata(dev);
+
+        cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
+        disable_sched(cdd);
+
+        return 0;
+}
+
+static int cppi41_resume(struct device *dev)
+{
+        struct cppi41_dd *cdd = dev_get_drvdata(dev);
+        int i;
+
+        for (i = 0; i < DESCS_AREAS; i++)
+                cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
+
+        init_sched(cdd);
+        cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
+
+        return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume);
+
 static struct platform_driver cpp41_dma_driver = {
         .probe  = cppi41_dma_probe,
         .remove = cppi41_dma_remove,
         .driver = {
                 .name = "cppi41-dma-engine",
                 .owner = THIS_MODULE,
+                .pm = &cppi41_pm_ops,
                 .of_match_table = of_match_ptr(cppi41_dma_ids),
         },
 };

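The new sleep support is wired up through SIMPLE_DEV_PM_OPS, which fills in only the system-sleep callbacks of a struct dev_pm_ops and leaves them out when CONFIG_PM_SLEEP is not set, hence the #ifdef around the two handlers. A rough open-coded equivalent (a sketch, not the macro's exact expansion):

    static const struct dev_pm_ops cppi41_pm_ops_sketch = {
            /* system sleep only; no runtime-PM callbacks are provided */
            .suspend = cppi41_suspend,
            .resume  = cppi41_resume,
            /* the hibernation entry points default to the same pair */
    };
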
@@ -46,8 +46,14 @@
 #define EDMA_CHANS        64
 #endif /* CONFIG_ARCH_DAVINCI_DA8XX */
 
-/* Max of 16 segments per channel to conserve PaRAM slots */
-#define MAX_NR_SG                16
+/*
+ * Max of 20 segments per channel to conserve PaRAM slots
+ * Also note that MAX_NR_SG should be atleast the no.of periods
+ * that are required for ASoC, otherwise DMA prep calls will
+ * fail. Today davinci-pcm is the only user of this driver and
+ * requires atleast 17 slots, so we setup the default to 20.
+ */
+#define MAX_NR_SG                20
 #define EDMA_MAX_SLOTS           MAX_NR_SG
 #define EDMA_DESCRIPTORS         16
 

@@ -250,6 +256,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
         return ret;
 }
 
+/*
+ * A PaRAM set configuration abstraction used by other modes
+ * @chan: Channel who's PaRAM set we're configuring
+ * @pset: PaRAM set to initialize and setup.
+ * @src_addr: Source address of the DMA
+ * @dst_addr: Destination address of the DMA
+ * @burst: In units of dev_width, how much to send
+ * @dev_width: How much is the dev_width
+ * @dma_length: Total length of the DMA transfer
+ * @direction: Direction of the transfer
+ */
+static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+        dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
+        enum dma_slave_buswidth dev_width, unsigned int dma_length,
+        enum dma_transfer_direction direction)
+{
+        struct edma_chan *echan = to_edma_chan(chan);
+        struct device *dev = chan->device->dev;
+        int acnt, bcnt, ccnt, cidx;
+        int src_bidx, dst_bidx, src_cidx, dst_cidx;
+        int absync;
+
+        acnt = dev_width;
+        /*
+         * If the maxburst is equal to the fifo width, use
+         * A-synced transfers. This allows for large contiguous
+         * buffer transfers using only one PaRAM set.
+         */
+        if (burst == 1) {
+                /*
+                 * For the A-sync case, bcnt and ccnt are the remainder
+                 * and quotient respectively of the division of:
+                 * (dma_length / acnt) by (SZ_64K -1). This is so
+                 * that in case bcnt over flows, we have ccnt to use.
+                 * Note: In A-sync tranfer only, bcntrld is used, but it
+                 * only applies for sg_dma_len(sg) >= SZ_64K.
+                 * In this case, the best way adopted is- bccnt for the
+                 * first frame will be the remainder below. Then for
+                 * every successive frame, bcnt will be SZ_64K-1. This
+                 * is assured as bcntrld = 0xffff in end of function.
+                 */
+                absync = false;
+                ccnt = dma_length / acnt / (SZ_64K - 1);
+                bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
+                /*
+                 * If bcnt is non-zero, we have a remainder and hence an
+                 * extra frame to transfer, so increment ccnt.
+                 */
+                if (bcnt)
+                        ccnt++;
+                else
+                        bcnt = SZ_64K - 1;
+                cidx = acnt;
+        } else {
+                /*
+                 * If maxburst is greater than the fifo address_width,
+                 * use AB-synced transfers where A count is the fifo
+                 * address_width and B count is the maxburst. In this
+                 * case, we are limited to transfers of C count frames
+                 * of (address_width * maxburst) where C count is limited
+                 * to SZ_64K-1. This places an upper bound on the length
+                 * of an SG segment that can be handled.
+                 */
+                absync = true;
+                bcnt = burst;
+                ccnt = dma_length / (acnt * bcnt);
+                if (ccnt > (SZ_64K - 1)) {
+                        dev_err(dev, "Exceeded max SG segment size\n");
+                        return -EINVAL;
+                }
+                cidx = acnt * bcnt;
+        }
+
+        if (direction == DMA_MEM_TO_DEV) {
+                src_bidx = acnt;
+                src_cidx = cidx;
+                dst_bidx = 0;
+                dst_cidx = 0;
+        } else if (direction == DMA_DEV_TO_MEM) {
+                src_bidx = 0;
+                src_cidx = 0;
+                dst_bidx = acnt;
+                dst_cidx = cidx;
+        } else {
+                dev_err(dev, "%s: direction not implemented yet\n", __func__);
+                return -EINVAL;
+        }
+
+        pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+        /* Configure A or AB synchronized transfers */
+        if (absync)
+                pset->opt |= SYNCDIM;
+
+        pset->src = src_addr;
+        pset->dst = dst_addr;
+
+        pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+        pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+
+        pset->a_b_cnt = bcnt << 16 | acnt;
+        pset->ccnt = ccnt;
+        /*
+         * Only time when (bcntrld) auto reload is required is for
+         * A-sync case, and in this case, a requirement of reload value
+         * of SZ_64K-1 only is assured. 'link' is initially set to NULL
+         * and then later will be populated by edma_execute.
+         */
+        pset->link_bcntrld = 0xffffffff;
+        return absync;
+}
+
 static struct dma_async_tx_descriptor *edma_prep_slave_sg(
         struct dma_chan *chan, struct scatterlist *sgl,
         unsigned int sg_len, enum dma_transfer_direction direction,

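For reference, the A-sync branch above splits the word count dma_length / acnt across the 16-bit BCNT and CCNT fields so that neither overflows, with BCNTRLD reloading SZ_64K - 1 for every frame after the first. A standalone user-space illustration of that split (not kernel code; the buffer size and width are made up for the example):

    #include <stdio.h>

    int main(void)
    {
            unsigned int dma_length = 256 * 1024;   /* transfer size in bytes */
            unsigned int acnt = 4;                  /* dev_width in bytes */
            unsigned int sz_64k = 64 * 1024;

            unsigned int ccnt = dma_length / acnt / (sz_64k - 1);
            unsigned int bcnt = dma_length / acnt - ccnt * (sz_64k - 1);

            if (bcnt)
                    ccnt++;                 /* partial frame carries the remainder */
            else
                    bcnt = sz_64k - 1;

            /* 65536 words -> bcnt=1, ccnt=2: one 1-word frame plus one
             * 65535-word frame reloaded via BCNTRLD */
            printf("acnt=%u bcnt=%u ccnt=%u\n", acnt, bcnt, ccnt);
            return 0;
    }
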
@@ -258,23 +375,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
         struct edma_chan *echan = to_edma_chan(chan);
         struct device *dev = chan->device->dev;
         struct edma_desc *edesc;
-        dma_addr_t dev_addr;
+        dma_addr_t src_addr = 0, dst_addr = 0;
         enum dma_slave_buswidth dev_width;
         u32 burst;
         struct scatterlist *sg;
-        int acnt, bcnt, ccnt, src, dst, cidx;
-        int src_bidx, dst_bidx, src_cidx, dst_cidx;
-        int i, nslots;
+        int i, nslots, ret;
 
         if (unlikely(!echan || !sgl || !sg_len))
                 return NULL;
 
         if (direction == DMA_DEV_TO_MEM) {
-                dev_addr = echan->cfg.src_addr;
+                src_addr = echan->cfg.src_addr;
                 dev_width = echan->cfg.src_addr_width;
                 burst = echan->cfg.src_maxburst;
         } else if (direction == DMA_MEM_TO_DEV) {
-                dev_addr = echan->cfg.dst_addr;
+                dst_addr = echan->cfg.dst_addr;
                 dev_width = echan->cfg.dst_addr_width;
                 burst = echan->cfg.dst_maxburst;
         } else {

@@ -315,64 +430,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 
         /* Configure PaRAM sets for each SG */
         for_each_sg(sgl, sg, sg_len, i) {
+                /* Get address for each SG */
+                if (direction == DMA_DEV_TO_MEM)
+                        dst_addr = sg_dma_address(sg);
+                else
+                        src_addr = sg_dma_address(sg);
 
-                acnt = dev_width;
-
-                /*
-                 * If the maxburst is equal to the fifo width, use
-                 * A-synced transfers. This allows for large contiguous
-                 * buffer transfers using only one PaRAM set.
-                 */
-                if (burst == 1) {
-                        edesc->absync = false;
-                        ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
-                        bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
-                        if (bcnt)
-                                ccnt++;
-                        else
-                                bcnt = SZ_64K - 1;
-                        cidx = acnt;
-                /*
-                 * If maxburst is greater than the fifo address_width,
-                 * use AB-synced transfers where A count is the fifo
-                 * address_width and B count is the maxburst. In this
-                 * case, we are limited to transfers of C count frames
-                 * of (address_width * maxburst) where C count is limited
-                 * to SZ_64K-1. This places an upper bound on the length
-                 * of an SG segment that can be handled.
-                 */
-                } else {
-                        edesc->absync = true;
-                        bcnt = burst;
-                        ccnt = sg_dma_len(sg) / (acnt * bcnt);
-                        if (ccnt > (SZ_64K - 1)) {
-                                dev_err(dev, "Exceeded max SG segment size\n");
-                                kfree(edesc);
-                                return NULL;
-                        }
-                        cidx = acnt * bcnt;
+                ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
+                                       dst_addr, burst, dev_width,
+                                       sg_dma_len(sg), direction);
+                if (ret < 0) {
+                        kfree(edesc);
+                        return NULL;
                 }
 
-                if (direction == DMA_MEM_TO_DEV) {
-                        src = sg_dma_address(sg);
-                        dst = dev_addr;
-                        src_bidx = acnt;
-                        src_cidx = cidx;
-                        dst_bidx = 0;
-                        dst_cidx = 0;
-                } else {
-                        src = dev_addr;
-                        dst = sg_dma_address(sg);
-                        src_bidx = 0;
-                        src_cidx = 0;
-                        dst_bidx = acnt;
-                        dst_cidx = cidx;
-                }
-
-                edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
-                /* Configure A or AB synchronized transfers */
-                if (edesc->absync)
-                        edesc->pset[i].opt |= SYNCDIM;
+                edesc->absync = ret;
 
                 /* If this is the last in a current SG set of transactions,
                    enable interrupts so that next set is processed */

@@ -382,17 +454,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
                 /* If this is the last set, enable completion interrupt flag */
                 if (i == sg_len - 1)
                         edesc->pset[i].opt |= TCINTEN;
-
-                edesc->pset[i].src = src;
-                edesc->pset[i].dst = dst;
-
-                edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
-                edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;
-
-                edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
-                edesc->pset[i].ccnt = ccnt;
-                edesc->pset[i].link_bcntrld = 0xffffffff;
-
         }
 
         return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);

@@ -693,7 +693,7 @@ static int k3_dma_probe(struct platform_device *op)
 
         irq = platform_get_irq(op, 0);
         ret = devm_request_irq(&op->dev, irq,
-                        k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d);
+                        k3_dma_int_handler, 0, DRIVER_NAME, d);
         if (ret)
                 return ret;
 

@@ -798,8 +798,7 @@ static void dma_do_tasklet(unsigned long data)
                  * move the descriptors to a temporary list so we can drop
                  * the lock during the entire cleanup operation
                  */
-                list_del(&desc->node);
-                list_add(&desc->node, &chain_cleanup);
+                list_move(&desc->node, &chain_cleanup);
 
                 /*
                  * Look for the first list entry which has the ENDIRQEN flag

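list_move() folds the delete/re-add pair into a single call; its definition in include/linux/list.h is essentially:

    /* paraphrased from include/linux/list.h */
    static inline void list_move(struct list_head *list, struct list_head *head)
    {
            __list_del_entry(list);
            list_add(list, head);
    }
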
@@ -863,7 +862,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
 
         if (irq) {
                 ret = devm_request_irq(pdev->dev, irq,
-                        mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
+                        mmp_pdma_chan_handler, 0, "pdma", phy);
                 if (ret) {
                         dev_err(pdev->dev, "channel request irq fail!\n");
                         return ret;

@@ -970,7 +969,7 @@ static int mmp_pdma_probe(struct platform_device *op)
                 /* all chan share one irq, demux inside */
                 irq = platform_get_irq(op, 0);
                 ret = devm_request_irq(pdev->dev, irq,
-                        mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
+                        mmp_pdma_int_handler, 0, "pdma", pdev);
                 if (ret)
                         return ret;
         }

@@ -62,6 +62,11 @@
 #define TDCR_BURSTSZ_16B        (0x3 << 6)
 #define TDCR_BURSTSZ_32B        (0x6 << 6)
 #define TDCR_BURSTSZ_64B        (0x7 << 6)
+#define TDCR_BURSTSZ_SQU_1B     (0x5 << 6)
+#define TDCR_BURSTSZ_SQU_2B     (0x6 << 6)
+#define TDCR_BURSTSZ_SQU_4B     (0x0 << 6)
+#define TDCR_BURSTSZ_SQU_8B     (0x1 << 6)
+#define TDCR_BURSTSZ_SQU_16B    (0x3 << 6)
 #define TDCR_BURSTSZ_SQU_32B    (0x7 << 6)
 #define TDCR_BURSTSZ_128B       (0x5 << 6)
 #define TDCR_DSTDIR_MSK         (0x3 << 4)      /* Dst Direction */

@@ -228,8 +233,31 @@ static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
                         return -EINVAL;
                 }
         } else if (tdmac->type == PXA910_SQU) {
-                tdcr |= TDCR_BURSTSZ_SQU_32B;
                 tdcr |= TDCR_SSPMOD;
+
+                switch (tdmac->burst_sz) {
+                case 1:
+                        tdcr |= TDCR_BURSTSZ_SQU_1B;
+                        break;
+                case 2:
+                        tdcr |= TDCR_BURSTSZ_SQU_2B;
+                        break;
+                case 4:
+                        tdcr |= TDCR_BURSTSZ_SQU_4B;
+                        break;
+                case 8:
+                        tdcr |= TDCR_BURSTSZ_SQU_8B;
+                        break;
+                case 16:
+                        tdcr |= TDCR_BURSTSZ_SQU_16B;
+                        break;
+                case 32:
+                        tdcr |= TDCR_BURSTSZ_SQU_32B;
+                        break;
+                default:
+                        dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+                        return -EINVAL;
+                }
         }
 
         writel(tdcr, tdmac->reg_base + TDCR);

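The burst size matched by the switch above comes from the client's slave configuration. If the driver carries the maxburst value straight into tdmac->burst_sz, as the case labels suggest, a peripheral driver would pick one of the new SQU encodings roughly like this (addresses and values are illustrative only):

    struct dma_slave_config cfg = {
            .direction      = DMA_MEM_TO_DEV,
            .dst_addr       = fifo_phys_addr,       /* placeholder address */
            .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            .dst_maxburst   = 16,   /* would select TDCR_BURSTSZ_SQU_16B */
    };

    ret = dmaengine_slave_config(chan, &cfg);
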
@@ -324,7 +352,7 @@ static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
 
         if (tdmac->irq) {
                 ret = devm_request_irq(tdmac->dev, tdmac->irq,
-                        mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac);
+                        mmp_tdma_chan_handler, 0, "tdma", tdmac);
                 if (ret)
                         return ret;
         }

@@ -559,7 +587,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
         if (irq_num != chan_num) {
                 irq = platform_get_irq(pdev, 0);
                 ret = devm_request_irq(&pdev->dev, irq,
-                        mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
+                        mmp_tdma_int_handler, 0, "tdma", tdev);
                 if (ret)
                         return ret;
         }

@@ -2922,16 +2922,23 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
         amba_set_drvdata(adev, pdmac);
 
-        irq = adev->irq[0];
-        ret = request_irq(irq, pl330_irq_handler, 0,
-                        dev_name(&adev->dev), pi);
-        if (ret)
-                return ret;
+        for (i = 0; i <= AMBA_NR_IRQS; i++) {
+                irq = adev->irq[i];
+                if (irq) {
+                        ret = devm_request_irq(&adev->dev, irq,
+                                               pl330_irq_handler, 0,
+                                               dev_name(&adev->dev), pi);
+                        if (ret)
+                                return ret;
+                } else {
+                        break;
+                }
+        }
 
         pi->pcfg.periph_id = adev->periphid;
         ret = pl330_add(pi);
         if (ret)
-                goto probe_err1;
+                return ret;
 
         INIT_LIST_HEAD(&pdmac->desc_pool);
         spin_lock_init(&pdmac->pool_lock);

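Because the IRQs are now requested with devm_request_irq(), their lifetime is tied to the amba device, and the explicit free_irq() calls disappear from the probe error path and from pl330_remove() in the next hunks. The generic shape of the pattern, as a sketch with placeholder handler and cookie (and iterating with < rather than the <= used above):

    for (i = 0; i < AMBA_NR_IRQS; i++) {
            int irq = adev->irq[i];

            if (!irq)
                    break;          /* first empty slot ends the list */
            ret = devm_request_irq(&adev->dev, irq, handler, 0,
                                   dev_name(&adev->dev), data);
            if (ret)
                    return ret;     /* nothing to unwind by hand */
    }
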
@@ -3044,8 +3051,6 @@ probe_err3:
         }
 probe_err2:
         pl330_del(pi);
-probe_err1:
-        free_irq(irq, pi);
 
         return ret;
 }

@@ -3055,7 +3060,6 @@ static int pl330_remove(struct amba_device *adev)
         struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
         struct dma_pl330_chan *pch, *_p;
         struct pl330_info *pi;
-        int irq;
 
         if (!pdmac)
                 return 0;

@@ -3082,9 +3086,6 @@ static int pl330_remove(struct amba_device *adev)
 
         pl330_del(pi);
 
-        irq = adev->irq[0];
-        free_irq(irq, pi);
-
         return 0;
 }
 

@@ -685,7 +685,7 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
 static int sh_dmae_probe(struct platform_device *pdev)
 {
         const struct sh_dmae_pdata *pdata;
-        unsigned long irqflags = IRQF_DISABLED,
+        unsigned long irqflags = 0,
                 chan_flag[SH_DMAE_MAX_CHANNELS] = {};
         int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
         int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;

@@ -838,7 +838,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
                                         IORESOURCE_IRQ_SHAREABLE)
                                 chan_flag[irq_cnt] = IRQF_SHARED;
                         else
-                                chan_flag[irq_cnt] = IRQF_DISABLED;
+                                chan_flag[irq_cnt] = 0;
                         dev_dbg(&pdev->dev,
                                 "Found IRQ %d for channel %d\n",
                                 i, irq_cnt);

@@ -14,6 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/log2.h>
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/err.h>

@@ -2796,8 +2797,8 @@ static int d40_set_runtime_config(struct dma_chan *chan,
             src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
             dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
             dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
-            ((src_addr_width > 1) && (src_addr_width & 1)) ||
-            ((dst_addr_width > 1) && (dst_addr_width & 1)))
+            !is_power_of_2(src_addr_width) ||
+            !is_power_of_2(dst_addr_width))
                 return -EINVAL;
 
         cfg->src_info.data_width = src_addr_width;

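The old open-coded test only rejected odd widths greater than one, so an invalid even width such as 6 bytes slipped through; is_power_of_2(), pulled in via the new linux/log2.h include, accepts exactly 1, 2, 4 and 8 here. Its definition is essentially:

    /* paraphrased from include/linux/log2.h */
    static inline bool is_power_of_2(unsigned long n)
    {
            return (n != 0 && ((n & (n - 1)) == 0));
    }
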
@@ -1018,7 +1018,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
         return &dma_desc->txd;
 }
 
-struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
+static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
         struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
         size_t period_len, enum dma_transfer_direction direction,
         unsigned long flags, void *context)