Merge branch 'dmaengine' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM DMA engine updates from Russell King:
 "This looks scary at first glance, but what it is is:

   - a rework of the sa11x0 DMA engine driver merged during the
     previous cycle, to extract a common set of helper functions for
     DMA engine implementations.

   - conversion of amba-pl08x.c to use these helper functions.

   - addition of OMAP DMA engine driver (using these helper functions),
     and conversion of some of the OMAP DMA users to use DMA engine.

  Nothing in the helper functions is ARM specific, so I hope that other
  implementations can consolidate some of their code by making use of
  these helpers.

  This has been sitting in linux-next most of the merge cycle, and has
  been tested by several OMAP folk.  I've tested it on sa11x0 platforms,
  and given it my best shot on my broken platforms which have the
  amba-pl08x controller.

  The last point is the addition to feature-removal-schedule.txt, which
  will have a merge conflict.  Between myself and TI, we're planning to
  remove the old TI DMA implementation next year."

Fix up trivial add/add conflicts in Documentation/feature-removal-schedule.txt
and drivers/dma/{Kconfig,Makefile}

* 'dmaengine' of git://git.linaro.org/people/rmk/linux-arm: (53 commits)
  ARM: 7481/1: OMAP2+: omap2plus_defconfig: enable OMAP DMA engine
  ARM: 7464/1: mmc: omap_hsmmc: ensure probe returns error if DMA channel request fails
  Add feature removal of old OMAP private DMA implementation
  mtd: omap2: remove private DMA API implementation
  mtd: omap2: add DMA engine support
  spi: omap2-mcspi: remove private DMA API implementation
  spi: omap2-mcspi: add DMA engine support
  ARM: omap: remove mmc platform data dma_mask and initialization
  mmc: omap: remove private DMA API implementation
  mmc: omap: add DMA engine support
  mmc: omap_hsmmc: remove private DMA API implementation
  mmc: omap_hsmmc: add DMA engine support
  dmaengine: omap: add support for cyclic DMA
  dmaengine: omap: add support for setting fi
  dmaengine: omap: add support for returning residue in tx_state method
  dmaengine: add OMAP DMA engine driver
  dmaengine: sa11x0-dma: add cyclic DMA support
  dmaengine: sa11x0-dma: fix DMA residue support
  dmaengine: PL08x: ensure all descriptors are freed when channel is released
  dmaengine: PL08x: get rid of write only pool_ctr and free_txd locking
  ...
commit a6dc77254b
28 changed files with 2131 additions and 1422 deletions
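Several commits in this series convert OMAP client drivers (MMC, SPI, MTD) from the private OMAP DMA API to the generic DMA engine API. As a rough orientation, a client of the new driver requests a channel through the standard dmaengine filter mechanism, using the omap_dma_filter_fn() exported by the new drivers/dma/omap-dma.c shown further below. The wrapper function here is an illustrative sketch, not code from this series:

	#include <linux/dmaengine.h>
	#include <linux/omap-dma.h>

	/*
	 * Illustrative helper (not part of this series): request the
	 * dmaengine channel corresponding to a given OMAP DMA request
	 * line.  omap_dma_filter_fn() matches the channel whose dma_sig
	 * equals *(unsigned *)param.
	 */
	static struct dma_chan *example_request_omap_chan(unsigned int dma_req)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		return dma_request_channel(mask, omap_dma_filter_fn, &dma_req);
	}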
@@ -626,3 +626,14 @@ Why:	New drivers should use new V4L2_CAP_VIDEO_M2M capability flag
 Who:	Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 ----------------------------
+
+What:	OMAP private DMA implementation
+When:	2013
+Why:	We have a DMA engine implementation; all users should be updated
+	to use this rather than persisting with the old APIs.  The old APIs
+	block merging the old DMA engine implementation into the DMA
+	engine driver.
+Who:	Russell King <linux@arm.linux.org.uk>,
+	Santosh Shilimkar <santosh.shilimkar@ti.com>
+
+----------------------------
@@ -193,6 +193,8 @@ CONFIG_MMC_OMAP_HS=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_TWL92330=y
 CONFIG_RTC_DRV_TWL4030=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_OMAP=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_FS_XATTR is not set
@@ -54,7 +54,6 @@ static struct omap_mmc_platform_data mmc1_data = {
 	.nr_slots	= 1,
 	.init		= mmc_late_init,
 	.cleanup	= mmc_cleanup,
-	.dma_mask	= 0xffffffff,
 	.slots[0]	= {
 		.set_power	= mmc_set_power,
 		.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
@@ -36,7 +36,6 @@ static int mmc_set_power(struct device *dev, int slot, int power_on,
  */
 static struct omap_mmc_platform_data mmc1_data = {
 	.nr_slots	= 1,
-	.dma_mask	= 0xffffffff,
 	.slots[0]	= {
 		.set_power	= mmc_set_power,
 		.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
@@ -185,7 +185,6 @@ static int nokia770_mmc_get_cover_state(struct device *dev, int slot)
 
 static struct omap_mmc_platform_data nokia770_mmc2_data = {
 	.nr_slots	= 1,
-	.dma_mask	= 0xffffffff,
 	.max_freq	= 12000000,
 	.slots[0]	= {
 		.set_power	= nokia770_mmc_set_power,
@@ -468,7 +468,6 @@ static struct omap_mmc_platform_data mmc1_data = {
 	.cleanup	= n8x0_mmc_cleanup,
 	.shutdown	= n8x0_mmc_shutdown,
 	.max_freq	= 24000000,
-	.dma_mask	= 0xffffffff,
 	.slots[0]	= {
 		.wires		= 4,
 		.set_power	= n8x0_mmc_set_power,
@@ -315,7 +315,6 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
 	mmc->slots[0].caps = c->caps;
 	mmc->slots[0].pm_caps = c->pm_caps;
 	mmc->slots[0].internal_clock = !c->ext_clock;
-	mmc->dma_mask = 0xffffffff;
 	mmc->max_freq = c->max_freq;
 	if (cpu_is_omap44xx())
 		mmc->reg_offset = OMAP4_MMC_REG_OFFSET;
@@ -120,182 +120,156 @@ struct pl08x_channel_data spear300_dma_info[] = {
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart0_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp0_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp0_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "i2c_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "i2c_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "irda",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "adc",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "to_jpeg",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "from_jpeg",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras0_rx",
 		.min_signal = 0,
 		.max_signal = 0,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras0_tx",
 		.min_signal = 1,
 		.max_signal = 1,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras1_rx",
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras1_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras2_rx",
 		.min_signal = 4,
 		.max_signal = 4,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras2_tx",
 		.min_signal = 5,
 		.max_signal = 5,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras3_rx",
 		.min_signal = 6,
 		.max_signal = 6,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras3_tx",
 		.min_signal = 7,
 		.max_signal = 7,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras4_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras4_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras5_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras5_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras6_rx",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras6_tx",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras7_rx",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras7_tx",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	},
 };
@@ -205,182 +205,156 @@ struct pl08x_channel_data spear310_dma_info[] = {
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart0_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp0_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp0_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "i2c_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "i2c_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "irda",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "adc",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "to_jpeg",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "from_jpeg",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart1_rx",
 		.min_signal = 0,
 		.max_signal = 0,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart1_tx",
 		.min_signal = 1,
 		.max_signal = 1,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart2_rx",
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart2_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart3_rx",
 		.min_signal = 4,
 		.max_signal = 4,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart3_tx",
 		.min_signal = 5,
 		.max_signal = 5,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart4_rx",
 		.min_signal = 6,
 		.max_signal = 6,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart4_tx",
 		.min_signal = 7,
 		.max_signal = 7,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart5_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart5_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras5_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras5_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras6_rx",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras6_tx",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras7_rx",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras7_tx",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	},
 };
@@ -213,182 +213,156 @@ struct pl08x_channel_data spear320_dma_info[] = {
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart0_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp0_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp0_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "i2c0_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "i2c0_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "irda",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "adc",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "to_jpeg",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "from_jpeg",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp1_rx",
 		.min_signal = 0,
 		.max_signal = 0,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ssp1_tx",
 		.min_signal = 1,
 		.max_signal = 1,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ssp2_rx",
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ssp2_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "uart1_rx",
 		.min_signal = 4,
 		.max_signal = 4,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "uart1_tx",
 		.min_signal = 5,
 		.max_signal = 5,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "uart2_rx",
 		.min_signal = 6,
 		.max_signal = 6,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "uart2_tx",
 		.min_signal = 7,
 		.max_signal = 7,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "i2c1_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "i2c1_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "i2c2_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "i2c2_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "i2s_rx",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "i2s_tx",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "rs485_rx",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "rs485_tx",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	},
 };
@@ -46,7 +46,8 @@ struct pl022_ssp_controller pl022_plat_data = {
 struct pl08x_platform_data pl080_plat_data = {
 	.memcpy_channel = {
 		.bus_id = "memcpy",
-		.cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
+		.cctl_memcpy =
+			(PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
 			PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \
 			PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \
 			PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \
@@ -36,336 +36,288 @@ static struct pl08x_channel_data spear600_dma_info[] = {
 		.min_signal = 0,
 		.max_signal = 0,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp1_tx",
 		.min_signal = 1,
 		.max_signal = 1,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart0_rx",
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart0_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart1_rx",
 		.min_signal = 4,
 		.max_signal = 4,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "uart1_tx",
 		.min_signal = 5,
 		.max_signal = 5,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp2_rx",
 		.min_signal = 6,
 		.max_signal = 6,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ssp2_tx",
 		.min_signal = 7,
 		.max_signal = 7,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ssp0_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ssp0_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "i2c_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "i2c_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "irda",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "adc",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "to_jpeg",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "from_jpeg",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 0,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras0_rx",
 		.min_signal = 0,
 		.max_signal = 0,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras0_tx",
 		.min_signal = 1,
 		.max_signal = 1,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras1_rx",
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras1_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras2_rx",
 		.min_signal = 4,
 		.max_signal = 4,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras2_tx",
 		.min_signal = 5,
 		.max_signal = 5,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras3_rx",
 		.min_signal = 6,
 		.max_signal = 6,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras3_tx",
 		.min_signal = 7,
 		.max_signal = 7,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras4_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras4_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras5_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras5_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras6_rx",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras6_tx",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras7_rx",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ras7_tx",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 1,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB1,
 	}, {
 		.bus_id = "ext0_rx",
 		.min_signal = 0,
 		.max_signal = 0,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext0_tx",
 		.min_signal = 1,
 		.max_signal = 1,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext1_rx",
 		.min_signal = 2,
 		.max_signal = 2,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext1_tx",
 		.min_signal = 3,
 		.max_signal = 3,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext2_rx",
 		.min_signal = 4,
 		.max_signal = 4,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext2_tx",
 		.min_signal = 5,
 		.max_signal = 5,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext3_rx",
 		.min_signal = 6,
 		.max_signal = 6,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext3_tx",
 		.min_signal = 7,
 		.max_signal = 7,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext4_rx",
 		.min_signal = 8,
 		.max_signal = 8,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext4_tx",
 		.min_signal = 9,
 		.max_signal = 9,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext5_rx",
 		.min_signal = 10,
 		.max_signal = 10,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext5_tx",
 		.min_signal = 11,
 		.max_signal = 11,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext6_rx",
 		.min_signal = 12,
 		.max_signal = 12,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext6_tx",
 		.min_signal = 13,
 		.max_signal = 13,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext7_rx",
 		.min_signal = 14,
 		.max_signal = 14,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	}, {
 		.bus_id = "ext7_tx",
 		.min_signal = 15,
 		.max_signal = 15,
 		.muxval = 2,
-		.cctl = 0,
 		.periph_buses = PL08X_AHB2,
 	},
 };
@@ -373,7 +325,8 @@ static struct pl08x_channel_data spear600_dma_info[] = {
 struct pl08x_platform_data pl080_plat_data = {
 	.memcpy_channel = {
 		.bus_id = "memcpy",
-		.cctl = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
+		.cctl_memcpy =
+			(PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT | \
 			PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT | \
 			PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT | \
 			PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT | \
@@ -81,8 +81,6 @@ struct omap_mmc_platform_data {
 	/* Return context loss count due to PM states changing */
 	int (*get_context_loss_count)(struct device *dev);
 
-	u64 dma_mask;
-
 	/* Integrating attributes from the omap_hwmod layer */
 	u8 controller_flags;
 
@@ -14,8 +14,8 @@
 #ifndef __PLAT_PL080_H
 #define __PLAT_PL080_H
 
-struct pl08x_dma_chan;
-int pl080_get_signal(struct pl08x_dma_chan *ch);
-void pl080_put_signal(struct pl08x_dma_chan *ch);
+struct pl08x_channel_data;
+int pl080_get_signal(const struct pl08x_channel_data *cd);
+void pl080_put_signal(const struct pl08x_channel_data *cd, int signal);
 
 #endif /* __PLAT_PL080_H */
@@ -27,9 +27,8 @@ struct {
 	unsigned char val;
 } signals[16] = {{0, 0}, };
 
-int pl080_get_signal(struct pl08x_dma_chan *ch)
+int pl080_get_signal(const struct pl08x_channel_data *cd)
 {
-	const struct pl08x_channel_data *cd = ch->cd;
 	unsigned int signal = cd->min_signal, val;
 	unsigned long flags;
 

@@ -63,18 +62,17 @@ int pl080_get_signal(struct pl08x_dma_chan *ch)
 	return signal;
 }
 
-void pl080_put_signal(struct pl08x_dma_chan *ch)
+void pl080_put_signal(const struct pl08x_channel_data *cd, int signal)
 {
-	const struct pl08x_channel_data *cd = ch->cd;
 	unsigned long flags;
 
 	spin_lock_irqsave(&lock, flags);
 
 	/* if signal is not used */
-	if (!signals[cd->min_signal].busy)
+	if (!signals[signal].busy)
 		BUG();
 
-	signals[cd->min_signal].busy--;
+	signals[signal].busy--;
 
 	spin_unlock_irqrestore(&lock, flags);
 }
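A hedged sketch of the caller-side consequence of the interface change above (function and variable names here are illustrative, not from this diff): the mux signal is now derived from the channel data alone and must be passed back explicitly on release:

	#include <plat/pl080.h>

	/*
	 * Illustrative only: acquire, use and release a DMA request signal
	 * under the new interface.  'cd' would come from platform data.
	 */
	static int example_use_signal(const struct pl08x_channel_data *cd)
	{
		int signal = pl080_get_signal(cd);

		if (signal < 0)
			return signal;	/* no free mux setting in this range */

		/* ... run the transfer using 'signal' ... */

		pl080_put_signal(cd, signal);	/* hand the signal back */
		return 0;
	}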
@@ -53,6 +53,7 @@ config AMBA_PL08X
 	bool "ARM PrimeCell PL080 or PL081 support"
 	depends on ARM_AMBA && EXPERIMENTAL
 	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
 	help
 	  Platform has a PL08x DMAC device
 	  which can provide DMA engine support

@@ -269,6 +270,7 @@ config DMA_SA11X0
 	tristate "SA-11x0 DMA support"
 	depends on ARCH_SA1100
 	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
 	help
 	  Support the DMA engine found on Intel StrongARM SA-1100 and
 	  SA-1110 SoCs. This DMA engine can only be used with on-chip

@@ -284,9 +286,18 @@ config MMP_TDMA
 
 	  Say Y here if you enabled MMP ADMA, otherwise say N.
 
+config DMA_OMAP
+	tristate "OMAP DMA support"
+	depends on ARCH_OMAP
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
 config DMA_ENGINE
 	bool
 
+config DMA_VIRTUAL_CHANNELS
+	tristate
+
 comment "DMA Clients"
 	depends on DMA_ENGINE
 
@@ -2,6 +2,7 @@ ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
 ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
 
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
+obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
 obj-$(CONFIG_DMATEST) += dmatest.o

@@ -30,3 +31,4 @@ obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
+obj-$(CONFIG_DMA_OMAP) += omap-dma.o
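The virt-dma.o object built here is the "common set of helper functions" the pull message refers to. The general shape of a driver using these helpers, as the new OMAP driver below does, is roughly the following sketch (the my_* names are placeholders; the vchan_* calls and embedded structures are the ones this series introduces):

	#include <linux/dmaengine.h>
	#include <linux/slab.h>

	#include "virt-dma.h"	/* helpers added by this series */

	/* A driver embeds the virt-dma types and lets the helpers manage
	 * cookies and the submitted/issued descriptor lists. */
	struct my_desc {
		struct virt_dma_desc vd;
		/* ... hardware-specific fields ... */
	};

	struct my_chan {
		struct virt_dma_chan vc;
		/* ... hardware-specific fields ... */
	};

	static void my_desc_free(struct virt_dma_desc *vd)
	{
		kfree(container_of(vd, struct my_desc, vd));
	}

	static void my_chan_setup(struct my_chan *c, struct dma_device *dd)
	{
		c->vc.desc_free = my_desc_free;	/* helpers free descriptors via this */
		vchan_init(&c->vc, dd);		/* registers vc.chan with the device */
	}

Prep routines then return vchan_tx_prep(&c->vc, &d->vd, flags), and the completion path calls vchan_cookie_complete(&d->vd), or vchan_cyclic_callback(&d->vd) for cyclic transfers, exactly as the drivers below do.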
File diff suppressed because it is too large

drivers/dma/omap-dma.c (new file, 669 lines)
@@ -0,0 +1,669 @@
/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"
#include <plat/dma.h>

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;

	struct dma_slave_config	cfg;
	unsigned dma_sig;
	bool cyclic;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* OMAP_DMA_DATA_TYPE_xxx */
	uint8_t sync_mode;	/* OMAP_DMA_SYNC_xxx */
	uint8_t sync_type;	/* OMAP_DMA_xxx_SYNC* */
	uint8_t periph_port;	/* Peripheral port */

	unsigned sglen;
	struct omap_sg sg[0];
};

static const unsigned es_bytes[] = {
	[OMAP_DMA_DATA_TYPE_S8] = 1,
	[OMAP_DMA_DATA_TYPE_S16] = 2,
	[OMAP_DMA_DATA_TYPE_S32] = 4,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
	else
		omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);

	omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
		d->sync_mode, c->dma_sig, d->sync_type);

	omap_start_dma(c->dma_ch);
}

static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_src_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
	else
		omap_set_dma_dest_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);

	omap_dma_start_sg(c, d, 0);
}

static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_get_dma_src_pos(c->dma_ch);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_get_dma_dst_pos(c->dma_ch);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		struct omap_dmadev *d = to_omap_dma_dev(chan->device);
		spin_lock(&d->lock);
		if (list_empty(&c->node))
			list_add_tail(&c->node, &d->pending);
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;
	d->sync_mode = OMAP_DMA_SYNC_FRAME;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_TIPB;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sync_mode = OMAP_DMA_SYNC_PACKET;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_MPUI;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	if (!c->cyclic) {
		c->cyclic = true;
		omap_dma_link_lch(c->dma_ch, c->dma_ch);
		omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
		omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
	}

	if (!cpu_class_is_omap1()) {
		omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
		omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
	}

	return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
}

static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_stop_dma() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		omap_stop_dma(c->dma_ch);
	}

	if (c->cyclic) {
		c->cyclic = false;
		omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
	/* FIXME: not supported by platform private API */
	return -EINVAL;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* FIXME: not supported by platform private API */
	return -EINVAL;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
	kfree(od);
}

static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	int rc, i;

	od = kzalloc(sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
	} else {
		platform_set_drvdata(pdev, od);
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name	= "omap-dma-engine",
		.owner	= THIS_MODULE,
	},
};

bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static struct platform_device *pdev;

static const struct platform_device_info omap_dma_dev_info = {
	.name = "omap-dma-engine",
	.id = -1,
	.dma_mask = DMA_BIT_MASK(32),
};

static int omap_dma_init(void)
{
	int rc = platform_driver_register(&omap_dma_driver);

	if (rc == 0) {
		pdev = platform_device_register_full(&omap_dma_dev_info);
		if (IS_ERR(pdev)) {
			platform_driver_unregister(&omap_dma_driver);
			rc = PTR_ERR(pdev);
		}
	}
	return rc;
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_device_unregister(pdev);
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");
@@ -21,6 +21,8 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
+#include "virt-dma.h"
+
 #define NR_PHY_CHAN	6
 #define DMA_ALIGN	3
 #define DMA_MAX_SIZE	0x1fff

@@ -72,12 +74,13 @@ struct sa11x0_dma_sg {
 };
 
 struct sa11x0_dma_desc {
-	struct dma_async_tx_descriptor tx;
+	struct virt_dma_desc	vd;
+
 	u32			ddar;
 	size_t			size;
+	unsigned		period;
+	bool			cyclic;
 
-	/* maybe protected by c->lock */
-	struct list_head	node;
 	unsigned		sglen;
 	struct sa11x0_dma_sg	sg[0];
 };

@@ -85,15 +88,11 @@ struct sa11x0_dma_desc {
 struct sa11x0_dma_phy;
 
 struct sa11x0_dma_chan {
-	struct dma_chan		chan;
-	spinlock_t		lock;
-	dma_cookie_t		lc;
+	struct virt_dma_chan	vc;
 
-	/* protected by c->lock */
+	/* protected by c->vc.lock */
 	struct sa11x0_dma_phy	*phy;
 	enum dma_status		status;
-	struct list_head	desc_submitted;
-	struct list_head	desc_issued;
 
 	/* protected by d->lock */
 	struct list_head	node;

@@ -109,7 +108,7 @@ struct sa11x0_dma_phy {
 
 	struct sa11x0_dma_chan	*vchan;
 
-	/* Protected by c->lock */
+	/* Protected by c->vc.lock */
 	unsigned		sg_load;
 	struct sa11x0_dma_desc	*txd_load;
 	unsigned		sg_done;

@@ -127,13 +126,12 @@ struct sa11x0_dma_dev {
 	spinlock_t		lock;
 	struct tasklet_struct	task;
 	struct list_head	chan_pending;
-	struct list_head	desc_complete;
 	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
 };
 
 static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
 {
-	return container_of(chan, struct sa11x0_dma_chan, chan);
+	return container_of(chan, struct sa11x0_dma_chan, vc.chan);
 }
 
 static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)

@@ -141,27 +139,26 @@ static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
 	return container_of(dmadev, struct sa11x0_dma_dev, slave);
 }
 
-static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx)
-{
-	return container_of(tx, struct sa11x0_dma_desc, tx);
-}
-
 static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
 {
-	if (list_empty(&c->desc_issued))
-		return NULL;
+	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
 
-	return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node);
+	return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
+}
+
+static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
+{
+	kfree(container_of(vd, struct sa11x0_dma_desc, vd));
 }
 
 static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
 {
-	list_del(&txd->node);
+	list_del(&txd->vd.node);
 	p->txd_load = txd;
 	p->sg_load = 0;
 
 	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
-		p->num, txd, txd->tx.cookie, txd->ddar);
+		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
 }
 
 static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,

@@ -183,19 +180,24 @@ static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
 		return;
 
 	if (p->sg_load == txd->sglen) {
-		struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
+		if (!txd->cyclic) {
+			struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);
 
-		/*
-		 * We have reached the end of the current descriptor.
-		 * Peek at the next descriptor, and if compatible with
-		 * the current, start processing it.
-		 */
-		if (txn && txn->ddar == txd->ddar) {
-			txd = txn;
-			sa11x0_dma_start_desc(p, txn);
+			/*
+			 * We have reached the end of the current descriptor.
+			 * Peek at the next descriptor, and if compatible with
+			 * the current, start processing it.
+			 */
+			if (txn && txn->ddar == txd->ddar) {
+				txd = txn;
+				sa11x0_dma_start_desc(p, txn);
+			} else {
+				p->txd_load = NULL;
+				return;
+			}
 		} else {
-			p->txd_load = NULL;
-			return;
+			/* Cyclic: reset back to beginning */
+			p->sg_load = 0;
 		}
 	}
 

@@ -229,21 +231,21 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
 	struct sa11x0_dma_desc *txd = p->txd_done;
 
 	if (++p->sg_done == txd->sglen) {
-		struct sa11x0_dma_dev *d = p->dev;
+		if (!txd->cyclic) {
+			vchan_cookie_complete(&txd->vd);
 
-		dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n",
-			p->num, p->txd_done, p->txd_done->tx.cookie);
+			p->sg_done = 0;
+			p->txd_done = p->txd_load;
 
-		c->lc = txd->tx.cookie;
+			if (!p->txd_done)
+				tasklet_schedule(&p->dev->task);
+		} else {
+			if ((p->sg_done % txd->period) == 0)
+				vchan_cyclic_callback(&txd->vd);
 
-		spin_lock(&d->lock);
-		list_add_tail(&txd->node, &d->desc_complete);
-		spin_unlock(&d->lock);
-
-		p->sg_done = 0;
-		p->txd_done = p->txd_load;
-
-		tasklet_schedule(&d->task);
+			/* Cyclic: reset back to beginning */
+			p->sg_done = 0;
+		}
 	}
 
 	sa11x0_dma_start_sg(p, c);

@@ -280,7 +282,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
 	if (c) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&c->lock, flags);
+		spin_lock_irqsave(&c->vc.lock, flags);
 		/*
 		 * Now that we're holding the lock, check that the vchan
 		 * really is associated with this pchan before touching the

@@ -294,7 +296,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
 			if (dcsr & DCSR_DONEB)
 				sa11x0_dma_complete(p, c);
 		}
-		spin_unlock_irqrestore(&c->lock, flags);
+		spin_unlock_irqrestore(&c->vc.lock, flags);
 	}
 
 	return IRQ_HANDLED;

@@ -332,28 +334,15 @@ static void sa11x0_dma_tasklet(unsigned long arg)
 	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
 	struct sa11x0_dma_phy *p;
 	struct sa11x0_dma_chan *c;
-	struct sa11x0_dma_desc *txd, *txn;
-	LIST_HEAD(head);
 	unsigned pch, pch_alloc = 0;
 
 	dev_dbg(d->slave.dev, "tasklet enter\n");
 
-	/* Get the completed tx descriptors */
-	spin_lock_irq(&d->lock);
-	list_splice_init(&d->desc_complete, &head);
-	spin_unlock_irq(&d->lock);
-
-	list_for_each_entry(txd, &head, node) {
-		c = to_sa11x0_dma_chan(txd->tx.chan);
-
-		dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n",
-			c, txd, txd->tx.cookie);
-
-		spin_lock_irq(&c->lock);
+	list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
+		spin_lock_irq(&c->vc.lock);
 		p = c->phy;
-		if (p) {
-			if (!p->txd_done)
-				sa11x0_dma_start_txd(c);
+		if (p && !p->txd_done) {
+			sa11x0_dma_start_txd(c);
 			if (!p->txd_done) {
 				/* No current txd associated with this channel */
 				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

@@ -363,7 +352,7 @@ static void sa11x0_dma_tasklet(unsigned long arg)
 				p->vchan = NULL;
 			}
 		}
-		spin_unlock_irq(&c->lock);
+		spin_unlock_irq(&c->vc.lock);
 	}
 
 	spin_lock_irq(&d->lock);

@@ -380,7 +369,7 @@ static void sa11x0_dma_tasklet(unsigned long arg)
 			/* Mark this channel allocated */
 			p->vchan = c;
 
-			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c);
+			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
 		}
 	}
 	spin_unlock_irq(&d->lock);

@@ -390,42 +379,18 @@ static void sa11x0_dma_tasklet(unsigned long arg)
 			p = &d->phy[pch];
 			c = p->vchan;
 
-			spin_lock_irq(&c->lock);
+			spin_lock_irq(&c->vc.lock);
 			c->phy = p;
 
 			sa11x0_dma_start_txd(c);
-			spin_unlock_irq(&c->lock);
+			spin_unlock_irq(&c->vc.lock);
 		}
 	}
 
-	/* Now free the completed tx descriptor, and call their callbacks */
-	list_for_each_entry_safe(txd, txn, &head, node) {
-		dma_async_tx_callback callback = txd->tx.callback;
-		void *callback_param = txd->tx.callback_param;
-
-		dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n",
-			txd, txd->tx.cookie);
-
-		kfree(txd);
-
-		if (callback)
-			callback(callback_param);
-	}
-
 	dev_dbg(d->slave.dev, "tasklet exit\n");
 }
 
 
-static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head)
-{
-	struct sa11x0_dma_desc *txd, *txn;
-
-	list_for_each_entry_safe(txd, txn, head, node) {
-		dev_dbg(d->slave.dev, "txd %p: freeing\n", txd);
-		kfree(txd);
-	}
-}
-
 static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
 {
 	return 0;

@@ -436,18 +401,12 @@ static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
 	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
 	unsigned long flags;
-	LIST_HEAD(head);
 
-	spin_lock_irqsave(&c->lock, flags);
-	spin_lock(&d->lock);
+	spin_lock_irqsave(&d->lock, flags);
 	list_del_init(&c->node);
-	spin_unlock(&d->lock);
+	spin_unlock_irqrestore(&d->lock, flags);
 
-	list_splice_tail_init(&c->desc_submitted, &head);
-	list_splice_tail_init(&c->desc_issued, &head);
-	spin_unlock_irqrestore(&c->lock, flags);
-
-	sa11x0_dma_desc_free(d, &head);
+	vchan_free_chan_resources(&c->vc);
 }
 
 static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)

@@ -472,33 +431,47 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
|
||||
struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
|
||||
struct sa11x0_dma_phy *p;
|
||||
struct sa11x0_dma_desc *txd;
|
||||
dma_cookie_t last_used, last_complete;
|
||||
struct virt_dma_desc *vd;
|
||||
unsigned long flags;
|
||||
enum dma_status ret;
|
||||
size_t bytes = 0;
|
||||
|
||||
last_used = c->chan.cookie;
|
||||
last_complete = c->lc;
|
||||
|
||||
ret = dma_async_is_complete(cookie, last_complete, last_used);
|
||||
if (ret == DMA_SUCCESS) {
|
||||
dma_set_tx_state(state, last_complete, last_used, 0);
|
||||
ret = dma_cookie_status(&c->vc.chan, cookie, state);
|
||||
if (ret == DMA_SUCCESS)
|
||||
return ret;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&c->lock, flags);
|
||||
if (!state)
|
||||
return c->status;
|
||||
|
||||
spin_lock_irqsave(&c->vc.lock, flags);
|
||||
p = c->phy;
|
||||
ret = c->status;
|
||||
if (p) {
|
||||
dma_addr_t addr = sa11x0_dma_pos(p);
|
||||
|
||||
dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
|
||||
/*
|
||||
* If the cookie is on our issue queue, then the residue is
|
||||
* its total size.
|
||||
*/
|
||||
vd = vchan_find_desc(&c->vc, cookie);
|
||||
if (vd) {
|
||||
state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
|
||||
} else if (!p) {
|
||||
state->residue = 0;
|
||||
} else {
|
||||
struct sa11x0_dma_desc *txd;
|
||||
size_t bytes = 0;
|
||||
|
||||
txd = p->txd_done;
|
||||
if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
|
||||
txd = p->txd_done;
|
||||
else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
|
||||
txd = p->txd_load;
|
||||
else
|
||||
txd = NULL;
|
||||
|
||||
ret = c->status;
|
||||
if (txd) {
|
||||
dma_addr_t addr = sa11x0_dma_pos(p);
|
||||
unsigned i;
|
||||
|
||||
dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);
|
||||
|
||||
for (i = 0; i < txd->sglen; i++) {
|
||||
dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
|
||||
i, txd->sg[i].addr, txd->sg[i].len);
|
||||
|
@ -521,17 +494,11 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
|
|||
bytes += txd->sg[i].len;
|
||||
}
|
||||
}
|
||||
if (txd != p->txd_load && p->txd_load)
|
||||
bytes += p->txd_load->size;
|
||||
state->residue = bytes;
|
||||
}
|
||||
list_for_each_entry(txd, &c->desc_issued, node) {
|
||||
bytes += txd->size;
|
||||
}
|
||||
spin_unlock_irqrestore(&c->lock, flags);
|
||||
spin_unlock_irqrestore(&c->vc.lock, flags);
|
||||
|
||||
dma_set_tx_state(state, last_complete, last_used, bytes);
|
||||
|
||||
dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes);
|
||||
dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -547,40 +514,20 @@ static void sa11x0_dma_issue_pending(struct dma_chan *chan)
|
|||
struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&c->lock, flags);
|
||||
list_splice_tail_init(&c->desc_submitted, &c->desc_issued);
|
||||
if (!list_empty(&c->desc_issued)) {
|
||||
spin_lock(&d->lock);
|
||||
if (!c->phy && list_empty(&c->node)) {
|
||||
list_add_tail(&c->node, &d->chan_pending);
|
||||
tasklet_schedule(&d->task);
|
||||
dev_dbg(d->slave.dev, "vchan %p: issued\n", c);
|
||||
spin_lock_irqsave(&c->vc.lock, flags);
|
||||
if (vchan_issue_pending(&c->vc)) {
|
||||
if (!c->phy) {
|
||||
spin_lock(&d->lock);
|
||||
if (list_empty(&c->node)) {
|
||||
list_add_tail(&c->node, &d->chan_pending);
|
||||
tasklet_schedule(&d->task);
|
||||
dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
|
||||
}
|
||||
spin_unlock(&d->lock);
|
||||
}
|
||||
spin_unlock(&d->lock);
|
||||
} else
|
||||
dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c);
|
||||
spin_unlock_irqrestore(&c->lock, flags);
|
||||
}
|
||||
|
||||
static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx)
|
||||
{
|
||||
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan);
|
||||
struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&c->lock, flags);
|
||||
c->chan.cookie += 1;
|
||||
if (c->chan.cookie < 0)
|
||||
c->chan.cookie = 1;
|
||||
txd->tx.cookie = c->chan.cookie;
|
||||
|
||||
list_add_tail(&txd->node, &c->desc_submitted);
|
||||
spin_unlock_irqrestore(&c->lock, flags);
|
||||
|
||||
dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n",
|
||||
c, txd, txd->tx.cookie);
|
||||
|
||||
return txd->tx.cookie;
|
||||
dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
|
||||
spin_unlock_irqrestore(&c->vc.lock, flags);
|
||||
}
|
||||
|
||||
static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
|
||||
|
@ -596,7 +543,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
|
|||
/* SA11x0 channels can only operate in their native direction */
|
||||
if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
|
||||
dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
|
||||
c, c->ddar, dir);
|
||||
&c->vc, c->ddar, dir);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -612,14 +559,14 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
|
|||
j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
|
||||
if (addr & DMA_ALIGN) {
|
||||
dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
|
||||
c, addr);
|
||||
&c->vc, addr);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
|
||||
if (!txd) {
|
||||
dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c);
|
||||
dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
@ -655,17 +602,73 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
|
|||
} while (len);
|
||||
}
|
||||
|
||||
dma_async_tx_descriptor_init(&txd->tx, &c->chan);
|
||||
txd->tx.flags = flags;
|
||||
txd->tx.tx_submit = sa11x0_dma_tx_submit;
|
||||
txd->ddar = c->ddar;
|
||||
txd->size = size;
|
||||
txd->sglen = j;
|
||||
|
||||
dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n",
|
||||
c, txd, txd->size, txd->sglen);
|
||||
&c->vc, &txd->vd, txd->size, txd->sglen);
|
||||
|
||||
return &txd->tx;
|
||||
return vchan_tx_prep(&c->vc, &txd->vd, flags);
|
||||
}
|
||||
|
||||
static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
|
||||
struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
|
||||
enum dma_transfer_direction dir, void *context)
|
||||
{
|
||||
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
|
||||
struct sa11x0_dma_desc *txd;
|
||||
unsigned i, j, k, sglen, sgperiod;
|
||||
|
||||
/* SA11x0 channels can only operate in their native direction */
|
||||
if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
|
||||
dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
|
||||
&c->vc, c->ddar, dir);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
|
||||
sglen = size * sgperiod / period;
|
||||
|
||||
/* Do not allow zero-sized txds */
|
||||
if (sglen == 0)
|
||||
return NULL;
|
||||
|
||||
txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
|
||||
if (!txd) {
|
||||
dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
for (i = k = 0; i < size / period; i++) {
|
||||
size_t tlen, len = period;
|
||||
|
||||
for (j = 0; j < sgperiod; j++, k++) {
|
||||
tlen = len;
|
||||
|
||||
if (tlen > DMA_MAX_SIZE) {
|
||||
unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
|
||||
tlen = (tlen / mult) & ~DMA_ALIGN;
|
||||
}
|
||||
|
||||
txd->sg[k].addr = addr;
|
||||
txd->sg[k].len = tlen;
|
||||
addr += tlen;
|
||||
len -= tlen;
|
||||
}
|
||||
|
||||
WARN_ON(len != 0);
|
||||
}
|
||||
|
||||
WARN_ON(k != sglen);
|
||||
|
||||
txd->ddar = c->ddar;
|
||||
txd->size = size;
|
||||
txd->sglen = sglen;
|
||||
txd->cyclic = 1;
|
||||
txd->period = sgperiod;
|
||||
|
||||
return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||||
}
|
||||
|
||||
static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
|
||||
|
@ -695,8 +698,8 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c
|
|||
if (maxburst == 8)
|
||||
ddar |= DDAR_BS;
|
||||
|
||||
dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
|
||||
c, addr, width, maxburst);
|
||||
dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
|
||||
&c->vc, addr, width, maxburst);
|
||||
|
||||
c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;
|
||||
|
||||
|
@ -718,16 +721,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
|||
return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);
|
||||
|
||||
case DMA_TERMINATE_ALL:
|
||||
dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c);
|
||||
dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
|
||||
/* Clear the tx descriptor lists */
|
||||
spin_lock_irqsave(&c->lock, flags);
|
||||
list_splice_tail_init(&c->desc_submitted, &head);
|
||||
list_splice_tail_init(&c->desc_issued, &head);
|
||||
spin_lock_irqsave(&c->vc.lock, flags);
|
||||
vchan_get_all_descriptors(&c->vc, &head);
|
||||
|
||||
p = c->phy;
|
||||
if (p) {
|
||||
struct sa11x0_dma_desc *txd, *txn;
|
||||
|
||||
dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
|
||||
/* vchan is assigned to a pchan - stop the channel */
|
||||
writel(DCSR_RUN | DCSR_IE |
|
||||
|
@ -735,17 +735,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
|||
DCSR_STRTB | DCSR_DONEB,
|
||||
p->base + DMA_DCSR_C);
|
||||
|
||||
list_for_each_entry_safe(txd, txn, &d->desc_complete, node)
|
||||
if (txd->tx.chan == &c->chan)
|
||||
list_move(&txd->node, &head);
|
||||
|
||||
if (p->txd_load) {
|
||||
if (p->txd_load != p->txd_done)
|
||||
list_add_tail(&p->txd_load->node, &head);
|
||||
list_add_tail(&p->txd_load->vd.node, &head);
|
||||
p->txd_load = NULL;
|
||||
}
|
||||
if (p->txd_done) {
|
||||
list_add_tail(&p->txd_done->node, &head);
|
||||
list_add_tail(&p->txd_done->vd.node, &head);
|
||||
p->txd_done = NULL;
|
||||
}
|
||||
c->phy = NULL;
|
||||
|
@ -754,14 +750,14 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
|||
spin_unlock(&d->lock);
|
||||
tasklet_schedule(&d->task);
|
||||
}
|
||||
spin_unlock_irqrestore(&c->lock, flags);
|
||||
sa11x0_dma_desc_free(d, &head);
|
||||
spin_unlock_irqrestore(&c->vc.lock, flags);
|
||||
vchan_dma_desc_free_list(&c->vc, &head);
|
||||
ret = 0;
|
||||
break;
|
||||
|
||||
case DMA_PAUSE:
|
||||
dev_dbg(d->slave.dev, "vchan %p: pause\n", c);
|
||||
spin_lock_irqsave(&c->lock, flags);
|
||||
dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
|
||||
spin_lock_irqsave(&c->vc.lock, flags);
|
||||
if (c->status == DMA_IN_PROGRESS) {
|
||||
c->status = DMA_PAUSED;
|
||||
|
||||
|
@ -774,26 +770,26 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
|||
spin_unlock(&d->lock);
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&c->lock, flags);
|
||||
spin_unlock_irqrestore(&c->vc.lock, flags);
|
||||
ret = 0;
|
||||
break;
|
||||
|
||||
case DMA_RESUME:
|
||||
dev_dbg(d->slave.dev, "vchan %p: resume\n", c);
|
||||
spin_lock_irqsave(&c->lock, flags);
|
||||
dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
|
||||
spin_lock_irqsave(&c->vc.lock, flags);
|
||||
if (c->status == DMA_PAUSED) {
|
||||
c->status = DMA_IN_PROGRESS;
|
||||
|
||||
p = c->phy;
|
||||
if (p) {
|
||||
writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
|
||||
} else if (!list_empty(&c->desc_issued)) {
|
||||
} else if (!list_empty(&c->vc.desc_issued)) {
|
||||
spin_lock(&d->lock);
|
||||
list_add_tail(&c->node, &d->chan_pending);
|
||||
spin_unlock(&d->lock);
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&c->lock, flags);
|
||||
spin_unlock_irqrestore(&c->vc.lock, flags);
|
||||
ret = 0;
|
||||
break;
|
||||
|
||||
|
@ -853,15 +849,13 @@ static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev,
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
c->chan.device = dmadev;
|
||||
c->status = DMA_IN_PROGRESS;
|
||||
c->ddar = chan_desc[i].ddar;
|
||||
c->name = chan_desc[i].name;
|
||||
spin_lock_init(&c->lock);
|
||||
INIT_LIST_HEAD(&c->desc_submitted);
|
||||
INIT_LIST_HEAD(&c->desc_issued);
|
||||
INIT_LIST_HEAD(&c->node);
|
||||
list_add_tail(&c->chan.device_node, &dmadev->channels);
|
||||
|
||||
c->vc.desc_free = sa11x0_dma_free_desc;
|
||||
vchan_init(&c->vc, dmadev);
|
||||
}
|
||||
|
||||
return dma_async_device_register(dmadev);
|
||||
|
@ -890,8 +884,9 @@ static void sa11x0_dma_free_channels(struct dma_device *dmadev)
|
|||
{
|
||||
struct sa11x0_dma_chan *c, *cn;
|
||||
|
||||
list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) {
|
||||
list_del(&c->chan.device_node);
|
||||
list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
|
||||
list_del(&c->vc.chan.device_node);
|
||||
tasklet_kill(&c->vc.task);
|
||||
kfree(c);
|
||||
}
|
||||
}
|
||||
|
@ -915,7 +910,6 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
|
|||
|
||||
spin_lock_init(&d->lock);
|
||||
INIT_LIST_HEAD(&d->chan_pending);
|
||||
INIT_LIST_HEAD(&d->desc_complete);
|
||||
|
||||
d->base = ioremap(res->start, resource_size(res));
|
||||
if (!d->base) {
|
||||
|
@ -947,7 +941,9 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
|
||||
dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
|
||||
d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
|
||||
d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
|
||||
ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
|
||||
if (ret) {
|
||||
dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
|
||||
|
|
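The cyclic support added above is what audio-style consumers rely on: one descriptor describes a circular buffer divided into periods, the driver fires vchan_cyclic_callback() at each period boundary, and the cookie is never completed. A minimal client-side sketch of driving such a channel, assuming a slave channel has already been obtained and configured; the function and callback names here are illustrative, not taken from this series, and it invokes the device_prep_dma_cyclic operation directly as clients of this era did:

	#include <linux/dmaengine.h>

	/* runs from the virt-dma channel tasklet, once per elapsed period */
	static void period_elapsed(void *data)
	{
		/* advance the application's ring-buffer pointer here */
	}

	static int start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len)
	{
		struct dma_async_tx_descriptor *txd;

		/* one descriptor covers the whole ring; interrupt per period */
		txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
							   period_len,
							   DMA_MEM_TO_DEV, NULL);
		if (!txd)
			return -ENOMEM;

		txd->callback = period_elapsed;
		txd->callback_param = NULL;

		dmaengine_submit(txd);
		dma_async_issue_pending(chan);
		return 0;
	}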
drivers/dma/virt-dma.c (new file, 123 lines)
@@ -0,0 +1,123 @@
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_add_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);

struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);

/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd;
	dma_async_tx_callback cb = NULL;
	void *cb_data = NULL;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		cb = vd->tx.callback;
		cb_data = vd->tx.callback_param;
	}
	spin_unlock_irq(&vc->lock);

	if (cb)
		cb(cb_data);

	while (!list_empty(&head)) {
		vd = list_first_entry(&head, struct virt_dma_desc, node);
		cb = vd->tx.callback;
		cb_data = vd->tx.callback_param;

		list_del(&vd->node);

		vc->desc_free(vd);

		if (cb)
			cb(cb_data);
	}
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct virt_dma_desc *vd = list_first_entry(head,
			struct virt_dma_desc, node);
		list_del(&vd->node);
		dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
		vc->desc_free(vd);
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);

void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);

	tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");
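The split of responsibilities this core enforces is worth spelling out: a driver marks a descriptor finished from its interrupt handler while holding vc.lock, and the callback plus free then run later from the channel tasklet with the lock dropped. A hedged sketch of the driver side, assuming the structure and handler names are invented for illustration (the sa11x0 conversion above is the real instance of this pattern):

	#include <linux/interrupt.h>
	#include <linux/slab.h>

	#include "virt-dma.h"

	struct my_desc {
		struct virt_dma_desc vd;	/* must be embedded, not pointed to */
		/* hardware-specific fields would follow */
	};

	struct my_chan {
		struct virt_dma_chan vc;
		struct my_desc *cur;		/* descriptor currently on hardware */
	};

	static void my_desc_free(struct virt_dma_desc *vd)
	{
		kfree(container_of(vd, struct my_desc, vd));
	}

	static irqreturn_t my_dma_irq(int irq, void *data)
	{
		struct my_chan *c = data;
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		if (c->cur) {
			/* queues the callback and free onto the channel tasklet */
			vchan_cookie_complete(&c->cur->vd);
			c->cur = NULL;
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);

		return IRQ_HANDLED;
	}

At probe time such a driver sets c->vc.desc_free = my_desc_free and calls vchan_init(&c->vc, dmadev); the desc_free hook exists because only the driver knows the full descriptor layout wrapped around struct virt_dma_desc.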
drivers/dma/virt-dma.h (new file, 152 lines)
@@ -0,0 +1,152 @@
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
	struct dma_async_tx_descriptor tx;
	/* protected by vc.lock */
	struct list_head node;
};

struct virt_dma_chan {
	struct dma_chan	chan;
	struct tasklet_struct task;
	void (*desc_free)(struct virt_dma_desc *);

	spinlock_t lock;

	/* protected by vc.lock */
	struct list_head desc_submitted;
	struct list_head desc_issued;
	struct list_head desc_completed;

	struct virt_dma_desc *cyclic;
};

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
	return container_of(chan, struct virt_dma_chan, chan);
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);

/**
 * vchan_tx_prep - prepare a descriptor
 * vc: virtual channel allocating this descriptor
 * vd: virtual descriptor to prepare
 * tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
	struct virt_dma_desc *vd, unsigned long tx_flags)
{
	extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);

	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
	vd->tx.flags = tx_flags;
	vd->tx.tx_submit = vchan_tx_submit;

	return &vd->tx;
}

/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
	list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
	return !list_empty(&vc->desc_issued);
}

/**
 * vchan_cookie_complete - report completion of a descriptor
 * vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	dma_cookie_complete(&vd->tx);
	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
		 vd, vd->tx.cookie);
	list_add_tail(&vd->node, &vc->desc_completed);

	tasklet_schedule(&vc->task);
}

/**
 * vchan_cyclic_callback - report the completion of a period
 * vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	vc->cyclic = vd;
	tasklet_schedule(&vc->task);
}

/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
	if (list_empty(&vc->desc_issued))
		return NULL;

	return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node);
}

/**
 * vchan_get_all_descriptors - obtain all submitted and issued descriptors
 * vc: virtual channel to get descriptors from
 * head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all submitted and issued descriptors from internal lists, and
 * provides a list of all descriptors found
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
	struct list_head *head)
{
	list_splice_tail_init(&vc->desc_submitted, head);
	list_splice_tail_init(&vc->desc_issued, head);
	list_splice_tail_init(&vc->desc_completed, head);
}

static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	vchan_get_all_descriptors(vc, &head);
	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}

#endif
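The driver conversions that follow are all consumers of the same dmaengine slave pattern: request a channel through a filter function keyed on the DMA request line, configure the device-side address and burst with dmaengine_slave_config(), then prepare, submit, and issue. A condensed sketch of that sequence under stated assumptions (the helper names, the FIFO address, and the burst value are illustrative; the calls themselves are exactly those used by the conversions below):

	#include <linux/dmaengine.h>
	#include <linux/omap-dma.h>	/* omap_dma_filter_fn */

	static struct dma_chan *get_slave_chan(unsigned int sig)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* sig names the request line; the filter matches it */
		return dma_request_channel(mask, omap_dma_filter_fn, &sig);
	}

	static int start_rx(struct dma_chan *chan, dma_addr_t fifo,
			    struct scatterlist *sg, unsigned int sg_len)
	{
		struct dma_slave_config cfg = {
			.src_addr	= fifo,
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst	= 16,
		};
		struct dma_async_tx_descriptor *tx;
		int ret;

		ret = dmaengine_slave_config(chan, &cfg);
		if (ret)
			return ret;

		tx = dmaengine_prep_slave_sg(chan, sg, sg_len, DMA_DEV_TO_MEM,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx)
			return -EIO;

		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		return 0;
	}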
@ -17,10 +17,12 @@
|
|||
#include <linux/ioport.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/timer.h>
|
||||
#include <linux/omap-dma.h>
|
||||
#include <linux/mmc/host.h>
|
||||
#include <linux/mmc/card.h>
|
||||
#include <linux/clk.h>
|
||||
|
@ -128,6 +130,10 @@ struct mmc_omap_host {
|
|||
unsigned char id; /* 16xx chips have 2 MMC blocks */
|
||||
struct clk * iclk;
|
||||
struct clk * fclk;
|
||||
struct dma_chan *dma_rx;
|
||||
u32 dma_rx_burst;
|
||||
struct dma_chan *dma_tx;
|
||||
u32 dma_tx_burst;
|
||||
struct resource *mem_res;
|
||||
void __iomem *virt_base;
|
||||
unsigned int phys_base;
|
||||
|
@ -153,12 +159,8 @@ struct mmc_omap_host {
|
|||
|
||||
unsigned use_dma:1;
|
||||
unsigned brs_received:1, dma_done:1;
|
||||
unsigned dma_is_read:1;
|
||||
unsigned dma_in_use:1;
|
||||
int dma_ch;
|
||||
spinlock_t dma_lock;
|
||||
struct timer_list dma_timer;
|
||||
unsigned dma_len;
|
||||
|
||||
struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS];
|
||||
struct mmc_omap_slot *current_slot;
|
||||
|
@ -406,18 +408,25 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
|
|||
int abort)
|
||||
{
|
||||
enum dma_data_direction dma_data_dir;
|
||||
struct device *dev = mmc_dev(host->mmc);
|
||||
struct dma_chan *c;
|
||||
|
||||
BUG_ON(host->dma_ch < 0);
|
||||
if (data->error)
|
||||
omap_stop_dma(host->dma_ch);
|
||||
/* Release DMA channel lazily */
|
||||
mod_timer(&host->dma_timer, jiffies + HZ);
|
||||
if (data->flags & MMC_DATA_WRITE)
|
||||
if (data->flags & MMC_DATA_WRITE) {
|
||||
dma_data_dir = DMA_TO_DEVICE;
|
||||
else
|
||||
c = host->dma_tx;
|
||||
} else {
|
||||
dma_data_dir = DMA_FROM_DEVICE;
|
||||
dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
|
||||
dma_data_dir);
|
||||
c = host->dma_rx;
|
||||
}
|
||||
if (c) {
|
||||
if (data->error) {
|
||||
dmaengine_terminate_all(c);
|
||||
/* Claim nothing transferred on error... */
|
||||
data->bytes_xfered = 0;
|
||||
}
|
||||
dev = c->device->dev;
|
||||
}
|
||||
dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);
|
||||
}
|
||||
|
||||
static void mmc_omap_send_stop_work(struct work_struct *work)
|
||||
|
@ -524,16 +533,6 @@ mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
|
|||
mmc_omap_xfer_done(host, data);
|
||||
}
|
||||
|
||||
static void
|
||||
mmc_omap_dma_timer(unsigned long data)
|
||||
{
|
||||
struct mmc_omap_host *host = (struct mmc_omap_host *) data;
|
||||
|
||||
BUG_ON(host->dma_ch < 0);
|
||||
omap_free_dma(host->dma_ch);
|
||||
host->dma_ch = -1;
|
||||
}
|
||||
|
||||
static void
|
||||
mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
|
||||
{
|
||||
|
@ -891,159 +890,15 @@ static void mmc_omap_cover_handler(unsigned long param)
|
|||
jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));
|
||||
}
|
||||
|
||||
/* Prepare to transfer the next segment of a scatterlist */
|
||||
static void
|
||||
mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
|
||||
static void mmc_omap_dma_callback(void *priv)
|
||||
{
|
||||
int dma_ch = host->dma_ch;
|
||||
unsigned long data_addr;
|
||||
u16 buf, frame;
|
||||
u32 count;
|
||||
struct scatterlist *sg = &data->sg[host->sg_idx];
|
||||
int src_port = 0;
|
||||
int dst_port = 0;
|
||||
int sync_dev = 0;
|
||||
struct mmc_omap_host *host = priv;
|
||||
struct mmc_data *data = host->data;
|
||||
|
||||
data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
|
||||
frame = data->blksz;
|
||||
count = sg_dma_len(sg);
|
||||
/* If we got to the end of DMA, assume everything went well */
|
||||
data->bytes_xfered += data->blocks * data->blksz;
|
||||
|
||||
if ((data->blocks == 1) && (count > data->blksz))
|
||||
count = frame;
|
||||
|
||||
host->dma_len = count;
|
||||
|
||||
/* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
|
||||
* Use 16 or 32 word frames when the blocksize is at least that large.
|
||||
* Blocksize is usually 512 bytes; but not for some SD reads.
|
||||
*/
|
||||
if (cpu_is_omap15xx() && frame > 32)
|
||||
frame = 32;
|
||||
else if (frame > 64)
|
||||
frame = 64;
|
||||
count /= frame;
|
||||
frame >>= 1;
|
||||
|
||||
if (!(data->flags & MMC_DATA_WRITE)) {
|
||||
buf = 0x800f | ((frame - 1) << 8);
|
||||
|
||||
if (cpu_class_is_omap1()) {
|
||||
src_port = OMAP_DMA_PORT_TIPB;
|
||||
dst_port = OMAP_DMA_PORT_EMIFF;
|
||||
}
|
||||
if (cpu_is_omap24xx())
|
||||
sync_dev = OMAP24XX_DMA_MMC1_RX;
|
||||
|
||||
omap_set_dma_src_params(dma_ch, src_port,
|
||||
OMAP_DMA_AMODE_CONSTANT,
|
||||
data_addr, 0, 0);
|
||||
omap_set_dma_dest_params(dma_ch, dst_port,
|
||||
OMAP_DMA_AMODE_POST_INC,
|
||||
sg_dma_address(sg), 0, 0);
|
||||
omap_set_dma_dest_data_pack(dma_ch, 1);
|
||||
omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
|
||||
} else {
|
||||
buf = 0x0f80 | ((frame - 1) << 0);
|
||||
|
||||
if (cpu_class_is_omap1()) {
|
||||
src_port = OMAP_DMA_PORT_EMIFF;
|
||||
dst_port = OMAP_DMA_PORT_TIPB;
|
||||
}
|
||||
if (cpu_is_omap24xx())
|
||||
sync_dev = OMAP24XX_DMA_MMC1_TX;
|
||||
|
||||
omap_set_dma_dest_params(dma_ch, dst_port,
|
||||
OMAP_DMA_AMODE_CONSTANT,
|
||||
data_addr, 0, 0);
|
||||
omap_set_dma_src_params(dma_ch, src_port,
|
||||
OMAP_DMA_AMODE_POST_INC,
|
||||
sg_dma_address(sg), 0, 0);
|
||||
omap_set_dma_src_data_pack(dma_ch, 1);
|
||||
omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
|
||||
}
|
||||
|
||||
/* Max limit for DMA frame count is 0xffff */
|
||||
BUG_ON(count > 0xffff);
|
||||
|
||||
OMAP_MMC_WRITE(host, BUF, buf);
|
||||
omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
|
||||
frame, count, OMAP_DMA_SYNC_FRAME,
|
||||
sync_dev, 0);
|
||||
}
|
||||
|
||||
/* A scatterlist segment completed */
|
||||
static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
|
||||
{
|
||||
struct mmc_omap_host *host = (struct mmc_omap_host *) data;
|
||||
struct mmc_data *mmcdat = host->data;
|
||||
|
||||
if (unlikely(host->dma_ch < 0)) {
|
||||
dev_err(mmc_dev(host->mmc),
|
||||
"DMA callback while DMA not enabled\n");
|
||||
return;
|
||||
}
|
||||
/* FIXME: We really should do something to _handle_ the errors */
|
||||
if (ch_status & OMAP1_DMA_TOUT_IRQ) {
|
||||
dev_err(mmc_dev(host->mmc),"DMA timeout\n");
|
||||
return;
|
||||
}
|
||||
if (ch_status & OMAP_DMA_DROP_IRQ) {
|
||||
dev_err(mmc_dev(host->mmc), "DMA sync error\n");
|
||||
return;
|
||||
}
|
||||
if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
|
||||
return;
|
||||
}
|
||||
mmcdat->bytes_xfered += host->dma_len;
|
||||
host->sg_idx++;
|
||||
if (host->sg_idx < host->sg_len) {
|
||||
mmc_omap_prepare_dma(host, host->data);
|
||||
omap_start_dma(host->dma_ch);
|
||||
} else
|
||||
mmc_omap_dma_done(host, host->data);
|
||||
}
|
||||
|
||||
static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
|
||||
{
|
||||
const char *dma_dev_name;
|
||||
int sync_dev, dma_ch, is_read, r;
|
||||
|
||||
is_read = !(data->flags & MMC_DATA_WRITE);
|
||||
del_timer_sync(&host->dma_timer);
|
||||
if (host->dma_ch >= 0) {
|
||||
if (is_read == host->dma_is_read)
|
||||
return 0;
|
||||
omap_free_dma(host->dma_ch);
|
||||
host->dma_ch = -1;
|
||||
}
|
||||
|
||||
if (is_read) {
|
||||
if (host->id == 0) {
|
||||
sync_dev = OMAP_DMA_MMC_RX;
|
||||
dma_dev_name = "MMC1 read";
|
||||
} else {
|
||||
sync_dev = OMAP_DMA_MMC2_RX;
|
||||
dma_dev_name = "MMC2 read";
|
||||
}
|
||||
} else {
|
||||
if (host->id == 0) {
|
||||
sync_dev = OMAP_DMA_MMC_TX;
|
||||
dma_dev_name = "MMC1 write";
|
||||
} else {
|
||||
sync_dev = OMAP_DMA_MMC2_TX;
|
||||
dma_dev_name = "MMC2 write";
|
||||
}
|
||||
}
|
||||
r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb,
|
||||
host, &dma_ch);
|
||||
if (r != 0) {
|
||||
dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
|
||||
return r;
|
||||
}
|
||||
host->dma_ch = dma_ch;
|
||||
host->dma_is_read = is_read;
|
||||
|
||||
return 0;
|
||||
mmc_omap_dma_done(host, data);
|
||||
}
|
||||
|
||||
static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
|
||||
|
@ -1118,33 +973,85 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
|
|||
|
||||
host->sg_idx = 0;
|
||||
if (use_dma) {
|
||||
if (mmc_omap_get_dma_channel(host, data) == 0) {
|
||||
enum dma_data_direction dma_data_dir;
|
||||
enum dma_data_direction dma_data_dir;
|
||||
struct dma_async_tx_descriptor *tx;
|
||||
struct dma_chan *c;
|
||||
u32 burst, *bp;
|
||||
u16 buf;
|
||||
|
||||
if (data->flags & MMC_DATA_WRITE)
|
||||
dma_data_dir = DMA_TO_DEVICE;
|
||||
else
|
||||
dma_data_dir = DMA_FROM_DEVICE;
|
||||
/*
|
||||
* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx
|
||||
* and 24xx. Use 16 or 32 word frames when the
|
||||
* blocksize is at least that large. Blocksize is
|
||||
* usually 512 bytes; but not for some SD reads.
|
||||
*/
|
||||
burst = cpu_is_omap15xx() ? 32 : 64;
|
||||
if (burst > data->blksz)
|
||||
burst = data->blksz;
|
||||
|
||||
host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
|
||||
sg_len, dma_data_dir);
|
||||
host->total_bytes_left = 0;
|
||||
mmc_omap_prepare_dma(host, req->data);
|
||||
host->brs_received = 0;
|
||||
host->dma_done = 0;
|
||||
host->dma_in_use = 1;
|
||||
} else
|
||||
use_dma = 0;
|
||||
burst >>= 1;
|
||||
|
||||
if (data->flags & MMC_DATA_WRITE) {
|
||||
c = host->dma_tx;
|
||||
bp = &host->dma_tx_burst;
|
||||
buf = 0x0f80 | (burst - 1) << 0;
|
||||
dma_data_dir = DMA_TO_DEVICE;
|
||||
} else {
|
||||
c = host->dma_rx;
|
||||
bp = &host->dma_rx_burst;
|
||||
buf = 0x800f | (burst - 1) << 8;
|
||||
dma_data_dir = DMA_FROM_DEVICE;
|
||||
}
|
||||
|
||||
if (!c)
|
||||
goto use_pio;
|
||||
|
||||
/* Only reconfigure if we have a different burst size */
|
||||
if (*bp != burst) {
|
||||
struct dma_slave_config cfg;
|
||||
|
||||
cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
|
||||
cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
|
||||
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
|
||||
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
|
||||
cfg.src_maxburst = burst;
|
||||
cfg.dst_maxburst = burst;
|
||||
|
||||
if (dmaengine_slave_config(c, &cfg))
|
||||
goto use_pio;
|
||||
|
||||
*bp = burst;
|
||||
}
|
||||
|
||||
host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len,
|
||||
dma_data_dir);
|
||||
if (host->sg_len == 0)
|
||||
goto use_pio;
|
||||
|
||||
tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len,
|
||||
data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
|
||||
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||||
if (!tx)
|
||||
goto use_pio;
|
||||
|
||||
OMAP_MMC_WRITE(host, BUF, buf);
|
||||
|
||||
tx->callback = mmc_omap_dma_callback;
|
||||
tx->callback_param = host;
|
||||
dmaengine_submit(tx);
|
||||
host->brs_received = 0;
|
||||
host->dma_done = 0;
|
||||
host->dma_in_use = 1;
|
||||
return;
|
||||
}
|
||||
use_pio:
|
||||
|
||||
/* Revert to PIO? */
|
||||
if (!use_dma) {
|
||||
OMAP_MMC_WRITE(host, BUF, 0x1f1f);
|
||||
host->total_bytes_left = data->blocks * block_size;
|
||||
host->sg_len = sg_len;
|
||||
mmc_omap_sg_to_buf(host);
|
||||
host->dma_in_use = 0;
|
||||
}
|
||||
OMAP_MMC_WRITE(host, BUF, 0x1f1f);
|
||||
host->total_bytes_left = data->blocks * block_size;
|
||||
host->sg_len = sg_len;
|
||||
mmc_omap_sg_to_buf(host);
|
||||
host->dma_in_use = 0;
|
||||
}
|
||||
|
||||
static void mmc_omap_start_request(struct mmc_omap_host *host,
|
||||
|
@ -1157,8 +1064,12 @@ static void mmc_omap_start_request(struct mmc_omap_host *host,
|
|||
/* only touch fifo AFTER the controller readies it */
|
||||
mmc_omap_prepare_data(host, req);
|
||||
mmc_omap_start_command(host, req->cmd);
|
||||
if (host->dma_in_use)
|
||||
omap_start_dma(host->dma_ch);
|
||||
if (host->dma_in_use) {
|
||||
struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ?
|
||||
host->dma_tx : host->dma_rx;
|
||||
|
||||
dma_async_issue_pending(c);
|
||||
}
|
||||
}
|
||||
|
||||
static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
|
||||
|
@ -1400,6 +1311,8 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
|
|||
struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
|
||||
struct mmc_omap_host *host = NULL;
|
||||
struct resource *res;
|
||||
dma_cap_mask_t mask;
|
||||
unsigned sig;
|
||||
int i, ret = 0;
|
||||
int irq;
|
||||
|
||||
|
@ -1439,7 +1352,6 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
|
|||
setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);
|
||||
|
||||
spin_lock_init(&host->dma_lock);
|
||||
setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);
|
||||
spin_lock_init(&host->slot_lock);
|
||||
init_waitqueue_head(&host->slot_wq);
|
||||
|
||||
|
@ -1450,11 +1362,7 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
|
|||
host->id = pdev->id;
|
||||
host->mem_res = res;
|
||||
host->irq = irq;
|
||||
|
||||
host->use_dma = 1;
|
||||
host->dev->dma_mask = &pdata->dma_mask;
|
||||
host->dma_ch = -1;
|
||||
|
||||
host->irq = irq;
|
||||
host->phys_base = host->mem_res->start;
|
||||
host->virt_base = ioremap(res->start, resource_size(res));
|
||||
|
@ -1474,9 +1382,48 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
|
|||
goto err_free_iclk;
|
||||
}
|
||||
|
||||
dma_cap_zero(mask);
|
||||
dma_cap_set(DMA_SLAVE, mask);
|
||||
|
||||
host->dma_tx_burst = -1;
|
||||
host->dma_rx_burst = -1;
|
||||
|
||||
if (cpu_is_omap24xx())
|
||||
sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX;
|
||||
else
|
||||
sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX;
|
||||
host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
|
||||
#if 0
|
||||
if (!host->dma_tx) {
|
||||
dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n",
|
||||
sig);
|
||||
goto err_dma;
|
||||
}
|
||||
#else
|
||||
if (!host->dma_tx)
|
||||
dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n",
|
||||
sig);
|
||||
#endif
|
||||
if (cpu_is_omap24xx())
|
||||
sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX;
|
||||
else
|
||||
sig = host->id == 0 ? OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX;
|
||||
host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
|
||||
#if 0
|
||||
if (!host->dma_rx) {
|
||||
dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n",
|
||||
sig);
|
||||
goto err_dma;
|
||||
}
|
||||
#else
|
||||
if (!host->dma_rx)
|
||||
dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n",
|
||||
sig);
|
||||
#endif
|
||||
|
||||
ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);
|
||||
if (ret)
|
||||
goto err_free_fclk;
|
||||
goto err_free_dma;
|
||||
|
||||
if (pdata->init != NULL) {
|
||||
ret = pdata->init(&pdev->dev);
|
||||
|
@ -1510,7 +1457,11 @@ err_plat_cleanup:
|
|||
pdata->cleanup(&pdev->dev);
|
||||
err_free_irq:
|
||||
free_irq(host->irq, host);
|
||||
err_free_fclk:
|
||||
err_free_dma:
|
||||
if (host->dma_tx)
|
||||
dma_release_channel(host->dma_tx);
|
||||
if (host->dma_rx)
|
||||
dma_release_channel(host->dma_rx);
|
||||
clk_put(host->fclk);
|
||||
err_free_iclk:
|
||||
clk_disable(host->iclk);
|
||||
|
@ -1545,6 +1496,11 @@ static int __devexit mmc_omap_remove(struct platform_device *pdev)
|
|||
clk_disable(host->iclk);
|
||||
clk_put(host->iclk);
|
||||
|
||||
if (host->dma_tx)
|
||||
dma_release_channel(host->dma_tx);
|
||||
if (host->dma_rx)
|
||||
dma_release_channel(host->dma_rx);
|
||||
|
||||
iounmap(host->virt_base);
|
||||
release_mem_region(pdev->resource[0].start,
|
||||
pdev->resource[0].end - pdev->resource[0].start + 1);
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/delay.h>
|
||||
|
@ -29,6 +30,7 @@
|
|||
#include <linux/of.h>
|
||||
#include <linux/of_gpio.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/omap-dma.h>
|
||||
#include <linux/mmc/host.h>
|
||||
#include <linux/mmc/core.h>
|
||||
#include <linux/mmc/mmc.h>
|
||||
|
@ -37,7 +39,6 @@
|
|||
#include <linux/gpio.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <plat/dma.h>
|
||||
#include <mach/hardware.h>
|
||||
#include <plat/board.h>
|
||||
#include <plat/mmc.h>
|
||||
|
@ -166,7 +167,8 @@ struct omap_hsmmc_host {
|
|||
int suspended;
|
||||
int irq;
|
||||
int use_dma, dma_ch;
|
||||
int dma_line_tx, dma_line_rx;
|
||||
struct dma_chan *tx_chan;
|
||||
struct dma_chan *rx_chan;
|
||||
int slot_id;
|
||||
int response_busy;
|
||||
int context_loss;
|
||||
|
@ -797,6 +799,12 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
|
|||
return DMA_FROM_DEVICE;
|
||||
}
|
||||
|
||||
static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
|
||||
struct mmc_data *data)
|
||||
{
|
||||
return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
|
||||
}
|
||||
|
||||
static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
|
||||
{
|
||||
int dma_ch;
|
||||
|
@ -889,10 +897,13 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
|
|||
spin_unlock_irqrestore(&host->irq_lock, flags);
|
||||
|
||||
if (host->use_dma && dma_ch != -1) {
|
||||
dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
|
||||
host->data->sg_len,
|
||||
struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data);
|
||||
|
||||
dmaengine_terminate_all(chan);
|
||||
dma_unmap_sg(chan->device->dev,
|
||||
host->data->sg, host->data->sg_len,
|
||||
omap_hsmmc_get_dma_dir(host, host->data));
|
||||
omap_free_dma(dma_ch);
|
||||
|
||||
host->data->host_cookie = 0;
|
||||
}
|
||||
host->data = NULL;
|
||||
|
@ -1190,90 +1201,29 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
|
|||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host,
|
||||
struct mmc_data *data)
|
||||
static void omap_hsmmc_dma_callback(void *param)
|
||||
{
|
||||
int sync_dev;
|
||||
|
||||
if (data->flags & MMC_DATA_WRITE)
|
||||
sync_dev = host->dma_line_tx;
|
||||
else
|
||||
sync_dev = host->dma_line_rx;
|
||||
return sync_dev;
|
||||
}
|
||||
|
||||
static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
|
||||
struct mmc_data *data,
|
||||
struct scatterlist *sgl)
|
||||
{
|
||||
int blksz, nblk, dma_ch;
|
||||
|
||||
dma_ch = host->dma_ch;
|
||||
if (data->flags & MMC_DATA_WRITE) {
|
||||
omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
|
||||
(host->mapbase + OMAP_HSMMC_DATA), 0, 0);
|
||||
omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
|
||||
sg_dma_address(sgl), 0, 0);
|
||||
} else {
|
||||
omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
|
||||
(host->mapbase + OMAP_HSMMC_DATA), 0, 0);
|
||||
omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
|
||||
sg_dma_address(sgl), 0, 0);
|
||||
}
|
||||
|
||||
blksz = host->data->blksz;
|
||||
nblk = sg_dma_len(sgl) / blksz;
|
||||
|
||||
omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32,
|
||||
blksz / 4, nblk, OMAP_DMA_SYNC_FRAME,
|
||||
omap_hsmmc_get_dma_sync_dev(host, data),
|
||||
!(data->flags & MMC_DATA_WRITE));
|
||||
|
||||
omap_start_dma(dma_ch);
|
||||
}
|
||||
|
||||
/*
|
||||
* DMA call back function
|
||||
*/
|
||||
static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
|
||||
{
|
||||
struct omap_hsmmc_host *host = cb_data;
|
||||
struct omap_hsmmc_host *host = param;
|
||||
struct dma_chan *chan;
|
||||
struct mmc_data *data;
|
||||
int dma_ch, req_in_progress;
|
||||
unsigned long flags;
|
||||
int req_in_progress;
|
||||
|
||||
if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
|
||||
dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
|
||||
ch_status);
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&host->irq_lock, flags);
|
||||
spin_lock_irq(&host->irq_lock);
|
||||
if (host->dma_ch < 0) {
|
||||
spin_unlock_irqrestore(&host->irq_lock, flags);
|
||||
spin_unlock_irq(&host->irq_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
data = host->mrq->data;
|
||||
host->dma_sg_idx++;
|
||||
if (host->dma_sg_idx < host->dma_len) {
|
||||
/* Fire up the next transfer. */
|
||||
omap_hsmmc_config_dma_params(host, data,
|
||||
data->sg + host->dma_sg_idx);
|
||||
spin_unlock_irqrestore(&host->irq_lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
chan = omap_hsmmc_get_dma_chan(host, data);
|
||||
if (!data->host_cookie)
|
||||
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
|
||||
dma_unmap_sg(chan->device->dev,
|
||||
data->sg, data->sg_len,
|
||||
omap_hsmmc_get_dma_dir(host, data));
|
||||
|
||||
req_in_progress = host->req_in_progress;
|
||||
dma_ch = host->dma_ch;
|
||||
host->dma_ch = -1;
|
||||
spin_unlock_irqrestore(&host->irq_lock, flags);
|
||||
|
||||
omap_free_dma(dma_ch);
|
||||
spin_unlock_irq(&host->irq_lock);
|
||||
|
||||
/* If DMA has finished after TC, complete the request */
|
||||
if (!req_in_progress) {
|
||||
|
@ -1286,7 +1236,8 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
|
|||
|
||||
static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
|
||||
struct mmc_data *data,
|
||||
struct omap_hsmmc_next *next)
|
||||
struct omap_hsmmc_next *next,
|
||||
struct dma_chan *chan)
|
||||
{
|
||||
int dma_len;
|
||||
|
||||
|
@ -1301,8 +1252,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
|
|||
/* Check if next job is already prepared */
|
||||
if (next ||
|
||||
(!next && data->host_cookie != host->next_data.cookie)) {
|
||||
dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
|
||||
data->sg_len,
|
||||
dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
|
||||
omap_hsmmc_get_dma_dir(host, data));
|
||||
|
||||
} else {
|
||||
|
@ -1329,8 +1279,11 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
|
|||
static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
|
||||
struct mmc_request *req)
|
||||
{
|
||||
int dma_ch = 0, ret = 0, i;
|
||||
struct dma_slave_config cfg;
|
||||
struct dma_async_tx_descriptor *tx;
|
||||
int ret = 0, i;
|
||||
struct mmc_data *data = req->data;
|
||||
struct dma_chan *chan;
|
||||
|
||||
/* Sanity check: all the SG entries must be aligned by block size. */
|
||||
for (i = 0; i < data->sg_len; i++) {
|
||||
|
@ -1348,22 +1301,41 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
|
|||
|
||||
BUG_ON(host->dma_ch != -1);
|
||||
|
||||
ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data),
|
||||
"MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch);
|
||||
if (ret != 0) {
|
||||
dev_err(mmc_dev(host->mmc),
|
||||
"%s: omap_request_dma() failed with %d\n",
|
||||
mmc_hostname(host->mmc), ret);
|
||||
return ret;
|
||||
}
|
||||
ret = omap_hsmmc_pre_dma_transfer(host, data, NULL);
|
||||
chan = omap_hsmmc_get_dma_chan(host, data);
|
||||
|
||||
cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA;
|
||||
cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA;
|
||||
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
||||
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
||||
cfg.src_maxburst = data->blksz / 4;
|
||||
cfg.dst_maxburst = data->blksz / 4;
|
||||
|
||||
ret = dmaengine_slave_config(chan, &cfg);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
host->dma_ch = dma_ch;
|
||||
host->dma_sg_idx = 0;
|
||||
ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
omap_hsmmc_config_dma_params(host, data, data->sg);
|
||||
tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
|
||||
data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
|
||||
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||||
if (!tx) {
|
||||
dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
|
||||
/* FIXME: cleanup */
|
||||
return -1;
|
||||
}
|
||||
|
||||
tx->callback = omap_hsmmc_dma_callback;
|
||||
tx->callback_param = host;
|
||||
|
||||
/* Does not fail */
|
||||
dmaengine_submit(tx);
|
||||
|
||||
host->dma_ch = 1;
|
||||
|
||||
dma_async_issue_pending(chan);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1445,11 +1417,11 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
|
|||
struct omap_hsmmc_host *host = mmc_priv(mmc);
|
||||
struct mmc_data *data = mrq->data;
|
||||
|
||||
if (host->use_dma) {
|
||||
if (data->host_cookie)
|
||||
dma_unmap_sg(mmc_dev(host->mmc), data->sg,
|
||||
data->sg_len,
|
||||
omap_hsmmc_get_dma_dir(host, data));
|
||||
if (host->use_dma && data->host_cookie) {
|
||||
struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
|
||||
|
||||
dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
|
||||
omap_hsmmc_get_dma_dir(host, data));
|
||||
data->host_cookie = 0;
|
||||
}
|
||||
}
|
||||
|
@ -1464,10 +1436,13 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
|
|||
return ;
|
||||
}
|
||||
|
||||
if (host->use_dma)
|
||||
if (host->use_dma) {
|
||||
struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data);
|
||||
|
||||
if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
|
||||
&host->next_data))
|
||||
&host->next_data, c))
|
||||
mrq->data->host_cookie = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1800,6 +1775,8 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
|
|||
struct resource *res;
|
||||
int ret, irq;
|
||||
const struct of_device_id *match;
|
||||
dma_cap_mask_t mask;
|
||||
unsigned tx_req, rx_req;
|
||||
|
||||
match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev);
|
||||
if (match) {
|
||||
|
@ -1844,7 +1821,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
|
|||
host->pdata = pdata;
|
||||
host->dev = &pdev->dev;
|
||||
host->use_dma = 1;
|
||||
host->dev->dma_mask = &pdata->dma_mask;
|
||||
host->dma_ch = -1;
|
||||
host->irq = irq;
|
||||
host->slot_id = 0;
|
||||
|
@ -1934,7 +1910,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
|
|||
ret = -ENXIO;
|
||||
goto err_irq;
|
||||
}
|
||||
host->dma_line_tx = res->start;
|
||||
tx_req = res->start;
|
||||
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
|
||||
if (!res) {
|
||||
|
@ -1942,7 +1918,24 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
|
|||
ret = -ENXIO;
|
||||
goto err_irq;
|
||||
}
|
||||
host->dma_line_rx = res->start;
|
||||
rx_req = res->start;
|
||||
|
||||
dma_cap_zero(mask);
|
||||
dma_cap_set(DMA_SLAVE, mask);
|
||||
|
||||
host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req);
|
||||
if (!host->rx_chan) {
|
||||
dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
|
||||
ret = -ENXIO;
|
||||
goto err_irq;
|
||||
}
|
||||
|
||||
host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req);
|
||||
if (!host->tx_chan) {
|
||||
dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
|
||||
ret = -ENXIO;
|
||||
goto err_irq;
|
||||
}
|
||||
|
||||
/* Request IRQ for MMC operations */
|
||||
ret = request_irq(host->irq, omap_hsmmc_irq, 0,
|
||||
|
@ -2021,6 +2014,10 @@ err_reg:
|
|||
err_irq_cd_init:
|
||||
free_irq(host->irq, host);
|
||||
err_irq:
|
||||
if (host->tx_chan)
|
||||
dma_release_channel(host->tx_chan);
|
||||
if (host->rx_chan)
|
||||
dma_release_channel(host->rx_chan);
|
||||
pm_runtime_put_sync(host->dev);
|
||||
pm_runtime_disable(host->dev);
|
||||
clk_put(host->fclk);
|
||||
|
@ -2056,6 +2053,11 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
|
|||
if (mmc_slot(host).card_detect_irq)
|
||||
free_irq(mmc_slot(host).card_detect_irq, host);
|
||||
|
||||
if (host->tx_chan)
|
||||
dma_release_channel(host->tx_chan);
|
||||
if (host->rx_chan)
|
||||
dma_release_channel(host->rx_chan);
|
||||
|
||||
pm_runtime_put_sync(host->dev);
|
||||
pm_runtime_disable(host->dev);
|
||||
clk_put(host->fclk);
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -18,6 +19,7 @@
|
|||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/mtd/nand.h>
|
||||
#include <linux/mtd/partitions.h>
|
||||
#include <linux/omap-dma.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
|
@ -123,7 +125,7 @@ struct omap_nand_info {
|
|||
int gpmc_cs;
|
||||
unsigned long phys_base;
|
||||
struct completion comp;
|
||||
int dma_ch;
|
||||
struct dma_chan *dma;
|
||||
int gpmc_irq;
|
||||
enum {
|
||||
OMAP_NAND_IO_READ = 0, /* read */
|
||||
|
@ -336,12 +338,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
|
|||
}
|
||||
|
||||
/*
|
||||
* omap_nand_dma_cb: callback on the completion of dma transfer
|
||||
* @lch: logical channel
|
||||
* @ch_satuts: channel status
|
||||
* omap_nand_dma_callback: callback on the completion of dma transfer
|
||||
* @data: pointer to completion data structure
|
||||
*/
|
||||
static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
|
||||
static void omap_nand_dma_callback(void *data)
|
||||
{
|
||||
complete((struct completion *) data);
|
||||
}
|
||||
|
@ -358,17 +358,13 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
|
|||
{
|
||||
struct omap_nand_info *info = container_of(mtd,
|
||||
struct omap_nand_info, mtd);
|
||||
struct dma_async_tx_descriptor *tx;
|
||||
enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
|
||||
DMA_FROM_DEVICE;
|
||||
dma_addr_t dma_addr;
|
||||
int ret;
|
||||
struct scatterlist sg;
|
||||
unsigned long tim, limit;
|
||||
|
||||
/* The fifo depth is 64 bytes max.
|
||||
* But configure the FIFO-threahold to 32 to get a sync at each frame
|
||||
* and frame length is 32 bytes.
|
||||
*/
|
||||
int buf_len = len >> 6;
|
||||
unsigned n;
|
||||
int ret;
|
||||
|
||||
if (addr >= high_memory) {
|
||||
struct page *p1;
|
||||
|
@@ -382,40 +378,33 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
	}

	dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
	if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
	sg_init_one(&sg, addr, len);
	n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
	if (n == 0) {
		dev_err(&info->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n", len);
		goto out_copy;
	}

	if (is_write) {
		omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
			info->phys_base, 0, 0);
		omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
			dma_addr, 0, 0);
		omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
			0x10, buf_len, OMAP_DMA_SYNC_FRAME,
			OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC);
	} else {
		omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT,
			info->phys_base, 0, 0);
		omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC,
			dma_addr, 0, 0);
		omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32,
			0x10, buf_len, OMAP_DMA_SYNC_FRAME,
			OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC);
	}
	/* configure and start prefetch transfer */
	tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
		is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		goto out_copy_unmap;

	tx->callback = omap_nand_dma_callback;
	tx->callback_param = &info->comp;
	dmaengine_submit(tx);

	/* configure and start prefetch transfer */
	ret = gpmc_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy_unmap;

	init_completion(&info->comp);

	omap_start_dma(info->dma_ch);
	dma_async_issue_pending(info->dma);

	/* setup and start DMA using dma_addr */
	wait_for_completion(&info->comp);
@@ -427,11 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
	/* disable and stop the PFPW engine */
	gpmc_prefetch_reset(info->gpmc_cs);

	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
	return 0;

out_copy_unmap:
	dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
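Taken together, these hunks replace the private channel programming with the generic dmaengine slave sequence. A condensed sketch (not part of this commit) of that sequence, under the same assumptions as the new code — a channel obtained via dma_request_channel() and a caller-owned completion:

	static void xfer_done(void *data)
	{
		complete(data);
	}

	static int start_slave_xfer(struct dma_chan *chan, struct scatterlist *sg,
				    unsigned int nents, bool is_write,
				    struct completion *done)
	{
		struct dma_async_tx_descriptor *tx;

		tx = dmaengine_prep_slave_sg(chan, sg, nents,
				is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!tx)
			return -EIO;

		init_completion(done);
		tx->callback = xfer_done;
		tx->callback_param = done;
		dmaengine_submit(tx);

		/* nothing moves until the pending queue is kicked */
		dma_async_issue_pending(chan);
		return 0;
	}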
@@ -1164,6 +1153,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
	struct omap_nand_platform_data	*pdata;
	int				err;
	int				i, offset;
	dma_cap_mask_t			mask;
	unsigned			sig;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL) {
@@ -1244,18 +1235,31 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
		break;

	case NAND_OMAP_PREFETCH_DMA:
		err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
				omap_nand_dma_cb, &info->comp, &info->dma_ch);
		if (err < 0) {
			info->dma_ch = -1;
			dev_err(&pdev->dev, "DMA request failed!\n");
		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		sig = OMAP24XX_DMA_GPMC;
		info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
		if (!info->dma) {
			dev_err(&pdev->dev, "DMA engine request failed\n");
			err = -ENXIO;
			goto out_release_mem_region;
		} else {
			omap_set_dma_dest_burst_mode(info->dma_ch,
					OMAP_DMA_DATA_BURST_16);
			omap_set_dma_src_burst_mode(info->dma_ch,
					OMAP_DMA_DATA_BURST_16);
			struct dma_slave_config cfg;
			int rc;

			memset(&cfg, 0, sizeof(cfg));
			cfg.src_addr = info->phys_base;
			cfg.dst_addr = info->phys_base;
			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
			cfg.src_maxburst = 16;
			cfg.dst_maxburst = 16;
			rc = dmaengine_slave_config(info->dma, &cfg);
			if (rc) {
				dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
					rc);
				goto out_release_mem_region;
			}
			info->nand.read_buf = omap_read_buf_dma_pref;
			info->nand.write_buf = omap_write_buf_dma_pref;
		}
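The slave configuration can be done once at probe time because neither the FIFO address nor the burst size changes per transfer, and both directions target the same GPMC FIFO. A hypothetical helper (names are placeholders, not from this commit) showing the same one-off pattern:

	static int nand_dma_setup(struct dma_chan *chan, dma_addr_t fifo)
	{
		struct dma_slave_config cfg = {
			.src_addr	= fifo,	/* reads: device -> memory */
			.dst_addr	= fifo,	/* writes: memory -> device */
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst	= 16,
			.dst_maxburst	= 16,
		};

		return dmaengine_slave_config(chan, &cfg);
	}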
@@ -1358,6 +1362,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
	return 0;

out_release_mem_region:
	if (info->dma)
		dma_release_channel(info->dma);
	release_mem_region(info->phys_base, NAND_IO_SIZE);
out_free_info:
	kfree(info);
@@ -1373,8 +1379,8 @@ static int omap_nand_remove(struct platform_device *pdev)
	omap3_free_bch(&info->mtd);

	platform_set_drvdata(pdev, NULL);
	if (info->dma_ch != -1)
		omap_free_dma(info->dma_ch);
	if (info->dma)
		dma_release_channel(info->dma);

	if (info->gpmc_irq)
		free_irq(info->gpmc_irq, info);

drivers/spi/spi-omap2-mcspi.c

@@ -28,6 +28,8 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
@@ -39,7 +41,6 @@

#include <linux/spi/spi.h>

#include <plat/dma.h>
#include <plat/clock.h>
#include <plat/mcspi.h>
@@ -93,8 +94,8 @@

/* We have 2 DMA channels per CS, one for RX and one for TX */
struct omap2_mcspi_dma {
	int dma_tx_channel;
	int dma_rx_channel;
	struct dma_chan *dma_tx;
	struct dma_chan *dma_rx;

	int dma_tx_sync_dev;
	int dma_rx_sync_dev;
@@ -300,20 +301,46 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
	return 0;
}

static void omap2_mcspi_rx_callback(void *data)
{
	struct spi_device *spi = data;
	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];

	complete(&mcspi_dma->dma_rx_completion);

	/* We must disable the DMA RX request */
	omap2_mcspi_set_dma_req(spi, 1, 0);
}

static void omap2_mcspi_tx_callback(void *data)
{
	struct spi_device *spi = data;
	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];

	complete(&mcspi_dma->dma_tx_completion);

	/* We must disable the DMA TX request */
	omap2_mcspi_set_dma_req(spi, 0, 0);
}

static unsigned
omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_cs	*cs = spi->controller_state;
	struct omap2_mcspi_dma	*mcspi_dma;
	unsigned int		count, c;
	unsigned long		base, tx_reg, rx_reg;
	int			word_len, data_type, element_count;
	unsigned int		count;
	int			word_len, element_count;
	int			elements = 0;
	u32			l;
	u8			*rx;
	const u8		*tx;
	void __iomem		*chstat_reg;
	struct dma_slave_config	cfg;
	enum dma_slave_buswidth	width;
	unsigned		es;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
@@ -321,68 +348,92 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)

	chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;

	if (cs->word_len <= 8) {
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		es = 1;
	} else if (cs->word_len <= 16) {
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		es = 2;
	} else {
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		es = 4;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
	cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
	cfg.src_addr_width = width;
	cfg.dst_addr_width = width;
	cfg.src_maxburst = 1;
	cfg.dst_maxburst = 1;

	if (xfer->tx_buf && mcspi_dma->dma_tx) {
		struct dma_async_tx_descriptor *tx;
		struct scatterlist sg;

		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);

		sg_init_table(&sg, 1);
		sg_dma_address(&sg) = xfer->tx_dma;
		sg_dma_len(&sg) = xfer->len;

		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (tx) {
			tx->callback = omap2_mcspi_tx_callback;
			tx->callback_param = spi;
			dmaengine_submit(tx);
		} else {
			/* FIXME: fall back to PIO? */
		}
	}

	if (xfer->rx_buf && mcspi_dma->dma_rx) {
		struct dma_async_tx_descriptor *tx;
		struct scatterlist sg;
		size_t len = xfer->len - es;

		dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);

		if (l & OMAP2_MCSPI_CHCONF_TURBO)
			len -= es;

		sg_init_table(&sg, 1);
		sg_dma_address(&sg) = xfer->rx_dma;
		sg_dma_len(&sg) = len;

		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (tx) {
			tx->callback = omap2_mcspi_rx_callback;
			tx->callback_param = spi;
			dmaengine_submit(tx);
		} else {
			/* FIXME: fall back to PIO? */
		}
	}

	count = xfer->len;
	c = count;
	word_len = cs->word_len;

	base = cs->phys;
	tx_reg = base + OMAP2_MCSPI_TX0;
	rx_reg = base + OMAP2_MCSPI_RX0;
	rx = xfer->rx_buf;
	tx = xfer->tx_buf;

	if (word_len <= 8) {
		data_type = OMAP_DMA_DATA_TYPE_S8;
		element_count = count;
	} else if (word_len <= 16) {
		data_type = OMAP_DMA_DATA_TYPE_S16;
		element_count = count >> 1;
	} else /* word_len <= 32 */ {
		data_type = OMAP_DMA_DATA_TYPE_S32;
		element_count = count >> 2;
	}

	if (tx != NULL) {
		omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
				data_type, element_count, 1,
				OMAP_DMA_SYNC_ELEMENT,
				mcspi_dma->dma_tx_sync_dev, 0);

		omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				tx_reg, 0, 0);

		omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
				OMAP_DMA_AMODE_POST_INC,
				xfer->tx_dma, 0, 0);
	}

	if (rx != NULL) {
		elements = element_count - 1;
		if (l & OMAP2_MCSPI_CHCONF_TURBO)
			elements--;

		omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
				data_type, elements, 1,
				OMAP_DMA_SYNC_ELEMENT,
				mcspi_dma->dma_rx_sync_dev, 1);

		omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				rx_reg, 0, 0);

		omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
				OMAP_DMA_AMODE_POST_INC,
				xfer->rx_dma, 0, 0);
	}

	if (tx != NULL) {
		omap_start_dma(mcspi_dma->dma_tx_channel);
		dma_async_issue_pending(mcspi_dma->dma_tx);
		omap2_mcspi_set_dma_req(spi, 0, 1);
	}

	if (rx != NULL) {
		omap_start_dma(mcspi_dma->dma_rx_channel);
		dma_async_issue_pending(mcspi_dma->dma_rx);
		omap2_mcspi_set_dma_req(spi, 1, 1);
	}
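Unlike the NAND path, the SPI core hands this driver buffers that are already DMA-mapped (xfer->tx_dma / xfer->rx_dma), so the new code wraps the bare bus address in a single-entry scatterlist instead of calling dma_map_sg(). The pattern in isolation, with dma_addr/len standing in for the pre-mapped buffer:

	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = dma_addr;	/* already-mapped bus address */
	sg_dma_len(&sg) = len;
	/* &sg (nents = 1) can now be fed to dmaengine_prep_slave_sg() */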
@@ -408,7 +459,10 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
			DMA_FROM_DEVICE);
	omap2_mcspi_set_enable(spi, 0);

	elements = element_count - 1;

	if (l & OMAP2_MCSPI_CHCONF_TURBO) {
		elements--;

		if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
				& OMAP2_MCSPI_CHSTAT_RXS)) {
@@ -725,64 +779,38 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
	return 0;
}

static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
{
	struct spi_device *spi = data;
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);

	complete(&mcspi_dma->dma_rx_completion);

	/* We must disable the DMA RX request */
	omap2_mcspi_set_dma_req(spi, 1, 0);
}

static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
{
	struct spi_device *spi = data;
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);

	complete(&mcspi_dma->dma_tx_completion);

	/* We must disable the DMA TX request */
	omap2_mcspi_set_dma_req(spi, 0, 0);
}

static int omap2_mcspi_request_dma(struct spi_device *spi)
{
	struct spi_master	*master = spi->master;
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma	*mcspi_dma;
	dma_cap_mask_t mask;
	unsigned sig;

	mcspi = spi_master_get_devdata(master);
	mcspi_dma = mcspi->dma_channels + spi->chip_select;

	if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
			omap2_mcspi_dma_rx_callback, spi,
			&mcspi_dma->dma_rx_channel)) {
		dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
		return -EAGAIN;
	}

	if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
			omap2_mcspi_dma_tx_callback, spi,
			&mcspi_dma->dma_tx_channel)) {
		omap_free_dma(mcspi_dma->dma_rx_channel);
		mcspi_dma->dma_rx_channel = -1;
		dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
		return -EAGAIN;
	}

	init_completion(&mcspi_dma->dma_rx_completion);
	init_completion(&mcspi_dma->dma_tx_completion);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	sig = mcspi_dma->dma_rx_sync_dev;
	mcspi_dma->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
	if (!mcspi_dma->dma_rx) {
		dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n");
		return -EAGAIN;
	}

	sig = mcspi_dma->dma_tx_sync_dev;
	mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig);
	if (!mcspi_dma->dma_tx) {
		dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n");
		dma_release_channel(mcspi_dma->dma_rx);
		mcspi_dma->dma_rx = NULL;
		return -EAGAIN;
	}

	return 0;
}
@@ -814,8 +842,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
		list_add_tail(&cs->node, &ctx->cs);
	}

	if (mcspi_dma->dma_rx_channel == -1
			|| mcspi_dma->dma_tx_channel == -1) {
	if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
		ret = omap2_mcspi_request_dma(spi);
		if (ret < 0)
			return ret;
@@ -850,13 +877,13 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
	if (spi->chip_select < spi->master->num_chipselect) {
		mcspi_dma = &mcspi->dma_channels[spi->chip_select];

		if (mcspi_dma->dma_rx_channel != -1) {
			omap_free_dma(mcspi_dma->dma_rx_channel);
			mcspi_dma->dma_rx_channel = -1;
		if (mcspi_dma->dma_rx) {
			dma_release_channel(mcspi_dma->dma_rx);
			mcspi_dma->dma_rx = NULL;
		}
		if (mcspi_dma->dma_tx_channel != -1) {
			omap_free_dma(mcspi_dma->dma_tx_channel);
			mcspi_dma->dma_tx_channel = -1;
		if (mcspi_dma->dma_tx) {
			dma_release_channel(mcspi_dma->dma_tx);
			mcspi_dma->dma_tx = NULL;
		}
	}
@@ -1176,7 +1203,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
			break;
		}

		mcspi->dma_channels[i].dma_rx_channel = -1;
		mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
		sprintf(dma_ch_name, "tx%d", i);
		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,

@@ -1187,7 +1213,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev)
			break;
		}

		mcspi->dma_channels[i].dma_tx_channel = -1;
		mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
	}

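The request-line numbers later fed to omap_dma_filter_fn() come from named IORESOURCE_DMA platform resources ("rx0"/"tx0" per chip select); only the per-channel bookkeeping of the old API is dropped here. A sketch of the lookup, assuming the resource names this driver uses:

	char name[14];
	struct resource *res;

	sprintf(name, "rx%d", i);
	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, name);
	if (res)
		mcspi->dma_channels[i].dma_rx_sync_dev = res->start;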
include/linux/amba/pl08x.h

@@ -21,8 +21,9 @@
#include <linux/dmaengine.h>
#include <linux/interrupt.h>

struct pl08x_lli;
struct pl08x_driver_data;
struct pl08x_phy_chan;
struct pl08x_txd;

/* Bitmasks for selecting AHB ports for DMA transfers */
enum {
@@ -46,169 +47,28 @@ enum {
 * devices with static assignments
 * @muxval: a number usually used to poke into some mux regiser to
 * mux in the signal to this channel
 * @cctl_opt: default options for the channel control register
 * @cctl_memcpy: options for the channel control register for memcpy
 *  *** not used for slave channels ***
 * @addr: source/target address in physical memory for this DMA channel,
 * can be the address of a FIFO register for burst requests for example.
 * This can be left undefined if the PrimeCell API is used for configuring
 * this.
 * @circular_buffer: whether the buffer passed in is circular and
 * shall simply be looped round round (like a record baby round
 * round round round)
 * @single: the device connected to this channel will request single DMA
 * transfers, not bursts. (Bursts are default.)
 * @periph_buses: the device connected to this channel is accessible via
 * these buses (use PL08X_AHB1 | PL08X_AHB2).
 */
struct pl08x_channel_data {
	char *bus_id;
	const char *bus_id;
	int min_signal;
	int max_signal;
	u32 muxval;
	u32 cctl;
	u32 cctl_memcpy;
	dma_addr_t addr;
	bool circular_buffer;
	bool single;
	u8 periph_buses;
};

/**
 * Struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};

/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @lock: a lock to use when altering an instance of this struct
 * @signal: the physical signal (aka channel) serving this physical channel
 * right now
 * @serving: the virtual channel currently being served by this physical
 * channel
 * @locked: channel unavailable for the system, e.g. dedicated to secure
 * world
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	spinlock_t lock;
	int signal;
	struct pl08x_dma_chan *serving;
	bool locked;
};

/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @tx: async tx descriptor
 * @node: node for txd list for channels
 * @dsg_list: list of children sg's
 * @direction: direction of transfer
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 */
struct pl08x_txd {
	struct dma_async_tx_descriptor tx;
	struct list_head node;
	struct list_head dsg_list;
	enum dma_transfer_direction direction;
	dma_addr_t llis_bus;
	struct pl08x_lli *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd.  Other registers are in llis_va[0].
	 */
	u32 ccfg;
};

/**
 * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};

/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @chan: wrappped abstract channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @phychan_hold: if non-zero, hold on to the physical channel even if we
 * have no pending entries
 * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
 * @name: name of channel
 * @cd: channel platform data
 * @runtime_addr: address for RX/TX according to the runtime config
 * @runtime_direction: current direction of this channel according to
 * runtime config
 * @pend_list: queued transactions pending on this channel
 * @at: active transaction on this channel
 * @lock: a lock for this channel data
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave
 * channels. Fill with 'true' if peripheral should be flow controller. Direction
 * will be selected at Runtime.
 * @waiting: a TX descriptor on this channel which is waiting for a physical
 * channel to become available
 */
struct pl08x_dma_chan {
	struct dma_chan chan;
	struct pl08x_phy_chan *phychan;
	int phychan_hold;
	struct tasklet_struct tasklet;
	char *name;
	const struct pl08x_channel_data *cd;
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 src_cctl;
	u32 dst_cctl;
	enum dma_transfer_direction runtime_direction;
	struct list_head pend_list;
	struct pl08x_txd *at;
	spinlock_t lock;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	bool device_fc;
	struct pl08x_txd *waiting;
};

/**
 * struct pl08x_platform_data - the platform configuration for the PL08x
 * PrimeCells.
@@ -229,8 +89,8 @@ struct pl08x_platform_data {
	const struct pl08x_channel_data *slave_channels;
	unsigned int num_slave_channels;
	struct pl08x_channel_data memcpy_channel;
	int (*get_signal)(struct pl08x_dma_chan *);
	void (*put_signal)(struct pl08x_dma_chan *);
	int (*get_signal)(const struct pl08x_channel_data *);
	void (*put_signal)(const struct pl08x_channel_data *, int);
	u8 lli_buses;
	u8 mem_buses;
};
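With this change the signal mux callbacks no longer see the driver-internal pl08x_dma_chan; they receive the static channel data, and put_signal additionally gets back the signal that get_signal returned. A hypothetical board implementation of the new prototypes (names and logic are illustrative only):

	static int board_get_signal(const struct pl08x_channel_data *cd)
	{
		/* e.g. claim and return the request signal for this channel */
		return cd->min_signal;
	}

	static void board_put_signal(const struct pl08x_channel_data *cd, int signal)
	{
		/* release the signal claimed by board_get_signal() */
	}

	static struct pl08x_platform_data pl08x_pd = {
		.get_signal = board_get_signal,
		.put_signal = board_put_signal,
	};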
include/linux/omap-dma.h (new file, 22 lines)

@@ -0,0 +1,22 @@
/*
 * OMAP DMA Engine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __LINUX_OMAP_DMA_H
#define __LINUX_OMAP_DMA_H

struct dma_chan;

#if defined(CONFIG_DMA_OMAP) || defined(CONFIG_DMA_OMAP_MODULE)
bool omap_dma_filter_fn(struct dma_chan *, void *);
#else
static inline bool omap_dma_filter_fn(struct dma_chan *c, void *d)
{
	return false;
}
#endif

#endif
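The header deliberately provides a stub when the engine driver is not built: omap_dma_filter_fn() then always returns false, dma_request_channel() finds no channel, and consumers fall back to PIO without needing further #ifdefs. A sketch of a consumer using it (function name hypothetical):

	#include <linux/dmaengine.h>
	#include <linux/omap-dma.h>

	static struct dma_chan *get_omap_chan(unsigned int sig)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* sig is the OMAP DMA request line to match */
		return dma_request_channel(mask, omap_dma_filter_fn, &sig);
	}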