Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (30 commits)
  DMAENGINE: at_hdmac: locking fixlet
  DMAENGINE: pch_dma: kill another usage of __raw_{read|write}l
  dma: dmatest: fix potential sign bug
  ioat2: catch and recover from broken vtd configurations v6
  DMAENGINE: add runtime slave control to COH 901 318 v3
  DMAENGINE: add runtime slave config to DMA40 v3
  DMAENGINE: generic slave channel control v3
  dmaengine: Driver for Topcliff PCH DMA controller
  intel_mid: Add Mrst & Mfld DMA Drivers
  drivers/dma: Eliminate a NULL pointer dereference
  dma/timb_dma: compile warning on 32 bit
  DMAENGINE: ste_dma40: support older silicon
  DMAENGINE: ste_dma40: support disabling physical channels
  DMAENGINE: ste_dma40: no disabled phy channels on ux500
  DMAENGINE: ste_dma40: fix suspend bug
  DMAENGINE: ste_dma40: add DB8500 memcpy channels
  DMAENGINE: ste_dma40: no flow control on memcpy
  DMAENGINE: ste_dma40: arch updates for LCLA and LCPA
  DMAENGINE: ste_dma40: allocate LCLA dynamically
  DMAENGINE: ste_dma40: no premature stop
  ...

Fix up trivial conflicts in arch/arm/mach-ux500/devices-db8500.c
commit dcded10f6d
23 changed files with 3391 additions and 383 deletions
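One of the commits merged here, "DMAENGINE: generic slave channel control v3", introduces struct dma_slave_config and the DMA_SLAVE_CONFIG command used by several diffs below. As a minimal sketch (not from this diff) of how a dmaengine client of this era could use it -- note that the dmaengine_slave_config() wrapper did not exist yet, so clients called device_control() directly; the channel is assumed to have been obtained via dma_request_channel(), and the FIFO address and widths are placeholders:

#include <linux/dmaengine.h>

static int example_configure_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
    struct dma_slave_config config = {
        .direction      = DMA_FROM_DEVICE,          /* device -> memory */
        .src_addr       = fifo_addr,                /* peripheral FIFO */
        .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .src_maxburst   = 8,                        /* words per burst */
    };

    /* No dmaengine_slave_config() helper yet at this point in history */
    return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
                                        (unsigned long)&config);
}

Drivers that implement DMA_SLAVE_CONFIG (coh901318 below, for example) fold these values into their per-channel runtime state before the next prep_slave_sg() call.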
arch/arm/mach-ux500/devices-db8500.c

@@ -113,26 +113,21 @@ struct platform_device u8500_i2c4_device = {
 static struct resource dma40_resources[] = {
     [0] = {
         .start = U8500_DMA_BASE,
         .end = U8500_DMA_BASE + SZ_4K - 1,
         .flags = IORESOURCE_MEM,
         .name = "base",
     },
     [1] = {
         .start = U8500_DMA_LCPA_BASE,
-        .end = U8500_DMA_LCPA_BASE + SZ_4K - 1,
+        .end = U8500_DMA_LCPA_BASE + 2 * SZ_1K - 1,
         .flags = IORESOURCE_MEM,
         .name = "lcpa",
     },
-    [2] = {
-        .start = U8500_DMA_LCLA_BASE,
-        .end = U8500_DMA_LCLA_BASE + 16 * 1024 - 1,
-        .flags = IORESOURCE_MEM,
-        .name = "lcla",
-    },
-    [3] = {
+    [2] = {
         .start = IRQ_DB8500_DMA,
         .end = IRQ_DB8500_DMA,
-        .flags = IORESOURCE_IRQ}
+        .flags = IORESOURCE_IRQ,
+    }
 };

 /* Default configuration for physical memcpy */
@@ -145,11 +140,12 @@ struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
     .src_info.endianess = STEDMA40_LITTLE_ENDIAN,
     .src_info.data_width = STEDMA40_BYTE_WIDTH,
     .src_info.psize = STEDMA40_PSIZE_PHY_1,
+    .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

     .dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
     .dst_info.data_width = STEDMA40_BYTE_WIDTH,
     .dst_info.psize = STEDMA40_PSIZE_PHY_1,
+    .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 };

 /* Default configuration for logical memcpy */
 struct stedma40_chan_cfg dma40_memcpy_conf_log = {
@@ -162,11 +158,12 @@ struct stedma40_chan_cfg dma40_memcpy_conf_log = {
     .src_info.endianess = STEDMA40_LITTLE_ENDIAN,
     .src_info.data_width = STEDMA40_BYTE_WIDTH,
     .src_info.psize = STEDMA40_PSIZE_LOG_1,
+    .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

     .dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
     .dst_info.data_width = STEDMA40_BYTE_WIDTH,
     .dst_info.psize = STEDMA40_PSIZE_LOG_1,
+    .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 };

 /*
@@ -180,10 +177,12 @@ static const dma_addr_t dma40_rx_map[STEDMA40_NR_DEV];

 /* Reserved event lines for memcpy only */
 static int dma40_memcpy_event[] = {
+    STEDMA40_MEMCPY_TX_0,
     STEDMA40_MEMCPY_TX_1,
     STEDMA40_MEMCPY_TX_2,
     STEDMA40_MEMCPY_TX_3,
     STEDMA40_MEMCPY_TX_4,
+    STEDMA40_MEMCPY_TX_5,
 };

 static struct stedma40_platform_data dma40_plat_data = {

@@ -195,6 +194,7 @@ static struct stedma40_platform_data dma40_plat_data = {
     .memcpy_conf_phy = &dma40_memcpy_conf_phy,
     .memcpy_conf_log = &dma40_memcpy_conf_log,
     .llis_per_log = 8,
+    .disabled_channels = {-1},
 };

 struct platform_device u8500_dma40_device = {
@@ -213,4 +213,6 @@ void dma40_u8500ed_fixup(void)
     dma40_plat_data.memcpy_len = 0;
     dma40_resources[0].start = U8500_DMA_BASE_ED;
     dma40_resources[0].end = U8500_DMA_BASE_ED + SZ_4K - 1;
+    dma40_resources[1].start = U8500_DMA_LCPA_BASE_ED;
+    dma40_resources[1].end = U8500_DMA_LCPA_BASE_ED + 2 * SZ_1K - 1;
 }
@@ -15,9 +15,9 @@
 #define U8500_ESRAM_BANK2 (U8500_ESRAM_BANK1 + U8500_ESRAM_BANK_SIZE)
 #define U8500_ESRAM_BANK3 (U8500_ESRAM_BANK2 + U8500_ESRAM_BANK_SIZE)
 #define U8500_ESRAM_BANK4 (U8500_ESRAM_BANK3 + U8500_ESRAM_BANK_SIZE)
-/* Use bank 4 for DMA LCLA and LCPA */
-#define U8500_DMA_LCLA_BASE U8500_ESRAM_BANK4
-#define U8500_DMA_LCPA_BASE (U8500_ESRAM_BANK4 + 0x4000)
+/* Use bank 4 for DMA LCPA */
+#define U8500_DMA_LCPA_BASE U8500_ESRAM_BANK4
+#define U8500_DMA_LCPA_BASE_ED (U8500_ESRAM_BANK4 + 0x4000)

 #define U8500_PER3_BASE 0x80000000
 #define U8500_STM_BASE 0x80100000
@@ -136,7 +136,7 @@ enum dma_dest_dev_type {
     STEDMA40_DEV_CAC1_TX = 48,
     STEDMA40_DEV_CAC1_TX_HAC1_TX = 49,
     STEDMA40_DEV_HAC1_TX = 50,
-    STEDMA40_MEMXCPY_TX_0 = 51,
+    STEDMA40_MEMCPY_TX_0 = 51,
     STEDMA40_DEV_SLIM1_CH0_TX_HSI_TX_CH4 = 52,
     STEDMA40_DEV_SLIM1_CH1_TX_HSI_TX_CH5 = 53,
     STEDMA40_DEV_SLIM1_CH2_TX_HSI_TX_CH6 = 54,
@@ -148,7 +148,8 @@ struct stedma40_chan_cfg {
  * @memcpy_conf_phy: default configuration of physical channel memcpy
  * @memcpy_conf_log: default configuration of logical channel memcpy
  * @llis_per_log: number of max linked list items per logical channel
- *
+ * @disabled_channels: A vector, ending with -1, that marks physical channels
+ * that are for different reasons not available for the driver.
  */
 struct stedma40_platform_data {
     u32 dev_len;

@@ -159,6 +160,7 @@ struct stedma40_platform_data {
     struct stedma40_chan_cfg *memcpy_conf_phy;
     struct stedma40_chan_cfg *memcpy_conf_log;
     unsigned int llis_per_log;
+    int disabled_channels[8];
 };

 /**
drivers/dma/Kconfig

@@ -33,6 +33,19 @@ if DMADEVICES

 comment "DMA Devices"

+config INTEL_MID_DMAC
+    tristate "Intel MID DMA support for Peripheral DMA controllers"
+    depends on PCI && X86
+    select DMA_ENGINE
+    default n
+    help
+      Enable support for the Intel(R) MID DMA engine present
+      in Intel MID chipsets.
+
+      Say Y here if you have such a chipset.
+
+      If unsure, say N.
+
 config ASYNC_TX_DISABLE_CHANNEL_SWITCH
     bool
@@ -175,6 +188,13 @@ config PL330_DMA
       You need to provide platform specific settings via
       platform_data for a dma-pl330 device.

+config PCH_DMA
+    tristate "Topcliff PCH DMA support"
+    depends on PCI && X86
+    select DMA_ENGINE
+    help
+      Enable support for the Topcliff PCH DMA engine.
+
 config DMA_ENGINE
     bool
drivers/dma/Makefile

@@ -7,6 +7,7 @@ endif

 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
+obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
 obj-$(CONFIG_DMATEST) += dmatest.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioat/
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o

@@ -23,3 +24,4 @@ obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
+obj-$(CONFIG_PCH_DMA) += pch_dma.o
drivers/dma/at_hdmac.c

@@ -790,12 +790,12 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
     list_splice_init(&atchan->queue, &list);
     list_splice_init(&atchan->active_list, &list);

-    spin_unlock_bh(&atchan->lock);
-
     /* Flush all pending and queued descriptors */
     list_for_each_entry_safe(desc, _desc, &list, desc_node)
         atc_chain_complete(atchan, desc);

+    spin_unlock_bh(&atchan->lock);
+
     return 0;
 }
drivers/dma/coh901318.c

@@ -72,6 +72,9 @@ struct coh901318_chan {
     unsigned long nbr_active_done;
     unsigned long busy;

+    u32 runtime_addr;
+    u32 runtime_ctrl;
+
     struct coh901318_base *base;
 };

@@ -190,6 +193,9 @@ static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
 static inline dma_addr_t
 cohc_dev_addr(struct coh901318_chan *cohc)
 {
+    /* Runtime supplied address will take precedence */
+    if (cohc->runtime_addr)
+        return cohc->runtime_addr;
     return cohc->base->platform->chan_conf[cohc->id].dev_addr;
 }
@@ -1055,6 +1061,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,

     params = cohc_chan_param(cohc);
     config = params->config;
+    /*
+     * Add runtime-specific control on top, make
+     * sure the bits you set per peripheral channel are
+     * cleared in the default config from the platform.
+     */
+    ctrl_chained |= cohc->runtime_ctrl;
+    ctrl_last |= cohc->runtime_ctrl;
+    ctrl |= cohc->runtime_ctrl;

     if (direction == DMA_TO_DEVICE) {
         u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |

@@ -1113,6 +1127,12 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
     if (ret)
         goto err_lli_fill;

+    /*
+     * Set the default ctrl for the channel to the one from the lli,
+     * things may have changed due to odd buffer alignment etc.
+     */
+    coh901318_set_ctrl(cohc, lli->control);
+
     COH_DBG(coh901318_list_print(cohc, lli));

     /* Pick a descriptor to handle this transfer */
@@ -1175,6 +1195,146 @@ coh901318_issue_pending(struct dma_chan *chan)
     spin_unlock_irqrestore(&cohc->lock, flags);
 }

+/*
+ * Here we wrap in the runtime dma control interface
+ */
+struct burst_table {
+    int burst_8bit;
+    int burst_16bit;
+    int burst_32bit;
+    u32 reg;
+};
+
+static const struct burst_table burst_sizes[] = {
+    {
+        .burst_8bit = 64,
+        .burst_16bit = 32,
+        .burst_32bit = 16,
+        .reg = COH901318_CX_CTRL_BURST_COUNT_64_BYTES,
+    },
+    {
+        .burst_8bit = 48,
+        .burst_16bit = 24,
+        .burst_32bit = 12,
+        .reg = COH901318_CX_CTRL_BURST_COUNT_48_BYTES,
+    },
+    {
+        .burst_8bit = 32,
+        .burst_16bit = 16,
+        .burst_32bit = 8,
+        .reg = COH901318_CX_CTRL_BURST_COUNT_32_BYTES,
+    },
+    {
+        .burst_8bit = 16,
+        .burst_16bit = 8,
+        .burst_32bit = 4,
+        .reg = COH901318_CX_CTRL_BURST_COUNT_16_BYTES,
+    },
+    {
+        .burst_8bit = 8,
+        .burst_16bit = 4,
+        .burst_32bit = 2,
+        .reg = COH901318_CX_CTRL_BURST_COUNT_8_BYTES,
+    },
+    {
+        .burst_8bit = 4,
+        .burst_16bit = 2,
+        .burst_32bit = 1,
+        .reg = COH901318_CX_CTRL_BURST_COUNT_4_BYTES,
+    },
+    {
+        .burst_8bit = 2,
+        .burst_16bit = 1,
+        .burst_32bit = 0,
+        .reg = COH901318_CX_CTRL_BURST_COUNT_2_BYTES,
+    },
+    {
+        .burst_8bit = 1,
+        .burst_16bit = 0,
+        .burst_32bit = 0,
+        .reg = COH901318_CX_CTRL_BURST_COUNT_1_BYTE,
+    },
+};
+
+static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
+                                            struct dma_slave_config *config)
+{
+    struct coh901318_chan *cohc = to_coh901318_chan(chan);
+    dma_addr_t addr;
+    enum dma_slave_buswidth addr_width;
+    u32 maxburst;
+    u32 runtime_ctrl = 0;
+    int i = 0;
+
+    /* We only support mem to per or per to mem transfers */
+    if (config->direction == DMA_FROM_DEVICE) {
+        addr = config->src_addr;
+        addr_width = config->src_addr_width;
+        maxburst = config->src_maxburst;
+    } else if (config->direction == DMA_TO_DEVICE) {
+        addr = config->dst_addr;
+        addr_width = config->dst_addr_width;
+        maxburst = config->dst_maxburst;
+    } else {
+        dev_err(COHC_2_DEV(cohc), "illegal channel mode\n");
+        return;
+    }
+
+    dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n",
+            addr_width);
+    switch (addr_width) {
+    case DMA_SLAVE_BUSWIDTH_1_BYTE:
+        runtime_ctrl |=
+            COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS |
+            COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS;
+
+        while (i < ARRAY_SIZE(burst_sizes)) {
+            if (burst_sizes[i].burst_8bit <= maxburst)
+                break;
+            i++;
+        }
+
+        break;
+    case DMA_SLAVE_BUSWIDTH_2_BYTES:
+        runtime_ctrl |=
+            COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS |
+            COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS;
+
+        while (i < ARRAY_SIZE(burst_sizes)) {
+            if (burst_sizes[i].burst_16bit <= maxburst)
+                break;
+            i++;
+        }
+
+        break;
+    case DMA_SLAVE_BUSWIDTH_4_BYTES:
+        /* Direction doesn't matter here, it's 32/32 bits */
+        runtime_ctrl |=
+            COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
+            COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS;
+
+        while (i < ARRAY_SIZE(burst_sizes)) {
+            if (burst_sizes[i].burst_32bit <= maxburst)
+                break;
+            i++;
+        }
+
+        break;
+    default:
+        dev_err(COHC_2_DEV(cohc),
+            "bad runtimeconfig: alien address width\n");
+        return;
+    }
+
+    runtime_ctrl |= burst_sizes[i].reg;
+    dev_dbg(COHC_2_DEV(cohc),
+        "selected burst size %d bytes for address width %d bytes, maxburst %d\n",
+        burst_sizes[i].burst_8bit, addr_width, maxburst);
+
+    cohc->runtime_addr = addr;
+    cohc->runtime_ctrl = runtime_ctrl;
+}
+
 static int
 coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
           unsigned long arg)
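The burst table above is ordered from largest to smallest burst, so the while loops stop at the first entry that does not exceed the client's maxburst, falling back to the smallest setting otherwise. A standalone sketch of that lookup (not part of the driver; the reg values are placeholders for the COH901318_CX_CTRL_BURST_COUNT_* constants):

#include <stdio.h>

struct burst_entry { int burst_8bit; unsigned int reg; };

static const struct burst_entry table[] = {
    { 64, 0x7 }, { 48, 0x6 }, { 32, 0x5 }, { 16, 0x4 },
    {  8, 0x3 }, {  4, 0x2 }, {  2, 0x1 }, {  1, 0x0 },
};

static unsigned int pick_burst_reg(int maxburst)
{
    unsigned int i = 0;

    /* stop at the first burst size <= maxburst; last entry is fallback */
    while (i < sizeof(table) / sizeof(table[0]) - 1 &&
           table[i].burst_8bit > maxburst)
        i++;
    return table[i].reg;
}

int main(void)
{
    /* a client asking for maxburst = 20 gets the 16-beat setting */
    printf("reg = %#x\n", pick_burst_reg(20));
    return 0;
}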
@@ -1184,6 +1344,14 @@ coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
     struct coh901318_desc *cohd;
     void __iomem *virtbase = cohc->base->virtbase;

+    if (cmd == DMA_SLAVE_CONFIG) {
+        struct dma_slave_config *config =
+            (struct dma_slave_config *) arg;
+
+        coh901318_dma_set_runtimeconfig(chan, config);
+        return 0;
+    }
+
     if (cmd == DMA_PAUSE) {
         coh901318_pause(chan);
         return 0;

@@ -1240,6 +1408,7 @@ coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,

     return 0;
 }
+
 void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
              struct coh901318_base *base)
 {
drivers/dma/dmatest.c

@@ -540,7 +540,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
     struct dmatest_chan *dtc;
     struct dma_device *dma_dev = chan->device;
     unsigned int thread_count = 0;
-    unsigned int cnt;
+    int cnt;

     dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
     if (!dtc) {
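The dmatest fix changes cnt from unsigned int to int; assuming the nature of the bug matches the "fix potential sign bug" subject, cnt receives a function result that can be a negative error code, and an unsigned variable silently turns that into a huge positive count. A toy illustration:

#include <stdio.h>

static int add_threads(void) { return -12; /* pretend -ENOMEM */ }

int main(void)
{
    unsigned int ucnt = add_threads();
    int scnt = add_threads();

    printf("unsigned sees %u, signed sees %d\n", ucnt, scnt);
    /* prints: unsigned sees 4294967284, signed sees -12 */
    return 0;
}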
drivers/dma/intel_mid_dma.c -- new file, 1143 lines (diff suppressed because it is too large)

drivers/dma/intel_mid_dma_regs.h -- new file, 260 lines:
@@ -0,0 +1,260 @@
/*
 * intel_mid_dma_regs.h - Intel MID DMA Drivers
 *
 * Copyright (C) 2008-10 Intel Corp
 * Author: Vinod Koul <vinod.koul@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#ifndef __INTEL_MID_DMAC_REGS_H__
#define __INTEL_MID_DMAC_REGS_H__

#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/pci_ids.h>

#define INTEL_MID_DMA_DRIVER_VERSION "1.0.5"

#define REG_BIT0 0x00000001
#define REG_BIT8 0x00000100

#define UNMASK_INTR_REG(chan_num) \
    ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
#define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num)

#define ENABLE_CHANNEL(chan_num) \
    ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))

#define DESCS_PER_CHANNEL 16
/* DMA Registers */
/* registers associated with channel programming */
#define DMA_REG_SIZE 0x400
#define DMA_CH_SIZE 0x58

/* CH X REG = (DMA_CH_SIZE)*CH_NO + REG */
#define SAR 0x00 /* Source Address Register */
#define DAR 0x08 /* Destination Address Register */
#define CTL_LOW 0x18 /* Control Register */
#define CTL_HIGH 0x1C /* Control Register */
#define CFG_LOW 0x40 /* Configuration Register Low */
#define CFG_HIGH 0x44 /* Configuration Register high */

#define STATUS_TFR 0x2E8
#define STATUS_BLOCK 0x2F0
#define STATUS_ERR 0x308

#define RAW_TFR 0x2C0
#define RAW_BLOCK 0x2C8
#define RAW_ERR 0x2E0

#define MASK_TFR 0x310
#define MASK_BLOCK 0x318
#define MASK_SRC_TRAN 0x320
#define MASK_DST_TRAN 0x328
#define MASK_ERR 0x330

#define CLEAR_TFR 0x338
#define CLEAR_BLOCK 0x340
#define CLEAR_SRC_TRAN 0x348
#define CLEAR_DST_TRAN 0x350
#define CLEAR_ERR 0x358

#define INTR_STATUS 0x360
#define DMA_CFG 0x398
#define DMA_CHAN_EN 0x3A0

/* DMA channel control registers */
union intel_mid_dma_ctl_lo {
    struct {
        u32 int_en:1;       /* enable or disable interrupts, should be 0 */
        u32 dst_tr_width:3; /* destination transfer width, usually 32 bits = 010 */
        u32 src_tr_width:3; /* source transfer width, usually 32 bits = 010 */
        u32 dinc:2;         /* destination address inc/dec; for mem: INC = 00, peripheral no-INC = 11 */
        u32 sinc:2;         /* source address inc or dec, as above */
        u32 dst_msize:3;    /* destination burst transaction length, always = 16, i.e. 011 */
        u32 src_msize:3;    /* source burst transaction length, always = 16, i.e. 011 */
        u32 reser1:3;
        u32 tt_fc:3;        /* transfer type and flow controller:
                               M-M = 000, P-M = 010, M-P = 001 */
        u32 dms:2;          /* destination master select = 0 */
        u32 sms:2;          /* source master select = 0 */
        u32 llp_dst_en:1;   /* enable/disable destination LLP = 0 */
        u32 llp_src_en:1;   /* enable/disable source LLP = 0 */
        u32 reser2:3;
    } ctlx;
    u32 ctl_lo;
};

union intel_mid_dma_ctl_hi {
    struct {
        u32 block_ts:12;    /* block transfer size, configured by DMAC */
        u32 reser:20;
    } ctlx;
    u32 ctl_hi;
};

/* DMA channel configuration registers */
union intel_mid_dma_cfg_lo {
    struct {
        u32 reser1:5;
        u32 ch_prior:3;     /* channel priority = 0 */
        u32 ch_susp:1;      /* channel suspend = 0 */
        u32 fifo_empty:1;   /* FIFO empty or not, R bit = 0 */
        u32 hs_sel_dst:1;   /* select HW/SW destination handshaking: HW = 0, SW = 1 */
        u32 hs_sel_src:1;   /* select HW/SW src handshaking */
        u32 reser2:6;
        u32 dst_hs_pol:1;   /* dest HS interface polarity */
        u32 src_hs_pol:1;   /* src HS interface polarity */
        u32 max_abrst:10;   /* max AMBA burst len = 0 (no sw limit) */
        u32 reload_src:1;   /* auto reload src addr = 1 if src is peripheral */
        u32 reload_dst:1;   /* auto reload dest addr = 1 if dest is peripheral */
    } cfgx;
    u32 cfg_lo;
};

union intel_mid_dma_cfg_hi {
    struct {
        u32 fcmode:1;       /* flow control mode = 1 */
        u32 fifo_mode:1;    /* FIFO mode select = 1 */
        u32 protctl:3;      /* protection control = 0 */
        u32 rsvd:2;
        u32 src_per:4;      /* src hw HS interface */
        u32 dst_per:4;      /* dest hw HS interface */
        u32 reser2:17;
    } cfgx;
    u32 cfg_hi;
};

/**
 * struct intel_mid_dma_chan - internal mid representation of a DMA channel
 * @chan: dma_chan structure representation for mid chan
 * @ch_regs: MMIO register space pointer to channel register
 * @dma_base: MMIO register space DMA engine base pointer
 * @ch_id: DMA channel id
 * @lock: channel spinlock
 * @completed: DMA cookie
 * @active_list: current active descriptors
 * @queue: current queued up descriptors
 * @free_list: current free descriptors
 * @slave: dma slave structure
 * @descs_allocated: total number of descriptors allocated
 * @dma: dma device structure pointer
 * @in_use: bool representing if ch is in use or not
 */
struct intel_mid_dma_chan {
    struct dma_chan chan;
    void __iomem *ch_regs;
    void __iomem *dma_base;
    int ch_id;
    spinlock_t lock;
    dma_cookie_t completed;
    struct list_head active_list;
    struct list_head queue;
    struct list_head free_list;
    struct intel_mid_dma_slave *slave;
    unsigned int descs_allocated;
    struct middma_device *dma;
    bool in_use;
};

static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
                        struct dma_chan *chan)
{
    return container_of(chan, struct intel_mid_dma_chan, chan);
}

/**
 * struct middma_device - internal representation of a DMA device
 * @pdev: PCI device
 * @dma_base: MMIO register space pointer of DMA
 * @dma_pool: for allocating DMA descriptors
 * @common: embedded struct dma_device
 * @tasklet: dma tasklet for processing interrupts
 * @ch: per channel data
 * @pci_id: DMA device PCI ID
 * @intr_mask: Interrupt mask to be used
 * @mask_reg: MMIO register for peripheral mask
 * @chan_base: Base ch index (read from driver data)
 * @max_chan: max number of chs supported (from drv_data)
 * @block_size: Block size of DMA transfer supported (from drv_data)
 * @pimr_mask: MMIO register addr for peripheral interrupt (from drv_data)
 */
struct middma_device {
    struct pci_dev *pdev;
    void __iomem *dma_base;
    struct pci_pool *dma_pool;
    struct dma_device common;
    struct tasklet_struct tasklet;
    struct intel_mid_dma_chan ch[MAX_CHAN];
    unsigned int pci_id;
    unsigned int intr_mask;
    void __iomem *mask_reg;
    int chan_base;
    int max_chan;
    int block_size;
    unsigned int pimr_mask;
};

static inline struct middma_device *to_middma_device(struct dma_device *common)
{
    return container_of(common, struct middma_device, common);
}

struct intel_mid_dma_desc {
    void __iomem *block; /* ch ptr */
    struct list_head desc_node;
    struct dma_async_tx_descriptor txd;
    size_t len;
    dma_addr_t sar;
    dma_addr_t dar;
    u32 cfg_hi;
    u32 cfg_lo;
    u32 ctl_lo;
    u32 ctl_hi;
    dma_addr_t next;
    enum dma_data_direction dirn;
    enum dma_status status;
    enum intel_mid_dma_width width; /* width of DMA txn */
    enum intel_mid_dma_mode cfg_mode; /* mode configuration */
};

static inline int test_ch_en(void __iomem *dma, u32 ch_no)
{
    u32 en_reg = ioread32(dma + DMA_CHAN_EN);
    return (en_reg >> ch_no) & 0x1;
}

static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc
        (struct dma_async_tx_descriptor *txd)
{
    return container_of(txd, struct intel_mid_dma_desc, txd);
}
#endif /* __INTEL_MID_DMAC_REGS_H__ */
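The header documents the channel register layout as "CH X REG = (DMA_CH_SIZE)*CH_NO + REG": each channel occupies DMA_CH_SIZE (0x58) bytes, so a per-channel register lives at that stride from the engine base. A minimal standalone sketch of that computation (not from the driver):

#include <stdint.h>
#include <stdio.h>

#define DMA_CH_SIZE 0x58
#define SAR         0x00
#define DAR         0x08

static uintptr_t ch_reg(uintptr_t dma_base, unsigned int ch_no, unsigned int reg)
{
    return dma_base + DMA_CH_SIZE * ch_no + reg;
}

int main(void)
{
    /* channel 2's Destination Address Register sits at base + 0xb8 */
    printf("%#lx\n", (unsigned long)ch_reg(0, 2, DAR));
    return 0;
}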
drivers/dma/ioat/dma.h

@@ -97,6 +97,7 @@ struct ioat_chan_common {
     #define IOAT_RESET_PENDING 2
     #define IOAT_KOBJ_INIT_FAIL 3
     #define IOAT_RESHAPE_PENDING 4
+    #define IOAT_RUN 5
     struct timer_list timer;
     #define COMPLETION_TIMEOUT msecs_to_jiffies(100)
     #define IDLE_TIMEOUT msecs_to_jiffies(2000)
drivers/dma/ioat/dma_v2.c

@@ -287,7 +287,10 @@ void ioat2_timer_event(unsigned long data)
         chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
         dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
             __func__, chanerr);
-        BUG_ON(is_ioat_bug(chanerr));
+        if (test_bit(IOAT_RUN, &chan->state))
+            BUG_ON(is_ioat_bug(chanerr));
+        else /* we never got off the ground */
+            return;
     }

     /* if we haven't made progress and we have already
@@ -492,6 +495,8 @@ static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gf
     return ring;
 }

+void ioat2_free_chan_resources(struct dma_chan *c);
+
 /* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
  * @chan: channel to be initialized
  */

@@ -500,6 +505,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
     struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
     struct ioat_chan_common *chan = &ioat->base;
     struct ioat_ring_ent **ring;
+    u64 status;
     int order;

     /* have we already been set up? */

@@ -540,7 +546,20 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
     tasklet_enable(&chan->cleanup_task);
     ioat2_start_null_desc(ioat);

-    return 1 << ioat->alloc_order;
+    /* check that we got off the ground */
+    udelay(5);
+    status = ioat_chansts(chan);
+    if (is_ioat_active(status) || is_ioat_idle(status)) {
+        set_bit(IOAT_RUN, &chan->state);
+        return 1 << ioat->alloc_order;
+    } else {
+        u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+
+        dev_WARN(to_dev(chan),
+            "failed to start channel chanerr: %#x\n", chanerr);
+        ioat2_free_chan_resources(c);
+        return -EFAULT;
+    }
 }

 bool reshape_ring(struct ioat2_dma_chan *ioat, int order)

@@ -778,6 +797,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
     del_timer_sync(&chan->timer);
     device->cleanup_fn((unsigned long) c);
     device->reset_hw(chan);
+    clear_bit(IOAT_RUN, &chan->state);

     spin_lock_bh(&chan->cleanup_lock);
     spin_lock_bh(&ioat->prep_lock);
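The ioat2 change above adopts a start-then-verify pattern for broken VT-d configurations: kick the hardware, give it a moment, and roll back cleanly if the channel never leaves the halted state, instead of BUG()ing later in the timer path. A generic self-contained sketch of the pattern (the hardware accessors are stubs standing in for ioat_chansts() and friends):

#include <stdio.h>

enum hw_state { HW_HALTED, HW_ACTIVE, HW_IDLE };

/* stubbed hardware accessors for illustration only */
static enum hw_state hw = HW_HALTED;
static void start_hw(void) { hw = HW_ACTIVE; }
static enum hw_state read_hw_state(void) { return hw; }

static int start_and_verify(void)
{
    start_hw();
    /* a real driver would udelay() here to let the engine fetch
     * its first descriptor */
    if (read_hw_state() == HW_ACTIVE || read_hw_state() == HW_IDLE)
        return 0;  /* came up: mark the channel runnable */

    /* never got off the ground (e.g. broken VT-d config):
     * roll back resources instead of crashing later */
    return -1;
}

int main(void)
{
    printf("start_and_verify() = %d\n", start_and_verify());
    return 0;
}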
drivers/dma/ioat/dma_v3.c

@@ -361,7 +361,10 @@ static void ioat3_timer_event(unsigned long data)
         chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
         dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
             __func__, chanerr);
-        BUG_ON(is_ioat_bug(chanerr));
+        if (test_bit(IOAT_RUN, &chan->state))
+            BUG_ON(is_ioat_bug(chanerr));
+        else /* we never got off the ground */
+            return;
     }

     /* if we haven't made progress and we have already
drivers/dma/pch_dma.c -- new file, 957 lines:
@ -0,0 +1,957 @@
|
|||
/*
|
||||
* Topcliff PCH DMA controller driver
|
||||
* Copyright (c) 2010 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*/
|
||||
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pch_dma.h>
|
||||
|
||||
#define DRV_NAME "pch-dma"
|
||||
|
||||
#define DMA_CTL0_DISABLE 0x0
|
||||
#define DMA_CTL0_SG 0x1
|
||||
#define DMA_CTL0_ONESHOT 0x2
|
||||
#define DMA_CTL0_MODE_MASK_BITS 0x3
|
||||
#define DMA_CTL0_DIR_SHIFT_BITS 2
|
||||
#define DMA_CTL0_BITS_PER_CH 4
|
||||
|
||||
#define DMA_CTL2_START_SHIFT_BITS 8
|
||||
#define DMA_CTL2_IRQ_ENABLE_MASK ((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)
|
||||
|
||||
#define DMA_STATUS_IDLE 0x0
|
||||
#define DMA_STATUS_DESC_READ 0x1
|
||||
#define DMA_STATUS_WAIT 0x2
|
||||
#define DMA_STATUS_ACCESS 0x3
|
||||
#define DMA_STATUS_BITS_PER_CH 2
|
||||
#define DMA_STATUS_MASK_BITS 0x3
|
||||
#define DMA_STATUS_SHIFT_BITS 16
|
||||
#define DMA_STATUS_IRQ(x) (0x1 << (x))
|
||||
#define DMA_STATUS_ERR(x) (0x1 << ((x) + 8))
|
||||
|
||||
#define DMA_DESC_WIDTH_SHIFT_BITS 12
|
||||
#define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
|
||||
#define DMA_DESC_WIDTH_2_BYTES (0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
|
||||
#define DMA_DESC_WIDTH_4_BYTES (0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
|
||||
#define DMA_DESC_MAX_COUNT_1_BYTE 0x3FF
|
||||
#define DMA_DESC_MAX_COUNT_2_BYTES 0x3FF
|
||||
#define DMA_DESC_MAX_COUNT_4_BYTES 0x7FF
|
||||
#define DMA_DESC_END_WITHOUT_IRQ 0x0
|
||||
#define DMA_DESC_END_WITH_IRQ 0x1
|
||||
#define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2
|
||||
#define DMA_DESC_FOLLOW_WITH_IRQ 0x3
|
||||
|
||||
#define MAX_CHAN_NR 8
|
||||
|
||||
static unsigned int init_nr_desc_per_channel = 64;
|
||||
module_param(init_nr_desc_per_channel, uint, 0644);
|
||||
MODULE_PARM_DESC(init_nr_desc_per_channel,
|
||||
"initial descriptors per channel (default: 64)");
|
||||
|
||||
struct pch_dma_desc_regs {
|
||||
u32 dev_addr;
|
||||
u32 mem_addr;
|
||||
u32 size;
|
||||
u32 next;
|
||||
};
|
||||
|
||||
struct pch_dma_regs {
|
||||
u32 dma_ctl0;
|
||||
u32 dma_ctl1;
|
||||
u32 dma_ctl2;
|
||||
u32 reserved1;
|
||||
u32 dma_sts0;
|
||||
u32 dma_sts1;
|
||||
u32 reserved2;
|
||||
u32 reserved3;
|
||||
struct pch_dma_desc_regs desc[0];
|
||||
};
|
||||
|
||||
struct pch_dma_desc {
|
||||
struct pch_dma_desc_regs regs;
|
||||
struct dma_async_tx_descriptor txd;
|
||||
struct list_head desc_node;
|
||||
struct list_head tx_list;
|
||||
};
|
||||
|
||||
struct pch_dma_chan {
|
||||
struct dma_chan chan;
|
||||
void __iomem *membase;
|
||||
enum dma_data_direction dir;
|
||||
struct tasklet_struct tasklet;
|
||||
unsigned long err_status;
|
||||
|
||||
spinlock_t lock;
|
||||
|
||||
dma_cookie_t completed_cookie;
|
||||
struct list_head active_list;
|
||||
struct list_head queue;
|
||||
struct list_head free_list;
|
||||
unsigned int descs_allocated;
|
||||
};
|
||||
|
||||
#define PDC_DEV_ADDR 0x00
|
||||
#define PDC_MEM_ADDR 0x04
|
||||
#define PDC_SIZE 0x08
|
||||
#define PDC_NEXT 0x0C
|
||||
|
||||
#define channel_readl(pdc, name) \
|
||||
readl((pdc)->membase + PDC_##name)
|
||||
#define channel_writel(pdc, name, val) \
|
||||
writel((val), (pdc)->membase + PDC_##name)
|
||||
|
||||
struct pch_dma {
|
||||
struct dma_device dma;
|
||||
void __iomem *membase;
|
||||
struct pci_pool *pool;
|
||||
struct pch_dma_regs regs;
|
||||
struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
|
||||
struct pch_dma_chan channels[0];
|
||||
};
|
||||
|
||||
#define PCH_DMA_CTL0 0x00
|
||||
#define PCH_DMA_CTL1 0x04
|
||||
#define PCH_DMA_CTL2 0x08
|
||||
#define PCH_DMA_STS0 0x10
|
||||
#define PCH_DMA_STS1 0x14
|
||||
|
||||
#define dma_readl(pd, name) \
|
||||
readl((pd)->membase + PCH_DMA_##name)
|
||||
#define dma_writel(pd, name, val) \
|
||||
writel((val), (pd)->membase + PCH_DMA_##name)
|
||||
|
||||
static inline struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
|
||||
{
|
||||
return container_of(txd, struct pch_dma_desc, txd);
|
||||
}
|
||||
|
||||
static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
|
||||
{
|
||||
return container_of(chan, struct pch_dma_chan, chan);
|
||||
}
|
||||
|
||||
static inline struct pch_dma *to_pd(struct dma_device *ddev)
|
||||
{
|
||||
return container_of(ddev, struct pch_dma, dma);
|
||||
}
|
||||
|
||||
static inline struct device *chan2dev(struct dma_chan *chan)
|
||||
{
|
||||
return &chan->dev->device;
|
||||
}
|
||||
|
||||
static inline struct device *chan2parent(struct dma_chan *chan)
|
||||
{
|
||||
return chan->dev->device.parent;
|
||||
}
|
||||
|
||||
static inline struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
|
||||
{
|
||||
return list_first_entry(&pd_chan->active_list,
|
||||
struct pch_dma_desc, desc_node);
|
||||
}
|
||||
|
||||
static inline struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
|
||||
{
|
||||
return list_first_entry(&pd_chan->queue,
|
||||
struct pch_dma_desc, desc_node);
|
||||
}
|
||||
|
||||
static void pdc_enable_irq(struct dma_chan *chan, int enable)
|
||||
{
|
||||
struct pch_dma *pd = to_pd(chan->device);
|
||||
u32 val;
|
||||
|
||||
val = dma_readl(pd, CTL2);
|
||||
|
||||
if (enable)
|
||||
val |= 0x1 << chan->chan_id;
|
||||
else
|
||||
val &= ~(0x1 << chan->chan_id);
|
||||
|
||||
dma_writel(pd, CTL2, val);
|
||||
|
||||
dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
|
||||
chan->chan_id, val);
|
||||
}
|
||||
|
||||
static void pdc_set_dir(struct dma_chan *chan)
|
||||
{
|
||||
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
|
||||
struct pch_dma *pd = to_pd(chan->device);
|
||||
u32 val;
|
||||
|
||||
val = dma_readl(pd, CTL0);
|
||||
|
||||
if (pd_chan->dir == DMA_TO_DEVICE)
|
||||
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
|
||||
DMA_CTL0_DIR_SHIFT_BITS);
|
||||
else
|
||||
val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
|
||||
DMA_CTL0_DIR_SHIFT_BITS));
|
||||
|
||||
dma_writel(pd, CTL0, val);
|
||||
|
||||
dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
|
||||
chan->chan_id, val);
|
||||
}
|
||||
|
||||
static void pdc_set_mode(struct dma_chan *chan, u32 mode)
|
||||
{
|
||||
struct pch_dma *pd = to_pd(chan->device);
|
||||
u32 val;
|
||||
|
||||
val = dma_readl(pd, CTL0);
|
||||
|
||||
val &= ~(DMA_CTL0_MODE_MASK_BITS <<
|
||||
(DMA_CTL0_BITS_PER_CH * chan->chan_id));
|
||||
val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
|
||||
|
||||
dma_writel(pd, CTL0, val);
|
||||
|
||||
dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
|
||||
chan->chan_id, val);
|
||||
}
|
||||
|
||||
static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
|
||||
{
|
||||
struct pch_dma *pd = to_pd(pd_chan->chan.device);
|
||||
u32 val;
|
||||
|
||||
val = dma_readl(pd, STS0);
|
||||
return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
|
||||
DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
|
||||
}
|
||||
|
||||
static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
|
||||
{
|
||||
if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE)
|
||||
return true;
|
||||
else
|
||||
return false;
|
||||
}
|
||||
|
||||
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc)
|
||||
{
|
||||
struct pch_dma *pd = to_pd(pd_chan->chan.device);
|
||||
u32 val;
|
||||
|
||||
if (!pdc_is_idle(pd_chan)) {
|
||||
dev_err(chan2dev(&pd_chan->chan),
|
||||
"BUG: Attempt to start non-idle channel\n");
|
||||
return;
|
||||
}
|
||||
|
||||
channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
|
||||
channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
|
||||
channel_writel(pd_chan, SIZE, desc->regs.size);
|
||||
channel_writel(pd_chan, NEXT, desc->regs.next);
|
||||
|
||||
dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
|
||||
pd_chan->chan.chan_id, desc->regs.dev_addr);
|
||||
dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
|
||||
pd_chan->chan.chan_id, desc->regs.mem_addr);
|
||||
dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
|
||||
pd_chan->chan.chan_id, desc->regs.size);
|
||||
dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
|
||||
pd_chan->chan.chan_id, desc->regs.next);
|
||||
|
||||
if (list_empty(&desc->tx_list))
|
||||
pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
|
||||
else
|
||||
pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
|
||||
|
||||
val = dma_readl(pd, CTL2);
|
||||
val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id);
|
||||
dma_writel(pd, CTL2, val);
|
||||
}
|
||||
|
||||
static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
|
||||
struct pch_dma_desc *desc)
|
||||
{
|
||||
struct dma_async_tx_descriptor *txd = &desc->txd;
|
||||
dma_async_tx_callback callback = txd->callback;
|
||||
void *param = txd->callback_param;
|
||||
|
||||
list_splice_init(&desc->tx_list, &pd_chan->free_list);
|
||||
list_move(&desc->desc_node, &pd_chan->free_list);
|
||||
|
||||
if (callback)
|
||||
callback(param);
|
||||
}
|
||||
|
||||
static void pdc_complete_all(struct pch_dma_chan *pd_chan)
|
||||
{
|
||||
struct pch_dma_desc *desc, *_d;
|
||||
LIST_HEAD(list);
|
||||
|
||||
BUG_ON(!pdc_is_idle(pd_chan));
|
||||
|
||||
if (!list_empty(&pd_chan->queue))
|
||||
pdc_dostart(pd_chan, pdc_first_queued(pd_chan));
|
||||
|
||||
list_splice_init(&pd_chan->active_list, &list);
|
||||
list_splice_init(&pd_chan->queue, &pd_chan->active_list);
|
||||
|
||||
list_for_each_entry_safe(desc, _d, &list, desc_node)
|
||||
pdc_chain_complete(pd_chan, desc);
|
||||
}
|
||||
|
||||
static void pdc_handle_error(struct pch_dma_chan *pd_chan)
|
||||
{
|
||||
struct pch_dma_desc *bad_desc;
|
||||
|
||||
bad_desc = pdc_first_active(pd_chan);
|
||||
list_del(&bad_desc->desc_node);
|
||||
|
||||
list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);
|
||||
|
||||
if (!list_empty(&pd_chan->active_list))
|
||||
pdc_dostart(pd_chan, pdc_first_active(pd_chan));
|
||||
|
||||
dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
|
||||
dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
|
||||
bad_desc->txd.cookie);
|
||||
|
||||
pdc_chain_complete(pd_chan, bad_desc);
|
||||
}
|
||||
|
||||
static void pdc_advance_work(struct pch_dma_chan *pd_chan)
|
||||
{
|
||||
if (list_empty(&pd_chan->active_list) ||
|
||||
list_is_singular(&pd_chan->active_list)) {
|
||||
pdc_complete_all(pd_chan);
|
||||
} else {
|
||||
pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
|
||||
pdc_dostart(pd_chan, pdc_first_active(pd_chan));
|
||||
}
|
||||
}
|
||||
|
||||
static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
|
||||
struct pch_dma_desc *desc)
|
||||
{
|
||||
dma_cookie_t cookie = pd_chan->chan.cookie;
|
||||
|
||||
if (++cookie < 0)
|
||||
cookie = 1;
|
||||
|
||||
pd_chan->chan.cookie = cookie;
|
||||
desc->txd.cookie = cookie;
|
||||
|
||||
return cookie;
|
||||
}
|
||||
|
||||
static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
|
||||
{
|
||||
struct pch_dma_desc *desc = to_pd_desc(txd);
|
||||
struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
|
||||
dma_cookie_t cookie;
|
||||
|
||||
spin_lock_bh(&pd_chan->lock);
|
||||
cookie = pdc_assign_cookie(pd_chan, desc);
|
||||
|
||||
if (list_empty(&pd_chan->active_list)) {
|
||||
list_add_tail(&desc->desc_node, &pd_chan->active_list);
|
||||
pdc_dostart(pd_chan, desc);
|
||||
} else {
|
||||
list_add_tail(&desc->desc_node, &pd_chan->queue);
|
||||
}
|
||||
|
||||
spin_unlock_bh(&pd_chan->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
|
||||
{
|
||||
struct pch_dma_desc *desc = NULL;
|
||||
struct pch_dma *pd = to_pd(chan->device);
|
||||
dma_addr_t addr;
|
||||
|
||||
desc = pci_pool_alloc(pd->pool, GFP_KERNEL, &addr);
|
||||
if (desc) {
|
||||
memset(desc, 0, sizeof(struct pch_dma_desc));
|
||||
INIT_LIST_HEAD(&desc->tx_list);
|
||||
dma_async_tx_descriptor_init(&desc->txd, chan);
|
||||
desc->txd.tx_submit = pd_tx_submit;
|
||||
desc->txd.flags = DMA_CTRL_ACK;
|
||||
desc->txd.phys = addr;
|
||||
}
|
||||
|
||||
return desc;
|
||||
}
|
||||
|
||||
static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
|
||||
{
|
||||
struct pch_dma_desc *desc, *_d;
|
||||
struct pch_dma_desc *ret = NULL;
|
||||
int i;
|
||||
|
||||
spin_lock_bh(&pd_chan->lock);
|
||||
list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
|
||||
i++;
|
||||
if (async_tx_test_ack(&desc->txd)) {
|
||||
list_del(&desc->desc_node);
|
||||
ret = desc;
|
||||
break;
|
||||
}
|
||||
dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
|
||||
}
|
||||
spin_unlock_bh(&pd_chan->lock);
|
||||
dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
|
||||
|
||||
if (!ret) {
|
||||
ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
|
||||
if (ret) {
|
||||
spin_lock_bh(&pd_chan->lock);
|
||||
pd_chan->descs_allocated++;
|
||||
spin_unlock_bh(&pd_chan->lock);
|
||||
} else {
|
||||
dev_err(chan2dev(&pd_chan->chan),
|
||||
"failed to alloc desc\n");
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void pdc_desc_put(struct pch_dma_chan *pd_chan,
|
||||
struct pch_dma_desc *desc)
|
||||
{
|
||||
if (desc) {
|
||||
spin_lock_bh(&pd_chan->lock);
|
||||
list_splice_init(&desc->tx_list, &pd_chan->free_list);
|
||||
list_add(&desc->desc_node, &pd_chan->free_list);
|
||||
spin_unlock_bh(&pd_chan->lock);
|
||||
}
|
||||
}
|
||||
|
||||
static int pd_alloc_chan_resources(struct dma_chan *chan)
|
||||
{
|
||||
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
|
||||
struct pch_dma_desc *desc;
|
||||
LIST_HEAD(tmp_list);
|
||||
int i;
|
||||
|
||||
if (!pdc_is_idle(pd_chan)) {
|
||||
dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (!list_empty(&pd_chan->free_list))
|
||||
return pd_chan->descs_allocated;
|
||||
|
||||
for (i = 0; i < init_nr_desc_per_channel; i++) {
|
||||
desc = pdc_alloc_desc(chan, GFP_KERNEL);
|
||||
|
||||
if (!desc) {
|
||||
dev_warn(chan2dev(chan),
|
||||
"Only allocated %d initial descriptors\n", i);
|
||||
break;
|
||||
}
|
||||
|
||||
list_add_tail(&desc->desc_node, &tmp_list);
|
||||
}
|
||||
|
||||
spin_lock_bh(&pd_chan->lock);
|
||||
list_splice(&tmp_list, &pd_chan->free_list);
|
||||
pd_chan->descs_allocated = i;
|
||||
pd_chan->completed_cookie = chan->cookie = 1;
|
||||
spin_unlock_bh(&pd_chan->lock);
|
||||
|
||||
pdc_enable_irq(chan, 1);
|
||||
pdc_set_dir(chan);
|
||||
|
||||
return pd_chan->descs_allocated;
|
||||
}
|
||||
|
||||
static void pd_free_chan_resources(struct dma_chan *chan)
|
||||
{
|
||||
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
|
||||
struct pch_dma *pd = to_pd(chan->device);
|
||||
struct pch_dma_desc *desc, *_d;
|
||||
LIST_HEAD(tmp_list);
|
||||
|
||||
BUG_ON(!pdc_is_idle(pd_chan));
|
||||
BUG_ON(!list_empty(&pd_chan->active_list));
|
||||
BUG_ON(!list_empty(&pd_chan->queue));
|
||||
|
||||
spin_lock_bh(&pd_chan->lock);
|
||||
list_splice_init(&pd_chan->free_list, &tmp_list);
|
||||
pd_chan->descs_allocated = 0;
|
||||
spin_unlock_bh(&pd_chan->lock);
|
||||
|
||||
list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
|
||||
pci_pool_free(pd->pool, desc, desc->txd.phys);
|
||||
|
||||
pdc_enable_irq(chan, 0);
|
||||
}
|
||||
|
||||
static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
|
||||
struct dma_tx_state *txstate)
|
||||
{
|
||||
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
|
||||
dma_cookie_t last_used;
|
||||
dma_cookie_t last_completed;
|
||||
int ret;
|
||||
|
||||
spin_lock_bh(&pd_chan->lock);
|
||||
last_completed = pd_chan->completed_cookie;
|
||||
last_used = chan->cookie;
|
||||
spin_unlock_bh(&pd_chan->lock);
|
||||
|
||||
ret = dma_async_is_complete(cookie, last_completed, last_used);
|
||||
|
||||
dma_set_tx_state(txstate, last_completed, last_used, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void pd_issue_pending(struct dma_chan *chan)
|
||||
{
|
||||
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
|
||||
|
||||
if (pdc_is_idle(pd_chan)) {
|
||||
spin_lock_bh(&pd_chan->lock);
|
||||
pdc_advance_work(pd_chan);
|
||||
spin_unlock_bh(&pd_chan->lock);
|
||||
}
|
||||
}
|
||||
|
||||
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
|
||||
struct scatterlist *sgl, unsigned int sg_len,
|
||||
enum dma_data_direction direction, unsigned long flags)
|
||||
{
|
||||
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
|
||||
struct pch_dma_slave *pd_slave = chan->private;
|
||||
struct pch_dma_desc *first = NULL;
|
||||
struct pch_dma_desc *prev = NULL;
|
||||
struct pch_dma_desc *desc = NULL;
|
||||
struct scatterlist *sg;
|
||||
dma_addr_t reg;
|
||||
int i;
|
||||
|
||||
if (unlikely(!sg_len)) {
|
||||
dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (direction == DMA_FROM_DEVICE)
|
||||
reg = pd_slave->rx_reg;
|
||||
else if (direction == DMA_TO_DEVICE)
|
||||
reg = pd_slave->tx_reg;
|
||||
else
|
||||
return NULL;
|
||||
|
||||
for_each_sg(sgl, sg, sg_len, i) {
|
||||
desc = pdc_desc_get(pd_chan);
|
||||
|
||||
if (!desc)
|
||||
goto err_desc_get;
|
||||
|
||||
desc->regs.dev_addr = reg;
|
||||
desc->regs.mem_addr = sg_phys(sg);
|
||||
desc->regs.size = sg_dma_len(sg);
|
||||
desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;
|
||||
|
||||
switch (pd_slave->width) {
|
||||
case PCH_DMA_WIDTH_1_BYTE:
|
||||
if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
|
||||
goto err_desc_get;
|
||||
desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
|
||||
break;
|
||||
case PCH_DMA_WIDTH_2_BYTES:
|
||||
if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
|
||||
goto err_desc_get;
|
||||
desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
|
||||
break;
|
||||
case PCH_DMA_WIDTH_4_BYTES:
|
||||
if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
|
||||
goto err_desc_get;
|
||||
desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
|
||||
break;
|
||||
default:
|
||||
goto err_desc_get;
|
||||
}
|
||||
|
||||
|
||||
if (!first) {
|
||||
first = desc;
|
||||
} else {
|
||||
prev->regs.next |= desc->txd.phys;
|
||||
list_add_tail(&desc->desc_node, &first->tx_list);
|
||||
}
|
||||
|
||||
prev = desc;
|
||||
}
|
||||
|
||||
if (flags & DMA_PREP_INTERRUPT)
|
||||
desc->regs.next = DMA_DESC_END_WITH_IRQ;
|
||||
else
|
||||
desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;
|
||||
|
||||
first->txd.cookie = -EBUSY;
|
||||
desc->txd.flags = flags;
|
||||
|
||||
return &first->txd;
|
||||
|
||||
err_desc_get:
|
||||
dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
|
||||
pdc_desc_put(pd_chan, first);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
|
||||
struct pch_dma_desc *desc, *_d;
|
||||
LIST_HEAD(list);
|
||||
|
||||
if (cmd != DMA_TERMINATE_ALL)
|
||||
return -ENXIO;
|
||||
|
||||
spin_lock_bh(&pd_chan->lock);
|
||||
|
||||
pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
|
||||
|
||||
list_splice_init(&pd_chan->active_list, &list);
|
||||
list_splice_init(&pd_chan->queue, &list);
|
||||
|
||||
list_for_each_entry_safe(desc, _d, &list, desc_node)
|
||||
pdc_chain_complete(pd_chan, desc);
|
||||
|
||||
spin_unlock_bh(&pd_chan->lock);
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pdc_tasklet(unsigned long data)
|
||||
{
|
||||
struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
|
||||
|
||||
if (!pdc_is_idle(pd_chan)) {
|
||||
dev_err(chan2dev(&pd_chan->chan),
|
||||
"BUG: handle non-idle channel in tasklet\n");
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_bh(&pd_chan->lock);
|
||||
if (test_and_clear_bit(0, &pd_chan->err_status))
|
||||
pdc_handle_error(pd_chan);
|
||||
else
|
||||
pdc_advance_work(pd_chan);
|
||||
spin_unlock_bh(&pd_chan->lock);
|
||||
}
|
||||
|
||||
static irqreturn_t pd_irq(int irq, void *devid)
|
||||
{
|
||||
struct pch_dma *pd = (struct pch_dma *)devid;
|
||||
struct pch_dma_chan *pd_chan;
|
||||
u32 sts0;
|
||||
int i;
|
||||
int ret = IRQ_NONE;
|
||||
|
||||
sts0 = dma_readl(pd, STS0);
|
||||
|
||||
dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);
|
||||
|
||||
for (i = 0; i < pd->dma.chancnt; i++) {
|
||||
pd_chan = &pd->channels[i];
|
||||
|
||||
if (sts0 & DMA_STATUS_IRQ(i)) {
|
||||
if (sts0 & DMA_STATUS_ERR(i))
|
||||
set_bit(0, &pd_chan->err_status);
|
||||
|
||||
tasklet_schedule(&pd_chan->tasklet);
|
||||
ret = IRQ_HANDLED;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* clear interrupt bits in status register */
|
||||
dma_writel(pd, STS0, sts0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void pch_dma_save_regs(struct pch_dma *pd)
|
||||
{
|
||||
struct pch_dma_chan *pd_chan;
|
||||
struct dma_chan *chan, *_c;
|
||||
int i = 0;
|
||||
|
||||
pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
|
||||
pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
|
||||
pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
|
||||
|
||||
list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
|
||||
pd_chan = to_pd_chan(chan);
|
||||
|
||||
pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
|
||||
pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
|
||||
pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
|
||||
pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);
|
||||
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
static void pch_dma_restore_regs(struct pch_dma *pd)
|
||||
{
|
||||
struct pch_dma_chan *pd_chan;
|
||||
struct dma_chan *chan, *_c;
|
||||
int i = 0;
|
||||
|
||||
dma_writel(pd, CTL0, pd->regs.dma_ctl0);
|
||||
dma_writel(pd, CTL1, pd->regs.dma_ctl1);
|
||||
dma_writel(pd, CTL2, pd->regs.dma_ctl2);
|
||||
|
||||
list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
|
||||
pd_chan = to_pd_chan(chan);
|
||||
|
||||
channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
|
||||
channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
|
||||
channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
|
||||
channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);
|
||||
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
|
||||
{
|
||||
struct pch_dma *pd = pci_get_drvdata(pdev);
|
||||
|
||||
if (pd)
|
||||
pch_dma_save_regs(pd);
|
||||
|
||||
pci_save_state(pdev);
|
||||
pci_disable_device(pdev);
|
||||
pci_set_power_state(pdev, pci_choose_state(pdev, state));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pch_dma_resume(struct pci_dev *pdev)
|
||||
{
|
||||
struct pch_dma *pd = pci_get_drvdata(pdev);
|
||||
int err;
|
||||
|
||||
pci_set_power_state(pdev, PCI_D0);
|
||||
pci_restore_state(pdev);
|
||||
|
||||
err = pci_enable_device(pdev);
|
||||
if (err) {
|
||||
dev_dbg(&pdev->dev, "failed to enable device\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
if (pd)
|
||||
pch_dma_restore_regs(pd);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __devinit pch_dma_probe(struct pci_dev *pdev,
|
||||
const struct pci_device_id *id)
|
||||
{
|
||||
struct pch_dma *pd;
|
||||
struct pch_dma_regs *regs;
|
||||
unsigned int nr_channels;
|
||||
int err;
|
||||
int i;
|
||||
|
||||
nr_channels = id->driver_data;
|
||||
pd = kzalloc(sizeof(struct pch_dma)+
|
||||
sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL);
|
||||
if (!pd)
|
||||
return -ENOMEM;
|
||||
|
||||
pci_set_drvdata(pdev, pd);
|
||||
|
||||
err = pci_enable_device(pdev);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "Cannot enable PCI device\n");
|
||||
goto err_free_mem;
|
||||
}
|
||||
|
||||
if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
|
||||
dev_err(&pdev->dev, "Cannot find proper base address\n");
|
||||
goto err_disable_pdev;
|
||||
}
|
||||
|
||||
err = pci_request_regions(pdev, DRV_NAME);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
|
||||
goto err_disable_pdev;
|
||||
}
|
||||
|
||||
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "Cannot set proper DMA config\n");
|
||||
goto err_free_res;
|
||||
}
|
||||
|
||||
regs = pd->membase = pci_iomap(pdev, 1, 0);
|
||||
if (!pd->membase) {
|
||||
dev_err(&pdev->dev, "Cannot map MMIO registers\n");
|
||||
err = -ENOMEM;
|
||||
goto err_free_res;
|
||||
}
|
||||
|
||||
pci_set_master(pdev);
|
||||
|
||||
err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "Failed to request IRQ\n");
|
||||
goto err_iounmap;
|
||||
}
|
||||
|
||||
pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
|
||||
sizeof(struct pch_dma_desc), 4, 0);
|
||||
if (!pd->pool) {
|
||||
dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
|
||||
err = -ENOMEM;
|
||||
		goto err_free_irq;
	}

	pd->dma.dev = &pdev->dev;
	pd->dma.chancnt = nr_channels;

	INIT_LIST_HEAD(&pd->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		pd_chan->chan.cookie = 1;
		pd_chan->chan.chan_id = i;

		pd_chan->membase = &regs->desc[i];

		pd_chan->dir = (i % 2) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_init(&pd_chan->tasklet, pdc_tasklet,
			     (unsigned long)pd_chan);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_control = pd_device_control;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	pci_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	return err;
}

static void __devexit pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_disable(&pd_chan->tasklet);
			tasklet_kill(&pd_chan->tasklet);
		}

		pci_pool_destroy(pd->pool);
		free_irq(pdev->irq, pd);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}

/* PCI Device ID of DMA device */
#define PCI_DEVICE_ID_PCH_DMA_8CH	0x8810
#define PCI_DEVICE_ID_PCH_DMA_4CH	0x8815

static const struct pci_device_id pch_dma_id_table[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 },
};

static struct pci_driver pch_dma_driver = {
	.name		= DRV_NAME,
	.id_table	= pch_dma_id_table,
	.probe		= pch_dma_probe,
	.remove		= __devexit_p(pch_dma_remove),
#ifdef CONFIG_PM
	.suspend	= pch_dma_suspend,
	.resume		= pch_dma_resume,
#endif
};

static int __init pch_dma_init(void)
{
	return pci_register_driver(&pch_dma_driver);
}

static void __exit pch_dma_exit(void)
{
	pci_unregister_driver(&pch_dma_driver);
}

module_init(pch_dma_init);
module_exit(pch_dma_exit);

MODULE_DESCRIPTION("Topcliff PCH DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
File diff suppressed because it is too large
@@ -315,11 +315,8 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
 	int total_size = 0;
 	struct scatterlist *current_sg = sg;
 	int i;
-	u32 next_lli_off_dst;
-	u32 next_lli_off_src;
-
-	next_lli_off_src = 0;
-	next_lli_off_dst = 0;
+	u32 next_lli_off_dst = 0;
+	u32 next_lli_off_src = 0;
 
 	for_each_sg(sg, current_sg, sg_len, i) {
 		total_size += sg_dma_len(current_sg);
@@ -351,7 +348,7 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
 					 sg_dma_len(current_sg),
 					 next_lli_off_src,
 					 lcsp->lcsp1, src_data_width,
-					 term_int && !next_lli_off_src,
+					 false,
 					 true);
 			d40_log_fill_lli(&lli->dst[i],
 					 dev_addr,
@@ -375,7 +372,7 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
 					 sg_dma_len(current_sg),
 					 next_lli_off_src,
 					 lcsp->lcsp1, src_data_width,
-					 term_int && !next_lli_off_src,
+					 false,
 					 false);
 		}
 	}
@@ -423,32 +420,35 @@ int d40_log_sg_to_lli(int lcla_id,
 	return total_size;
 }
 
-void d40_log_lli_write(struct d40_log_lli_full *lcpa,
+int d40_log_lli_write(struct d40_log_lli_full *lcpa,
 		       struct d40_log_lli *lcla_src,
 		       struct d40_log_lli *lcla_dst,
 		       struct d40_log_lli *lli_dst,
 		       struct d40_log_lli *lli_src,
 		       int llis_per_log)
 {
-	u32 slos = 0;
-	u32 dlos = 0;
+	u32 slos;
+	u32 dlos;
 	int i;
 
-	lcpa->lcsp0 = lli_src->lcsp02;
-	lcpa->lcsp1 = lli_src->lcsp13;
-	lcpa->lcsp2 = lli_dst->lcsp02;
-	lcpa->lcsp3 = lli_dst->lcsp13;
+	writel(lli_src->lcsp02, &lcpa->lcsp0);
+	writel(lli_src->lcsp13, &lcpa->lcsp1);
+	writel(lli_dst->lcsp02, &lcpa->lcsp2);
+	writel(lli_dst->lcsp13, &lcpa->lcsp3);
 
 	slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
 	dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
 
 	for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
-		writel(lli_src[i+1].lcsp02, &lcla_src[i].lcsp02);
-		writel(lli_src[i+1].lcsp13, &lcla_src[i].lcsp13);
-		writel(lli_dst[i+1].lcsp02, &lcla_dst[i].lcsp02);
-		writel(lli_dst[i+1].lcsp13, &lcla_dst[i].lcsp13);
+		writel(lli_src[i + 1].lcsp02, &lcla_src[i].lcsp02);
+		writel(lli_src[i + 1].lcsp13, &lcla_src[i].lcsp13);
+		writel(lli_dst[i + 1].lcsp02, &lcla_dst[i].lcsp02);
+		writel(lli_dst[i + 1].lcsp13, &lcla_dst[i].lcsp13);
 
-		slos = lli_src[i+1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
-		dlos = lli_dst[i+1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
+		slos = lli_src[i + 1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
+		dlos = lli_dst[i + 1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
 	}
 
+	return i;
+
 }
@@ -13,6 +13,9 @@
 #define D40_DREG_PCDELTA (8 * 4)
 #define D40_LLI_ALIGN 16 /* LLI alignment must be 16 bytes. */
 
+#define D40_LCPA_CHAN_SIZE 32
+#define D40_LCPA_CHAN_DST_DELTA 16
+
 #define D40_TYPE_TO_GROUP(type) (type / 16)
 #define D40_TYPE_TO_EVENT(type) (type % 16)
 
@@ -336,12 +339,12 @@ int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
 		      bool term_int, dma_addr_t dev_addr, int max_len,
 		      int llis_per_log);
 
-void d40_log_lli_write(struct d40_log_lli_full *lcpa,
-		       struct d40_log_lli *lcla_src,
-		       struct d40_log_lli *lcla_dst,
-		       struct d40_log_lli *lli_dst,
-		       struct d40_log_lli *lli_src,
-		       int llis_per_log);
+int d40_log_lli_write(struct d40_log_lli_full *lcpa,
+		      struct d40_log_lli *lcla_src,
+		      struct d40_log_lli *lcla_dst,
+		      struct d40_log_lli *lli_dst,
+		      struct d40_log_lli *lli_src,
+		      int llis_per_log);
 
 int d40_log_sg_to_lli(int lcla_id,
 		      struct scatterlist *sg,
@@ -200,8 +200,8 @@ static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc,
 		return -EINVAL;
 	}
 
-	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n",
-		dma_desc, (void *)sg_dma_address(sg));
+	dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n",
+		dma_desc, (unsigned long long)sg_dma_address(sg));
 
 	dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff;
 	dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff;
@@ -382,7 +382,7 @@ static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
 	td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL);
 	if (!td_desc) {
 		dev_err(chan2dev(chan), "Failed to alloc descriptor\n");
-		goto err;
+		goto out;
 	}
 
 	td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE;
@@ -410,7 +410,7 @@ static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan)
 err:
 	kfree(td_desc->desc_list);
 	kfree(td_desc);
-
+out:
 	return NULL;
 
 }
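The `goto out` change above is the whole of the NULL-dereference fix: when the kzalloc() of td_desc itself fails, jumping to the old `err` label would have called kfree(td_desc->desc_list) through a NULL pointer. A minimal sketch of the pattern, with invented names (struct foo and alloc_foo are hypothetical, not driver code):

#include <linux/slab.h>

struct foo {			/* hypothetical example type */
	void *buf;
};

static struct foo *alloc_foo(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		goto out;	/* not "goto err": f is NULL here */

	f->buf = kmalloc(64, GFP_KERNEL);
	if (!f->buf)
		goto err;

	return f;

err:
	kfree(f->buf);		/* f is valid; kfree(NULL) is a no-op */
	kfree(f);
out:
	return NULL;
}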
@@ -3030,6 +3030,34 @@ static void __init iommu_exit_mempool(void)
 
 }
 
+static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
+{
+	struct dmar_drhd_unit *drhd;
+	u32 vtbar;
+	int rc;
+
+	/* We know that this device on this chipset has its own IOMMU.
+	 * If we find it under a different IOMMU, then the BIOS is lying
+	 * to us. Hope that the IOMMU for this device is actually
+	 * disabled, and it needs no translation...
+	 */
+	rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
+	if (rc) {
+		/* "can't" happen */
+		dev_info(&pdev->dev, "failed to run vt-d quirk\n");
+		return;
+	}
+	vtbar &= 0xffff0000;
+
+	/* we know that this iommu should be at offset 0xa000 from vtbar */
+	drhd = dmar_find_matched_drhd_unit(pdev);
+	if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
+			    TAINT_FIRMWARE_WORKAROUND,
+			    "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
+		pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
+
 static void __init init_no_remapping_devices(void)
 {
 	struct dmar_drhd_unit *drhd;
@@ -114,11 +114,17 @@ enum dma_ctrl_flags {
  * @DMA_TERMINATE_ALL: terminate all ongoing transfers
  * @DMA_PAUSE: pause ongoing transfers
  * @DMA_RESUME: resume paused transfer
+ * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers
+ * that need to runtime reconfigure the slave channels (as opposed to passing
+ * configuration data in statically from the platform). An additional
+ * argument of struct dma_slave_config must be passed in with this
+ * command.
  */
 enum dma_ctrl_cmd {
 	DMA_TERMINATE_ALL,
 	DMA_PAUSE,
 	DMA_RESUME,
+	DMA_SLAVE_CONFIG,
 };
 
 /**
@@ -199,6 +205,71 @@ struct dma_chan_dev {
 	atomic_t *idr_ref;
 };
 
+/**
+ * enum dma_slave_buswidth - defines bus width of the DMA slave
+ * device, source or target buses
+ */
+enum dma_slave_buswidth {
+	DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
+	DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
+	DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
+	DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
+	DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
+};
+
+/**
+ * struct dma_slave_config - dma slave channel runtime config
+ * @direction: whether the data shall go in or out on this slave
+ * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are
+ * legal values, DMA_BIDIRECTIONAL is not acceptable since we
+ * need to differentiate source and target addresses.
+ * @src_addr: this is the physical address where DMA slave data
+ * should be read (RX), if the source is memory this argument is
+ * ignored.
+ * @dst_addr: this is the physical address where DMA slave data
+ * should be written (TX), if the destination is memory this argument
+ * is ignored.
+ * @src_addr_width: this is the width in bytes of the source (RX)
+ * register where DMA data shall be read. If the source
+ * is memory this may be ignored depending on architecture.
+ * Legal values: 1, 2, 4, 8.
+ * @dst_addr_width: same as src_addr_width but for destination
+ * target (TX) mutatis mutandis.
+ * @src_maxburst: the maximum number of words (note: words, as in
+ * units of the src_addr_width member, not bytes) that can be sent
+ * in one burst to the device. Typically something like half the
+ * FIFO depth on I/O peripherals so you don't overflow it. This
+ * may or may not be applicable on memory sources.
+ * @dst_maxburst: same as src_maxburst but for destination target
+ * mutatis mutandis.
+ *
+ * This struct is passed in as configuration data to a DMA engine
+ * in order to set up a certain channel for DMA transport at runtime.
+ * The DMA device/engine has to provide support for an additional
+ * command in the channel config interface, DMA_SLAVE_CONFIG
+ * and this struct will then be passed in as an argument to the
+ * DMA engine device_control() function.
+ *
+ * The rationale for adding configuration information to this struct
+ * is as follows: if it is likely that most DMA slave controllers in
+ * the world will support the configuration option, then make it
+ * generic. If not: if it is fixed so that it can be sent in statically
+ * from the platform data, then prefer to do that. Else, if it is
+ * neither fixed at runtime, nor generic enough (such as bus mastership
+ * on some CPU family and whatnot) then create a custom slave config
+ * struct and pass that, then make this config a member of that
+ * struct, if applicable.
+ */
+struct dma_slave_config {
+	enum dma_data_direction direction;
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	enum dma_slave_buswidth src_addr_width;
+	enum dma_slave_buswidth dst_addr_width;
+	u32 src_maxburst;
+	u32 dst_maxburst;
+};
 
 static inline const char *dma_chan_name(struct dma_chan *chan)
 {
 	return dev_name(&chan->dev->device);
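As a usage sketch only (not part of this diff): a driver that has obtained a slave channel could reconfigure it at runtime roughly as below, passing the struct through the new DMA_SLAVE_CONFIG command via device_control(), as the kernel-doc above describes. The peripheral FIFO address and burst size are invented example values.

#include <linux/dmaengine.h>

/* Hypothetical helper: configure "chan" for device-to-memory (RX)
 * transfers. EXAMPLE_RX_FIFO and the burst size are made-up values.
 */
#define EXAMPLE_RX_FIFO	0x80001000

static int example_slave_config(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_FROM_DEVICE,
		.src_addr	= EXAMPLE_RX_FIFO,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,	/* e.g. half a 16-word FIFO */
	};

	/* The struct is the argument accompanying DMA_SLAVE_CONFIG */
	return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					    (unsigned long)&cfg);
}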
include/linux/intel_mid_dma.h (new file, 86 lines)

@@ -0,0 +1,86 @@
/*
 *  intel_mid_dma.h - Intel MID DMA Drivers
 *
 *  Copyright (C) 2008-10 Intel Corp
 *  Author: Vinod Koul <vinod.koul@intel.com>
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *
 */
#ifndef __INTEL_MID_DMA_H__
#define __INTEL_MID_DMA_H__

#include <linux/dmaengine.h>

/* DMA transaction width; src and dst width must be the same.
 * The DMA length must be width aligned: for 32-bit width the
 * length must be 32-bit (4 byte) aligned.
 */
enum intel_mid_dma_width {
	LNW_DMA_WIDTH_8BIT = 0x0,
	LNW_DMA_WIDTH_16BIT = 0x1,
	LNW_DMA_WIDTH_32BIT = 0x2,
};

/* DMA mode configurations */
enum intel_mid_dma_mode {
	LNW_DMA_PER_TO_MEM = 0,	/* peripheral to memory configuration */
	LNW_DMA_MEM_TO_PER,	/* memory to peripheral configuration */
	LNW_DMA_MEM_TO_MEM,	/* mem to mem config (testing only) */
};

/* DMA handshaking */
enum intel_mid_dma_hs_mode {
	LNW_DMA_HW_HS = 0,	/* HW handshaking only */
	LNW_DMA_SW_HS = 1,	/* SW handshaking, not recommended */
};

/* Burst size configuration */
enum intel_mid_dma_msize {
	LNW_DMA_MSIZE_1 = 0x0,
	LNW_DMA_MSIZE_4 = 0x1,
	LNW_DMA_MSIZE_8 = 0x2,
	LNW_DMA_MSIZE_16 = 0x3,
	LNW_DMA_MSIZE_32 = 0x4,
	LNW_DMA_MSIZE_64 = 0x5,
};

/**
 * struct intel_mid_dma_slave - DMA slave structure
 *
 * @dirn: DMA transfer direction
 * @src_width: tx register width
 * @dst_width: rx register width
 * @hs_mode: HW/SW handshaking mode
 * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem)
 * @src_msize: source DMA burst size
 * @dst_msize: destination DMA burst size
 * @device_instance: DMA peripheral device instance; multiple
 * peripheral devices can be connected to a single DMAC
 */
struct intel_mid_dma_slave {
	enum dma_data_direction	dirn;
	enum intel_mid_dma_width src_width;	/* width of DMA src txn */
	enum intel_mid_dma_width dst_width;	/* width of DMA dst txn */
	enum intel_mid_dma_hs_mode hs_mode;	/* handshaking */
	enum intel_mid_dma_mode	cfg_mode;	/* mode configuration */
	enum intel_mid_dma_msize src_msize;	/* size of src burst */
	enum intel_mid_dma_msize dst_msize;	/* size of dst burst */
	unsigned int		device_instance; /* 0, 1 for peripheral instance */
};

#endif /*__INTEL_MID_DMA_H__*/
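For illustration only (not taken from the drivers in this merge), a client of the Intel MID DMA driver might fill in this slave structure as follows; every field value here is an invented example:

#include <linux/intel_mid_dma.h>

/* Invented example: 32-bit memory-to-peripheral transfers with HW
 * handshaking and 16-word bursts on peripheral instance 0.
 */
static struct intel_mid_dma_slave example_mid_slave = {
	.dirn		= DMA_TO_DEVICE,
	.src_width	= LNW_DMA_WIDTH_32BIT,
	.dst_width	= LNW_DMA_WIDTH_32BIT,
	.hs_mode	= LNW_DMA_HW_HS,
	.cfg_mode	= LNW_DMA_MEM_TO_PER,
	.src_msize	= LNW_DMA_MSIZE_16,
	.dst_msize	= LNW_DMA_MSIZE_16,
	.device_instance = 0,
};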
include/linux/pch_dma.h (new file, 37 lines)

@@ -0,0 +1,37 @@
/*
 * Copyright (c) 2010 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef PCH_DMA_H
#define PCH_DMA_H

#include <linux/dmaengine.h>

enum pch_dma_width {
	PCH_DMA_WIDTH_1_BYTE,
	PCH_DMA_WIDTH_2_BYTES,
	PCH_DMA_WIDTH_4_BYTES,
};

struct pch_dma_slave {
	struct device		*dma_dev;
	unsigned int		chan_id;
	dma_addr_t		tx_reg;
	dma_addr_t		rx_reg;
	enum pch_dma_width	width;
};

#endif
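A hedged sketch of how a client might hand this slave data to the pch_dma driver: the usual dmaengine pattern of this era is a filter function that attaches the platform data through chan->private before the channel is allocated. The function names below are invented for the example:

#include <linux/dmaengine.h>
#include <linux/pch_dma.h>

/* Invented filter: accept any free DMA_SLAVE channel and attach the
 * pch_dma_slave data so the driver can read it at allocation time.
 */
static bool example_pch_filter(struct dma_chan *chan, void *slave)
{
	chan->private = slave;
	return true;
}

static struct dma_chan *example_request_channel(struct pch_dma_slave *pd_slave)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, example_pch_filter, pd_slave);
}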