Merge branches 'defcfg', 'drivers' and 'cyberpro-next' of master.kernel.org:/home/rmk/linux-2.6-arm

* 'defcfg' of master.kernel.org:/home/rmk/linux-2.6-arm:
  ARM: 6647/1: add Versatile Express defconfig
  ARM: 6644/1: mach-ux500: update the U8500 defconfig

* 'drivers' of master.kernel.org:/home/rmk/linux-2.6-arm:
  ARM: 6764/1: pl011: factor out FIFO to TTY code
  ARM: 6763/1: pl011: add optional RX DMA to PL011 v2
  ARM: 6758/1: amba: support pm ops
  ARM: amba: make amba_driver id_table const
  ARM: amba: make internal ID table handling const
  ARM: amba: make probe() functions take const id tables
  ARM: 6662/1: amba: make amba_bustype non-static
  ARM: mmci: add dmaengine-based DMA support
  ARM: mmci: no need for separate host->data_xfered
  ARM: mmci: avoid unnecessary switch to data available PIO interrupts
  ARM: mmci: no need to call flush_dcache_page() with sg_miter API
  ARM: mmci: avoid reporting too many completed bytes on fifo overrun
  ALSA: AACI: make fifo variables more explanatory
  ALSA: AACI: no need to call snd_pcm_period_elapsed() for each period
  ALSA: AACI: use snd_pcm_lib_period_bytes()
  ALSA: AACI: clean up AACI announcement printk
  ALSA: AACI: fix channel mask selection
  ALSA: AACI: fix number of channels for record
  ALSA: AACI: fix multiple IRQ claiming

* 'cyberpro-next' of master.kernel.org:/home/rmk/linux-2.6-arm:
  VIDEO: cyberpro: remove unused cyber2000fb_get_fb_var()
  VIDEO: cyberpro: remove useless function extreg pointers
  VIDEO: cyberpro: update handling of device structures
  VIDEO: cyberpro: add support for video capture I2C
  VIDEO: cyberpro: make 'reg_b0_lock' always present
  VIDEO: cyberpro: add I2C support
  VIDEO: cyberpro: select lowest multiplier/divisor for PLL
Linus Torvalds 2011-03-17 18:48:35 -07:00
commit 6d7ed21d17
25 changed files with 1709 additions and 355 deletions

@@ -1,7 +1,6 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_MODULES=y
@@ -13,43 +12,89 @@ CONFIG_UX500_SOC_DB5500=y
CONFIG_UX500_SOC_DB8500=y
CONFIG_MACH_U8500=y
CONFIG_MACH_U5500=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_CMDLINE="root=/dev/ram0 console=ttyAMA2,115200n8"
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_NET=y
CONFIG_PHONET=y
CONFIG_PHONET_PIPECTRLR=y
# CONFIG_WIRELESS is not set
CONFIG_CAIF=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
# CONFIG_MISC_DEVICES is not set
CONFIG_MISC_DEVICES=y
CONFIG_AB8500_PWM=y
CONFIG_SENSORS_BH1780=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
CONFIG_KEYBOARD_NOMADIK=y
CONFIG_KEYBOARD_STMPE=y
CONFIG_KEYBOARD_TC3589X=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_BU21013=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_AB8500_PONKEY=y
# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_NOMADIK=y
CONFIG_I2C=y
CONFIG_I2C_NOMADIK=y
CONFIG_SPI=y
CONFIG_SPI_PL022=y
CONFIG_GPIO_STMPE=y
CONFIG_GPIO_TC3589X=y
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
CONFIG_MFD_STMPE=y
CONFIG_MFD_TC3589X=y
CONFIG_AB8500_CORE=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_AB8500=y
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_MMC=y
CONFIG_MMC_ARMMMCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_LP5521=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AB8500=y
CONFIG_RTC_DRV_PL031=y
CONFIG_DMADEVICES=y
CONFIG_STE_DMA40=y
CONFIG_STAGING=y
# CONFIG_STAGING_EXCLUDE_BUILD is not set
CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_INOTIFY=y
CONFIG_EXT3_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_CONFIGFS_FS=m
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
@@ -58,5 +103,3 @@ CONFIG_DEBUG_INFO=y
# CONFIG_FTRACE is not set
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_ERRORS=y
CONFIG_CRC_T10DIF=m
# CONFIG_CRC32 is not set

@@ -0,0 +1,140 @@
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CGROUPS=y
CONFIG_CPUSETS=y
# CONFIG_UTS_NS is not set
# CONFIG_IPC_NS is not set
# CONFIG_USER_NS is not set
# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_VEXPRESS_CA9X4=y
# CONFIG_SWP_EMULATE is not set
CONFIG_SMP=y
CONFIG_VMSPLIT_2G=y
CONFIG_HOTPLUG_CPU=y
CONFIG_AEABI=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="root=/dev/nfs nfsroot=10.1.69.3:/work/nfsroot ip=dhcp console=ttyAMA0 mem=128M"
CONFIG_VFP=y
CONFIG_NEON=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_ARM_INTEGRATOR=y
CONFIG_MISC_DEVICES=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_ATA=y
# CONFIG_SATA_PMP is not set
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_SMSC911X=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_AMBAKMI=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_LEGACY_PTY_COUNT=16
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_FB=y
CONFIG_FB_ARMCLCD=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
# CONFIG_SND_DRIVERS is not set
CONFIG_SND_ARMAACI=y
CONFIG_HID_DRAGONRISE=y
CONFIG_HID_GYRATION=y
CONFIG_HID_TWINHAN=y
CONFIG_HID_NTRIG=y
CONFIG_HID_PANTHERLORD=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_GREENASIA=y
CONFIG_HID_SMARTJOYPLUS=y
CONFIG_HID_TOPSEED=y
CONFIG_HID_THRUSTMASTER=y
CONFIG_HID_ZEROPLUS=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=y
CONFIG_USB_ISP1760_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
CONFIG_MMC_ARMMMCI=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PL031=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
# CONFIG_EXT3_FS_XATTR is not set
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_ERRORS=y
CONFIG_DEBUG_LL=y
CONFIG_EARLY_PRINTK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set

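Both defconfigs above are applied with the usual kbuild targets named after the files, e.g. make ARCH=arm u8500_defconfig or make ARCH=arm vexpress_defconfig (plus a suitable CROSS_COMPILE= prefix); kbuild expands the minimized file against the full Kconfig defaults.
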
@@ -338,7 +338,7 @@ static struct miscdevice etb_miscdev = {
.fops = &etb_fops,
};
static int __init etb_probe(struct amba_device *dev, struct amba_id *id)
static int __init etb_probe(struct amba_device *dev, const struct amba_id *id)
{
struct tracectx *t = &tracer;
int ret = 0;
@@ -530,7 +530,7 @@ static ssize_t trace_mode_store(struct kobject *kobj,
static struct kobj_attribute trace_mode_attr =
__ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
static int __init etm_probe(struct amba_device *dev, struct amba_id *id)
static int __init etm_probe(struct amba_device *dev, const struct amba_id *id)
{
struct tracectx *t = &tracer;
int ret = 0;

@@ -13,16 +13,17 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/amba/bus.h>
#include <asm/irq.h>
#include <asm/sizes.h>
#define to_amba_device(d) container_of(d, struct amba_device, dev)
#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
static struct amba_id *
amba_lookup(struct amba_id *table, struct amba_device *dev)
static const struct amba_id *
amba_lookup(const struct amba_id *table, struct amba_device *dev)
{
int ret = 0;
@@ -57,26 +58,6 @@ static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
#define amba_uevent NULL
#endif
static int amba_suspend(struct device *dev, pm_message_t state)
{
struct amba_driver *drv = to_amba_driver(dev->driver);
int ret = 0;
if (dev->driver && drv->suspend)
ret = drv->suspend(to_amba_device(dev), state);
return ret;
}
static int amba_resume(struct device *dev)
{
struct amba_driver *drv = to_amba_driver(dev->driver);
int ret = 0;
if (dev->driver && drv->resume)
ret = drv->resume(to_amba_device(dev));
return ret;
}
#define amba_attr_func(name,fmt,arg...) \
static ssize_t name##_show(struct device *_dev, \
struct device_attribute *attr, char *buf) \
@@ -102,17 +83,330 @@ static struct device_attribute amba_dev_attrs[] = {
__ATTR_NULL,
};
#ifdef CONFIG_PM_SLEEP
static int amba_legacy_suspend(struct device *dev, pm_message_t mesg)
{
struct amba_driver *adrv = to_amba_driver(dev->driver);
struct amba_device *adev = to_amba_device(dev);
int ret = 0;
if (dev->driver && adrv->suspend)
ret = adrv->suspend(adev, mesg);
return ret;
}
static int amba_legacy_resume(struct device *dev)
{
struct amba_driver *adrv = to_amba_driver(dev->driver);
struct amba_device *adev = to_amba_device(dev);
int ret = 0;
if (dev->driver && adrv->resume)
ret = adrv->resume(adev);
return ret;
}
static int amba_pm_prepare(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (drv && drv->pm && drv->pm->prepare)
ret = drv->pm->prepare(dev);
return ret;
}
static void amba_pm_complete(struct device *dev)
{
struct device_driver *drv = dev->driver;
if (drv && drv->pm && drv->pm->complete)
drv->pm->complete(dev);
}
#else /* !CONFIG_PM_SLEEP */
#define amba_pm_prepare NULL
#define amba_pm_complete NULL
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
static int amba_pm_suspend(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->suspend)
ret = drv->pm->suspend(dev);
} else {
ret = amba_legacy_suspend(dev, PMSG_SUSPEND);
}
return ret;
}
static int amba_pm_suspend_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->suspend_noirq)
ret = drv->pm->suspend_noirq(dev);
}
return ret;
}
static int amba_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->resume)
ret = drv->pm->resume(dev);
} else {
ret = amba_legacy_resume(dev);
}
return ret;
}
static int amba_pm_resume_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->resume_noirq)
ret = drv->pm->resume_noirq(dev);
}
return ret;
}
#else /* !CONFIG_SUSPEND */
#define amba_pm_suspend NULL
#define amba_pm_resume NULL
#define amba_pm_suspend_noirq NULL
#define amba_pm_resume_noirq NULL
#endif /* !CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
static int amba_pm_freeze(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->freeze)
ret = drv->pm->freeze(dev);
} else {
ret = amba_legacy_suspend(dev, PMSG_FREEZE);
}
return ret;
}
static int amba_pm_freeze_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->freeze_noirq)
ret = drv->pm->freeze_noirq(dev);
}
return ret;
}
static int amba_pm_thaw(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->thaw)
ret = drv->pm->thaw(dev);
} else {
ret = amba_legacy_resume(dev);
}
return ret;
}
static int amba_pm_thaw_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->thaw_noirq)
ret = drv->pm->thaw_noirq(dev);
}
return ret;
}
static int amba_pm_poweroff(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->poweroff)
ret = drv->pm->poweroff(dev);
} else {
ret = amba_legacy_suspend(dev, PMSG_HIBERNATE);
}
return ret;
}
static int amba_pm_poweroff_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->poweroff_noirq)
ret = drv->pm->poweroff_noirq(dev);
}
return ret;
}
static int amba_pm_restore(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->restore)
ret = drv->pm->restore(dev);
} else {
ret = amba_legacy_resume(dev);
}
return ret;
}
static int amba_pm_restore_noirq(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->restore_noirq)
ret = drv->pm->restore_noirq(dev);
}
return ret;
}
#else /* !CONFIG_HIBERNATION */
#define amba_pm_freeze NULL
#define amba_pm_thaw NULL
#define amba_pm_poweroff NULL
#define amba_pm_restore NULL
#define amba_pm_freeze_noirq NULL
#define amba_pm_thaw_noirq NULL
#define amba_pm_poweroff_noirq NULL
#define amba_pm_restore_noirq NULL
#endif /* !CONFIG_HIBERNATION */
#ifdef CONFIG_PM
static const struct dev_pm_ops amba_pm = {
.prepare = amba_pm_prepare,
.complete = amba_pm_complete,
.suspend = amba_pm_suspend,
.resume = amba_pm_resume,
.freeze = amba_pm_freeze,
.thaw = amba_pm_thaw,
.poweroff = amba_pm_poweroff,
.restore = amba_pm_restore,
.suspend_noirq = amba_pm_suspend_noirq,
.resume_noirq = amba_pm_resume_noirq,
.freeze_noirq = amba_pm_freeze_noirq,
.thaw_noirq = amba_pm_thaw_noirq,
.poweroff_noirq = amba_pm_poweroff_noirq,
.restore_noirq = amba_pm_restore_noirq,
SET_RUNTIME_PM_OPS(
pm_generic_runtime_suspend,
pm_generic_runtime_resume,
pm_generic_runtime_idle
)
};
#define AMBA_PM (&amba_pm)
#else /* !CONFIG_PM */
#define AMBA_PM NULL
#endif /* !CONFIG_PM */
/*
* Primecells are part of the Advanced Microcontroller Bus Architecture,
* so we call the bus "amba".
*/
static struct bus_type amba_bustype = {
struct bus_type amba_bustype = {
.name = "amba",
.dev_attrs = amba_dev_attrs,
.match = amba_match,
.uevent = amba_uevent,
.suspend = amba_suspend,
.resume = amba_resume,
.pm = AMBA_PM,
};
static int __init amba_init(void)
@@ -188,7 +482,7 @@ static int amba_probe(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *pcdrv = to_amba_driver(dev->driver);
struct amba_id *id = amba_lookup(pcdrv->id_table, pcdev);
const struct amba_id *id = amba_lookup(pcdrv->id_table, pcdev);
int ret;
do {

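Taken together, the bus changes above mean a PrimeCell driver now hands the core a const ID table, receives a const amba_id back in probe(), and describes power management through dev_pm_ops rather than the removed bus-level suspend/resume methods. A minimal sketch against the new interface (the foo_* names and the ID/mask values are hypothetical placeholders, not taken from this merge):

#include <linux/amba/bus.h>
#include <linux/pm.h>

static int foo_probe(struct amba_device *adev, const struct amba_id *id)
{
    /* id points at the matching entry of the const table below */
    return 0;
}

static int foo_remove(struct amba_device *adev)
{
    return 0;
}

static int foo_suspend(struct device *dev)
{
    return 0;
}

static int foo_resume(struct device *dev)
{
    return 0;
}

/* Found via drv->pm by the new amba_pm_suspend()/amba_pm_resume() */
static const struct dev_pm_ops foo_pm_ops = {
    .suspend = foo_suspend,
    .resume  = foo_resume,
};

static const struct amba_id foo_ids[] = {
    { .id = 0x00041abc, .mask = 0x000fffff },  /* hypothetical part ID */
    { 0, 0 },
};

static struct amba_driver foo_driver = {
    .drv = {
        .name = "foo",
        .pm   = &foo_pm_ops,
    },
    .id_table = foo_ids,
    .probe    = foo_probe,
    .remove   = foo_remove,
};

Drivers that still populate the legacy .suspend/.resume members of struct amba_driver keep working: the amba_legacy_suspend()/amba_legacy_resume() wrappers above are used whenever no dev_pm_ops is supplied.
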
@@ -39,7 +39,7 @@ static struct hwrng nmk_rng = {
.read = nmk_rng_read,
};
static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id)
static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
{
void __iomem *base;
int ret;

@@ -1845,7 +1845,7 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
}
#endif
static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
struct pl08x_driver_data *pl08x;
const struct vendor_data *vd = id->data;

@@ -657,7 +657,7 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
}
static int __devinit
pl330_probe(struct amba_device *adev, struct amba_id *id)
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
struct dma_pl330_platdata *pdat;
struct dma_pl330_dmac *pdmac;

@@ -232,7 +232,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
static int pl061_probe(struct amba_device *dev, struct amba_id *id)
static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
{
struct pl061_platform_data *pdata;
struct pl061_gpio *chip;

@@ -107,7 +107,8 @@ static void amba_kmi_close(struct serio *io)
clk_disable(kmi->clk);
}
static int __devinit amba_kmi_probe(struct amba_device *dev, struct amba_id *id)
static int __devinit amba_kmi_probe(struct amba_device *dev,
const struct amba_id *id)
{
struct amba_kmi_port *kmi;
struct serio *io;

@@ -2,7 +2,7 @@
* linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
*
* Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
* Copyright (C) 2010 ST-Ericsson AB.
* Copyright (C) 2010 ST-Ericsson SA
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -25,8 +25,10 @@
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/amba/mmci.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -142,9 +144,6 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
host->mrq = NULL;
host->cmd = NULL;
if (mrq->data)
mrq->data->bytes_xfered = host->data_xfered;
/*
* Need to drop the host lock here; mmc_request_done may call
* back into the driver...
@@ -189,6 +188,248 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}
/*
* All the DMA operation mode stuff goes inside this ifdef.
* This assumes that you have a generic DMA device interface,
* no custom DMA interfaces are supported.
*/
#ifdef CONFIG_DMA_ENGINE
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
struct mmci_platform_data *plat = host->plat;
const char *rxname, *txname;
dma_cap_mask_t mask;
if (!plat || !plat->dma_filter) {
dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
return;
}
/* Try to acquire a generic DMA engine slave channel */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
/*
* If only an RX channel is specified, the driver will
* attempt to use it bidirectionally, however if it
* is specified but cannot be located, DMA will be disabled.
*/
if (plat->dma_rx_param) {
host->dma_rx_channel = dma_request_channel(mask,
plat->dma_filter,
plat->dma_rx_param);
/* E.g if no DMA hardware is present */
if (!host->dma_rx_channel)
dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
}
if (plat->dma_tx_param) {
host->dma_tx_channel = dma_request_channel(mask,
plat->dma_filter,
plat->dma_tx_param);
if (!host->dma_tx_channel)
dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
} else {
host->dma_tx_channel = host->dma_rx_channel;
}
if (host->dma_rx_channel)
rxname = dma_chan_name(host->dma_rx_channel);
else
rxname = "none";
if (host->dma_tx_channel)
txname = dma_chan_name(host->dma_tx_channel);
else
txname = "none";
dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
rxname, txname);
/*
* Limit the maximum segment size in any SG entry according to
* the parameters of the DMA engine device.
*/
if (host->dma_tx_channel) {
struct device *dev = host->dma_tx_channel->device->dev;
unsigned int max_seg_size = dma_get_max_seg_size(dev);
if (max_seg_size < host->mmc->max_seg_size)
host->mmc->max_seg_size = max_seg_size;
}
if (host->dma_rx_channel) {
struct device *dev = host->dma_rx_channel->device->dev;
unsigned int max_seg_size = dma_get_max_seg_size(dev);
if (max_seg_size < host->mmc->max_seg_size)
host->mmc->max_seg_size = max_seg_size;
}
}
/*
* This is used in __devinit or __devexit so inline it
* so it can be discarded.
*/
static inline void mmci_dma_release(struct mmci_host *host)
{
struct mmci_platform_data *plat = host->plat;
if (host->dma_rx_channel)
dma_release_channel(host->dma_rx_channel);
if (host->dma_tx_channel && plat->dma_tx_param)
dma_release_channel(host->dma_tx_channel);
host->dma_rx_channel = host->dma_tx_channel = NULL;
}
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
struct dma_chan *chan = host->dma_current;
enum dma_data_direction dir;
u32 status;
int i;
/* Wait up to 1ms for the DMA to complete */
for (i = 0; ; i++) {
status = readl(host->base + MMCISTATUS);
if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
break;
udelay(10);
}
/*
* Check to see whether we still have some data left in the FIFO -
* this catches DMA controllers which are unable to monitor the
* DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
* contiguous buffers. On TX, we'll get a FIFO underrun error.
*/
if (status & MCI_RXDATAAVLBLMASK) {
dmaengine_terminate_all(chan);
if (!data->error)
data->error = -EIO;
}
if (data->flags & MMC_DATA_WRITE) {
dir = DMA_TO_DEVICE;
} else {
dir = DMA_FROM_DEVICE;
}
dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
/*
* Use of DMA with scatter-gather is impossible.
* Give up with DMA and switch back to PIO mode.
*/
if (status & MCI_RXDATAAVLBLMASK) {
dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
mmci_dma_release(host);
}
}
static void mmci_dma_data_error(struct mmci_host *host)
{
dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
dmaengine_terminate_all(host->dma_current);
}
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
struct variant_data *variant = host->variant;
struct dma_slave_config conf = {
.src_addr = host->phybase + MMCIFIFO,
.dst_addr = host->phybase + MMCIFIFO,
.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
};
struct mmc_data *data = host->data;
struct dma_chan *chan;
struct dma_device *device;
struct dma_async_tx_descriptor *desc;
int nr_sg;
host->dma_current = NULL;
if (data->flags & MMC_DATA_READ) {
conf.direction = DMA_FROM_DEVICE;
chan = host->dma_rx_channel;
} else {
conf.direction = DMA_TO_DEVICE;
chan = host->dma_tx_channel;
}
/* If there's no DMA channel, fall back to PIO */
if (!chan)
return -EINVAL;
/* If less than or equal to the fifo size, don't bother with DMA */
if (host->size <= variant->fifosize)
return -EINVAL;
device = chan->device;
nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
if (nr_sg == 0)
return -EINVAL;
dmaengine_slave_config(chan, &conf);
desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
conf.direction, DMA_CTRL_ACK);
if (!desc)
goto unmap_exit;
/* Okay, go for it. */
host->dma_current = chan;
dev_vdbg(mmc_dev(host->mmc),
"Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
data->sg_len, data->blksz, data->blocks, data->flags);
dmaengine_submit(desc);
dma_async_issue_pending(chan);
datactrl |= MCI_DPSM_DMAENABLE;
/* Trigger the DMA transfer */
writel(datactrl, host->base + MMCIDATACTRL);
/*
* Let the MMCI say when the data is ended and it's time
* to fire next DMA request. When that happens, MMCI will
* call mmci_data_end()
*/
writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
host->base + MMCIMASK0);
return 0;
unmap_exit:
dmaengine_terminate_all(chan);
dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
return -ENOMEM;
}
#else
/* Blank functions if the DMA engine is not available */
static inline void mmci_dma_setup(struct mmci_host *host)
{
}
static inline void mmci_dma_release(struct mmci_host *host)
{
}
static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}
static inline void mmci_dma_data_error(struct mmci_host *host)
{
}
static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
return -ENOSYS;
}
#endif
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
struct variant_data *variant = host->variant;
@@ -202,9 +443,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
host->data = data;
host->size = data->blksz * data->blocks;
host->data_xfered = 0;
mmci_init_sg(host, data);
data->bytes_xfered = 0;
clks = (unsigned long long)data->timeout_ns * host->cclk;
do_div(clks, 1000000000UL);
@@ -219,15 +458,29 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
BUG_ON(1 << blksz_bits != data->blksz);
datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;
if (data->flags & MMC_DATA_READ) {
if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
/*
* Attempt to use DMA operation mode, if this
* should fail, fall back to PIO mode
*/
if (!mmci_dma_start_data(host, datactrl))
return;
/* IRQ mode, map the SG list for CPU reading/writing */
mmci_init_sg(host, data);
if (data->flags & MMC_DATA_READ) {
irqmask = MCI_RXFIFOHALFFULLMASK;
/*
* If we have less than a FIFOSIZE of bytes to transfer,
* trigger a PIO interrupt as soon as any data is available.
* If we have less than the fifo 'half-full' threshold to
* transfer, trigger a PIO interrupt as soon as any data
* is available.
*/
if (host->size < variant->fifosize)
if (host->size < variant->fifohalfsize)
irqmask |= MCI_RXDATAAVLBLMASK;
} else {
/*
@@ -283,49 +536,51 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
u32 remain, success;
/* Calculate how far we are into the transfer */
/* Terminate the DMA transfer */
if (dma_inprogress(host))
mmci_dma_data_error(host);
/*
* Calculate how far we are into the transfer. Note that
* the data counter gives the number of bytes transferred
* on the MMC bus, not on the host side. On reads, this
* can be as much as a FIFO-worth of data ahead. This
* matters for FIFO overruns only.
*/
remain = readl(host->base + MMCIDATACNT);
success = data->blksz * data->blocks - remain;
dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
status, success);
if (status & MCI_DATACRCFAIL) {
/* Last block was not successful */
host->data_xfered = round_down(success - 1, data->blksz);
success -= 1;
data->error = -EILSEQ;
} else if (status & MCI_DATATIMEOUT) {
host->data_xfered = round_down(success, data->blksz);
data->error = -ETIMEDOUT;
} else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
host->data_xfered = round_down(success, data->blksz);
} else if (status & MCI_TXUNDERRUN) {
data->error = -EIO;
} else if (status & MCI_RXOVERRUN) {
if (success > host->variant->fifosize)
success -= host->variant->fifosize;
else
success = 0;
data->error = -EIO;
}
/*
* We hit an error condition. Ensure that any data
* partially written to a page is properly coherent.
*/
if (data->flags & MMC_DATA_READ) {
struct sg_mapping_iter *sg_miter = &host->sg_miter;
unsigned long flags;
local_irq_save(flags);
if (sg_miter_next(sg_miter)) {
flush_dcache_page(sg_miter->page);
sg_miter_stop(sg_miter);
}
local_irq_restore(flags);
}
data->bytes_xfered = round_down(success, data->blksz);
}
if (status & MCI_DATABLOCKEND)
dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
if (status & MCI_DATAEND || data->error) {
if (dma_inprogress(host))
mmci_dma_unmap(host, data);
mmci_stop_data(host);
if (!data->error)
/* The error clause is handled above, success! */
host->data_xfered += data->blksz * data->blocks;
data->bytes_xfered = data->blksz * data->blocks;
if (!data->stop) {
mmci_request_end(host, data->mrq);
@@ -498,9 +753,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
if (remain)
break;
if (status & MCI_RXACTIVE)
flush_dcache_page(sg_miter->page);
status = readl(base + MMCISTATUS);
} while (1);
@@ -509,10 +761,10 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
local_irq_restore(flags);
/*
* If we're nearing the end of the read, switch to
* "any data available" mode.
* If we have less than the fifo 'half-full' threshold to transfer,
* trigger a PIO interrupt as soon as any data is available.
*/
if (status & MCI_RXACTIVE && host->size < variant->fifosize)
if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);
/*
@@ -713,7 +965,8 @@ static const struct mmc_host_ops mmci_ops = {
.get_cd = mmci_get_cd,
};
static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
static int __devinit mmci_probe(struct amba_device *dev,
const struct amba_id *id)
{
struct mmci_platform_data *plat = dev->dev.platform_data;
struct variant_data *variant = id->data;
@@ -776,6 +1029,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
host->mclk);
}
host->phybase = dev->res.start;
host->base = ioremap(dev->res.start, resource_size(&dev->res));
if (!host->base) {
ret = -ENOMEM;
@@ -903,9 +1157,12 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
amba_set_drvdata(dev, mmc);
dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n",
mmc_hostname(mmc), amba_part(dev), amba_rev(dev),
(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]);
dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
amba_rev(dev), (unsigned long long)dev->res.start,
dev->irq[0], dev->irq[1]);
mmci_dma_setup(host);
mmc_add_host(mmc);
@@ -952,6 +1209,7 @@ static int __devexit mmci_remove(struct amba_device *dev)
writel(0, host->base + MMCICOMMAND);
writel(0, host->base + MMCIDATACTRL);
mmci_dma_release(host);
free_irq(dev->irq[0], host);
if (!host->singleirq)
free_irq(dev->irq[1], host);

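None of the DMA machinery above activates unless the platform supplies a dmaengine filter through mmci_platform_data; otherwise mmci_dma_setup() logs "no DMA platform data" and the driver stays in PIO mode. A board-file sketch, with the filter and channel config as hypothetical stand-ins for a real DMA controller driver's (on ux500 these would come from the STE DMA40 driver):

#include <linux/dmaengine.h>
#include <linux/amba/mmci.h>
#include <linux/mmc/host.h>    /* MMC_VDD_* */

/* Hypothetical controller-specific pieces; only their addresses matter here */
struct board_dma_cfg;
extern struct board_dma_cfg board_mmci_rx_cfg;
extern bool board_dma_filter(struct dma_chan *chan, void *param);

static struct mmci_platform_data board_mmci_plat = {
    .ocr_mask     = MMC_VDD_29_30,
    .dma_filter   = board_dma_filter,
    .dma_rx_param = &board_mmci_rx_cfg,
    /*
     * dma_tx_param left unset: per mmci_dma_setup() above, the RX
     * channel is then used for TX transfers as well.
     */
};

Failures in the DMA path stay non-fatal by design: when mmci_dma_start_data() returns an error, mmci_start_data() simply falls through to the existing PIO code.
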
@@ -148,8 +148,10 @@
struct clk;
struct variant_data;
struct dma_chan;
struct mmci_host {
phys_addr_t phybase;
void __iomem *base;
struct mmc_request *mrq;
struct mmc_command *cmd;
@@ -161,8 +163,6 @@ struct mmci_host {
int gpio_cd_irq;
bool singleirq;
unsigned int data_xfered;
spinlock_t lock;
unsigned int mclk;
@@ -181,5 +181,16 @@ struct mmci_host {
struct sg_mapping_iter sg_miter;
unsigned int size;
struct regulator *vcc;
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
struct dma_chan *dma_current;
struct dma_chan *dma_rx_channel;
struct dma_chan *dma_tx_channel;
#define dma_inprogress(host) ((host)->dma_current)
#else
#define dma_inprogress(host) (0)
#endif
};

@@ -97,7 +97,7 @@ static const struct rtc_class_ops pl030_ops = {
.set_alarm = pl030_set_alarm,
};
static int pl030_probe(struct amba_device *dev, struct amba_id *id)
static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
{
struct pl030_rtc *rtc;
int ret;

@@ -307,7 +307,7 @@ static int pl031_remove(struct amba_device *adev)
return 0;
}
static int pl031_probe(struct amba_device *adev, struct amba_id *id)
static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
struct pl031_local *ldata;

@@ -2021,7 +2021,7 @@ static void pl022_cleanup(struct spi_device *spi)
static int __devinit
pl022_probe(struct amba_device *adev, struct amba_id *id)
pl022_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
struct pl022_ssp_controller *platform_info = adev->dev.platform_data;

@@ -676,7 +676,7 @@ static struct uart_driver amba_reg = {
.cons = AMBA_CONSOLE,
};
static int pl010_probe(struct amba_device *dev, struct amba_id *id)
static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
void __iomem *base;

@@ -96,6 +96,22 @@ static struct vendor_data vendor_st = {
};
/* Deals with DMA transactions */
struct pl011_sgbuf {
struct scatterlist sg;
char *buf;
};
struct pl011_dmarx_data {
struct dma_chan *chan;
struct completion complete;
bool use_buf_b;
struct pl011_sgbuf sgbuf_a;
struct pl011_sgbuf sgbuf_b;
dma_cookie_t cookie;
bool running;
};
struct pl011_dmatx_data {
struct dma_chan *chan;
struct scatterlist sg;
@@ -120,11 +136,69 @@ struct uart_amba_port {
char type[12];
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
bool using_dma;
bool using_tx_dma;
bool using_rx_dma;
struct pl011_dmarx_data dmarx;
struct pl011_dmatx_data dmatx;
#endif
};
/*
* Reads up to 256 characters from the FIFO or until it's empty and
* inserts them into the TTY layer. Returns the number of characters
* read from the FIFO.
*/
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
u16 status, ch;
unsigned int flag, max_count = 256;
int fifotaken = 0;
while (max_count--) {
status = readw(uap->port.membase + UART01x_FR);
if (status & UART01x_FR_RXFE)
break;
/* Take chars from the FIFO and update status */
ch = readw(uap->port.membase + UART01x_DR) |
UART_DUMMY_DR_RX;
flag = TTY_NORMAL;
uap->port.icount.rx++;
fifotaken++;
if (unlikely(ch & UART_DR_ERROR)) {
if (ch & UART011_DR_BE) {
ch &= ~(UART011_DR_FE | UART011_DR_PE);
uap->port.icount.brk++;
if (uart_handle_break(&uap->port))
continue;
} else if (ch & UART011_DR_PE)
uap->port.icount.parity++;
else if (ch & UART011_DR_FE)
uap->port.icount.frame++;
if (ch & UART011_DR_OE)
uap->port.icount.overrun++;
ch &= uap->port.read_status_mask;
if (ch & UART011_DR_BE)
flag = TTY_BREAK;
else if (ch & UART011_DR_PE)
flag = TTY_PARITY;
else if (ch & UART011_DR_FE)
flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(&uap->port, ch & 255))
continue;
uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
}
return fifotaken;
}
/*
* All the DMA operation mode stuff goes inside this ifdef.
* This assumes that you have a generic DMA device interface,
@@ -134,6 +208,31 @@ struct uart_amba_port {
#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
enum dma_data_direction dir)
{
sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
if (!sg->buf)
return -ENOMEM;
sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);
if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
kfree(sg->buf);
return -EINVAL;
}
return 0;
}
static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
enum dma_data_direction dir)
{
if (sg->buf) {
dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
kfree(sg->buf);
}
}
static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
{
/* DMA is the sole user of the platform data right now */
@@ -153,7 +252,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
return;
}
/* Try to acquire a generic DMA engine slave channel */
/* Try to acquire a generic DMA engine slave TX channel */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -168,6 +267,28 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
dev_info(uap->port.dev, "DMA channel TX %s\n",
dma_chan_name(uap->dmatx.chan));
/* Optionally make use of an RX channel as well */
if (plat->dma_rx_param) {
struct dma_slave_config rx_conf = {
.src_addr = uap->port.mapbase + UART01x_DR,
.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.direction = DMA_FROM_DEVICE,
.src_maxburst = uap->fifosize >> 1,
};
chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
if (!chan) {
dev_err(uap->port.dev, "no RX DMA channel!\n");
return;
}
dmaengine_slave_config(chan, &rx_conf);
uap->dmarx.chan = chan;
dev_info(uap->port.dev, "DMA channel RX %s\n",
dma_chan_name(uap->dmarx.chan));
}
}
#ifndef MODULE
@@ -219,9 +340,10 @@ static void pl011_dma_remove(struct uart_amba_port *uap)
/* TODO: remove the initcall if it has not yet executed */
if (uap->dmatx.chan)
dma_release_channel(uap->dmatx.chan);
if (uap->dmarx.chan)
dma_release_channel(uap->dmarx.chan);
}
/* Forward declare this for the refill routine */
static int pl011_dma_tx_refill(struct uart_amba_port *uap);
@@ -380,7 +502,7 @@
*/
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
if (!uap->using_dma)
if (!uap->using_tx_dma)
return false;
/*
@@ -432,7 +554,7 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
u16 dmacr;
if (!uap->using_dma)
if (!uap->using_tx_dma)
return false;
if (!uap->port.x_char) {
@@ -492,7 +614,7 @@ static void pl011_dma_flush_buffer(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
if (!uap->using_dma)
if (!uap->using_tx_dma)
return;
/* Avoid deadlock with the DMA engine callback */
@@ -508,9 +630,219 @@ static void pl011_dma_flush_buffer(struct uart_port *port)
}
}
static void pl011_dma_rx_callback(void *data);
static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
struct dma_chan *rxchan = uap->dmarx.chan;
struct dma_device *dma_dev;
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_async_tx_descriptor *desc;
struct pl011_sgbuf *sgbuf;
if (!rxchan)
return -EIO;
/* Start the RX DMA job */
sgbuf = uap->dmarx.use_buf_b ?
&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
dma_dev = rxchan->device;
desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
DMA_FROM_DEVICE,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
/*
* If the DMA engine is busy and cannot prepare a
* channel, no big deal, the driver will fall back
* to interrupt mode as a result of this error code.
*/
if (!desc) {
uap->dmarx.running = false;
dmaengine_terminate_all(rxchan);
return -EBUSY;
}
/* Some data to go along to the callback */
desc->callback = pl011_dma_rx_callback;
desc->callback_param = uap;
dmarx->cookie = dmaengine_submit(desc);
dma_async_issue_pending(rxchan);
uap->dmacr |= UART011_RXDMAE;
writew(uap->dmacr, uap->port.membase + UART011_DMACR);
uap->dmarx.running = true;
uap->im &= ~UART011_RXIM;
writew(uap->im, uap->port.membase + UART011_IMSC);
return 0;
}
/*
* This is called when either the DMA job is complete, or
* the FIFO timeout interrupt occurred. This must be called
* with the port spinlock uap->port.lock held.
*/
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
u32 pending, bool use_buf_b,
bool readfifo)
{
struct tty_struct *tty = uap->port.state->port.tty;
struct pl011_sgbuf *sgbuf = use_buf_b ?
&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
struct device *dev = uap->dmarx.chan->device->dev;
int dma_count = 0;
u32 fifotaken = 0; /* only used for vdbg() */
/* Pick everything from the DMA first */
if (pending) {
/* Sync in buffer */
dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
/*
* First take all chars in the DMA pipe, then look in the FIFO.
* Note that tty_insert_flip_buf() tries to take as many chars
* as it can.
*/
dma_count = tty_insert_flip_string(uap->port.state->port.tty,
sgbuf->buf, pending);
/* Return buffer to device */
dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
uap->port.icount.rx += dma_count;
if (dma_count < pending)
dev_warn(uap->port.dev,
"couldn't insert all characters (TTY is full?)\n");
}
/*
* Only continue with trying to read the FIFO if all DMA chars have
* been taken first.
*/
if (dma_count == pending && readfifo) {
/* Clear any error flags */
writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
uap->port.membase + UART011_ICR);
/*
* If we read all the DMA'd characters, and we had an
* incomplete buffer, that could be due to an rx error, or
* maybe we just timed out. Read any pending chars and check
* the error status.
*
* Error conditions will only occur in the FIFO, these will
* trigger an immediate interrupt and stop the DMA job, so we
* will always find the error in the FIFO, never in the DMA
* buffer.
*/
fifotaken = pl011_fifo_to_tty(uap);
}
spin_unlock(&uap->port.lock);
dev_vdbg(uap->port.dev,
"Took %d chars from DMA buffer and %d chars from the FIFO\n",
dma_count, fifotaken);
tty_flip_buffer_push(tty);
spin_lock(&uap->port.lock);
}
static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_chan *rxchan = dmarx->chan;
struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
&dmarx->sgbuf_b : &dmarx->sgbuf_a;
size_t pending;
struct dma_tx_state state;
enum dma_status dmastat;
/*
* Pause the transfer so we can trust the current counter,
* do this before we pause the PL011 block, else we may
* overflow the FIFO.
*/
if (dmaengine_pause(rxchan))
dev_err(uap->port.dev, "unable to pause DMA transfer\n");
dmastat = rxchan->device->device_tx_status(rxchan,
dmarx->cookie, &state);
if (dmastat != DMA_PAUSED)
dev_err(uap->port.dev, "unable to pause DMA transfer\n");
/* Disable RX DMA - incoming data will wait in the FIFO */
uap->dmacr &= ~UART011_RXDMAE;
writew(uap->dmacr, uap->port.membase + UART011_DMACR);
uap->dmarx.running = false;
pending = sgbuf->sg.length - state.residue;
BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
/* Then we terminate the transfer - we now know our residue */
dmaengine_terminate_all(rxchan);
/*
* This will take the chars we have so far and insert
* into the framework.
*/
pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
/* Switch buffer & re-trigger DMA job */
dmarx->use_buf_b = !dmarx->use_buf_b;
if (pl011_dma_rx_trigger_dma(uap)) {
dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
"fall back to interrupt mode\n");
uap->im |= UART011_RXIM;
writew(uap->im, uap->port.membase + UART011_IMSC);
}
}
static void pl011_dma_rx_callback(void *data)
{
struct uart_amba_port *uap = data;
struct pl011_dmarx_data *dmarx = &uap->dmarx;
bool lastbuf = dmarx->use_buf_b;
int ret;
/*
* This completion interrupt occurs typically when the
* RX buffer is totally stuffed but no timeout has yet
* occurred. When that happens, we just want the RX
* routine to flush out the secondary DMA buffer while
* we immediately trigger the next DMA job.
*/
spin_lock_irq(&uap->port.lock);
uap->dmarx.running = false;
dmarx->use_buf_b = !lastbuf;
ret = pl011_dma_rx_trigger_dma(uap);
pl011_dma_rx_chars(uap, PL011_DMA_BUFFER_SIZE, lastbuf, false);
spin_unlock_irq(&uap->port.lock);
/*
* Do this check after we picked the DMA chars so we don't
* get some IRQ immediately from RX.
*/
if (ret) {
dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
"fall back to interrupt mode\n");
uap->im |= UART011_RXIM;
writew(uap->im, uap->port.membase + UART011_IMSC);
}
}
/*
* Stop accepting received characters, when we're shutting down or
* suspending this port.
* Locking: called with port lock held and IRQs disabled.
*/
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
/* FIXME. Just disable the DMA enable */
uap->dmacr &= ~UART011_RXDMAE;
writew(uap->dmacr, uap->port.membase + UART011_DMACR);
}
static void pl011_dma_startup(struct uart_amba_port *uap)
{
int ret;
if (!uap->dmatx.chan)
return;
@@ -525,8 +857,33 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
/* The DMA buffer is now the FIFO the TTY subsystem can use */
uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
uap->using_dma = true;
uap->using_tx_dma = true;
if (!uap->dmarx.chan)
goto skip_rx;
/* Allocate and map DMA RX buffers */
ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
DMA_FROM_DEVICE);
if (ret) {
dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
"RX buffer A", ret);
goto skip_rx;
}
ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
DMA_FROM_DEVICE);
if (ret) {
dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
"RX buffer B", ret);
pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
DMA_FROM_DEVICE);
goto skip_rx;
}
uap->using_rx_dma = true;
skip_rx:
/* Turn on DMA error (RX/TX will be enabled on demand) */
uap->dmacr |= UART011_DMAONERR;
writew(uap->dmacr, uap->port.membase + UART011_DMACR);
@@ -539,11 +896,17 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
if (uap->vendor->dma_threshold)
writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
uap->port.membase + ST_UART011_DMAWM);
if (uap->using_rx_dma) {
if (pl011_dma_rx_trigger_dma(uap))
dev_dbg(uap->port.dev, "could not trigger initial "
"RX DMA job, fall back to interrupt mode\n");
}
}
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
if (!uap->using_dma)
if (!(uap->using_tx_dma || uap->using_rx_dma))
return;
/* Disable RX and TX DMA */
@@ -555,19 +918,39 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
writew(uap->dmacr, uap->port.membase + UART011_DMACR);
spin_unlock_irq(&uap->port.lock);
/* In theory, this should already be done by pl011_dma_flush_buffer */
dmaengine_terminate_all(uap->dmatx.chan);
if (uap->dmatx.queued) {
dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
DMA_TO_DEVICE);
uap->dmatx.queued = false;
if (uap->using_tx_dma) {
/* In theory, this should already be done by pl011_dma_flush_buffer */
dmaengine_terminate_all(uap->dmatx.chan);
if (uap->dmatx.queued) {
dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
DMA_TO_DEVICE);
uap->dmatx.queued = false;
}
kfree(uap->dmatx.buf);
uap->using_tx_dma = false;
}
kfree(uap->dmatx.buf);
uap->using_dma = false;
if (uap->using_rx_dma) {
dmaengine_terminate_all(uap->dmarx.chan);
/* Clean up the RX DMA */
pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
uap->using_rx_dma = false;
}
}
static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
return uap->using_rx_dma;
}
static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
return uap->using_rx_dma && uap->dmarx.running;
}
#else
/* Blank functions if the DMA engine is not available */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
@@ -600,6 +983,29 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
return false;
}
static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}
static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
return -EIO;
}
static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
return false;
}
static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
return false;
}
#define pl011_dma_flush_buffer NULL
#endif
@@ -630,6 +1036,8 @@ static void pl011_stop_rx(struct uart_port *port)
uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
UART011_PEIM|UART011_BEIM|UART011_OEIM);
writew(uap->im, uap->port.membase + UART011_IMSC);
pl011_dma_rx_stop(uap);
}
static void pl011_enable_ms(struct uart_port *port)
@@ -643,51 +1051,24 @@ static void pl011_enable_ms(struct uart_port *port)
static void pl011_rx_chars(struct uart_amba_port *uap)
{
struct tty_struct *tty = uap->port.state->port.tty;
unsigned int status, ch, flag, max_count = 256;
status = readw(uap->port.membase + UART01x_FR);
while ((status & UART01x_FR_RXFE) == 0 && max_count--) {
ch = readw(uap->port.membase + UART01x_DR) | UART_DUMMY_DR_RX;
flag = TTY_NORMAL;
uap->port.icount.rx++;
pl011_fifo_to_tty(uap);
/*
* Note that the error handling code is
* out of the main execution path
*/
if (unlikely(ch & UART_DR_ERROR)) {
if (ch & UART011_DR_BE) {
ch &= ~(UART011_DR_FE | UART011_DR_PE);
uap->port.icount.brk++;
if (uart_handle_break(&uap->port))
goto ignore_char;
} else if (ch & UART011_DR_PE)
uap->port.icount.parity++;
else if (ch & UART011_DR_FE)
uap->port.icount.frame++;
if (ch & UART011_DR_OE)
uap->port.icount.overrun++;
ch &= uap->port.read_status_mask;
if (ch & UART011_DR_BE)
flag = TTY_BREAK;
else if (ch & UART011_DR_PE)
flag = TTY_PARITY;
else if (ch & UART011_DR_FE)
flag = TTY_FRAME;
}
if (uart_handle_sysrq_char(&uap->port, ch & 255))
goto ignore_char;
uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
ignore_char:
status = readw(uap->port.membase + UART01x_FR);
}
spin_unlock(&uap->port.lock);
tty_flip_buffer_push(tty);
/*
* If we were temporarily out of DMA mode for a while,
* attempt to switch back to DMA mode again.
*/
if (pl011_dma_rx_available(uap)) {
if (pl011_dma_rx_trigger_dma(uap)) {
dev_dbg(uap->port.dev, "could not trigger RX DMA job "
"fall back to interrupt mode again\n");
uap->im |= UART011_RXIM;
} else
uap->im &= ~UART011_RXIM;
writew(uap->im, uap->port.membase + UART011_IMSC);
}
spin_lock(&uap->port.lock);
}
@@ -767,8 +1148,12 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
UART011_RXIS),
uap->port.membase + UART011_ICR);
if (status & (UART011_RTIS|UART011_RXIS))
pl011_rx_chars(uap);
if (status & (UART011_RTIS|UART011_RXIS)) {
if (pl011_dma_rx_running(uap))
pl011_dma_rx_irq(uap);
else
pl011_rx_chars(uap);
}
if (status & (UART011_DSRMIS|UART011_DCDMIS|
UART011_CTSMIS|UART011_RIMIS))
pl011_modem_status(uap);
@@ -945,10 +1330,14 @@ static int pl011_startup(struct uart_port *port)
pl011_dma_startup(uap);
/*
* Finally, enable interrupts
* Finally, enable interrupts, only timeouts when using DMA
* if initial RX DMA job failed, start in interrupt mode
* as well.
*/
spin_lock_irq(&uap->port.lock);
uap->im = UART011_RXIM | UART011_RTIM;
uap->im = UART011_RTIM;
if (!pl011_dma_rx_running(uap))
uap->im |= UART011_RXIM;
writew(uap->im, uap->port.membase + UART011_IMSC);
spin_unlock_irq(&uap->port.lock);
@@ -1349,7 +1738,7 @@ static struct uart_driver amba_reg = {
.cons = AMBA_CONSOLE,
};
static int pl011_probe(struct amba_device *dev, struct amba_id *id)
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
struct vendor_data *vendor = id->data;

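The RX side is likewise opt-in: pl011_dma_probe_initcall() above only requests an RX channel when the platform data carries dma_rx_param, and every failure path falls back to interrupt mode. A sketch of the platform side (struct amba_pl011_data is the real structure from include/linux/amba/serial.h; the board_* names are hypothetical):

#include <linux/dmaengine.h>
#include <linux/amba/serial.h>

struct board_dma_cfg;
extern struct board_dma_cfg board_uart_rx_cfg, board_uart_tx_cfg;
extern bool board_dma_filter(struct dma_chan *chan, void *param);

static struct amba_pl011_data uart0_plat_data = {
    .dma_filter   = board_dma_filter,
    .dma_rx_param = &board_uart_rx_cfg,  /* enables the new RX path */
    .dma_tx_param = &board_uart_tx_cfg,
};
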
@@ -376,6 +376,24 @@ config FB_CYBER2000
Say Y if you have a NetWinder or a graphics card containing this
device, otherwise say N.
config FB_CYBER2000_DDC
bool "DDC for CyberPro support"
depends on FB_CYBER2000
select FB_DDC
default y
help
Say Y here if you want DDC support for your CyberPro graphics
card. This is only I2C bus support; the driver does not use EDID.
config FB_CYBER2000_I2C
bool "CyberPro 2000/2010/5000 I2C support"
depends on FB_CYBER2000 && I2C && ARCH_NETWINDER
select I2C_ALGOBIT
help
Enable support for the I2C video decoder interface on the
Integraphics CyberPro 20x0 and 5000 VGA chips. This is used
on the NetWinder machines for the SAA7111 video capture.
config FB_APOLLO
bool
depends on (FB = y) && APOLLO

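For reference, a NetWinder configuration would enable the new options together with the base driver, in the same defconfig format as above (FB_CYBER2000_DDC already defaults to y once FB_CYBER2000 is enabled):

CONFIG_FB_CYBER2000=y
CONFIG_FB_CYBER2000_DDC=y
CONFIG_FB_CYBER2000_I2C=y
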
@@ -461,7 +461,7 @@ static int clcdfb_register(struct clcd_fb *fb)
return ret;
}
static int clcdfb_probe(struct amba_device *dev, struct amba_id *id)
static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
{
struct clcd_board *board = dev->dev.platform_data;
struct clcd_fb *fb;

@@ -47,6 +47,8 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -61,10 +63,10 @@ struct cfb_info {
struct fb_info fb;
struct display_switch *dispsw;
struct display *display;
struct pci_dev *dev;
unsigned char __iomem *region;
unsigned char __iomem *regs;
u_int id;
u_int irq;
int func_use_count;
u_long ref_ps;
@@ -88,6 +90,19 @@ struct cfb_info {
u_char ramdac_powerdown;
u32 pseudo_palette[16];
spinlock_t reg_b0_lock;
#ifdef CONFIG_FB_CYBER2000_DDC
bool ddc_registered;
struct i2c_adapter ddc_adapter;
struct i2c_algo_bit_data ddc_algo;
#endif
#ifdef CONFIG_FB_CYBER2000_I2C
struct i2c_adapter i2c_adapter;
struct i2c_algo_bit_data i2c_algo;
#endif
};
static char *default_font = "Acorn8x8";
@@ -494,6 +509,7 @@ static void cyber2000fb_set_timing(struct cfb_info *cfb, struct par_info *hw)
cyber2000_attrw(0x14, 0x00, cfb);
/* PLL registers */
spin_lock(&cfb->reg_b0_lock);
cyber2000_grphw(EXT_DCLK_MULT, hw->clock_mult, cfb);
cyber2000_grphw(EXT_DCLK_DIV, hw->clock_div, cfb);
cyber2000_grphw(EXT_MCLK_MULT, cfb->mclk_mult, cfb);
@@ -501,6 +517,7 @@
cyber2000_grphw(0x90, 0x01, cfb);
cyber2000_grphw(0xb9, 0x80, cfb);
cyber2000_grphw(0xb9, 0x00, cfb);
spin_unlock(&cfb->reg_b0_lock);
cfb->ramdac_ctrl = hw->ramdac;
cyber2000fb_write_ramdac_ctrl(cfb);
@@ -681,9 +698,9 @@ cyber2000fb_decode_clock(struct par_info *hw, struct cfb_info *cfb,
* pll_ps_calc = best_div1 / (ref_ps * best_mult)
*/
best_diff = 0x7fffffff;
best_mult = 32;
best_div1 = 255;
for (t_div1 = 32; t_div1 > 1; t_div1 -= 1) {
best_mult = 2;
best_div1 = 32;
for (t_div1 = 2; t_div1 < 32; t_div1 += 1) {
u_int rr, t_mult, t_pll_ps;
int diff;
@@ -1105,24 +1122,22 @@ void cyber2000fb_disable_extregs(struct cfb_info *cfb)
}
EXPORT_SYMBOL(cyber2000fb_disable_extregs);
void cyber2000fb_get_fb_var(struct cfb_info *cfb, struct fb_var_screeninfo *var)
{
memcpy(var, &cfb->fb.var, sizeof(struct fb_var_screeninfo));
}
EXPORT_SYMBOL(cyber2000fb_get_fb_var);
/*
* Attach a capture/tv driver to the core CyberX0X0 driver.
*/
int cyber2000fb_attach(struct cyberpro_info *info, int idx)
{
if (int_cfb_info != NULL) {
info->dev = int_cfb_info->dev;
info->dev = int_cfb_info->fb.device;
#ifdef CONFIG_FB_CYBER2000_I2C
info->i2c = &int_cfb_info->i2c_adapter;
#else
info->i2c = NULL;
#endif
info->regs = int_cfb_info->regs;
info->irq = int_cfb_info->irq;
info->fb = int_cfb_info->fb.screen_base;
info->fb_size = int_cfb_info->fb.fix.smem_len;
info->enable_extregs = cyber2000fb_enable_extregs;
info->disable_extregs = cyber2000fb_disable_extregs;
info->info = int_cfb_info;
strlcpy(info->dev_name, int_cfb_info->fb.fix.id,
@@ -1141,6 +1156,183 @@ void cyber2000fb_detach(int idx)
}
EXPORT_SYMBOL(cyber2000fb_detach);
#ifdef CONFIG_FB_CYBER2000_DDC
#define DDC_REG 0xb0
#define DDC_SCL_OUT (1 << 0)
#define DDC_SDA_OUT (1 << 4)
#define DDC_SCL_IN (1 << 2)
#define DDC_SDA_IN (1 << 6)
static void cyber2000fb_enable_ddc(struct cfb_info *cfb)
{
spin_lock(&cfb->reg_b0_lock);
cyber2000fb_writew(0x1bf, 0x3ce, cfb);
}
static void cyber2000fb_disable_ddc(struct cfb_info *cfb)
{
cyber2000fb_writew(0x0bf, 0x3ce, cfb);
spin_unlock(&cfb->reg_b0_lock);
}
static void cyber2000fb_ddc_setscl(void *data, int val)
{
struct cfb_info *cfb = data;
unsigned char reg;
cyber2000fb_enable_ddc(cfb);
reg = cyber2000_grphr(DDC_REG, cfb);
if (!val) /* bit is inverted */
reg |= DDC_SCL_OUT;
else
reg &= ~DDC_SCL_OUT;
cyber2000_grphw(DDC_REG, reg, cfb);
cyber2000fb_disable_ddc(cfb);
}
static void cyber2000fb_ddc_setsda(void *data, int val)
{
struct cfb_info *cfb = data;
unsigned char reg;
cyber2000fb_enable_ddc(cfb);
reg = cyber2000_grphr(DDC_REG, cfb);
if (!val) /* bit is inverted */
reg |= DDC_SDA_OUT;
else
reg &= ~DDC_SDA_OUT;
cyber2000_grphw(DDC_REG, reg, cfb);
cyber2000fb_disable_ddc(cfb);
}
static int cyber2000fb_ddc_getscl(void *data)
{
struct cfb_info *cfb = data;
int retval;
cyber2000fb_enable_ddc(cfb);
retval = !!(cyber2000_grphr(DDC_REG, cfb) & DDC_SCL_IN);
cyber2000fb_disable_ddc(cfb);
return retval;
}
static int cyber2000fb_ddc_getsda(void *data)
{
struct cfb_info *cfb = data;
int retval;
cyber2000fb_enable_ddc(cfb);
retval = !!(cyber2000_grphr(DDC_REG, cfb) & DDC_SDA_IN);
cyber2000fb_disable_ddc(cfb);
return retval;
}
static int __devinit cyber2000fb_setup_ddc_bus(struct cfb_info *cfb)
{
strlcpy(cfb->ddc_adapter.name, cfb->fb.fix.id,
sizeof(cfb->ddc_adapter.name));
cfb->ddc_adapter.owner = THIS_MODULE;
cfb->ddc_adapter.class = I2C_CLASS_DDC;
cfb->ddc_adapter.algo_data = &cfb->ddc_algo;
cfb->ddc_adapter.dev.parent = cfb->fb.device;
cfb->ddc_algo.setsda = cyber2000fb_ddc_setsda;
cfb->ddc_algo.setscl = cyber2000fb_ddc_setscl;
cfb->ddc_algo.getsda = cyber2000fb_ddc_getsda;
cfb->ddc_algo.getscl = cyber2000fb_ddc_getscl;
cfb->ddc_algo.udelay = 10;
cfb->ddc_algo.timeout = 20;
cfb->ddc_algo.data = cfb;
i2c_set_adapdata(&cfb->ddc_adapter, cfb);
return i2c_bit_add_bus(&cfb->ddc_adapter);
}
#endif /* CONFIG_FB_CYBER2000_DDC */
#ifdef CONFIG_FB_CYBER2000_I2C
static void cyber2000fb_i2c_setsda(void *data, int state)
{
struct cfb_info *cfb = data;
unsigned int latch2;
spin_lock(&cfb->reg_b0_lock);
latch2 = cyber2000_grphr(EXT_LATCH2, cfb);
latch2 &= EXT_LATCH2_I2C_CLKEN;
if (state)
latch2 |= EXT_LATCH2_I2C_DATEN;
cyber2000_grphw(EXT_LATCH2, latch2, cfb);
spin_unlock(&cfb->reg_b0_lock);
}
static void cyber2000fb_i2c_setscl(void *data, int state)
{
struct cfb_info *cfb = data;
unsigned int latch2;
spin_lock(&cfb->reg_b0_lock);
latch2 = cyber2000_grphr(EXT_LATCH2, cfb);
latch2 &= EXT_LATCH2_I2C_DATEN;
if (state)
latch2 |= EXT_LATCH2_I2C_CLKEN;
cyber2000_grphw(EXT_LATCH2, latch2, cfb);
spin_unlock(&cfb->reg_b0_lock);
}
static int cyber2000fb_i2c_getsda(void *data)
{
struct cfb_info *cfb = data;
int ret;
spin_lock(&cfb->reg_b0_lock);
ret = !!(cyber2000_grphr(EXT_LATCH2, cfb) & EXT_LATCH2_I2C_DAT);
spin_unlock(&cfb->reg_b0_lock);
return ret;
}
static int cyber2000fb_i2c_getscl(void *data)
{
struct cfb_info *cfb = data;
int ret;
spin_lock(&cfb->reg_b0_lock);
ret = !!(cyber2000_grphr(EXT_LATCH2, cfb) & EXT_LATCH2_I2C_CLK);
spin_unlock(&cfb->reg_b0_lock);
return ret;
}
static int __devinit cyber2000fb_i2c_register(struct cfb_info *cfb)
{
strlcpy(cfb->i2c_adapter.name, cfb->fb.fix.id,
sizeof(cfb->i2c_adapter.name));
cfb->i2c_adapter.owner = THIS_MODULE;
cfb->i2c_adapter.algo_data = &cfb->i2c_algo;
cfb->i2c_adapter.dev.parent = cfb->fb.device;
cfb->i2c_algo.setsda = cyber2000fb_i2c_setsda;
cfb->i2c_algo.setscl = cyber2000fb_i2c_setscl;
cfb->i2c_algo.getsda = cyber2000fb_i2c_getsda;
cfb->i2c_algo.getscl = cyber2000fb_i2c_getscl;
cfb->i2c_algo.udelay = 5;
cfb->i2c_algo.timeout = msecs_to_jiffies(100);
cfb->i2c_algo.data = cfb;
return i2c_bit_add_bus(&cfb->i2c_adapter);
}
static void cyber2000fb_i2c_unregister(struct cfb_info *cfb)
{
i2c_del_adapter(&cfb->i2c_adapter);
}
#else
#define cyber2000fb_i2c_register(cfb) (0)
#define cyber2000fb_i2c_unregister(cfb) do { } while (0)
#endif
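Once cyber2000fb_i2c_register() has added the capture bus, a video decoder can be instantiated on it in the usual way. A hypothetical sketch; the chip name and address are board-specific assumptions, not part of this commit:

static int __devinit cyber_attach_capture(struct cfb_info *cfb)
{
	static struct i2c_board_info info = {
		I2C_BOARD_INFO("saa7111", 0x24),	/* example decoder */
	};

	/* i2c_new_device() returns NULL if the client cannot be created */
	return i2c_new_device(&cfb->i2c_adapter, &info) ? 0 : -ENODEV;
}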
/*
* These parameters give
* 640x480, hsync 31.5kHz, vsync 60Hz
@ -1275,6 +1467,8 @@ static struct cfb_info __devinit *cyberpro_alloc_fb_info(unsigned int id,
cfb->fb.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
cfb->fb.pseudo_palette = cfb->pseudo_palette;
spin_lock_init(&cfb->reg_b0_lock);
fb_alloc_cmap(&cfb->fb.cmap, NR_PALETTE, 0);
return cfb;
@ -1369,6 +1563,11 @@ static int __devinit cyberpro_common_probe(struct cfb_info *cfb)
cfb->fb.fix.mmio_len = MMIO_SIZE;
cfb->fb.screen_base = cfb->region;
#ifdef CONFIG_FB_CYBER2000_DDC
if (cyber2000fb_setup_ddc_bus(cfb) == 0)
cfb->ddc_registered = true;
#endif
err = -EINVAL;
if (!fb_find_mode(&cfb->fb.var, &cfb->fb, NULL, NULL, 0,
&cyber2000fb_default_mode, 8)) {
@ -1401,14 +1600,32 @@ static int __devinit cyberpro_common_probe(struct cfb_info *cfb)
cfb->fb.var.xres, cfb->fb.var.yres,
h_sync / 1000, h_sync % 1000, v_sync);
if (cfb->dev)
cfb->fb.device = &cfb->dev->dev;
err = cyber2000fb_i2c_register(cfb);
if (err)
goto failed;
err = register_framebuffer(&cfb->fb);
if (err)
cyber2000fb_i2c_unregister(cfb);
failed:
#ifdef CONFIG_FB_CYBER2000_DDC
if (err && cfb->ddc_registered)
i2c_del_adapter(&cfb->ddc_adapter);
#endif
return err;
}
static void __devexit cyberpro_common_remove(struct cfb_info *cfb)
{
unregister_framebuffer(&cfb->fb);
#ifdef CONFIG_FB_CYBER2000_DDC
if (cfb->ddc_registered)
i2c_del_adapter(&cfb->ddc_adapter);
#endif
cyber2000fb_i2c_unregister(cfb);
}
static void cyberpro_common_resume(struct cfb_info *cfb)
{
cyberpro_init_hw(cfb);
@ -1442,12 +1659,13 @@ static int __devinit cyberpro_vl_probe(void)
if (!cfb)
goto failed_release;
cfb->dev = NULL;
cfb->irq = -1;
cfb->region = ioremap(FB_START, FB_SIZE);
if (!cfb->region)
goto failed_ioremap;
cfb->regs = cfb->region + MMIO_OFFSET;
cfb->fb.device = NULL;
cfb->fb.fix.mmio_start = FB_START + MMIO_OFFSET;
cfb->fb.fix.smem_start = FB_START;
@ -1585,12 +1803,13 @@ cyberpro_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err)
goto failed_regions;
cfb->dev = dev;
cfb->irq = dev->irq;
cfb->region = pci_ioremap_bar(dev, 0);
if (!cfb->region)
goto failed_ioremap;
cfb->regs = cfb->region + MMIO_OFFSET;
cfb->fb.device = &dev->dev;
cfb->fb.fix.mmio_start = pci_resource_start(dev, 0) + MMIO_OFFSET;
cfb->fb.fix.smem_start = pci_resource_start(dev, 0);
@ -1648,15 +1867,7 @@ static void __devexit cyberpro_pci_remove(struct pci_dev *dev)
struct cfb_info *cfb = pci_get_drvdata(dev);
if (cfb) {
/*
* If unregister_framebuffer fails, then
* we will be leaving hooks that could cause
* oopsen laying around.
*/
if (unregister_framebuffer(&cfb->fb))
printk(KERN_WARNING "%s: danger Will Robinson, "
"danger danger! Oopsen imminent!\n",
cfb->fb.fix.id);
cyberpro_common_remove(cfb);
iounmap(cfb->region);
cyberpro_free_fb_info(cfb);

View file

@ -464,12 +464,14 @@ static void debug_printf(char *fmt, ...)
struct cfb_info;
struct cyberpro_info {
struct pci_dev *dev;
struct device *dev;
struct i2c_adapter *i2c;
unsigned char __iomem *regs;
char __iomem *fb;
char dev_name[32];
unsigned int fb_size;
unsigned int chip_id;
unsigned int irq;
/*
* The following is a pointer to be passed into the
@ -478,15 +480,6 @@ struct cyberpro_info {
* is within this structure.
*/
struct cfb_info *info;
/*
* Use these to enable the BM or TV registers. In an SMP
* environment, these two function pointers should only be
* called from the module_init() or module_exit()
* functions.
*/
void (*enable_extregs)(struct cfb_info *);
void (*disable_extregs)(struct cfb_info *);
};
#define ID_IGA_1682 0
@ -494,8 +487,6 @@ struct cyberpro_info {
#define ID_CYBERPRO_2010 2
#define ID_CYBERPRO_5000 3
struct fb_var_screeninfo;
/*
* Note! Writing to the Cyber20x0 registers from an interrupt
* routine is definitely a bad idea atm.
@ -504,4 +495,3 @@ int cyber2000fb_attach(struct cyberpro_info *info, int idx);
void cyber2000fb_detach(int idx);
void cyber2000fb_enable_extregs(struct cfb_info *cfb);
void cyber2000fb_disable_extregs(struct cfb_info *cfb);
void cyber2000fb_get_fb_var(struct cfb_info *cfb, struct fb_var_screeninfo *var);

View file

@ -278,7 +278,7 @@ static struct miscdevice sp805_wdt_miscdev = {
};
static int __devinit
sp805_wdt_probe(struct amba_device *adev, struct amba_id *id)
sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;

View file

@ -43,12 +43,12 @@ struct amba_id {
struct amba_driver {
struct device_driver drv;
int (*probe)(struct amba_device *, struct amba_id *);
int (*probe)(struct amba_device *, const struct amba_id *);
int (*remove)(struct amba_device *);
void (*shutdown)(struct amba_device *);
int (*suspend)(struct amba_device *, pm_message_t);
int (*resume)(struct amba_device *);
struct amba_id *id_table;
const struct amba_id *id_table;
};
enum amba_vendor {
@ -56,6 +56,10 @@ enum amba_vendor {
AMBA_VENDOR_ST = 0x80,
};
extern struct bus_type amba_bustype;
#define to_amba_device(d) container_of(d, struct amba_device, dev)
#define amba_get_drvdata(d) dev_get_drvdata(&d->dev)
#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p)
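For reference, a minimal driver skeleton written against the constified interface above (all names hypothetical):

static int foo_probe(struct amba_device *adev, const struct amba_id *id)
{
	/* id points into the driver's own const table below */
	return 0;
}

static const struct amba_id foo_ids[] = {
	{ .id = 0x00041abc, .mask = 0x000fffff },
	{ 0, 0 },
};

static struct amba_driver foo_driver = {
	.drv		= { .name = "foo" },
	.probe		= foo_probe,
	.id_table	= foo_ids,
};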

View file

@ -6,6 +6,9 @@
#include <linux/mmc/host.h>
/* Just some dummy forwarding */
struct dma_chan;
/**
* struct mmci_platform_data - platform configuration for the MMCI
* (also known as PL180) block.
@ -27,6 +30,17 @@
* @cd_invert: true if the gpio_cd pin value is active low
* @capabilities: the capabilities of the block as implemented in
* this platform, signify anything MMC_CAP_* from mmc/host.h
* @dma_filter: function used to select an appropriate RX and TX
* DMA channel to be used for DMA, if and only if you're deploying the
* generic DMA engine
* @dma_rx_param: parameter passed to the DMA allocation
* filter in order to select an appropriate RX channel. If
* there is a bidirectional RX+TX channel, then just specify
* this and leave dma_tx_param set to NULL
* @dma_tx_param: parameter passed to the DMA allocation
* filter in order to select an appropriate TX channel. If this
* is NULL the driver will attempt to use the RX channel as a
* bidirectional channel
*/
struct mmci_platform_data {
unsigned int f_max;
@ -38,6 +52,9 @@ struct mmci_platform_data {
int gpio_cd;
bool cd_invert;
unsigned long capabilities;
bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
void *dma_rx_param;
void *dma_tx_param;
};
#endif
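A hypothetical board-support sketch of the new DMA hooks, using pl08x_filter_id() from the PL08x dmaengine driver as the filter; the channel names are platform-specific assumptions:

static struct mmci_platform_data mmc0_plat_data = {
	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
	.gpio_cd	= -1,
	.gpio_wp	= -1,
	.dma_filter	= pl08x_filter_id,
	.dma_rx_param	= "mmc0_rx",	/* channels matched by name */
	.dma_tx_param	= "mmc0_tx",
};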

View file

@ -210,6 +210,7 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
if (mask & ISR_RXINTR) {
struct aaci_runtime *aacirun = &aaci->capture;
bool period_elapsed = false;
void *ptr;
if (!aacirun->substream || !aacirun->start) {
@ -222,15 +223,12 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
ptr = aacirun->ptr;
do {
unsigned int len = aacirun->fifosz;
unsigned int len = aacirun->fifo_bytes;
u32 val;
if (aacirun->bytes <= 0) {
aacirun->bytes += aacirun->period;
aacirun->ptr = ptr;
spin_unlock(&aacirun->lock);
snd_pcm_period_elapsed(aacirun->substream);
spin_lock(&aacirun->lock);
period_elapsed = true;
}
if (!(aacirun->cr & CR_EN))
break;
@ -260,6 +258,9 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
aacirun->ptr = ptr;
spin_unlock(&aacirun->lock);
if (period_elapsed)
snd_pcm_period_elapsed(aacirun->substream);
}
if (mask & ISR_URINTR) {
@ -269,6 +270,7 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
if (mask & ISR_TXINTR) {
struct aaci_runtime *aacirun = &aaci->playback;
bool period_elapsed = false;
void *ptr;
if (!aacirun->substream || !aacirun->start) {
@ -281,15 +283,12 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
ptr = aacirun->ptr;
do {
unsigned int len = aacirun->fifosz;
unsigned int len = aacirun->fifo_bytes;
u32 val;
if (aacirun->bytes <= 0) {
aacirun->bytes += aacirun->period;
aacirun->ptr = ptr;
spin_unlock(&aacirun->lock);
snd_pcm_period_elapsed(aacirun->substream);
spin_lock(&aacirun->lock);
period_elapsed = true;
}
if (!(aacirun->cr & CR_EN))
break;
@ -319,6 +318,9 @@ static void aaci_fifo_irq(struct aaci *aaci, int channel, u32 mask)
aacirun->ptr = ptr;
spin_unlock(&aacirun->lock);
if (period_elapsed)
snd_pcm_period_elapsed(aacirun->substream);
}
}
@ -361,7 +363,7 @@ static struct snd_pcm_hardware aaci_hw_info = {
/* rates are setup from the AC'97 codec */
.channels_min = 2,
.channels_max = 6,
.channels_max = 2,
.buffer_bytes_max = 64 * 1024,
.period_bytes_min = 256,
.period_bytes_max = PAGE_SIZE,
@ -369,12 +371,46 @@ static struct snd_pcm_hardware aaci_hw_info = {
.periods_max = PAGE_SIZE / 16,
};
static int __aaci_pcm_open(struct aaci *aaci,
struct snd_pcm_substream *substream,
struct aaci_runtime *aacirun)
/*
* We can support two and four channel audio. Unfortunately
* six channel audio requires a non-standard channel ordering:
* 2 -> FL(3), FR(4)
* 4 -> FL(3), FR(4), SL(7), SR(8)
* 6 -> FL(3), FR(4), SL(7), SR(8), C(6), LFE(9) (required)
* FL(3), FR(4), C(6), SL(7), SR(8), LFE(9) (actual)
* This requires an ALSA configuration file to correct.
*/
static int aaci_rule_channels(struct snd_pcm_hw_params *p,
struct snd_pcm_hw_rule *rule)
{
static unsigned int channel_list[] = { 2, 4, 6 };
struct aaci *aaci = rule->private;
unsigned int mask = 1 << 0, slots;
/* pcms[0] is our 5.1 PCM instance. */
slots = aaci->ac97_bus->pcms[0].r[0].slots;
if (slots & (1 << AC97_SLOT_PCM_SLEFT)) {
mask |= 1 << 1;
if (slots & (1 << AC97_SLOT_LFE))
mask |= 1 << 2;
}
return snd_interval_list(hw_param_interval(p, rule->var),
ARRAY_SIZE(channel_list), channel_list, mask);
}
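As I read snd_interval_list(), its mask argument keeps only the channel_list[] entries whose bit is set, so the rule resolves as:

/*
 * bit 0 (2 channels): always allowed;
 * bit 1 (4 channels): allowed when the codec reports valid surround slots;
 * bit 2 (6 channels): additionally requires a valid LFE slot.
 */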
static int aaci_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int ret;
struct aaci *aaci = substream->private_data;
struct aaci_runtime *aacirun;
int ret = 0;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
aacirun = &aaci->playback;
} else {
aacirun = &aaci->capture;
}
aacirun->substream = substream;
runtime->private_data = aacirun;
@ -382,27 +418,37 @@ static int __aaci_pcm_open(struct aaci *aaci,
runtime->hw.rates = aacirun->pcm->rates;
snd_pcm_limit_hw_rates(runtime);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
aacirun->pcm->r[1].slots)
snd_ac97_pcm_double_rate_rules(runtime);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
runtime->hw.channels_max = 6;
/* Add rule describing channel dependency. */
ret = snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_CHANNELS,
aaci_rule_channels, aaci,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
if (ret)
return ret;
if (aacirun->pcm->r[1].slots)
snd_ac97_pcm_double_rate_rules(runtime);
}
/*
* FIXME: ALSA specifies fifo_size in bytes. If we're in normal
* mode, each 32-bit word contains one sample. If we're in
* compact mode, each 32-bit word contains two samples, effectively
* halving the FIFO size. However, we don't know for sure which
* we'll be using at this point. We set this to the lower limit.
* ALSA wants the byte-size of the FIFOs. As we only support
* 16-bit samples, this is twice the FIFO depth irrespective
* of whether it's in compact mode or not.
*/
runtime->hw.fifo_size = aaci->fifosize * 2;
runtime->hw.fifo_size = aaci->fifo_depth * 2;
ret = request_irq(aaci->dev->irq[0], aaci_irq, IRQF_SHARED|IRQF_DISABLED,
DRIVER_NAME, aaci);
if (ret)
goto out;
mutex_lock(&aaci->irq_lock);
if (!aaci->users++) {
ret = request_irq(aaci->dev->irq[0], aaci_irq,
IRQF_SHARED | IRQF_DISABLED, DRIVER_NAME, aaci);
if (ret != 0)
aaci->users--;
}
mutex_unlock(&aaci->irq_lock);
return 0;
out:
return ret;
}
@ -418,7 +464,11 @@ static int aaci_pcm_close(struct snd_pcm_substream *substream)
WARN_ON(aacirun->cr & CR_EN);
aacirun->substream = NULL;
free_irq(aaci->dev->irq[0], aaci);
mutex_lock(&aaci->irq_lock);
if (!--aaci->users)
free_irq(aaci->dev->irq[0], aaci);
mutex_unlock(&aaci->irq_lock);
return 0;
}
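This is the counterpart to the refcounting in aaci_pcm_open(): the single IRQ line is shared by the playback and capture substreams, so the first opener claims it and the last closer releases it, with irq_lock serializing both paths. Distilled (my summary, not commit text):

/*
 * open:  if (!users++)  request_irq(...);   first user claims the line
 * close: if (!--users)  free_irq(...);      last user releases it
 */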
@ -444,12 +494,21 @@ static int aaci_pcm_hw_free(struct snd_pcm_substream *substream)
return 0;
}
/* Channel to slot mask */
static const u32 channels_to_slotmask[] = {
[2] = CR_SL3 | CR_SL4,
[4] = CR_SL3 | CR_SL4 | CR_SL7 | CR_SL8,
[6] = CR_SL3 | CR_SL4 | CR_SL7 | CR_SL8 | CR_SL6 | CR_SL9,
};
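The table is indexed by occupied slot pairs rather than raw channel count, which is why aaci_pcm_hw_params() below looks it up with channels + dbl * 2. A worked example of my reading:

/*
 * 2ch @ <=48kHz: index 2 + 0*2 = 2 -> slots 3,4
 * 2ch @  >48kHz: index 2 + 1*2 = 4 -> slots 3,4,7,8 (double-rate AC'97
 * carries two stereo samples per frame, one in slots 3/4, one in 7/8)
 */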
static int aaci_pcm_hw_params(struct snd_pcm_substream *substream,
struct aaci_runtime *aacirun,
struct snd_pcm_hw_params *params)
{
struct aaci_runtime *aacirun = substream->runtime->private_data;
unsigned int channels = params_channels(params);
unsigned int rate = params_rate(params);
int dbl = rate > 48000;
int err;
struct aaci *aaci = substream->private_data;
aaci_pcm_hw_free(substream);
if (aacirun->pcm_open) {
@ -457,22 +516,28 @@ static int aaci_pcm_hw_params(struct snd_pcm_substream *substream,
aacirun->pcm_open = 0;
}
/* channels is already limited to 2, 4, or 6 by aaci_rule_channels */
if (dbl && channels != 2)
return -EINVAL;
err = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(params));
if (err >= 0) {
unsigned int rate = params_rate(params);
int dbl = rate > 48000;
struct aaci *aaci = substream->private_data;
err = snd_ac97_pcm_open(aacirun->pcm, rate,
params_channels(params),
err = snd_ac97_pcm_open(aacirun->pcm, rate, channels,
aacirun->pcm->r[dbl].slots);
aacirun->pcm_open = err == 0;
aacirun->cr = CR_FEN | CR_COMPACT | CR_SZ16;
aacirun->fifosz = aaci->fifosize * 4;
aacirun->cr |= channels_to_slotmask[channels + dbl * 2];
if (aacirun->cr & CR_COMPACT)
aacirun->fifosz >>= 1;
/*
* fifo_bytes is the number of bytes we transfer to/from
* the FIFO, including padding. So that's x4. As we're
* in compact mode, the FIFO is half the size.
*/
aacirun->fifo_bytes = aaci->fifo_depth * 4 / 2;
}
return err;
@ -483,11 +548,11 @@ static int aaci_pcm_prepare(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct aaci_runtime *aacirun = runtime->private_data;
aacirun->period = snd_pcm_lib_period_bytes(substream);
aacirun->start = runtime->dma_area;
aacirun->end = aacirun->start + snd_pcm_lib_buffer_bytes(substream);
aacirun->ptr = aacirun->start;
aacirun->period =
aacirun->bytes = frames_to_bytes(runtime, runtime->period_size);
aacirun->bytes = aacirun->period;
return 0;
}
@ -505,89 +570,6 @@ static snd_pcm_uframes_t aaci_pcm_pointer(struct snd_pcm_substream *substream)
/*
* Playback specific ALSA stuff
*/
static const u32 channels_to_txmask[] = {
[2] = CR_SL3 | CR_SL4,
[4] = CR_SL3 | CR_SL4 | CR_SL7 | CR_SL8,
[6] = CR_SL3 | CR_SL4 | CR_SL7 | CR_SL8 | CR_SL6 | CR_SL9,
};
/*
* We can support two and four channel audio. Unfortunately
* six channel audio requires a non-standard channel ordering:
* 2 -> FL(3), FR(4)
* 4 -> FL(3), FR(4), SL(7), SR(8)
* 6 -> FL(3), FR(4), SL(7), SR(8), C(6), LFE(9) (required)
* FL(3), FR(4), C(6), SL(7), SR(8), LFE(9) (actual)
* This requires an ALSA configuration file to correct.
*/
static unsigned int channel_list[] = { 2, 4, 6 };
static int
aaci_rule_channels(struct snd_pcm_hw_params *p, struct snd_pcm_hw_rule *rule)
{
struct aaci *aaci = rule->private;
unsigned int chan_mask = 1 << 0, slots;
/*
* pcms[0] is our 5.1 PCM instance.
*/
slots = aaci->ac97_bus->pcms[0].r[0].slots;
if (slots & (1 << AC97_SLOT_PCM_SLEFT)) {
chan_mask |= 1 << 1;
if (slots & (1 << AC97_SLOT_LFE))
chan_mask |= 1 << 2;
}
return snd_interval_list(hw_param_interval(p, rule->var),
ARRAY_SIZE(channel_list), channel_list,
chan_mask);
}
static int aaci_pcm_open(struct snd_pcm_substream *substream)
{
struct aaci *aaci = substream->private_data;
int ret;
/*
* Add rule describing channel dependency.
*/
ret = snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_CHANNELS,
aaci_rule_channels, aaci,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
if (ret)
return ret;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
ret = __aaci_pcm_open(aaci, substream, &aaci->playback);
} else {
ret = __aaci_pcm_open(aaci, substream, &aaci->capture);
}
return ret;
}
static int aaci_pcm_playback_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct aaci_runtime *aacirun = substream->runtime->private_data;
unsigned int channels = params_channels(params);
int ret;
WARN_ON(channels >= ARRAY_SIZE(channels_to_txmask) ||
!channels_to_txmask[channels]);
ret = aaci_pcm_hw_params(substream, aacirun, params);
/*
* Enable FIFO, compact mode, 16 bits per sample.
* FIXME: double rate slots?
*/
if (ret >= 0)
aacirun->cr |= channels_to_txmask[channels];
return ret;
}
static void aaci_pcm_playback_stop(struct aaci_runtime *aacirun)
{
u32 ie;
@ -657,27 +639,13 @@ static struct snd_pcm_ops aaci_playback_ops = {
.open = aaci_pcm_open,
.close = aaci_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = aaci_pcm_playback_hw_params,
.hw_params = aaci_pcm_hw_params,
.hw_free = aaci_pcm_hw_free,
.prepare = aaci_pcm_prepare,
.trigger = aaci_pcm_playback_trigger,
.pointer = aaci_pcm_pointer,
};
static int aaci_pcm_capture_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct aaci_runtime *aacirun = substream->runtime->private_data;
int ret;
ret = aaci_pcm_hw_params(substream, aacirun, params);
if (ret >= 0)
/* Line in record: slot 3 and 4 */
aacirun->cr |= CR_SL3 | CR_SL4;
return ret;
}
static void aaci_pcm_capture_stop(struct aaci_runtime *aacirun)
{
u32 ie;
@ -774,7 +742,7 @@ static struct snd_pcm_ops aaci_capture_ops = {
.open = aaci_pcm_open,
.close = aaci_pcm_close,
.ioctl = snd_pcm_lib_ioctl,
.hw_params = aaci_pcm_capture_hw_params,
.hw_params = aaci_pcm_hw_params,
.hw_free = aaci_pcm_hw_free,
.prepare = aaci_pcm_capture_prepare,
.trigger = aaci_pcm_capture_trigger,
@ -941,12 +909,13 @@ static struct aaci * __devinit aaci_init_card(struct amba_device *dev)
strlcpy(card->driver, DRIVER_NAME, sizeof(card->driver));
strlcpy(card->shortname, "ARM AC'97 Interface", sizeof(card->shortname));
snprintf(card->longname, sizeof(card->longname),
"%s at 0x%016llx, irq %d",
card->shortname, (unsigned long long)dev->res.start,
dev->irq[0]);
"%s PL%03x rev%u at 0x%08llx, irq %d",
card->shortname, amba_part(dev), amba_rev(dev),
(unsigned long long)dev->res.start, dev->irq[0]);
aaci = card->private_data;
mutex_init(&aaci->ac97_sem);
mutex_init(&aaci->irq_lock);
aaci->card = card;
aaci->dev = dev;
@ -984,6 +953,10 @@ static unsigned int __devinit aaci_size_fifo(struct aaci *aaci)
struct aaci_runtime *aacirun = &aaci->playback;
int i;
/*
* Enable the channel, but don't assign it to any slots, so
* it won't empty onto the AC'97 link.
*/
writel(CR_FEN | CR_SZ16 | CR_EN, aacirun->base + AACI_TXCR);
for (i = 0; !(readl(aacirun->base + AACI_SR) & SR_TXFF) && i < 4096; i++)
@ -1002,7 +975,7 @@ static unsigned int __devinit aaci_size_fifo(struct aaci *aaci)
writel(aaci->maincr, aaci->base + AACI_MAINCR);
/*
* If we hit 4096, we failed. Go back to the specified
* If we hit 4096 entries, we failed. Go back to the specified
* fifo depth.
*/
if (i == 4096)
@ -1011,7 +984,8 @@ static unsigned int __devinit aaci_size_fifo(struct aaci *aaci)
return i;
}
static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id)
static int __devinit aaci_probe(struct amba_device *dev,
const struct amba_id *id)
{
struct aaci *aaci;
int ret, i;
@ -1067,11 +1041,12 @@ static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id)
/*
* Size the FIFOs (must be multiple of 16).
* This is the number of entries in the FIFO.
*/
aaci->fifosize = aaci_size_fifo(aaci);
if (aaci->fifosize & 15) {
printk(KERN_WARNING "AACI: fifosize = %d not supported\n",
aaci->fifosize);
aaci->fifo_depth = aaci_size_fifo(aaci);
if (aaci->fifo_depth & 15) {
printk(KERN_WARNING "AACI: FIFO depth %d not supported\n",
aaci->fifo_depth);
ret = -ENODEV;
goto out;
}
@ -1084,8 +1059,8 @@ static int __devinit aaci_probe(struct amba_device *dev, struct amba_id *id)
ret = snd_card_register(aaci->card);
if (ret == 0) {
dev_info(&dev->dev, "%s, fifo %d\n", aaci->card->longname,
aaci->fifosize);
dev_info(&dev->dev, "%s\n", aaci->card->longname);
dev_info(&dev->dev, "FIFO %u entries\n", aaci->fifo_depth);
amba_set_drvdata(dev, aaci->card);
return ret;
}

View file

@ -210,6 +210,8 @@ struct aaci_runtime {
u32 cr;
struct snd_pcm_substream *substream;
unsigned int period; /* byte size of a "period" */
/*
* PIO support
*/
@ -217,15 +219,16 @@ struct aaci_runtime {
void *end;
void *ptr;
int bytes;
unsigned int period;
unsigned int fifosz;
unsigned int fifo_bytes;
};
struct aaci {
struct amba_device *dev;
struct snd_card *card;
void __iomem *base;
unsigned int fifosize;
unsigned int fifo_depth;
unsigned int users;
struct mutex irq_lock;
/* AC'97 */
struct mutex ac97_sem;