commit 6253195b67
Merge branch 'sh/stable-updates'

Conflicts:
	arch/sh/kernel/dwarf.c

74 changed files with 592 additions and 308 deletions

@@ -418,6 +418,14 @@ When: 2.6.33
 Why: Should be implemented in userspace, policy daemon.
 Who: Johannes Berg <johannes@sipsolutions.net>
 
+---------------------------
+
+What: CONFIG_INOTIFY
+When: 2.6.33
+Why: last user (audit) will be converted to the newer more generic
+	and more easily maintained fsnotify subsystem
+Who: Eric Paris <eparis@redhat.com>
+
 ----------------------------
 
 What: lock_policy_rwsem_* and unlock_policy_rwsem_* will not be

@@ -42,7 +42,6 @@
 #include <signal.h>
 #include "linux/lguest_launcher.h"
 #include "linux/virtio_config.h"
-#include <linux/virtio_ids.h>
 #include "linux/virtio_net.h"
 #include "linux/virtio_blk.h"
 #include "linux/virtio_console.h"

@@ -21,6 +21,7 @@
 #include <linux/mempool.h>
 #include <linux/mm.h>
 #include <linux/elf.h>
+#include <linux/ftrace.h>
 #include <asm/dwarf.h>
 #include <asm/unwinder.h>
 #include <asm/sections.h>

@@ -569,6 +570,27 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	if (!pc && !prev)
 		pc = (unsigned long)current_text_addr();
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/*
+	 * If our stack has been patched by the function graph tracer
+	 * then we might see the address of return_to_handler() where we
+	 * expected to find the real return address.
+	 */
+	if (pc == (unsigned long)&return_to_handler) {
+		int index = current->curr_ret_stack;
+
+		/*
+		 * We currently have no way of tracking how many
+		 * return_to_handler()'s we've seen. If there is more
+		 * than one patched return address on our stack,
+		 * complain loudly.
+		 */
+		WARN_ON(index > 0);
+
+		pc = current->ret_stack[index].ret;
+	}
+#endif
+
 	frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
 	if (!frame) {
 		printk(KERN_ERR "Unable to allocate a dwarf frame\n");

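Note: a hedged, self-contained sketch of the idea behind the dwarf.c hunk above (illustrative only; the struct and helper names below are invented, not the kernel's). When the function graph tracer is active it overwrites on-stack return addresses with its trampoline, return_to_handler(), and saves the genuine address in the task's ret_stack; an unwinder that finds the trampoline must substitute the saved address:

	/* Illustrative sketch, not kernel code: map a possibly-patched
	 * return address back to the real one. */
	struct ret_entry {
		unsigned long ret;	/* real return address saved by the tracer */
	};

	static unsigned long real_return_address(unsigned long pc,
						 unsigned long trampoline,
						 const struct ret_entry *ret_stack,
						 int curr_ret_stack)
	{
		/* The tracer replaced the on-stack return address with its
		 * trampoline; the genuine address lives in ret_stack[]. */
		if (pc == trampoline)
			return ret_stack[curr_ret_stack].ret;

		return pc;	/* unpatched: use the address as found */
	}
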
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
+#include <linux/ftrace.h>
 #include <asm/processor.h>
 #include <asm/machvec.h>
 #include <asm/uaccess.h>

@@ -114,7 +115,7 @@ static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
 #endif
 
-asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs)
+asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 #ifdef CONFIG_IRQSTACKS

@@ -85,6 +85,20 @@ DECLARE_EXPORT(__movstr_i4_even);
 DECLARE_EXPORT(__movstr_i4_odd);
 DECLARE_EXPORT(__movstrSI12_i4);
 DECLARE_EXPORT(__movmem);
+DECLARE_EXPORT(__movmemSI8);
+DECLARE_EXPORT(__movmemSI12);
+DECLARE_EXPORT(__movmemSI16);
+DECLARE_EXPORT(__movmemSI20);
+DECLARE_EXPORT(__movmemSI24);
+DECLARE_EXPORT(__movmemSI28);
+DECLARE_EXPORT(__movmemSI32);
+DECLARE_EXPORT(__movmemSI36);
+DECLARE_EXPORT(__movmemSI40);
+DECLARE_EXPORT(__movmemSI44);
+DECLARE_EXPORT(__movmemSI48);
+DECLARE_EXPORT(__movmemSI52);
+DECLARE_EXPORT(__movmemSI56);
+DECLARE_EXPORT(__movmemSI60);
 DECLARE_EXPORT(__movmem_i4_even);
 DECLARE_EXPORT(__movmem_i4_odd);
 DECLARE_EXPORT(__movmemSI12_i4);

@@ -82,7 +82,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 		return -EINVAL;
 	}
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		err = crypto_aes_expand_key(ctx, in_key, key_len);
 	else {
 		kernel_fpu_begin();

@@ -103,7 +103,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		crypto_aes_encrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();

@@ -116,7 +116,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (irq_fpu_usable())
+	if (!irq_fpu_usable())
 		crypto_aes_decrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();

@@ -342,7 +342,7 @@ static int ablk_encrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (irq_fpu_usable()) {
+	if (!irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));

@@ -363,7 +363,7 @@ static int ablk_decrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (irq_fpu_usable()) {
+	if (!irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));

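Note: every hunk in this file flips the same predicate. irq_fpu_usable() returns true when the FPU *may* be used in the current context (reportedly it replaced an earlier helper with the opposite sense), so the plain-C fallback must run on the negated test. A hedged sketch of the corrected dispatch shape — fallback_cipher() and accel_cipher() are illustrative names, not real functions:

	/* Illustrative only: correct dispatch around irq_fpu_usable(). */
	if (!irq_fpu_usable()) {
		/* FPU is off-limits here (e.g. in an interrupt with live
		 * user FPU state): take the pure-software path. */
		fallback_cipher(ctx, dst, src);
	} else {
		kernel_fpu_begin();	/* save FPU state, allow SSE/AES-NI */
		accel_cipher(ctx, dst, src);
		kernel_fpu_end();
	}
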
@@ -116,7 +116,7 @@ static s64 __kpit_elapsed(struct kvm *kvm)
 	 * itself with the initial count and continues counting
 	 * from there.
 	 */
-	remaining = hrtimer_expires_remaining(&ps->pit_timer.timer);
+	remaining = hrtimer_get_remaining(&ps->pit_timer.timer);
 	elapsed = ps->pit_timer.period - ktime_to_ns(remaining);
 	elapsed = mod_64(elapsed, ps->pit_timer.period);
 

@@ -521,7 +521,7 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
 	if (apic_get_reg(apic, APIC_TMICT) == 0)
 		return 0;
 
-	remaining = hrtimer_expires_remaining(&apic->lapic_timer.timer);
+	remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
 	if (ktime_to_ns(remaining) < 0)
 		remaining = ktime_set(0, 0);
 

@@ -748,7 +748,8 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 	return write_protected;
 }
 
-static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
+static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
+			   unsigned long data)
 {
 	u64 *spte;
 	int need_tlb_flush = 0;

@@ -763,7 +764,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
 	return need_flush;
 }
 
-static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
+static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
+			     unsigned long data)
 {
 	int need_flush = 0;
 	u64 *spte, new_spte;

@@ -799,9 +801,10 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
 	return 0;
 }
 
-static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, u64 data,
+static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
+			  unsigned long data,
 			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
-					 u64 data))
+					 unsigned long data))
 {
 	int i, j;
 	int retval = 0;

@@ -846,10 +849,11 @@ int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
 
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
-	kvm_handle_hva(kvm, hva, (u64)&pte, kvm_set_pte_rmapp);
+	kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
 }
 
-static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data)
+static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
+			 unsigned long data)
 {
 	u64 *spte;
 	int young = 0;

@@ -575,7 +575,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
 
 	/* AMD */
-	{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD SB900 */
+	{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
 	/* AMD is using RAID class only for ahci controllers */
 	{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
 	  PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },

@@ -605,6 +605,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
 	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
 	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
+	{ PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */
 	{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
 	{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
 	{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */

@@ -5028,12 +5028,14 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 		qc->flags |= ATA_QCFLAG_FAILED;
 
 	if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
-		if (!ata_tag_internal(qc->tag)) {
-			/* always fill result TF for failed qc */
-			fill_result_tf(qc);
+		/* always fill result TF for failed qc */
+		fill_result_tf(qc);
+
+		if (!ata_tag_internal(qc->tag))
 			ata_qc_schedule_eh(qc);
-			return;
-		}
+		else
+			__ata_qc_complete(qc);
+		return;
 	}
 
 	WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);

@@ -2981,12 +2981,14 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
 	 * device detection messages backwards.
 	 */
 	ata_for_each_dev(dev, link, ALL) {
-		if (!(new_mask & (1 << dev->devno)) ||
-		    dev->class == ATA_DEV_PMP)
+		if (!(new_mask & (1 << dev->devno)))
 			continue;
 
 		dev->class = ehc->classes[dev->devno];
 
+		if (dev->class == ATA_DEV_PMP)
+			continue;
+
 		ehc->i.flags |= ATA_EHI_PRINTINFO;
 		rc = ata_dev_configure(dev);
 		ehc->i.flags &= ~ATA_EHI_PRINTINFO;

@@ -246,7 +246,7 @@ static const struct pci_device_id atiixp[] = {
 	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
 	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
 	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
-	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_SB900_IDE), },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), },
 
 	{ },
 };

@@ -235,8 +235,7 @@ static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 		.udma_mask = ATA_UDMA2,
 		.port_ops = &sc1200_port_ops
 	};
-	/* Can't enable port 2 yet, see top comments */
-	const struct ata_port_info *ppi[] = { &info, };
+	const struct ata_port_info *ppi[] = { &info, NULL };
 
 	return ata_pci_sff_init_one(dev, ppi, &sc1200_sht, NULL);
 }

@@ -111,7 +111,7 @@ static const struct via_isa_bridge {
 	{ "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
 	{ "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_SATA_PATA },
 	{ "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES },
-	{ "vt6415", PCI_DEVICE_ID_VIA_6415, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES },
+	{ "vt6415", PCI_DEVICE_ID_VIA_6415, 0x00, 0xff, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES },
 	{ "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
 	{ "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
 	{ "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },

@@ -1382,6 +1382,25 @@ static int mv_qc_defer(struct ata_queued_cmd *qc)
 	 */
 	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
 		return ATA_DEFER_PORT;
+
+	/* PIO commands need exclusive link: no other commands [DMA or PIO]
+	 * can run concurrently.
+	 * set excl_link when we want to send a PIO command in DMA mode
+	 * or a non-NCQ command in NCQ mode.
+	 * When we receive a command from that link, and there are no
+	 * outstanding commands, mark a flag to clear excl_link and let
+	 * the command go through.
+	 */
+	if (unlikely(ap->excl_link)) {
+		if (link == ap->excl_link) {
+			if (ap->nr_active_links)
+				return ATA_DEFER_PORT;
+			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
+			return 0;
+		} else
+			return ATA_DEFER_PORT;
+	}
+
 	/*
 	 * If the port is completely idle, then allow the new qc.
 	 */

@@ -1395,8 +1414,14 @@ static int mv_qc_defer(struct ata_queued_cmd *qc)
 	 * doesn't allow it.
 	 */
 	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
-	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
-		return 0;
+	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
+		if (ata_is_ncq(qc->tf.protocol))
+			return 0;
+		else {
+			ap->excl_link = link;
+			return ATA_DEFER_PORT;
+		}
+	}
 
 	return ATA_DEFER_PORT;
 }

@@ -1594,9 +1594,21 @@ static int nv_hardreset(struct ata_link *link, unsigned int *class,
 	    !ata_dev_enabled(link->device))
 		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
 				    NULL, NULL);
-	else if (!(ehc->i.flags & ATA_EHI_QUIET))
-		ata_link_printk(link, KERN_INFO,
-				"nv: skipping hardreset on occupied port\n");
+	else {
+		const unsigned long *timing = sata_ehc_deb_timing(ehc);
+		int rc;
+
+		if (!(ehc->i.flags & ATA_EHI_QUIET))
+			ata_link_printk(link, KERN_INFO, "nv: skipping "
+					"hardreset on occupied port\n");
+
+		/* make sure the link is online */
+		rc = sata_link_resume(link, timing, deadline);
+		/* whine about phy resume failure but proceed */
+		if (rc && rc != -EOPNOTSUPP)
+			ata_link_printk(link, KERN_WARNING, "failed to resume "
+					"link (errno=%d)\n", rc);
+	}
 
 	/* device signature acquisition is unreliable */
 	return -EAGAIN;

@@ -3,7 +3,6 @@
 #include <linux/blkdev.h>
 #include <linux/hdreg.h>
 #include <linux/virtio.h>
-#include <linux/virtio_ids.h>
 #include <linux/virtio_blk.h>
 #include <linux/scatterlist.h>
 

@@ -183,34 +182,6 @@ static void do_virtblk_request(struct request_queue *q)
 	vblk->vq->vq_ops->kick(vblk->vq);
 }
 
-/* return ATA identify data
- */
-static int virtblk_identify(struct gendisk *disk, void *argp)
-{
-	struct virtio_blk *vblk = disk->private_data;
-	void *opaque;
-	int err = -ENOMEM;
-
-	opaque = kmalloc(VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
-	if (!opaque)
-		goto out;
-
-	err = virtio_config_buf(vblk->vdev, VIRTIO_BLK_F_IDENTIFY,
-		offsetof(struct virtio_blk_config, identify), opaque,
-		VIRTIO_BLK_ID_BYTES);
-
-	if (err)
-		goto out_kfree;
-
-	if (copy_to_user(argp, opaque, VIRTIO_BLK_ID_BYTES))
-		err = -EFAULT;
-
-out_kfree:
-	kfree(opaque);
-out:
-	return err;
-}
-
 static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
 {
 	req->cmd_type = REQ_TYPE_LINUX_BLOCK;

@@ -222,10 +193,6 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
 {
 	struct gendisk *disk = bdev->bd_disk;
 	struct virtio_blk *vblk = disk->private_data;
-	void __user *argp = (void __user *)data;
 
-	if (cmd == HDIO_GET_IDENTITY)
-		return virtblk_identify(disk, argp);
-
 	/*
 	 * Only allow the generic SCSI ioctls if the host can support it.

@@ -233,7 +200,8 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
 	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
 		return -ENOTTY;
 
-	return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
+	return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
+			      (void __user *)data);
 }
 
 /* We provide getgeo only to please some old bootloader/partitioning tools */

@@ -332,7 +300,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	}
 
 	vblk->disk->queue->queuedata = vblk;
-	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
 
 	if (index < 26) {
 		sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);

@@ -445,7 +412,7 @@ static struct virtio_device_id id_table[] = {
 static unsigned int features[] = {
 	VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
 	VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
-	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY, VIRTIO_BLK_F_FLUSH
+	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH
 };
 
 /*

@@ -21,7 +21,6 @@
 #include <linux/scatterlist.h>
 #include <linux/spinlock.h>
 #include <linux/virtio.h>
-#include <linux/virtio_ids.h>
 #include <linux/virtio_rng.h>
 
 /* The host will fill any buffer we give it with sweet, sweet randomness. We

@@ -117,7 +116,7 @@ static int virtrng_probe(struct virtio_device *vdev)
 	return 0;
 }
 
-static void virtrng_remove(struct virtio_device *vdev)
+static void __devexit virtrng_remove(struct virtio_device *vdev)
 {
 	vdev->config->reset(vdev);
 	hwrng_unregister(&virtio_hwrng);

@@ -31,7 +31,6 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/virtio.h>
-#include <linux/virtio_ids.h>
 #include <linux/virtio_console.h>
 #include "hvc_console.h"
 

@@ -24,6 +24,12 @@
 #include <asm/i387.h>
 #include "padlock.h"
 
+#ifdef CONFIG_64BIT
+#define STACK_ALIGN 16
+#else
+#define STACK_ALIGN 4
+#endif
+
 struct padlock_sha_desc {
 	struct shash_desc fallback;
 };

@@ -64,7 +70,9 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
 	/* We can't store directly to *out as it may be unaligned. */
 	/* BTW Don't reduce the buffer size below 128 Bytes!
 	 * PadLock microcode needs it that big. */
-	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
 	struct sha1_state state;
 	unsigned int space;

@@ -128,7 +136,9 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
 	/* We can't store directly to *out as it may be unaligned. */
 	/* BTW Don't reduce the buffer size below 128 Bytes!
 	 * PadLock microcode needs it that big. */
-	char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT)));
+	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
 	struct sha256_state state;
 	unsigned int space;

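Note: the buffer arithmetic above is the classic over-allocate-and-align trick. The compiler only guarantees STACK_ALIGN alignment for locals, so to carve out a 128-byte region aligned to PADLOCK_ALIGNMENT the buffer is padded by PADLOCK_ALIGNMENT - STACK_ALIGN bytes and the pointer rounded up. Worked numbers, assuming PADLOCK_ALIGNMENT is 16 (as the padding math implies):

	/* 32-bit case: STACK_ALIGN == 4, PADLOCK_ALIGNMENT == 16 (assumed). */
	char buf[128 + 16 - 4] __attribute__((aligned(4)));

	/* PTR_ALIGN by hand: round the address up to the next multiple of 16.
	 * Worst case the buffer starts at 16k + 4 and the full 12 spare bytes
	 * are consumed, yet a complete aligned 128-byte window still fits. */
	char *result = (char *)(((unsigned long)buf + 15) & ~15UL);
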
@@ -1122,7 +1122,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 		debugf0("Reading K8_DRAM_BASE_LOW failed\n");
 
 	/* Extract parts into separate data entries */
-	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 24;
+	pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
 	pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
 	pvt->dram_rw_en[dram] = (low & 0x3);
 

@@ -1135,7 +1135,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	 * Extract parts into separate data entries. Limit is the HIGHEST memory
 	 * location of the region, so lower 24 bits need to be all ones
 	 */
-	pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 24) | 0x00FFFFFF;
+	pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
 	pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
 	pvt->dram_DstNode[dram] = (low & 0x7);
 }

@@ -1369,7 +1369,7 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
 
 	pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
-			       (((u64)low_base & 0xFFFF0000) << 24);
+			       (((u64)low_base & 0xFFFF0000) << 8);
 
 	low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
 	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);

@@ -1391,7 +1391,7 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
 	 * memory location of the region, so low 24 bits need to be all ones.
 	 */
 	pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
-				(((u64) low_limit & 0xFFFF0000) << 24) |
+				(((u64) low_limit & 0xFFFF0000) << 8) |
 				0x00FFFFFF;
 }
 

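Note: a worked example of why the corrected shift above is 8 rather than 24 (the value is made up). The DRAM base/limit registers keep address bits [39:24] in register bits [31:16], so the masked value needs a further left shift of 24 - 16 = 8; shifting by 24 lands the field 32 positions too high:

	#include <stdint.h>

	uint32_t low = 0x00AB0000;	/* register bits [31:16] hold 0x00AB */

	/* Wrong: the field lands at address bits [55:40]. */
	uint64_t base_wrong = ((uint64_t)low & 0xFFFF0000) << 24;	/* 0x0000AB0000000000 */

	/* Right: the field lands at address bits [39:24]. */
	uint64_t base_right = ((uint64_t)low & 0xFFFF0000) << 8;	/* 0x00000000AB000000 */
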
@@ -177,7 +177,7 @@ static const struct pci_device_id atiixp_pci_tbl[] = {
 	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), 0 },
 	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), 1 },
 	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), 0 },
-	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_SB900_IDE), 0 },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), 0 },
 	{ 0, },
 };
 MODULE_DEVICE_TABLE(pci, atiixp_pci_tbl);

@@ -782,10 +782,29 @@ static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait)
 	return 0;
 }
 
+union input_seq_state {
+	struct {
+		unsigned short pos;
+		bool mutex_acquired;
+	};
+	void *p;
+};
+
 static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	if (mutex_lock_interruptible(&input_mutex))
-		return NULL;
+	union input_seq_state *state = (union input_seq_state *)&seq->private;
+	int error;
+
+	/* We need to fit into seq->private pointer */
+	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
+
+	error = mutex_lock_interruptible(&input_mutex);
+	if (error) {
+		state->mutex_acquired = false;
+		return ERR_PTR(error);
+	}
+
+	state->mutex_acquired = true;
 
 	return seq_list_start(&input_dev_list, *pos);
 }

@@ -795,9 +814,12 @@ static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	return seq_list_next(v, &input_dev_list, pos);
 }
 
-static void input_devices_seq_stop(struct seq_file *seq, void *v)
+static void input_seq_stop(struct seq_file *seq, void *v)
 {
-	mutex_unlock(&input_mutex);
+	union input_seq_state *state = (union input_seq_state *)&seq->private;
+
+	if (state->mutex_acquired)
+		mutex_unlock(&input_mutex);
 }
 
 static void input_seq_print_bitmap(struct seq_file *seq, const char *name,

@@ -861,7 +883,7 @@ static int input_devices_seq_show(struct seq_file *seq, void *v)
 static const struct seq_operations input_devices_seq_ops = {
 	.start = input_devices_seq_start,
 	.next = input_devices_seq_next,
-	.stop = input_devices_seq_stop,
+	.stop = input_seq_stop,
 	.show = input_devices_seq_show,
 };
 

@@ -881,40 +903,49 @@ static const struct file_operations input_devices_fileops = {
 
 static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	if (mutex_lock_interruptible(&input_mutex))
-		return NULL;
+	union input_seq_state *state = (union input_seq_state *)&seq->private;
+	int error;
+
+	/* We need to fit into seq->private pointer */
+	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
+
+	error = mutex_lock_interruptible(&input_mutex);
+	if (error) {
+		state->mutex_acquired = false;
+		return ERR_PTR(error);
+	}
+
+	state->mutex_acquired = true;
+	state->pos = *pos;
 
-	seq->private = (void *)(unsigned long)*pos;
 	return seq_list_start(&input_handler_list, *pos);
 }
 
 static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	seq->private = (void *)(unsigned long)(*pos + 1);
-	return seq_list_next(v, &input_handler_list, pos);
-}
+	union input_seq_state *state = (union input_seq_state *)&seq->private;
 
-static void input_handlers_seq_stop(struct seq_file *seq, void *v)
-{
-	mutex_unlock(&input_mutex);
+	state->pos = *pos + 1;
+	return seq_list_next(v, &input_handler_list, pos);
 }
 
 static int input_handlers_seq_show(struct seq_file *seq, void *v)
 {
 	struct input_handler *handler = container_of(v, struct input_handler, node);
+	union input_seq_state *state = (union input_seq_state *)&seq->private;
 
-	seq_printf(seq, "N: Number=%ld Name=%s",
-		   (unsigned long)seq->private, handler->name);
+	seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
 	if (handler->fops)
 		seq_printf(seq, " Minor=%d", handler->minor);
 	seq_putc(seq, '\n');
 
 	return 0;
 }
 
 static const struct seq_operations input_handlers_seq_ops = {
 	.start = input_handlers_seq_start,
 	.next = input_handlers_seq_next,
-	.stop = input_handlers_seq_stop,
+	.stop = input_seq_stop,
 	.show = input_handlers_seq_show,
 };
 

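Note: the union introduced above packs the iterator state directly into the pointer-sized seq->private slot instead of allocating it, and records whether the mutex was actually taken so that ->stop() can skip the unlock after an interrupted ->start(). A hedged, standalone sketch of the same packing trick (user-space C11; the names are illustrative):

	#include <assert.h>
	#include <stdbool.h>

	/* Small iterator state overlaid on a pointer-sized slot. */
	union seq_state {
		struct {
			unsigned short pos;	/* current iteration index */
			bool mutex_acquired;	/* did start() take the lock? */
		};
		void *p;			/* the slot that actually stores it */
	};

	/* Compile-time guarantee that the state really fits the slot --
	 * the user-space analogue of the BUILD_BUG_ON() above. */
	static_assert(sizeof(union seq_state) == sizeof(void *),
		      "state must fit in a pointer");
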
@@ -574,11 +574,22 @@ static void atkbd_event_work(struct work_struct *work)
 
 	mutex_lock(&atkbd->event_mutex);
 
-	if (test_and_clear_bit(ATKBD_LED_EVENT_BIT, &atkbd->event_mask))
-		atkbd_set_leds(atkbd);
+	if (!atkbd->enabled) {
+		/*
+		 * Serio ports are resumed asynchronously so while driver core
+		 * thinks that device is already fully operational in reality
+		 * it may not be ready yet. In this case we need to keep
+		 * rescheduling till reconnect completes.
+		 */
+		schedule_delayed_work(&atkbd->event_work,
+				      msecs_to_jiffies(100));
+	} else {
+		if (test_and_clear_bit(ATKBD_LED_EVENT_BIT, &atkbd->event_mask))
+			atkbd_set_leds(atkbd);
 
-	if (test_and_clear_bit(ATKBD_REP_EVENT_BIT, &atkbd->event_mask))
-		atkbd_set_repeat_rate(atkbd);
+		if (test_and_clear_bit(ATKBD_REP_EVENT_BIT, &atkbd->event_mask))
+			atkbd_set_repeat_rate(atkbd);
+	}
 
 	mutex_unlock(&atkbd->event_mutex);
 }

@@ -770,6 +781,30 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra
 	return 3;
 }
 
+static int atkbd_reset_state(struct atkbd *atkbd)
+{
+	struct ps2dev *ps2dev = &atkbd->ps2dev;
+	unsigned char param[1];
+
+	/*
+	 * Set the LEDs to a predefined state (all off).
+	 */
+
+	param[0] = 0;
+	if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))
+		return -1;
+
+	/*
+	 * Set autorepeat to fastest possible.
+	 */
+
+	param[0] = 0;
+	if (ps2_command(ps2dev, param, ATKBD_CMD_SETREP))
+		return -1;
+
+	return 0;
+}
+
 static int atkbd_activate(struct atkbd *atkbd)
 {
 	struct ps2dev *ps2dev = &atkbd->ps2dev;

@@ -851,29 +886,6 @@ static unsigned int atkbd_hp_forced_release_keys[] = {
 	0x94, -1U
 };
 
-/*
- * Inventec system with broken key release on volume keys
- */
-static unsigned int atkbd_inventec_forced_release_keys[] = {
-	0xae, 0xb0, -1U
-};
-
-/*
- * Perform fixup for HP Pavilion ZV6100 laptop that doesn't generate release
- * for its volume buttons
- */
-static unsigned int atkbd_hp_zv6100_forced_release_keys[] = {
-	0xae, 0xb0, -1U
-};
-
-/*
- * Perform fixup for HP (Compaq) Presario R4000 R4100 R4200 that don't generate
- * release for their volume buttons
- */
-static unsigned int atkbd_hp_r4000_forced_release_keys[] = {
-	0xae, 0xb0, -1U
-};
-
 /*
  * Samsung NC10,NC20 with Fn+F? key release not working
  */

@@ -881,14 +893,6 @@ static unsigned int atkbd_samsung_forced_release_keys[] = {
 	0x82, 0x83, 0x84, 0x86, 0x88, 0x89, 0xb3, 0xf7, 0xf9, -1U
 };
 
-/*
- * The volume up and volume down special keys on a Fujitsu Amilo PA 1510 laptop
- * do not generate release events so we have to do it ourselves.
- */
-static unsigned int atkbd_amilo_pa1510_forced_release_keys[] = {
-	0xb0, 0xae, -1U
-};
-
 /*
  * Amilo Pi 3525 key release for Fn+Volume keys not working
  */

@@ -910,6 +914,14 @@ static unsigned int atkdb_soltech_ta12_forced_release_keys[] = {
 	0xa0, 0xae, 0xb0, -1U
 };
 
+/*
+ * Many notebooks don't send key release event for volume up/down
+ * keys, with key list below common among them
+ */
+static unsigned int atkbd_volume_forced_release_keys[] = {
+	0xae, 0xb0, -1U
+};
+
 /*
  * atkbd_set_keycode_table() initializes keyboard's keycode table
  * according to the selected scancode set

@@ -1087,6 +1099,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
 	}
 
 	atkbd->set = atkbd_select_set(atkbd, atkbd_set, atkbd_extra);
+	atkbd_reset_state(atkbd);
 	atkbd_activate(atkbd);
 
 	} else {

@@ -1267,6 +1280,7 @@ static ssize_t atkbd_set_extra(struct atkbd *atkbd, const char *buf, size_t coun
 
 	atkbd->dev = new_dev;
 	atkbd->set = atkbd_select_set(atkbd, atkbd->set, value);
+	atkbd_reset_state(atkbd);
 	atkbd_activate(atkbd);
 	atkbd_set_keycode_table(atkbd);
 	atkbd_set_device_attrs(atkbd);

@@ -1548,7 +1562,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion ZV6100"),
 		},
 		.callback = atkbd_setup_forced_release,
-		.driver_data = atkbd_hp_zv6100_forced_release_keys,
+		.driver_data = atkbd_volume_forced_release_keys,
 	},
 	{
 		.ident = "HP Presario R4000",

@@ -1557,7 +1571,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4000"),
 		},
 		.callback = atkbd_setup_forced_release,
-		.driver_data = atkbd_hp_r4000_forced_release_keys,
+		.driver_data = atkbd_volume_forced_release_keys,
 	},
 	{
 		.ident = "HP Presario R4100",

@@ -1566,7 +1580,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4100"),
 		},
 		.callback = atkbd_setup_forced_release,
-		.driver_data = atkbd_hp_r4000_forced_release_keys,
+		.driver_data = atkbd_volume_forced_release_keys,
 	},
 	{
 		.ident = "HP Presario R4200",

@@ -1575,7 +1589,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4200"),
 		},
 		.callback = atkbd_setup_forced_release,
-		.driver_data = atkbd_hp_r4000_forced_release_keys,
+		.driver_data = atkbd_volume_forced_release_keys,
 	},
 	{
 		.ident = "Inventec Symphony",

@@ -1584,7 +1598,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "SYMPHONY 6.0/7.0"),
 		},
 		.callback = atkbd_setup_forced_release,
-		.driver_data = atkbd_inventec_forced_release_keys,
+		.driver_data = atkbd_volume_forced_release_keys,
 	},
 	{
 		.ident = "Samsung NC10",

@@ -1620,7 +1634,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 1510"),
 		},
 		.callback = atkbd_setup_forced_release,
-		.driver_data = atkbd_amilo_pa1510_forced_release_keys,
+		.driver_data = atkbd_volume_forced_release_keys,
 	},
 	{
 		.ident = "Fujitsu Amilo Pi 3525",

@@ -209,7 +209,7 @@ static inline int hp_sdc_rtc_read_rt(struct timeval *res) {
 
 /* Read the i8042 fast handshake timer */
 static inline int hp_sdc_rtc_read_fhs(struct timeval *res) {
-	uint64_t raw;
+	int64_t raw;
 	unsigned int tenms;
 
 	raw = hp_sdc_rtc_read_i8042timer(HP_SDC_CMD_LOAD_FHS, 2);

@@ -219,7 +219,7 @@ static const struct ps2pp_info *get_model_info(unsigned char model)
 		PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN |
 		PS2PP_EXTRA_BTN | PS2PP_NAV_BTN | PS2PP_HWHEEL },
 	{ 72, PS2PP_KIND_TRACKMAN, 0 },			/* T-CH11: TrackMan Marble */
-	{ 73, 0, PS2PP_SIDE_BTN },
+	{ 73, PS2PP_KIND_TRACKMAN, PS2PP_SIDE_BTN },	/* TrackMan FX */
 	{ 75, PS2PP_KIND_WHEEL, PS2PP_WHEEL },
 	{ 76, PS2PP_KIND_WHEEL, PS2PP_WHEEL },
 	{ 79, PS2PP_KIND_TRACKMAN, PS2PP_WHEEL },	/* TrackMan with wheel */

@@ -652,6 +652,16 @@ static const struct dmi_system_id toshiba_dmi_table[] = {
 			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
 			DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M300"),
 		},
 
 	},
+	{
+		.ident = "Toshiba Portege M300",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"),
+		},
+
+	},
 	{ }
 };

@@ -326,6 +326,17 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
 		},
 	},
+	{
+		/*
+		 * Reset and GET ID commands issued via KBD port are
+		 * sometimes being delivered to AUX3.
+		 */
+		.ident = "Sony Vaio FZ-240E",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
+		},
+	},
 	{
 		.ident = "Amoi M636/A737",
 		.matches = {

@@ -661,7 +672,7 @@ static void i8042_pnp_exit(void)
 static int __init i8042_pnp_init(void)
 {
 	char kbd_irq_str[4] = { 0 }, aux_irq_str[4] = { 0 };
-	int pnp_data_busted = false;
+	bool pnp_data_busted = false;
 	int err;
 
 #ifdef CONFIG_X86

@@ -138,16 +138,6 @@ int dm_exception_store_type_unregister(struct dm_exception_store_type *type)
 }
 EXPORT_SYMBOL(dm_exception_store_type_unregister);
 
-/*
- * Round a number up to the nearest 'size' boundary. size must
- * be a power of 2.
- */
-static ulong round_up(ulong n, ulong size)
-{
-	size--;
-	return (n + size) & ~size;
-}
-
 static int set_chunk_size(struct dm_exception_store *store,
 			  const char *chunk_size_arg, char **error)
 {

@@ -155,7 +145,8 @@ static int set_chunk_size(struct dm_exception_store *store,
 	char *value;
 
 	chunk_size_ulong = simple_strtoul(chunk_size_arg, &value, 10);
-	if (*chunk_size_arg == '\0' || *value != '\0') {
+	if (*chunk_size_arg == '\0' || *value != '\0' ||
+	    chunk_size_ulong > UINT_MAX) {
 		*error = "Invalid chunk size";
 		return -EINVAL;
 	}

@@ -165,40 +156,35 @@ static int set_chunk_size(struct dm_exception_store *store,
 		return 0;
 	}
 
-	/*
-	 * Chunk size must be multiple of page size. Silently
-	 * round up if it's not.
-	 */
-	chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);
-
-	return dm_exception_store_set_chunk_size(store, chunk_size_ulong,
+	return dm_exception_store_set_chunk_size(store,
+						 (unsigned) chunk_size_ulong,
 						 error);
 }
 
 int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
-				      unsigned long chunk_size_ulong,
+				      unsigned chunk_size,
 				      char **error)
 {
 	/* Check chunk_size is a power of 2 */
-	if (!is_power_of_2(chunk_size_ulong)) {
+	if (!is_power_of_2(chunk_size)) {
 		*error = "Chunk size is not a power of 2";
 		return -EINVAL;
 	}
 
 	/* Validate the chunk size against the device block size */
-	if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
+	if (chunk_size % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
 		*error = "Chunk size is not a multiple of device blocksize";
 		return -EINVAL;
 	}
 
-	if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) {
+	if (chunk_size > INT_MAX >> SECTOR_SHIFT) {
 		*error = "Chunk size is too high";
 		return -EINVAL;
 	}
 
-	store->chunk_size = chunk_size_ulong;
-	store->chunk_mask = chunk_size_ulong - 1;
-	store->chunk_shift = ffs(chunk_size_ulong) - 1;
+	store->chunk_size = chunk_size;
+	store->chunk_mask = chunk_size - 1;
+	store->chunk_shift = ffs(chunk_size) - 1;
 
 	return 0;
 }

@@ -251,7 +237,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
 
 	r = set_chunk_size(tmp_store, argv[2], &ti->error);
 	if (r)
-		goto bad_cow;
+		goto bad_ctr;
 
 	r = type->ctr(tmp_store, 0, NULL);
 	if (r) {

@@ -101,9 +101,9 @@ struct dm_exception_store {
 	struct dm_dev *cow;
 
 	/* Size of data blocks saved - must be a power of 2 */
-	chunk_t chunk_size;
-	chunk_t chunk_mask;
-	chunk_t chunk_shift;
+	unsigned chunk_size;
+	unsigned chunk_mask;
+	unsigned chunk_shift;
 
 	void *context;
 };

@@ -169,7 +169,7 @@ int dm_exception_store_type_register(struct dm_exception_store_type *type);
 int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
 
 int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
-				      unsigned long chunk_size_ulong,
+				      unsigned chunk_size,
 				      char **error);
 
 int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,

@@ -156,7 +156,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 	}
 
 	/* The ptr value is sufficient for local unique id */
-	lc->luid = (uint64_t)lc;
+	lc->luid = (unsigned long)lc;
 
 	lc->ti = ti;
 

@@ -284,12 +284,13 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 {
 	int r;
 	struct disk_header *dh;
-	chunk_t chunk_size;
+	unsigned chunk_size;
 	int chunk_size_supplied = 1;
 	char *chunk_err;
 
 	/*
-	 * Use default chunk size (or hardsect_size, if larger) if none supplied
+	 * Use default chunk size (or logical_block_size, if larger)
+	 * if none supplied
 	 */
 	if (!ps->store->chunk_size) {
 		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,

@@ -334,10 +335,9 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 		return 0;
 
 	if (chunk_size_supplied)
-		DMWARN("chunk size %llu in device metadata overrides "
-		       "table chunk size of %llu.",
-		       (unsigned long long)chunk_size,
-		       (unsigned long long)ps->store->chunk_size);
+		DMWARN("chunk size %u in device metadata overrides "
+		       "table chunk size of %u.",
+		       chunk_size, ps->store->chunk_size);
 
 	/* We had a bogus chunk_size. Fix stuff up. */
 	free_area(ps);

@@ -345,8 +345,8 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
 					      &chunk_err);
 	if (r) {
-		DMERR("invalid on-disk chunk size %llu: %s.",
-		      (unsigned long long)chunk_size, chunk_err);
+		DMERR("invalid on-disk chunk size %u: %s.",
+		      chunk_size, chunk_err);
 		return r;
 	}
 

@@ -296,6 +296,7 @@ static void __insert_origin(struct origin *o)
  */
 static int register_snapshot(struct dm_snapshot *snap)
 {
+	struct dm_snapshot *l;
 	struct origin *o, *new_o;
 	struct block_device *bdev = snap->origin->bdev;
 

@@ -319,7 +320,11 @@ static int register_snapshot(struct dm_snapshot *snap)
 		__insert_origin(o);
 	}
 
-	list_add_tail(&snap->list, &o->snapshots);
+	/* Sort the list according to chunk size, largest-first smallest-last */
+	list_for_each_entry(l, &o->snapshots, list)
+		if (l->store->chunk_size < snap->store->chunk_size)
+			break;
+	list_add_tail(&snap->list, &l->list);
 
 	up_write(&_origins_lock);
 	return 0;

@@ -668,6 +673,11 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	bio_list_init(&s->queued_bios);
 	INIT_WORK(&s->queued_bios_work, flush_queued_bios);
 
+	if (!s->store->chunk_size) {
+		ti->error = "Chunk size not set";
+		goto bad_load_and_register;
+	}
+
 	/* Add snapshot to the list of snapshots for this origin */
 	/* Exceptions aren't triggered till snapshot_resume() is called */
 	if (register_snapshot(s)) {

@@ -951,7 +961,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
 
 	src.bdev = bdev;
 	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
-	src.count = min(s->store->chunk_size, dev_size - src.sector);
+	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);
 
 	dest.bdev = s->store->cow->bdev;
 	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);

@@ -1142,6 +1152,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
 	unsigned sz = 0;
 	struct dm_snapshot *snap = ti->private;
 
+	down_write(&snap->lock);
+
 	switch (type) {
 	case STATUSTYPE_INFO:
 		if (!snap->valid)

@@ -1173,6 +1185,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
 		break;
 	}
 
+	up_write(&snap->lock);
+
 	return 0;
 }
 

@@ -1388,7 +1402,7 @@ static void origin_resume(struct dm_target *ti)
 	struct dm_dev *dev = ti->private;
 	struct dm_snapshot *snap;
 	struct origin *o;
-	chunk_t chunk_size = 0;
+	unsigned chunk_size = 0;
 
 	down_read(&_origins_lock);
 	o = __lookup_origin(dev->bdev);

@@ -1465,7 +1479,7 @@ static int __init dm_snapshot_init(void)
 	r = dm_register_target(&snapshot_target);
 	if (r) {
 		DMERR("snapshot target register failed %d", r);
-		return r;
+		goto bad_register_snapshot_target;
 	}
 
 	r = dm_register_target(&origin_target);

@@ -1522,6 +1536,9 @@ bad2:
 	dm_unregister_target(&origin_target);
 bad1:
 	dm_unregister_target(&snapshot_target);
+
+bad_register_snapshot_target:
+	dm_exception_store_exit();
 	return r;
 }
 

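Note: the register_snapshot() hunk above uses a compact list idiom that is easy to misread. list_for_each_entry() stops at the first snapshot with a smaller chunk size, and list_add_tail(&snap->list, &l->list) then inserts the new snapshot immediately *before* that element; if no smaller element exists, the loop ends with l at the head sentinel and the call appends at the tail, keeping the list sorted largest-first. A hedged annotated restatement (kernel <linux/list.h> semantics assumed):

	/* Illustrative restatement of the sorted insert above. */
	struct dm_snapshot *l;

	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < snap->store->chunk_size)
			break;		/* 'l' is the first smaller entry */

	/* list_add_tail(new, pos) links 'new' just before 'pos':
	 * before the smaller entry, or at the tail when none was found. */
	list_add_tail(&snap->list, &l->list);
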
@@ -47,6 +47,7 @@ struct dm_io {
 	atomic_t io_count;
 	struct bio *bio;
 	unsigned long start_time;
+	spinlock_t endio_lock;
 };
 
 /*

@@ -578,8 +579,12 @@ static void dec_pending(struct dm_io *io, int error)
 	struct mapped_device *md = io->md;
 
 	/* Push-back supersedes any I/O errors */
-	if (error && !(io->error > 0 && __noflush_suspending(md)))
-		io->error = error;
+	if (unlikely(error)) {
+		spin_lock_irqsave(&io->endio_lock, flags);
+		if (!(io->error > 0 && __noflush_suspending(md)))
+			io->error = error;
+		spin_unlock_irqrestore(&io->endio_lock, flags);
+	}
 
 	if (atomic_dec_and_test(&io->io_count)) {
 		if (io->error == DM_ENDIO_REQUEUE) {

@@ -1226,6 +1231,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 	atomic_set(&ci.io->io_count, 1);
 	ci.io->bio = bio;
 	ci.io->md = md;
+	spin_lock_init(&ci.io->endio_lock);
 	ci.sector = bio->bi_sector;
 	ci.sector_count = bio_sectors(bio);
 	if (unlikely(bio_empty_barrier(bio)))

@@ -1822,6 +1828,7 @@ static struct mapped_device *alloc_dev(int minor)
 bad_bdev:
 	destroy_workqueue(md->wq);
 bad_thread:
+	del_gendisk(md->disk);
 	put_disk(md->disk);
 bad_disk:
 	blk_cleanup_queue(md->queue);

@@ -72,7 +72,6 @@
 #include <asm/irq.h>
 #include <asm/gpio.h>
 
-#include <asm/mach/mmc.h>
 #include <mach/board.h>
 #include <mach/cpu.h>
 #include <mach/at91_mci.h>

@@ -243,15 +243,26 @@ static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
 
 int be_cmd_POST(struct be_adapter *adapter)
 {
-	u16 stage, error;
+	u16 stage;
+	int status, timeout = 0;
 
-	error = be_POST_stage_get(adapter, &stage);
-	if (error || stage != POST_STAGE_ARMFW_RDY) {
-		dev_err(&adapter->pdev->dev, "POST failed.\n");
-		return -1;
-	}
+	do {
+		status = be_POST_stage_get(adapter, &stage);
+		if (status) {
+			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
+				stage);
+			return -1;
+		} else if (stage != POST_STAGE_ARMFW_RDY) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(2 * HZ);
+			timeout += 2;
+		} else {
+			return 0;
+		}
+	} while (timeout < 20);
 
-	return 0;
+	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
+	return -1;
 }
 
 static inline void *embedded_payload(struct be_mcc_wrb *wrb)

@@ -729,8 +740,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
 /* Create an rx filtering policy configuration on an i/f
  * Uses mbox
  */
-int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
-		bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_if_create *req;

@@ -746,8 +757,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
 
-	req->capability_flags = cpu_to_le32(flags);
-	req->enable_flags = cpu_to_le32(flags);
+	req->capability_flags = cpu_to_le32(cap_flags);
+	req->enable_flags = cpu_to_le32(en_flags);
 	req->pmac_invalid = pmac_invalid;
 	if (!pmac_invalid)
 		memcpy(req->mac_addr, mac, ETH_ALEN);

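Note: the rewritten be_cmd_POST() above replaces a single sample of the firmware POST stage with a bounded poll. A hedged user-space sketch of the same retry pattern, with sleep(2) standing in for schedule_timeout(2 * HZ) and all names illustrative:

	#include <unistd.h>

	#define STAGE_READY 0x01	/* stand-in for POST_STAGE_ARMFW_RDY */

	static int wait_for_firmware(int (*get_stage)(unsigned int *stage))
	{
		unsigned int stage;
		int timeout = 0;

		do {
			if (get_stage(&stage))
				return -1;	/* register read failed: give up */
			if (stage == STAGE_READY)
				return 0;	/* firmware finished POST */
			sleep(2);		/* poll every ~2 seconds */
			timeout += 2;
		} while (timeout < 20);		/* bound the wait at ~20 seconds */

		return -1;			/* still not ready: report timeout */
	}
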
@@ -720,8 +720,9 @@ extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 			u32 if_id, u32 *pmac_id);
 extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
-extern int be_cmd_if_create(struct be_adapter *adapter, u32 if_flags, u8 *mac,
-			bool pmac_invalid, u32 *if_handle, u32 *pmac_id);
+extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
+			u32 en_flags, u8 *mac, bool pmac_invalid,
+			u32 *if_handle, u32 *pmac_id);
 extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
 extern int be_cmd_eq_create(struct be_adapter *adapter,
 			struct be_queue_info *eq, int eq_delay);

@@ -1620,19 +1620,22 @@ static int be_open(struct net_device *netdev)
 static int be_setup(struct be_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
-	u32 if_flags;
+	u32 cap_flags, en_flags;
 	int status;
 
-	if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
-		BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
-		BE_IF_FLAGS_PASS_L3L4_ERRORS;
-	status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr,
-			false/* pmac_invalid */, &adapter->if_handle,
-			&adapter->pmac_id);
+	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+			BE_IF_FLAGS_MCAST_PROMISCUOUS |
+			BE_IF_FLAGS_PROMISCUOUS |
+			BE_IF_FLAGS_PASS_L3L4_ERRORS;
+	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+			BE_IF_FLAGS_PASS_L3L4_ERRORS;
+
+	status = be_cmd_if_create(adapter, cap_flags, en_flags,
+			netdev->dev_addr, false/* pmac_invalid */,
+			&adapter->if_handle, &adapter->pmac_id);
 	if (status != 0)
 		goto do_none;
 
-
 	status = be_tx_queues_create(adapter);
 	if (status != 0)
 		goto if_destroy;

@@ -2055,6 +2058,10 @@ static int be_hw_up(struct be_adapter *adapter)
 	if (status)
 		return status;
 
+	status = be_cmd_reset_function(adapter);
+	if (status)
+		return status;
+
 	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
 	if (status)
 		return status;

@@ -2108,10 +2115,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
 	if (status)
 		goto free_netdev;
 
-	status = be_cmd_reset_function(adapter);
-	if (status)
-		goto ctrl_clean;
-
 	status = be_stats_init(adapter);
 	if (status)
 		goto ctrl_clean;

@@ -223,24 +223,25 @@ struct ethoc_bd {
 	u32 addr;
 };
 
-static u32 ethoc_read(struct ethoc *dev, loff_t offset)
+static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
 {
 	return ioread32(dev->iobase + offset);
 }
 
-static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
+static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
 {
 	iowrite32(data, dev->iobase + offset);
 }
 
-static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd)
+static inline void ethoc_read_bd(struct ethoc *dev, int index,
+		struct ethoc_bd *bd)
 {
 	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
 	bd->stat = ethoc_read(dev, offset + 0);
 	bd->addr = ethoc_read(dev, offset + 4);
 }
 
-static void ethoc_write_bd(struct ethoc *dev, int index,
+static inline void ethoc_write_bd(struct ethoc *dev, int index,
 		const struct ethoc_bd *bd)
 {
 	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));

@@ -248,33 +249,33 @@ static void ethoc_write_bd(struct ethoc *dev, int index,
 	ethoc_write(dev, offset + 4, bd->addr);
 }
 
-static void ethoc_enable_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
 {
 	u32 imask = ethoc_read(dev, INT_MASK);
 	imask |= mask;
 	ethoc_write(dev, INT_MASK, imask);
 }
 
-static void ethoc_disable_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
 {
 	u32 imask = ethoc_read(dev, INT_MASK);
 	imask &= ~mask;
 	ethoc_write(dev, INT_MASK, imask);
 }
 
-static void ethoc_ack_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
 {
 	ethoc_write(dev, INT_SOURCE, mask);
 }
 
-static void ethoc_enable_rx_and_tx(struct ethoc *dev)
+static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
 {
 	u32 mode = ethoc_read(dev, MODER);
 	mode |= MODER_RXEN | MODER_TXEN;
 	ethoc_write(dev, MODER, mode);
 }
 
-static void ethoc_disable_rx_and_tx(struct ethoc *dev)
+static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
 {
 	u32 mode = ethoc_read(dev, MODER);
 	mode &= ~(MODER_RXEN | MODER_TXEN);

@@ -508,7 +509,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
 		return IRQ_NONE;
 	}
 
-	ethoc_ack_irq(priv, INT_MASK_ALL);
+	ethoc_ack_irq(priv, pending);
 
 	if (pending & INT_MASK_BUSY) {
 		dev_err(&dev->dev, "packet dropped\n");

@@ -1654,7 +1654,7 @@ static const struct net_device_ops fec_netdev_ops = {
  *
  * index is only used in legacy code
  */
-int __init fec_enet_init(struct net_device *dev, int index)
+static int fec_enet_init(struct net_device *dev, int index)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
 	struct bufdesc *cbd_base;

@@ -170,6 +170,36 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
 		ks_err(ks, "spi_sync() failed\n");
 }
 
+/**
+ * ks8851_wrreg8 - write 8bit register value to chip
+ * @ks: The chip state
+ * @reg: The register address
+ * @val: The value to write
+ *
+ * Issue a write to put the value @val into the register specified in @reg.
+ */
+static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
+{
+	struct spi_transfer *xfer = &ks->spi_xfer1;
+	struct spi_message *msg = &ks->spi_msg1;
+	__le16 txb[2];
+	int ret;
+	int bit;
+
+	bit = 1 << (reg & 3);
+
+	txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR);
+	txb[1] = val;
+
+	xfer->tx_buf = txb;
+	xfer->rx_buf = NULL;
+	xfer->len = 3;
+
+	ret = spi_sync(ks->spidev, msg);
+	if (ret < 0)
+		ks_err(ks, "spi_sync() failed\n");
+}
+
 /**
  * ks8851_rx_1msg - select whether to use one or two messages for spi read
  * @ks: The device structure

@@ -322,13 +352,12 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
 static int ks8851_write_mac_addr(struct net_device *dev)
 {
 	struct ks8851_net *ks = netdev_priv(dev);
-	u16 *mcp = (u16 *)dev->dev_addr;
+	int i;
 
 	mutex_lock(&ks->lock);
 
-	ks8851_wrreg16(ks, KS_MARL, mcp[0]);
-	ks8851_wrreg16(ks, KS_MARM, mcp[1]);
-	ks8851_wrreg16(ks, KS_MARH, mcp[2]);
+	for (i = 0; i < ETH_ALEN; i++)
+		ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
 
 	mutex_unlock(&ks->lock);
 

@@ -951,7 +980,7 @@ static void ks8851_set_rx_mode(struct net_device *dev)
 			mcptr = mcptr->next;
 		}
 
-		rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA;
+		rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA;
 	} else {
 		/* just accept broadcast / unicast */
 		rxctrl.rxcr1 = RXCR1_RXPAFMA;

@@ -1239,6 +1268,9 @@ static int __devinit ks8851_probe(struct spi_device *spi)
 	ndev->netdev_ops = &ks8851_netdev_ops;
 	ndev->irq = spi->irq;
 
+	/* issue a global soft reset to reset the device. */
+	ks8851_soft_reset(ks, GRR_GSR);
+
 	/* simple check for a valid chip being connected to the bus */
 
 	if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {

@@ -16,6 +16,7 @@
 #define CCR_32PIN	(1 << 0)
 
 /* MAC address registers */
+#define KS_MAR(_m)	0x15 - (_m)
 #define KS_MARL		0x10
 #define KS_MARM		0x12
 #define KS_MARH		0x14

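Note: the new KS_MAR(_m) macro above maps a MAC byte index to a register byte address in reverse, because dev_addr[] holds the address most-significant byte first while MARL/MARM/MARH sit at increasing addresses holding the low bytes. A quick illustrative check of the mapping:

	#define KS_MAR(_m)	(0x15 - (_m))

	/* dev_addr[0] (MAC MSB) -> 0x15 (top byte of MARH at 0x14)
	 * dev_addr[5] (MAC LSB) -> 0x10 (bottom byte of MARL at 0x10)
	 * so the loop in ks8851_write_mac_addr() fills 0x15 down to 0x10. */
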
@@ -3545,7 +3545,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
 	rp->rcr_index = index;
 
 	skb_reserve(skb, NET_IP_ALIGN);
-	__pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX));
+	__pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));
 
 	rp->rx_packets++;
 	rp->rx_bytes += skb->len;

@@ -22,7 +22,6 @@
 #include <linux/ethtool.h>
 #include <linux/module.h>
 #include <linux/virtio.h>
-#include <linux/virtio_ids.h>
 #include <linux/virtio_net.h>
 #include <linux/scatterlist.h>
 #include <linux/if_vlan.h>

@@ -454,7 +453,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
 		vi->dev->stats.tx_bytes += skb->len;
 		vi->dev->stats.tx_packets++;
 		tot_sgs += skb_vnet_hdr(skb)->num_sg;
-		kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 	}
 	return tot_sgs;
 }

@@ -481,7 +481,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 	}
 	rq->uncommitted[ring_idx] += num_allocated;
 
-	dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
+	dev_dbg(&adapter->netdev->dev,
+		"alloc_rx_buf: %d allocated, next2fill %u, next2comp "
 		"%u, uncommited %u\n", num_allocated, ring->next2fill,
 		ring->next2comp, rq->uncommitted[ring_idx]);
 

@@ -539,7 +540,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		tbi = tq->buf_info + tq->tx_ring.next2fill;
 		tbi->map_type = VMXNET3_MAP_NONE;
 
-		dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+		dev_dbg(&adapter->netdev->dev,
+			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
 			tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
 			ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

@@ -572,7 +574,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		gdesc->dword[2] = dw2 | buf_size;
 		gdesc->dword[3] = 0;
 
-		dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+		dev_dbg(&adapter->netdev->dev,
+			"txd[%u]: 0x%Lx 0x%x 0x%x\n",
 			tq->tx_ring.next2fill, gdesc->txd.addr,
 			gdesc->dword[2], gdesc->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

@@ -600,7 +603,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 		gdesc->dword[2] = dw2 | frag->size;
 		gdesc->dword[3] = 0;
 
-		dprintk(KERN_ERR "txd[%u]: 0x%llu %u %u\n",
+		dev_dbg(&adapter->netdev->dev,
+			"txd[%u]: 0x%llu %u %u\n",
 			tq->tx_ring.next2fill, gdesc->txd.addr,
 			gdesc->dword[2], gdesc->dword[3]);
 		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

@@ -697,7 +701,8 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 		tdd = tq->data_ring.base + tq->tx_ring.next2fill;
 
 		memcpy(tdd->data, skb->data, ctx->copy_size);
-		dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n",
+		dev_dbg(&adapter->netdev->dev,
+			"copy %u bytes to dataRing[%u]\n",
 			ctx->copy_size, tq->tx_ring.next2fill);
 		return 1;
 

@@ -808,7 +813,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 
 	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
 		tq->stats.tx_ring_full++;
-		dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u"
+		dev_dbg(&adapter->netdev->dev,
+			"tx queue stopped on %s, next2comp %u"
 			" next2fill %u\n", adapter->netdev->name,
 			tq->tx_ring.next2comp, tq->tx_ring.next2fill);
 

@@ -853,7 +859,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 
 	/* finally flips the GEN bit of the SOP desc */
 	gdesc->dword[2] ^= VMXNET3_TXD_GEN;
-	dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
+	dev_dbg(&adapter->netdev->dev,
+		"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
 		(u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
 		tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
 		gdesc->dword[3]);

@@ -990,7 +997,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 		if (unlikely(rcd->len == 0)) {
 			/* Pretend the rx buffer is skipped. */
 			BUG_ON(!(rcd->sop && rcd->eop));
-			dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n",
+			dev_dbg(&adapter->netdev->dev,
+				"rxRing[%u][%u] 0 length\n",
 				ring_idx, idx);
 			goto rcd_done;
 		}

@@ -1683,7 +1691,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
 	int err;
 	u32 ret;
 
-	dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
+	dev_dbg(&adapter->netdev->dev,
+		"%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
 		" %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
 		adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
 		adapter->rx_queue.rx_ring[0].size,
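
All the vmxnet3 hunks above make the same substitution: an unconditional dprintk() at KERN_ERR severity becomes dev_dbg(), which tags the message with the device and compiles away unless DEBUG or dynamic debug is enabled. A rough user-space sketch of that gating; dev_dbg_sketch() is an illustrative stand-in, not the kernel macro:

#include <stdio.h>

#ifdef DEBUG
#define dev_dbg_sketch(fmt, ...) \
	fprintf(stderr, "vmxnet3: " fmt, ##__VA_ARGS__)
#else
#define dev_dbg_sketch(fmt, ...) do { } while (0)	/* compiled out */
#endif

int main(void)
{
	dev_dbg_sketch("alloc_rx_buf: %d allocated\n", 32);
	puts("rebuild with -DDEBUG to see the debug output");
	return 0;
}
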
@@ -30,6 +30,7 @@
 #include <linux/types.h>
 #include <linux/ethtool.h>
 #include <linux/delay.h>
+#include <linux/device.h>
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/ethtool.h>

@@ -59,7 +60,6 @@
 #include <linux/if_vlan.h>
 #include <linux/if_arp.h>
 #include <linux/inetdevice.h>
-#include <linux/dst.h>
 
 #include "vmxnet3_defs.h"
 
@@ -1009,7 +1009,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX,
 
 static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev)
 {
-	/* set SBX00 SATA in IDE mode to AHCI mode */
+	/* set SBX00/Hudson-2 SATA in IDE mode to AHCI mode */
 	u8 tmp;
 
 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);

@@ -1028,8 +1028,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode);
-DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
 
 /*
  * Serverworks CSB5 IDE does not fully support native mode
@@ -19,7 +19,6 @@
  */
 //#define DEBUG
 #include <linux/virtio.h>
-#include <linux/virtio_ids.h>
 #include <linux/virtio_balloon.h>
 #include <linux/swap.h>
 #include <linux/kthread.h>

@@ -248,7 +247,7 @@ out:
 	return err;
 }
 
-static void virtballoon_remove(struct virtio_device *vdev)
+static void __devexit virtballoon_remove(struct virtio_device *vdev)
 {
 	struct virtio_balloon *vb = vdev->priv;
 
@@ -1253,6 +1253,7 @@ static int nfs_parse_mount_options(char *raw,
 			default:
 				dfprintk(MOUNT, "NFS: unrecognized "
 					 "transport protocol\n");
+				kfree(string);
 				return 0;
 			}
 			break;
@@ -91,6 +91,7 @@ static int dnotify_handle_event(struct fsnotify_group *group,
 	struct dnotify_struct *dn;
 	struct dnotify_struct **prev;
 	struct fown_struct *fown;
+	__u32 test_mask = event->mask & ~FS_EVENT_ON_CHILD;
 
 	to_tell = event->to_tell;
 

@@ -106,7 +107,7 @@ static int dnotify_handle_event(struct fsnotify_group *group,
 	spin_lock(&entry->lock);
 	prev = &dnentry->dn;
 	while ((dn = *prev) != NULL) {
-		if ((dn->dn_mask & event->mask) == 0) {
+		if ((dn->dn_mask & test_mask) == 0) {
 			prev = &dn->dn_next;
 			continue;
 		}
@@ -324,11 +324,11 @@ int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
 	spin_lock(&group->mark_lock);
 	spin_lock(&inode->i_lock);
 
-	entry->group = group;
-	entry->inode = inode;
-
 	lentry = fsnotify_find_mark_entry(group, inode);
 	if (!lentry) {
+		entry->group = group;
+		entry->inode = inode;
+
 		hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries);
 		list_add(&entry->g_list, &group->mark_entries);
 
@@ -143,7 +143,7 @@ static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new
 		/* remember, after old was put on the wait_q we aren't
 		 * allowed to look at the inode any more, only thing
 		 * left to check was if the file_name is the same */
-		if (old->name_len &&
+		if (!old->name_len ||
 		    !strcmp(old->file_name, new->file_name))
 			return true;
 		break;
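
The notification.c fix flips the merge test so that an old event carrying no file name can absorb the new one, instead of never merging. A toy user-space model of the corrected predicate; events_merge() is an illustrative name, the real code compares name_len and file_name on fsnotify events:

#include <stdio.h>
#include <string.h>

/* Mirror of the fixed condition: merge when the old event has no name,
 * or when both names match. The old check wrongly required a name. */
static int events_merge(const char *old_name, const char *new_name)
{
	if (!old_name || !*old_name)
		return 1;				/* no name: merge */
	return strcmp(old_name, new_name) == 0;		/* names match */
}

int main(void)
{
	printf("no name vs \"a\": %d\n", events_merge("", "a"));
	printf("\"a\"  vs \"a\": %d\n", events_merge("a", "a"));
	printf("\"a\"  vs \"b\": %d\n", events_merge("a", "b"));
	return 0;
}
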
fs/pipe.c:

@@ -777,36 +777,55 @@ pipe_rdwr_release(struct inode *inode, struct file *filp)
 static int
 pipe_read_open(struct inode *inode, struct file *filp)
 {
-	/* We could have perhaps used atomic_t, but this and friends
-	   below are the only places.  So it doesn't seem worthwhile.  */
+	int ret = -ENOENT;
+
 	mutex_lock(&inode->i_mutex);
-	inode->i_pipe->readers++;
+
+	if (inode->i_pipe) {
+		ret = 0;
+		inode->i_pipe->readers++;
+	}
+
 	mutex_unlock(&inode->i_mutex);
 
-	return 0;
+	return ret;
 }
 
 static int
 pipe_write_open(struct inode *inode, struct file *filp)
 {
+	int ret = -ENOENT;
+
 	mutex_lock(&inode->i_mutex);
-	inode->i_pipe->writers++;
+
+	if (inode->i_pipe) {
+		ret = 0;
+		inode->i_pipe->writers++;
+	}
+
 	mutex_unlock(&inode->i_mutex);
 
-	return 0;
+	return ret;
 }
 
 static int
 pipe_rdwr_open(struct inode *inode, struct file *filp)
 {
+	int ret = -ENOENT;
+
 	mutex_lock(&inode->i_mutex);
-	if (filp->f_mode & FMODE_READ)
-		inode->i_pipe->readers++;
-	if (filp->f_mode & FMODE_WRITE)
-		inode->i_pipe->writers++;
+
+	if (inode->i_pipe) {
+		ret = 0;
+		if (filp->f_mode & FMODE_READ)
+			inode->i_pipe->readers++;
+		if (filp->f_mode & FMODE_WRITE)
+			inode->i_pipe->writers++;
+	}
+
 	mutex_unlock(&inode->i_mutex);
 
-	return 0;
+	return ret;
 }
 
 /*
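
The fs/pipe.c hunk hardens the f_op->open paths against inode->i_pipe being NULL: a pipe inode can be re-opened through /proc/*/fd after the last reader and writer have released it, and the fixed paths now return -ENOENT instead of dereferencing the freed pointer. A benign sketch of the re-open route involved; it only demonstrates the /proc/self/fd path, not the race itself:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	char path[64];

	if (pipe(fds) < 0)
		return 1;
	snprintf(path, sizeof(path), "/proc/self/fd/%d", fds[0]);

	/* Re-open the pipe by name; succeeds while the pipe is alive. */
	int fd = open(path, O_RDONLY);
	printf("re-open of %s: %s\n", path, fd >= 0 ? "ok" : "failed");
	return 0;
}
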
@@ -364,6 +364,7 @@ unifdef-y += utsname.h
 unifdef-y += videodev2.h
 unifdef-y += videodev.h
 unifdef-y += virtio_config.h
+unifdef-y += virtio_ids.h
 unifdef-y += virtio_blk.h
 unifdef-y += virtio_net.h
 unifdef-y += virtio_9p.h
@@ -379,9 +379,6 @@
 #define PCI_DEVICE_ID_ATI_IXP600_IDE	0x438c
 #define PCI_DEVICE_ID_ATI_IXP700_SATA	0x4390
 #define PCI_DEVICE_ID_ATI_IXP700_IDE	0x439c
-/* AMD SB Chipset */
-#define PCI_DEVICE_ID_AMD_SB900_IDE	0x780c
-#define PCI_DEVICE_ID_AMD_SB900_SATA_IDE	0x7800
 
 #define PCI_VENDOR_ID_VLSI		0x1004
 #define PCI_DEVICE_ID_VLSI_82C592	0x0005

@@ -553,9 +550,10 @@
 #define PCI_DEVICE_ID_AMD_CS5536_UDC	0x2096
 #define PCI_DEVICE_ID_AMD_CS5536_UOC	0x2097
 #define PCI_DEVICE_ID_AMD_CS5536_IDE	0x209A
-
 #define PCI_DEVICE_ID_AMD_LX_VIDEO	0x2081
 #define PCI_DEVICE_ID_AMD_LX_AES	0x2082
+#define PCI_DEVICE_ID_AMD_HUDSON2_IDE		0x780c
+#define PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE	0x7800
 
 #define PCI_VENDOR_ID_TRIDENT		0x1023
 #define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX	0x2000
@@ -2,6 +2,7 @@
 #define _LINUX_VIRTIO_9P_H
 /* This header is BSD licensed so anyone can use the definitions to implement
  * compatible drivers/servers. */
+#include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
 
 /* Maximum number of virtio channels per partition (1 for now) */
@@ -2,6 +2,7 @@
 #define _LINUX_VIRTIO_BALLOON_H
 /* This header is BSD licensed so anyone can use the definitions to implement
  * compatible drivers/servers. */
+#include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
 
 /* The feature bitmap for virtio balloon */
@@ -3,6 +3,7 @@
 /* This header is BSD licensed so anyone can use the definitions to implement
  * compatible drivers/servers. */
 #include <linux/types.h>
+#include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
 
 /* Feature bits */

@@ -13,11 +14,8 @@
 #define VIRTIO_BLK_F_RO		5	/* Disk is read-only */
 #define VIRTIO_BLK_F_BLK_SIZE	6	/* Block size of disk is available*/
 #define VIRTIO_BLK_F_SCSI	7	/* Supports scsi command passthru */
-#define VIRTIO_BLK_F_IDENTIFY	8	/* ATA IDENTIFY supported */
 #define VIRTIO_BLK_F_FLUSH	9	/* Cache flush command support */
 
-#define VIRTIO_BLK_ID_BYTES	(sizeof(__u16[256]))	/* IDENTIFY DATA */
-
 struct virtio_blk_config {
 	/* The capacity (in 512-byte sectors). */
 	__u64 capacity;

@@ -33,7 +31,6 @@ struct virtio_blk_config {
 	} geometry;
 	/* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
 	__u32 blk_size;
-	__u8 identify[VIRTIO_BLK_ID_BYTES];
 } __attribute__((packed));
 
 /*
@@ -1,6 +1,7 @@
 #ifndef _LINUX_VIRTIO_CONSOLE_H
 #define _LINUX_VIRTIO_CONSOLE_H
 #include <linux/types.h>
+#include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
 /* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
  * anyone can use the definitions to implement compatible drivers/servers. */
@@ -3,6 +3,7 @@
 /* This header is BSD licensed so anyone can use the definitions to implement
  * compatible drivers/servers. */
 #include <linux/types.h>
+#include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
 #include <linux/if_ether.h>
 
@@ -2,6 +2,7 @@
 #define _LINUX_VIRTIO_RNG_H
 /* This header is BSD licensed so anyone can use the definitions to implement
  * compatible drivers/servers. */
+#include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
 
 #endif /* _LINUX_VIRTIO_RNG_H */
@@ -130,11 +130,11 @@ struct inet_timewait_sock {
 	__u16			tw_num;
 	kmemcheck_bitfield_begin(flags);
 	/* And these are ours. */
-	__u8			tw_ipv6only:1,
-				tw_transparent:1;
-	/* 14 bits hole, try to pack */
+	unsigned int		tw_ipv6only    : 1,
+				tw_transparent : 1,
+				tw_pad	       : 14,	/* 14 bits hole */
+				tw_ipv6_offset : 16;
 	kmemcheck_bitfield_end(flags);
-	__u16			tw_ipv6_offset;
 	unsigned long		tw_ttd;
 	struct inet_bind_bucket	*tw_tb;
 	struct hlist_node	tw_death_node;
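
The inet_timewait_sock change widens the flags to one full 32-bit word with explicit tw_pad and tw_ipv6_offset members, so every bit between kmemcheck_bitfield_begin/end is named and gets initialized together. A user-space check of the padded layout; struct tw_flags is an illustrative copy, not the kernel type:

#include <stdio.h>

/* Same shape as the new bitfield: 1 + 1 + 14 + 16 = 32 named bits. */
struct tw_flags {
	unsigned int ipv6only    : 1,
		     transparent : 1,
		     pad         : 14,
		     ipv6_offset : 16;
};

int main(void)
{
	/* Expect 4: the bitfield occupies exactly one 32-bit word. */
	printf("sizeof(struct tw_flags) = %zu\n", sizeof(struct tw_flags));
	return 0;
}
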
@@ -19,7 +19,7 @@
  * The time it takes is system-specific though, so when we test this
  * during system bootup we allow a LOT of time.
  */
-#define TEST_SUSPEND_SECONDS	5
+#define TEST_SUSPEND_SECONDS	10
 
 static unsigned long suspend_test_start_time;
 

@@ -49,7 +49,8 @@ void suspend_test_finish(const char *label)
 	 * has some performance issues.  The stack dump of a WARN_ON
 	 * is more likely to get the right attention than a printk...
 	 */
-	WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label);
+	WARN(msec > (TEST_SUSPEND_SECONDS * 1000),
+	     "Component: %s, time: %u\n", label, msec);
 }
 
 /*
@@ -43,7 +43,6 @@
 #include <net/9p/transport.h>
 #include <linux/scatterlist.h>
 #include <linux/virtio.h>
-#include <linux/virtio_ids.h>
 #include <linux/virtio_9p.h>
 
 #define VIRTQUEUE_NUM	128
@@ -92,6 +92,8 @@ static void add_conn(struct work_struct *work)
 
 	dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
 
+	dev_set_drvdata(&conn->dev, conn);
+
 	if (device_add(&conn->dev) < 0) {
 		BT_ERR("Failed to register connection device");
 		return;

@@ -144,8 +146,6 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
 	conn->dev.class = bt_class;
 	conn->dev.parent = &hdev->dev;
 
-	dev_set_drvdata(&conn->dev, conn);
-
 	device_initialize(&conn->dev);
 
 	INIT_WORK(&conn->work_add, add_conn);
@@ -555,12 +555,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
 
 	conn->feat_mask = 0;
 
-	setup_timer(&conn->info_timer, l2cap_info_timeout,
-						(unsigned long) conn);
-
 	spin_lock_init(&conn->lock);
 	rwlock_init(&conn->chan_list.lock);
 
+	setup_timer(&conn->info_timer, l2cap_info_timeout,
+						(unsigned long) conn);
+
 	conn->disc_reason = 0x13;
 
 	return conn;

@@ -783,6 +783,9 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
 		/* Default config options */
 		pi->conf_len = 0;
 		pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+		skb_queue_head_init(TX_QUEUE(sk));
+		skb_queue_head_init(SREJ_QUEUE(sk));
+		INIT_LIST_HEAD(SREJ_LIST(sk));
 	}
 
 static struct proto l2cap_proto = {
@@ -446,6 +446,28 @@ extern int sysctl_tcp_synack_retries;
 
 EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
 
+/* Decide when to expire the request and when to resend SYN-ACK */
+static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
+				  const int max_retries,
+				  const u8 rskq_defer_accept,
+				  int *expire, int *resend)
+{
+	if (!rskq_defer_accept) {
+		*expire = req->retrans >= thresh;
+		*resend = 1;
+		return;
+	}
+	*expire = req->retrans >= thresh &&
+		  (!inet_rsk(req)->acked || req->retrans >= max_retries);
+	/*
+	 * Do not resend while waiting for data after ACK,
+	 * start to resend on end of deferring period to give
+	 * last chance for data or ACK to create established socket.
+	 */
+	*resend = !inet_rsk(req)->acked ||
+		  req->retrans >= rskq_defer_accept - 1;
+}
+
 void inet_csk_reqsk_queue_prune(struct sock *parent,
 				const unsigned long interval,
 				const unsigned long timeout,

@@ -501,9 +523,15 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
 		reqp=&lopt->syn_table[i];
 		while ((req = *reqp) != NULL) {
 			if (time_after_eq(now, req->expires)) {
-				if ((req->retrans < thresh ||
-				     (inet_rsk(req)->acked && req->retrans < max_retries))
-				    && !req->rsk_ops->rtx_syn_ack(parent, req)) {
+				int expire = 0, resend = 0;
+
+				syn_ack_recalc(req, thresh, max_retries,
+					       queue->rskq_defer_accept,
+					       &expire, &resend);
+				if (!expire &&
+				    (!resend ||
+				     !req->rsk_ops->rtx_syn_ack(parent, req) ||
+				     inet_rsk(req)->acked)) {
 					unsigned long timeo;
 
 					if (req->retrans++ == 0)
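
Since syn_ack_recalc() reads only two request fields, its decision table can be exercised in user space. A sketch with a toy struct standing in for request_sock; the thresh, max_retries, and rskq_defer_accept values below are arbitrary:

#include <stdio.h>

typedef unsigned char u8;

/* Minimal model of the fields syn_ack_recalc() looks at. */
struct req {
	int retrans;
	int acked;
};

/* Logic copied from the helper above, with inet_rsk(req)->acked
 * replaced by the toy field. */
static void syn_ack_recalc(struct req *req, int thresh, int max_retries,
			   u8 rskq_defer_accept, int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->retrans >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->retrans >= thresh &&
		  (!req->acked || req->retrans >= max_retries);
	*resend = !req->acked || req->retrans >= rskq_defer_accept - 1;
}

int main(void)
{
	struct req r;
	int expire, resend;

	for (r.retrans = 0; r.retrans <= 6; r.retrans++)
		for (r.acked = 0; r.acked <= 1; r.acked++) {
			syn_ack_recalc(&r, 5, 5, 3, &expire, &resend);
			printf("retrans=%d acked=%d -> expire=%d resend=%d\n",
			       r.retrans, r.acked, expire, resend);
		}
	return 0;
}
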
@@ -634,17 +634,16 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 				break;
 			}
 			dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
-			if (dev) {
+			if (dev)
 				mreq.imr_ifindex = dev->ifindex;
-				dev_put(dev);
-			}
 		} else
-			dev = __dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
+			dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
 
 
 		err = -EADDRNOTAVAIL;
 		if (!dev)
 			break;
+		dev_put(dev);
 
 		err = -EINVAL;
 		if (sk->sk_bound_dev_if &&
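
The substance of this hunk (and the matching IPv6 one further down) is the switch from __dev_get_by_index(), which is only safe while holding RTNL, to dev_get_by_index(), which takes a reference that must later be dropped with dev_put(); the setsockopt path does not hold RTNL here. As a loose user-space analogue of validating an interface index before use:

#include <stdio.h>
#include <net/if.h>

int main(void)
{
	char name[IF_NAMESIZE];
	unsigned idx = if_nametoindex("lo");

	if (!idx) {
		perror("if_nametoindex");
		return 1;
	}
	/* Round-trip the index back to a name to confirm it exists. */
	printf("lo -> index %u -> %s\n", idx,
	       if_indextoname(idx, name) ? name : "?");
	return 0;
}
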
@@ -326,6 +326,43 @@ void tcp_enter_memory_pressure(struct sock *sk)
 
 EXPORT_SYMBOL(tcp_enter_memory_pressure);
 
+/* Convert seconds to retransmits based on initial and max timeout */
+static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
+{
+	u8 res = 0;
+
+	if (seconds > 0) {
+		int period = timeout;
+
+		res = 1;
+		while (seconds > period && res < 255) {
+			res++;
+			timeout <<= 1;
+			if (timeout > rto_max)
+				timeout = rto_max;
+			period += timeout;
+		}
+	}
+	return res;
+}
+
+/* Convert retransmits to seconds based on initial and max timeout */
+static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
+{
+	int period = 0;
+
+	if (retrans > 0) {
+		period = timeout;
+		while (--retrans) {
+			timeout <<= 1;
+			if (timeout > rto_max)
+				timeout = rto_max;
+			period += timeout;
+		}
+	}
+	return period;
+}
+
 /*
  *	Wait for a TCP event.
  *

@@ -1405,7 +1442,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 				goto found_ok_skb;
 			if (tcp_hdr(skb)->fin)
 				goto found_fin_ok;
-			WARN_ON(!(flags & MSG_PEEK));
+			if (WARN_ON(!(flags & MSG_PEEK)))
+				printk(KERN_INFO "recvmsg bug 2: copied %X "
+				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
 		}
 
 		/* Well, if we have backlog, try to process it now yet. */

@@ -2163,16 +2202,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		break;
 
 	case TCP_DEFER_ACCEPT:
-		icsk->icsk_accept_queue.rskq_defer_accept = 0;
-		if (val > 0) {
-			/* Translate value in seconds to number of
-			 * retransmits */
-			while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
-			       val > ((TCP_TIMEOUT_INIT / HZ) <<
-				      icsk->icsk_accept_queue.rskq_defer_accept))
-				icsk->icsk_accept_queue.rskq_defer_accept++;
-			icsk->icsk_accept_queue.rskq_defer_accept++;
-		}
+		/* Translate value in seconds to number of retransmits */
+		icsk->icsk_accept_queue.rskq_defer_accept =
+			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
+					TCP_RTO_MAX / HZ);
 		break;
 
 	case TCP_WINDOW_CLAMP:

@@ -2353,8 +2386,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 		val = (val ? : sysctl_tcp_fin_timeout) / HZ;
 		break;
 	case TCP_DEFER_ACCEPT:
-		val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
-			((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
+		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
+				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
 		break;
 	case TCP_WINDOW_CLAMP:
 		val = tp->window_clamp;
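
secs_to_retrans() and retrans_to_secs() are pure arithmetic over the exponential-backoff schedule, so they can be compiled and checked in user space. With TCP_TIMEOUT_INIT/HZ = 3 and TCP_RTO_MAX/HZ = 120, as in this tree, the round trip shows how TCP_DEFER_ACCEPT seconds quantize to retransmit counts:

#include <stdio.h>

typedef unsigned char u8;

/* Copies of the helpers added to net/ipv4/tcp.c above. */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}

int main(void)
{
	int secs[] = { 0, 1, 3, 10, 60, 300 };
	unsigned i;

	for (i = 0; i < sizeof(secs) / sizeof(secs[0]); i++) {
		u8 r = secs_to_retrans(secs[i], 3, 120);
		printf("%3d s -> %u retransmits -> %d s\n",
		       secs[i], (unsigned)r, retrans_to_secs(r, 3, 120));
	}
	return 0;
}
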
@@ -641,10 +641,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	if (!(flg & TCP_FLAG_ACK))
 		return NULL;
 
-	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
-	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
+	if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
 	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
-		inet_csk(sk)->icsk_accept_queue.rskq_defer_accept--;
 		inet_rsk(req)->acked = 1;
 		return NULL;
 	}
@@ -496,13 +496,17 @@ done:
 			goto e_inval;
 
 		if (val) {
+			struct net_device *dev;
+
 			if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
 				goto e_inval;
 
-			if (__dev_get_by_index(net, val) == NULL) {
+			dev = dev_get_by_index(net, val);
+			if (!dev) {
 				retv = -ENODEV;
 				break;
 			}
+			dev_put(dev);
 		}
 		np->mcast_oif = val;
 		retv = 0;
@@ -1074,6 +1074,8 @@ restart:
 		err = -ECONNREFUSED;
 		if (other->sk_state != TCP_LISTEN)
 			goto out_unlock;
+		if (other->sk_shutdown & RCV_SHUTDOWN)
+			goto out_unlock;
 
 		if (unix_recvq_full(other)) {
 			err = -EAGAIN;
@@ -2717,8 +2717,6 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
 	int r;
 	int cpu;
 
-	kvm_init_debug();
-
 	r = kvm_arch_init(opaque);
 	if (r)
 		goto out_fail;

@@ -2785,6 +2783,8 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
 	kvm_preempt_ops.sched_in = kvm_sched_in;
 	kvm_preempt_ops.sched_out = kvm_sched_out;
 
+	kvm_init_debug();
+
 	return 0;
 
 out_free:

@@ -2807,7 +2807,6 @@ out_free_0:
 out:
 	kvm_arch_exit();
 out_fail:
-	kvm_exit_debug();
 	return r;
 }
 EXPORT_SYMBOL_GPL(kvm_init);

@@ -2815,6 +2814,7 @@ EXPORT_SYMBOL_GPL(kvm_init);
 void kvm_exit(void)
 {
 	tracepoint_synchronize_unregister();
+	kvm_exit_debug();
 	misc_deregister(&kvm_dev);
 	kmem_cache_destroy(kvm_vcpu_cache);
 	sysdev_unregister(&kvm_sysdev);

@@ -2824,7 +2824,6 @@ void kvm_exit(void)
 	on_each_cpu(hardware_disable, NULL, 1);
 	kvm_arch_hardware_unsetup();
 	kvm_arch_exit();
-	kvm_exit_debug();
 	free_cpumask_var(cpus_hardware_enabled);
 	__free_page(bad_page);
 }