Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

David S. Miller 2012-04-12 19:41:23 -04:00
commit 011e3c6325
265 changed files with 2384 additions and 1882 deletions


@@ -34,8 +34,7 @@ Current Status: linux-2.6.34-mmotm(development version of 2010/April)
 
 Features:
  - accounting anonymous pages, file caches, swap caches usage and limiting them.
- - private LRU and reclaim routine. (system's global LRU and private LRU
-   work independently from each other)
+ - pages are linked to per-memcg LRU exclusively, and there is no global LRU.
  - optionally, memory+swap usage can be accounted and limited.
  - hierarchical accounting
  - soft limit
@@ -154,7 +153,7 @@ updated. page_cgroup has its own LRU on cgroup.
 2.2.1 Accounting details
 All mapped anon pages (RSS) and cache pages (Page Cache) are accounted.
-Some pages which are never reclaimable and will not be on the global LRU
+Some pages which are never reclaimable and will not be on the LRU
 are not accounted. We just account pages under usual VM management.
 RSS pages are accounted at page_fault unless they've already been accounted


@@ -114,7 +114,7 @@ members are defined:
 struct file_system_type {
 	const char *name;
 	int fs_flags;
-	struct dentry (*mount) (struct file_system_type *, int,
+	struct dentry *(*mount) (struct file_system_type *, int,
 		       const char *, void *);
 	void (*kill_sb) (struct super_block *);
 	struct module *owner;
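For reference, a minimal sketch of a filesystem registration using the corrected ->mount prototype; the examplefs_* names and the fill_super callback are illustrative only, not part of this patch:

#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical fill_super callback; a real filesystem supplies its own. */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent);

/* ->mount must return struct dentry *, matching the corrected prototype above. */
static struct dentry *examplefs_mount(struct file_system_type *fs_type,
				      int flags, const char *dev_name,
				      void *data)
{
	return mount_nodev(fs_type, flags, data, examplefs_fill_super);
}

static struct file_system_type examplefs_type = {
	.owner   = THIS_MODULE,
	.name    = "examplefs",
	.mount   = examplefs_mount,
	.kill_sb = kill_anon_super,
};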


@@ -228,7 +228,7 @@ M:	Len Brown <lenb@kernel.org>
 L:	linux-acpi@vger.kernel.org
 W:	http://www.lesswatts.org/projects/acpi/
 Q:	http://patchwork.kernel.org/project/linux-acpi/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
 S:	Supported
 F:	drivers/acpi/
 F:	drivers/pnp/pnpacpi/
@@ -1522,8 +1522,8 @@ M:	Gustavo Padovan <gustavo@padovan.org>
 M:	Johan Hedberg <johan.hedberg@gmail.com>
 L:	linux-bluetooth@vger.kernel.org
 W:	http://www.bluez.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jh/bluetooth.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
 S:	Maintained
 F:	drivers/bluetooth/
@@ -1533,8 +1533,8 @@ M:	Gustavo Padovan <gustavo@padovan.org>
 M:	Johan Hedberg <johan.hedberg@gmail.com>
 L:	linux-bluetooth@vger.kernel.org
 W:	http://www.bluez.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jh/bluetooth.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
 S:	Maintained
 F:	net/bluetooth/
 F:	include/net/bluetooth/
@ -2451,17 +2451,17 @@ F: fs/ecryptfs/
EDAC-CORE EDAC-CORE
M: Doug Thompson <dougthompson@xmission.com> M: Doug Thompson <dougthompson@xmission.com>
L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net W: bluesmoke.sourceforge.net
S: Supported S: Supported
F: Documentation/edac.txt F: Documentation/edac.txt
F: drivers/edac/edac_* F: drivers/edac/
F: include/linux/edac.h F: include/linux/edac.h
EDAC-AMD64 EDAC-AMD64
M: Doug Thompson <dougthompson@xmission.com> M: Doug Thompson <dougthompson@xmission.com>
M: Borislav Petkov <borislav.petkov@amd.com> M: Borislav Petkov <borislav.petkov@amd.com>
L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net W: bluesmoke.sourceforge.net
S: Supported S: Supported
F: drivers/edac/amd64_edac* F: drivers/edac/amd64_edac*
@ -2469,35 +2469,35 @@ F: drivers/edac/amd64_edac*
EDAC-E752X EDAC-E752X
M: Mark Gross <mark.gross@intel.com> M: Mark Gross <mark.gross@intel.com>
M: Doug Thompson <dougthompson@xmission.com> M: Doug Thompson <dougthompson@xmission.com>
L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net W: bluesmoke.sourceforge.net
S: Maintained S: Maintained
F: drivers/edac/e752x_edac.c F: drivers/edac/e752x_edac.c
EDAC-E7XXX EDAC-E7XXX
M: Doug Thompson <dougthompson@xmission.com> M: Doug Thompson <dougthompson@xmission.com>
L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net W: bluesmoke.sourceforge.net
S: Maintained S: Maintained
F: drivers/edac/e7xxx_edac.c F: drivers/edac/e7xxx_edac.c
EDAC-I82443BXGX EDAC-I82443BXGX
M: Tim Small <tim@buttersideup.com> M: Tim Small <tim@buttersideup.com>
L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net W: bluesmoke.sourceforge.net
S: Maintained S: Maintained
F: drivers/edac/i82443bxgx_edac.c F: drivers/edac/i82443bxgx_edac.c
EDAC-I3000 EDAC-I3000
M: Jason Uhlenkott <juhlenko@akamai.com> M: Jason Uhlenkott <juhlenko@akamai.com>
L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net W: bluesmoke.sourceforge.net
S: Maintained S: Maintained
F: drivers/edac/i3000_edac.c F: drivers/edac/i3000_edac.c
EDAC-I5000 EDAC-I5000
M: Doug Thompson <dougthompson@xmission.com> M: Doug Thompson <dougthompson@xmission.com>
L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net W: bluesmoke.sourceforge.net
S: Maintained S: Maintained
F: drivers/edac/i5000_edac.c F: drivers/edac/i5000_edac.c
@ -2526,21 +2526,21 @@ F: drivers/edac/i7core_edac.c
EDAC-I82975X EDAC-I82975X
M: Ranganathan Desikan <ravi@jetztechnologies.com> M: Ranganathan Desikan <ravi@jetztechnologies.com>
M: "Arvind R." <arvino55@gmail.com> M: "Arvind R." <arvino55@gmail.com>
L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net W: bluesmoke.sourceforge.net
S: Maintained S: Maintained
F: drivers/edac/i82975x_edac.c F: drivers/edac/i82975x_edac.c
EDAC-PASEMI EDAC-PASEMI
M: Egor Martovetsky <egor@pasemi.com> M: Egor Martovetsky <egor@pasemi.com>
L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net W: bluesmoke.sourceforge.net
S: Maintained S: Maintained
F: drivers/edac/pasemi_edac.c F: drivers/edac/pasemi_edac.c
EDAC-R82600 EDAC-R82600
M: Tim Small <tim@buttersideup.com> M: Tim Small <tim@buttersideup.com>
L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers) L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net W: bluesmoke.sourceforge.net
S: Maintained S: Maintained
F: drivers/edac/r82600_edac.c F: drivers/edac/r82600_edac.c
@@ -4528,8 +4528,7 @@ S:	Supported
 F:	drivers/net/ethernet/myricom/myri10ge/
 
 NATSEMI ETHERNET DRIVER (DP8381x)
-M:	Tim Hockin <thockin@hockin.org>
-S:	Maintained
+S:	Orphan
 F:	drivers/net/ethernet/natsemi/natsemi.c
 
 NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER
@@ -4798,6 +4797,7 @@ F:	arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
 F:	arch/arm/mach-omap2/clockdomain44xx.c
 
 OMAP AUDIO SUPPORT
+M:	Peter Ujfalusi <peter.ujfalusi@ti.com>
 M:	Jarkko Nikula <jarkko.nikula@bitmer.com>
 L:	alsa-devel@alsa-project.org (subscribers-only)
 L:	linux-omap@vger.kernel.org
@@ -5112,6 +5112,11 @@ F:	drivers/i2c/busses/i2c-pca-*
 F:	include/linux/i2c-algo-pca.h
 F:	include/linux/i2c-pca-platform.h
 
+PCDP - PRIMARY CONSOLE AND DEBUG PORT
+M:	Khalid Aziz <khalid.aziz@hp.com>
+S:	Maintained
+F:	drivers/firmware/pcdp.*
+
 PCI ERROR RECOVERY
 M:	Linas Vepstas <linasvepstas@gmail.com>
 L:	linux-pci@vger.kernel.org
@@ -7456,8 +7461,7 @@ F:	include/linux/wm97xx.h
 
 WOLFSON MICROELECTRONICS DRIVERS
 M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
-M:	Ian Lartey <ian@opensource.wolfsonmicro.com>
-M:	Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+L:	patches@opensource.wolfsonmicro.com
 T:	git git://opensource.wolfsonmicro.com/linux-2.6-asoc
 T:	git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
 W:	http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices


@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*


@@ -42,10 +42,6 @@
 /* This number is used when no interrupt has been assigned */
 #define NO_IRQ 0
 
-struct irq_data;
-extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
-extern irq_hw_number_t virq_to_hw(unsigned int virq);
-
 extern void __init init_pic_c64xplus(void);
 
 extern void init_IRQ(void);


@@ -130,16 +130,3 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
 	return 0;
 }
-
-irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
-{
-	return d->hwirq;
-}
-EXPORT_SYMBOL_GPL(irqd_to_hwirq);
-
-irq_hw_number_t virq_to_hw(unsigned int virq)
-{
-	struct irq_data *irq_data = irq_get_irq_data(virq);
-	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
-}
-EXPORT_SYMBOL_GPL(virq_to_hw);


@@ -33,8 +33,6 @@ extern atomic_t ppc_n_lost_interrupts;
 /* Same thing, used by the generic IRQ code */
 #define NR_IRQS_LEGACY		NUM_ISA_INTERRUPTS
 
-struct irq_data;
-extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
 extern irq_hw_number_t virq_to_hw(unsigned int virq);
 
 /**


@ -206,40 +206,43 @@ reenable_mmu: /* re-enable mmu so we can */
andi. r10,r10,MSR_EE /* Did EE change? */ andi. r10,r10,MSR_EE /* Did EE change? */
beq 1f beq 1f
/* Save handler and return address into the 2 unused words
* of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
* else can be recovered from the pt_regs except r3 which for
* normal interrupts has been set to pt_regs and for syscalls
* is an argument, so we temporarily use ORIG_GPR3 to save it
*/
stw r9,8(r1)
stw r11,12(r1)
stw r3,ORIG_GPR3(r1)
/* /*
* The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1. * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
* If from user mode there is only one stack frame on the stack, and * If from user mode there is only one stack frame on the stack, and
* accessing CALLER_ADDR1 will cause oops. So we need create a dummy * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
* stack frame to make trace_hardirqs_off happy. * stack frame to make trace_hardirqs_off happy.
*
* This is handy because we also need to save a bunch of GPRs,
* r3 can be different from GPR3(r1) at this point, r9 and r11
* contains the old MSR and handler address respectively,
* r4 & r5 can contain page fault arguments that need to be passed
* along as well. r12, CCR, CTR, XER etc... are left clobbered as
* they aren't useful past this point (aren't syscall arguments),
* the rest is restored from the exception frame.
*/ */
stwu r1,-32(r1)
stw r9,8(r1)
stw r11,12(r1)
stw r3,16(r1)
stw r4,20(r1)
stw r5,24(r1)
andi. r12,r12,MSR_PR andi. r12,r12,MSR_PR
beq 11f b 11f
stwu r1,-16(r1)
bl trace_hardirqs_off bl trace_hardirqs_off
addi r1,r1,16
b 12f b 12f
11: 11:
bl trace_hardirqs_off bl trace_hardirqs_off
12: 12:
lwz r5,24(r1)
lwz r4,20(r1)
lwz r3,16(r1)
lwz r11,12(r1)
lwz r9,8(r1)
addi r1,r1,32
lwz r0,GPR0(r1) lwz r0,GPR0(r1)
lwz r3,ORIG_GPR3(r1)
lwz r4,GPR4(r1)
lwz r5,GPR5(r1)
lwz r6,GPR6(r1) lwz r6,GPR6(r1)
lwz r7,GPR7(r1) lwz r7,GPR7(r1)
lwz r8,GPR8(r1) lwz r8,GPR8(r1)
lwz r9,8(r1)
lwz r11,12(r1)
1: mtctr r11 1: mtctr r11
mtlr r9 mtlr r9
bctr /* jump to handler */ bctr /* jump to handler */


@@ -560,12 +560,6 @@ void do_softirq(void)
 	local_irq_restore(flags);
 }
 
-irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
-{
-	return d->hwirq;
-}
-EXPORT_SYMBOL_GPL(irqd_to_hwirq);
-
 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
 	struct irq_data *irq_data = irq_get_irq_data(virq);


@@ -1235,7 +1235,7 @@ void __ppc64_runlatch_on(void)
 	ctrl |= CTRL_RUNLATCH;
 	mtspr(SPRN_CTRLT, ctrl);
 
-	ti->local_flags |= TLF_RUNLATCH;
+	ti->local_flags |= _TLF_RUNLATCH;
 }
 
 /* Called with hard IRQs off */
@@ -1244,7 +1244,7 @@ void __ppc64_runlatch_off(void)
 	struct thread_info *ti = current_thread_info();
 	unsigned long ctrl;
 
-	ti->local_flags &= ~TLF_RUNLATCH;
+	ti->local_flags &= ~_TLF_RUNLATCH;
 
 	ctrl = mfspr(SPRN_CTRLF);
 	ctrl &= ~CTRL_RUNLATCH;


@@ -173,9 +173,9 @@ static void __init kvm_linear_init_one(ulong size, int count, int type)
 
 static struct kvmppc_linear_info *kvm_alloc_linear(int type)
 {
-	struct kvmppc_linear_info *ri;
+	struct kvmppc_linear_info *ri, *ret;
 
-	ri = NULL;
+	ret = NULL;
 	spin_lock(&linear_lock);
 	list_for_each_entry(ri, &free_linears, list) {
 		if (ri->type != type)
@@ -183,11 +183,12 @@ static struct kvmppc_linear_info *kvm_alloc_linear(int type)
 		list_del(&ri->list);
 		atomic_inc(&ri->use_count);
+		memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
+		ret = ri;
 		break;
 	}
 	spin_unlock(&linear_lock);
-	memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
-	return ri;
+	return ret;
 }
 
 static void kvm_release_linear(struct kvmppc_linear_info *ri)
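The change above stops dereferencing the list cursor after a search that found nothing. A generic, hedged sketch of that pattern follows; the struct and function names are ours, not from this commit:

#include <linux/list.h>

/* Illustrative element type only. */
struct item {
	int type;
	struct list_head list;
};

/* After list_for_each_entry() runs to completion without a break, the
 * cursor points at the list head cast to an entry, not a real element,
 * so keep the result in a separate pointer and test that instead. */
static struct item *find_item(struct list_head *head, int type)
{
	struct item *pos, *found = NULL;

	list_for_each_entry(pos, head, list) {
		if (pos->type == type) {
			found = pos;
			break;
		}
	}
	return found;
}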


@ -46,8 +46,10 @@ _GLOBAL(__kvmppc_vcore_entry)
/* Save host state to the stack */ /* Save host state to the stack */
stdu r1, -SWITCH_FRAME_SIZE(r1) stdu r1, -SWITCH_FRAME_SIZE(r1)
/* Save non-volatile registers (r14 - r31) */ /* Save non-volatile registers (r14 - r31) and CR */
SAVE_NVGPRS(r1) SAVE_NVGPRS(r1)
mfcr r3
std r3, _CCR(r1)
/* Save host DSCR */ /* Save host DSCR */
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
@ -157,8 +159,10 @@ kvmppc_handler_highmem:
* R13 = PACA * R13 = PACA
*/ */
/* Restore non-volatile host registers (r14 - r31) */ /* Restore non-volatile host registers (r14 - r31) and CR */
REST_NVGPRS(r1) REST_NVGPRS(r1)
ld r4, _CCR(r1)
mtcr r4
addi r1, r1, SWITCH_FRAME_SIZE addi r1, r1, SWITCH_FRAME_SIZE
ld r0, PPC_LR_STKOFF(r1) ld r0, PPC_LR_STKOFF(r1)


@ -84,6 +84,10 @@ kvm_start_entry:
/* Save non-volatile registers (r14 - r31) */ /* Save non-volatile registers (r14 - r31) */
SAVE_NVGPRS(r1) SAVE_NVGPRS(r1)
/* Save CR */
mfcr r14
stw r14, _CCR(r1)
/* Save LR */ /* Save LR */
PPC_STL r0, _LINK(r1) PPC_STL r0, _LINK(r1)
@ -165,6 +169,9 @@ kvm_exit_loop:
PPC_LL r4, _LINK(r1) PPC_LL r4, _LINK(r1)
mtlr r4 mtlr r4
lwz r14, _CCR(r1)
mtcr r14
/* Restore non-volatile host registers (r14 - r31) */ /* Restore non-volatile host registers (r14 - r31) */
REST_NVGPRS(r1) REST_NVGPRS(r1)


@ -777,6 +777,7 @@ program_interrupt:
} }
} }
preempt_disable();
if (!(r & RESUME_HOST)) { if (!(r & RESUME_HOST)) {
/* To avoid clobbering exit_reason, only check for signals if /* To avoid clobbering exit_reason, only check for signals if
* we aren't already exiting to userspace for some other * we aren't already exiting to userspace for some other
@ -798,8 +799,6 @@ program_interrupt:
run->exit_reason = KVM_EXIT_INTR; run->exit_reason = KVM_EXIT_INTR;
r = -EINTR; r = -EINTR;
} else { } else {
preempt_disable();
/* In case an interrupt came in that was triggered /* In case an interrupt came in that was triggered
* from userspace (like DEC), we need to check what * from userspace (like DEC), we need to check what
* to inject now! */ * to inject now! */
@ -881,7 +880,8 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
switch (reg->id) { switch (reg->id) {
case KVM_REG_PPC_HIOR: case KVM_REG_PPC_HIOR:
r = put_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr); r = copy_to_user((u64 __user *)(long)reg->addr,
&to_book3s(vcpu)->hior, sizeof(u64));
break; break;
default: default:
break; break;
@ -896,7 +896,8 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
switch (reg->id) { switch (reg->id) {
case KVM_REG_PPC_HIOR: case KVM_REG_PPC_HIOR:
r = get_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr); r = copy_from_user(&to_book3s(vcpu)->hior,
(u64 __user *)(long)reg->addr, sizeof(u64));
if (!r) if (!r)
to_book3s(vcpu)->hior_explicit = true; to_book3s(vcpu)->hior_explicit = true;
break; break;


@ -34,7 +34,8 @@
/* r2 is special: it holds 'current', and it made nonvolatile in the /* r2 is special: it holds 'current', and it made nonvolatile in the
* kernel with the -ffixed-r2 gcc option. */ * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2 12 #define HOST_R2 12
#define HOST_NV_GPRS 16 #define HOST_CR 16
#define HOST_NV_GPRS 20
#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4)) #define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4) #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */ #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
@ -296,8 +297,10 @@ heavyweight_exit:
/* Return to kvm_vcpu_run(). */ /* Return to kvm_vcpu_run(). */
lwz r4, HOST_STACK_LR(r1) lwz r4, HOST_STACK_LR(r1)
lwz r5, HOST_CR(r1)
addi r1, r1, HOST_STACK_SIZE addi r1, r1, HOST_STACK_SIZE
mtlr r4 mtlr r4
mtcr r5
/* r3 still contains the return code from kvmppc_handle_exit(). */ /* r3 still contains the return code from kvmppc_handle_exit(). */
blr blr
@ -314,6 +317,8 @@ _GLOBAL(__kvmppc_vcpu_run)
stw r3, HOST_RUN(r1) stw r3, HOST_RUN(r1)
mflr r3 mflr r3
stw r3, HOST_STACK_LR(r1) stw r3, HOST_STACK_LR(r1)
mfcr r5
stw r5, HOST_CR(r1)
/* Save host non-volatile register state to stack. */ /* Save host non-volatile register state to stack. */
stw r14, HOST_NV_GPR(r14)(r1) stw r14, HOST_NV_GPR(r14)(r1)


@@ -392,7 +392,7 @@ static int axon_msi_probe(struct platform_device *device)
 	}
 	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
-	msic->irq_domain = irq_domain_add_nomap(dn, &msic_host_ops, msic);
+	msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic);
 	if (!msic->irq_domain) {
 		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
 		       dn->full_name);


@@ -239,7 +239,7 @@ void __init beatic_init_IRQ(void)
 	ppc_md.get_irq = beatic_get_irq;
 
 	/* Allocate an irq host */
-	beatic_host = irq_domain_add_nomap(NULL, &beatic_pic_host_ops, NULL);
+	beatic_host = irq_domain_add_nomap(NULL, 0, &beatic_pic_host_ops, NULL);
 	BUG_ON(beatic_host == NULL);
 	irq_set_default_host(beatic_host);
 }


@@ -192,7 +192,7 @@ static int psurge_secondary_ipi_init(void)
 {
 	int rc = -ENOMEM;
 
-	psurge_host = irq_domain_add_nomap(NULL, &psurge_host_ops, NULL);
+	psurge_host = irq_domain_add_nomap(NULL, 0, &psurge_host_ops, NULL);
 
 	if (psurge_host)
 		psurge_secondary_virq = irq_create_direct_mapping(psurge_host);


@@ -753,9 +753,8 @@ void __init ps3_init_IRQ(void)
 	unsigned cpu;
 	struct irq_domain *host;
 
-	host = irq_domain_add_nomap(NULL, &ps3_host_ops, NULL);
+	host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL);
 	irq_set_default_host(host);
-	irq_set_virq_count(PS3_PLUG_MAX + 1);
 
 	for_each_possible_cpu(cpu) {
 		struct ps3_private *pd = &per_cpu(ps3_private, cpu);


@@ -61,6 +61,7 @@ config DUMP_CODE
 config DWARF_UNWINDER
 	bool "Enable the DWARF unwinder for stacktraces"
 	select FRAME_POINTER
+	depends on SUPERH32
 	default n
 	help
 	  Enabling this option will make stacktraces more accurate, at


@ -28,6 +28,7 @@
#include <cpu/sh7785.h> #include <cpu/sh7785.h>
#include <asm/heartbeat.h> #include <asm/heartbeat.h>
#include <asm/clock.h> #include <asm/clock.h>
#include <asm/bl_bit.h>
/* /*
* NOTE: This board has 2 physical memory maps. * NOTE: This board has 2 physical memory maps.


@ -14,6 +14,7 @@
#include <linux/gfp.h> #include <linux/gfp.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/hd64461.h> #include <asm/hd64461.h>
#include <asm/bl_bit.h>
#include <mach/hp6xx.h> #include <mach/hp6xx.h>
#include <cpu/dac.h> #include <cpu/dac.h>
#include <asm/freq.h> #include <asm/freq.h>


@@ -54,7 +54,7 @@ static int __init dma_subsys_init(void)
 	if (unlikely(ret))
 		return ret;
 
-	return device_create_file(dma_subsys.dev_root, &dev_attr_devices.attr);
+	return device_create_file(dma_subsys.dev_root, &dev_attr_devices);
 }
 postcore_initcall(dma_subsys_init);


@ -2,6 +2,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/fpu.h> #include <asm/fpu.h>
#include <asm/traps.h>
int init_fpu(struct task_struct *tsk) int init_fpu(struct task_struct *tsk)
{ {


@ -14,6 +14,7 @@
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/fpu.h> #include <asm/fpu.h>
#include <asm/traps.h>
/* The PR (precision) bit in the FP Status Register must be clear when /* The PR (precision) bit in the FP Status Register must be clear when
* an frchg instruction is executed, otherwise the instruction is undefined. * an frchg instruction is executed, otherwise the instruction is undefined.


@ -16,6 +16,7 @@
#include <cpu/fpu.h> #include <cpu/fpu.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/fpu.h> #include <asm/fpu.h>
#include <asm/traps.h>
/* The PR (precision) bit in the FP Status Register must be clear when /* The PR (precision) bit in the FP Status Register must be clear when
* an frchg instruction is executed, otherwise the instruction is undefined. * an frchg instruction is executed, otherwise the instruction is undefined.


@@ -113,7 +113,7 @@ static struct clk_lookup lookups[] = {
 	CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
 
 	/* MSTP32 clocks */
-	CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP004]),
+	CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP004]),
 	CLKDEV_CON_ID("riic0", &mstp_clks[MSTP000]),
 	CLKDEV_CON_ID("riic1", &mstp_clks[MSTP000]),
 	CLKDEV_CON_ID("riic2", &mstp_clks[MSTP000]),


@ -16,6 +16,7 @@
#include <asm/suspend.h> #include <asm/suspend.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/bl_bit.h>
/* /*
* Notifier lists for pre/post sleep notification * Notifier lists for pre/post sleep notification


@ -17,8 +17,8 @@
#include <linux/irqflags.h> #include <linux/irqflags.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/cpuidle.h> #include <linux/cpuidle.h>
#include <asm/pgalloc.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <asm/pgalloc.h>
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/bl_bit.h> #include <asm/bl_bit.h>


@ -14,6 +14,7 @@
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/io.h> #include <linux/io.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/traps.h>
/* Macros for single step instruction identification */ /* Macros for single step instruction identification */
#define OPCODE_BT(op) (((op) & 0xff00) == 0x8900) #define OPCODE_BT(op) (((op) & 0xff00) == 0x8900)


@ -26,6 +26,7 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/fpu.h> #include <asm/fpu.h>
#include <asm/syscalls.h> #include <asm/syscalls.h>
#include <asm/switch_to.h>
void show_regs(struct pt_regs * regs) void show_regs(struct pt_regs * regs)
{ {


@ -27,6 +27,7 @@
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/setup.h>
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */


@ -34,6 +34,41 @@ __kernel_rt_sigreturn:
1: .short __NR_rt_sigreturn 1: .short __NR_rt_sigreturn
.LEND_rt_sigreturn: .LEND_rt_sigreturn:
.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
.previous
.section .eh_frame,"a",@progbits .section .eh_frame,"a",@progbits
.LCIE1:
.ualong .LCIE1_end - .LCIE1_start
.LCIE1_start:
.ualong 0 /* CIE ID */
.byte 0x1 /* Version number */
.string "zRS" /* NUL-terminated augmentation string */
.uleb128 0x1 /* Code alignment factor */
.sleb128 -4 /* Data alignment factor */
.byte 0x11 /* Return address register column */
.uleb128 0x1 /* Augmentation length and data */
.byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
.byte 0xc, 0xf, 0x0 /* DW_CFA_def_cfa: r15 ofs 0 */
.align 2
.LCIE1_end:
.ualong .LFDE0_end-.LFDE0_start /* Length FDE0 */
.LFDE0_start:
.ualong .LFDE0_start-.LCIE1 /* CIE pointer */
.ualong .LSTART_sigreturn-. /* PC-relative start address */
.ualong .LEND_sigreturn-.LSTART_sigreturn
.uleb128 0 /* Augmentation */
.align 2
.LFDE0_end:
.ualong .LFDE1_end-.LFDE1_start /* Length FDE1 */
.LFDE1_start:
.ualong .LFDE1_start-.LCIE1 /* CIE pointer */
.ualong .LSTART_rt_sigreturn-. /* PC-relative start address */
.ualong .LEND_rt_sigreturn-.LSTART_rt_sigreturn
.uleb128 0 /* Augmentation */
.align 2
.LFDE1_end:
.previous .previous


@ -3,37 +3,34 @@
.type __kernel_vsyscall,@function .type __kernel_vsyscall,@function
__kernel_vsyscall: __kernel_vsyscall:
.LSTART_vsyscall: .LSTART_vsyscall:
/* XXX: We'll have to do something here once we opt to use the vDSO trapa #0x10
* page for something other than the signal trampoline.. as well as nop
* fill out .eh_frame -- PFM. */
.LEND_vsyscall: .LEND_vsyscall:
.size __kernel_vsyscall,.-.LSTART_vsyscall .size __kernel_vsyscall,.-.LSTART_vsyscall
.previous
.section .eh_frame,"a",@progbits .section .eh_frame,"a",@progbits
.previous
.LCIE: .LCIE:
.ualong .LCIE_end - .LCIE_start .ualong .LCIE_end - .LCIE_start
.LCIE_start: .LCIE_start:
.ualong 0 /* CIE ID */ .ualong 0 /* CIE ID */
.byte 0x1 /* Version number */ .byte 0x1 /* Version number */
.string "zRS" /* NUL-terminated augmentation string */ .string "zR" /* NUL-terminated augmentation string */
.uleb128 0x1 /* Code alignment factor */ .uleb128 0x1 /* Code alignment factor */
.sleb128 -4 /* Data alignment factor */ .sleb128 -4 /* Data alignment factor */
.byte 0x11 /* Return address register column */ .byte 0x11 /* Return address register column */
/* Augmentation length and data (none) */ .uleb128 0x1 /* Augmentation length and data */
.byte 0xc /* DW_CFA_def_cfa */ .byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
.uleb128 0xf /* r15 */ .byte 0xc,0xf,0x0 /* DW_CFA_def_cfa: r15 ofs 0 */
.uleb128 0x0 /* offset 0 */
.align 2 .align 2
.LCIE_end: .LCIE_end:
.ualong .LFDE_end-.LFDE_start /* Length FDE */ .ualong .LFDE_end-.LFDE_start /* Length FDE */
.LFDE_start: .LFDE_start:
.ualong .LCIE /* CIE pointer */ .ualong .LFDE_start-.LCIE /* CIE pointer */
.ualong .LSTART_vsyscall-. /* start address */ .ualong .LSTART_vsyscall-. /* PC-relative start address */
.ualong .LEND_vsyscall-.LSTART_vsyscall .ualong .LEND_vsyscall-.LSTART_vsyscall
.uleb128 0 .uleb128 0 /* Augmentation */
.align 2 .align 2
.LFDE_end: .LFDE_end:
.previous .previous


@ -18,6 +18,7 @@
#include <linux/highmem.h> #include <linux/highmem.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/cache_insns.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
/* /*


@ -1,5 +1,6 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/cache_insns.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/traps.h> #include <asm/traps.h>


@ -9,6 +9,7 @@
*/ */
#include <linux/init.h> #include <linux/init.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/sram.h> #include <asm/sram.h>
/* /*


@ -45,7 +45,6 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
void __devinit pcibios_fixup_bus(struct pci_bus *pbus) void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
{ {
struct leon_pci_info *info = pbus->sysdata;
struct pci_dev *dev; struct pci_dev *dev;
int i, has_io, has_mem; int i, has_io, has_mem;
u16 cmd; u16 cmd;
@ -111,18 +110,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
return pci_enable_resources(dev, mask); return pci_enable_resources(dev, mask);
} }
struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
{
/*
* Currently the OpenBoot nodes are not connected with the PCI device,
* this is because the LEON PROM does not create PCI nodes. Eventually
* this will change and the same approach as pcic.c can be used to
* match PROM nodes with pci devices.
*/
return NULL;
}
EXPORT_SYMBOL(pci_device_to_OF_node);
void __devinit pcibios_update_irq(struct pci_dev *dev, int irq) void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
{ {
#ifdef CONFIG_PCI_DEBUG #ifdef CONFIG_PCI_DEBUG


@ -225,6 +225,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
unsigned long g2; unsigned long g2;
int from_user = !(regs->psr & PSR_PS); int from_user = !(regs->psr & PSR_PS);
int fault, code; int fault, code;
unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
(write ? FAULT_FLAG_WRITE : 0));
if(text_fault) if(text_fault)
address = regs->pc; address = regs->pc;
@ -251,6 +253,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
/* /*
@ -289,7 +292,11 @@ good_area:
* make sure we exit gracefully rather than endlessly redo * make sure we exit gracefully rather than endlessly redo
* the fault. * the fault.
*/ */
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); fault = handle_mm_fault(mm, vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
if (unlikely(fault & VM_FAULT_ERROR)) { if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) if (fault & VM_FAULT_OOM)
goto out_of_memory; goto out_of_memory;
@ -297,13 +304,29 @@ good_area:
goto do_sigbus; goto do_sigbus;
BUG(); BUG();
} }
if (fault & VM_FAULT_MAJOR) {
current->maj_flt++; if (flags & FAULT_FLAG_ALLOW_RETRY) {
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); if (fault & VM_FAULT_MAJOR) {
} else { current->maj_flt++;
current->min_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); 1, regs, address);
} else {
current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
1, regs, address);
}
if (fault & VM_FAULT_RETRY) {
flags &= ~FAULT_FLAG_ALLOW_RETRY;
/* No need to up_read(&mm->mmap_sem) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
goto retry;
}
} }
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
return; return;


@ -279,6 +279,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
unsigned int insn = 0; unsigned int insn = 0;
int si_code, fault_code, fault; int si_code, fault_code, fault;
unsigned long address, mm_rss; unsigned long address, mm_rss;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
fault_code = get_thread_fault_code(); fault_code = get_thread_fault_code();
@ -333,6 +334,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
insn = get_fault_insn(regs, insn); insn = get_fault_insn(regs, insn);
goto handle_kernel_fault; goto handle_kernel_fault;
} }
retry:
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
} }
@ -423,7 +426,12 @@ good_area:
goto bad_area; goto bad_area;
} }
fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0); flags |= ((fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
fault = handle_mm_fault(mm, vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
if (unlikely(fault & VM_FAULT_ERROR)) { if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) if (fault & VM_FAULT_OOM)
goto out_of_memory; goto out_of_memory;
@ -431,12 +439,27 @@ good_area:
goto do_sigbus; goto do_sigbus;
BUG(); BUG();
} }
if (fault & VM_FAULT_MAJOR) {
current->maj_flt++; if (flags & FAULT_FLAG_ALLOW_RETRY) {
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); if (fault & VM_FAULT_MAJOR) {
} else { current->maj_flt++;
current->min_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); 1, regs, address);
} else {
current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
1, regs, address);
}
if (fault & VM_FAULT_RETRY) {
flags &= ~FAULT_FLAG_ALLOW_RETRY;
/* No need to up_read(&mm->mmap_sem) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
goto retry;
}
} }
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);


@ -12,7 +12,7 @@ config TILE
select GENERIC_PENDING_IRQ if SMP select GENERIC_PENDING_IRQ if SMP
select GENERIC_IRQ_SHOW select GENERIC_IRQ_SHOW
select SYS_HYPERVISOR select SYS_HYPERVISOR
select ARCH_HAVE_NMI_SAFE_CMPXCHG if !M386 select ARCH_HAVE_NMI_SAFE_CMPXCHG
# FIXME: investigate whether we need/want these options. # FIXME: investigate whether we need/want these options.
# select HAVE_IOREMAP_PROT # select HAVE_IOREMAP_PROT
@ -69,6 +69,9 @@ config ARCH_PHYS_ADDR_T_64BIT
config ARCH_DMA_ADDR_T_64BIT config ARCH_DMA_ADDR_T_64BIT
def_bool y def_bool y
config NEED_DMA_MAP_STATE
def_bool y
config LOCKDEP_SUPPORT config LOCKDEP_SUPPORT
def_bool y def_bool y
@ -118,7 +121,7 @@ config 64BIT
config ARCH_DEFCONFIG config ARCH_DEFCONFIG
string string
default "arch/tile/configs/tile_defconfig" if !TILEGX default "arch/tile/configs/tilepro_defconfig" if !TILEGX
default "arch/tile/configs/tilegx_defconfig" if TILEGX default "arch/tile/configs/tilegx_defconfig" if TILEGX
source "init/Kconfig" source "init/Kconfig"
@ -240,6 +243,7 @@ endchoice
config PAGE_OFFSET config PAGE_OFFSET
hex hex
depends on !64BIT
default 0xF0000000 if VMSPLIT_3_75G default 0xF0000000 if VMSPLIT_3_75G
default 0xE0000000 if VMSPLIT_3_5G default 0xE0000000 if VMSPLIT_3_5G
default 0xB0000000 if VMSPLIT_2_75G default 0xB0000000 if VMSPLIT_2_75G


@ -30,7 +30,8 @@ ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS) KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
endif endif
LIBGCC_PATH := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) LIBGCC_PATH := \
$(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
# Provide the path to use for "make defconfig". # Provide the path to use for "make defconfig".
KBUILD_DEFCONFIG := $(ARCH)_defconfig KBUILD_DEFCONFIG := $(ARCH)_defconfig
@ -53,8 +54,6 @@ libs-y += $(LIBGCC_PATH)
# See arch/tile/Kbuild for content of core part of the kernel # See arch/tile/Kbuild for content of core part of the kernel
core-y += arch/tile/ core-y += arch/tile/
core-$(CONFIG_KVM) += arch/tile/kvm/
ifdef TILERA_ROOT ifdef TILERA_ROOT
INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot
endif endif


@@ -60,8 +60,8 @@
 	_concat4(SPR_IPI_EVENT_, CONFIG_KERNEL_PL,,)
 #define SPR_IPI_EVENT_RESET_K \
 	_concat4(SPR_IPI_EVENT_RESET_, CONFIG_KERNEL_PL,,)
-#define SPR_IPI_MASK_SET_K \
-	_concat4(SPR_IPI_MASK_SET_, CONFIG_KERNEL_PL,,)
+#define SPR_IPI_EVENT_SET_K \
+	_concat4(SPR_IPI_EVENT_SET_, CONFIG_KERNEL_PL,,)
 #define INT_IPI_K \
 	_concat4(INT_IPI_, CONFIG_KERNEL_PL,,)


@ -17,6 +17,8 @@
#ifndef _ASM_TILE_ATOMIC_H #ifndef _ASM_TILE_ATOMIC_H
#define _ASM_TILE_ATOMIC_H #define _ASM_TILE_ATOMIC_H
#include <asm/cmpxchg.h>
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/compiler.h> #include <linux/compiler.h>
@ -121,54 +123,6 @@ static inline int atomic_read(const atomic_t *v)
*/ */
#define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0) #define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0)
/* Nonexistent functions intended to cause link errors. */
extern unsigned long __xchg_called_with_bad_pointer(void);
extern unsigned long __cmpxchg_called_with_bad_pointer(void);
#define xchg(ptr, x) \
({ \
typeof(*(ptr)) __x; \
switch (sizeof(*(ptr))) { \
case 4: \
__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
(atomic_t *)(ptr), \
(u32)(typeof((x)-(x)))(x)); \
break; \
case 8: \
__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
(atomic64_t *)(ptr), \
(u64)(typeof((x)-(x)))(x)); \
break; \
default: \
__xchg_called_with_bad_pointer(); \
} \
__x; \
})
#define cmpxchg(ptr, o, n) \
({ \
typeof(*(ptr)) __x; \
switch (sizeof(*(ptr))) { \
case 4: \
__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
(atomic_t *)(ptr), \
(u32)(typeof((o)-(o)))(o), \
(u32)(typeof((n)-(n)))(n)); \
break; \
case 8: \
__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
(atomic64_t *)(ptr), \
(u64)(typeof((o)-(o)))(o), \
(u64)(typeof((n)-(n)))(n)); \
break; \
default: \
__cmpxchg_called_with_bad_pointer(); \
} \
__x; \
})
#define tas(ptr) (xchg((ptr), 1))
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#ifndef __tilegx__ #ifndef __tilegx__


@@ -200,7 +200,7 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns the old value of @v.
+ * Returns non-zero if @v was not @u, and zero otherwise.
  */
 static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 {
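The reworded comment matches how callers usually test the result. A small hedged example, with an invented helper name:

#include <linux/atomic.h>

/* Take a reference only if the counter is not already zero:
 * atomic64_add_unless() returns non-zero when the add happened,
 * i.e. when the old value of @v differed from @u. */
static inline int example_get_ref(atomic64_t *refs)
{
	return atomic64_add_unless(refs, 1, 0) != 0;
}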


@ -38,10 +38,10 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
static inline void change_bit(unsigned nr, volatile unsigned long *addr) static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{ {
unsigned long old, mask = (1UL << (nr % BITS_PER_LONG)); unsigned long mask = (1UL << (nr % BITS_PER_LONG));
long guess, oldval; unsigned long guess, oldval;
addr += nr / BITS_PER_LONG; addr += nr / BITS_PER_LONG;
old = *addr; oldval = *addr;
do { do {
guess = oldval; guess = oldval;
oldval = atomic64_cmpxchg((atomic64_t *)addr, oldval = atomic64_cmpxchg((atomic64_t *)addr,
@ -85,7 +85,7 @@ static inline int test_and_change_bit(unsigned nr,
volatile unsigned long *addr) volatile unsigned long *addr)
{ {
unsigned long mask = (1UL << (nr % BITS_PER_LONG)); unsigned long mask = (1UL << (nr % BITS_PER_LONG));
long guess, oldval = *addr; unsigned long guess, oldval;
addr += nr / BITS_PER_LONG; addr += nr / BITS_PER_LONG;
oldval = *addr; oldval = *addr;
do { do {


@ -0,0 +1,73 @@
/*
* cmpxchg.h -- forked from asm/atomic.h with this copyright:
*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
*/
#ifndef _ASM_TILE_CMPXCHG_H
#define _ASM_TILE_CMPXCHG_H
#ifndef __ASSEMBLY__
/* Nonexistent functions intended to cause link errors. */
extern unsigned long __xchg_called_with_bad_pointer(void);
extern unsigned long __cmpxchg_called_with_bad_pointer(void);
#define xchg(ptr, x) \
({ \
typeof(*(ptr)) __x; \
switch (sizeof(*(ptr))) { \
case 4: \
__x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \
(atomic_t *)(ptr), \
(u32)(typeof((x)-(x)))(x)); \
break; \
case 8: \
__x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \
(atomic64_t *)(ptr), \
(u64)(typeof((x)-(x)))(x)); \
break; \
default: \
__xchg_called_with_bad_pointer(); \
} \
__x; \
})
#define cmpxchg(ptr, o, n) \
({ \
typeof(*(ptr)) __x; \
switch (sizeof(*(ptr))) { \
case 4: \
__x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \
(atomic_t *)(ptr), \
(u32)(typeof((o)-(o)))(o), \
(u32)(typeof((n)-(n)))(n)); \
break; \
case 8: \
__x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \
(atomic64_t *)(ptr), \
(u64)(typeof((o)-(o)))(o), \
(u64)(typeof((n)-(n)))(n)); \
break; \
default: \
__cmpxchg_called_with_bad_pointer(); \
} \
__x; \
})
#define tas(ptr) (xchg((ptr), 1))
#endif /* __ASSEMBLY__ */
#endif /* _ASM_TILE_CMPXCHG_H */
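The macros moved into this new header are used in the usual compare-and-swap retry style; a brief sketch under that assumption (the example function is ours, not part of the patch):

/* Lock-free increment using cmpxchg(): reread and retry until no other
 * CPU has modified the word between the load and the exchange. */
static inline void example_atomic_inc(unsigned int *counter)
{
	unsigned int old, new;

	do {
		old = *counter;
		new = old + 1;
	} while (cmpxchg(counter, old, new) != old);
}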


@@ -21,7 +21,7 @@
 #define NR_IRQS 32
 
 /* IRQ numbers used for linux IPIs. */
-#define IRQ_RESCHEDULE 1
+#define IRQ_RESCHEDULE 0
 
 #define irq_canonicalize(irq)   (irq)


@@ -137,7 +137,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	__insn_mf();
-	rw->lock = 0;
+	__insn_exch4(&rw->lock, 0);  /* Avoid waiting in the write buffer. */
 }
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)


@@ -25,7 +25,6 @@
 struct KBacktraceIterator {
 	BacktraceIterator it;
 	struct task_struct *task;     /* task we are backtracing */
-	pte_t *pgtable;               /* page table for user space access */
 	int end;                      /* iteration complete. */
 	int new_context;              /* new context is starting */
 	int profile;                  /* profiling, so stop on async intrpt */


@@ -64,7 +64,11 @@ void do_breakpoint(struct pt_regs *, int fault_num);
 
 #ifdef __tilegx__
+/* kernel/single_step.c */
 void gx_singlestep_handle(struct pt_regs *, int fault_num);
+
+/* kernel/intvec_64.S */
+void fill_ra_stack(void);
 #endif
 
-#endif /* _ASM_TILE_SYSCALLS_H */
+#endif /* _ASM_TILE_TRAPS_H */


@ -85,6 +85,7 @@ STD_ENTRY(cpu_idle_on_new_stack)
/* Loop forever on a nap during SMP boot. */ /* Loop forever on a nap during SMP boot. */
STD_ENTRY(smp_nap) STD_ENTRY(smp_nap)
nap nap
nop /* avoid provoking the icache prefetch with a jump */
j smp_nap /* we are not architecturally guaranteed not to exit nap */ j smp_nap /* we are not architecturally guaranteed not to exit nap */
jrp lr /* clue in the backtracer */ jrp lr /* clue in the backtracer */
STD_ENDPROC(smp_nap) STD_ENDPROC(smp_nap)
@ -105,5 +106,6 @@ STD_ENTRY(_cpu_idle)
.global _cpu_idle_nap .global _cpu_idle_nap
_cpu_idle_nap: _cpu_idle_nap:
nap nap
nop /* avoid provoking the icache prefetch with a jump */
jrp lr jrp lr
STD_ENDPROC(_cpu_idle) STD_ENDPROC(_cpu_idle)


@ -799,6 +799,10 @@ handle_interrupt:
* This routine takes a boolean in r30 indicating if this is an NMI. * This routine takes a boolean in r30 indicating if this is an NMI.
* If so, we also expect a boolean in r31 indicating whether to * If so, we also expect a boolean in r31 indicating whether to
* re-enable the oprofile interrupts. * re-enable the oprofile interrupts.
*
* Note that .Lresume_userspace is jumped to directly in several
* places, and we need to make sure r30 is set correctly in those
* callers as well.
*/ */
STD_ENTRY(interrupt_return) STD_ENTRY(interrupt_return)
/* If we're resuming to kernel space, don't check thread flags. */ /* If we're resuming to kernel space, don't check thread flags. */
@ -1237,7 +1241,10 @@ handle_syscall:
bzt r30, 1f bzt r30, 1f
jal do_syscall_trace jal do_syscall_trace
FEEDBACK_REENTER(handle_syscall) FEEDBACK_REENTER(handle_syscall)
1: j .Lresume_userspace /* jump into middle of interrupt_return */ 1: {
movei r30, 0 /* not an NMI */
j .Lresume_userspace /* jump into middle of interrupt_return */
}
.Linvalid_syscall: .Linvalid_syscall:
/* Report an invalid syscall back to the user program */ /* Report an invalid syscall back to the user program */
@ -1246,7 +1253,10 @@ handle_syscall:
movei r28, -ENOSYS movei r28, -ENOSYS
} }
sw r29, r28 sw r29, r28
j .Lresume_userspace /* jump into middle of interrupt_return */ {
movei r30, 0 /* not an NMI */
j .Lresume_userspace /* jump into middle of interrupt_return */
}
STD_ENDPROC(handle_syscall) STD_ENDPROC(handle_syscall)
/* Return the address for oprofile to suppress in backtraces. */ /* Return the address for oprofile to suppress in backtraces. */
@ -1262,7 +1272,10 @@ STD_ENTRY(ret_from_fork)
jal sim_notify_fork jal sim_notify_fork
jal schedule_tail jal schedule_tail
FEEDBACK_REENTER(ret_from_fork) FEEDBACK_REENTER(ret_from_fork)
j .Lresume_userspace /* jump into middle of interrupt_return */ {
movei r30, 0 /* not an NMI */
j .Lresume_userspace /* jump into middle of interrupt_return */
}
STD_ENDPROC(ret_from_fork) STD_ENDPROC(ret_from_fork)
/* /*
@ -1376,7 +1389,10 @@ handle_ill:
jal send_sigtrap /* issue a SIGTRAP */ jal send_sigtrap /* issue a SIGTRAP */
FEEDBACK_REENTER(handle_ill) FEEDBACK_REENTER(handle_ill)
j .Lresume_userspace /* jump into middle of interrupt_return */ {
movei r30, 0 /* not an NMI */
j .Lresume_userspace /* jump into middle of interrupt_return */
}
.Ldispatch_normal_ill: .Ldispatch_normal_ill:
{ {


@ -22,6 +22,7 @@
#include <asm/irqflags.h> #include <asm/irqflags.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/types.h> #include <asm/types.h>
#include <asm/signal.h>
#include <hv/hypervisor.h> #include <hv/hypervisor.h>
#include <arch/abi.h> #include <arch/abi.h>
#include <arch/interrupts.h> #include <arch/interrupts.h>
@ -605,6 +606,10 @@ handle_interrupt:
* This routine takes a boolean in r30 indicating if this is an NMI. * This routine takes a boolean in r30 indicating if this is an NMI.
* If so, we also expect a boolean in r31 indicating whether to * If so, we also expect a boolean in r31 indicating whether to
* re-enable the oprofile interrupts. * re-enable the oprofile interrupts.
*
* Note that .Lresume_userspace is jumped to directly in several
* places, and we need to make sure r30 is set correctly in those
* callers as well.
*/ */
STD_ENTRY(interrupt_return) STD_ENTRY(interrupt_return)
/* If we're resuming to kernel space, don't check thread flags. */ /* If we're resuming to kernel space, don't check thread flags. */
@ -1039,11 +1044,28 @@ handle_syscall:
/* Do syscall trace again, if requested. */ /* Do syscall trace again, if requested. */
ld r30, r31 ld r30, r31
andi r30, r30, _TIF_SYSCALL_TRACE andi r0, r30, _TIF_SYSCALL_TRACE
beqzt r30, 1f {
andi r0, r30, _TIF_SINGLESTEP
beqzt r0, 1f
}
jal do_syscall_trace jal do_syscall_trace
FEEDBACK_REENTER(handle_syscall) FEEDBACK_REENTER(handle_syscall)
1: j .Lresume_userspace /* jump into middle of interrupt_return */ andi r0, r30, _TIF_SINGLESTEP
1: beqzt r0, 2f
/* Single stepping -- notify ptrace. */
{
movei r0, SIGTRAP
jal ptrace_notify
}
FEEDBACK_REENTER(handle_syscall)
2: {
movei r30, 0 /* not an NMI */
j .Lresume_userspace /* jump into middle of interrupt_return */
}
.Lcompat_syscall: .Lcompat_syscall:
/* /*
@ -1077,7 +1099,10 @@ handle_syscall:
movei r28, -ENOSYS movei r28, -ENOSYS
} }
st r29, r28 st r29, r28
j .Lresume_userspace /* jump into middle of interrupt_return */ {
movei r30, 0 /* not an NMI */
j .Lresume_userspace /* jump into middle of interrupt_return */
}
STD_ENDPROC(handle_syscall) STD_ENDPROC(handle_syscall)
/* Return the address for oprofile to suppress in backtraces. */ /* Return the address for oprofile to suppress in backtraces. */
@ -1093,7 +1118,10 @@ STD_ENTRY(ret_from_fork)
jal sim_notify_fork jal sim_notify_fork
jal schedule_tail jal schedule_tail
FEEDBACK_REENTER(ret_from_fork) FEEDBACK_REENTER(ret_from_fork)
j .Lresume_userspace {
movei r30, 0 /* not an NMI */
j .Lresume_userspace /* jump into middle of interrupt_return */
}
STD_ENDPROC(ret_from_fork) STD_ENDPROC(ret_from_fork)
/* Various stub interrupt handlers and syscall handlers */ /* Various stub interrupt handlers and syscall handlers */
@ -1156,6 +1184,18 @@ int_unalign:
push_extra_callee_saves r0 push_extra_callee_saves r0
j do_trap j do_trap
/* Fill the return address stack with nonzero entries. */
STD_ENTRY(fill_ra_stack)
{
move r0, lr
jal 1f
}
1: jal 2f
2: jal 3f
3: jal 4f
4: jrp r0
STD_ENDPROC(fill_ra_stack)
/* Include .intrpt1 array of interrupt vectors */ /* Include .intrpt1 array of interrupt vectors */
.section ".intrpt1", "ax" .section ".intrpt1", "ax"
@ -1166,7 +1206,7 @@ int_unalign:
#define do_hardwall_trap bad_intr #define do_hardwall_trap bad_intr
#endif #endif
int_hand INT_MEM_ERROR, MEM_ERROR, bad_intr int_hand INT_MEM_ERROR, MEM_ERROR, do_trap
int_hand INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr int_hand INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr
#if CONFIG_KERNEL_PL == 2 #if CONFIG_KERNEL_PL == 2
int_hand INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle int_hand INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle


@ -67,6 +67,8 @@ void *module_alloc(unsigned long size)
area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END); area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END);
if (!area) if (!area)
goto error; goto error;
area->nr_pages = npages;
area->pages = pages;
if (map_vm_area(area, prot_rwx, &pages)) { if (map_vm_area(area, prot_rwx, &pages)) {
vunmap(area->addr); vunmap(area->addr);


@ -146,7 +146,6 @@ static ctl_table unaligned_table[] = {
}, },
{} {}
}; };
#endif
static struct ctl_path tile_path[] = { static struct ctl_path tile_path[] = {
{ .procname = "tile" }, { .procname = "tile" },
@ -155,10 +154,9 @@ static struct ctl_path tile_path[] = {
static int __init proc_sys_tile_init(void) static int __init proc_sys_tile_init(void)
{ {
#ifndef __tilegx__ /* FIXME: GX: no support for unaligned access yet */
register_sysctl_paths(tile_path, unaligned_table); register_sysctl_paths(tile_path, unaligned_table);
#endif
return 0; return 0;
} }
arch_initcall(proc_sys_tile_init); arch_initcall(proc_sys_tile_init);
#endif


@ -28,6 +28,7 @@
#include <linux/tracehook.h> #include <linux/tracehook.h>
#include <linux/signal.h> #include <linux/signal.h>
#include <asm/stack.h> #include <asm/stack.h>
#include <asm/switch_to.h>
#include <asm/homecache.h> #include <asm/homecache.h>
#include <asm/syscalls.h> #include <asm/syscalls.h>
#include <asm/traps.h> #include <asm/traps.h>
@ -285,7 +286,7 @@ struct task_struct *validate_current(void)
static struct task_struct corrupt = { .comm = "<corrupt>" }; static struct task_struct corrupt = { .comm = "<corrupt>" };
struct task_struct *tsk = current; struct task_struct *tsk = current;
if (unlikely((unsigned long)tsk < PAGE_OFFSET || if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
(void *)tsk > high_memory || (high_memory && (void *)tsk > high_memory) ||
((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) { ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer); pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
tsk = &corrupt; tsk = &corrupt;


@ -103,13 +103,11 @@ unsigned long __initdata pci_reserve_end_pfn = -1U;
static int __init setup_maxmem(char *str) static int __init setup_maxmem(char *str)
{ {
long maxmem_mb; unsigned long long maxmem;
if (str == NULL || strict_strtol(str, 0, &maxmem_mb) != 0 || if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
maxmem_mb == 0)
return -EINVAL; return -EINVAL;
maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) << maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
(HPAGE_SHIFT - PAGE_SHIFT);
pr_info("Forcing RAM used to no more than %dMB\n", pr_info("Forcing RAM used to no more than %dMB\n",
maxmem_pfn >> (20 - PAGE_SHIFT)); maxmem_pfn >> (20 - PAGE_SHIFT));
return 0; return 0;
@ -119,14 +117,15 @@ early_param("maxmem", setup_maxmem);
static int __init setup_maxnodemem(char *str) static int __init setup_maxnodemem(char *str)
{ {
char *endp; char *endp;
long maxnodemem_mb, node; unsigned long long maxnodemem;
long node;
node = str ? simple_strtoul(str, &endp, 0) : INT_MAX; node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
if (node >= MAX_NUMNODES || *endp != ':' || if (node >= MAX_NUMNODES || *endp != ':')
strict_strtol(endp+1, 0, &maxnodemem_mb) != 0)
return -EINVAL; return -EINVAL;
maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) << maxnodemem = memparse(endp+1, NULL);
maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) <<
(HPAGE_SHIFT - PAGE_SHIFT); (HPAGE_SHIFT - PAGE_SHIFT);
pr_info("Forcing RAM used on node %ld to no more than %dMB\n", pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT)); node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
@ -913,6 +912,13 @@ void __cpuinit setup_cpu(int boot)
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
/*
* Note that the kernel can potentially support other compression
* techniques than gz, though we don't do so by default. If we ever
* decide to do so we can either look for other filename extensions,
* or just allow a file with this name to be compressed with an
* arbitrary compressor (somewhat counterintuitively).
*/
static int __initdata set_initramfs_file; static int __initdata set_initramfs_file;
static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
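
As the comment notes, one alternative to matching filename extensions would be to sniff the compressor from the file's magic bytes. A minimal userspace sketch of that approach (guess_compression() is a hypothetical helper, not kernel code):

    #include <stdio.h>
    #include <string.h>

    /* Guess the compressor from the leading magic bytes of the file. */
    static const char *guess_compression(const unsigned char *buf, size_t len)
    {
            if (len >= 2 && buf[0] == 0x1f && buf[1] == 0x8b)
                    return "gzip";
            if (len >= 6 && memcmp(buf, "\xfd" "7zXZ", 5) == 0)
                    return "xz";
            if (len >= 4 && memcmp(buf, "\x28\xb5\x2f\xfd", 4) == 0)
                    return "zstd";
            if (len >= 3 && memcmp(buf, "BZh", 3) == 0)
                    return "bzip2";
            return "unknown/uncompressed";
    }

    int main(int argc, char **argv)
    {
            unsigned char buf[6] = { 0 };
            FILE *f = fopen(argc > 1 ? argv[1] : "initramfs.cpio.gz", "rb");

            if (!f)
                    return 1;
            size_t n = fread(buf, 1, sizeof(buf), f);
            fclose(f);
            printf("%s\n", guess_compression(buf, n));
            return 0;
    }
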
@ -928,9 +934,9 @@ static int __init setup_initramfs_file(char *str)
early_param("initramfs_file", setup_initramfs_file); early_param("initramfs_file", setup_initramfs_file);
/* /*
* We look for an additional "initramfs.cpio.gz" file in the hvfs. * We look for an "initramfs.cpio.gz" file in the hvfs.
* If there is one, we allocate some memory for it and it will be * If there is one, we allocate some memory for it and it will be
* unpacked to the initramfs after any built-in initramfs_data. * unpacked to the initramfs.
*/ */
static void __init load_hv_initrd(void) static void __init load_hv_initrd(void)
{ {


@ -153,6 +153,25 @@ static tile_bundle_bits rewrite_load_store_unaligned(
if (((unsigned long)addr % size) == 0) if (((unsigned long)addr % size) == 0)
return bundle; return bundle;
/*
* Return SIGBUS with the unaligned address, if requested.
* Note that we return SIGBUS even for completely invalid addresses
* as long as they are in fact unaligned; this matches what the
* tilepro hardware would be doing, if it could provide us with the
* actual bad address in an SPR, which it doesn't.
*/
if (unaligned_fixup == 0) {
siginfo_t info = {
.si_signo = SIGBUS,
.si_code = BUS_ADRALN,
.si_addr = addr
};
trace_unhandled_signal("unaligned trap", regs,
(unsigned long)addr, SIGBUS);
force_sig_info(info.si_signo, &info, current);
return (tilepro_bundle_bits) 0;
}
#ifndef __LITTLE_ENDIAN #ifndef __LITTLE_ENDIAN
# error We assume little-endian representation with copy_xx_user size 2 here # error We assume little-endian representation with copy_xx_user size 2 here
#endif #endif
@ -192,18 +211,6 @@ static tile_bundle_bits rewrite_load_store_unaligned(
return (tile_bundle_bits) 0; return (tile_bundle_bits) 0;
} }
if (unaligned_fixup == 0) {
siginfo_t info = {
.si_signo = SIGBUS,
.si_code = BUS_ADRALN,
.si_addr = addr
};
trace_unhandled_signal("unaligned trap", regs,
(unsigned long)addr, SIGBUS);
force_sig_info(info.si_signo, &info, current);
return (tile_bundle_bits) 0;
}
if (unaligned_printk || unaligned_fixup_count == 0) { if (unaligned_printk || unaligned_fixup_count == 0) {
pr_info("Process %d/%s: PC %#lx: Fixup of" pr_info("Process %d/%s: PC %#lx: Fixup of"
" unaligned %s at %#lx.\n", " unaligned %s at %#lx.\n",


@ -103,7 +103,7 @@ static void smp_stop_cpu_interrupt(void)
set_cpu_online(smp_processor_id(), 0); set_cpu_online(smp_processor_id(), 0);
arch_local_irq_disable_all(); arch_local_irq_disable_all();
for (;;) for (;;)
asm("nap"); asm("nap; nop");
} }
/* This function calls the 'stop' function on all other CPUs in the system. */ /* This function calls the 'stop' function on all other CPUs in the system. */
@ -113,6 +113,12 @@ void smp_send_stop(void)
send_IPI_allbutself(MSG_TAG_STOP_CPU); send_IPI_allbutself(MSG_TAG_STOP_CPU);
} }
/* On panic, just wait; we may get an smp_send_stop() later on. */
void panic_smp_self_stop(void)
{
while (1)
asm("nap; nop");
}
/* /*
* Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages. * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.


@ -196,6 +196,8 @@ void __cpuinit online_secondary(void)
/* This must be done before setting cpu_online_mask */ /* This must be done before setting cpu_online_mask */
wmb(); wmb();
notify_cpu_starting(smp_processor_id());
/* /*
* We need to hold call_lock, so there is no inconsistency * We need to hold call_lock, so there is no inconsistency
* between the time smp_call_function() determines number of * between the time smp_call_function() determines number of


@ -21,10 +21,12 @@
#include <linux/stacktrace.h> #include <linux/stacktrace.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/mmzone.h> #include <linux/mmzone.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <asm/backtrace.h> #include <asm/backtrace.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/ucontext.h> #include <asm/ucontext.h>
#include <asm/switch_to.h>
#include <asm/sigframe.h> #include <asm/sigframe.h>
#include <asm/stack.h> #include <asm/stack.h>
#include <arch/abi.h> #include <arch/abi.h>
@ -44,72 +46,23 @@ static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
return sp >= kstack_base && sp < kstack_base + THREAD_SIZE; return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
} }
/* Is address valid for reading? */
static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
{
HV_PTE *l1_pgtable = kbt->pgtable;
HV_PTE *l2_pgtable;
unsigned long pfn;
HV_PTE pte;
struct page *page;
if (l1_pgtable == NULL)
return 0; /* can't read user space in other tasks */
#ifdef CONFIG_64BIT
/* Find the real l1_pgtable by looking in the l0_pgtable. */
pte = l1_pgtable[HV_L0_INDEX(address)];
if (!hv_pte_get_present(pte))
return 0;
pfn = hv_pte_get_pfn(pte);
if (pte_huge(pte)) {
if (!pfn_valid(pfn)) {
pr_err("L0 huge page has bad pfn %#lx\n", pfn);
return 0;
}
return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}
page = pfn_to_page(pfn);
BUG_ON(PageHighMem(page)); /* No HIGHMEM on 64-bit. */
l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
#endif
pte = l1_pgtable[HV_L1_INDEX(address)];
if (!hv_pte_get_present(pte))
return 0;
pfn = hv_pte_get_pfn(pte);
if (pte_huge(pte)) {
if (!pfn_valid(pfn)) {
pr_err("huge page has bad pfn %#lx\n", pfn);
return 0;
}
return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}
page = pfn_to_page(pfn);
if (PageHighMem(page)) {
pr_err("L2 page table not in LOWMEM (%#llx)\n",
HV_PFN_TO_CPA(pfn));
return 0;
}
l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
pte = l2_pgtable[HV_L2_INDEX(address)];
return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}
/* Callback for backtracer; basically a glorified memcpy */ /* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, unsigned long address, static bool read_memory_func(void *result, unsigned long address,
unsigned int size, void *vkbt) unsigned int size, void *vkbt)
{ {
int retval; int retval;
struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt; struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
if (address == 0)
return 0;
if (__kernel_text_address(address)) { if (__kernel_text_address(address)) {
/* OK to read kernel code. */ /* OK to read kernel code. */
} else if (address >= PAGE_OFFSET) { } else if (address >= PAGE_OFFSET) {
/* We only tolerate kernel-space reads of this task's stack */ /* We only tolerate kernel-space reads of this task's stack */
if (!in_kernel_stack(kbt, address)) if (!in_kernel_stack(kbt, address))
return 0; return 0;
} else if (!valid_address(kbt, address)) { } else if (!kbt->is_current) {
return 0; /* invalid user-space address */ return 0; /* can't read from other user address spaces */
} }
pagefault_disable(); pagefault_disable();
retval = __copy_from_user_inatomic(result, retval = __copy_from_user_inatomic(result,
@ -127,6 +80,8 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
unsigned long sp = kbt->it.sp; unsigned long sp = kbt->it.sp;
struct pt_regs *p; struct pt_regs *p;
if (sp % sizeof(long) != 0)
return NULL;
if (!in_kernel_stack(kbt, sp)) if (!in_kernel_stack(kbt, sp))
return NULL; return NULL;
if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1)) if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
@ -169,27 +124,27 @@ static int is_sigreturn(unsigned long pc)
} }
/* Return a pt_regs pointer for a valid signal handler frame */ /* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt) static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
struct rt_sigframe* kframe)
{ {
BacktraceIterator *b = &kbt->it; BacktraceIterator *b = &kbt->it;
if (b->pc == VDSO_BASE) { if (b->pc == VDSO_BASE && b->sp < PAGE_OFFSET &&
struct rt_sigframe *frame; b->sp % sizeof(long) == 0) {
unsigned long sigframe_top = int retval;
b->sp + sizeof(struct rt_sigframe) - 1; pagefault_disable();
if (!valid_address(kbt, b->sp) || retval = __copy_from_user_inatomic(
!valid_address(kbt, sigframe_top)) { kframe, (void __user __force *)b->sp,
if (kbt->verbose) sizeof(*kframe));
pr_err(" (odd signal: sp %#lx?)\n", pagefault_enable();
(unsigned long)(b->sp)); if (retval != 0 ||
(unsigned int)(kframe->info.si_signo) >= _NSIG)
return NULL; return NULL;
}
frame = (struct rt_sigframe *)b->sp;
if (kbt->verbose) { if (kbt->verbose) {
pr_err(" <received signal %d>\n", pr_err(" <received signal %d>\n",
frame->info.si_signo); kframe->info.si_signo);
} }
return (struct pt_regs *)&frame->uc.uc_mcontext; return (struct pt_regs *)&kframe->uc.uc_mcontext;
} }
return NULL; return NULL;
} }
@ -202,10 +157,11 @@ static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt) static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{ {
struct pt_regs *p; struct pt_regs *p;
struct rt_sigframe kframe;
p = valid_fault_handler(kbt); p = valid_fault_handler(kbt);
if (p == NULL) if (p == NULL)
p = valid_sigframe(kbt); p = valid_sigframe(kbt, &kframe);
if (p == NULL) if (p == NULL)
return 0; return 0;
backtrace_init(&kbt->it, read_memory_func, kbt, backtrace_init(&kbt->it, read_memory_func, kbt,
@ -265,41 +221,19 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
/* /*
* Set up callback information. We grab the kernel stack base * Set up callback information. We grab the kernel stack base
* so we will allow reads of that address range, and if we're * so we will allow reads of that address range.
* asking about the current process we grab the page table
* so we can check user accesses before trying to read them.
* We flush the TLB to avoid any weird skew issues.
*/ */
is_current = (t == NULL); is_current = (t == NULL || t == current);
kbt->is_current = is_current; kbt->is_current = is_current;
if (is_current) if (is_current)
t = validate_current(); t = validate_current();
kbt->task = t; kbt->task = t;
kbt->pgtable = NULL;
kbt->verbose = 0; /* override in caller if desired */ kbt->verbose = 0; /* override in caller if desired */
kbt->profile = 0; /* override in caller if desired */ kbt->profile = 0; /* override in caller if desired */
kbt->end = KBT_ONGOING; kbt->end = KBT_ONGOING;
kbt->new_context = 0; kbt->new_context = 1;
if (is_current) { if (is_current)
HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
/*
* Not just an optimization: this also allows
* this to work at all before va/pa mappings
* are set up.
*/
kbt->pgtable = swapper_pg_dir;
} else {
struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
if (!PageHighMem(page))
kbt->pgtable = __va(pgdir_pa);
else
pr_err("page table not in LOWMEM"
" (%#llx)\n", pgdir_pa);
}
local_flush_tlb_all();
validate_stack(regs); validate_stack(regs);
}
if (regs == NULL) { if (regs == NULL) {
if (is_current || t->state == TASK_RUNNING) { if (is_current || t->state == TASK_RUNNING) {
@ -345,6 +279,78 @@ void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
} }
EXPORT_SYMBOL(KBacktraceIterator_next); EXPORT_SYMBOL(KBacktraceIterator_next);
static void describe_addr(struct KBacktraceIterator *kbt,
unsigned long address,
int have_mmap_sem, char *buf, size_t bufsize)
{
struct vm_area_struct *vma;
size_t namelen, remaining;
unsigned long size, offset, adjust;
char *p, *modname;
const char *name;
int rc;
/*
* Look one byte back for every caller frame (i.e. those that
* aren't a new context) so we look up symbol data for the
* call itself, not the following instruction, which may be on
* a different line (or in a different function).
*/
adjust = !kbt->new_context;
address -= adjust;
if (address >= PAGE_OFFSET) {
/* Handle kernel symbols. */
BUG_ON(bufsize < KSYM_NAME_LEN);
name = kallsyms_lookup(address, &size, &offset,
&modname, buf);
if (name == NULL) {
buf[0] = '\0';
return;
}
namelen = strlen(buf);
remaining = (bufsize - 1) - namelen;
p = buf + namelen;
rc = snprintf(p, remaining, "+%#lx/%#lx ",
offset + adjust, size);
if (modname && rc < remaining)
snprintf(p + rc, remaining - rc, "[%s] ", modname);
buf[bufsize-1] = '\0';
return;
}
/* If we don't have the mmap_sem, we can't show any more info. */
buf[0] = '\0';
if (!have_mmap_sem)
return;
/* Find vma info. */
vma = find_vma(kbt->task->mm, address);
if (vma == NULL || address < vma->vm_start) {
snprintf(buf, bufsize, "[unmapped address] ");
return;
}
if (vma->vm_file) {
char *s;
p = d_path(&vma->vm_file->f_path, buf, bufsize);
if (IS_ERR(p))
p = "?";
s = strrchr(p, '/');
if (s)
p = s+1;
} else {
p = "anon";
}
/* Generate a string description of the vma info. */
namelen = strlen(p);
remaining = (bufsize - 1) - namelen;
memmove(buf, p, namelen);
snprintf(buf + namelen, remaining, "[%lx+%lx] ",
vma->vm_start, vma->vm_end - vma->vm_start);
}
/* /*
* This method wraps the backtracer's more generic support. * This method wraps the backtracer's more generic support.
* It is only invoked from the architecture-specific code; show_stack() * It is only invoked from the architecture-specific code; show_stack()
@ -353,6 +359,7 @@ EXPORT_SYMBOL(KBacktraceIterator_next);
void tile_show_stack(struct KBacktraceIterator *kbt, int headers) void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
{ {
int i; int i;
int have_mmap_sem = 0;
if (headers) { if (headers) {
/* /*
@ -369,31 +376,16 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
kbt->verbose = 1; kbt->verbose = 1;
i = 0; i = 0;
for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) { for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
char *modname;
const char *name;
unsigned long address = kbt->it.pc;
unsigned long offset, size;
char namebuf[KSYM_NAME_LEN+100]; char namebuf[KSYM_NAME_LEN+100];
unsigned long address = kbt->it.pc;
if (address >= PAGE_OFFSET) /* Try to acquire the mmap_sem as we pass into userspace. */
name = kallsyms_lookup(address, &size, &offset, if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
&modname, namebuf); have_mmap_sem =
else down_read_trylock(&kbt->task->mm->mmap_sem);
name = NULL;
if (!name) describe_addr(kbt, address, have_mmap_sem,
namebuf[0] = '\0'; namebuf, sizeof(namebuf));
else {
size_t namelen = strlen(namebuf);
size_t remaining = (sizeof(namebuf) - 1) - namelen;
char *p = namebuf + namelen;
int rc = snprintf(p, remaining, "+%#lx/%#lx ",
offset, size);
if (modname && rc < remaining)
snprintf(p + rc, remaining - rc,
"[%s] ", modname);
namebuf[sizeof(namebuf)-1] = '\0';
}
pr_err(" frame %d: 0x%lx %s(sp 0x%lx)\n", pr_err(" frame %d: 0x%lx %s(sp 0x%lx)\n",
i++, address, namebuf, (unsigned long)(kbt->it.sp)); i++, address, namebuf, (unsigned long)(kbt->it.sp));
@ -408,6 +400,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
pr_err("Stack dump stopped; next frame identical to this one\n"); pr_err("Stack dump stopped; next frame identical to this one\n");
if (headers) if (headers)
pr_err("Stack dump complete\n"); pr_err("Stack dump complete\n");
if (have_mmap_sem)
up_read(&kbt->task->mm->mmap_sem);
} }
EXPORT_SYMBOL(tile_show_stack); EXPORT_SYMBOL(tile_show_stack);
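
The pattern used above is: opportunistically take mmap_sem with down_read_trylock() once the backtrace crosses into userspace, remember whether it was acquired, and release it only in that case, so the dump degrades to bare addresses instead of blocking. A userspace analogy of the same trylock-and-conditionally-release pattern, using a pthread rwlock (names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t maps_lock = PTHREAD_RWLOCK_INITIALIZER;

    static void dump_frames(void)
    {
            /* Take the lock opportunistically; degrade gracefully if we can't. */
            int have_lock = (pthread_rwlock_tryrdlock(&maps_lock) == 0);

            for (int i = 0; i < 4; i++) {
                    if (have_lock)
                            printf("frame %d: full symbol info\n", i);
                    else
                            printf("frame %d: address only\n", i);
            }

            if (have_lock)
                    pthread_rwlock_unlock(&maps_lock);
    }

    int main(void)
    {
            dump_frames();
            return 0;
    }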


@ -200,7 +200,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
{ {
siginfo_t info = { 0 }; siginfo_t info = { 0 };
int signo, code; int signo, code;
unsigned long address; unsigned long address = 0;
bundle_bits instr; bundle_bits instr;
/* Re-enable interrupts. */ /* Re-enable interrupts. */
@ -223,6 +223,10 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
} }
switch (fault_num) { switch (fault_num) {
case INT_MEM_ERROR:
signo = SIGBUS;
code = BUS_OBJERR;
break;
case INT_ILL: case INT_ILL:
if (copy_from_user(&instr, (void __user *)regs->pc, if (copy_from_user(&instr, (void __user *)regs->pc,
sizeof(instr))) { sizeof(instr))) {
@ -289,7 +293,10 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
address = regs->pc; address = regs->pc;
break; break;
#ifdef __tilegx__ #ifdef __tilegx__
case INT_ILL_TRANS: case INT_ILL_TRANS: {
/* Avoid a hardware erratum with the return address stack. */
fill_ra_stack();
signo = SIGSEGV; signo = SIGSEGV;
code = SEGV_MAPERR; code = SEGV_MAPERR;
if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK) if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK)
@ -297,6 +304,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
else else
address = 0; /* FIXME: GX: single-step for address */ address = 0; /* FIXME: GX: single-step for address */
break; break;
}
#endif #endif
default: default:
panic("Unexpected do_trap interrupt number %d", fault_num); panic("Unexpected do_trap interrupt number %d", fault_num);
@ -308,7 +316,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
info.si_addr = (void __user *)address; info.si_addr = (void __user *)address;
if (signo == SIGILL) if (signo == SIGILL)
info.si_trapno = fault_num; info.si_trapno = fault_num;
trace_unhandled_signal("trap", regs, address, signo); if (signo != SIGTRAP)
trace_unhandled_signal("trap", regs, address, signo);
force_sig_info(signo, &info, current); force_sig_info(signo, &info, current);
} }


@ -7,6 +7,7 @@ lib-y = cacheflush.o checksum.o cpumask.o delay.o uaccess.o \
strchr_$(BITS).o strlen_$(BITS).o strchr_$(BITS).o strlen_$(BITS).o
ifeq ($(CONFIG_TILEGX),y) ifeq ($(CONFIG_TILEGX),y)
CFLAGS_REMOVE_memcpy_user_64.o = -fno-omit-frame-pointer
lib-y += memcpy_user_64.o lib-y += memcpy_user_64.o
else else
lib-y += atomic_32.o atomic_asm_32.o memcpy_tile64.o lib-y += atomic_32.o atomic_asm_32.o memcpy_tile64.o


@ -39,7 +39,21 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
{ {
char *p, *base; char *p, *base;
size_t step_size, load_count; size_t step_size, load_count;
/*
 * On TILEPro the striping granularity is a fixed 8KB; on
 * TILE-Gx it is configurable, and we rely on the fact that
 * the hypervisor always configures maximum striping, so that
 * bits 9 and 10 of the PA are part of the stripe function and
 * every 512 bytes we hit a striping boundary.
 */
#ifdef __tilegx__
const unsigned long STRIPE_WIDTH = 512;
#else
const unsigned long STRIPE_WIDTH = 8192; const unsigned long STRIPE_WIDTH = 8192;
#endif
#ifdef __tilegx__ #ifdef __tilegx__
/* /*
* On TILE-Gx, we must disable the dstream prefetcher before doing * On TILE-Gx, we must disable the dstream prefetcher before doing
@ -74,7 +88,7 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
* memory, that one load would be sufficient, but since we may * memory, that one load would be sufficient, but since we may
* be, we also need to back up to the last load issued to * be, we also need to back up to the last load issued to
* another memory controller, which would be the point where * another memory controller, which would be the point where
* we crossed an 8KB boundary (the granularity of striping * we crossed a "striping" boundary (the granularity of striping
* across memory controllers). Keep backing up and doing this * across memory controllers). Keep backing up and doing this
* until we are before the beginning of the buffer, or have * until we are before the beginning of the buffer, or have
* hit all the controllers. * hit all the controllers.
@ -88,12 +102,22 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
* every cache line on a full memory stripe on each * every cache line on a full memory stripe on each
* controller" that we simply do that, to simplify the logic. * controller" that we simply do that, to simplify the logic.
* *
* FIXME: See bug 9535 for some issues with this code. * On TILE-Gx the hash-for-home function is much more complex,
* with the upshot being we can't readily guarantee we have
* hit both entries in the 128-entry AMT that were hit by any
* load in the entire range, so we just re-load them all.
* With larger buffers, we may want to consider using a hypervisor
* trap to issue loads directly to each hash-for-home tile for
* each controller (doing it from Linux would trash the TLB).
*/ */
if (hfh) { if (hfh) {
step_size = L2_CACHE_BYTES; step_size = L2_CACHE_BYTES;
#ifdef __tilegx__
load_count = (size + L2_CACHE_BYTES - 1) / L2_CACHE_BYTES;
#else
load_count = (STRIPE_WIDTH / L2_CACHE_BYTES) * load_count = (STRIPE_WIDTH / L2_CACHE_BYTES) *
(1 << CHIP_LOG_NUM_MSHIMS()); (1 << CHIP_LOG_NUM_MSHIMS());
#endif
} else { } else {
step_size = STRIPE_WIDTH; step_size = STRIPE_WIDTH;
load_count = (1 << CHIP_LOG_NUM_MSHIMS()); load_count = (1 << CHIP_LOG_NUM_MSHIMS());
@ -109,7 +133,7 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
/* Figure out how far back we need to go. */ /* Figure out how far back we need to go. */
base = p - (step_size * (load_count - 2)); base = p - (step_size * (load_count - 2));
if ((long)base < (long)buffer) if ((unsigned long)base < (unsigned long)buffer)
base = buffer; base = buffer;
/* /*


@ -14,7 +14,13 @@
* Do memcpy(), but trap and return "n" when a load or store faults. * Do memcpy(), but trap and return "n" when a load or store faults.
* *
* Note: this idiom only works when memcpy() compiles to a leaf function. * Note: this idiom only works when memcpy() compiles to a leaf function.
* If "sp" is updated during memcpy, the "jrp lr" will be incorrect. * Here "leaf function" not only means it makes no calls, but also
* that it performs no stack operations (sp, stack frame pointer) and
* uses no callee-saved registers; otherwise "jrp lr" will be incorrect
* because the stack-frame unwind is bypassed. memcpy() is simple enough
* that these conditions hold here, but we need to be careful when
* modifying this file. This is not a clean solution, but it is the best
* one so far.
* *
* Also note that we are capturing "n" from the containing scope here. * Also note that we are capturing "n" from the containing scope here.
*/ */


@ -60,5 +60,5 @@ static void delay_backoff(int iterations)
loops += __insn_crc32_32(stack_pointer, get_cycles_low()) & loops += __insn_crc32_32(stack_pointer, get_cycles_low()) &
(loops - 1); (loops - 1);
relax(1 << exponent); relax(loops);
} }
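
The bug fixed here is that the jitter added to "loops" was computed and then ignored, because the delay still used the un-jittered 1 << exponent. A userspace sketch of capped exponential backoff with jitter (illustrative only; rand() stands in for the crc32-of-cycle-counter hash used above):

    #include <stdlib.h>
    #include <time.h>

    #define MAX_EXPONENT 8          /* cap the delay at 2^8 iterations */

    static void relax(unsigned long iterations)
    {
            for (volatile unsigned long i = 0; i < iterations; i++)
                    ;               /* busy-wait; a real kernel would pause or yield */
    }

    static void delay_backoff(int iterations)
    {
            unsigned long exponent = iterations + 1;
            unsigned long loops;

            if (exponent > MAX_EXPONENT)
                    exponent = MAX_EXPONENT;

            loops = 1UL << exponent;
            /* Add pseudo-random jitter so contending CPUs don't stay in lockstep. */
            loops += (unsigned long)rand() & (loops - 1);

            relax(loops);           /* use the jittered count, not 1 << exponent */
    }

    int main(void)
    {
            srand((unsigned)time(NULL));
            for (int i = 0; i < 5; i++)
                    delay_backoff(i);
            return 0;
    }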


@ -130,7 +130,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
} }
/* /*
* Handle a fault on the vmalloc or module mapping area * Handle a fault on the vmalloc area.
*/ */
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address) static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{ {
@ -203,9 +203,14 @@ static pgd_t *get_current_pgd(void)
* interrupt or a critical region, and must do as little as possible. * interrupt or a critical region, and must do as little as possible.
* Similarly, we can't use atomic ops here, since we may be handling a * Similarly, we can't use atomic ops here, since we may be handling a
* fault caused by an atomic op access. * fault caused by an atomic op access.
*
* If we find a migrating PTE while we're in an NMI context, and we're
* at a PC that has a registered exception handler, we don't wait,
* since this thread may (e.g.) have been interrupted while migrating
* its own stack, which would then cause us to self-deadlock.
*/ */
static int handle_migrating_pte(pgd_t *pgd, int fault_num, static int handle_migrating_pte(pgd_t *pgd, int fault_num,
unsigned long address, unsigned long address, unsigned long pc,
int is_kernel_mode, int write) int is_kernel_mode, int write)
{ {
pud_t *pud; pud_t *pud;
@ -227,6 +232,8 @@ static int handle_migrating_pte(pgd_t *pgd, int fault_num,
pte_offset_kernel(pmd, address); pte_offset_kernel(pmd, address);
pteval = *pte; pteval = *pte;
if (pte_migrating(pteval)) { if (pte_migrating(pteval)) {
if (in_nmi() && search_exception_tables(pc))
return 0;
wait_for_migration(pte); wait_for_migration(pte);
return 1; return 1;
} }
@ -300,7 +307,7 @@ static int handle_page_fault(struct pt_regs *regs,
* rather than trying to patch up the existing PTE. * rather than trying to patch up the existing PTE.
*/ */
pgd = get_current_pgd(); pgd = get_current_pgd();
if (handle_migrating_pte(pgd, fault_num, address, if (handle_migrating_pte(pgd, fault_num, address, regs->pc,
is_kernel_mode, write)) is_kernel_mode, write))
return 1; return 1;
@ -335,9 +342,12 @@ static int handle_page_fault(struct pt_regs *regs,
/* /*
* If we're trying to touch user-space addresses, we must * If we're trying to touch user-space addresses, we must
* be either at PL0, or else with interrupts enabled in the * be either at PL0, or else with interrupts enabled in the
* kernel, so either way we can re-enable interrupts here. * kernel, so either way we can re-enable interrupts here
* unless we are doing atomic access to user space with
* interrupts disabled.
*/ */
local_irq_enable(); if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
local_irq_enable();
mm = tsk->mm; mm = tsk->mm;
@ -665,7 +675,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
*/ */
if (fault_num == INT_DTLB_ACCESS) if (fault_num == INT_DTLB_ACCESS)
write = 1; write = 1;
if (handle_migrating_pte(pgd, fault_num, address, 1, write)) if (handle_migrating_pte(pgd, fault_num, address, pc, 1, write))
return state; return state;
/* Return zero so that we continue on with normal fault handling. */ /* Return zero so that we continue on with normal fault handling. */


@ -394,6 +394,7 @@ int page_home(struct page *page)
return pte_to_home(*virt_to_pte(NULL, kva)); return pte_to_home(*virt_to_pte(NULL, kva));
} }
} }
EXPORT_SYMBOL(page_home);
void homecache_change_page_home(struct page *page, int order, int home) void homecache_change_page_home(struct page *page, int order, int home)
{ {


@ -254,11 +254,6 @@ static pgprot_t __init init_pgprot(ulong address)
return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE); return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
} }
/* As a performance optimization, keep the boot init stack here. */
if (address >= (ulong)&init_thread_union &&
address < (ulong)&init_thread_union + THREAD_SIZE)
return construct_pgprot(PAGE_KERNEL, smp_processor_id());
#ifndef __tilegx__ #ifndef __tilegx__
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE() #if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
/* Force the atomic_locks[] array page to be hash-for-home. */ /* Force the atomic_locks[] array page to be hash-for-home. */
@ -557,6 +552,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
address = MEM_SV_INTRPT; address = MEM_SV_INTRPT;
pmd = get_pmd(pgtables, address); pmd = get_pmd(pgtables, address);
pfn = 0; /* code starts at PA 0 */
if (ktext_small) { if (ktext_small) {
/* Allocate an L2 PTE for the kernel text */ /* Allocate an L2 PTE for the kernel text */
int cpu = 0; int cpu = 0;
@ -579,10 +575,15 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
} }
BUG_ON(address != (unsigned long)_stext); BUG_ON(address != (unsigned long)_stext);
pfn = 0; /* code starts at PA 0 */ pte = NULL;
pte = alloc_pte(); for (; address < (unsigned long)_einittext;
for (pte_ofs = 0; address < (unsigned long)_einittext; pfn++, address += PAGE_SIZE) {
pfn++, pte_ofs++, address += PAGE_SIZE) { pte_ofs = pte_index(address);
if (pte_ofs == 0) {
if (pte)
assign_pte(pmd++, pte);
pte = alloc_pte();
}
if (!ktext_local) { if (!ktext_local) {
prot = set_remote_cache_cpu(prot, cpu); prot = set_remote_cache_cpu(prot, cpu);
cpu = cpumask_next(cpu, &ktext_mask); cpu = cpumask_next(cpu, &ktext_mask);
@ -591,7 +592,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
} }
pte[pte_ofs] = pfn_pte(pfn, prot); pte[pte_ofs] = pfn_pte(pfn, prot);
} }
assign_pte(pmd, pte); if (pte)
assign_pte(pmd, pte);
} else { } else {
pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC); pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
pteval = pte_mkhuge(pteval); pteval = pte_mkhuge(pteval);
@ -614,7 +616,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
else else
pteval = hv_pte_set_mode(pteval, pteval = hv_pte_set_mode(pteval,
HV_PTE_MODE_CACHE_NO_L3); HV_PTE_MODE_CACHE_NO_L3);
*(pte_t *)pmd = pteval; for (; address < (unsigned long)_einittext;
pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
*(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
} }
/* Set swapper_pgprot here so it is flushed to memory right away. */ /* Set swapper_pgprot here so it is flushed to memory right away. */


@ -177,14 +177,10 @@ void shatter_huge_page(unsigned long addr)
if (!pmd_huge_page(*pmd)) if (!pmd_huge_page(*pmd))
return; return;
/* spin_lock_irqsave(&init_mm.page_table_lock, flags);
* Grab the pgd_lock, since we may need it to walk the pgd_list,
* and since we need some kind of lock here to avoid races.
*/
spin_lock_irqsave(&pgd_lock, flags);
if (!pmd_huge_page(*pmd)) { if (!pmd_huge_page(*pmd)) {
/* Lost the race to convert the huge page. */ /* Lost the race to convert the huge page. */
spin_unlock_irqrestore(&pgd_lock, flags); spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
return; return;
} }
@ -194,6 +190,7 @@ void shatter_huge_page(unsigned long addr)
#ifdef __PAGETABLE_PMD_FOLDED #ifdef __PAGETABLE_PMD_FOLDED
/* Walk every pgd on the system and update the pmd there. */ /* Walk every pgd on the system and update the pmd there. */
spin_lock(&pgd_lock);
list_for_each(pos, &pgd_list) { list_for_each(pos, &pgd_list) {
pmd_t *copy_pmd; pmd_t *copy_pmd;
pgd = list_to_pgd(pos) + pgd_index(addr); pgd = list_to_pgd(pos) + pgd_index(addr);
@ -201,6 +198,7 @@ void shatter_huge_page(unsigned long addr)
copy_pmd = pmd_offset(pud, addr); copy_pmd = pmd_offset(pud, addr);
__set_pmd(copy_pmd, *pmd); __set_pmd(copy_pmd, *pmd);
} }
spin_unlock(&pgd_lock);
#endif #endif
/* Tell every cpu to notice the change. */ /* Tell every cpu to notice the change. */
@ -208,7 +206,7 @@ void shatter_huge_page(unsigned long addr)
cpu_possible_mask, NULL, 0); cpu_possible_mask, NULL, 0);
/* Hold the lock until the TLB flush is finished to avoid races. */ /* Hold the lock until the TLB flush is finished to avoid races. */
spin_unlock_irqrestore(&pgd_lock, flags); spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
} }
/* /*
@ -217,9 +215,13 @@ void shatter_huge_page(unsigned long addr)
* against pageattr.c; it is the unique case in which a valid change * against pageattr.c; it is the unique case in which a valid change
* of kernel pagetables can't be lazily synchronized by vmalloc faults. * of kernel pagetables can't be lazily synchronized by vmalloc faults.
* vmalloc faults work because attached pagetables are never freed. * vmalloc faults work because attached pagetables are never freed.
* The locking scheme was chosen on the basis of manfred's *
* recommendations and having no core impact whatsoever. * The lock is always taken with interrupts disabled, unlike on x86
* -- wli * and other platforms, because we need to take the lock in
* shatter_huge_page(), which may be called from an interrupt context.
* We are not at risk from the tlbflush IPI deadlock that was seen on
* x86, since we use the flush_remote() API to have the hypervisor do
* the TLB flushes regardless of irq disabling.
*/ */
DEFINE_SPINLOCK(pgd_lock); DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list); LIST_HEAD(pgd_list);
@ -469,10 +471,18 @@ void __set_pte(pte_t *ptep, pte_t pte)
void set_pte(pte_t *ptep, pte_t pte) void set_pte(pte_t *ptep, pte_t pte)
{ {
struct page *page = pfn_to_page(pte_pfn(pte)); if (pte_present(pte) &&
(!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
/* Update the home of a PTE if necessary */ /* The PTE actually references physical memory. */
pte = pte_set_home(pte, page_home(page)); unsigned long pfn = pte_pfn(pte);
if (pfn_valid(pfn)) {
/* Update the home of the PTE from the struct page. */
pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
} else if (hv_pte_get_mode(pte) == 0) {
/* remap_pfn_range(), etc, must supply PTE mode. */
panic("set_pte(): out-of-range PFN and mode 0\n");
}
}
__set_pte(ptep, pte); __set_pte(ptep, pte);
} }


@ -3,41 +3,6 @@
#include <asm/types.h> #include <asm/types.h>
#if defined(__KERNEL__)
# include <asm/byteorder.h>
# if defined(__BIG_ENDIAN)
# define ntohll(x) (x)
# define htonll(x) (x)
# elif defined(__LITTLE_ENDIAN)
# define ntohll(x) be64_to_cpu(x)
# define htonll(x) cpu_to_be64(x)
# else
# error "Could not determine byte order"
# endif
#else
/* For the definition of ntohl, htonl and __BYTE_ORDER */
#include <endian.h>
#include <netinet/in.h>
#if defined(__BYTE_ORDER)
# if __BYTE_ORDER == __BIG_ENDIAN
# define ntohll(x) (x)
# define htonll(x) (x)
# elif __BYTE_ORDER == __LITTLE_ENDIAN
# define ntohll(x) bswap_64(x)
# define htonll(x) bswap_64(x)
# else
# error "Could not determine byte order: __BYTE_ORDER incorrectly defined"
# endif
#else /* ! defined(__BYTE_ORDER) */
# error "Could not determine byte order: __BYTE_ORDER not defined"
#endif
#endif /* ! defined(__KERNEL__) */
extern int init_cow_file(int fd, char *cow_file, char *backing_file, extern int init_cow_file(int fd, char *cow_file, char *backing_file,
int sectorsize, int alignment, int *bitmap_offset_out, int sectorsize, int alignment, int *bitmap_offset_out,
unsigned long *bitmap_len_out, int *data_offset_out); unsigned long *bitmap_len_out, int *data_offset_out);


@ -8,11 +8,10 @@
* that. * that.
*/ */
#include <unistd.h> #include <unistd.h>
#include <byteswap.h>
#include <errno.h> #include <errno.h>
#include <string.h> #include <string.h>
#include <arpa/inet.h> #include <arpa/inet.h>
#include <asm/types.h> #include <endian.h>
#include "cow.h" #include "cow.h"
#include "cow_sys.h" #include "cow_sys.h"
@ -214,8 +213,8 @@ int write_cow_header(char *cow_file, int fd, char *backing_file,
"header\n"); "header\n");
goto out; goto out;
} }
header->magic = htonl(COW_MAGIC); header->magic = htobe32(COW_MAGIC);
header->version = htonl(COW_VERSION); header->version = htobe32(COW_VERSION);
err = -EINVAL; err = -EINVAL;
if (strlen(backing_file) > sizeof(header->backing_file) - 1) { if (strlen(backing_file) > sizeof(header->backing_file) - 1) {
@ -246,10 +245,10 @@ int write_cow_header(char *cow_file, int fd, char *backing_file,
goto out_free; goto out_free;
} }
header->mtime = htonl(modtime); header->mtime = htobe32(modtime);
header->size = htonll(*size); header->size = htobe64(*size);
header->sectorsize = htonl(sectorsize); header->sectorsize = htobe32(sectorsize);
header->alignment = htonl(alignment); header->alignment = htobe32(alignment);
header->cow_format = COW_BITMAP; header->cow_format = COW_BITMAP;
err = cow_write_file(fd, header, sizeof(*header)); err = cow_write_file(fd, header, sizeof(*header));
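
write_cow_header() now stores every multi-byte field with the glibc <endian.h> helpers (htobe32()/htobe64()) instead of the private htonl()/htonll() macros, and read_cow_header() undoes the conversion with be32toh()/be64toh(). A minimal sketch of that round-trip, assuming a glibc-style <endian.h> (the struct and values are illustrative, not the real COW header layout):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cow_hdr_example {          /* hypothetical on-disk header fields */
            uint32_t magic;
            uint64_t size;
    };

    int main(void)
    {
            struct cow_hdr_example h;

            /* Writing: store fields big-endian regardless of host byte order. */
            h.magic = htobe32(0x4f4f4f4du);
            h.size  = htobe64(1ULL << 32);

            /* Reading: convert back to host order before use. */
            printf("magic %#x size %llu\n",
                   (unsigned)be32toh(h.magic),
                   (unsigned long long)be64toh(h.size));
            return 0;
    }
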
@ -301,8 +300,8 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
magic = header->v1.magic; magic = header->v1.magic;
if (magic == COW_MAGIC) if (magic == COW_MAGIC)
version = header->v1.version; version = header->v1.version;
else if (magic == ntohl(COW_MAGIC)) else if (magic == be32toh(COW_MAGIC))
version = ntohl(header->v1.version); version = be32toh(header->v1.version);
/* No error printed because the non-COW case comes through here */ /* No error printed because the non-COW case comes through here */
else goto out; else goto out;
@ -327,9 +326,9 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
"header\n"); "header\n");
goto out; goto out;
} }
*mtime_out = ntohl(header->v2.mtime); *mtime_out = be32toh(header->v2.mtime);
*size_out = ntohll(header->v2.size); *size_out = be64toh(header->v2.size);
*sectorsize_out = ntohl(header->v2.sectorsize); *sectorsize_out = be32toh(header->v2.sectorsize);
*bitmap_offset_out = sizeof(header->v2); *bitmap_offset_out = sizeof(header->v2);
*align_out = *sectorsize_out; *align_out = *sectorsize_out;
file = header->v2.backing_file; file = header->v2.backing_file;
@ -341,10 +340,10 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
"header\n"); "header\n");
goto out; goto out;
} }
*mtime_out = ntohl(header->v3.mtime); *mtime_out = be32toh(header->v3.mtime);
*size_out = ntohll(header->v3.size); *size_out = be64toh(header->v3.size);
*sectorsize_out = ntohl(header->v3.sectorsize); *sectorsize_out = be32toh(header->v3.sectorsize);
*align_out = ntohl(header->v3.alignment); *align_out = be32toh(header->v3.alignment);
if (*align_out == 0) { if (*align_out == 0) {
cow_printf("read_cow_header - invalid COW header, " cow_printf("read_cow_header - invalid COW header, "
"align == 0\n"); "align == 0\n");
@ -366,16 +365,16 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
* this was used until Dec2005 - 64bits are needed to represent * this was used until Dec2005 - 64bits are needed to represent
* 2038+. I.e. we can safely do this truncating cast. * 2038+. I.e. we can safely do this truncating cast.
* *
* Additionally, we must use ntohl() instead of ntohll(), since * Additionally, we must use be32toh() instead of be64toh(), since
* the program used to use the former (tested - I got mtime * the program used to use the former (tested - I got mtime
* mismatch "0 vs whatever"). * mismatch "0 vs whatever").
* *
* Ever heard about bug-to-bug-compatibility ? ;-) */ * Ever heard about bug-to-bug-compatibility ? ;-) */
*mtime_out = (time32_t) ntohl(header->v3_b.mtime); *mtime_out = (time32_t) be32toh(header->v3_b.mtime);
*size_out = ntohll(header->v3_b.size); *size_out = be64toh(header->v3_b.size);
*sectorsize_out = ntohl(header->v3_b.sectorsize); *sectorsize_out = be32toh(header->v3_b.sectorsize);
*align_out = ntohl(header->v3_b.alignment); *align_out = be32toh(header->v3_b.alignment);
if (*align_out == 0) { if (*align_out == 0) {
cow_printf("read_cow_header - invalid COW header, " cow_printf("read_cow_header - invalid COW header, "
"align == 0\n"); "align == 0\n");


@ -22,6 +22,7 @@
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/switch_to.h>
#include "init.h" #include "init.h"
#include "irq_kern.h" #include "irq_kern.h"


@ -1,3 +1,4 @@
generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
generic-y += switch_to.h


@ -3,9 +3,10 @@
# Licensed under the GPL # Licensed under the GPL
# #
CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START) \ CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START) \
-DELF_ARCH=$(LDS_ELF_ARCH) \ -DELF_ARCH=$(LDS_ELF_ARCH) \
-DELF_FORMAT=$(LDS_ELF_FORMAT) -DELF_FORMAT=$(LDS_ELF_FORMAT) \
$(LDS_EXTRA)
extra-y := vmlinux.lds extra-y := vmlinux.lds
clean-files := clean-files :=


@ -88,11 +88,8 @@ static inline void set_current(struct task_struct *task)
extern void arch_switch_to(struct task_struct *to); extern void arch_switch_to(struct task_struct *to);
void *_switch_to(void *prev, void *next, void *last) void *__switch_to(struct task_struct *from, struct task_struct *to)
{ {
struct task_struct *from = prev;
struct task_struct *to = next;
to->thread.prev_sched = from; to->thread.prev_sched = from;
set_current(to); set_current(to);
@ -111,7 +108,6 @@ void *_switch_to(void *prev, void *next, void *last)
} while (current->thread.saved_task); } while (current->thread.saved_task);
return current->thread.prev_sched; return current->thread.prev_sched;
} }
void interrupt_end(void) void interrupt_end(void)


@ -103,7 +103,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
void uml_setup_stubs(struct mm_struct *mm) void uml_setup_stubs(struct mm_struct *mm)
{ {
struct page **pages;
int err, ret; int err, ret;
if (!skas_needs_stub) if (!skas_needs_stub)


@ -14,6 +14,9 @@ LINK-y += $(call cc-option,-m32)
export LDFLAGS export LDFLAGS
LDS_EXTRA := -Ui386
export LDS_EXTRA
# First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y. # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
include $(srctree)/arch/x86/Makefile_32.cpu include $(srctree)/arch/x86/Makefile_32.cpu


@ -557,6 +557,8 @@ struct __large_struct { unsigned long buf[100]; };
extern unsigned long extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n); copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
/* /*
* movsl can be slow when source and dest are not both 8-byte aligned * movsl can be slow when source and dest are not both 8-byte aligned


@ -213,11 +213,6 @@ static inline unsigned long __must_check copy_from_user(void *to,
return n; return n;
} }
long __must_check strncpy_from_user(char *dst, const char __user *src,
long count);
long __must_check __strncpy_from_user(char *dst,
const char __user *src, long count);
/** /**
* strlen_user: - Get the size of a string in user space. * strlen_user: - Get the size of a string in user space.
* @str: The string to measure. * @str: The string to measure.

View file

@ -208,10 +208,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
} }
} }
__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n); __must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n); __must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str); __must_check long strlen_user(const char __user *str);


@ -38,6 +38,7 @@
#include <asm/traps.h> #include <asm/traps.h>
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/idle.h>
static int kvmapf = 1; static int kvmapf = 1;
@ -253,7 +254,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
kvm_async_pf_task_wait((u32)read_cr2()); kvm_async_pf_task_wait((u32)read_cr2());
break; break;
case KVM_PV_REASON_PAGE_READY: case KVM_PV_REASON_PAGE_READY:
rcu_irq_enter();
exit_idle();
kvm_async_pf_task_wake((u32)read_cr2()); kvm_async_pf_task_wake((u32)read_cr2());
rcu_irq_exit();
break; break;
} }
} }


@ -369,7 +369,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
case MSR_CORE_PERF_FIXED_CTR_CTRL: case MSR_CORE_PERF_FIXED_CTR_CTRL:
if (pmu->fixed_ctr_ctrl == data) if (pmu->fixed_ctr_ctrl == data)
return 0; return 0;
if (!(data & 0xfffffffffffff444)) { if (!(data & 0xfffffffffffff444ull)) {
reprogram_fixed_counters(pmu, data); reprogram_fixed_counters(pmu, data);
return 0; return 0;
} }


@ -3906,7 +3906,9 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */ vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
vmx_set_cr4(&vmx->vcpu, 0); vmx_set_cr4(&vmx->vcpu, 0);
vmx_set_efer(&vmx->vcpu, 0); vmx_set_efer(&vmx->vcpu, 0);
vmx_fpu_activate(&vmx->vcpu); vmx_fpu_activate(&vmx->vcpu);


@ -7,6 +7,8 @@
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/module.h> #include <linux/module.h>
#include <asm/word-at-a-time.h>
/* /*
* best effort, GUP based copy_from_user() that is NMI-safe * best effort, GUP based copy_from_user() that is NMI-safe
*/ */
@ -41,3 +43,104 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
return len; return len;
} }
EXPORT_SYMBOL_GPL(copy_from_user_nmi); EXPORT_SYMBOL_GPL(copy_from_user_nmi);
static inline unsigned long count_bytes(unsigned long mask)
{
mask = (mask - 1) & ~mask;
mask >>= 7;
return count_masked_bytes(mask);
}
/*
* Do a strncpy, return length of string without final '\0'.
* 'count' is the user-supplied count (return 'count' if we
* hit it), 'max' is the address space maximum (and we return
* -EFAULT if we hit it).
*/
static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, long max)
{
long res = 0;
/*
* Truncate 'max' to the user-specified limit, so that
* we only have one limit we need to check in the loop
*/
if (max > count)
max = count;
while (max >= sizeof(unsigned long)) {
unsigned long c;
/* Fall back to byte-at-a-time if we get a page fault */
if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
break;
/* This can write a few bytes past the NUL character, but that's ok */
*(unsigned long *)(dst+res) = c;
c = has_zero(c);
if (c)
return res + count_bytes(c);
res += sizeof(unsigned long);
max -= sizeof(unsigned long);
}
while (max) {
char c;
if (unlikely(__get_user(c,src+res)))
return -EFAULT;
dst[res] = c;
if (!c)
return res;
res++;
max--;
}
/*
* Uhhuh. We hit 'max'. But was that the user-specified maximum
* too? If so, that's ok - we got as much as the user asked for.
*/
if (res >= count)
return count;
/*
* Nope: we hit the address space limit, and we still had more
* characters the caller would have wanted. That's an EFAULT.
*/
return -EFAULT;
}
/**
* strncpy_from_user: - Copy a NUL terminated string from userspace.
* @dst: Destination address, in kernel space. This buffer must be at
* least @count bytes long.
* @src: Source address, in user space.
* @count: Maximum number of bytes to copy, including the trailing NUL.
*
* Copies a NUL-terminated string from userspace to kernel space.
*
* On success, returns the length of the string (not including the trailing
* NUL).
*
* If access to userspace fails, returns -EFAULT (some data may have been
* copied).
*
* If @count is smaller than the length of the string, copies @count bytes
* and returns @count.
*/
long
strncpy_from_user(char *dst, const char __user *src, long count)
{
unsigned long max_addr, src_addr;
if (unlikely(count <= 0))
return 0;
max_addr = current_thread_info()->addr_limit.seg;
src_addr = (unsigned long)src;
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
return do_strncpy_from_user(dst, src, count, max);
}
return -EFAULT;
}
EXPORT_SYMBOL(strncpy_from_user);
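
The loop above relies on the word-at-a-time trick from <asm/word-at-a-time.h>: has_zero() yields a mask whose high bit is set in every byte of the word that is zero, and count_bytes() turns that mask into the index of the first NUL. A standalone sketch of the underlying bit trick, assuming a little-endian machine (it uses __builtin_ctzll() rather than the kernel's count_masked_bytes(), so it is illustrative only):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ONES   0x0101010101010101ULL
    #define HIGHS  0x8080808080808080ULL

    /* Mask with the high bit set in each byte of v that is zero. */
    static uint64_t has_zero(uint64_t v)
    {
            return (v - ONES) & ~v & HIGHS;
    }

    /* Number of bytes before the first zero byte (little-endian layout). */
    static unsigned first_zero_index(uint64_t mask)
    {
            return __builtin_ctzll(mask) >> 3;
    }

    int main(void)
    {
            char buf[8] = "abc";            /* bytes: 'a' 'b' 'c' 0 0 0 0 0 */
            uint64_t w;

            memcpy(&w, buf, sizeof(w));
            uint64_t m = has_zero(w);
            if (m)
                    printf("first NUL at byte %u\n", first_zero_index(m));
            return 0;
    }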


@ -32,93 +32,6 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
#define movsl_is_ok(a1, a2, n) \ #define movsl_is_ok(a1, a2, n) \
__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n)) __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
/*
* Copy a null terminated string from userspace.
*/
#define __do_strncpy_from_user(dst, src, count, res) \
do { \
int __d0, __d1, __d2; \
might_fault(); \
__asm__ __volatile__( \
" testl %1,%1\n" \
" jz 2f\n" \
"0: lodsb\n" \
" stosb\n" \
" testb %%al,%%al\n" \
" jz 1f\n" \
" decl %1\n" \
" jnz 0b\n" \
"1: subl %1,%0\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl %5,%0\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(0b,3b) \
: "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \
"=&D" (__d2) \
: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
: "memory"); \
} while (0)
/**
* __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
* @dst: Destination address, in kernel space. This buffer must be at
* least @count bytes long.
* @src: Source address, in user space.
* @count: Maximum number of bytes to copy, including the trailing NUL.
*
* Copies a NUL-terminated string from userspace to kernel space.
* Caller must check the specified block with access_ok() before calling
* this function.
*
* On success, returns the length of the string (not including the trailing
* NUL).
*
* If access to userspace fails, returns -EFAULT (some data may have been
* copied).
*
* If @count is smaller than the length of the string, copies @count bytes
* and returns @count.
*/
long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
long res;
__do_strncpy_from_user(dst, src, count, res);
return res;
}
EXPORT_SYMBOL(__strncpy_from_user);
/**
* strncpy_from_user: - Copy a NUL terminated string from userspace.
* @dst: Destination address, in kernel space. This buffer must be at
* least @count bytes long.
* @src: Source address, in user space.
* @count: Maximum number of bytes to copy, including the trailing NUL.
*
* Copies a NUL-terminated string from userspace to kernel space.
*
* On success, returns the length of the string (not including the trailing
* NUL).
*
* If access to userspace fails, returns -EFAULT (some data may have been
* copied).
*
* If @count is smaller than the length of the string, copies @count bytes
* and returns @count.
*/
long
strncpy_from_user(char *dst, const char __user *src, long count)
{
long res = -EFAULT;
if (access_ok(VERIFY_READ, src, 1))
__do_strncpy_from_user(dst, src, count, res);
return res;
}
EXPORT_SYMBOL(strncpy_from_user);
/* /*
* Zero Userspace * Zero Userspace
*/ */


@ -8,55 +8,6 @@
#include <linux/module.h> #include <linux/module.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
/*
* Copy a null terminated string from userspace.
*/
#define __do_strncpy_from_user(dst,src,count,res) \
do { \
long __d0, __d1, __d2; \
might_fault(); \
__asm__ __volatile__( \
" testq %1,%1\n" \
" jz 2f\n" \
"0: lodsb\n" \
" stosb\n" \
" testb %%al,%%al\n" \
" jz 1f\n" \
" decq %1\n" \
" jnz 0b\n" \
"1: subq %1,%0\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movq %5,%0\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(0b,3b) \
: "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \
"=&D" (__d2) \
: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
: "memory"); \
} while (0)
long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
long res;
__do_strncpy_from_user(dst, src, count, res);
return res;
}
EXPORT_SYMBOL(__strncpy_from_user);
long
strncpy_from_user(char *dst, const char __user *src, long count)
{
long res = -EFAULT;
if (access_ok(VERIFY_READ, src, 1))
return __strncpy_from_user(dst, src, count);
return res;
}
EXPORT_SYMBOL(strncpy_from_user);
/* /*
* Zero Userspace * Zero Userspace
*/ */

arch/x86/um/asm/barrier.h (new file, 75 lines)

@ -0,0 +1,75 @@
#ifndef _ASM_UM_BARRIER_H_
#define _ASM_UM_BARRIER_H_
#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
#ifdef CONFIG_X86_32
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else /* CONFIG_X86_32 */
#define mb() asm volatile("mfence" : : : "memory")
#define rmb() asm volatile("lfence" : : : "memory")
#define wmb() asm volatile("sfence" : : : "memory")
#endif /* CONFIG_X86_32 */
#define read_barrier_depends() do { } while (0)
#ifdef CONFIG_SMP
#define smp_mb() mb()
#ifdef CONFIG_X86_PPRO_FENCE
#define smp_rmb() rmb()
#else /* CONFIG_X86_PPRO_FENCE */
#define smp_rmb() barrier()
#endif /* CONFIG_X86_PPRO_FENCE */
#ifdef CONFIG_X86_OOSTORE
#define smp_wmb() wmb()
#else /* CONFIG_X86_OOSTORE */
#define smp_wmb() barrier()
#endif /* CONFIG_X86_OOSTORE */
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* CONFIG_SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif /* CONFIG_SMP */
/*
* Stop RDTSC speculation. This is needed when you need to use RDTSC
* (or get_cycles or vread that possibly accesses the TSC) in a defined
* code region.
*
* (Could use an alternative three way for this if there was one.)
*/
static inline void rdtsc_barrier(void)
{
alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
#endif
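
For context on how these barriers are meant to be paired: a writer publishes its data before setting a flag, and a reader checks the flag before touching the data. A userspace analogy using C11 acquire/release atomics in place of smp_wmb()/smp_rmb() (an illustrative sketch only; kernel code would use the macros defined above):

    #include <assert.h>
    #include <pthread.h>
    #include <stdatomic.h>

    static int data;
    static atomic_int ready;

    static void *producer(void *arg)
    {
            data = 42;
            /* Release store plays the role of smp_wmb() before setting the flag. */
            atomic_store_explicit(&ready, 1, memory_order_release);
            return NULL;
    }

    static void *consumer(void *arg)
    {
            /* Acquire load plays the role of smp_rmb() after seeing the flag. */
            while (!atomic_load_explicit(&ready, memory_order_acquire))
                    ;       /* spin */
            assert(data == 42);
            return NULL;
    }

    int main(void)
    {
            pthread_t p, c;

            pthread_create(&p, NULL, producer, NULL);
            pthread_create(&c, NULL, consumer, NULL);
            pthread_join(p, NULL);
            pthread_join(c, NULL);
            return 0;
    }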


@ -1,135 +0,0 @@
#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_
#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>
/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif
extern unsigned long arch_align_stack(unsigned long sp);
void default_idle(void);
/*
* Force strict CPU ordering.
* And yes, this is required on UP too when we're talking
* to devices.
*/
#ifdef CONFIG_X86_32
/*
* Some non-Intel clones support out of order store. wmb() ceases to be a
* nop for these.
*/
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
/**
* read_barrier_depends - Flush all pending reads that subsequent reads
* depend on.
*
* No data-dependent reads from memory-like regions are ever reordered
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data return by
* any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
* These ordering constraints are respected by both the local CPU
* and the compiler.
*
* Ordering is not guaranteed by anything other than these primitives,
* not even by data dependencies. See the documentation for
* memory_barrier() for examples and URLs to more information.
*
* For example, the following code would force ordering (the initial
* value of "a" is zero, "b" is one, and "p" is "&a"):
*
* <programlisting>
* CPU 0 CPU 1
*
* b = 2;
* memory_barrier();
* p = &b; q = p;
* read_barrier_depends();
* d = *q;
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
* two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
* CPU 0 CPU 1
*
* a = 2;
* memory_barrier();
* b = 3; y = b;
* read_barrier_depends();
* x = a;
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
**/
#define read_barrier_depends() do { } while (0)
#ifdef CONFIG_SMP
#define smp_mb() mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb() rmb()
#else
# define smp_rmb() barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb() wmb()
#else
# define smp_wmb() barrier()
#endif
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
/*
* Stop RDTSC speculation. This is needed when you need to use RDTSC
* (or get_cycles or vread that possibly accesses the TSC) in a defined
* code region.
*
* (Could use an alternative three way for this if there was one.)
*/
static inline void rdtsc_barrier(void)
{
alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
extern void *_switch_to(void *prev, void *next, void *last);
#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
#endif


@ -1859,6 +1859,7 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
#endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86_64 */
static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss;
static unsigned char fake_ioapic_mapping[PAGE_SIZE] __page_aligned_bss;
static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
{ {
@@ -1899,7 +1900,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
* We just don't map the IO APIC - all access is via * We just don't map the IO APIC - all access is via
* hypercalls. Keep the address in the pte for reference. * hypercalls. Keep the address in the pte for reference.
*/ */
pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); pte = pfn_pte(PFN_DOWN(__pa(fake_ioapic_mapping)), PAGE_KERNEL);
break; break;
#endif #endif
@@ -2064,6 +2065,7 @@ void __init xen_init_mmu_ops(void)
pv_mmu_ops = xen_mmu_ops; pv_mmu_ops = xen_mmu_ops;
memset(dummy_mapping, 0xff, PAGE_SIZE); memset(dummy_mapping, 0xff, PAGE_SIZE);
memset(fake_ioapic_mapping, 0xfd, PAGE_SIZE);
} }
/* Protected by xen_reservation_lock. */ /* Protected by xen_reservation_lock. */

View file

@@ -59,7 +59,7 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
static void __cpuinit cpu_bringup(void) static void __cpuinit cpu_bringup(void)
{ {
int cpu = smp_processor_id(); int cpu;
cpu_init(); cpu_init();
touch_softlockup_watchdog(); touch_softlockup_watchdog();

View file

@@ -627,7 +627,7 @@ config CRYPTO_BLOWFISH_COMMON
config CRYPTO_BLOWFISH_X86_64 config CRYPTO_BLOWFISH_X86_64
tristate "Blowfish cipher algorithm (x86_64)" tristate "Blowfish cipher algorithm (x86_64)"
depends on (X86 || UML_X86) && 64BIT depends on X86 && 64BIT
select CRYPTO_ALGAPI select CRYPTO_ALGAPI
select CRYPTO_BLOWFISH_COMMON select CRYPTO_BLOWFISH_COMMON
help help
@@ -657,7 +657,7 @@ config CRYPTO_CAMELLIA
config CRYPTO_CAMELLIA_X86_64 config CRYPTO_CAMELLIA_X86_64
tristate "Camellia cipher algorithm (x86_64)" tristate "Camellia cipher algorithm (x86_64)"
depends on (X86 || UML_X86) && 64BIT depends on X86 && 64BIT
depends on CRYPTO depends on CRYPTO
select CRYPTO_ALGAPI select CRYPTO_ALGAPI
select CRYPTO_LRW select CRYPTO_LRW
@@ -893,7 +893,7 @@ config CRYPTO_TWOFISH_X86_64
config CRYPTO_TWOFISH_X86_64_3WAY config CRYPTO_TWOFISH_X86_64_3WAY
tristate "Twofish cipher algorithm (x86_64, 3-way parallel)" tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
depends on (X86 || UML_X86) && 64BIT depends on X86 && 64BIT
select CRYPTO_ALGAPI select CRYPTO_ALGAPI
select CRYPTO_TWOFISH_COMMON select CRYPTO_TWOFISH_COMMON
select CRYPTO_TWOFISH_X86_64 select CRYPTO_TWOFISH_X86_64

View file

@@ -786,7 +786,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
while (1) { while (1) {
if (cx->entry_method == ACPI_CSTATE_HALT) if (cx->entry_method == ACPI_CSTATE_HALT)
halt(); safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) { else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
inb(cx->address); inb(cx->address);
/* See comment in acpi_idle_do_entry() */ /* See comment in acpi_idle_do_entry() */

View file

@@ -138,6 +138,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
unsigned int base, top; unsigned int base, top;
int nodes = 0; int nodes = 0;
int registers = 0; int registers = 0;
int average;
mutex_lock(&map->lock); mutex_lock(&map->lock);
@@ -152,8 +153,13 @@
registers += top - base + 1; registers += top - base + 1;
} }
if (nodes)
average = registers / nodes;
else
average = 0;
seq_printf(s, "%d nodes, %d registers, average %d registers\n", seq_printf(s, "%d nodes, %d registers, average %d registers\n",
nodes, registers, registers / nodes); nodes, registers, average);
mutex_unlock(&map->lock); mutex_unlock(&map->lock);

View file

@@ -346,6 +346,7 @@ out:
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(regcache_sync_region);
/** /**
* regcache_cache_only: Put a register map into cache only mode * regcache_cache_only: Put a register map into cache only mode

View file

@@ -29,7 +29,7 @@ config BCMA_HOST_PCI
config BCMA_DRIVER_PCI_HOSTMODE config BCMA_DRIVER_PCI_HOSTMODE
bool "Driver for PCI core working in hostmode" bool "Driver for PCI core working in hostmode"
depends on BCMA && MIPS depends on BCMA && MIPS && BCMA_HOST_PCI
help help
PCI core hostmode operation (external PCI bus). PCI core hostmode operation (external PCI bus).

View file

@@ -10,6 +10,7 @@
*/ */
#include "bcma_private.h" #include "bcma_private.h"
#include <linux/pci.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/bcma/bcma.h> #include <linux/bcma/bcma.h>
#include <asm/paccess.h> #include <asm/paccess.h>

View file

@@ -1475,7 +1475,7 @@ static int __init xlblk_init(void)
if (!xen_domain()) if (!xen_domain())
return -ENODEV; return -ENODEV;
if (!xen_platform_pci_unplug) if (xen_hvm_domain() && !xen_platform_pci_unplug)
return -ENODEV; return -ENODEV;
if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) { if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {

View file

@@ -72,7 +72,9 @@ static struct usb_device_id ath3k_table[] = {
/* Atheros AR3012 with sflash firmware*/ /* Atheros AR3012 with sflash firmware*/
{ USB_DEVICE(0x0CF3, 0x3004) }, { USB_DEVICE(0x0CF3, 0x3004) },
{ USB_DEVICE(0x0CF3, 0x311D) },
{ USB_DEVICE(0x13d3, 0x3375) }, { USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x04CA, 0x3005) },
/* Atheros AR5BBU12 with sflash firmware */ /* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) }, { USB_DEVICE(0x0489, 0xE02C) },
@@ -89,7 +91,9 @@ static struct usb_device_id ath3k_blist_tbl[] = {
/* Atheros AR3012 with sflash firmware*/ /* Atheros AR3012 with sflash firmware*/
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ } /* Terminating entry */ { } /* Terminating entry */
}; };

Some files were not shown because too many files have changed in this diff.