Merge branch 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull miscellaneous x86 fixes from Peter Anvin:
 "The biggest ones are fixing suspend/resume breakage on 32 bits, and an
  interrim fix for mapping over holes that allows AMD kit with more than
  1 TB.

  A final solution for the latter is in the works, but involves some
  fairly invasive changes that will probably mean it will only be
  appropriate for 3.8."

* 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, MCE: Remove bios_cmci_threshold sysfs attribute
  x86, amd, mce: Avoid NULL pointer reference on CPU northbridge lookup
  x86: Exclude E820_RESERVED regions and memory holes above 4 GB from direct mapping.
  x86/cache_info: Use ARRAY_SIZE() in amd_l3_attrs()
  x86/reboot: Remove quirk entry for SBC FITPC
  x86, suspend: Correct the restore of CR4, EFER; skip computing EFLAGS.ID
Linus Torvalds, 2012-10-19 14:15:16 -07:00
commit 3b641bf453

6 changed files with 24 additions and 34 deletions

arch/x86/kernel/cpu/intel_cacheinfo.c

@@ -991,7 +991,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
 	if (attrs)
 		return attrs;
 
-	n = sizeof (default_attrs) / sizeof (struct attribute *);
+	n = ARRAY_SIZE(default_attrs);
 
 	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
 		n += 2;
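
The ARRAY_SIZE() change above is a routine cleanup: the open-coded sizeof
division computes the same element count, but the macro is shorter and the
kernel's version also adds a compile-time check that its argument really is
an array. A minimal user-space sketch of the same idiom (default_attrs here
is just a stand-in string table, not the kernel's attribute list):

#include <stdio.h>

/* Simplified ARRAY_SIZE(); the kernel macro additionally rejects pointers
 * at compile time via __must_be_array(). */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const char *default_attrs[] = { "level", "type", "size" };

int main(void)
{
	/* Equivalent to the open-coded form the patch removes. */
	size_t n_old = sizeof(default_attrs) / sizeof(const char *);
	size_t n_new = ARRAY_SIZE(default_attrs);

	printf("%zu == %zu\n", n_old, n_new);	/* prints "3 == 3" */
	return 0;
}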

arch/x86/kernel/cpu/mcheck/mce.c

@@ -2209,11 +2209,6 @@ static struct dev_ext_attribute dev_attr_cmci_disabled = {
 	&mce_cmci_disabled
 };
 
-static struct dev_ext_attribute dev_attr_bios_cmci_threshold = {
-	__ATTR(bios_cmci_threshold, 0444, device_show_int, NULL),
-	&mce_bios_cmci_threshold
-};
-
 static struct device_attribute *mce_device_attrs[] = {
 	&dev_attr_tolerant.attr,
 	&dev_attr_check_interval.attr,
@@ -2222,7 +2217,6 @@ static struct device_attribute *mce_device_attrs[] = {
 	&dev_attr_dont_log_ce.attr,
 	&dev_attr_ignore_ce.attr,
 	&dev_attr_cmci_disabled.attr,
-	&dev_attr_bios_cmci_threshold.attr,
 	NULL
 };
 
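
Dropping dev_attr_bios_cmci_threshold works without touching any counter
because mce_device_attrs[] is a NULL-terminated table: the per-CPU device
setup walks it until the sentinel, so entries can come and go freely. A
rough user-space sketch of that pattern (plain strings stand in for the
device attributes):

#include <stdio.h>

/* NULL-terminated table, mirroring the layout of mce_device_attrs[]. */
static const char *device_attrs[] = {
	"tolerant",
	"check_interval",
	"cmci_disabled",
	NULL
};

int main(void)
{
	/* Walk until the NULL sentinel, as the registration loop does. */
	for (int i = 0; device_attrs[i]; i++)
		printf("creating sysfs file: %s\n", device_attrs[i]);
	return 0;
}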

arch/x86/kernel/cpu/mcheck/mce_amd.c

@@ -576,12 +576,10 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	int err = 0;
 
 	if (shared_bank[bank]) {
-
 		nb = node_to_amd_nb(amd_get_nb_id(cpu));
-		WARN_ON(!nb);
 
 		/* threshold descriptor already initialized on this node? */
-		if (nb->bank4) {
+		if (nb && nb->bank4) {
 			/* yes, use it */
 			b = nb->bank4;
 			err = kobject_add(b->kobj, &dev->kobj, name);
@@ -615,9 +613,11 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		atomic_set(&b->cpus, 1);
 
 		/* nb is already initialized, see above */
-		WARN_ON(nb->bank4);
-		nb->bank4 = b;
+		if (nb) {
+			WARN_ON(nb->bank4);
+			nb->bank4 = b;
+		}
 	}
 
 	err = allocate_threshold_blocks(cpu, bank, 0,
 					MSR_IA32_MC0_MISC + bank * 4);
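
The fix leans on C's short-circuit evaluation: in "if (nb && nb->bank4)" the
nb->bank4 load is never executed when nb is NULL, so a failed northbridge
lookup no longer dereferences a NULL pointer. A small standalone illustration
of the same guard (the struct is a mock, not the real amd_northbridge):

#include <stdio.h>
#include <stddef.h>

/* Mock northbridge descriptor; only the field being tested matters here. */
struct nb_mock {
	void *bank4;
};

static void create_bank(const struct nb_mock *nb)
{
	/*
	 * With &&, the right-hand side is evaluated only if nb is non-NULL,
	 * so there is no NULL dereference; the old "if (nb->bank4)" crashed
	 * whenever the lookup returned NULL.
	 */
	if (nb && nb->bank4)
		printf("reusing the existing bank4 descriptor\n");
	else
		printf("allocating a fresh descriptor\n");
}

int main(void)
{
	struct nb_mock nb = { .bank4 = &nb };	/* pretend it is initialized */

	create_bank(&nb);	/* reuses the shared descriptor */
	create_bank(NULL);	/* lookup failed: still safe */
	return 0;
}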

arch/x86/kernel/reboot.c

@@ -358,14 +358,6 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
 		},
 	},
-	{	/* Handle problems with rebooting on CompuLab SBC-FITPC2 */
-		.callback = set_bios_reboot,
-		.ident = "CompuLab SBC-FITPC2",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "CompuLab"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"),
-		},
-	},
 	{	/* Handle problems with rebooting on ASUS P4S800 */
 		.callback = set_bios_reboot,
 		.ident = "ASUS P4S800",

arch/x86/kernel/setup.c

@@ -920,8 +920,21 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_X86_64
 	if (max_pfn > max_low_pfn) {
-		max_pfn_mapped = init_memory_mapping(1UL<<32,
-						     max_pfn<<PAGE_SHIFT);
+		int i;
+
+		for (i = 0; i < e820.nr_map; i++) {
+			struct e820entry *ei = &e820.map[i];
+
+			if (ei->addr + ei->size <= 1UL << 32)
+				continue;
+
+			if (ei->type == E820_RESERVED)
+				continue;
+
+			max_pfn_mapped = init_memory_mapping(
+				ei->addr < 1UL << 32 ? 1UL << 32 : ei->addr,
+				ei->addr + ei->size);
+		}
 		/* can we preseve max_low_pfn ?*/
 		max_low_pfn = max_pfn;
 	}
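
The replacement loop maps memory above 4 GB region by region instead of
blindly direct-mapping everything up to max_pfn: entries that end below 4 GB
are skipped, E820_RESERVED entries (the holes that broke >1 TB AMD boxes) are
skipped, and a region straddling the boundary has its start clamped to 4 GB.
The same selection logic in a small user-space sketch (the memory map and the
map_range() stub are made up for illustration):

#include <stdio.h>
#include <stdint.h>

#define FOUR_GB (1ULL << 32)

enum region_type { RAM, RESERVED };	/* stand-ins for E820_RAM / E820_RESERVED */

struct region {
	uint64_t addr;
	uint64_t size;
	enum region_type type;
};

/* A made-up e820-style map with a reserved hole above 4 GB. */
static const struct region e820_map[] = {
	{ 0x00000000ULL,   0x000A0000ULL,   RAM },	/* low memory, below 4 GB */
	{ 0x100000000ULL,  0x1000000000ULL, RAM },	/* 4 GB .. 68 GB */
	{ 0x1100000000ULL, 0x100000000ULL,  RESERVED },	/* reserved hole */
	{ 0x1200000000ULL, 0x2000000000ULL, RAM },	/* RAM above the hole */
};

/* Stub standing in for init_memory_mapping(). */
static void map_range(uint64_t start, uint64_t end)
{
	printf("direct-map %#llx - %#llx\n",
	       (unsigned long long)start, (unsigned long long)end);
}

int main(void)
{
	for (size_t i = 0; i < sizeof(e820_map) / sizeof(e820_map[0]); i++) {
		const struct region *ei = &e820_map[i];

		if (ei->addr + ei->size <= FOUR_GB)	/* entirely below 4 GB */
			continue;
		if (ei->type == RESERVED)		/* never map reserved holes */
			continue;

		/* Clamp the start of a region that straddles 4 GB. */
		map_range(ei->addr < FOUR_GB ? FOUR_GB : ei->addr,
			  ei->addr + ei->size);
	}
	return 0;
}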

arch/x86/kernel/acpi/wakeup_32.S

@@ -74,18 +74,9 @@ ENTRY(wakeup_start)
 
 	lidtl	wakeup_idt
 
-	/* Clear the EFLAGS but remember if we have EFLAGS.ID */
-	movl	$X86_EFLAGS_ID, %ecx
-	pushl	%ecx
-	popfl
-	pushfl
-	popl	%edi
+	/* Clear the EFLAGS */
 	pushl	$0
 	popfl
-	pushfl
-	popl	%edx
-	xorl	%edx, %edi
-	andl	%ecx, %edi	/* %edi is zero iff CPUID & %cr4 are missing */
 
 	/* Check header signature... */
 	movl	signature, %eax
@@ -120,12 +111,12 @@ ENTRY(wakeup_start)
 	movl	%eax, %cr3
 
 	btl	$WAKEUP_BEHAVIOR_RESTORE_CR4, %edi
-	jz	1f
+	jnc	1f
 	movl	pmode_cr4, %eax
 	movl	%eax, %cr4
 1:
 	btl	$WAKEUP_BEHAVIOR_RESTORE_EFER, %edi
-	jz	1f
+	jnc	1f
 	movl	pmode_efer, %eax
 	movl	pmode_efer + 4, %edx
 	movl	$MSR_EFER, %ecx
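
Two things change in the wakeup path. First, the EFLAGS.ID probing is gone:
the code now simply clears EFLAGS, and whether CR4 and EFER get restored is
decided only by the WAKEUP_BEHAVIOR_RESTORE_* bits tested with btl. Second,
jz becomes jnc, because btl reports the tested bit in the carry flag rather
than the zero flag, so the old jz was branching on a flag that btl does not
set from the bit; jnc skips the restore exactly when the bit is clear. The C
equivalent of the flag test is sketched below (the bit numbers and restore
helpers are illustrative, not taken from the real wakeup code):

#include <stdio.h>

/* Illustrative bit numbers for the behavior word tested with btl. */
#define RESTORE_CR4_BIT   0
#define RESTORE_EFER_BIT  1

static void restore_cr4(void)  { printf("restore CR4 from pmode_cr4\n"); }
static void restore_efer(void) { printf("restore EFER from pmode_efer\n"); }

static void wakeup_restore(unsigned long behavior)
{
	/* btl + jnc: each restore runs only when its bit is set. */
	if (behavior & (1UL << RESTORE_CR4_BIT))
		restore_cr4();
	if (behavior & (1UL << RESTORE_EFER_BIT))
		restore_efer();
}

int main(void)
{
	wakeup_restore(1UL << RESTORE_CR4_BIT);		/* CR4 only */
	wakeup_restore((1UL << RESTORE_CR4_BIT) |
		       (1UL << RESTORE_EFER_BIT));	/* both */
	return 0;
}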