x86: clean up non-smp usage of cpu maps
Clean up references to the early cpu maps for the non-SMP configuration
and remove some functions called for SMP configurations only.

Cc: Andi Kleen <ak@suse.de>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent a24eae88ad
commit b447a468fc

4 changed files with 30 additions and 22 deletions
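The whole change follows one pattern: in SMP builds the early boot arrays (x86_cpu_to_apicid_init[], x86_bios_cpu_apicid_init[], x86_cpu_to_node_map_init[]) and their *_early_ptr pointers are real objects that later get copied into per-cpu variables, while in non-SMP builds the arrays are compiled out and the pointers are #defined to NULL, so shared lookup code still compiles and simply falls through to the per-cpu copy. The following is a minimal, self-contained userspace sketch of that pattern, not kernel code; the names cpu_to_node_map_init, per_cpu_node and the main() driver are illustrative stand-ins.

/*
 * Standalone sketch of the SMP/non-SMP early-pointer pattern used by this
 * patch (ordinary userspace C, not kernel code).  Build with -DCONFIG_SMP
 * to get the "early array" path; without it the pointer is NULL and the
 * lookup falls back to the per-CPU copy.
 */
#include <stdio.h>
#include <stddef.h>

#define NR_CPUS		4
#define NUMA_NO_NODE	(-1)

#ifdef CONFIG_SMP
/* early boot array, later copied into the per-CPU variables */
static int cpu_to_node_map_init[NR_CPUS] = { 0, 0, 1, 1 };
static void *cpu_to_node_map_early_ptr = cpu_to_node_map_init;
#else
/* non-SMP: the array does not exist, the pointer is a NULL macro */
#define cpu_to_node_map_early_ptr NULL
#endif

/* stand-in for the per-CPU x86_cpu_to_node_map variables
 * ([0 ... N] is the GCC range designator also used in the kernel sources) */
static int per_cpu_node[NR_CPUS] = { [0 ... NR_CPUS - 1] = NUMA_NO_NODE };

static int cpu_to_node(int cpu)
{
	int *early = cpu_to_node_map_early_ptr;

	if (early)			/* early boot, SMP build only */
		return early[cpu];
	return per_cpu_node[cpu];	/* per-CPU copy (or default) */
}

int main(void)
{
	/* prints the early value with -DCONFIG_SMP, NUMA_NO_NODE otherwise */
	printf("cpu 2 -> node %d\n", cpu_to_node(2));
	return 0;
}

On a non-SMP build the compiler sees the early pointer as a constant NULL and can drop that branch entirely, which is why the header hunks below can offer NULL macros instead of real pointers.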
@@ -10,7 +10,7 @@
 #include <asm/setup.h>
 #include <asm/topology.h>
 
-#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_SMP)
 /*
  * Copy data used in early init routines from the initial arrays to the
  * per cpu data areas. These arrays then become expendable and the
@@ -21,21 +21,12 @@ static void __init setup_per_cpu_maps(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-#ifdef CONFIG_SMP
-		if (per_cpu_offset(cpu)) {
-#endif
-			per_cpu(x86_cpu_to_apicid, cpu) =
-						x86_cpu_to_apicid_init[cpu];
+		per_cpu(x86_cpu_to_apicid, cpu) = x86_cpu_to_apicid_init[cpu];
 		per_cpu(x86_bios_cpu_apicid, cpu) =
 						x86_bios_cpu_apicid_init[cpu];
 #ifdef CONFIG_NUMA
 		per_cpu(x86_cpu_to_node_map, cpu) =
 						x86_cpu_to_node_map_init[cpu];
-#endif
-#ifdef CONFIG_SMP
-		} else
-			printk(KERN_NOTICE "per_cpu_offset zero for cpu %d\n",
-									cpu);
 #endif
 	}
 
@@ -72,17 +63,20 @@ void __init setup_per_cpu_areas(void)
 
 	/* Copy section for each CPU (we discard the original) */
 	size = PERCPU_ENOUGH_ROOM;
 
 	printk(KERN_INFO "PERCPU: Allocating %lu bytes of per cpu data\n",
 			  size);
-	for_each_cpu_mask(i, cpu_possible_map) {
+	for_each_possible_cpu(i) {
 		char *ptr;
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 		ptr = alloc_bootmem_pages(size);
 #else
 		int node = early_cpu_to_node(i);
-		if (!node_online(node) || !NODE_DATA(node))
+		if (!node_online(node) || !NODE_DATA(node)) {
 			ptr = alloc_bootmem_pages(size);
+			printk(KERN_INFO
+				"cpu %d has no node or node-local memory\n", i);
+		}
 		else
 			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
 #endif
@@ -96,7 +90,7 @@ void __init setup_per_cpu_areas(void)
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
 	}
 
-	/* setup percpu data maps early */
+	/* Setup percpu data maps */
 	setup_per_cpu_maps();
 }
 
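The setup_per_cpu_areas() hunks above also replace for_each_cpu_mask(i, cpu_possible_map) with for_each_possible_cpu(i) and log CPUs whose node is offline or has no node-local memory before falling back to a plain bootmem allocation. Below is a rough userspace mock of that fallback, not kernel code: the cpu_node[] table, node_is_online[], alloc_pages_any() and alloc_pages_on_node() are stand-ins for early_cpu_to_node(), node_online()/NODE_DATA() and the bootmem allocators.

/*
 * Userspace mock of the node-local allocation fallback shown in the diff
 * above (not kernel code): prefer memory on the CPU's own node, fall back
 * to any memory and print a message when the node has none.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS		4
#define MAX_NUMNODES	2

static int cpu_node[NR_CPUS] = { 0, 0, 1, 1 };		/* early_cpu_to_node() stand-in */
static int node_is_online[MAX_NUMNODES] = { 1, 0 };	/* node 1: offline / no memory */

static void *alloc_pages_any(size_t size)
{
	return calloc(1, size);		/* stand-in for alloc_bootmem_pages() */
}

static void *alloc_pages_on_node(int node, size_t size)
{
	(void)node;			/* a real allocator would use this node's zones */
	return calloc(1, size);		/* stand-in for alloc_bootmem_pages_node() */
}

int main(void)
{
	size_t size = 4096;		/* per-CPU area size; PERCPU_ENOUGH_ROOM in the kernel */
	int i;

	for (i = 0; i < NR_CPUS; i++) {	/* for_each_possible_cpu() stand-in */
		int node = cpu_node[i];
		void *ptr;

		if (!node_is_online[node]) {
			ptr = alloc_pages_any(size);
			printf("cpu %d has no node or node-local memory\n", i);
		} else
			ptr = alloc_pages_on_node(node, size);

		printf("cpu %d: per-cpu area at %p\n", i, ptr);
		free(ptr);
	}
	return 0;
}

The message mirrors the printk added in the hunk above; the allocation itself still succeeds, it just comes from memory on another node.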
@@ -31,13 +31,15 @@ bootmem_data_t plat_node_bdata[MAX_NUMNODES];
 
 struct memnode memnode;
 
+#ifdef CONFIG_SMP
 int x86_cpu_to_node_map_init[NR_CPUS] = {
 	[0 ... NR_CPUS-1] = NUMA_NO_NODE
 };
 void *x86_cpu_to_node_map_early_ptr;
+EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr);
+#endif
 DEFINE_PER_CPU(int, x86_cpu_to_node_map) = NUMA_NO_NODE;
 EXPORT_PER_CPU_SYMBOL(x86_cpu_to_node_map);
-EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr);
 
 s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
@@ -29,10 +29,15 @@ extern int smp_num_siblings;
 extern unsigned int num_processors;
 extern cpumask_t cpu_initialized;
 
+#ifdef CONFIG_SMP
 extern u16 x86_cpu_to_apicid_init[];
 extern u16 x86_bios_cpu_apicid_init[];
 extern void *x86_cpu_to_apicid_early_ptr;
 extern void *x86_bios_cpu_apicid_early_ptr;
+#else
+#define x86_cpu_to_apicid_early_ptr NULL
+#define x86_bios_cpu_apicid_early_ptr NULL
+#endif
 
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
@@ -38,8 +38,13 @@ extern int cpu_to_node_map[];
 #endif
 
 DECLARE_PER_CPU(int, x86_cpu_to_node_map);
+
+#ifdef CONFIG_SMP
 extern int x86_cpu_to_node_map_init[];
 extern void *x86_cpu_to_node_map_early_ptr;
+#else
+#define x86_cpu_to_node_map_early_ptr NULL
+#endif
 
 extern cpumask_t node_to_cpumask_map[];
 
@@ -54,6 +59,8 @@ static inline int cpu_to_node(int cpu)
 }
 
 #else /* CONFIG_X86_64 */
+
+#ifdef CONFIG_SMP
 static inline int early_cpu_to_node(int cpu)
 {
 	int *cpu_to_node_map = x86_cpu_to_node_map_early_ptr;
@@ -65,6 +72,9 @@ static inline int early_cpu_to_node(int cpu)
 	else
 		return NUMA_NO_NODE;
 }
+#else
+#define early_cpu_to_node(cpu)	cpu_to_node(cpu)
+#endif
 
 static inline int cpu_to_node(int cpu)
 {
@@ -76,10 +86,7 @@ static inline int cpu_to_node(int cpu)
 		return ((int *)x86_cpu_to_node_map_early_ptr)[cpu];
 	}
 #endif
-	if (per_cpu_offset(cpu))
-		return per_cpu(x86_cpu_to_node_map, cpu);
-	else
-		return NUMA_NO_NODE;
+	return per_cpu(x86_cpu_to_node_map, cpu);
 }
 #endif /* CONFIG_X86_64 */
 