s390/numa: establish cpu to node mapping early
Initialize the cpu topology and therefore also the cpu to node mapping much earlier. Fixes this warning and subsequent crashes when using the fake numa emulation mode on s390:

WARNING: CPU: 0 PID: 1 at include/linux/cpumask.h:121 select_task_rq+0xe6/0x1a8
CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.6.0-rc6-00001-ge9d867a67fd0-dirty #28
task: 00000001dd270008 ti: 00000001eccb4000 task.ti: 00000001eccb4000
Krnl PSW : 0404c00180000000 0000000000176c56 (select_task_rq+0xe6/0x1a8)
           R:0 T:1 IO:0 EX:0 Key:0 M:1 W:0 P:0 AS:3 CC:0 PM:0 RI:0 EA:3
Call Trace:
([<0000000000176c30>] select_task_rq+0xc0/0x1a8)
([<0000000000177d64>] try_to_wake_up+0x2e4/0x478)
([<000000000015d46c>] create_worker+0x174/0x1c0)
([<0000000000161a98>] alloc_unbound_pwq+0x360/0x438)
([<0000000000162550>] apply_wqattrs_prepare+0x200/0x2a0)
([<000000000016266a>] apply_workqueue_attrs_locked+0x7a/0xb0)
([<0000000000162af0>] apply_workqueue_attrs+0x50/0x78)
([<000000000016441c>] __alloc_workqueue_key+0x304/0x520)
([<0000000000ee3706>] default_bdi_init+0x3e/0x70)
([<0000000000100270>] do_one_initcall+0x140/0x1d8)
([<0000000000ec9da8>] kernel_init_freeable+0x220/0x2d8)
([<0000000000984a7a>] kernel_init+0x2a/0x150)
([<00000000009913fa>] kernel_thread_starter+0x6/0xc)
([<00000000009913f4>] kernel_thread_starter+0x0/0xc)

Reviewed-by: Michael Holzheu <holzheu@linux.vnet.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 30fc4ca2a8
commit 8c91058022

5 changed files with 44 additions and 18 deletions
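For context, the failing path in the trace above: an initcall (default_bdi_init() in the backtrace) allocates an unbound workqueue, which wakes a kworker, and select_task_rq() then depends on cpu to node data that the fake NUMA emulation mode had not yet established. A minimal sketch of that triggering pattern, with hypothetical names, not part of the patch:

#include <linux/init.h>
#include <linux/workqueue.h>

/*
 * Sketch only (hypothetical initcall, modeled loosely on default_bdi_init()
 * from the backtrace): allocating an unbound workqueue walks
 * apply_wqattrs_prepare() -> alloc_unbound_pwq() -> create_worker(), and
 * waking the new worker reaches select_task_rq(). Before this patch the
 * cpu to node mapping consulted on that path was still empty under fake
 * NUMA emulation.
 */
static int __init early_wq_user_init(void)
{
	struct workqueue_struct *wq;

	wq = alloc_workqueue("early_wq_user", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;
	return 0;
}
early_initcall(early_wq_user_init);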
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -23,6 +23,7 @@ struct cpu_topology_s390 {
 };
 
 extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+extern cpumask_t cpus_with_topology;
 
 #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
 #define topology_thread_id(cpu)           (cpu_topology[cpu].thread_id)
@@ -36,6 +37,7 @@ extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
 
 #define mc_capable() 1
 
+void topology_init_early(void);
 int topology_cpu_init(struct cpu *);
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
@@ -45,6 +47,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #else /* CONFIG_SCHED_TOPOLOGY */
 
+static inline void topology_init_early(void) { }
 static inline void topology_schedule_update(void) { }
 static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
 static inline void topology_expect_change(void) { }
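Note: cpus_with_topology is exposed in this header so the NUMA emulation code under arch/s390/numa can iterate it (see the for_each_cpu() change in mode_emu.c below), and topology_init_early() gets a no-op stub for the !CONFIG_SCHED_TOPOLOGY case so the new call in setup_arch() needs no ifdef.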
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -924,6 +924,7 @@ void __init setup_arch(char **cmdline_p)
 	cpu_init();
 	numa_setup();
 	smp_detect_cpus();
+	topology_init_early();
 
 	/*
 	 * Create kernel page tables and switch to virtual addressing.
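The placement is the point of the patch: topology_init_early() runs right after numa_setup() has selected the NUMA mode and smp_detect_cpus() has populated the cpu present mask, but long before the first initcalls run, so the cpu to node mapping exists by the time select_task_rq() first needs it.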
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -7,6 +7,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/workqueue.h>
+#include <linux/bootmem.h>
 #include <linux/cpuset.h>
 #include <linux/device.h>
 #include <linux/export.h>
@@ -51,6 +52,8 @@ static struct mask_info drawer_info;
 struct cpu_topology_s390 cpu_topology[NR_CPUS];
 EXPORT_SYMBOL_GPL(cpu_topology);
 
+cpumask_t cpus_with_topology;
+
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
 	cpumask_t mask;
@@ -106,6 +109,7 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
 			cpumask_set_cpu(lcpu + i, &drawer->mask);
 			cpumask_set_cpu(lcpu + i, &book->mask);
 			cpumask_set_cpu(lcpu + i, &socket->mask);
+			cpumask_set_cpu(lcpu + i, &cpus_with_topology);
 			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
 		}
 	}
@@ -231,6 +235,8 @@ static void update_cpu_masks(void)
 			topo->socket_id = cpu;
 			topo->book_id = cpu;
 			topo->drawer_id = cpu;
+			if (cpu_present(cpu))
+				cpumask_set_cpu(cpu, &cpus_with_topology);
 		}
 	}
 	numa_update_cpu_topology();
@@ -241,12 +247,12 @@ void store_topology(struct sysinfo_15_1_x *info)
 	stsi(info, 15, 1, min(topology_max_mnest, 4));
 }
 
-int arch_update_cpu_topology(void)
+static int __arch_update_cpu_topology(void)
 {
 	struct sysinfo_15_1_x *info = tl_info;
-	struct device *dev;
-	int cpu, rc = 0;
+	int rc = 0;
 
+	cpumask_clear(&cpus_with_topology);
 	if (MACHINE_HAS_TOPOLOGY) {
 		rc = 1;
 		store_topology(info);
@@ -255,6 +261,15 @@ int arch_update_cpu_topology(void)
 	update_cpu_masks();
 	if (!MACHINE_HAS_TOPOLOGY)
 		topology_update_polarization_simple();
+	return rc;
+}
+
+int arch_update_cpu_topology(void)
+{
+	struct device *dev;
+	int cpu, rc;
+
+	rc = __arch_update_cpu_topology();
 	for_each_online_cpu(cpu) {
 		dev = get_cpu_device(cpu);
 		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
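The split is what allows reuse at boot time: __arch_update_cpu_topology() only rebuilds the masks and polarization state, while the KOBJ_CHANGE uevents stay in the outer arch_update_cpu_topology(), since no per-cpu devices (and no sysfs) exist yet when topology_init_early() runs from setup_arch().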
@@ -438,20 +453,20 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
 		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
 	nr_masks = max(nr_masks, 1);
 	for (i = 0; i < nr_masks; i++) {
-		mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
+		mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
 		mask = mask->next;
 	}
 }
 
-static int __init s390_topology_init(void)
+void __init topology_init_early(void)
 {
 	struct sysinfo_15_1_x *info;
 	int i;
 
+	set_sched_topology(s390_topology);
 	if (!MACHINE_HAS_TOPOLOGY)
-		return 0;
-	tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
+		goto out;
+	tl_info = memblock_virt_alloc(sizeof(*tl_info), PAGE_SIZE);
 	info = tl_info;
 	store_topology(info);
 	pr_info("The CPU configuration topology of the machine is:");
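s390_topology_init() thus becomes topology_init_early(), invoked directly from setup_arch() instead of via early_initcall(). Because it now runs before the slab allocator is initialized, its kzalloc()/__get_free_page() allocations are replaced with memblock_virt_alloc(), and the function falls through to __arch_update_cpu_topology() (the out label in the next hunk) to build the initial masks.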
@@ -461,9 +476,9 @@ static int __init s390_topology_init(void)
 	alloc_masks(info, &socket_info, 1);
 	alloc_masks(info, &book_info, 2);
 	alloc_masks(info, &drawer_info, 3);
-	return 0;
+out:
+	__arch_update_cpu_topology();
 }
-early_initcall(s390_topology_init);
 
 static int __init topology_init(void)
 {
--- a/arch/s390/numa/mode_emu.c
+++ b/arch/s390/numa/mode_emu.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/cpumask.h>
 #include <linux/memblock.h>
+#include <linux/bootmem.h>
 #include <linux/node.h>
 #include <linux/memory.h>
 #include <linux/slab.h>
@@ -307,13 +308,11 @@ fail:
 /*
  * Allocate and initialize core to node mapping
  */
-static void create_core_to_node_map(void)
+static void __ref create_core_to_node_map(void)
 {
 	int i;
 
-	emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL);
-	if (emu_cores == NULL)
-		panic("Could not allocate cores to node memory");
+	emu_cores = memblock_virt_alloc(sizeof(*emu_cores), 8);
 	for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
 		emu_cores->to_node_id[i] = NODE_ID_FREE;
 }
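Two details worth noting here: memblock_virt_alloc() panics on allocation failure, which is why the NULL check and explicit panic() can be dropped; and __ref suppresses the modpost section-mismatch warning for referencing the init-section memblock allocator from a function that is not itself __init. The same pattern repeats in toptree.c below.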
@@ -354,7 +353,7 @@ static struct toptree *toptree_from_topology(void)
 
 	phys = toptree_new(TOPTREE_ID_PHYS, 1);
 
-	for_each_online_cpu(cpu) {
+	for_each_cpu(cpu, &cpus_with_topology) {
 		top = &cpu_topology[cpu];
 		node = toptree_get_child(phys, 0);
 		drawer = toptree_get_child(node, top->drawer_id);
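Iterating cpus_with_topology rather than the online mask matters for the early call path: when the emulated NUMA topology is first evaluated during setup_arch(), only the boot cpu is online, yet topology information has already been recorded for every detected cpu.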
--- a/arch/s390/numa/toptree.c
+++ b/arch/s390/numa/toptree.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/bootmem.h>
 #include <linux/cpumask.h>
 #include <linux/list.h>
 #include <linux/list_sort.h>
@@ -25,10 +26,14 @@
  * RETURNS:
  * Pointer to the new tree node or NULL on error
  */
-struct toptree *toptree_alloc(int level, int id)
+struct toptree __ref *toptree_alloc(int level, int id)
 {
-	struct toptree *res = kzalloc(sizeof(struct toptree), GFP_KERNEL);
+	struct toptree *res;
 
+	if (slab_is_available())
+		res = kzalloc(sizeof(*res), GFP_KERNEL);
+	else
+		res = memblock_virt_alloc(sizeof(*res), 8);
 	if (!res)
 		return res;
 
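toptree_alloc() now has to work both before and after slab is up, since topology trees are built from topology_init_early() as well as from later topology updates. The idiom generalizes; a minimal sketch with hypothetical helper names (the kernel APIs are the same ones used in the hunks above and below):

#include <linux/bootmem.h>
#include <linux/slab.h>

/*
 * Sketch only, not part of the patch: a zeroing allocator usable at any
 * boot stage. memblock_virt_alloc() panics on failure, so only the slab
 * path can return NULL. __ref avoids the section mismatch warning for
 * calling an __init allocator from non-init code.
 */
static void __ref *any_stage_zalloc(size_t size)
{
	if (slab_is_available())
		return kzalloc(size, GFP_KERNEL);
	return memblock_virt_alloc(size, 8);
}

/*
 * Freeing must check slab_is_available() again, exactly as toptree_free()
 * does below. Caveat (which holds for the early topology trees here): an
 * object allocated from memblock must also be freed before slab comes up,
 * or it would wrongly be handed to kfree().
 */
static void __ref any_stage_free(void *obj, size_t size)
{
	if (slab_is_available())
		kfree(obj);
	else
		memblock_free_early((unsigned long)obj, size);
}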
@@ -65,7 +70,7 @@ static void toptree_remove(struct toptree *cand)
  * cleanly using toptree_remove. Possible children are freed
  * recursively. In the end @cand itself is freed.
  */
-void toptree_free(struct toptree *cand)
+void __ref toptree_free(struct toptree *cand)
 {
 	struct toptree *child, *tmp;
 
@@ -73,7 +78,10 @@ void toptree_free(struct toptree *cand)
 	toptree_remove(cand);
 	toptree_for_each_child_safe(child, tmp, cand)
 		toptree_free(child);
-	kfree(cand);
+	if (slab_is_available())
+		kfree(cand);
+	else
+		memblock_free_early((unsigned long)cand, sizeof(*cand));
 }
 
 /**