percpu: Always align percpu output section to PAGE_SIZE

The percpu allocator honors alignment requests up to PAGE_SIZE, and both
the percpu addresses in the percpu address space and the translated kernel
addresses should be aligned accordingly.  The calculation of the former
depends on the alignment of the percpu output section in the kernel
image.

The linker script macros PERCPU_VADDR() and PERCPU() are used to define
this output section, and the latter takes an @align parameter.  Several
architectures use an @align smaller than PAGE_SIZE, breaking percpu memory
alignment.
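
To see why this matters, here is a small user-space sketch (not from the
patch; the translate() helper, the base addresses and the offset are made
up for illustration): a PAGE_SIZE-aligned offset into the percpu section
only yields a PAGE_SIZE-aligned final address if the section base itself
is PAGE_SIZE aligned.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL

/* Simplified model of percpu address translation: the address a CPU
 * finally dereferences is the output section base plus the offset the
 * percpu allocator handed out. */
static uintptr_t translate(uintptr_t section_base, uintptr_t pcpu_offset)
{
	return section_base + pcpu_offset;
}

int main(void)
{
	uintptr_t aligned_base    = 0x01000000UL;      /* PAGE_SIZE aligned */
	uintptr_t misaligned_base = 0x01000000UL + 32; /* only 32-byte aligned,
							  as with PERCPU(32, ...) */
	uintptr_t offset = 2 * PAGE_SIZE;              /* allocator honored a
							  PAGE_SIZE request */

	printf("aligned base:    addr %% PAGE_SIZE = %lu\n",
	       (unsigned long)(translate(aligned_base, offset) % PAGE_SIZE));
	printf("misaligned base: addr %% PAGE_SIZE = %lu\n",
	       (unsigned long)(translate(misaligned_base, offset) % PAGE_SIZE));
	return 0;
}

The second printf reports a non-zero remainder, which is exactly the kind
of misalignment the allocator's callers are not prepared for.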

This patch removes the @align parameter from PERCPU(), renames it to
PERCPU_SECTION() and makes it always align to PAGE_SIZE.  While at it,
PCPU_SETUP_BUG_ON() checks are added so that alignment problems are
reliably detected, and the percpu alignment comment recently added in
workqueue.c is dropped, as the condition would trigger a BUG well before
reaching that code.

For um, this patch raises the alignment of the percpu area.  As the area
is in .init, there shouldn't be any noticeable difference.

This problem was discovered by David Howells while debugging a boot
failure on mn10300.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Mike Frysinger <vapier@gentoo.org>
Cc: uclinux-dist-devel@blackfin.uclinux.org
Cc: David Howells <dhowells@redhat.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: user-mode-linux-devel@lists.sourceforge.net
Tejun Heo 2011-03-24 18:50:09 +01:00
parent 6c51038900
commit 0415b00d17
20 changed files with 28 additions and 29 deletions


@@ -39,7 +39,7 @@ SECTIONS
 __init_begin = ALIGN(PAGE_SIZE);
 INIT_TEXT_SECTION(PAGE_SIZE)
 INIT_DATA_SECTION(16)
-PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
+PERCPU_SECTION(L1_CACHE_BYTES)
 /* Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page
    needed for the THREAD_SIZE aligned init_task gets freed after init */
 . = ALIGN(THREAD_SIZE);


@@ -82,7 +82,7 @@ SECTIONS
 #endif
 }
-PERCPU(32, PAGE_SIZE)
+PERCPU_SECTION(32)
 #ifndef CONFIG_XIP_KERNEL
 . = ALIGN(PAGE_SIZE);


@@ -136,7 +136,7 @@ SECTIONS
 . = ALIGN(16);
 INIT_DATA_SECTION(16)
-PERCPU(32, PAGE_SIZE)
+PERCPU_SECTION(32)
 .exit.data :
 {


@@ -102,7 +102,7 @@ SECTIONS
 #endif
 __vmlinux_end = .; /* Last address of the physical file. */
 #ifdef CONFIG_ETRAX_ARCH_V32
-PERCPU(32, PAGE_SIZE)
+PERCPU_SECTION(32)
 .init.ramfs : {
 INIT_RAM_FS


@@ -37,7 +37,7 @@ SECTIONS
 _einittext = .;
 INIT_DATA_SECTION(8)
-PERCPU(L1_CACHE_BYTES, 4096)
+PERCPU_SECTION(L1_CACHE_BYTES)
 . = ALIGN(PAGE_SIZE);
 __init_end = .;


@@ -53,7 +53,7 @@ SECTIONS
 __init_begin = .;
 INIT_TEXT_SECTION(PAGE_SIZE)
 INIT_DATA_SECTION(16)
-PERCPU(32, PAGE_SIZE)
+PERCPU_SECTION(32)
 . = ALIGN(PAGE_SIZE);
 __init_end = .;
 /* freed after init ends here */


@@ -115,7 +115,7 @@ SECTIONS
 EXIT_DATA
 }
-PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE)
+PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
 . = ALIGN(PAGE_SIZE);
 __init_end = .;
 /* freed after init ends here */


@@ -70,7 +70,7 @@ SECTIONS
 .exit.text : { EXIT_TEXT; }
 .exit.data : { EXIT_DATA; }
-PERCPU(32, PAGE_SIZE)
+PERCPU_SECTION(32)
 . = ALIGN(PAGE_SIZE);
 __init_end = .;
 /* freed after init ends here */


@@ -145,7 +145,7 @@ SECTIONS
 EXIT_DATA
 }
-PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
+PERCPU_SECTION(L1_CACHE_BYTES)
 . = ALIGN(PAGE_SIZE);
 __init_end = .;
 /* freed after init ends here */


@@ -160,7 +160,7 @@ SECTIONS
 INIT_RAM_FS
 }
-PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
+PERCPU_SECTION(L1_CACHE_BYTES)
 . = ALIGN(8);
 .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {


@@ -77,7 +77,7 @@ SECTIONS
 . = ALIGN(PAGE_SIZE);
 INIT_DATA_SECTION(0x100)
-PERCPU(0x100, PAGE_SIZE)
+PERCPU_SECTION(0x100)
 . = ALIGN(PAGE_SIZE);
 __init_end = .; /* freed after init ends here */


@@ -66,7 +66,7 @@ SECTIONS
 __machvec_end = .;
 }
-PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
+PERCPU_SECTION(L1_CACHE_BYTES)
 /*
  * .exit.text is discarded at runtime, not link time, to deal with


@@ -108,7 +108,7 @@ SECTIONS
 __sun4v_2insn_patch_end = .;
 }
-PERCPU(SMP_CACHE_BYTES, PAGE_SIZE)
+PERCPU_SECTION(SMP_CACHE_BYTES)
 . = ALIGN(PAGE_SIZE);
 __init_end = .;


@@ -60,7 +60,7 @@ SECTIONS
 . = ALIGN(PAGE_SIZE);
 VMLINUX_SYMBOL(_sinitdata) = .;
 INIT_DATA_SECTION(16) :data =0
-PERCPU(L2_CACHE_BYTES, PAGE_SIZE)
+PERCPU_SECTION(L2_CACHE_BYTES)
 . = ALIGN(PAGE_SIZE);
 VMLINUX_SYMBOL(_einitdata) = .;


@@ -42,7 +42,7 @@
 INIT_SETUP(0)
 }
-PERCPU(32, 32)
+PERCPU_SECTION(32)
 .initcall.init : {
 INIT_CALLS


@@ -319,7 +319,7 @@ SECTIONS
 }
 #if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
-PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE)
+PERCPU_SECTION(INTERNODE_CACHE_BYTES)
 #endif
 . = ALIGN(PAGE_SIZE);


@@ -155,7 +155,7 @@ SECTIONS
 INIT_RAM_FS
 }
-PERCPU(XCHAL_ICACHE_LINESIZE, PAGE_SIZE)
+PERCPU_SECTION(XCHAL_ICACHE_LINESIZE)
 /* We need this dummy segment here */


@@ -15,7 +15,7 @@
 * HEAD_TEXT_SECTION
 * INIT_TEXT_SECTION(PAGE_SIZE)
 * INIT_DATA_SECTION(...)
-* PERCPU(CACHELINE_SIZE, PAGE_SIZE)
+* PERCPU_SECTION(CACHELINE_SIZE)
 * __init_end = .;
 *
 * _stext = .;
@@ -709,7 +709,7 @@
 *
 * Note that this macros defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
-* address, use PERCPU().
+* address, use PERCPU_SECTION.
 */
 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
 VMLINUX_SYMBOL(__per_cpu_load) = .; \
@@ -729,20 +729,19 @@
 . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
 /**
-* PERCPU - define output section for percpu area, simple version
+* PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
-* @align: required alignment
 *
-* Align to @align and outputs output section for percpu area. This macro
-* doesn't manipulate @vaddr or @phdr and __per_cpu_load and
+* Align to PAGE_SIZE and outputs output section for percpu area. This
+* macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
-* This macro is equivalent to ALIGN(@align); PERCPU_VADDR(@cacheline,,)
+* This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu which is required for relocatable x86_32 configuration.
 */
-#define PERCPU(cacheline, align) \
-. = ALIGN(align); \
+#define PERCPU_SECTION(cacheline) \
+. = ALIGN(PAGE_SIZE); \
 .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
 VMLINUX_SYMBOL(__per_cpu_load) = .; \
 VMLINUX_SYMBOL(__per_cpu_start) = .; \


@@ -2860,9 +2860,7 @@ static int alloc_cwqs(struct workqueue_struct *wq)
 }
 }
-/* just in case, make sure it's actually aligned
-* - this is affected by PERCPU() alignment in vmlinux.lds.S
-*/
+/* just in case, make sure it's actually aligned */
 BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
 return wq->cpu_wq.v ? 0 : -ENOMEM;
 }


@@ -1216,8 +1216,10 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
 #ifdef CONFIG_SMP
 PCPU_SETUP_BUG_ON(!ai->static_size);
+PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
 #endif
 PCPU_SETUP_BUG_ON(!base_addr);
+PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
 PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
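
The added checks rely on the usual mask idiom: an address is PAGE_SIZE
aligned exactly when its page-offset bits are zero, i.e. when
(addr & ~PAGE_MASK) == 0.  A minimal user-space sketch of the same test
(PAGE_MASK defined locally here; the sample addresses are made up for
illustration):

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Mirrors the condition in the new PCPU_SETUP_BUG_ON() checks: non-zero
 * low bits mean the address is not page aligned. */
static int page_misaligned(uintptr_t addr)
{
	return (addr & ~PAGE_MASK) != 0;
}

int main(void)
{
	assert(!page_misaligned(0x10000));     /* page aligned */
	assert(page_misaligned(0x10000 + 32)); /* only cacheline aligned */
	return 0;
}

With the output section now forced to PAGE_SIZE alignment, both new checks
should hold by construction; they are there to catch any future regression
early in boot instead of letting it surface as subtle misalignment later.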