x86: Cleanup highmap after brk is concluded
cleanup_highmap() currently runs in two steps: an early one in head64.c that only clears mappings above _end, and a second one in init_memory_mapping() that tries to clean from _brk_end to _end. The second step should check that those boundaries are PMD_SIZE aligned, but currently does not. Moreover, init_memory_mapping() is called several times for NUMA or memory hotplug, so initial kernel mappings really should not be handled there.

This patch moves cleanup_highmap() down to after _brk_end is settled, so everything can be done in one step. The implementation of cleanup_highmap() now also honors max_pfn_mapped.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
LKML-Reference: <alpine.DEB.2.00.1103171739050.3382@kaball-desktop>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent 4981d01ead
commit e5f15b45dd

3 changed files with 9 additions and 30 deletions
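As an illustration of the alignment issue the message describes, here is a minimal user-space sketch (not kernel code; the address and layout are invented) of the PMD_SIZE rounding that the reworked cleanup_highmap() applies to _brk_end:

#include <stdio.h>

#define PMD_SIZE (2UL << 20)	/* one 2 MB large-page mapping on x86-64 */

int main(void)
{
	unsigned long brk_end = 0xffffffff81f12000UL;	/* invented, not 2 MB aligned */
	/* roundup(brk_end, PMD_SIZE) - 1, as in the patched cleanup_highmap() */
	unsigned long end = ((brk_end + PMD_SIZE - 1) & ~(PMD_SIZE - 1)) - 1;

	printf("_brk_end                         = %#lx\n", brk_end);
	printf("last byte covered by its 2MB pmd = %#lx\n", end);
	/* a 2 MB pmd can only be kept or cleared as a whole, so everything
	 * up to 'end' stays mapped even though _brk_end itself is lower */
	return 0;
}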
arch/x86/kernel/head64.c
@@ -77,9 +77,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
 	/* Make NULL pointers segfault */
 	zap_identity_mappings();
 
-	/* Cleanup the over mapped high alias */
-	cleanup_highmap();
-
 	max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;
 
 	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
arch/x86/kernel/setup.c
@@ -294,30 +294,11 @@ static void __init init_gbpages(void)
 	else
 		direct_gbpages = 0;
 }
-
-static void __init cleanup_highmap_brk_end(void)
-{
-	pud_t *pud;
-	pmd_t *pmd;
-
-	mmu_cr4_features = read_cr4();
-
-	/*
-	 * _brk_end cannot change anymore, but it and _end may be
-	 * located on different 2M pages. cleanup_highmap(), however,
-	 * can only consider _end when it runs, so destroy any
-	 * mappings beyond _brk_end here.
-	 */
-	pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
-	pmd = pmd_offset(pud, _brk_end - 1);
-	while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
-		pmd_clear(pmd);
-}
 #else
 static inline void init_gbpages(void)
 {
 }
-static inline void cleanup_highmap_brk_end(void)
+static void __init cleanup_highmap(void)
 {
 }
 #endif
@@ -330,8 +311,6 @@ static void __init reserve_brk(void)
 	/* Mark brk area as locked down and no longer taking any
 	   new allocations */
 	_brk_start = 0;
-
-	cleanup_highmap_brk_end();
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -950,6 +929,8 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	reserve_brk();
 
+	cleanup_highmap();
+
 	memblock.current_limit = get_max_mapped();
 	memblock_x86_fill();
 
arch/x86/mm/init_64.c
@@ -51,6 +51,7 @@
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
 #include <asm/init.h>
+#include <asm/setup.h>
 
 static int __init parse_direct_gbpages_off(char *arg)
 {
@@ -293,18 +294,18 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
  * to the compile time generated pmds. This results in invalid pmds up
  * to the point where we hit the physaddr 0 mapping.
  *
- * We limit the mappings to the region from _text to _end.  _end is
- * rounded up to the 2MB boundary. This catches the invalid pmds as
+ * We limit the mappings to the region from _text to _brk_end.  _brk_end
+ * is rounded up to the 2MB boundary. This catches the invalid pmds as
  * well, as they are located before _text:
  */
 void __init cleanup_highmap(void)
 {
 	unsigned long vaddr = __START_KERNEL_map;
-	unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
+	unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
+	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
 	pmd_t *pmd = level2_kernel_pgt;
-	pmd_t *last_pmd = pmd + PTRS_PER_PMD;
 
-	for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
+	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
 		if (pmd_none(*pmd))
 			continue;
 		if (vaddr < (unsigned long) _text || vaddr > end)
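For reference, a stand-alone user-space model of the reworked loop above; the kernel page-table types are mocked as a flag array and all addresses are invented, so only the loop-bound logic mirrors the patch:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(2UL << 20)
#define PTRS_PER_PMD	512
#define KMAP		0xffffffff80000000UL	/* stands in for __START_KERNEL_map */

int main(void)
{
	int pmd[PTRS_PER_PMD];				/* 1 = mapped, 0 = cleared */
	unsigned long text = KMAP + 0x1000000;		/* invented _text */
	unsigned long brk_end = KMAP + 0x1e00000;	/* invented _brk_end */
	unsigned long max_pfn_mapped = (512UL << 20) >> PAGE_SHIFT; /* 512 MB mapped */

	unsigned long vaddr = KMAP;
	unsigned long vaddr_end = KMAP + (max_pfn_mapped << PAGE_SHIFT);
	unsigned long end = ((brk_end + PMD_SIZE - 1) & ~(PMD_SIZE - 1)) - 1;
	int i, kept = 0;

	for (i = 0; i < PTRS_PER_PMD; i++)
		pmd[i] = 1;				/* pretend everything is mapped */

	/* same walk and bounds as the patched cleanup_highmap() */
	for (i = 0; i < PTRS_PER_PMD && vaddr + PMD_SIZE - 1 < vaddr_end;
	     i++, vaddr += PMD_SIZE) {
		if (!pmd[i])
			continue;
		if (vaddr < text || vaddr > end)
			pmd[i] = 0;			/* pmd_clear() in the kernel */
		else
			kept++;
	}
	printf("kept %d of %d 2MB kernel mappings\n", kept, PTRS_PER_PMD);
	return 0;
}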