mm: make early_pfn_to_nid() and related definitions close to each other
early_pfn_to_nid() and its helper __early_pfn_to_nid() are spread around include/linux/mm.h, include/linux/mmzone.h and mm/page_alloc.c. Drop unused stub for __early_pfn_to_nid() and move its actual generic implementation close to its users. Signed-off-by: Mike Rapoport <rppt@linux.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Tested-by: Hoan Tran <hoan@os.amperecomputing.com> [arm64] Reviewed-by: Baoquan He <bhe@redhat.com> Cc: Brian Cain <bcain@codeaurora.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: "David S. Miller" <davem@davemloft.net> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Greentime Hu <green.hu@gmail.com> Cc: Greg Ungerer <gerg@linux-m68k.org> Cc: Guan Xuetao <gxt@pku.edu.cn> Cc: Guo Ren <guoren@kernel.org> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Helge Deller <deller@gmx.de> Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Ley Foon Tan <ley.foon.tan@intel.com> Cc: Mark Salter <msalter@redhat.com> Cc: Matt Turner <mattst88@gmail.com> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Hocko <mhocko@kernel.org> Cc: Michal Simek <monstr@monstr.eu> Cc: Nick Hu <nickhu@andestech.com> Cc: Paul Walmsley <paul.walmsley@sifive.com> Cc: Richard Weinberger <richard@nod.at> Cc: Rich Felker <dalias@libc.org> Cc: Russell King <linux@armlinux.org.uk> Cc: Stafford Horne <shorne@gmail.com> Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de> Cc: Tony Luck <tony.luck@intel.com> Cc: Vineet Gupta <vgupta@synopsys.com> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Link: http://lkml.kernel.org/r/20200412194859.12663-3-rppt@kernel.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
d622abf74f
commit
6f24fbd38c
3 changed files with 27 additions and 37 deletions
|
@@ -2445,9 +2445,9 @@ extern void sparse_memory_present_with_active_regions(int nid);
|
||||||
|
|
||||||
#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
|
#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
|
||||||
!defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
|
!defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
|
||||||
static inline int __early_pfn_to_nid(unsigned long pfn,
|
static inline int early_pfn_to_nid(unsigned long pfn)
|
||||||
struct mminit_pfnnid_cache *state)
|
|
||||||
{
|
{
|
||||||
|
BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
|
|
|
@@ -1080,15 +1080,6 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
|
||||||
#include <asm/sparsemem.h>
|
#include <asm/sparsemem.h>
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
|
|
||||||
!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
|
|
||||||
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
|
|
||||||
{
|
|
||||||
BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef CONFIG_FLATMEM
|
#ifdef CONFIG_FLATMEM
|
||||||
#define pfn_to_nid(pfn) (0)
|
#define pfn_to_nid(pfn) (0)
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@@ -1504,6 +1504,31 @@ void __free_pages_core(struct page *page, unsigned int order)
|
||||||
|
|
||||||
static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
|
static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
|
||||||
|
|
||||||
|
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
|
||||||
|
*/
|
||||||
|
int __meminit __early_pfn_to_nid(unsigned long pfn,
|
||||||
|
struct mminit_pfnnid_cache *state)
|
||||||
|
{
|
||||||
|
unsigned long start_pfn, end_pfn;
|
||||||
|
int nid;
|
||||||
|
|
||||||
|
if (state->last_start <= pfn && pfn < state->last_end)
|
||||||
|
return state->last_nid;
|
||||||
|
|
||||||
|
nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
|
||||||
|
if (nid != NUMA_NO_NODE) {
|
||||||
|
state->last_start = start_pfn;
|
||||||
|
state->last_end = end_pfn;
|
||||||
|
state->last_nid = nid;
|
||||||
|
}
|
||||||
|
|
||||||
|
return nid;
|
||||||
|
}
|
||||||
|
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
|
||||||
|
|
||||||
int __meminit early_pfn_to_nid(unsigned long pfn)
|
int __meminit early_pfn_to_nid(unsigned long pfn)
|
||||||
{
|
{
|
||||||
static DEFINE_SPINLOCK(early_pfn_lock);
|
static DEFINE_SPINLOCK(early_pfn_lock);
|
||||||
|
@@ -6310,32 +6335,6 @@ void __meminit init_currently_empty_zone(struct zone *zone,
|
||||||
zone->initialized = 1;
|
zone->initialized = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
|
|
||||||
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
|
|
||||||
*/
|
|
||||||
int __meminit __early_pfn_to_nid(unsigned long pfn,
|
|
||||||
struct mminit_pfnnid_cache *state)
|
|
||||||
{
|
|
||||||
unsigned long start_pfn, end_pfn;
|
|
||||||
int nid;
|
|
||||||
|
|
||||||
if (state->last_start <= pfn && pfn < state->last_end)
|
|
||||||
return state->last_nid;
|
|
||||||
|
|
||||||
nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
|
|
||||||
if (nid != NUMA_NO_NODE) {
|
|
||||||
state->last_start = start_pfn;
|
|
||||||
state->last_end = end_pfn;
|
|
||||||
state->last_nid = nid;
|
|
||||||
}
|
|
||||||
|
|
||||||
return nid;
|
|
||||||
}
|
|
||||||
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
|
* free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
|
||||||
* @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
|
* @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
|
||||||
|
|
Loading…
Reference in a new issue