arch: x86: charge page tables to kmemcg
Page tables can bite a relatively big chunk off system memory, and their allocations are easy to trigger from userspace, so they should be accounted to kmemcg. This patch marks page table allocations as __GFP_ACCOUNT for x86.

Note that we must not charge allocations of kernel page tables: they can be shared among processes from different cgroups, so accounting them to a particular cgroup could pin other cgroups indefinitely. We therefore clear the __GFP_ACCOUNT flag when a page table is allocated for the kernel.

Link: http://lkml.kernel.org/r/7d5c54f6a2bcbe76f03171689440003d87e6c742.1464079538.git.vdavydov@virtuozzo.com
Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5e8d35f849
commit 3e79ec7ddc
2 changed files with 17 additions and 5 deletions
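Every hunk below applies the same pattern: allocate page tables with GFP_KERNEL_ACCOUNT so they are charged to the allocating task's kmemcg, and strip the accounting bit when the table belongs to the kernel's init_mm. As an illustrative sketch only (the helper name pick_pgtable_gfp() is hypothetical; the patch open-codes this check at each allocation site):

/*
 * Illustrative sketch, not part of the patch: the gfp choice the patch
 * repeats at each page-table allocation site.
 */
static inline gfp_t pick_pgtable_gfp(struct mm_struct *mm)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)		/* kernel page tables may be shared across cgroups */
		gfp &= ~__GFP_ACCOUNT;	/* ...so never charge them to a kmemcg */
	return gfp;
}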
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -81,7 +81,11 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	struct page *page;
-	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
+	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;
+
+	if (mm == &init_mm)
+		gfp &= ~__GFP_ACCOUNT;
+	page = alloc_pages(gfp, 0);
 	if (!page)
 		return NULL;
 	if (!pgtable_pmd_page_ctor(page)) {
@@ -125,7 +129,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return (pud_t *)get_zeroed_page(GFP_KERNEL);
+	gfp_t gfp = GFP_KERNEL_ACCOUNT;
+
+	if (mm == &init_mm)
+		gfp &= ~__GFP_ACCOUNT;
+	return (pud_t *)get_zeroed_page(gfp);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -6,7 +6,7 @@
 #include <asm/fixmap.h>
 #include <asm/mtrr.h>
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
+#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)
 
 #ifdef CONFIG_HIGHPTE
 #define PGALLOC_USER_GFP __GFP_HIGHMEM
@@ -18,7 +18,7 @@ gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	return (pte_t *)__get_free_page(PGALLOC_GFP);
+	return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
 }
 
 pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
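For reference, GFP_KERNEL_ACCOUNT is simply GFP_KERNEL with the accounting bit set, so masking __GFP_ACCOUNT back out of PGALLOC_GFP, as pte_alloc_one_kernel() does above, restores an ordinary unaccounted kernel allocation:

/* include/linux/gfp.h, for reference: */
#define GFP_KERNEL_ACCOUNT	(GFP_KERNEL | __GFP_ACCOUNT)

/* hence PGALLOC_GFP & ~__GFP_ACCOUNT expands to GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO */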
@@ -207,9 +207,13 @@ static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
 {
 	int i;
 	bool failed = false;
+	gfp_t gfp = PGALLOC_GFP;
+
+	if (mm == &init_mm)
+		gfp &= ~__GFP_ACCOUNT;
 
 	for(i = 0; i < PREALLOCATED_PMDS; i++) {
-		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
+		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
 		if (!pmd)
 			failed = true;
 		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {