thp: memcg huge memory

Add memcg charge/uncharge to hugepage faults in huge_memory.c.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Andrea Arcangeli 2011-01-13 15:46:57 -08:00 committed by Linus Torvalds
parent 152c9ccb75
commit b9bbfbe30a

View file

@@ -233,6 +233,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
VM_BUG_ON(!PageCompound(page)); VM_BUG_ON(!PageCompound(page));
pgtable = pte_alloc_one(mm, haddr); pgtable = pte_alloc_one(mm, haddr);
if (unlikely(!pgtable)) { if (unlikely(!pgtable)) {
mem_cgroup_uncharge_page(page);
put_page(page); put_page(page);
return VM_FAULT_OOM; return VM_FAULT_OOM;
} }
@@ -243,6 +244,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_none(*pmd))) { if (unlikely(!pmd_none(*pmd))) {
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
mem_cgroup_uncharge_page(page);
put_page(page); put_page(page);
pte_free(mm, pgtable); pte_free(mm, pgtable);
} else { } else {
@@ -286,6 +288,10 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
page = alloc_hugepage(transparent_hugepage_defrag(vma)); page = alloc_hugepage(transparent_hugepage_defrag(vma));
if (unlikely(!page)) if (unlikely(!page))
goto out; goto out;
if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
put_page(page);
goto out;
}
return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page); return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
} }
@@ -402,9 +408,17 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
for (i = 0; i < HPAGE_PMD_NR; i++) { for (i = 0; i < HPAGE_PMD_NR; i++) {
pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE, pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
vma, address); vma, address);
if (unlikely(!pages[i])) { if (unlikely(!pages[i] ||
while (--i >= 0) mem_cgroup_newpage_charge(pages[i], mm,
GFP_KERNEL))) {
if (pages[i])
put_page(pages[i]); put_page(pages[i]);
mem_cgroup_uncharge_start();
while (--i >= 0) {
mem_cgroup_uncharge_page(pages[i]);
put_page(pages[i]);
}
mem_cgroup_uncharge_end();
kfree(pages); kfree(pages);
ret |= VM_FAULT_OOM; ret |= VM_FAULT_OOM;
goto out; goto out;
@@ -455,8 +469,12 @@ out:
out_free_pages: out_free_pages:
spin_unlock(&mm->page_table_lock); spin_unlock(&mm->page_table_lock);
for (i = 0; i < HPAGE_PMD_NR; i++) mem_cgroup_uncharge_start();
for (i = 0; i < HPAGE_PMD_NR; i++) {
mem_cgroup_uncharge_page(pages[i]);
put_page(pages[i]); put_page(pages[i]);
}
mem_cgroup_uncharge_end();
kfree(pages); kfree(pages);
goto out; goto out;
} }
@@ -501,14 +519,22 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto out; goto out;
} }
if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
put_page(new_page);
put_page(page);
ret |= VM_FAULT_OOM;
goto out;
}
copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
__SetPageUptodate(new_page); __SetPageUptodate(new_page);
spin_lock(&mm->page_table_lock); spin_lock(&mm->page_table_lock);
put_page(page); put_page(page);
if (unlikely(!pmd_same(*pmd, orig_pmd))) if (unlikely(!pmd_same(*pmd, orig_pmd))) {
mem_cgroup_uncharge_page(new_page);
put_page(new_page); put_page(new_page);
else { } else {
pmd_t entry; pmd_t entry;
VM_BUG_ON(!PageHead(page)); VM_BUG_ON(!PageHead(page));
entry = mk_pmd(new_page, vma->vm_page_prot); entry = mk_pmd(new_page, vma->vm_page_prot);