migrate_pages: try to split pages on queuing
We are not able to migrate THPs. It means it's not enough to split only the PMD on migration -- we need to split the compound page under it too.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
e9b61f1985
commit
248db92da1
1 changed file with 38 additions and 4 deletions
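Not part of the commit, but for context: queue_pages_pte_range() is reached from userspace through mbind(2) with MPOL_MF_MOVE (or migrate_pages(2)/move_pages(2)). Below is a minimal sketch that exercises this path, assuming libnuma headers (build with gcc -O2 sketch.c -lnuma), transparent hugepages enabled, and a NUMA machine that actually has a node 1. With this patch the intent is that the compound page backing such a region is split at queuing time so its base pages can be migrated individually.

/*
 * Sketch only, not from the commit: touch a (hopefully) THP-backed
 * region, then ask the kernel to move it to node 1.  mbind() with
 * MPOL_MF_MOVE walks the mapping through queue_pages_pte_range(),
 * the function changed by the diff below.
 */
#define _GNU_SOURCE
#include <numaif.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4UL << 20;		/* 4 MiB: room for a couple of 2 MiB THPs */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	madvise(buf, len, MADV_HUGEPAGE);	/* ask for THP backing (best effort) */
	memset(buf, 0xaa, len);			/* fault the pages in */

	unsigned long nodemask = 1UL << 1;	/* target node 1 (assumed to exist) */
	if (mbind(buf, len, MPOL_BIND, &nodemask, 8 * sizeof(nodemask),
		  MPOL_MF_MOVE | MPOL_MF_STRICT))
		perror("mbind");
	else
		puts("region queued and migrated to node 1");

	munmap(buf, len);
	return 0;
}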
mm/mempolicy.c

@@ -489,14 +489,33 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	struct page *page;
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
-	int nid;
+	int nid, ret;
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	split_huge_pmd(vma, pmd, addr);
-	if (pmd_trans_unstable(pmd))
-		return 0;
+	if (pmd_trans_huge(*pmd)) {
+		ptl = pmd_lock(walk->mm, pmd);
+		if (pmd_trans_huge(*pmd)) {
+			page = pmd_page(*pmd);
+			if (is_huge_zero_page(page)) {
+				spin_unlock(ptl);
+				split_huge_pmd(vma, pmd, addr);
+			} else {
+				get_page(page);
+				spin_unlock(ptl);
+				lock_page(page);
+				ret = split_huge_page(page);
+				unlock_page(page);
+				put_page(page);
+				if (ret)
+					return 0;
+			}
+		} else {
+			spin_unlock(ptl);
+		}
+	}
 
+retry:
 	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
 		if (!pte_present(*pte))
@@ -513,6 +532,21 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 		nid = page_to_nid(page);
 		if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
 			continue;
+		if (PageTail(page) && PageAnon(page)) {
+			get_page(page);
+			pte_unmap_unlock(pte, ptl);
+			lock_page(page);
+			ret = split_huge_page(page);
+			unlock_page(page);
+			put_page(page);
+			/* Failed to split -- skip. */
+			if (ret) {
+				pte = pte_offset_map_lock(walk->mm, pmd,
+						addr, &ptl);
+				continue;
+			}
+			goto retry;
+		}
 
 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 			migrate_page_add(page, qp->pagelist, flags);
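A hypothetical way to observe the result from userspace, complementing the sketch above: with a NULL nodes array, move_pages(2) only queries where each base page currently resides, so after a successful split-and-migrate every base page of the region should report the target node. The helper below (its name is made up) could be called on the same buffer right after the mbind() call; it needs the same -lnuma link.

#include <numaif.h>
#include <stdio.h>
#include <unistd.h>

/* Query-only use of move_pages(): nodes == NULL means "report, don't move". */
static void report_backing_nodes(void *buf, size_t len)
{
	long page_size = sysconf(_SC_PAGESIZE);
	unsigned long count = len / page_size;
	void *pages[count];
	int status[count];

	for (unsigned long i = 0; i < count; i++)
		pages[i] = (char *)buf + i * page_size;

	if (move_pages(0 /* current process */, count, pages, NULL, status, 0)) {
		perror("move_pages");
		return;
	}
	/* Each status entry is a node id, or a negative errno for that page. */
	for (unsigned long i = 0; i < count; i++)
		printf("base page %4lu: node %d\n", i, status[i]);
}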