mm: page migration trylock newpage at same level as oldpage
Clean up page migration a little by moving the trylock of newpage from
move_to_new_page() into __unmap_and_move(), where the old page has been
locked.  Adjust unmap_and_move_huge_page() and balloon_page_migrate()
accordingly.

But make one kind-of-functional change on the way: whereas trylock of
newpage used to BUG() if it failed, now simply return -EAGAIN if so.
Cutting out BUG()s is good, right?  But, to be honest, this is really
to extend the usefulness of the custom put_new_page feature, allowing
a pool of new pages to be shared perhaps with racing uses.

Use an "else" instead of that "skip_unmap" label.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rafael Aquini <aquini@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 7db7671f83
parent 2def7424c9
2 changed files with 28 additions and 28 deletions
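The put_new_page motivation is easiest to see with a concrete caller. Below is a minimal sketch, not from this commit, of a caller-managed pool shared through the get_new_page/put_new_page callbacks of migrate_pages(); the pool_* helpers and target_pool names are invented for illustration, while the callback signatures (new_page_t, free_page_t) are those of this kernel era. A page returned to such a pool can be handed out again while a previous user is still unwinding, so the trylock of newpage can now legitimately fail, and -EAGAIN lets the migration be retried instead of BUG()ing:

#include <linux/list.h>
#include <linux/migrate.h>
#include <linux/spinlock.h>

/* Hypothetical shared pool of preallocated target pages (names invented). */
static LIST_HEAD(target_pool);
static DEFINE_SPINLOCK(target_pool_lock);

/* new_page_t callback: hand out a target page from the shared pool. */
static struct page *pool_get_new_page(struct page *page,
				      unsigned long private, int **result)
{
	struct page *newpage = NULL;

	spin_lock(&target_pool_lock);
	if (!list_empty(&target_pool)) {
		newpage = list_first_entry(&target_pool, struct page, lru);
		list_del(&newpage->lru);
	}
	spin_unlock(&target_pool_lock);
	return newpage;	/* NULL makes unmap_and_move() fail with -ENOMEM */
}

/* free_page_t callback: a failed migration returns its unused target. */
static void pool_put_new_page(struct page *newpage, unsigned long private)
{
	spin_lock(&target_pool_lock);
	list_add(&newpage->lru, &target_pool);
	spin_unlock(&target_pool_lock);
}

A caller would then pass both callbacks, e.g. migrate_pages(&pagelist, pool_get_new_page, pool_put_new_page, 0, MIGRATE_SYNC, MR_MEMORY_HOTPLUG). Pages cycled back through pool_put_new_page() can immediately be handed out again, which is exactly the racing reuse that the softened trylock tolerates.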
10 mm/balloon_compaction.c

@@ -199,23 +199,17 @@ int balloon_page_migrate(struct page *newpage,
 	struct balloon_dev_info *balloon = balloon_page_device(page);
 	int rc = -EAGAIN;
 
-	/*
-	 * Block others from accessing the 'newpage' when we get around to
-	 * establishing additional references. We should be the only one
-	 * holding a reference to the 'newpage' at this point.
-	 */
-	BUG_ON(!trylock_page(newpage));
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
 
 	if (WARN_ON(!__is_movable_balloon_page(page))) {
 		dump_page(page, "not movable balloon page");
-		unlock_page(newpage);
 		return rc;
 	}
 
 	if (balloon && balloon->migratepage)
 		rc = balloon->migratepage(balloon, newpage, page, mode);
 
-	unlock_page(newpage);
 	return rc;
 }
 #endif /* CONFIG_BALLOON_COMPACTION */
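Worth noting about the new assertions: VM_BUG_ON_PAGE() generates code only when CONFIG_DEBUG_VM is set, so balloon_page_migrate() now merely debug-checks the contract that its caller, __unmap_and_move(), holds both page locks, instead of taking and enforcing the newpage lock itself. Roughly, paraphrased from include/linux/mmdebug.h of this era (not a verbatim quote):

/* With CONFIG_DEBUG_VM the check dumps the offending page and BUGs;
 * without it the condition is only type-checked and emits no code. */
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON_PAGE(cond, page)					\
	do {								\
		if (unlikely(cond)) {					\
			dump_page(page, "VM_BUG_ON_PAGE("__stringify(cond)")");\
			BUG();						\
		}							\
	} while (0)
#else
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
#endif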
46 mm/migrate.c

@@ -727,13 +727,8 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 	struct address_space *mapping;
 	int rc;
 
-	/*
-	 * Block others from accessing the page when we get around to
-	 * establishing additional references. We are the only one
-	 * holding a reference to the new page at this point.
-	 */
-	if (!trylock_page(newpage))
-		BUG();
+	VM_BUG_ON_PAGE(!PageLocked(page), page);
+	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
 
 	/* Prepare mapping for the new page.*/
 	newpage->index = page->index;
@@ -774,9 +769,6 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 		remove_migration_ptes(page, newpage);
 		page->mapping = NULL;
 	}
-
-	unlock_page(newpage);
-
 	return rc;
 }
 
@@ -861,6 +853,17 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		}
 	}
 
+	/*
+	 * Block others from accessing the new page when we get around to
+	 * establishing additional references. We are usually the only one
+	 * holding a reference to newpage at this point. We used to have a BUG
+	 * here if trylock_page(newpage) fails, but would like to allow for
+	 * cases where there might be a race with the previous use of newpage.
+	 * This is much like races on refcount of oldpage: just don't BUG().
+	 */
+	if (unlikely(!trylock_page(newpage)))
+		goto out_unlock;
+
 	if (unlikely(isolated_balloon_page(page))) {
 		/*
 		 * A ballooned page does not need any special attention from
@@ -870,7 +873,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		 * the page migration right away (proteced by page lock).
 		 */
 		rc = balloon_page_migrate(newpage, page, mode);
-		goto out_unlock;
+		goto out_unlock_both;
 	}
 
 	/*
@@ -889,30 +892,27 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		VM_BUG_ON_PAGE(PageAnon(page), page);
 		if (page_has_private(page)) {
 			try_to_free_buffers(page);
-			goto out_unlock;
+			goto out_unlock_both;
 		}
-		goto skip_unmap;
-	}
-
-	/* Establish migration ptes or remove ptes */
-	if (page_mapped(page)) {
+	} else if (page_mapped(page)) {
+		/* Establish migration ptes */
 		try_to_unmap(page,
 			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 		page_was_mapped = 1;
 	}
 
-skip_unmap:
 	if (!page_mapped(page))
 		rc = move_to_new_page(newpage, page, page_was_mapped, mode);
 
 	if (rc && page_was_mapped)
 		remove_migration_ptes(page, page);
 
+out_unlock_both:
+	unlock_page(newpage);
+out_unlock:
 	/* Drop an anon_vma reference if we took one */
 	if (anon_vma)
 		put_anon_vma(anon_vma);
 
-out_unlock:
 	unlock_page(page);
 out:
 	return rc;
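Because the new locking ladder is spread across three hunks, here is a condensed paraphrase of the resulting shape of __unmap_and_move(); writeback, anon_vma, and balloon handling are elided, and the function name is altered to mark it as a sketch rather than the real code:

static int unmap_and_move_sketch(struct page *page, struct page *newpage,
				 enum migrate_mode mode)
{
	int rc = -EAGAIN;

	if (!trylock_page(page))		/* old page: may fail, caller retries */
		goto out;

	/* ... writeback and anon_vma handling elided ... */

	if (unlikely(!trylock_page(newpage)))	/* now at the same level: */
		goto out_unlock;		/* fail soft, -EAGAIN, retry */

	rc = move_to_new_page(newpage, page, 0, mode);	/* both pages locked here */

	unlock_page(newpage);			/* out_unlock_both: in the real code */
out_unlock:
	unlock_page(page);			/* symmetric unwind */
out:
	return rc;
}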
@@ -1056,6 +1056,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	if (PageAnon(hpage))
 		anon_vma = page_get_anon_vma(hpage);
 
+	if (unlikely(!trylock_page(new_hpage)))
+		goto put_anon;
+
 	if (page_mapped(hpage)) {
 		try_to_unmap(hpage,
 			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
@@ -1068,6 +1071,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	if (rc != MIGRATEPAGE_SUCCESS && page_was_mapped)
 		remove_migration_ptes(hpage, hpage);
 
+	unlock_page(new_hpage);
+
+put_anon:
 	if (anon_vma)
 		put_anon_vma(anon_vma);
 
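Finally, for readers outside the kernel, the pattern adopted here (trylock the destination and fail soft with -EAGAIN instead of asserting) has a compact userspace analogy. The following illustrative pthreads program is not kernel code and its names are invented; build with -pthread:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Take the "old" lock outright, but only try the "new" one: if a racing
 * user still holds it, back out and report -EAGAIN so the caller can
 * retry, rather than treating contention as a fatal bug. */
static int migrate_one(pthread_mutex_t *oldlock, pthread_mutex_t *newlock)
{
	pthread_mutex_lock(oldlock);
	if (pthread_mutex_trylock(newlock) != 0) {
		pthread_mutex_unlock(oldlock);
		return -EAGAIN;		/* destination busy: retry later */
	}
	/* ... copy contents with both sides stable ... */
	pthread_mutex_unlock(newlock);
	pthread_mutex_unlock(oldlock);
	return 0;
}

int main(void)
{
	pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

	printf("uncontended: %d\n", migrate_one(&a, &b));	/* 0 */
	pthread_mutex_lock(&b);		/* simulate a racing holder of b */
	printf("contended:   %d\n", migrate_one(&a, &b));	/* -EAGAIN */
	return 0;
}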