mm: make ttu's return boolean
try_to_unmap() returns SWAP_SUCCESS or SWAP_FAIL, so it is suitable for a boolean return. This patch changes it accordingly.

Link: http://lkml.kernel.org/r/1489555493-14659-8-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 33fc80e257
commit 666e5a406c

5 changed files with 21 additions and 30 deletions
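For illustration only, here is a minimal userspace C sketch of the conversion pattern this patch applies at every call site below. The stub functions, the SWAP_* macros, and the page pointer are hypothetical stand-ins, not kernel code.

	#include <stdbool.h>
	#include <stdio.h>

	/* Old-style return codes: only two values were ever returned. */
	#define SWAP_SUCCESS	0
	#define SWAP_FAIL	1

	/* Hypothetical stand-in for try_to_unmap() before this patch. */
	static int try_to_unmap_old(void *page)
	{
		return SWAP_SUCCESS;
	}

	/* Hypothetical stand-in for try_to_unmap() after this patch. */
	static bool try_to_unmap_new(void *page)
	{
		return true;
	}

	int main(void)
	{
		void *page = NULL;

		/* Before: callers compared the int against SWAP_SUCCESS. */
		if (try_to_unmap_old(page) != SWAP_SUCCESS)
			printf("unmap failed\n");

		/* After: callers simply test the boolean result. */
		if (!try_to_unmap_new(page))
			printf("unmap failed\n");

		return 0;
	}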
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -191,7 +191,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
 int page_referenced(struct page *, int is_locked,
 			struct mem_cgroup *memcg, unsigned long *vm_flags);
 
-int try_to_unmap(struct page *, enum ttu_flags flags);
+bool try_to_unmap(struct page *, enum ttu_flags flags);
 
 /* Avoid racy checks */
 #define PVMW_SYNC		(1 << 0)
@@ -281,7 +281,7 @@ static inline int page_referenced(struct page *page, int is_locked,
 	return 0;
 }
 
-#define try_to_unmap(page, refs) SWAP_FAIL
+#define try_to_unmap(page, refs) false
 
 static inline int page_mkclean(struct page *page)
 {
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2144,15 +2144,15 @@ static void freeze_page(struct page *page)
 {
 	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
 		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
-	int ret;
+	bool unmap_success;
 
 	VM_BUG_ON_PAGE(!PageHead(page), page);
 
 	if (PageAnon(page))
 		ttu_flags |= TTU_MIGRATION;
 
-	ret = try_to_unmap(page, ttu_flags);
-	VM_BUG_ON_PAGE(ret, page);
+	unmap_success = try_to_unmap(page, ttu_flags);
+	VM_BUG_ON_PAGE(!unmap_success, page);
 }
 
 static void unfreeze_page(struct page *page)
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -322,7 +322,7 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
  * wrong earlier.
  */
 static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
-			  int fail, struct page *page, unsigned long pfn,
+			  bool fail, struct page *page, unsigned long pfn,
 			  int flags)
 {
 	struct to_kill *tk, *next;
@@ -904,13 +904,13 @@ EXPORT_SYMBOL_GPL(get_hwpoison_page);
  * Do all that is necessary to remove user space mappings. Unmap
  * the pages and send SIGBUS to the processes if the data was dirty.
  */
-static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
+static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 				  int trapno, int flags, struct page **hpagep)
 {
 	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
 	struct address_space *mapping;
 	LIST_HEAD(tokill);
-	int ret;
+	bool unmap_success;
 	int kill = 1, forcekill;
 	struct page *hpage = *hpagep;
 
@@ -919,20 +919,20 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * other types of pages.
 	 */
 	if (PageReserved(p) || PageSlab(p))
-		return SWAP_SUCCESS;
+		return true;
 	if (!(PageLRU(hpage) || PageHuge(p)))
-		return SWAP_SUCCESS;
+		return true;
 
 	/*
 	 * This check implies we don't kill processes if their pages
 	 * are in the swap cache early. Those are always late kills.
 	 */
 	if (!page_mapped(hpage))
-		return SWAP_SUCCESS;
+		return true;
 
 	if (PageKsm(p)) {
 		pr_err("Memory failure: %#lx: can't handle KSM pages.\n", pfn);
-		return SWAP_FAIL;
+		return false;
 	}
 
 	if (PageSwapCache(p)) {
@@ -971,8 +971,8 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	if (kill)
 		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
 
-	ret = try_to_unmap(hpage, ttu);
-	if (ret != SWAP_SUCCESS)
+	unmap_success = try_to_unmap(hpage, ttu);
+	if (!unmap_success)
 		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
 		       pfn, page_mapcount(hpage));
 
@@ -987,10 +987,9 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * any accesses to the poisoned memory.
 	 */
 	forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
-	kill_procs(&tokill, forcekill, trapno,
-		      ret != SWAP_SUCCESS, p, pfn, flags);
+	kill_procs(&tokill, forcekill, trapno, !unmap_success, p, pfn, flags);
 
-	return ret;
+	return unmap_success;
 }
 
 static void set_page_hwpoison_huge_page(struct page *hpage)
@@ -1230,8 +1229,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
 	 * When the raw error page is thp tail page, hpage points to the raw
 	 * page after thp split.
 	 */
-	if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
-	    != SWAP_SUCCESS) {
+	if (!hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)) {
 		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
 		res = -EBUSY;
 		goto out;
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1501,12 +1501,10 @@ static int page_mapcount_is_zero(struct page *page)
  *
  * Tries to remove all the page table entries which are mapping this
  * page, used in the pageout path. Caller must hold the page lock.
- * Return values are:
  *
- * SWAP_SUCCESS	- we succeeded in removing all mappings
- * SWAP_FAIL	- the page is unswappable
+ * If unmap is successful, return true. Otherwise, false.
  */
-int try_to_unmap(struct page *page, enum ttu_flags flags)
+bool try_to_unmap(struct page *page, enum ttu_flags flags)
 {
 	struct rmap_walk_control rwc = {
 		.rmap_one = try_to_unmap_one,
@@ -1531,7 +1529,7 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 	else
 		rmap_walk(page, &rwc);
 
-	return !page_mapcount(page) ? SWAP_SUCCESS : SWAP_FAIL;
+	return !page_mapcount(page) ? true : false;
 }
 
 static int page_not_mapped(struct page *page)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -972,7 +972,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	int may_enter_fs;
 	enum page_references references = PAGEREF_RECLAIM_CLEAN;
 	bool dirty, writeback;
-	int ret = SWAP_SUCCESS;
 
 	cond_resched();
 
@@ -1145,13 +1144,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * processes. Try to unmap it here.
 		 */
 		if (page_mapped(page)) {
-			switch (ret = try_to_unmap(page,
-				ttu_flags | TTU_BATCH_FLUSH)) {
-			case SWAP_FAIL:
+			if (!try_to_unmap(page, ttu_flags | TTU_BATCH_FLUSH)) {
 				nr_unmap_fail++;
 				goto activate_locked;
-			case SWAP_SUCCESS:
-				; /* try to free the page below */
 			}
 		}