5ad6468801
Initial implementation for swapping out KSM's shared pages: add page_referenced_ksm() and try_to_unmap_ksm(), which rmap.c calls when faced with a PageKsm page.

Most of what's needed can be got from the rmap_items listed from the stable_node of the ksm page, without discovering the actual vma: so in this patch just fake up a struct vma for page_referenced_one() or try_to_unmap_one(), then refine that in the next patch.

Add VM_NONLINEAR to ksm_madvise()'s list of exclusions: it has always been implicit there (being only set with VM_SHARED, already excluded), but let's make it explicit, to help justify the lack of nonlinear unmap.

Rely on the page lock to protect against concurrent modifications to that page's node of the stable tree.

The awkward part is not swapout but swapin: do_swap_page() and page_add_anon_rmap() now have to allow for new possibilities - perhaps a ksm page still in swapcache, perhaps a swapcache page associated with one location in one anon_vma now needed for another location or anon_vma. (And the vma might even be no longer VM_MERGEABLE when that happens.)

ksm_might_need_to_copy() checks for that case, and supplies a duplicate page when necessary, simply leaving it to a subsequent pass of ksmd to rediscover the identity and merge them back into one ksm page. Disappointingly primitive: but the alternative would have to accumulate unswappable info about the swapped out ksm pages, limiting swappability.

Remove page_add_ksm_rmap(): page_add_anon_rmap() now has to allow for the particular case it was handling, so just use it instead.

Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Chris Wright <chrisw@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
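To make the dispatch concrete, here is a minimal sketch of how page_referenced() might branch on PageKsm in mm/rmap.c; the page_referenced_ksm() signature is assumed to mirror the anon and file cases, and the locking details are elided - this illustrates the description above, it is not the patch text:

	int page_referenced(struct page *page, int is_locked,
			    struct mem_cgroup *cnt, unsigned long *vm_flags)
	{
		int referenced = 0;

		*vm_flags = 0;
		if (page_mapped(page) && page_rmapping(page)) {
			if (unlikely(PageKsm(page)))
				/* scan the rmap_items of the stable_node */
				referenced = page_referenced_ksm(page, cnt,
								vm_flags);
			else if (PageAnon(page))
				referenced = page_referenced_anon(page, cnt,
								vm_flags);
			else if (page->mapping)
				referenced = page_referenced_file(page, cnt,
								vm_flags);
		}
		return referenced;
	}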
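On the swapin side, a hedged sketch of the do_swap_page() check described above; the exact ksm_might_need_to_copy() calling convention is an assumption here (page, vma and faulting address in; the same page or a fresh duplicate out, with NULL assumed on a failed copy):

	/*
	 * The swapcache page may be a ksm page, or a page once mapped at
	 * another address or anon_vma: let KSM supply a duplicate when
	 * necessary, and leave it to a later pass of ksmd to re-merge.
	 */
	page = ksm_might_need_to_copy(page, vma, address);
	if (!page) {			/* assumed: NULL if the copy failed */
		ret = VM_FAULT_OOM;
		goto out;
	}
	...
	page_add_anon_rmap(page, vma, address);	/* now tolerates ksm pages */

The full include/linux/rmap.h after this patch follows.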
#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	spinlock_t lock;	/* Serialize access to vma list */
	/*
	 * NOTE: the LSB of the head.next is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * head must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB bit itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */
	struct list_head head;	/* List of private "related" vmas */
};
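/*
 * Illustrative sketch of the NOTE above (not part of the original header,
 * and the exact mm_take_all_locks() code is an assumption): marking an
 * anon_vma amounts to roughly
 *
 *	spin_lock(&anon_vma->lock);
 *	set_bit(0, (unsigned long *)&anon_vma->head.next);
 *
 * with the bit itself serialized by mm_all_locks_mutex.
 */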

#ifdef CONFIG_MMU

static inline struct anon_vma *page_anon_vma(struct page *page)
{
	if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
					    PAGE_MAPPING_ANON)
		return NULL;
	return page_rmapping(page);
}

static inline void anon_vma_lock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_lock(&anon_vma->lock);
}

static inline void anon_vma_unlock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_unlock(&anon_vma->lock);
}

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int  anon_vma_prepare(struct vm_area_struct *);
void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *cnt, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);

enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
	TTU_MUNLOCK = 2,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
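/*
 * Illustrative use (an example added here, not original header text):
 * callers combine one action mode with modifier bits, and handlers
 * recover the action with TTU_ACTION(), e.g.
 *
 *	ret = try_to_unmap(page, TTU_UNMAP | TTU_IGNORE_ACCESS);
 *	...
 *	if (TTU_ACTION(flags) == TTU_MUNLOCK) ...
 */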

int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
			unsigned long address, enum ttu_flags flags);

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *page_check_address(struct page *, struct mm_struct *,
				unsigned long, spinlock_t **, int);

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *cnt,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return TestClearPageReferenced(page);
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3

#endif	/* _LINUX_RMAP_H */
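For context, a hedged sketch of how a reclaim path might act on these return values; only the SWAP_* names and try_to_unmap() come from this header, the goto labels are placeholders:

	switch (try_to_unmap(page, TTU_UNMAP)) {
	case SWAP_FAIL:
		goto activate_locked;	/* could not unmap: keep page active */
	case SWAP_AGAIN:
		goto keep_locked;	/* transient failure: retry later */
	case SWAP_MLOCK:
		goto cull_mlocked;	/* page turned out to be mlocked */
	case SWAP_SUCCESS:
		break;			/* all ptes removed: try to free it */
	}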