mm: migration: share the anon_vma ref counts between KSM and page migration
For clarity of review, KSM and page migration have separate refcounts on
the anon_vma.  While clear, this is a waste of memory.  This patch gets
KSM and page migration to share their toys in a spirit of harmony.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3f6c82728f
commit 7f60c214fd
4 changed files with 24 additions and 40 deletions
include/linux/rmap.h
@@ -26,11 +26,17 @@
  */
 struct anon_vma {
 	spinlock_t lock;	/* Serialize access to vma list */
-#ifdef CONFIG_KSM
-	atomic_t ksm_refcount;
-#endif
-#ifdef CONFIG_MIGRATION
-	atomic_t migrate_refcount;
+#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
+
+	/*
+	 * The external_refcount is taken by either KSM or page migration
+	 * to take a reference to an anon_vma when there is no
+	 * guarantee that the vma of page tables will exist for
+	 * the duration of the operation. A caller that takes
+	 * the reference is responsible for clearing up the
+	 * anon_vma if they are the last user on release
+	 */
+	atomic_t external_refcount;
 #endif
 	/*
 	 * NOTE: the LSB of the head.next is set by
@@ -64,46 +70,26 @@ struct anon_vma_chain {
 };

 #ifdef CONFIG_MMU
-#ifdef CONFIG_KSM
-static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
+static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
 {
-	atomic_set(&anon_vma->ksm_refcount, 0);
+	atomic_set(&anon_vma->external_refcount, 0);
 }

-static inline int ksm_refcount(struct anon_vma *anon_vma)
+static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
 {
-	return atomic_read(&anon_vma->ksm_refcount);
+	return atomic_read(&anon_vma->external_refcount);
 }
 #else
-static inline void ksm_refcount_init(struct anon_vma *anon_vma)
+static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
 {
 }

-static inline int ksm_refcount(struct anon_vma *anon_vma)
+static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
 {
 	return 0;
 }
 #endif /* CONFIG_KSM */
-#ifdef CONFIG_MIGRATION
-static inline void migrate_refcount_init(struct anon_vma *anon_vma)
-{
-	atomic_set(&anon_vma->migrate_refcount, 0);
-}
-
-static inline int migrate_refcount(struct anon_vma *anon_vma)
-{
-	return atomic_read(&anon_vma->migrate_refcount);
-}
-#else
-static inline void migrate_refcount_init(struct anon_vma *anon_vma)
-{
-}
-
-static inline int migrate_refcount(struct anon_vma *anon_vma)
-{
-	return 0;
-}
-#endif /* CONFIG_MIGRATE */

 static inline struct anon_vma *page_anon_vma(struct page *page)
 {
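The comment added to struct anon_vma spells out the contract: KSM or page migration bumps external_refcount to pin the anon_vma beyond the lifetime of its vmas, and whoever drops the last reference must free the anon_vma if its vma list emptied in the meantime. A minimal sketch of that pattern, modelled on the hold_anon_vma()/drop_anon_vma() call sites changed below (the pin/unpin names here are illustrative only, not part of the patch; anon_vma_free() is the existing destructor those call sites use):

static void pin_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->external_refcount);
}

static void unpin_anon_vma(struct anon_vma *anon_vma)
{
	/* Returns true, with anon_vma->lock held, only for the last reference */
	if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
		int empty = list_empty(&anon_vma->head);
		spin_unlock(&anon_vma->lock);
		if (empty)
			anon_vma_free(anon_vma);
	}
}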
mm/ksm.c
@@ -318,14 +318,14 @@ static void hold_anon_vma(struct rmap_item *rmap_item,
 			  struct anon_vma *anon_vma)
 {
 	rmap_item->anon_vma = anon_vma;
-	atomic_inc(&anon_vma->ksm_refcount);
+	atomic_inc(&anon_vma->external_refcount);
 }

 static void drop_anon_vma(struct rmap_item *rmap_item)
 {
 	struct anon_vma *anon_vma = rmap_item->anon_vma;

-	if (atomic_dec_and_lock(&anon_vma->ksm_refcount, &anon_vma->lock)) {
+	if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
 		int empty = list_empty(&anon_vma->head);
 		spin_unlock(&anon_vma->lock);
 		if (empty)
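Both release paths rely on atomic_dec_and_lock(): it returns true, with the spinlock already held, only when the decrement hits zero, so the list_empty() check and any subsequent free cannot race with another thread linking or unlinking vmas. A sketch of its semantics, patterned after the generic implementation in lib/dec_and_lock.c (illustrative, not part of this patch):

static int dec_and_lock_sketch(atomic_t *cnt, spinlock_t *lock)
{
	/* Fast path: decrement without the lock if we are not the last user */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;

	/* Slow path: take the lock for the final decrement */
	spin_lock(lock);
	if (atomic_dec_and_test(cnt))
		return 1;	/* hit zero: caller owns the lock */
	spin_unlock(lock);
	return 0;
}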
mm/migrate.c
@@ -601,7 +601,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 		rcu_read_lock();
 		rcu_locked = 1;
 		anon_vma = page_anon_vma(page);
-		atomic_inc(&anon_vma->migrate_refcount);
+		atomic_inc(&anon_vma->external_refcount);
 	}

 	/*
@@ -643,7 +643,7 @@ skip_unmap:
 rcu_unlock:

 	/* Drop an anon_vma reference if we took one */
-	if (anon_vma && atomic_dec_and_lock(&anon_vma->migrate_refcount, &anon_vma->lock)) {
+	if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
 		int empty = list_empty(&anon_vma->head);
 		spin_unlock(&anon_vma->lock);
 		if (empty)
mm/rmap.c
@@ -250,8 +250,7 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
 	list_del(&anon_vma_chain->same_anon_vma);

 	/* We must garbage collect the anon_vma if it's empty */
-	empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma) &&
-					!migrate_refcount(anon_vma);
+	empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma);
 	spin_unlock(&anon_vma->lock);

 	if (empty)
@@ -275,8 +274,7 @@ static void anon_vma_ctor(void *data)
 	struct anon_vma *anon_vma = data;

 	spin_lock_init(&anon_vma->lock);
-	ksm_refcount_init(anon_vma);
-	migrate_refcount_init(anon_vma);
+	anonvma_external_refcount_init(anon_vma);
 	INIT_LIST_HEAD(&anon_vma->head);
 }
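When neither CONFIG_KSM nor CONFIG_MIGRATION is enabled, the stub anonvma_external_refcount() from rmap.h returns 0 and the external_refcount field is compiled out, so the garbage-collection test above adds no overhead on such configurations; effectively the compiler sees:

	/* stub returns 0, so */
	empty = list_empty(&anon_vma->head) && !0;
	/* which folds to */
	empty = list_empty(&anon_vma->head);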