page cache: Add and replace pages using the XArray
Use the XArray APIs to add and replace pages in the page cache.  This
removes two uses of the radix tree preload API and is significantly
shorter code.  It also removes the last user of __radix_tree_create()
outside radix-tree.c itself, so make it static.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 0d3f929666
commit 74d609585d
4 changed files with 66 additions and 90 deletions
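
For reference, the core XArray idiom this patch adopts, sketched as a
standalone helper (the function and variable names are illustrative, not
part of the patch): instead of pre-allocating nodes with
radix_tree_preload(), the store is attempted under the lock, and
xas_nomem() allocates any missing node afterwards and triggers a retry.

    #include <linux/xarray.h>

    /* Minimal sketch: store an entry, allocating nodes as needed. */
    static int store_entry(struct xarray *xa, unsigned long index,
                           void *entry, gfp_t gfp)
    {
            XA_STATE(xas, xa, index);

            do {
                    xas_lock(&xas);
                    xas_store(&xas, entry); /* may record -ENOMEM in xas */
                    xas_unlock(&xas);
            } while (xas_nomem(&xas, gfp)); /* allocate unlocked, retry */

            return xas_error(&xas);
    }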

include/linux/radix-tree.h
@@ -231,9 +231,6 @@ static inline int radix_tree_exception(void *arg)
 	return unlikely((unsigned long)arg & RADIX_TREE_ENTRY_MASK);
 }
 
-int __radix_tree_create(struct radix_tree_root *, unsigned long index,
-			unsigned order, struct radix_tree_node **nodep,
-			void __rcu ***slotp);
 int __radix_tree_insert(struct radix_tree_root *, unsigned long index,
 			unsigned order, void *);
 static inline int radix_tree_insert(struct radix_tree_root *root,

include/linux/swap.h
@@ -299,8 +299,12 @@ void *workingset_eviction(struct address_space *mapping, struct page *page);
 bool workingset_refault(void *shadow);
 void workingset_activation(struct page *page);
 
-/* Do not use directly, use workingset_lookup_update */
-void workingset_update_node(struct radix_tree_node *node);
+/* Only track the nodes of mappings with shadow entries */
+void workingset_update_node(struct xa_node *node);
+#define mapping_set_update(xas, mapping) do {				\
+	if (!dax_mapping(mapping) && !shmem_mapping(mapping))		\
+		xas_set_update(xas, workingset_update_node);		\
+} while (0)
 
 /* Returns workingset_update_node() if the mapping has shadow entries. */
 #define workingset_lookup_update(mapping)				\
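
The new mapping_set_update() helper wires workingset node tracking into
an XArray operation: xas_set_update() registers a callback that is
invoked for every tree node the operation touches.  A sketch of the
expected usage, mirroring what __add_to_page_cache_locked() does later
in this patch (variable names illustrative):

    XA_STATE(xas, &mapping->i_pages, index);

    /* No-op for DAX and shmem mappings, which are excluded from
     * workingset node tracking. */
    mapping_set_update(&xas, mapping);

    xas_lock_irq(&xas);
    xas_store(&xas, page);  /* node changes call workingset_update_node() */
    xas_unlock_irq(&xas);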

lib/radix-tree.c
@@ -700,9 +700,9 @@ static bool delete_node(struct radix_tree_root *root,
  *
  *	Returns -ENOMEM, or 0 for success.
  */
-int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
-			unsigned order, struct radix_tree_node **nodep,
-			void __rcu ***slotp)
+static int __radix_tree_create(struct radix_tree_root *root,
+			unsigned long index, unsigned order,
+			struct radix_tree_node **nodep, void __rcu ***slotp)
 {
 	struct radix_tree_node *node = NULL, *child;
 	void __rcu **slot = (void __rcu **)&root->xa_head;

mm/filemap.c
@@ -111,35 +111,6 @@
  *    ->tasklist_lock            (memory_failure, collect_procs_ao)
  */
 
-static int page_cache_tree_insert(struct address_space *mapping,
-				  struct page *page, void **shadowp)
-{
-	struct radix_tree_node *node;
-	void **slot;
-	int error;
-
-	error = __radix_tree_create(&mapping->i_pages, page->index, 0,
-				    &node, &slot);
-	if (error)
-		return error;
-	if (*slot) {
-		void *p;
-
-		p = radix_tree_deref_slot_protected(slot,
-						    &mapping->i_pages.xa_lock);
-		if (!xa_is_value(p))
-			return -EEXIST;
-
-		mapping->nrexceptional--;
-		if (shadowp)
-			*shadowp = p;
-	}
-	__radix_tree_replace(&mapping->i_pages, node, slot, page,
-			     workingset_lookup_update(mapping));
-	mapping->nrpages++;
-	return 0;
-}
-
 static void page_cache_tree_delete(struct address_space *mapping,
 				   struct page *page, void *shadow)
 {
@@ -775,51 +746,44 @@ EXPORT_SYMBOL(file_write_and_wait_range);
  * locked. This function does not add the new page to the LRU, the
  * caller must do that.
  *
- * The remove + add is atomic.  The only way this function can fail is
- * memory allocation failure.
+ * The remove + add is atomic.  This function cannot fail.
  */
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 {
-	int error;
+	struct address_space *mapping = old->mapping;
+	void (*freepage)(struct page *) = mapping->a_ops->freepage;
+	pgoff_t offset = old->index;
+	XA_STATE(xas, &mapping->i_pages, offset);
+	unsigned long flags;
 
 	VM_BUG_ON_PAGE(!PageLocked(old), old);
 	VM_BUG_ON_PAGE(!PageLocked(new), new);
 	VM_BUG_ON_PAGE(new->mapping, new);
 
-	error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
-	if (!error) {
-		struct address_space *mapping = old->mapping;
-		void (*freepage)(struct page *);
-		unsigned long flags;
-
-		pgoff_t offset = old->index;
-		freepage = mapping->a_ops->freepage;
-
-		get_page(new);
-		new->mapping = mapping;
-		new->index = offset;
-
-		xa_lock_irqsave(&mapping->i_pages, flags);
-		__delete_from_page_cache(old, NULL);
-		error = page_cache_tree_insert(mapping, new, NULL);
-		BUG_ON(error);
-
-		/*
-		 * hugetlb pages do not participate in page cache accounting.
-		 */
-		if (!PageHuge(new))
-			__inc_node_page_state(new, NR_FILE_PAGES);
-		if (PageSwapBacked(new))
-			__inc_node_page_state(new, NR_SHMEM);
-		xa_unlock_irqrestore(&mapping->i_pages, flags);
-		mem_cgroup_migrate(old, new);
-		radix_tree_preload_end();
-		if (freepage)
-			freepage(old);
-		put_page(old);
-	}
+	get_page(new);
+	new->mapping = mapping;
+	new->index = offset;
 
-	return error;
+	xas_lock_irqsave(&xas, flags);
+	xas_store(&xas, new);
+
+	old->mapping = NULL;
+	/* hugetlb pages do not participate in page cache accounting. */
+	if (!PageHuge(old))
+		__dec_node_page_state(new, NR_FILE_PAGES);
+	if (!PageHuge(new))
+		__inc_node_page_state(new, NR_FILE_PAGES);
+	if (PageSwapBacked(old))
+		__dec_node_page_state(new, NR_SHMEM);
+	if (PageSwapBacked(new))
+		__inc_node_page_state(new, NR_SHMEM);
+	xas_unlock_irqrestore(&xas, flags);
+	mem_cgroup_migrate(old, new);
+	if (freepage)
+		freepage(old);
+	put_page(old);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(replace_page_cache_page);
 
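The reason the function can no longer fail: xas_store() at an index that
already holds an entry reuses the existing slot, so no node allocation is
needed and -ENOMEM is impossible.  A condensed view of the new locked
region, as it appears in the hunk above:

    xas_lock_irqsave(&xas, flags);
    xas_store(&xas, new);   /* overwrites 'old' in place; cannot fail */
    xas_unlock_irqrestore(&xas, flags);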
@@ -828,12 +792,15 @@ static int __add_to_page_cache_locked(struct page *page,
 				      pgoff_t offset, gfp_t gfp_mask,
 				      void **shadowp)
 {
+	XA_STATE(xas, &mapping->i_pages, offset);
 	int huge = PageHuge(page);
 	struct mem_cgroup *memcg;
 	int error;
+	void *old;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
+	mapping_set_update(&xas, mapping);
 
 	if (!huge) {
 		error = mem_cgroup_try_charge(page, current->mm,
@@ -842,39 +809,47 @@ static int __add_to_page_cache_locked(struct page *page,
 			return error;
 	}
 
-	error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
-	if (error) {
-		if (!huge)
-			mem_cgroup_cancel_charge(page, memcg, false);
-		return error;
-	}
-
 	get_page(page);
 	page->mapping = mapping;
 	page->index = offset;
 
-	xa_lock_irq(&mapping->i_pages);
-	error = page_cache_tree_insert(mapping, page, shadowp);
-	radix_tree_preload_end();
-	if (unlikely(error))
-		goto err_insert;
+	do {
+		xas_lock_irq(&xas);
+		old = xas_load(&xas);
+		if (old && !xa_is_value(old))
+			xas_set_err(&xas, -EEXIST);
+		xas_store(&xas, page);
+		if (xas_error(&xas))
+			goto unlock;
+
+		if (xa_is_value(old)) {
+			mapping->nrexceptional--;
+			if (shadowp)
+				*shadowp = old;
+		}
+		mapping->nrpages++;
+
+		/* hugetlb pages do not participate in page cache accounting */
+		if (!huge)
+			__inc_node_page_state(page, NR_FILE_PAGES);
+unlock:
+		xas_unlock_irq(&xas);
+	} while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
+
+	if (xas_error(&xas))
+		goto error;
 
-	/* hugetlb pages do not participate in page cache accounting. */
-	if (!huge)
-		__inc_node_page_state(page, NR_FILE_PAGES);
-	xa_unlock_irq(&mapping->i_pages);
 	if (!huge)
 		mem_cgroup_commit_charge(page, memcg, false, false);
 	trace_mm_filemap_add_to_page_cache(page);
 	return 0;
-err_insert:
+error:
 	page->mapping = NULL;
 	/* Leave page->index set: truncation relies upon it */
-	xa_unlock_irq(&mapping->i_pages);
 	if (!huge)
 		mem_cgroup_cancel_charge(page, memcg, false);
 	put_page(page);
-	return error;
+	return xas_error(&xas);
 }
 
 /**
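
Putting the pieces together, the insertion path above follows a
load/check/store pattern.  A self-contained sketch under assumed names
(cache_insert() is hypothetical; the xas_* calls mirror the patch):

    /* Insert 'page' at 'index' unless a real entry is already present. */
    static int cache_insert(struct xarray *xa, unsigned long index,
                            void *page, gfp_t gfp)
    {
            XA_STATE(xas, xa, index);
            void *old;

            do {
                    xas_lock_irq(&xas);
                    old = xas_load(&xas);           /* current entry, if any */
                    if (old && !xa_is_value(old))   /* real page, not shadow */
                            xas_set_err(&xas, -EEXIST);
                    else
                            xas_store(&xas, page);  /* may record -ENOMEM */
                    xas_unlock_irq(&xas);
            } while (xas_nomem(&xas, gfp));         /* allocate unlocked, retry */

            return xas_error(&xas);
    }

Shadow (value) entries are overwritten rather than rejected, which is how
the behaviour of the removed page_cache_tree_insert() is preserved.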