f94a62e910
Introduce a set of macros that generate functions to handle page flags.

A page flag function group typically starts with either

	PAGEFLAG(<part of function name>, <part of PG_ flagname>)

to create a set of page flag operations that are atomic, or

	__PAGEFLAG(<part of function name>, <part of PG_ flagname>)

to create a set of page flag operations that are not atomic.

Then additional operations can be added using the following macros:

	TESTSCFLAG	Create atomic test-and-set and test-and-clear functions
	TESTSETFLAG	Create an atomic test-and-set function
	TESTCLEARFLAG	Create an atomic test-and-clear function
	SETPAGEFLAG	Create an atomic set function
	CLEARPAGEFLAG	Create an atomic clear function
	__SETPAGEFLAG	Create a non-atomic set function
	__CLEARPAGEFLAG	Create a non-atomic clear function

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
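For illustration only (this expansion example is not part of the patch; the patch does not actually apply the macros to PG_dirty): a single line such as

	PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty)

would generate the following static inline helpers:

	static inline int PageDirty(struct page *page)
		{ return test_bit(PG_dirty, &page->flags); }
	static inline void SetPageDirty(struct page *page)
		{ set_bit(PG_dirty, &page->flags); }
	static inline void ClearPageDirty(struct page *page)
		{ clear_bit(PG_dirty, &page->flags); }
	static inline int TestSetPageDirty(struct page *page)
		{ return test_and_set_bit(PG_dirty, &page->flags); }
	static inline int TestClearPageDirty(struct page *page)
		{ return test_and_clear_bit(PG_dirty, &page->flags); }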
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <linux/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist (eg empty_bad_page)...
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for its own usage.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid.  When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual address
 * space, they need to be kmapped separately for doing IO on the pages.  The
 * struct page (which carries these flag bits) is always mapped into kernel
 * address space...
 *
 * PG_buddy is set to indicate that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 */

/*
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
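/*
 * Illustrative aside (added commentary about the companion helpers, not text
 * from this header): <linux/mm.h> recovers the fields area by shifting down
 * from the top of the word; page_zonenum(), for example, amounts to roughly
 *
 *	(page->flags >> ZONES_PGSHIFT) & ZONES_MASK
 *
 * while the flag operations defined below only ever touch bits
 * 0..NR_PAGEFLAGS-1.
 */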
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_error,
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_checked = PG_owner_priv_1,	/* Used by some filesystems */
	PG_pinned = PG_owner_priv_1,	/* Xen pinned pagetable */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_writeback,		/* Page is under writeback */
	PG_compound,		/* A compound page */
	PG_swapcache,		/* Swap page: swp_entry_t in private */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
	PG_readahead = PG_reclaim,	/* Reminder to do async read-ahead */
	PG_buddy,		/* Page is free, on buddy lists */

#if (BITS_PER_LONG > 32)
/*
 * 64-bit-only flags build down from bit 31
 *
 * 32 bit  -------------------------------| FIELDS |       FLAGS         |
 * 64 bit  |           FIELDS             | ??????         FLAGS         |
 *         63                            32                              0
 */
	PG_uncached = 31,	/* Page has been mapped as uncached */
#endif
	__NR_PAGEFLAGS
};

#ifndef __GENERATING_BOUNDS_H

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname)					\
static inline int Page##uname(struct page *page)			\
	{ return test_bit(PG_##lname, &page->flags); }

#define SETPAGEFLAG(uname, lname)					\
static inline void SetPage##uname(struct page *page)			\
	{ set_bit(PG_##lname, &page->flags); }

#define CLEARPAGEFLAG(uname, lname)					\
static inline void ClearPage##uname(struct page *page)			\
	{ clear_bit(PG_##lname, &page->flags); }

#define __SETPAGEFLAG(uname, lname)					\
static inline void __SetPage##uname(struct page *page)			\
	{ __set_bit(PG_##lname, &page->flags); }

#define __CLEARPAGEFLAG(uname, lname)					\
static inline void __ClearPage##uname(struct page *page)		\
	{ __clear_bit(PG_##lname, &page->flags); }

#define TESTSETFLAG(uname, lname)					\
static inline int TestSetPage##uname(struct page *page)		\
	{ return test_and_set_bit(PG_##lname, &page->flags); }

#define TESTCLEARFLAG(uname, lname)					\
static inline int TestClearPage##uname(struct page *page)		\
	{ return test_and_clear_bit(PG_##lname, &page->flags); }


#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname)		\
	SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname)

#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname)		\
	__SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname)

#define TESTSCFLAG(uname, lname)					\
	TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)

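/*
 * Usage sketch (illustrative, not part of the original header): wiring a
 * flag up through the macros above, for example
 *
 *	PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
 *
 * gives callers PageReferenced(), SetPageReferenced(), ClearPageReferenced()
 * and TestClearPageReferenced().  The __-prefixed variants expand to the
 * non-atomic __set_bit()/__clear_bit() and are only safe while nobody else
 * can concurrently modify page->flags, e.g. while a page is being allocated
 * or freed.
 */
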
/*
 * Manipulation of page state flags
 */
#define PageLocked(page)		\
		test_bit(PG_locked, &(page)->flags)
#define SetPageLocked(page)		\
		set_bit(PG_locked, &(page)->flags)
#define TestSetPageLocked(page)		\
		test_and_set_bit(PG_locked, &(page)->flags)
#define ClearPageLocked(page)		\
		clear_bit(PG_locked, &(page)->flags)
#define TestClearPageLocked(page)	\
		test_and_clear_bit(PG_locked, &(page)->flags)

#define PageError(page)		test_bit(PG_error, &(page)->flags)
#define SetPageError(page)	set_bit(PG_error, &(page)->flags)
#define ClearPageError(page)	clear_bit(PG_error, &(page)->flags)

#define PageReferenced(page)	test_bit(PG_referenced, &(page)->flags)
#define SetPageReferenced(page)	set_bit(PG_referenced, &(page)->flags)
#define ClearPageReferenced(page)	clear_bit(PG_referenced, &(page)->flags)
#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)

static inline int PageUptodate(struct page *page)
{
	int ret = test_bit(PG_uptodate, &(page)->flags);

	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline void __SetPageUptodate(struct page *page)
{
	smp_wmb();
	__set_bit(PG_uptodate, &(page)->flags);
#ifdef CONFIG_S390
	page_clear_dirty(page);
#endif
}

static inline void SetPageUptodate(struct page *page)
{
#ifdef CONFIG_S390
	if (!test_and_set_bit(PG_uptodate, &page->flags))
		page_clear_dirty(page);
#else
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 *
	 * s390 doesn't need an explicit smp_wmb here because the test and
	 * set bit already provides full barriers.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &(page)->flags);
#endif
}

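/*
 * Illustration of the barrier pairing above (added commentary, not from the
 * original file):
 *
 *	writer				reader
 *	------				------
 *	fill page contents		if (PageUptodate(page))	[implies smp_rmb()]
 *	smp_wmb()				use page contents
 *	SetPageUptodate(page)
 *
 * A reader that sees PG_uptodate set is therefore also guaranteed to see the
 * data that was stored before the flag was set.
 */
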
#define ClearPageUptodate(page)	clear_bit(PG_uptodate, &(page)->flags)

#define PageDirty(page)		test_bit(PG_dirty, &(page)->flags)
#define SetPageDirty(page)	set_bit(PG_dirty, &(page)->flags)
#define TestSetPageDirty(page)	test_and_set_bit(PG_dirty, &(page)->flags)
#define ClearPageDirty(page)	clear_bit(PG_dirty, &(page)->flags)
#define __ClearPageDirty(page)	__clear_bit(PG_dirty, &(page)->flags)
#define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)

#define PageLRU(page)		test_bit(PG_lru, &(page)->flags)
#define SetPageLRU(page)	set_bit(PG_lru, &(page)->flags)
#define ClearPageLRU(page)	clear_bit(PG_lru, &(page)->flags)
#define __ClearPageLRU(page)	__clear_bit(PG_lru, &(page)->flags)

#define PageActive(page)	test_bit(PG_active, &(page)->flags)
#define SetPageActive(page)	set_bit(PG_active, &(page)->flags)
#define ClearPageActive(page)	clear_bit(PG_active, &(page)->flags)
#define __ClearPageActive(page)	__clear_bit(PG_active, &(page)->flags)

#define PageSlab(page)		test_bit(PG_slab, &(page)->flags)
#define __SetPageSlab(page)	__set_bit(PG_slab, &(page)->flags)
#define __ClearPageSlab(page)	__clear_bit(PG_slab, &(page)->flags)

#ifdef CONFIG_HIGHMEM
#define PageHighMem(page)	is_highmem(page_zone(page))
#else
#define PageHighMem(page)	0 /* needed to optimize away at compile time */
#endif

#define PageChecked(page)	test_bit(PG_checked, &(page)->flags)
#define SetPageChecked(page)	set_bit(PG_checked, &(page)->flags)
#define ClearPageChecked(page)	clear_bit(PG_checked, &(page)->flags)

#define PagePinned(page)	test_bit(PG_pinned, &(page)->flags)
#define SetPagePinned(page)	set_bit(PG_pinned, &(page)->flags)
#define ClearPagePinned(page)	clear_bit(PG_pinned, &(page)->flags)

#define PageReserved(page)	test_bit(PG_reserved, &(page)->flags)
#define SetPageReserved(page)	set_bit(PG_reserved, &(page)->flags)
#define ClearPageReserved(page)	clear_bit(PG_reserved, &(page)->flags)
#define __ClearPageReserved(page)	__clear_bit(PG_reserved, &(page)->flags)

#define SetPagePrivate(page)	set_bit(PG_private, &(page)->flags)
#define ClearPagePrivate(page)	clear_bit(PG_private, &(page)->flags)
#define PagePrivate(page)	test_bit(PG_private, &(page)->flags)
#define __SetPagePrivate(page)	__set_bit(PG_private, &(page)->flags)
#define __ClearPagePrivate(page) __clear_bit(PG_private, &(page)->flags)

/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */
#define PageWriteback(page)	test_bit(PG_writeback, &(page)->flags)
#define TestSetPageWriteback(page) test_and_set_bit(PG_writeback,	\
							&(page)->flags)
#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback,	\
							&(page)->flags)

#define PageBuddy(page)		test_bit(PG_buddy, &(page)->flags)
#define __SetPageBuddy(page)	__set_bit(PG_buddy, &(page)->flags)
#define __ClearPageBuddy(page)	__clear_bit(PG_buddy, &(page)->flags)

#define PageMappedToDisk(page)	test_bit(PG_mappedtodisk, &(page)->flags)
#define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
#define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)

#define PageReadahead(page)	test_bit(PG_readahead, &(page)->flags)
#define SetPageReadahead(page)	set_bit(PG_readahead, &(page)->flags)
#define ClearPageReadahead(page) clear_bit(PG_readahead, &(page)->flags)

#define PageReclaim(page)	test_bit(PG_reclaim, &(page)->flags)
#define SetPageReclaim(page)	set_bit(PG_reclaim, &(page)->flags)
#define ClearPageReclaim(page)	clear_bit(PG_reclaim, &(page)->flags)
#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)

#define PageCompound(page)	test_bit(PG_compound, &(page)->flags)
#define __SetPageCompound(page)	__set_bit(PG_compound, &(page)->flags)
#define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags)

/*
 * PG_reclaim is used in combination with PG_compound to mark the
 * head and tail of a compound page
 *
 * PG_compound & PG_reclaim	=> Tail page
 * PG_compound & ~PG_reclaim	=> Head page
 */

#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))

#define PageTail(page)	(((page)->flags & PG_head_tail_mask)		\
				== PG_head_tail_mask)

static inline void __SetPageTail(struct page *page)
{
	page->flags |= PG_head_tail_mask;
}

static inline void __ClearPageTail(struct page *page)
{
	page->flags &= ~PG_head_tail_mask;
}

#define PageHead(page)	(((page)->flags & PG_head_tail_mask)		\
				== (1L << PG_compound))
#define __SetPageHead(page)	__SetPageCompound(page)
#define __ClearPageHead(page)	__ClearPageCompound(page)

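/*
 * Worked example (illustrative) of the encoding above: for an order-2
 * compound page covering four struct pages, prep_compound_page() leaves
 * page[0] with only PG_compound set, so PageHead() is true, while
 * page[1]..page[3] carry PG_compound | PG_reclaim, so PageTail() is true.
 */
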
#ifdef CONFIG_SWAP
#define PageSwapCache(page)	test_bit(PG_swapcache, &(page)->flags)
#define SetPageSwapCache(page)	set_bit(PG_swapcache, &(page)->flags)
#define ClearPageSwapCache(page) clear_bit(PG_swapcache, &(page)->flags)
#else
#define PageSwapCache(page)	0
#endif

#define PageUncached(page)	test_bit(PG_uncached, &(page)->flags)
#define SetPageUncached(page)	set_bit(PG_uncached, &(page)->flags)
#define ClearPageUncached(page)	clear_bit(PG_uncached, &(page)->flags)

struct page;	/* forward declaration */

extern void cancel_dirty_page(struct page *page, unsigned int account_size);

int test_clear_page_writeback(struct page *page);
int test_set_page_writeback(struct page *page);

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

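/*
 * Note (added commentary): test_set_page_writeback() and
 * test_clear_page_writeback() in mm/page-writeback.c wrap the TestSet/
 * TestClear operations with the mapping's tree_lock and keep the
 * PAGECACHE_TAG_WRITEBACK radix-tree tag in sync.  That is the accounting
 * which bare SetPageWriteback()/ClearPageWriteback() operators would bypass.
 */
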
#endif /* !__GENERATING_BOUNDS_H */
#endif /* PAGE_FLAGS_H */