Break out page_order() into internal.h to avoid special knowledge of the buddy allocator

The statistics patch later needs to know the order of a free page on the free
lists.  Rather than requiring special knowledge of page_private() when
PageBuddy() is set, this patch moves page_order() into internal.h and adds
a VM_BUG_ON to catch its use on non-PageBuddy pages.
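
For illustration only (not part of this patch): a minimal sketch of the
caller-side contract this establishes.  The helper below is hypothetical; it
assumes the caller holds zone->lock and lives in a file under mm/ that
includes "internal.h".  It checks PageBuddy() before asking for the order via
page_order(), instead of open-coding page_private().

	/*
	 * Hypothetical example: count free pages of a given order in a zone
	 * by scanning its page frames.  The caller is assumed to hold
	 * zone->lock, which is what lets page_order() read page_private()
	 * without atomic page->flags operations.
	 */
	static unsigned long count_free_pages_of_order(struct zone *zone,
							unsigned int order)
	{
		unsigned long pfn, end_pfn, count = 0;

		end_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);

			/* page_order() is only valid while PageBuddy() is set */
			if (PageBuddy(page) && page_order(page) == order)
				count++;
		}
		return count;
	}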

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Mel Gorman, 2007-10-16 01:26:10 -07:00, committed by Linus Torvalds
commit 48f13bf3e7 (parent ea3061d227)
2 changed files with 10 additions and 10 deletions

mm/internal.h

@@ -37,4 +37,14 @@ static inline void __put_page(struct page *page)
 extern void fastcall __init __free_pages_bootmem(struct page *page,
 						unsigned int order);
 
+/*
+ * function for dealing with page's order in buddy system.
+ * zone->lock is already acquired when we use these.
+ * So, we don't need atomic page->flags operations here.
+ */
+static inline unsigned long page_order(struct page *page)
+{
+	VM_BUG_ON(!PageBuddy(page));
+	return page_private(page);
+}
 #endif

mm/page_alloc.c

@@ -313,16 +313,6 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 		clear_highpage(page + i);
 }
 
-/*
- * function for dealing with page's order in buddy system.
- * zone->lock is already acquired when we use these.
- * So, we don't need atomic page->flags operations here.
- */
-static inline unsigned long page_order(struct page *page)
-{
-	return page_private(page);
-}
-
 static inline void set_page_order(struct page *page, int order)
 {
 	set_page_private(page, order);