2005-04-17 00:20:36 +02:00
|
|
|
#ifndef _LINUX_HUGETLB_H
|
|
|
|
#define _LINUX_HUGETLB_H
|
|
|
|
|
2007-07-30 00:36:13 +02:00
|
|
|
#include <linux/fs.h>
|
|
|
|
|
2005-04-17 00:20:36 +02:00
|
|
|
#ifdef CONFIG_HUGETLB_PAGE
|
|
|
|
|
|
|
|
#include <linux/mempolicy.h>
|
2007-03-02 00:46:08 +01:00
|
|
|
#include <linux/shm.h>
|
2005-06-22 02:14:44 +02:00
|
|
|
#include <asm/tlbflush.h>
|
2005-04-17 00:20:36 +02:00
|
|
|
|
|
|
|
struct ctl_table;
|
|
|
|
|
|
|
|
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
|
|
|
|
{
|
|
|
|
return vma->vm_flags & VM_HUGETLB;
|
|
|
|
}
|
|
|
|
|
2008-07-24 06:27:23 +02:00
|
|
|
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
|
2005-04-17 00:20:36 +02:00
|
|
|
int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
|
2008-02-08 13:18:18 +01:00
|
|
|
int hugetlb_overcommit_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
|
2007-07-17 13:03:13 +02:00
|
|
|
int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
|
2005-04-17 00:20:36 +02:00
|
|
|
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
|
2007-11-15 01:59:33 +01:00
|
|
|
int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int, int);
|
hugetlb: guarantee that COW faults for a process that called mmap(MAP_PRIVATE) on hugetlbfs will succeed
After patch 2 in this series, a process that successfully calls mmap() for
a MAP_PRIVATE mapping will be guaranteed to successfully fault until a
process calls fork(). At that point, the next write fault from the parent
could fail due to COW if the child still has a reference.
We only reserve pages for the parent but a copy must be made to avoid
leaking data from the parent to the child after fork(). Reserves could be
taken for both parent and child at fork time to guarantee faults but if
the mapping is large it is highly likely we will not have sufficient pages
for the reservation, and it is common to fork only to exec() immediately
after. A failure here would be very undesirable.
Note that the current behaviour of mainline with MAP_PRIVATE pages is
pretty bad. The following situation is allowed to occur today.
1. Process calls mmap(MAP_PRIVATE)
2. Process calls mlock() to fault all pages and makes sure it succeeds
3. Process forks()
4. Process writes to MAP_PRIVATE mapping while child still exists
5. If the COW fails at this point, the process gets SIGKILLed even though it
had taken care to ensure the pages existed
This patch improves the situation by guaranteeing the reliability of the
process that successfully calls mmap(). When the parent performs COW, it
will try to satisfy the allocation without using reserves. If that fails
the parent will steal the page leaving any children without a page.
Faults from the child after that point will result in failure. If the
child COW happens first, an attempt will be made to allocate the page
without reserves and the child will get SIGKILLed on failure.
To summarise the new behaviour:
1. If the original mapper performs COW on a private mapping with multiple
references, it will attempt to allocate a hugepage from the pool or
the buddy allocator without using the existing reserves. On fail, VMAs
mapping the same area are traversed and the page being COW'd is unmapped
where found. It will then steal the original page as the last mapper in
the normal way.
2. The VMAs the pages were unmapped from are flagged to note that pages
with data no longer exist. Future no-page faults on those VMAs will
terminate the process as otherwise it would appear that data was corrupted.
A warning is printed to the console that this situation occurred.
3. If the child performs COW first, it will attempt to satisfy the COW
from the pool if there are enough pages or via the buddy allocator if
overcommit is allowed and the buddy allocator can satisfy the request. If
it fails, the child will be killed.
If the pool is large enough, existing applications will not notice that
the reserves were a factor. Existing applications depending on the
no-reserves being set are unlikely to exist as for much of the history of
hugetlbfs, pages were prefaulted at mmap(), allocating the pages at that
point or failing the mmap().
[npiggin@suse.de: fix CONFIG_HUGETLB=n build]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Adam Litke <agl@us.ibm.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-07-24 06:27:25 +02:00
|
|
|
/*
 * Tear down the huge page mappings in [start, end) of @vma.
 * unmap_hugepage_range() takes the necessary locking around
 * __unmap_hugepage_range(); @ref_page, when non-NULL, limits the
 * unmap to mappings of that particular page.
 */
void unmap_hugepage_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);

int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma);

/* /proc/meminfo style reporting; both return the number of bytes written. */
int hugetlb_report_meminfo(char *buf);
int hugetlb_report_node_meminfo(int nid, char *buf);

/* Total huge page pool size, expressed in small (PAGE_SIZE) pages. */
unsigned long hugetlb_total_pages(void);

/* Page fault entry point for huge page VMAs. */
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long address, int write_access);

/*
 * Reserve huge pages for the file range [from, to) at mmap() time so that
 * later faults are guaranteed to succeed; the reservation is dropped with
 * hugetlb_unreserve_pages().
 */
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
|
2005-04-17 00:20:36 +02:00
|
|
|
|
2007-07-17 13:03:13 +02:00
|
|
|
/* When non-zero, huge pages are allocated from ZONE_MOVABLE. */
extern unsigned long hugepages_treat_as_movable;
/* Constant bounds used by the sysctl handlers. */
extern const unsigned long hugetlb_zero, hugetlb_infinity;
/* Group allowed to use SHM_HUGETLB segments (vm.hugetlb_shm_group). */
extern int sysctl_hugetlb_shm_group;
/* Huge pages carved out of bootmem before the buddy allocator is up. */
extern struct list_head huge_boot_pages;
|
2005-04-17 00:20:36 +02:00
|
|
|
|
2005-06-22 02:14:44 +02:00
|
|
|
/* arch callbacks */
|
|
|
|
|
2008-07-24 06:27:41 +02:00
|
|
|
pte_t *huge_pte_alloc(struct mm_struct *mm,
|
|
|
|
unsigned long addr, unsigned long sz);
|
2005-06-22 02:14:44 +02:00
|
|
|
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
|
2006-12-07 05:32:03 +01:00
|
|
|
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
|
2005-06-22 02:14:44 +02:00
|
|
|
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
|
|
|
|
int write);
|
|
|
|
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
|
|
|
|
pmd_t *pmd, int write);
|
2008-07-24 06:27:50 +02:00
|
|
|
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
|
|
|
|
pud_t *pud, int write);
|
2005-06-22 02:14:44 +02:00
|
|
|
int pmd_huge(pmd_t pmd);
|
2008-07-24 06:27:50 +02:00
|
|
|
int pud_huge(pud_t pmd);
|
2006-03-22 09:08:50 +01:00
|
|
|
void hugetlb_change_protection(struct vm_area_struct *vma,
|
|
|
|
unsigned long address, unsigned long end, pgprot_t newprot);
|
2005-06-22 02:14:44 +02:00
|
|
|
|
2005-04-17 00:20:36 +02:00
|
|
|
#else /* !CONFIG_HUGETLB_PAGE */
|
|
|
|
|
|
|
|
/* CONFIG_HUGETLB_PAGE=n: no VMA can ever be huge-page backed. */
static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
{
	return 0;
}

/* No reservation tracking exists without hugetlb support; nothing to reset. */
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

/* The huge page pool is always empty when hugetlb is compiled out. */
static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}
|
|
|
|
|
2007-11-15 01:59:33 +01:00
|
|
|
/*
 * Compile-time stubs for CONFIG_HUGETLB_PAGE=n.  The BUG() variants must
 * never be reached: callers are expected to check is_vm_hugetlb_page()
 * (always 0 here) first, so any call indicates a logic error.
 */
#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
#define hugetlb_prefault(mapping, vma)		({ BUG(); 0; })
|
hugetlb: guarantee that COW faults for a process that called mmap(MAP_PRIVATE) on hugetlbfs will succeed
After patch 2 in this series, a process that successfully calls mmap() for
a MAP_PRIVATE mapping will be guaranteed to successfully fault until a
process calls fork(). At that point, the next write fault from the parent
could fail due to COW if the child still has a reference.
We only reserve pages for the parent but a copy must be made to avoid
leaking data from the parent to the child after fork(). Reserves could be
taken for both parent and child at fork time to guarantee faults but if
the mapping is large it is highly likely we will not have sufficient pages
for the reservation, and it is common to fork only to exec() immediately
after. A failure here would be very undesirable.
Note that the current behaviour of mainline with MAP_PRIVATE pages is
pretty bad. The following situation is allowed to occur today.
1. Process calls mmap(MAP_PRIVATE)
2. Process calls mlock() to fault all pages and makes sure it succeeds
3. Process forks()
4. Process writes to MAP_PRIVATE mapping while child still exists
5. If the COW fails at this point, the process gets SIGKILLed even though it
had taken care to ensure the pages existed
This patch improves the situation by guaranteeing the reliability of the
process that successfully calls mmap(). When the parent performs COW, it
will try to satisfy the allocation without using reserves. If that fails
the parent will steal the page leaving any children without a page.
Faults from the child after that point will result in failure. If the
child COW happens first, an attempt will be made to allocate the page
without reserves and the child will get SIGKILLed on failure.
To summarise the new behaviour:
1. If the original mapper performs COW on a private mapping with multiple
references, it will attempt to allocate a hugepage from the pool or
the buddy allocator without using the existing reserves. On fail, VMAs
mapping the same area are traversed and the page being COW'd is unmapped
where found. It will then steal the original page as the last mapper in
the normal way.
2. The VMAs the pages were unmapped from are flagged to note that pages
with data no longer exist. Future no-page faults on those VMAs will
terminate the process as otherwise it would appear that data was corrupted.
A warning is printed to the console that this situation occurred.
3. If the child performs COW first, it will attempt to satisfy the COW
from the pool if there are enough pages or via the buddy allocator if
overcommit is allowed and the buddy allocator can satisfy the request. If
it fails, the child will be killed.
If the pool is large enough, existing applications will not notice that
the reserves were a factor. Existing applications depending on the
no-reserves being set are unlikely to exist as for much of the history of
hugetlbfs, pages were prefaulted at mmap(), allocating the pages at that
point or failing the mmap().
[npiggin@suse.de: fix CONFIG_HUGETLB=n build]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Adam Litke <agl@us.ibm.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-07-24 06:27:25 +02:00
|
|
|
/* More CONFIG_HUGETLB_PAGE=n stubs; see the comment on the group above. */
#define unmap_hugepage_range(vma, start, end, page)	BUG()
#define hugetlb_report_meminfo(buf)			0
#define hugetlb_report_node_meminfo(n, buf)		0
#define follow_huge_pmd(mm, addr, pmd, write)	NULL
#define follow_huge_pud(mm, addr, pud, write)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
|
[PATCH] hugepage: Fix hugepage logic in free_pgtables()
free_pgtables() has special logic to call hugetlb_free_pgd_range() instead
of the normal free_pgd_range() on hugepage VMAs. However, the test it uses
to do so is incorrect: it calls is_hugepage_only_range on a hugepage sized
range at the start of the vma. is_hugepage_only_range() will return true
if the given range has any intersection with a hugepage address region, and
in this case the given region need not be hugepage aligned. So, for
example, this test can return true if called on, say, a 4k VMA immediately
preceding a (nicely aligned) hugepage VMA.
At present we get away with this because the powerpc version of
hugetlb_free_pgd_range() is just a call to free_pgd_range(). On ia64 (the
only other arch with a non-trivial is_hugepage_only_range()) we get away
with it for a different reason; the hugepage area is not contiguous with
the rest of the user address space, and VMAs are not permitted in between,
so the test can't return a false positive there.
Nonetheless this should be fixed. We do that in the patch below by
replacing the is_hugepage_only_range() test with an explicit test of the
VMA using is_vm_hugetlb_page().
This in turn changes behaviour for platforms where is_hugepage_only_range()
returns false always (everything except powerpc and ia64). We address this
by ensuring that hugetlb_free_pgd_range() is defined to be identical to
free_pgd_range() (instead of a no-op) on everything except ia64. Even so,
it will prevent some otherwise possible coalescing of calls down to
free_pgd_range(). Since this only happens for hugepage VMAs, removing this
small optimization seems unlikely to cause any trouble.
This patch causes no regressions on the libhugetlbfs testsuite - ppc64
POWER5 (8-way), ppc64 G5 (2-way) and i386 Pentium M (UP).
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Acked-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-03-22 09:08:57 +01:00
|
|
|
/* Remaining CONFIG_HUGETLB_PAGE=n stubs. */
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, write)	({ BUG(); 0; })
/* Expands to nothing: protection changes on huge mappings cannot occur. */
#define hugetlb_change_protection(vma, address, end, newprot)

/*
 * Architectures without hugetlb support do not define HPAGE_*; fall back
 * to the base page geometry so generic code still compiles.
 */
#ifndef HPAGE_MASK
#define HPAGE_MASK	PAGE_MASK	/* Keep the compiler happy */
#define HPAGE_SIZE	PAGE_SIZE
#endif
|
|
|
|
|
|
|
|
#endif /* !CONFIG_HUGETLB_PAGE */
|
|
|
|
|
|
|
|
#ifdef CONFIG_HUGETLBFS
|
|
|
|
struct hugetlbfs_config {
|
|
|
|
uid_t uid;
|
|
|
|
gid_t gid;
|
|
|
|
umode_t mode;
|
|
|
|
long nr_blocks;
|
|
|
|
long nr_inodes;
|
2008-07-24 06:27:43 +02:00
|
|
|
struct hstate *hstate;
|
2005-04-17 00:20:36 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
struct hugetlbfs_sb_info {
|
|
|
|
long max_blocks; /* blocks allowed */
|
|
|
|
long free_blocks; /* blocks free */
|
|
|
|
long max_inodes; /* inodes allowed */
|
|
|
|
long free_inodes; /* inodes free */
|
|
|
|
spinlock_t stat_lock;
|
2008-07-24 06:27:43 +02:00
|
|
|
struct hstate *hstate;
|
2005-04-17 00:20:36 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
struct hugetlbfs_inode_info {
|
|
|
|
struct shared_policy policy;
|
|
|
|
struct inode vfs_inode;
|
|
|
|
};
|
|
|
|
|
|
|
|
static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
|
|
|
|
{
|
|
|
|
return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
|
|
|
|
{
|
|
|
|
return sb->s_fs_info;
|
|
|
|
}
|
|
|
|
|
2006-03-28 11:56:42 +02:00
|
|
|
extern const struct file_operations hugetlbfs_file_operations;
|
2005-04-17 00:20:36 +02:00
|
|
|
extern struct vm_operations_struct hugetlb_vm_ops;
|
2007-06-16 19:16:16 +02:00
|
|
|
struct file *hugetlb_file_setup(const char *name, size_t);
|
2007-11-15 01:59:41 +01:00
|
|
|
int hugetlb_get_quota(struct address_space *mapping, long delta);
|
|
|
|
void hugetlb_put_quota(struct address_space *mapping, long delta);
|
2005-04-17 00:20:36 +02:00
|
|
|
|
|
|
|
static inline int is_file_hugepages(struct file *file)
|
|
|
|
{
|
2007-03-02 00:46:08 +01:00
|
|
|
if (file->f_op == &hugetlbfs_file_operations)
|
|
|
|
return 1;
|
|
|
|
if (is_file_shm_hugepages(file))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
return 0;
|
2005-04-17 00:20:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void set_file_hugepages(struct file *file)
|
|
|
|
{
|
|
|
|
file->f_op = &hugetlbfs_file_operations;
|
|
|
|
}
|
|
|
|
#else /* !CONFIG_HUGETLBFS */
|
|
|
|
|
|
|
|
/* CONFIG_HUGETLBFS=n: no file can be hugepage-backed. */
#define is_file_hugepages(file)		0
/* Must never be called without hugetlbfs support. */
#define set_file_hugepages(file)	BUG()
#define hugetlb_file_setup(name,size)	ERR_PTR(-ENOSYS)
|
2005-04-17 00:20:36 +02:00
|
|
|
|
|
|
|
#endif /* !CONFIG_HUGETLBFS */
|
|
|
|
|
2007-05-06 23:49:00 +02:00
|
|
|
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
|
|
|
|
/* Arch-provided search for a free, suitably aligned huge page mapping. */
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
|
|
|
|
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
|
|
|
|
|
2008-07-24 06:27:41 +02:00
|
|
|
#ifdef CONFIG_HUGETLB_PAGE
|
|
|
|
|
2008-07-24 06:27:44 +02:00
|
|
|
#define HSTATE_NAME_LEN 32
|
2008-07-24 06:27:41 +02:00
|
|
|
/* Defines one hugetlb page size */
|
|
|
|
struct hstate {
|
|
|
|
int hugetlb_next_nid;
|
|
|
|
unsigned int order;
|
|
|
|
unsigned long mask;
|
|
|
|
unsigned long max_huge_pages;
|
|
|
|
unsigned long nr_huge_pages;
|
|
|
|
unsigned long free_huge_pages;
|
|
|
|
unsigned long resv_huge_pages;
|
|
|
|
unsigned long surplus_huge_pages;
|
|
|
|
unsigned long nr_overcommit_huge_pages;
|
|
|
|
struct list_head hugepage_freelists[MAX_NUMNODES];
|
|
|
|
unsigned int nr_huge_pages_node[MAX_NUMNODES];
|
|
|
|
unsigned int free_huge_pages_node[MAX_NUMNODES];
|
|
|
|
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
|
2008-07-24 06:27:44 +02:00
|
|
|
char name[HSTATE_NAME_LEN];
|
2008-07-24 06:27:41 +02:00
|
|
|
};
|
|
|
|
|
2008-07-24 06:27:52 +02:00
|
|
|
struct huge_bootmem_page {
|
|
|
|
struct list_head list;
|
|
|
|
struct hstate *hstate;
|
|
|
|
};
|
|
|
|
|
|
|
|
/* arch callback */
|
|
|
|
int __init alloc_bootmem_huge_page(struct hstate *h);
|
|
|
|
|
2008-07-24 06:27:42 +02:00
|
|
|
void __init hugetlb_add_hstate(unsigned order);
|
|
|
|
struct hstate *size_to_hstate(unsigned long size);
|
|
|
|
|
|
|
|
#ifndef HUGE_MAX_HSTATE
|
|
|
|
#define HUGE_MAX_HSTATE 1
|
|
|
|
#endif
|
|
|
|
|
|
|
|
extern struct hstate hstates[HUGE_MAX_HSTATE];
|
|
|
|
extern unsigned int default_hstate_idx;
|
|
|
|
|
|
|
|
#define default_hstate (hstates[default_hstate_idx])
|
2008-07-24 06:27:41 +02:00
|
|
|
|
2008-07-24 06:27:43 +02:00
|
|
|
static inline struct hstate *hstate_inode(struct inode *i)
|
2008-07-24 06:27:41 +02:00
|
|
|
{
|
2008-07-24 06:27:43 +02:00
|
|
|
struct hugetlbfs_sb_info *hsb;
|
|
|
|
hsb = HUGETLBFS_SB(i->i_sb);
|
|
|
|
return hsb->hstate;
|
2008-07-24 06:27:41 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline struct hstate *hstate_file(struct file *f)
|
|
|
|
{
|
2008-07-24 06:27:43 +02:00
|
|
|
return hstate_inode(f->f_dentry->d_inode);
|
2008-07-24 06:27:41 +02:00
|
|
|
}
|
|
|
|
|
2008-07-24 06:27:43 +02:00
|
|
|
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
|
2008-07-24 06:27:41 +02:00
|
|
|
{
|
2008-07-24 06:27:43 +02:00
|
|
|
return hstate_file(vma->vm_file);
|
2008-07-24 06:27:41 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned long huge_page_size(struct hstate *h)
|
|
|
|
{
|
|
|
|
return (unsigned long)PAGE_SIZE << h->order;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned long huge_page_mask(struct hstate *h)
|
|
|
|
{
|
|
|
|
return h->mask;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned int huge_page_order(struct hstate *h)
|
|
|
|
{
|
|
|
|
return h->order;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned huge_page_shift(struct hstate *h)
|
|
|
|
{
|
|
|
|
return h->order + PAGE_SHIFT;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned int pages_per_huge_page(struct hstate *h)
|
|
|
|
{
|
|
|
|
return 1 << h->order;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned int blocks_per_huge_page(struct hstate *h)
|
|
|
|
{
|
|
|
|
return huge_page_size(h) / 512;
|
|
|
|
}
|
|
|
|
|
|
|
|
#include <asm/hugetlb.h>
|
|
|
|
|
2008-07-24 06:27:42 +02:00
|
|
|
static inline struct hstate *page_hstate(struct page *page)
|
|
|
|
{
|
|
|
|
return size_to_hstate(PAGE_SIZE << compound_order(page));
|
|
|
|
}
|
|
|
|
|
2008-07-24 06:27:41 +02:00
|
|
|
#else
|
|
|
|
/* CONFIG_HUGETLB_PAGE=n: empty hstate and base-page-geometry fallbacks. */
struct hstate {};
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
#define pages_per_huge_page(h) 1
|
|
|
|
#endif
|
|
|
|
|
2005-04-17 00:20:36 +02:00
|
|
|
#endif /* _LINUX_HUGETLB_H */
|