[PATCH] mm/msync.c cleanup
This is not actually a problem, but sync_page_range() is already used as the name of a function exported to filesystems, so the msync_xxx names are more readable, at least to me.

Signed-off-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
Acked-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
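[Illustrative aside, not part of the original patch: the change is purely a naming cleanup. The kernel exports a function called sync_page_range() to filesystems, while mm/msync.c used the same sync_ prefix for its private, unrelated page-table walkers. The user-space C sketch below shows why such name sharing is legal but confusing; the const char *what parameter and the printf bodies are invented for illustration, and only the identifiers sync_page_range and msync_page_range come from the patch itself.]

#include <stdio.h>

/*
 * Toy stand-in for the msync.c-local page-table walker.  "static" keeps it
 * private to this translation unit, so it could legally share a prefix (or
 * even a name) with an exported function elsewhere -- but readers of grep
 * output and backtraces could no longer tell the two apart by name.
 * Renaming the local helpers to msync_* removes that ambiguity.
 */
static void msync_page_range(const char *what)
{
	printf("msync.c walker called for %s\n", what);
}

/* Toy stand-in for the sync_page_range() exported to filesystems. */
void sync_page_range(const char *what)
{
	printf("exported sync_page_range() called for %s\n", what);
}

int main(void)
{
	msync_page_range("an msync(2) region");   /* file-local helper   */
	sync_page_range("a filesystem caller");    /* exported interface  */
	return 0;
}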
---
commit b57b98d147
parent 662f3a0b94

 mm/msync.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/mm/msync.c b/mm/msync.c
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -22,7 +22,7 @@
  * threads/the swapper from ripping pte's out from under us.
  */
 
-static void sync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				unsigned long addr, unsigned long end)
 {
 	pte_t *pte;
@@ -50,7 +50,7 @@ static void sync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	pte_unmap(pte - 1);
 }
 
-static inline void sync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
+static inline void msync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 				unsigned long addr, unsigned long end)
 {
 	pmd_t *pmd;
@@ -61,11 +61,11 @@ static inline void sync_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		next = pmd_addr_end(addr, end);
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		sync_pte_range(vma, pmd, addr, next);
+		msync_pte_range(vma, pmd, addr, next);
 	} while (pmd++, addr = next, addr != end);
 }
 
-static inline void sync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
+static inline void msync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 				unsigned long addr, unsigned long end)
 {
 	pud_t *pud;
@@ -76,11 +76,11 @@ static inline void sync_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 		next = pud_addr_end(addr, end);
 		if (pud_none_or_clear_bad(pud))
 			continue;
-		sync_pmd_range(vma, pud, addr, next);
+		msync_pmd_range(vma, pud, addr, next);
 	} while (pud++, addr = next, addr != end);
 }
 
-static void sync_page_range(struct vm_area_struct *vma,
+static void msync_page_range(struct vm_area_struct *vma,
 				unsigned long addr, unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -101,13 +101,13 @@ static void sync_page_range(struct vm_area_struct *vma,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		sync_pud_range(vma, pgd, addr, next);
+		msync_pud_range(vma, pgd, addr, next);
 	} while (pgd++, addr = next, addr != end);
 	spin_unlock(&mm->page_table_lock);
 }
 
 #ifdef CONFIG_PREEMPT
-static inline void filemap_sync(struct vm_area_struct *vma,
+static inline void filemap_msync(struct vm_area_struct *vma,
 				unsigned long addr, unsigned long end)
 {
 	const size_t chunk = 64 * 1024;	/* bytes */
@@ -117,15 +117,15 @@ static inline void filemap_sync(struct vm_area_struct *vma,
 		next = addr + chunk;
 		if (next > end || next < addr)
 			next = end;
-		sync_page_range(vma, addr, next);
+		msync_page_range(vma, addr, next);
 		cond_resched();
 	} while (addr = next, addr != end);
 }
 #else
-static inline void filemap_sync(struct vm_area_struct *vma,
+static inline void filemap_msync(struct vm_area_struct *vma,
 				unsigned long addr, unsigned long end)
 {
-	sync_page_range(vma, addr, end);
+	msync_page_range(vma, addr, end);
 }
 #endif
 
@@ -150,7 +150,7 @@ static int msync_interval(struct vm_area_struct *vma,
 		return -EBUSY;
 
 	if (file && (vma->vm_flags & VM_SHARED)) {
-		filemap_sync(vma, addr, end);
+		filemap_msync(vma, addr, end);
 
 		if (flags & MS_SYNC) {
 			struct address_space *mapping = file->f_mapping;