Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton: "A few little subsystems and a start of a lot of MM patches. Subsystems affected by this patch series: squashfs, ocfs2, parisc, vfs. With mm subsystems: slab-generic, slub, debug, pagecache, gup, swap, memcg, pagemap, memory-failure, vmalloc, kasan" * emailed patches from Andrew Morton <akpm@linux-foundation.org>: (128 commits) kasan: move kasan_report() into report.c mm/mm_init.c: report kasan-tag information stored in page->flags ubsan: entirely disable alignment checks under UBSAN_TRAP kasan: fix clang compilation warning due to stack protector x86/mm: remove vmalloc faulting mm: remove vmalloc_sync_(un)mappings() x86/mm/32: implement arch_sync_kernel_mappings() x86/mm/64: implement arch_sync_kernel_mappings() mm/ioremap: track which page-table levels were modified mm/vmalloc: track which page-table levels were modified mm: add functions to track page directory modifications s390: use __vmalloc_node in stack_alloc powerpc: use __vmalloc_node in alloc_vm_stack arm64: use __vmalloc_node in arch_alloc_vmap_stack mm: remove vmalloc_user_node_flags mm: switch the test_vmalloc module to use __vmalloc_node mm: remove __vmalloc_node_flags_caller mm: remove both instances of __vmalloc_node_flags mm: remove the prot argument to __vmalloc_node mm: remove the pgprot argument to __vmalloc ...
This commit is contained in:
commit
94709049fb
195 changed files with 2199 additions and 2198 deletions
|
@ -1329,6 +1329,10 @@ PAGE_SIZE multiple when read back.
|
|||
workingset_activate
|
||||
Number of refaulted pages that were immediately activated
|
||||
|
||||
workingset_restore
|
||||
Number of restored pages which have been detected as an active
|
||||
workingset before they got reclaimed.
|
||||
|
||||
workingset_nodereclaim
|
||||
Number of times a shadow node has been reclaimed
|
||||
|
||||
|
@ -1370,6 +1374,22 @@ PAGE_SIZE multiple when read back.
|
|||
The total amount of swap currently being used by the cgroup
|
||||
and its descendants.
|
||||
|
||||
memory.swap.high
|
||||
A read-write single value file which exists on non-root
|
||||
cgroups. The default is "max".
|
||||
|
||||
Swap usage throttle limit. If a cgroup's swap usage exceeds
|
||||
this limit, all its further allocations will be throttled to
|
||||
allow userspace to implement custom out-of-memory procedures.
|
||||
|
||||
This limit marks a point of no return for the cgroup. It is NOT
|
||||
designed to manage the amount of swapping a workload does
|
||||
during regular operation. Compare to memory.swap.max, which
|
||||
prohibits swapping past a set amount, but lets the cgroup
|
||||
continue unimpeded as long as other memory can be reclaimed.
|
||||
|
||||
Healthy workloads are not expected to reach this limit.
|
||||
|
||||
memory.swap.max
|
||||
A read-write single value file which exists on non-root
|
||||
cgroups. The default is "max".
|
||||
|
@ -1383,6 +1403,10 @@ PAGE_SIZE multiple when read back.
|
|||
otherwise, a value change in this file generates a file
|
||||
modified event.
|
||||
|
||||
high
|
||||
The number of times the cgroup's swap usage was over
|
||||
the high threshold.
|
||||
|
||||
max
|
||||
The number of times the cgroup's swap usage was about
|
||||
to go over the max boundary and swap allocation
|
||||
|
|
|
@ -213,7 +213,7 @@ Here are the routines, one by one:
|
|||
there will be no entries in the cache for the kernel address
|
||||
space for virtual addresses in the range 'start' to 'end-1'.
|
||||
|
||||
The first of these two routines is invoked after map_vm_area()
|
||||
The first of these two routines is invoked after map_kernel_range()
|
||||
has installed the page table entries. The second is invoked
|
||||
before unmap_kernel_range() deletes the page table entries.
|
||||
|
||||
|
|
|
@ -239,6 +239,7 @@ prototypes::
|
|||
int (*readpage)(struct file *, struct page *);
|
||||
int (*writepages)(struct address_space *, struct writeback_control *);
|
||||
int (*set_page_dirty)(struct page *page);
|
||||
void (*readahead)(struct readahead_control *);
|
||||
int (*readpages)(struct file *filp, struct address_space *mapping,
|
||||
struct list_head *pages, unsigned nr_pages);
|
||||
int (*write_begin)(struct file *, struct address_space *mapping,
|
||||
|
@ -271,7 +272,8 @@ writepage: yes, unlocks (see below)
|
|||
readpage: yes, unlocks
|
||||
writepages:
|
||||
set_page_dirty no
|
||||
readpages:
|
||||
readahead: yes, unlocks
|
||||
readpages: no
|
||||
write_begin: locks the page exclusive
|
||||
write_end: yes, unlocks exclusive
|
||||
bmap:
|
||||
|
@ -295,6 +297,8 @@ the request handler (/dev/loop).
|
|||
->readpage() unlocks the page, either synchronously or via I/O
|
||||
completion.
|
||||
|
||||
->readahead() unlocks the pages that I/O is attempted on like ->readpage().
|
||||
|
||||
->readpages() populates the pagecache with the passed pages and starts
|
||||
I/O against them. They come unlocked upon I/O completion.
|
||||
|
||||
|
|
|
@ -1043,8 +1043,8 @@ PageTables
|
|||
amount of memory dedicated to the lowest level of page
|
||||
tables.
|
||||
NFS_Unstable
|
||||
NFS pages sent to the server, but not yet committed to stable
|
||||
storage
|
||||
Always zero. Previous counted pages which had been written to
|
||||
the server, but has not been committed to stable storage.
|
||||
Bounce
|
||||
Memory used for block device "bounce buffers"
|
||||
WritebackTmp
|
||||
|
|
|
@ -706,6 +706,7 @@ cache in your filesystem. The following members are defined:
|
|||
int (*readpage)(struct file *, struct page *);
|
||||
int (*writepages)(struct address_space *, struct writeback_control *);
|
||||
int (*set_page_dirty)(struct page *page);
|
||||
void (*readahead)(struct readahead_control *);
|
||||
int (*readpages)(struct file *filp, struct address_space *mapping,
|
||||
struct list_head *pages, unsigned nr_pages);
|
||||
int (*write_begin)(struct file *, struct address_space *mapping,
|
||||
|
@ -781,12 +782,26 @@ cache in your filesystem. The following members are defined:
|
|||
If defined, it should set the PageDirty flag, and the
|
||||
PAGECACHE_TAG_DIRTY tag in the radix tree.
|
||||
|
||||
``readahead``
|
||||
Called by the VM to read pages associated with the address_space
|
||||
object. The pages are consecutive in the page cache and are
|
||||
locked. The implementation should decrement the page refcount
|
||||
after starting I/O on each page. Usually the page will be
|
||||
unlocked by the I/O completion handler. If the filesystem decides
|
||||
to stop attempting I/O before reaching the end of the readahead
|
||||
window, it can simply return. The caller will decrement the page
|
||||
refcount and unlock the remaining pages for you. Set PageUptodate
|
||||
if the I/O completes successfully. Setting PageError on any page
|
||||
will be ignored; simply unlock the page if an I/O error occurs.
|
||||
|
||||
``readpages``
|
||||
called by the VM to read pages associated with the address_space
|
||||
object. This is essentially just a vector version of readpage.
|
||||
Instead of just one page, several pages are requested.
|
||||
readpages is only used for read-ahead, so read errors are
|
||||
ignored. If anything goes wrong, feel free to give up.
|
||||
This interface is deprecated and will be removed by the end of
|
||||
2020; implement readahead instead.
|
||||
|
||||
``write_begin``
|
||||
Called by the generic buffered write code to ask the filesystem
|
||||
|
|
|
@ -49,7 +49,7 @@ Possible debug options are::
|
|||
P Poisoning (object and padding)
|
||||
U User tracking (free and alloc)
|
||||
T Trace (please only use on single slabs)
|
||||
A Toggle failslab filter mark for the cache
|
||||
A Enable failslab filter mark for the cache
|
||||
O Switch debugging off for caches that would have
|
||||
caused higher minimum slab orders
|
||||
- Switch all debugging off (useful if the kernel is
|
||||
|
|
|
@ -81,7 +81,7 @@ CONFIG_PARTITION_ADVANCED=y
|
|||
CONFIG_BINFMT_MISC=y
|
||||
CONFIG_CMA=y
|
||||
CONFIG_ZSMALLOC=m
|
||||
CONFIG_PGTABLE_MAPPING=y
|
||||
CONFIG_ZSMALLOC_PGTABLE_MAPPING=y
|
||||
CONFIG_NET=y
|
||||
CONFIG_PACKET=y
|
||||
CONFIG_UNIX=y
|
||||
|
|
|
@ -407,6 +407,9 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
|
|||
#define __pgprot_modify(prot,mask,bits) \
|
||||
__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
|
||||
|
||||
#define pgprot_nx(prot) \
|
||||
__pgprot_modify(prot, 0, PTE_PXN)
|
||||
|
||||
/*
|
||||
* Mark the prot value as uncacheable and unbufferable.
|
||||
*/
|
||||
|
|
|
@ -19,10 +19,8 @@ static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node)
|
|||
{
|
||||
BUILD_BUG_ON(!IS_ENABLED(CONFIG_VMAP_STACK));
|
||||
|
||||
return __vmalloc_node_range(stack_size, THREAD_ALIGN,
|
||||
VMALLOC_START, VMALLOC_END,
|
||||
THREADINFO_GFP, PAGE_KERNEL, 0, node,
|
||||
__builtin_return_address(0));
|
||||
return __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP, node,
|
||||
__builtin_return_address(0));
|
||||
}
|
||||
|
||||
#endif /* __ASM_VMAP_STACK_H */
|
||||
|
|
|
@ -252,7 +252,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
|
|||
}
|
||||
|
||||
static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
|
||||
unsigned long val)
|
||||
u64 val)
|
||||
{
|
||||
struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
|
||||
static const char units[] = "KMGTPE";
|
||||
|
|
|
@ -93,10 +93,8 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
|
|||
|
||||
#define set_pte_at(mm, addr, ptep, pteval) \
|
||||
do { \
|
||||
pte_t old_pte; \
|
||||
unsigned long flags; \
|
||||
spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\
|
||||
old_pte = *ptep; \
|
||||
set_pte(ptep, pteval); \
|
||||
purge_tlb_entries(mm, addr); \
|
||||
spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\
|
||||
|
|
|
@ -699,10 +699,6 @@ static inline void iosync(void)
|
|||
*
|
||||
* * iounmap undoes such a mapping and can be hooked
|
||||
*
|
||||
* * __ioremap_at (and the pending __iounmap_at) are low level functions to
|
||||
* create hand-made mappings for use only by the PCI code and cannot
|
||||
* currently be hooked. Must be page aligned.
|
||||
*
|
||||
* * __ioremap_caller is the same as above but takes an explicit caller
|
||||
* reference rather than using __builtin_return_address(0)
|
||||
*
|
||||
|
@ -719,6 +715,8 @@ void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size);
|
|||
|
||||
extern void iounmap(volatile void __iomem *addr);
|
||||
|
||||
void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size);
|
||||
|
||||
int early_ioremap_range(unsigned long ea, phys_addr_t pa,
|
||||
unsigned long size, pgprot_t prot);
|
||||
void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
|
||||
|
@ -727,10 +725,6 @@ void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
|
|||
extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size,
|
||||
pgprot_t prot, void *caller);
|
||||
|
||||
extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
|
||||
unsigned long size, pgprot_t prot);
|
||||
extern void __iounmap_at(void *ea, unsigned long size);
|
||||
|
||||
/*
|
||||
* When CONFIG_PPC_INDIRECT_PIO is set, we use the generic iomap implementation
|
||||
* which needs some additional definitions here. They basically allow PIO
|
||||
|
|
|
@ -66,7 +66,7 @@ struct pci_controller {
|
|||
|
||||
void __iomem *io_base_virt;
|
||||
#ifdef CONFIG_PPC64
|
||||
void *io_base_alloc;
|
||||
void __iomem *io_base_alloc;
|
||||
#endif
|
||||
resource_size_t io_base_phys;
|
||||
resource_size_t pci_io_size;
|
||||
|
|
|
@ -748,9 +748,8 @@ void do_IRQ(struct pt_regs *regs)
|
|||
|
||||
static void *__init alloc_vm_stack(void)
|
||||
{
|
||||
return __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN, VMALLOC_START,
|
||||
VMALLOC_END, THREADINFO_GFP, PAGE_KERNEL,
|
||||
0, NUMA_NO_NODE, (void*)_RET_IP_);
|
||||
return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
|
||||
NUMA_NO_NODE, (void *)_RET_IP_);
|
||||
}
|
||||
|
||||
static void __init vmap_irqstack_init(void)
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include <linux/init.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <asm/io.h>
|
||||
|
@ -38,6 +39,22 @@ EXPORT_SYMBOL_GPL(isa_bridge_pcidev);
|
|||
#define ISA_SPACE_MASK 0x1
|
||||
#define ISA_SPACE_IO 0x1
|
||||
|
||||
static void remap_isa_base(phys_addr_t pa, unsigned long size)
|
||||
{
|
||||
WARN_ON_ONCE(ISA_IO_BASE & ~PAGE_MASK);
|
||||
WARN_ON_ONCE(pa & ~PAGE_MASK);
|
||||
WARN_ON_ONCE(size & ~PAGE_MASK);
|
||||
|
||||
if (slab_is_available()) {
|
||||
if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
|
||||
pgprot_noncached(PAGE_KERNEL)))
|
||||
unmap_kernel_range(ISA_IO_BASE, size);
|
||||
} else {
|
||||
early_ioremap_range(ISA_IO_BASE, pa, size,
|
||||
pgprot_noncached(PAGE_KERNEL));
|
||||
}
|
||||
}
|
||||
|
||||
static void pci_process_ISA_OF_ranges(struct device_node *isa_node,
|
||||
unsigned long phb_io_base_phys)
|
||||
{
|
||||
|
@ -105,15 +122,13 @@ static void pci_process_ISA_OF_ranges(struct device_node *isa_node,
|
|||
if (size > 0x10000)
|
||||
size = 0x10000;
|
||||
|
||||
__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
|
||||
size, pgprot_noncached(PAGE_KERNEL));
|
||||
remap_isa_base(phb_io_base_phys, size);
|
||||
return;
|
||||
|
||||
inval_range:
|
||||
printk(KERN_ERR "no ISA IO ranges or unexpected isa range, "
|
||||
"mapping 64k\n");
|
||||
__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
|
||||
0x10000, pgprot_noncached(PAGE_KERNEL));
|
||||
remap_isa_base(phb_io_base_phys, 0x10000);
|
||||
}
|
||||
|
||||
|
||||
|
@ -248,8 +263,7 @@ void __init isa_bridge_init_non_pci(struct device_node *np)
|
|||
* and map it
|
||||
*/
|
||||
isa_io_base = ISA_IO_BASE;
|
||||
__ioremap_at(pbase, (void *)ISA_IO_BASE,
|
||||
size, pgprot_noncached(PAGE_KERNEL));
|
||||
remap_isa_base(pbase, size);
|
||||
|
||||
pr_debug("ISA: Non-PCI bridge is %pOF\n", np);
|
||||
}
|
||||
|
@ -297,7 +311,7 @@ static void isa_bridge_remove(void)
|
|||
isa_bridge_pcidev = NULL;
|
||||
|
||||
/* Unmap the ISA area */
|
||||
__iounmap_at((void *)ISA_IO_BASE, 0x10000);
|
||||
unmap_kernel_range(ISA_IO_BASE, 0x10000);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -109,23 +109,47 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
|
|||
/* Get the host bridge */
|
||||
hose = pci_bus_to_host(bus);
|
||||
|
||||
/* Check if we have IOs allocated */
|
||||
if (hose->io_base_alloc == NULL)
|
||||
return 0;
|
||||
|
||||
pr_debug("IO unmapping for PHB %pOF\n", hose->dn);
|
||||
pr_debug(" alloc=0x%p\n", hose->io_base_alloc);
|
||||
|
||||
/* This is a PHB, we fully unmap the IO area */
|
||||
vunmap(hose->io_base_alloc);
|
||||
|
||||
iounmap(hose->io_base_alloc);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);
|
||||
|
||||
static int pcibios_map_phb_io_space(struct pci_controller *hose)
|
||||
void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size)
|
||||
{
|
||||
struct vm_struct *area;
|
||||
unsigned long addr;
|
||||
|
||||
WARN_ON_ONCE(paddr & ~PAGE_MASK);
|
||||
WARN_ON_ONCE(size & ~PAGE_MASK);
|
||||
|
||||
/*
|
||||
* Let's allocate some IO space for that guy. We don't pass VM_IOREMAP
|
||||
* because we don't care about alignment tricks that the core does in
|
||||
* that case. Maybe we should due to stupid card with incomplete
|
||||
* address decoding but I'd rather not deal with those outside of the
|
||||
* reserved 64K legacy region.
|
||||
*/
|
||||
area = __get_vm_area_caller(size, 0, PHB_IO_BASE, PHB_IO_END,
|
||||
__builtin_return_address(0));
|
||||
if (!area)
|
||||
return NULL;
|
||||
|
||||
addr = (unsigned long)area->addr;
|
||||
if (ioremap_page_range(addr, addr + size, paddr,
|
||||
pgprot_noncached(PAGE_KERNEL))) {
|
||||
unmap_kernel_range(addr, size);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return (void __iomem *)addr;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ioremap_phb);
|
||||
|
||||
static int pcibios_map_phb_io_space(struct pci_controller *hose)
|
||||
{
|
||||
unsigned long phys_page;
|
||||
unsigned long size_page;
|
||||
unsigned long io_virt_offset;
|
||||
|
@ -146,12 +170,11 @@ static int pcibios_map_phb_io_space(struct pci_controller *hose)
|
|||
* with incomplete address decoding but I'd rather not deal with
|
||||
* those outside of the reserved 64K legacy region.
|
||||
*/
|
||||
area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
|
||||
if (area == NULL)
|
||||
hose->io_base_alloc = ioremap_phb(phys_page, size_page);
|
||||
if (!hose->io_base_alloc)
|
||||
return -ENOMEM;
|
||||
hose->io_base_alloc = area->addr;
|
||||
hose->io_base_virt = (void __iomem *)(area->addr +
|
||||
hose->io_base_phys - phys_page);
|
||||
hose->io_base_virt = hose->io_base_alloc +
|
||||
hose->io_base_phys - phys_page;
|
||||
|
||||
pr_debug("IO mapping for PHB %pOF\n", hose->dn);
|
||||
pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n",
|
||||
|
@ -159,11 +182,6 @@ static int pcibios_map_phb_io_space(struct pci_controller *hose)
|
|||
pr_debug(" size=0x%016llx (alloc=0x%016lx)\n",
|
||||
hose->pci_io_size, size_page);
|
||||
|
||||
/* Establish the mapping */
|
||||
if (__ioremap_at(phys_page, area->addr, size_page,
|
||||
pgprot_noncached(PAGE_KERNEL)) == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Fixup hose IO resource */
|
||||
io_virt_offset = pcibios_io_space_offset(hose);
|
||||
hose->io_resource.start += io_virt_offset;
|
||||
|
|
|
@ -4,56 +4,6 @@
|
|||
#include <linux/slab.h>
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
/**
|
||||
* Low level function to establish the page tables for an IO mapping
|
||||
*/
|
||||
void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
|
||||
{
|
||||
int ret;
|
||||
unsigned long va = (unsigned long)ea;
|
||||
|
||||
/* We don't support the 4K PFN hack with ioremap */
|
||||
if (pgprot_val(prot) & H_PAGE_4K_PFN)
|
||||
return NULL;
|
||||
|
||||
if ((ea + size) >= (void *)IOREMAP_END) {
|
||||
pr_warn("Outside the supported range\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
WARN_ON(pa & ~PAGE_MASK);
|
||||
WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
|
||||
WARN_ON(size & ~PAGE_MASK);
|
||||
|
||||
if (slab_is_available()) {
|
||||
ret = ioremap_page_range(va, va + size, pa, prot);
|
||||
if (ret)
|
||||
unmap_kernel_range(va, size);
|
||||
} else {
|
||||
ret = early_ioremap_range(va, pa, size, prot);
|
||||
}
|
||||
|
||||
if (ret)
|
||||
return NULL;
|
||||
|
||||
return (void __iomem *)ea;
|
||||
}
|
||||
EXPORT_SYMBOL(__ioremap_at);
|
||||
|
||||
/**
|
||||
* Low level function to tear down the page tables for an IO mapping. This is
|
||||
* used for mappings that are manipulated manually, like partial unmapping of
|
||||
* PCI IOs or ISA space.
|
||||
*/
|
||||
void __iounmap_at(void *ea, unsigned long size)
|
||||
{
|
||||
WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
|
||||
WARN_ON(size & ~PAGE_MASK);
|
||||
|
||||
unmap_kernel_range((unsigned long)ea, size);
|
||||
}
|
||||
EXPORT_SYMBOL(__iounmap_at);
|
||||
|
||||
void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
|
||||
pgprot_t prot, void *caller)
|
||||
{
|
||||
|
|
|
@ -473,9 +473,9 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
|
|||
#define PAGE_SHARED __pgprot(0)
|
||||
#define PAGE_KERNEL __pgprot(0)
|
||||
#define swapper_pg_dir NULL
|
||||
#define TASK_SIZE 0xffffffffUL
|
||||
#define VMALLOC_START 0
|
||||
|
||||
#define TASK_SIZE 0xffffffffUL
|
||||
#define VMALLOC_END TASK_SIZE
|
||||
|
||||
static inline void __kernel_map_pages(struct page *page, int numpages, int enable) {}
|
||||
|
||||
|
|
|
@ -204,7 +204,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
|
|||
}
|
||||
|
||||
static void note_page(struct ptdump_state *pt_st, unsigned long addr,
|
||||
int level, unsigned long val)
|
||||
int level, u64 val)
|
||||
{
|
||||
struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
|
||||
u64 pa = PFN_PHYS(pte_pfn(__pte(val)));
|
||||
|
|
|
@ -305,12 +305,9 @@ void *restart_stack __section(.data);
|
|||
unsigned long stack_alloc(void)
|
||||
{
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
return (unsigned long)
|
||||
__vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
|
||||
VMALLOC_START, VMALLOC_END,
|
||||
THREADINFO_GFP,
|
||||
PAGE_KERNEL, 0, NUMA_NO_NODE,
|
||||
__builtin_return_address(0));
|
||||
return (unsigned long)__vmalloc_node(THREAD_SIZE, THREAD_SIZE,
|
||||
THREADINFO_GFP, NUMA_NO_NODE,
|
||||
__builtin_return_address(0));
|
||||
#else
|
||||
return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
|
||||
#endif
|
||||
|
|
|
@ -103,7 +103,8 @@ static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
|
|||
#if defined(CONFIG_MMU)
|
||||
struct vm_struct *vma;
|
||||
|
||||
vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
|
||||
vma = __get_vm_area_caller(map->size, VM_ALLOC, map->sq_addr,
|
||||
SQ_ADDRMAX, __builtin_return_address(0));
|
||||
if (!vma)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
|
@ -97,8 +97,7 @@ static int hv_cpu_init(unsigned int cpu)
|
|||
* not be stopped in the case of CPU offlining and the VM will hang.
|
||||
*/
|
||||
if (!*hvp) {
|
||||
*hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO,
|
||||
PAGE_KERNEL);
|
||||
*hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
|
||||
}
|
||||
|
||||
if (*hvp) {
|
||||
|
@ -379,7 +378,7 @@ void __init hyperv_init(void)
|
|||
guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
|
||||
wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
|
||||
|
||||
hv_hypercall_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
|
||||
hv_hypercall_pg = vmalloc_exec(PAGE_SIZE);
|
||||
if (hv_hypercall_pg == NULL) {
|
||||
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
|
||||
goto remove_cpuhp_state;
|
||||
|
|
|
@ -1279,8 +1279,7 @@ extern struct kmem_cache *x86_fpu_cache;
|
|||
#define __KVM_HAVE_ARCH_VM_ALLOC
|
||||
static inline struct kvm *kvm_arch_alloc_vm(void)
|
||||
{
|
||||
return __vmalloc(kvm_x86_ops.vm_size,
|
||||
GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
|
||||
return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
|
||||
}
|
||||
void kvm_arch_free_vm(struct kvm *kvm);
|
||||
|
||||
|
|
|
@ -20,6 +20,8 @@ typedef union {
|
|||
|
||||
#define SHARED_KERNEL_PMD 0
|
||||
|
||||
#define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED
|
||||
|
||||
/*
|
||||
* traditional i386 two-level paging structure:
|
||||
*/
|
||||
|
|
|
@ -27,6 +27,8 @@ typedef union {
|
|||
#define SHARED_KERNEL_PMD (!static_cpu_has(X86_FEATURE_PTI))
|
||||
#endif
|
||||
|
||||
#define ARCH_PAGE_TABLE_SYNC_MASK (SHARED_KERNEL_PMD ? 0 : PGTBL_PMD_MODIFIED)
|
||||
|
||||
/*
|
||||
* PGDIR_SHIFT determines what a top-level page table entry can map
|
||||
*/
|
||||
|
|
|
@ -159,4 +159,6 @@ extern unsigned int ptrs_per_p4d;
|
|||
|
||||
#define PGD_KERNEL_START ((PAGE_SIZE / 2) / sizeof(pgd_t))
|
||||
|
||||
#define ARCH_PAGE_TABLE_SYNC_MASK (pgtable_l5_enabled() ? PGTBL_PGD_MODIFIED : PGTBL_P4D_MODIFIED)
|
||||
|
||||
#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
|
||||
|
|
|
@ -194,7 +194,6 @@ enum page_cache_mode {
|
|||
#define _PAGE_TABLE_NOENC (__PP|__RW|_USR|___A| 0|___D| 0| 0)
|
||||
#define _PAGE_TABLE (__PP|__RW|_USR|___A| 0|___D| 0| 0| _ENC)
|
||||
#define __PAGE_KERNEL_RO (__PP| 0| 0|___A|__NX|___D| 0|___G)
|
||||
#define __PAGE_KERNEL_RX (__PP| 0| 0|___A| 0|___D| 0|___G)
|
||||
#define __PAGE_KERNEL_NOCACHE (__PP|__RW| 0|___A|__NX|___D| 0|___G| __NC)
|
||||
#define __PAGE_KERNEL_VVAR (__PP| 0|_USR|___A|__NX|___D| 0|___G)
|
||||
#define __PAGE_KERNEL_LARGE (__PP|__RW| 0|___A|__NX|___D|_PSE|___G)
|
||||
|
@ -220,7 +219,6 @@ enum page_cache_mode {
|
|||
#define PAGE_KERNEL_RO __pgprot_mask(__PAGE_KERNEL_RO | _ENC)
|
||||
#define PAGE_KERNEL_EXEC __pgprot_mask(__PAGE_KERNEL_EXEC | _ENC)
|
||||
#define PAGE_KERNEL_EXEC_NOENC __pgprot_mask(__PAGE_KERNEL_EXEC | 0)
|
||||
#define PAGE_KERNEL_RX __pgprot_mask(__PAGE_KERNEL_RX | _ENC)
|
||||
#define PAGE_KERNEL_NOCACHE __pgprot_mask(__PAGE_KERNEL_NOCACHE | _ENC)
|
||||
#define PAGE_KERNEL_LARGE __pgprot_mask(__PAGE_KERNEL_LARGE | _ENC)
|
||||
#define PAGE_KERNEL_LARGE_EXEC __pgprot_mask(__PAGE_KERNEL_LARGE_EXEC | _ENC)
|
||||
|
@ -284,6 +282,12 @@ typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
|
|||
|
||||
typedef struct { pgdval_t pgd; } pgd_t;
|
||||
|
||||
static inline pgprot_t pgprot_nx(pgprot_t prot)
|
||||
{
|
||||
return __pgprot(pgprot_val(prot) | _PAGE_NX);
|
||||
}
|
||||
#define pgprot_nx pgprot_nx
|
||||
|
||||
#ifdef CONFIG_X86_PAE
|
||||
|
||||
/*
|
||||
|
|
|
@ -12,27 +12,6 @@ struct task_struct *__switch_to_asm(struct task_struct *prev,
|
|||
__visible struct task_struct *__switch_to(struct task_struct *prev,
|
||||
struct task_struct *next);
|
||||
|
||||
/* This runs runs on the previous thread's stack. */
|
||||
static inline void prepare_switch_to(struct task_struct *next)
|
||||
{
|
||||
#ifdef CONFIG_VMAP_STACK
|
||||
/*
|
||||
* If we switch to a stack that has a top-level paging entry
|
||||
* that is not present in the current mm, the resulting #PF will
|
||||
* will be promoted to a double-fault and we'll panic. Probe
|
||||
* the new stack now so that vmalloc_fault can fix up the page
|
||||
* tables if needed. This can only happen if we use a stack
|
||||
* in vmap space.
|
||||
*
|
||||
* We assume that the stack is aligned so that it never spans
|
||||
* more than one top-level paging entry.
|
||||
*
|
||||
* To minimize cache pollution, just follow the stack pointer.
|
||||
*/
|
||||
READ_ONCE(*(unsigned char *)next->thread.sp);
|
||||
#endif
|
||||
}
|
||||
|
||||
asmlinkage void ret_from_fork(void);
|
||||
|
||||
/*
|
||||
|
@ -67,8 +46,6 @@ struct fork_frame {
|
|||
|
||||
#define switch_to(prev, next, last) \
|
||||
do { \
|
||||
prepare_switch_to(next); \
|
||||
\
|
||||
((last) = __switch_to_asm((prev), (next))); \
|
||||
} while (0)
|
||||
|
||||
|
|
|
@ -43,7 +43,7 @@ static int map_irq_stack(unsigned int cpu)
|
|||
pages[i] = pfn_to_page(pa >> PAGE_SHIFT);
|
||||
}
|
||||
|
||||
va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
|
||||
va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL);
|
||||
if (!va)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
|
@ -287,9 +287,9 @@ void __init setup_per_cpu_areas(void)
|
|||
/*
|
||||
* Sync back kernel address range again. We already did this in
|
||||
* setup_arch(), but percpu data also needs to be available in
|
||||
* the smpboot asm. We can't reliably pick up percpu mappings
|
||||
* using vmalloc_fault(), because exception dispatch needs
|
||||
* percpu data.
|
||||
* the smpboot asm and arch_sync_kernel_mappings() doesn't sync to
|
||||
* swapper_pg_dir on 32-bit. The per-cpu mappings need to be available
|
||||
* there too.
|
||||
*
|
||||
* FIXME: Can the later sync in setup_cpu_entry_areas() replace
|
||||
* this call?
|
||||
|
|
|
@ -336,8 +336,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
|
|||
/* Avoid using vmalloc for smaller buffers. */
|
||||
size = npages * sizeof(struct page *);
|
||||
if (size > PAGE_SIZE)
|
||||
pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO,
|
||||
PAGE_KERNEL);
|
||||
pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
|
||||
else
|
||||
pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
|
||||
|
||||
|
|
|
@ -249,10 +249,22 @@ static void note_wx(struct pg_state *st, unsigned long addr)
|
|||
(void *)st->start_address);
|
||||
}
|
||||
|
||||
static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
|
||||
static void effective_prot(struct ptdump_state *pt_st, int level, u64 val)
|
||||
{
|
||||
return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
|
||||
((prot1 | prot2) & _PAGE_NX);
|
||||
struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
|
||||
pgprotval_t prot = val & PTE_FLAGS_MASK;
|
||||
pgprotval_t effective;
|
||||
|
||||
if (level > 0) {
|
||||
pgprotval_t higher_prot = st->prot_levels[level - 1];
|
||||
|
||||
effective = (higher_prot & prot & (_PAGE_USER | _PAGE_RW)) |
|
||||
((higher_prot | prot) & _PAGE_NX);
|
||||
} else {
|
||||
effective = prot;
|
||||
}
|
||||
|
||||
st->prot_levels[level] = effective;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -261,7 +273,7 @@ static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
|
|||
* print what we collected so far.
|
||||
*/
|
||||
static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
|
||||
unsigned long val)
|
||||
u64 val)
|
||||
{
|
||||
struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
|
||||
pgprotval_t new_prot, new_eff;
|
||||
|
@ -270,16 +282,10 @@ static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
|
|||
struct seq_file *m = st->seq;
|
||||
|
||||
new_prot = val & PTE_FLAGS_MASK;
|
||||
|
||||
if (level > 0) {
|
||||
new_eff = effective_prot(st->prot_levels[level - 1],
|
||||
new_prot);
|
||||
} else {
|
||||
new_eff = new_prot;
|
||||
}
|
||||
|
||||
if (level >= 0)
|
||||
st->prot_levels[level] = new_eff;
|
||||
if (!val)
|
||||
new_eff = 0;
|
||||
else
|
||||
new_eff = st->prot_levels[level];
|
||||
|
||||
/*
|
||||
* If we have a "break" in the series, we need to flush the state that
|
||||
|
@ -374,6 +380,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m,
|
|||
struct pg_state st = {
|
||||
.ptdump = {
|
||||
.note_page = note_page,
|
||||
.effective_prot = effective_prot,
|
||||
.range = ptdump_ranges
|
||||
},
|
||||
.level = -1,
|
||||
|
|
|
@ -190,16 +190,13 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
|
|||
return pmd_k;
|
||||
}
|
||||
|
||||
static void vmalloc_sync(void)
|
||||
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
|
||||
{
|
||||
unsigned long address;
|
||||
unsigned long addr;
|
||||
|
||||
if (SHARED_KERNEL_PMD)
|
||||
return;
|
||||
|
||||
for (address = VMALLOC_START & PMD_MASK;
|
||||
address >= TASK_SIZE_MAX && address < VMALLOC_END;
|
||||
address += PMD_SIZE) {
|
||||
for (addr = start & PMD_MASK;
|
||||
addr >= TASK_SIZE_MAX && addr < VMALLOC_END;
|
||||
addr += PMD_SIZE) {
|
||||
struct page *page;
|
||||
|
||||
spin_lock(&pgd_lock);
|
||||
|
@ -210,61 +207,13 @@ static void vmalloc_sync(void)
|
|||
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
|
||||
|
||||
spin_lock(pgt_lock);
|
||||
vmalloc_sync_one(page_address(page), address);
|
||||
vmalloc_sync_one(page_address(page), addr);
|
||||
spin_unlock(pgt_lock);
|
||||
}
|
||||
spin_unlock(&pgd_lock);
|
||||
}
|
||||
}
|
||||
|
||||
void vmalloc_sync_mappings(void)
|
||||
{
|
||||
vmalloc_sync();
|
||||
}
|
||||
|
||||
void vmalloc_sync_unmappings(void)
|
||||
{
|
||||
vmalloc_sync();
|
||||
}
|
||||
|
||||
/*
|
||||
* 32-bit:
|
||||
*
|
||||
* Handle a fault on the vmalloc or module mapping area
|
||||
*/
|
||||
static noinline int vmalloc_fault(unsigned long address)
|
||||
{
|
||||
unsigned long pgd_paddr;
|
||||
pmd_t *pmd_k;
|
||||
pte_t *pte_k;
|
||||
|
||||
/* Make sure we are in vmalloc area: */
|
||||
if (!(address >= VMALLOC_START && address < VMALLOC_END))
|
||||
return -1;
|
||||
|
||||
/*
|
||||
* Synchronize this task's top level page-table
|
||||
* with the 'reference' page table.
|
||||
*
|
||||
* Do _not_ use "current" here. We might be inside
|
||||
* an interrupt in the middle of a task switch..
|
||||
*/
|
||||
pgd_paddr = read_cr3_pa();
|
||||
pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
|
||||
if (!pmd_k)
|
||||
return -1;
|
||||
|
||||
if (pmd_large(*pmd_k))
|
||||
return 0;
|
||||
|
||||
pte_k = pte_offset_kernel(pmd_k, address);
|
||||
if (!pte_present(*pte_k))
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
NOKPROBE_SYMBOL(vmalloc_fault);
|
||||
|
||||
/*
|
||||
* Did it hit the DOS screen memory VA from vm86 mode?
|
||||
*/
|
||||
|
@ -329,96 +278,6 @@ out:
|
|||
|
||||
#else /* CONFIG_X86_64: */
|
||||
|
||||
void vmalloc_sync_mappings(void)
|
||||
{
|
||||
/*
|
||||
* 64-bit mappings might allocate new p4d/pud pages
|
||||
* that need to be propagated to all tasks' PGDs.
|
||||
*/
|
||||
sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
|
||||
}
|
||||
|
||||
void vmalloc_sync_unmappings(void)
|
||||
{
|
||||
/*
|
||||
* Unmappings never allocate or free p4d/pud pages.
|
||||
* No work is required here.
|
||||
*/
|
||||
}
|
||||
|
||||
/*
|
||||
* 64-bit:
|
||||
*
|
||||
* Handle a fault on the vmalloc area
|
||||
*/
|
||||
static noinline int vmalloc_fault(unsigned long address)
|
||||
{
|
||||
pgd_t *pgd, *pgd_k;
|
||||
p4d_t *p4d, *p4d_k;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
|
||||
/* Make sure we are in vmalloc area: */
|
||||
if (!(address >= VMALLOC_START && address < VMALLOC_END))
|
||||
return -1;
|
||||
|
||||
/*
|
||||
* Copy kernel mappings over when needed. This can also
|
||||
* happen within a race in page table update. In the later
|
||||
* case just flush:
|
||||
*/
|
||||
pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
|
||||
pgd_k = pgd_offset_k(address);
|
||||
if (pgd_none(*pgd_k))
|
||||
return -1;
|
||||
|
||||
if (pgtable_l5_enabled()) {
|
||||
if (pgd_none(*pgd)) {
|
||||
set_pgd(pgd, *pgd_k);
|
||||
arch_flush_lazy_mmu_mode();
|
||||
} else {
|
||||
BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k));
|
||||
}
|
||||
}
|
||||
|
||||
/* With 4-level paging, copying happens on the p4d level. */
|
||||
p4d = p4d_offset(pgd, address);
|
||||
p4d_k = p4d_offset(pgd_k, address);
|
||||
if (p4d_none(*p4d_k))
|
||||
return -1;
|
||||
|
||||
if (p4d_none(*p4d) && !pgtable_l5_enabled()) {
|
||||
set_p4d(p4d, *p4d_k);
|
||||
arch_flush_lazy_mmu_mode();
|
||||
} else {
|
||||
BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k));
|
||||
}
|
||||
|
||||
BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
|
||||
|
||||
pud = pud_offset(p4d, address);
|
||||
if (pud_none(*pud))
|
||||
return -1;
|
||||
|
||||
if (pud_large(*pud))
|
||||
return 0;
|
||||
|
||||
pmd = pmd_offset(pud, address);
|
||||
if (pmd_none(*pmd))
|
||||
return -1;
|
||||
|
||||
if (pmd_large(*pmd))
|
||||
return 0;
|
||||
|
||||
pte = pte_offset_kernel(pmd, address);
|
||||
if (!pte_present(*pte))
|
||||
return -1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
NOKPROBE_SYMBOL(vmalloc_fault);
|
||||
|
||||
#ifdef CONFIG_CPU_SUP_AMD
|
||||
static const char errata93_warning[] =
|
||||
KERN_ERR
|
||||
|
@ -1257,29 +1116,6 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
|
|||
*/
|
||||
WARN_ON_ONCE(hw_error_code & X86_PF_PK);
|
||||
|
||||
/*
|
||||
* We can fault-in kernel-space virtual memory on-demand. The
|
||||
* 'reference' page table is init_mm.pgd.
|
||||
*
|
||||
* NOTE! We MUST NOT take any locks for this case. We may
|
||||
* be in an interrupt or a critical region, and should
|
||||
* only copy the information from the master page table,
|
||||
* nothing more.
|
||||
*
|
||||
* Before doing this on-demand faulting, ensure that the
|
||||
* fault is not any of the following:
|
||||
* 1. A fault on a PTE with a reserved bit set.
|
||||
* 2. A fault caused by a user-mode access. (Do not demand-
|
||||
* fault kernel memory due to user-mode accesses).
|
||||
* 3. A fault caused by a page-level protection violation.
|
||||
* (A demand fault would be on a non-present page which
|
||||
* would have X86_PF_PROT==0).
|
||||
*/
|
||||
if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
|
||||
if (vmalloc_fault(address) >= 0)
|
||||
return;
|
||||
}
|
||||
|
||||
/* Was the fault spurious, caused by lazy TLB invalidation? */
|
||||
if (spurious_kernel_fault(hw_error_code, address))
|
||||
return;
|
||||
|
|
|
@ -218,6 +218,11 @@ void sync_global_pgds(unsigned long start, unsigned long end)
|
|||
sync_global_pgds_l4(start, end);
|
||||
}
|
||||
|
||||
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
|
||||
{
|
||||
sync_global_pgds(start, end);
|
||||
}
|
||||
|
||||
/*
|
||||
* NOTE: This function is marked __ref because it calls __init function
|
||||
* (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
|
||||
|
|
|
@ -448,13 +448,7 @@ static void __init pti_clone_user_shared(void)
|
|||
* the sp1 and sp2 slots.
|
||||
*
|
||||
* This is done for all possible CPUs during boot to ensure
|
||||
* that it's propagated to all mms. If we were to add one of
|
||||
* these mappings during CPU hotplug, we would need to take
|
||||
* some measure to make sure that every mm that subsequently
|
||||
* ran on that CPU would have the relevant PGD entry in its
|
||||
* pagetables. The usual vmalloc_fault() mechanism would not
|
||||
* work for page faults taken in entry_SYSCALL_64 before RSP
|
||||
* is set up.
|
||||
* that it's propagated to all mms.
|
||||
*/
|
||||
|
||||
unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
|
||||
|
|
|
@ -161,34 +161,6 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
|
|||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
static void sync_current_stack_to_mm(struct mm_struct *mm)
|
||||
{
|
||||
unsigned long sp = current_stack_pointer;
|
||||
pgd_t *pgd = pgd_offset(mm, sp);
|
||||
|
||||
if (pgtable_l5_enabled()) {
|
||||
if (unlikely(pgd_none(*pgd))) {
|
||||
pgd_t *pgd_ref = pgd_offset_k(sp);
|
||||
|
||||
set_pgd(pgd, *pgd_ref);
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
* "pgd" is faked. The top level entries are "p4d"s, so sync
|
||||
* the p4d. This compiles to approximately the same code as
|
||||
* the 5-level case.
|
||||
*/
|
||||
p4d_t *p4d = p4d_offset(pgd, sp);
|
||||
|
||||
if (unlikely(p4d_none(*p4d))) {
|
||||
pgd_t *pgd_ref = pgd_offset_k(sp);
|
||||
p4d_t *p4d_ref = p4d_offset(pgd_ref, sp);
|
||||
|
||||
set_p4d(p4d, *p4d_ref);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
|
||||
{
|
||||
unsigned long next_tif = task_thread_info(next)->flags;
|
||||
|
@ -377,15 +349,6 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
|
|||
*/
|
||||
cond_ibpb(tsk);
|
||||
|
||||
if (IS_ENABLED(CONFIG_VMAP_STACK)) {
|
||||
/*
|
||||
* If our current stack is in vmalloc space and isn't
|
||||
* mapped in the new pgd, we'll double-fault. Forcibly
|
||||
* map it.
|
||||
*/
|
||||
sync_current_stack_to_mm(next);
|
||||
}
|
||||
|
||||
/*
|
||||
* Stop remote flushes for the previous mm.
|
||||
* Skip kernel threads; we never send init_mm TLB flushing IPIs,
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
#include <linux/blk-mq.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/init.h>
|
||||
|
|
|
@ -167,12 +167,6 @@ int ghes_estatus_pool_init(int num_ghes)
|
|||
if (!addr)
|
||||
goto err_pool_alloc;
|
||||
|
||||
/*
|
||||
* New allocation must be visible in all pgd before it can be found by
|
||||
* an NMI allocating from the pool.
|
||||
*/
|
||||
vmalloc_sync_mappings();
|
||||
|
||||
rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
|
||||
if (rc)
|
||||
goto err_pool_add;
|
||||
|
|
|
@ -445,7 +445,7 @@ static ssize_t node_read_meminfo(struct device *dev,
|
|||
nid, sum_zone_node_page_state(nid, NR_KERNEL_SCS_KB),
|
||||
#endif
|
||||
nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
|
||||
nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
|
||||
nid, 0UL,
|
||||
nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
|
||||
nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
|
||||
nid, K(sreclaimable +
|
||||
|
|
|
@ -396,9 +396,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
|
|||
bytes = sizeof(struct page *)*want;
|
||||
new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
|
||||
if (!new_pages) {
|
||||
new_pages = __vmalloc(bytes,
|
||||
GFP_NOIO | __GFP_ZERO,
|
||||
PAGE_KERNEL);
|
||||
new_pages = __vmalloc(bytes, GFP_NOIO | __GFP_ZERO);
|
||||
if (!new_pages)
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -919,7 +919,7 @@ static void loop_unprepare_queue(struct loop_device *lo)
|
|||
|
||||
static int loop_kthread_worker_fn(void *worker_ptr)
|
||||
{
|
||||
current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
|
||||
current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
|
||||
return kthread_worker_fn(worker_ptr);
|
||||
}
|
||||
|
||||
|
|
|
@ -377,6 +377,7 @@ static int dax_open(struct inode *inode, struct file *filp)
|
|||
inode->i_mapping->a_ops = &dev_dax_aops;
|
||||
filp->f_mapping = inode->i_mapping;
|
||||
filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
|
||||
filp->f_sb_err = file_sample_sb_err(filp);
|
||||
filp->private_data = dev_dax;
|
||||
inode->i_flags = S_DAX;
|
||||
|
||||
|
|
|
@ -43,15 +43,6 @@
|
|||
|
||||
#define DEBUG_SCATTER 0
|
||||
|
||||
static inline void *drm_vmalloc_dma(unsigned long size)
|
||||
{
|
||||
#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
|
||||
return __vmalloc(size, GFP_KERNEL, pgprot_noncached_wc(PAGE_KERNEL));
|
||||
#else
|
||||
return vmalloc_32(size);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void drm_sg_cleanup(struct drm_sg_mem * entry)
|
||||
{
|
||||
struct page *page;
|
||||
|
@ -126,7 +117,7 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
|
||||
entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
|
||||
if (!entry->virtual) {
|
||||
kfree(entry->busaddr);
|
||||
kfree(entry->pagelist);
|
||||
|
|
|
@ -154,8 +154,8 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
|
|||
file_size += sizeof(*iter.hdr) * n_obj;
|
||||
|
||||
/* Allocate the file in vmalloc memory, it's likely to be big */
|
||||
iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
|
||||
PAGE_KERNEL);
|
||||
iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
|
||||
__GFP_NORETRY);
|
||||
if (!iter.start) {
|
||||
mutex_unlock(&gpu->mmu_context->lock);
|
||||
dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
|
||||
|
|
|
@ -66,7 +66,7 @@ static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
|
|||
{
|
||||
struct mock_dmabuf *mock = to_mock(dma_buf);
|
||||
|
||||
return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
|
||||
return vm_map_ram(mock->pages, mock->npages, 0);
|
||||
}
|
||||
|
||||
static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
|
||||
|
|
|
@ -145,9 +145,8 @@ static int pblk_l2p_init(struct pblk *pblk, bool factory_init)
|
|||
int ret = 0;
|
||||
|
||||
map_size = pblk_trans_map_size(pblk);
|
||||
pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN
|
||||
| __GFP_RETRY_MAYFAIL | __GFP_HIGHMEM,
|
||||
PAGE_KERNEL);
|
||||
pblk->trans_map = __vmalloc(map_size, GFP_KERNEL | __GFP_NOWARN |
|
||||
__GFP_RETRY_MAYFAIL | __GFP_HIGHMEM);
|
||||
if (!pblk->trans_map) {
|
||||
pblk_err(pblk, "failed to allocate L2P (need %zu of memory)\n",
|
||||
map_size);
|
||||
|
|
|
@ -400,13 +400,13 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
|
|||
*/
|
||||
if (gfp_mask & __GFP_NORETRY) {
|
||||
unsigned noio_flag = memalloc_noio_save();
|
||||
void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
|
||||
void *ptr = __vmalloc(c->block_size, gfp_mask);
|
||||
|
||||
memalloc_noio_restore(noio_flag);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
|
||||
return __vmalloc(c->block_size, gfp_mask);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -324,14 +324,6 @@ static void end_bitmap_write(struct buffer_head *bh, int uptodate)
|
|||
wake_up(&bitmap->write_wait);
|
||||
}
|
||||
|
||||
/* copied from buffer.c */
|
||||
static void
|
||||
__clear_page_buffers(struct page *page)
|
||||
{
|
||||
ClearPagePrivate(page);
|
||||
set_page_private(page, 0);
|
||||
put_page(page);
|
||||
}
|
||||
static void free_buffers(struct page *page)
|
||||
{
|
||||
struct buffer_head *bh;
|
||||
|
@ -345,7 +337,7 @@ static void free_buffers(struct page *page)
|
|||
free_buffer_head(bh);
|
||||
bh = next;
|
||||
}
|
||||
__clear_page_buffers(page);
|
||||
detach_page_private(page);
|
||||
put_page(page);
|
||||
}
|
||||
|
||||
|
@ -374,7 +366,7 @@ static int read_page(struct file *file, unsigned long index,
|
|||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
attach_page_buffers(page, bh);
|
||||
attach_page_private(page, bh);
|
||||
blk_cur = index << (PAGE_SHIFT - inode->i_blkbits);
|
||||
while (bh) {
|
||||
block = blk_cur;
|
||||
|
|
|
@ -309,8 +309,7 @@ static void *vb2_dma_sg_vaddr(void *buf_priv)
|
|||
if (buf->db_attach)
|
||||
buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
|
||||
else
|
||||
buf->vaddr = vm_map_ram(buf->pages,
|
||||
buf->num_pages, -1, PAGE_KERNEL);
|
||||
buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
|
||||
}
|
||||
|
||||
/* add offset in case userptr is not page-aligned */
|
||||
|
|
|
@ -107,8 +107,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
|
|||
buf->vaddr = (__force void *)
|
||||
ioremap(__pfn_to_phys(nums[0]), size + offset);
|
||||
} else {
|
||||
buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
|
||||
PAGE_KERNEL);
|
||||
buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
|
||||
}
|
||||
|
||||
if (!buf->vaddr)
|
||||
|
|
|
@ -92,7 +92,7 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
|
|||
{
|
||||
struct ivtv_dma_page_info user_dma;
|
||||
struct ivtv_user_dma *dma = &itv->udma;
|
||||
int i, err;
|
||||
int err;
|
||||
|
||||
IVTV_DEBUG_DMA("ivtv_udma_setup, dst: 0x%08x\n", (unsigned int)ivtv_dest_addr);
|
||||
|
||||
|
@ -111,16 +111,15 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Get user pages for DMA Xfer */
|
||||
err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
|
||||
/* Pin user pages for DMA Xfer */
|
||||
err = pin_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
|
||||
dma->map, FOLL_FORCE);
|
||||
|
||||
if (user_dma.page_count != err) {
|
||||
IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
|
||||
err, user_dma.page_count);
|
||||
if (err >= 0) {
|
||||
for (i = 0; i < err; i++)
|
||||
put_page(dma->map[i]);
|
||||
unpin_user_pages(dma->map, err);
|
||||
return -EINVAL;
|
||||
}
|
||||
return err;
|
||||
|
@ -130,9 +129,7 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
|
|||
|
||||
/* Fill SG List with new values */
|
||||
if (ivtv_udma_fill_sg_list(dma, &user_dma, 0) < 0) {
|
||||
for (i = 0; i < dma->page_count; i++) {
|
||||
put_page(dma->map[i]);
|
||||
}
|
||||
unpin_user_pages(dma->map, dma->page_count);
|
||||
dma->page_count = 0;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -153,7 +150,6 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
|
|||
void ivtv_udma_unmap(struct ivtv *itv)
|
||||
{
|
||||
struct ivtv_user_dma *dma = &itv->udma;
|
||||
int i;
|
||||
|
||||
IVTV_DEBUG_INFO("ivtv_unmap_user_dma\n");
|
||||
|
||||
|
@ -169,10 +165,7 @@ void ivtv_udma_unmap(struct ivtv *itv)
|
|||
/* sync DMA */
|
||||
ivtv_udma_sync_for_cpu(itv);
|
||||
|
||||
/* Release User Pages */
|
||||
for (i = 0; i < dma->page_count; i++) {
|
||||
put_page(dma->map[i]);
|
||||
}
|
||||
unpin_user_pages(dma->map, dma->page_count);
|
||||
dma->page_count = 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -30,7 +30,6 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
|
|||
struct yuv_playback_info *yi = &itv->yuv_info;
|
||||
u8 frame = yi->draw_frame;
|
||||
struct yuv_frame_info *f = &yi->new_frame_info[frame];
|
||||
int i;
|
||||
int y_pages, uv_pages;
|
||||
unsigned long y_buffer_offset, uv_buffer_offset;
|
||||
int y_decode_height, uv_decode_height, y_size;
|
||||
|
@ -62,12 +61,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
|
|||
ivtv_udma_get_page_info (&y_dma, (unsigned long)args->y_source, 720 * y_decode_height);
|
||||
ivtv_udma_get_page_info (&uv_dma, (unsigned long)args->uv_source, 360 * uv_decode_height);
|
||||
|
||||
/* Get user pages for DMA Xfer */
|
||||
y_pages = get_user_pages_unlocked(y_dma.uaddr,
|
||||
/* Pin user pages for DMA Xfer */
|
||||
y_pages = pin_user_pages_unlocked(y_dma.uaddr,
|
||||
y_dma.page_count, &dma->map[0], FOLL_FORCE);
|
||||
uv_pages = 0; /* silence gcc. value is set and consumed only if: */
|
||||
if (y_pages == y_dma.page_count) {
|
||||
uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
|
||||
uv_pages = pin_user_pages_unlocked(uv_dma.uaddr,
|
||||
uv_dma.page_count, &dma->map[y_pages],
|
||||
FOLL_FORCE);
|
||||
}
|
||||
|
@ -81,8 +80,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
|
|||
uv_pages, uv_dma.page_count);
|
||||
|
||||
if (uv_pages >= 0) {
|
||||
for (i = 0; i < uv_pages; i++)
|
||||
put_page(dma->map[y_pages + i]);
|
||||
unpin_user_pages(&dma->map[y_pages], uv_pages);
|
||||
rc = -EFAULT;
|
||||
} else {
|
||||
rc = uv_pages;
|
||||
|
@ -93,8 +91,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
|
|||
y_pages, y_dma.page_count);
|
||||
}
|
||||
if (y_pages >= 0) {
|
||||
for (i = 0; i < y_pages; i++)
|
||||
put_page(dma->map[i]);
|
||||
unpin_user_pages(dma->map, y_pages);
|
||||
/*
|
||||
* Inherit the -EFAULT from rc's
|
||||
* initialization, but allow it to be
|
||||
|
@ -112,9 +109,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
|
|||
/* Fill & map SG List */
|
||||
if (ivtv_udma_fill_sg_list (dma, &uv_dma, ivtv_udma_fill_sg_list (dma, &y_dma, 0)) < 0) {
|
||||
IVTV_DEBUG_WARN("could not allocate bounce buffers for highmem userspace buffers\n");
|
||||
for (i = 0; i < dma->page_count; i++) {
|
||||
put_page(dma->map[i]);
|
||||
}
|
||||
unpin_user_pages(dma->map, dma->page_count);
|
||||
dma->page_count = 0;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
|
|
@ -281,10 +281,10 @@ static int ivtvfb_prep_dec_dma_to_device(struct ivtv *itv,
|
|||
/* Map User DMA */
|
||||
if (ivtv_udma_setup(itv, ivtv_dest_addr, userbuf, size_in_bytes) <= 0) {
|
||||
mutex_unlock(&itv->udma.lock);
|
||||
IVTVFB_WARN("ivtvfb_prep_dec_dma_to_device, Error with get_user_pages: %d bytes, %d pages returned\n",
|
||||
IVTVFB_WARN("ivtvfb_prep_dec_dma_to_device, Error with pin_user_pages: %d bytes, %d pages returned\n",
|
||||
size_in_bytes, itv->udma.page_count);
|
||||
|
||||
/* get_user_pages must have failed completely */
|
||||
/* pin_user_pages must have failed completely */
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
|
|
|
@ -1297,7 +1297,7 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
|
|||
if (!ubi_dbg_chk_io(ubi))
|
||||
return 0;
|
||||
|
||||
buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
|
||||
buf1 = __vmalloc(len, GFP_NOFS);
|
||||
if (!buf1) {
|
||||
ubi_err(ubi, "cannot allocate memory to check writes");
|
||||
return 0;
|
||||
|
@ -1361,7 +1361,7 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
|
|||
if (!ubi_dbg_chk_io(ubi))
|
||||
return 0;
|
||||
|
||||
buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
|
||||
buf = __vmalloc(len, GFP_NOFS);
|
||||
if (!buf) {
|
||||
ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
|
||||
return 0;
|
||||
|
|
|
@ -178,10 +178,9 @@ static int electra_cf_probe(struct platform_device *ofdev)
|
|||
struct device_node *np = ofdev->dev.of_node;
|
||||
struct electra_cf_socket *cf;
|
||||
struct resource mem, io;
|
||||
int status;
|
||||
int status = -ENOMEM;
|
||||
const unsigned int *prop;
|
||||
int err;
|
||||
struct vm_struct *area;
|
||||
|
||||
err = of_address_to_resource(np, 0, &mem);
|
||||
if (err)
|
||||
|
@ -202,30 +201,19 @@ static int electra_cf_probe(struct platform_device *ofdev)
|
|||
cf->mem_phys = mem.start;
|
||||
cf->mem_size = PAGE_ALIGN(resource_size(&mem));
|
||||
cf->mem_base = ioremap(cf->mem_phys, cf->mem_size);
|
||||
if (!cf->mem_base)
|
||||
goto out_free_cf;
|
||||
cf->io_size = PAGE_ALIGN(resource_size(&io));
|
||||
|
||||
area = __get_vm_area(cf->io_size, 0, PHB_IO_BASE, PHB_IO_END);
|
||||
if (area == NULL) {
|
||||
status = -ENOMEM;
|
||||
goto fail1;
|
||||
}
|
||||
|
||||
cf->io_virt = (void __iomem *)(area->addr);
|
||||
cf->io_virt = ioremap_phb(io.start, cf->io_size);
|
||||
if (!cf->io_virt)
|
||||
goto out_unmap_mem;
|
||||
|
||||
cf->gpio_base = ioremap(0xfc103000, 0x1000);
|
||||
if (!cf->gpio_base)
|
||||
goto out_unmap_virt;
|
||||
dev_set_drvdata(device, cf);
|
||||
|
||||
if (!cf->mem_base || !cf->io_virt || !cf->gpio_base ||
|
||||
(__ioremap_at(io.start, cf->io_virt, cf->io_size,
|
||||
pgprot_noncached(PAGE_KERNEL)) == NULL)) {
|
||||
dev_err(device, "can't ioremap ranges\n");
|
||||
status = -ENOMEM;
|
||||
goto fail1;
|
||||
}
|
||||
|
||||
|
||||
cf->io_base = (unsigned long)cf->io_virt - VMALLOC_END;
|
||||
|
||||
cf->iomem.start = (unsigned long)cf->mem_base;
|
||||
cf->iomem.end = (unsigned long)cf->mem_base + (mem.end - mem.start);
|
||||
cf->iomem.flags = IORESOURCE_MEM;
|
||||
|
@ -305,14 +293,13 @@ fail1:
|
|||
if (cf->irq)
|
||||
free_irq(cf->irq, cf);
|
||||
|
||||
if (cf->io_virt)
|
||||
__iounmap_at(cf->io_virt, cf->io_size);
|
||||
if (cf->mem_base)
|
||||
iounmap(cf->mem_base);
|
||||
if (cf->gpio_base)
|
||||
iounmap(cf->gpio_base);
|
||||
if (area)
|
||||
device_init_wakeup(&ofdev->dev, 0);
|
||||
iounmap(cf->gpio_base);
|
||||
out_unmap_virt:
|
||||
device_init_wakeup(&ofdev->dev, 0);
|
||||
iounmap(cf->io_virt);
|
||||
out_unmap_mem:
|
||||
iounmap(cf->mem_base);
|
||||
out_free_cf:
|
||||
kfree(cf);
|
||||
return status;
|
||||
|
||||
|
@ -330,7 +317,7 @@ static int electra_cf_remove(struct platform_device *ofdev)
|
|||
free_irq(cf->irq, cf);
|
||||
del_timer_sync(&cf->timer);
|
||||
|
||||
__iounmap_at(cf->io_virt, cf->io_size);
|
||||
iounmap(cf->io_virt);
|
||||
iounmap(cf->mem_base);
|
||||
iounmap(cf->gpio_base);
|
||||
release_mem_region(cf->mem_phys, cf->mem_size);
|
||||
|
|
|
@ -136,8 +136,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
|
|||
|
||||
while (bufsize >= SECTOR_SIZE) {
|
||||
buf = __vmalloc(bufsize,
|
||||
GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY,
|
||||
PAGE_KERNEL);
|
||||
GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY);
|
||||
if (buf) {
|
||||
*buflen = bufsize;
|
||||
return buf;
|
||||
|
|
|
@ -99,12 +99,12 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
|
|||
|
||||
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
|
||||
{
|
||||
void *addr = vm_map_ram(pages, num, -1, pgprot);
|
||||
void *addr = vmap(pages, num, VM_MAP, pgprot);
|
||||
|
||||
if (!addr)
|
||||
return -ENOMEM;
|
||||
memset(addr, 0, PAGE_SIZE * num);
|
||||
vm_unmap_ram(addr, num);
|
||||
vunmap(addr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -15,14 +15,12 @@ struct imgu_device;
|
|||
* @size: size of the buffer in bytes.
|
||||
* @vaddr: kernel virtual address.
|
||||
* @daddr: iova dma address to access IPU3.
|
||||
* @vma: private, a pointer to &struct vm_struct,
|
||||
* used for imgu_dmamap_free.
|
||||
*/
|
||||
struct imgu_css_map {
|
||||
size_t size;
|
||||
void *vaddr;
|
||||
dma_addr_t daddr;
|
||||
struct vm_struct *vma;
|
||||
struct page **pages;
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
@ -96,6 +96,7 @@ void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
|
|||
unsigned long shift = iova_shift(&imgu->iova_domain);
|
||||
struct device *dev = &imgu->pci_dev->dev;
|
||||
size_t size = PAGE_ALIGN(len);
|
||||
int count = size >> PAGE_SHIFT;
|
||||
struct page **pages;
|
||||
dma_addr_t iovaddr;
|
||||
struct iova *iova;
|
||||
|
@ -114,7 +115,7 @@ void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
|
|||
|
||||
/* Call IOMMU driver to setup pgt */
|
||||
iovaddr = iova_dma_addr(&imgu->iova_domain, iova);
|
||||
for (i = 0; i < size / PAGE_SIZE; ++i) {
|
||||
for (i = 0; i < count; ++i) {
|
||||
rval = imgu_mmu_map(imgu->mmu, iovaddr,
|
||||
page_to_phys(pages[i]), PAGE_SIZE);
|
||||
if (rval)
|
||||
|
@ -123,33 +124,23 @@ void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
|
|||
iovaddr += PAGE_SIZE;
|
||||
}
|
||||
|
||||
/* Now grab a virtual region */
|
||||
map->vma = __get_vm_area(size, VM_USERMAP, VMALLOC_START, VMALLOC_END);
|
||||
if (!map->vma)
|
||||
map->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
|
||||
if (!map->vaddr)
|
||||
goto out_unmap;
|
||||
|
||||
map->vma->pages = pages;
|
||||
/* And map it in KVA */
|
||||
if (map_vm_area(map->vma, PAGE_KERNEL, pages))
|
||||
goto out_vunmap;
|
||||
|
||||
map->pages = pages;
|
||||
map->size = size;
|
||||
map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
|
||||
map->vaddr = map->vma->addr;
|
||||
|
||||
dev_dbg(dev, "%s: allocated %zu @ IOVA %pad @ VA %p\n", __func__,
|
||||
size, &map->daddr, map->vma->addr);
|
||||
size, &map->daddr, map->vaddr);
|
||||
|
||||
return map->vma->addr;
|
||||
|
||||
out_vunmap:
|
||||
vunmap(map->vma->addr);
|
||||
return map->vaddr;
|
||||
|
||||
out_unmap:
|
||||
imgu_dmamap_free_buffer(pages, size);
|
||||
imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
|
||||
i * PAGE_SIZE);
|
||||
map->vma = NULL;
|
||||
|
||||
out_free_iova:
|
||||
__free_iova(&imgu->iova_domain, iova);
|
||||
|
@ -177,8 +168,6 @@ void imgu_dmamap_unmap(struct imgu_device *imgu, struct imgu_css_map *map)
|
|||
*/
|
||||
void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map)
|
||||
{
|
||||
struct vm_struct *area = map->vma;
|
||||
|
||||
dev_dbg(&imgu->pci_dev->dev, "%s: freeing %zu @ IOVA %pad @ VA %p\n",
|
||||
__func__, map->size, &map->daddr, map->vaddr);
|
||||
|
||||
|
@ -187,11 +176,8 @@ void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map)
|
|||
|
||||
imgu_dmamap_unmap(imgu, map);
|
||||
|
||||
if (WARN_ON(!area) || WARN_ON(!area->pages))
|
||||
return;
|
||||
|
||||
imgu_dmamap_free_buffer(area->pages, map->size);
|
||||
vunmap(map->vaddr);
|
||||
imgu_dmamap_free_buffer(map->pages, map->size);
|
||||
map->vaddr = NULL;
|
||||
}
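The imgu conversion above swaps the open-coded __get_vm_area()/map_vm_area() sequence for plain vmap()/vunmap(). A minimal sketch of that pattern, assuming the pages were allocated elsewhere; the helper name is made up for illustration.

#include <linux/vmalloc.h>
#include <linux/mm.h>

/* example_map_pages() is illustrative; it mirrors the vmap()-based path above. */
static void *example_map_pages(struct page **pages, unsigned int count)
{
	/* One contiguous kernel mapping over the given pages; undo with vunmap(). */
	return vmap(pages, count, VM_MAP, PAGE_KERNEL);
}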
|
||||
|
||||
|
|
|
@ -614,10 +614,9 @@ static int blkdev_readpage(struct file * file, struct page * page)
|
|||
return block_read_full_page(page, blkdev_get_block);
|
||||
}
|
||||
|
||||
static int blkdev_readpages(struct file *file, struct address_space *mapping,
|
||||
struct list_head *pages, unsigned nr_pages)
|
||||
static void blkdev_readahead(struct readahead_control *rac)
|
||||
{
|
||||
return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block);
|
||||
mpage_readahead(rac, blkdev_get_block);
|
||||
}
|
||||
|
||||
static int blkdev_write_begin(struct file *file, struct address_space *mapping,
|
||||
|
@ -2085,7 +2084,7 @@ static int blkdev_writepages(struct address_space *mapping,
|
|||
|
||||
static const struct address_space_operations def_blk_aops = {
|
||||
.readpage = blkdev_readpage,
|
||||
.readpages = blkdev_readpages,
|
||||
.readahead = blkdev_readahead,
|
||||
.writepage = blkdev_writepage,
|
||||
.write_begin = blkdev_write_begin,
|
||||
.write_end = blkdev_write_end,
|
||||
|
|
|
@ -980,9 +980,7 @@ static void btree_invalidatepage(struct page *page, unsigned int offset,
|
|||
btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info,
|
||||
"page private not zero on page %llu",
|
||||
(unsigned long long)page_offset(page));
|
||||
ClearPagePrivate(page);
|
||||
set_page_private(page, 0);
|
||||
put_page(page);
|
||||
detach_page_private(page);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -3076,22 +3076,16 @@ static int submit_extent_page(unsigned int opf,
|
|||
static void attach_extent_buffer_page(struct extent_buffer *eb,
|
||||
struct page *page)
|
||||
{
|
||||
if (!PagePrivate(page)) {
|
||||
SetPagePrivate(page);
|
||||
get_page(page);
|
||||
set_page_private(page, (unsigned long)eb);
|
||||
} else {
|
||||
if (!PagePrivate(page))
|
||||
attach_page_private(page, eb);
|
||||
else
|
||||
WARN_ON(page->private != (unsigned long)eb);
|
||||
}
|
||||
}
|
||||
|
||||
void set_page_extent_mapped(struct page *page)
|
||||
{
|
||||
if (!PagePrivate(page)) {
|
||||
SetPagePrivate(page);
|
||||
get_page(page);
|
||||
set_page_private(page, EXTENT_PAGE_PRIVATE);
|
||||
}
|
||||
if (!PagePrivate(page))
|
||||
attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
|
||||
}
|
||||
|
||||
static struct extent_map *
|
||||
|
@ -4367,51 +4361,32 @@ int extent_writepages(struct address_space *mapping,
|
|||
return ret;
|
||||
}
|
||||
|
||||
int extent_readpages(struct address_space *mapping, struct list_head *pages,
|
||||
unsigned nr_pages)
|
||||
void extent_readahead(struct readahead_control *rac)
|
||||
{
|
||||
struct bio *bio = NULL;
|
||||
unsigned long bio_flags = 0;
|
||||
struct page *pagepool[16];
|
||||
struct extent_map *em_cached = NULL;
|
||||
int nr = 0;
|
||||
u64 prev_em_start = (u64)-1;
|
||||
int nr;
|
||||
|
||||
while (!list_empty(pages)) {
|
||||
u64 contig_end = 0;
|
||||
while ((nr = readahead_page_batch(rac, pagepool))) {
|
||||
u64 contig_start = page_offset(pagepool[0]);
|
||||
u64 contig_end = page_offset(pagepool[nr - 1]) + PAGE_SIZE - 1;
|
||||
|
||||
for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) {
|
||||
struct page *page = lru_to_page(pages);
|
||||
ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
|
||||
|
||||
prefetchw(&page->flags);
|
||||
list_del(&page->lru);
|
||||
if (add_to_page_cache_lru(page, mapping, page->index,
|
||||
readahead_gfp_mask(mapping))) {
|
||||
put_page(page);
|
||||
break;
|
||||
}
|
||||
|
||||
pagepool[nr++] = page;
|
||||
contig_end = page_offset(page) + PAGE_SIZE - 1;
|
||||
}
|
||||
|
||||
if (nr) {
|
||||
u64 contig_start = page_offset(pagepool[0]);
|
||||
|
||||
ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end);
|
||||
|
||||
contiguous_readpages(pagepool, nr, contig_start,
|
||||
contig_end, &em_cached, &bio, &bio_flags,
|
||||
&prev_em_start);
|
||||
}
|
||||
contiguous_readpages(pagepool, nr, contig_start, contig_end,
|
||||
&em_cached, &bio, &bio_flags, &prev_em_start);
|
||||
}
|
||||
|
||||
if (em_cached)
|
||||
free_extent_map(em_cached);
|
||||
|
||||
if (bio)
|
||||
return submit_one_bio(bio, 0, bio_flags);
|
||||
return 0;
|
||||
if (bio) {
|
||||
if (submit_one_bio(bio, 0, bio_flags))
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -4929,10 +4904,7 @@ static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
|
|||
* We need to make sure we haven't be attached
|
||||
* to a new eb.
|
||||
*/
|
||||
ClearPagePrivate(page);
|
||||
set_page_private(page, 0);
|
||||
/* One for the page private */
|
||||
put_page(page);
|
||||
detach_page_private(page);
|
||||
}
|
||||
|
||||
if (mapped)
|
||||
|
|
|
@ -198,8 +198,7 @@ int extent_writepages(struct address_space *mapping,
|
|||
struct writeback_control *wbc);
|
||||
int btree_write_cache_pages(struct address_space *mapping,
|
||||
struct writeback_control *wbc);
|
||||
int extent_readpages(struct address_space *mapping, struct list_head *pages,
|
||||
unsigned nr_pages);
|
||||
void extent_readahead(struct readahead_control *rac);
|
||||
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
|
||||
__u64 start, __u64 len);
|
||||
void set_page_extent_mapped(struct page *page);
|
||||
|
|
|
@ -4856,8 +4856,8 @@ static void evict_inode_truncate_pages(struct inode *inode)
|
|||
|
||||
/*
|
||||
* Keep looping until we have no more ranges in the io tree.
|
||||
* We can have ongoing bios started by readpages (called from readahead)
|
||||
* that have their endio callback (extent_io.c:end_bio_extent_readpage)
|
||||
* We can have ongoing bios started by readahead that have
|
||||
* their endio callback (extent_io.c:end_bio_extent_readpage)
|
||||
* still in progress (unlocked the pages in the bio but did not yet
|
||||
* unlocked the ranges in the io tree). Therefore this means some
|
||||
* ranges can still be locked and eviction started because before
|
||||
|
@ -7050,11 +7050,11 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
|
|||
* for it to complete) and then invalidate the pages for
|
||||
* this range (through invalidate_inode_pages2_range()),
|
||||
* but that can lead us to a deadlock with a concurrent
|
||||
* call to readpages() (a buffered read or a defrag call
|
||||
* call to readahead (a buffered read or a defrag call
|
||||
* triggered a readahead) on a page lock due to an
|
||||
* ordered dio extent we created before but did not have
|
||||
* yet a corresponding bio submitted (whence it can not
|
||||
* complete), which makes readpages() wait for that
|
||||
* complete), which makes readahead wait for that
|
||||
* ordered extent to complete while holding a lock on
|
||||
* that page.
|
||||
*/
|
||||
|
@ -8293,21 +8293,16 @@ static int btrfs_writepages(struct address_space *mapping,
|
|||
return extent_writepages(mapping, wbc);
|
||||
}
|
||||
|
||||
static int
|
||||
btrfs_readpages(struct file *file, struct address_space *mapping,
|
||||
struct list_head *pages, unsigned nr_pages)
|
||||
static void btrfs_readahead(struct readahead_control *rac)
|
||||
{
|
||||
return extent_readpages(mapping, pages, nr_pages);
|
||||
extent_readahead(rac);
|
||||
}
|
||||
|
||||
static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
|
||||
{
|
||||
int ret = try_release_extent_mapping(page, gfp_flags);
|
||||
if (ret == 1) {
|
||||
ClearPagePrivate(page);
|
||||
set_page_private(page, 0);
|
||||
put_page(page);
|
||||
}
|
||||
if (ret == 1)
|
||||
detach_page_private(page);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -8329,14 +8324,8 @@ static int btrfs_migratepage(struct address_space *mapping,
|
|||
if (ret != MIGRATEPAGE_SUCCESS)
|
||||
return ret;
|
||||
|
||||
if (page_has_private(page)) {
|
||||
ClearPagePrivate(page);
|
||||
get_page(newpage);
|
||||
set_page_private(newpage, page_private(page));
|
||||
set_page_private(page, 0);
|
||||
put_page(page);
|
||||
SetPagePrivate(newpage);
|
||||
}
|
||||
if (page_has_private(page))
|
||||
attach_page_private(newpage, detach_page_private(page));
|
||||
|
||||
if (PagePrivate2(page)) {
|
||||
ClearPagePrivate2(page);
|
||||
|
@ -8458,11 +8447,7 @@ again:
|
|||
}
|
||||
|
||||
ClearPageChecked(page);
|
||||
if (PagePrivate(page)) {
|
||||
ClearPagePrivate(page);
|
||||
set_page_private(page, 0);
|
||||
put_page(page);
|
||||
}
|
||||
detach_page_private(page);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -10553,7 +10538,7 @@ static const struct address_space_operations btrfs_aops = {
|
|||
.readpage = btrfs_readpage,
|
||||
.writepage = btrfs_writepage,
|
||||
.writepages = btrfs_writepages,
|
||||
.readpages = btrfs_readpages,
|
||||
.readahead = btrfs_readahead,
|
||||
.direct_IO = btrfs_direct_IO,
|
||||
.invalidatepage = btrfs_invalidatepage,
|
||||
.releasepage = btrfs_releasepage,
|
||||
|
|
fs/buffer.c
|
@@ -123,14 +123,6 @@ void __wait_on_buffer(struct buffer_head * bh)
}
EXPORT_SYMBOL(__wait_on_buffer);

static void
__clear_page_buffers(struct page *page)
{
ClearPagePrivate(page);
set_page_private(page, 0);
put_page(page);
}

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
if (!test_bit(BH_Quiet, &bh->b_state))
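fs/buffer.c can drop its private __clear_page_buffers() because the generic attach_page_private()/detach_page_private() helpers now handle the refcount and PG_private bookkeeping. A hedged sketch of how the pair is meant to be used; the state structure and helper names are illustrative only.

#include <linux/pagemap.h>
#include <linux/slab.h>

struct example_page_state {		/* illustrative per-page payload */
	unsigned long flags;
};

static int example_attach_state(struct page *page)
{
	struct example_page_state *st = kzalloc(sizeof(*st), GFP_NOFS);

	if (!st)
		return -ENOMEM;
	attach_page_private(page, st);	/* sets PG_private and grabs a page ref */
	return 0;
}

static void example_detach_state(struct page *page)
{
	/* clears PG_private, drops the page ref, returns the stored pointer */
	kfree(detach_page_private(page));
}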
@ -906,7 +898,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head)
|
|||
bh = bh->b_this_page;
|
||||
} while (bh);
|
||||
tail->b_this_page = head;
|
||||
attach_page_buffers(page, head);
|
||||
attach_page_private(page, head);
|
||||
}
|
||||
|
||||
static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
|
||||
|
@ -1154,12 +1146,19 @@ EXPORT_SYMBOL(mark_buffer_dirty);
|
|||
|
||||
void mark_buffer_write_io_error(struct buffer_head *bh)
|
||||
{
|
||||
struct super_block *sb;
|
||||
|
||||
set_buffer_write_io_error(bh);
|
||||
/* FIXME: do we need to set this in both places? */
|
||||
if (bh->b_page && bh->b_page->mapping)
|
||||
mapping_set_error(bh->b_page->mapping, -EIO);
|
||||
if (bh->b_assoc_map)
|
||||
mapping_set_error(bh->b_assoc_map, -EIO);
|
||||
rcu_read_lock();
|
||||
sb = READ_ONCE(bh->b_bdev->bd_super);
|
||||
if (sb)
|
||||
errseq_set(&sb->s_wb_err, -EIO);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
EXPORT_SYMBOL(mark_buffer_write_io_error);
|
||||
|
||||
|
@ -1580,7 +1579,7 @@ void create_empty_buffers(struct page *page,
|
|||
bh = bh->b_this_page;
|
||||
} while (bh != head);
|
||||
}
|
||||
attach_page_buffers(page, head);
|
||||
attach_page_private(page, head);
|
||||
spin_unlock(&page->mapping->private_lock);
|
||||
}
|
||||
EXPORT_SYMBOL(create_empty_buffers);
|
||||
|
@ -2567,7 +2566,7 @@ static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
|
|||
bh->b_this_page = head;
|
||||
bh = bh->b_this_page;
|
||||
} while (bh != head);
|
||||
attach_page_buffers(page, head);
|
||||
attach_page_private(page, head);
|
||||
spin_unlock(&page->mapping->private_lock);
|
||||
}
|
||||
|
||||
|
@ -3227,7 +3226,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
|
|||
bh = next;
|
||||
} while (bh != head);
|
||||
*buffers_to_free = head;
|
||||
__clear_page_buffers(page);
|
||||
detach_page_private(page);
|
||||
return 1;
|
||||
failed:
|
||||
return 0;
|
||||
|
|
|
@ -280,47 +280,36 @@ static int erofs_raw_access_readpage(struct file *file, struct page *page)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int erofs_raw_access_readpages(struct file *filp,
|
||||
struct address_space *mapping,
|
||||
struct list_head *pages,
|
||||
unsigned int nr_pages)
|
||||
static void erofs_raw_access_readahead(struct readahead_control *rac)
|
||||
{
|
||||
erofs_off_t last_block;
|
||||
struct bio *bio = NULL;
|
||||
gfp_t gfp = readahead_gfp_mask(mapping);
|
||||
struct page *page = list_last_entry(pages, struct page, lru);
|
||||
struct page *page;
|
||||
|
||||
trace_erofs_readpages(mapping->host, page, nr_pages, true);
|
||||
|
||||
for (; nr_pages; --nr_pages) {
|
||||
page = list_entry(pages->prev, struct page, lru);
|
||||
trace_erofs_readpages(rac->mapping->host, readahead_index(rac),
|
||||
readahead_count(rac), true);
|
||||
|
||||
while ((page = readahead_page(rac))) {
|
||||
prefetchw(&page->flags);
|
||||
list_del(&page->lru);
|
||||
|
||||
if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
|
||||
bio = erofs_read_raw_page(bio, mapping, page,
|
||||
&last_block, nr_pages, true);
|
||||
bio = erofs_read_raw_page(bio, rac->mapping, page, &last_block,
|
||||
readahead_count(rac), true);
|
||||
|
||||
/* all the page errors are ignored when readahead */
|
||||
if (IS_ERR(bio)) {
|
||||
pr_err("%s, readahead error at page %lu of nid %llu\n",
|
||||
__func__, page->index,
|
||||
EROFS_I(mapping->host)->nid);
|
||||
/* all the page errors are ignored when readahead */
|
||||
if (IS_ERR(bio)) {
|
||||
pr_err("%s, readahead error at page %lu of nid %llu\n",
|
||||
__func__, page->index,
|
||||
EROFS_I(rac->mapping->host)->nid);
|
||||
|
||||
bio = NULL;
|
||||
}
|
||||
bio = NULL;
|
||||
}
|
||||
|
||||
/* pages could still be locked */
|
||||
put_page(page);
|
||||
}
|
||||
DBG_BUGON(!list_empty(pages));
|
||||
|
||||
/* the rare case (end in gaps) */
|
||||
if (bio)
|
||||
submit_bio(bio);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int erofs_get_block(struct inode *inode, sector_t iblock,
|
||||
|
@ -358,7 +347,7 @@ static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
|
|||
/* for uncompressed (aligned) files and raw access for other files */
|
||||
const struct address_space_operations erofs_raw_access_aops = {
|
||||
.readpage = erofs_raw_access_readpage,
|
||||
.readpages = erofs_raw_access_readpages,
|
||||
.readahead = erofs_raw_access_readahead,
|
||||
.bmap = erofs_bmap,
|
||||
};
|
||||
|
||||
|
|
|
@ -274,7 +274,7 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
|
|||
|
||||
i = 0;
|
||||
while (1) {
|
||||
dst = vm_map_ram(rq->out, nrpages_out, -1, PAGE_KERNEL);
|
||||
dst = vm_map_ram(rq->out, nrpages_out, -1);
|
||||
|
||||
/* retry two more times (totally 3 times) */
|
||||
if (dst || ++i >= 3)
|
||||
|
|
|
@ -1305,28 +1305,23 @@ static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
|
|||
return nr <= sbi->max_sync_decompress_pages;
|
||||
}
|
||||
|
||||
static int z_erofs_readpages(struct file *filp, struct address_space *mapping,
|
||||
struct list_head *pages, unsigned int nr_pages)
|
||||
static void z_erofs_readahead(struct readahead_control *rac)
|
||||
{
|
||||
struct inode *const inode = mapping->host;
|
||||
struct inode *const inode = rac->mapping->host;
|
||||
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
|
||||
|
||||
bool sync = should_decompress_synchronously(sbi, nr_pages);
|
||||
bool sync = should_decompress_synchronously(sbi, readahead_count(rac));
|
||||
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
|
||||
gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
|
||||
struct page *head = NULL;
|
||||
struct page *page, *head = NULL;
|
||||
LIST_HEAD(pagepool);
|
||||
|
||||
trace_erofs_readpages(mapping->host, lru_to_page(pages),
|
||||
nr_pages, false);
|
||||
trace_erofs_readpages(inode, readahead_index(rac),
|
||||
readahead_count(rac), false);
|
||||
|
||||
f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
|
||||
|
||||
for (; nr_pages; --nr_pages) {
|
||||
struct page *page = lru_to_page(pages);
|
||||
f.headoffset = readahead_pos(rac);
|
||||
|
||||
while ((page = readahead_page(rac))) {
|
||||
prefetchw(&page->flags);
|
||||
list_del(&page->lru);
|
||||
|
||||
/*
|
||||
* A pure asynchronous readahead is indicated if
|
||||
|
@ -1335,11 +1330,6 @@ static int z_erofs_readpages(struct file *filp, struct address_space *mapping,
|
|||
*/
|
||||
sync &= !(PageReadahead(page) && !head);
|
||||
|
||||
if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
|
||||
list_add(&page->lru, &pagepool);
|
||||
continue;
|
||||
}
|
||||
|
||||
set_page_private(page, (unsigned long)head);
|
||||
head = page;
|
||||
}
|
||||
|
@ -1368,11 +1358,10 @@ static int z_erofs_readpages(struct file *filp, struct address_space *mapping,
|
|||
|
||||
/* clean up the remaining free pages */
|
||||
put_pages_list(&pagepool);
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct address_space_operations z_erofs_aops = {
|
||||
.readpage = z_erofs_readpage,
|
||||
.readpages = z_erofs_readpages,
|
||||
.readahead = z_erofs_readahead,
|
||||
};
|
||||
|
||||
|
|
|
@ -372,10 +372,9 @@ static int exfat_readpage(struct file *file, struct page *page)
|
|||
return mpage_readpage(page, exfat_get_block);
|
||||
}
|
||||
|
||||
static int exfat_readpages(struct file *file, struct address_space *mapping,
|
||||
struct list_head *pages, unsigned int nr_pages)
|
||||
static void exfat_readahead(struct readahead_control *rac)
|
||||
{
|
||||
return mpage_readpages(mapping, pages, nr_pages, exfat_get_block);
|
||||
mpage_readahead(rac, exfat_get_block);
|
||||
}
|
||||
|
||||
static int exfat_writepage(struct page *page, struct writeback_control *wbc)
|
||||
|
@ -502,7 +501,7 @@ int exfat_block_truncate_page(struct inode *inode, loff_t from)
|
|||
|
||||
static const struct address_space_operations exfat_aops = {
|
||||
.readpage = exfat_readpage,
|
||||
.readpages = exfat_readpages,
|
||||
.readahead = exfat_readahead,
|
||||
.writepage = exfat_writepage,
|
||||
.writepages = exfat_writepages,
|
||||
.write_begin = exfat_write_begin,
|
||||
|
|
|
@ -877,11 +877,9 @@ static int ext2_readpage(struct file *file, struct page *page)
|
|||
return mpage_readpage(page, ext2_get_block);
|
||||
}
|
||||
|
||||
static int
|
||||
ext2_readpages(struct file *file, struct address_space *mapping,
|
||||
struct list_head *pages, unsigned nr_pages)
|
||||
static void ext2_readahead(struct readahead_control *rac)
|
||||
{
|
||||
return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
|
||||
mpage_readahead(rac, ext2_get_block);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -967,7 +965,7 @@ ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc
|
|||
|
||||
const struct address_space_operations ext2_aops = {
|
||||
.readpage = ext2_readpage,
|
||||
.readpages = ext2_readpages,
|
||||
.readahead = ext2_readahead,
|
||||
.writepage = ext2_writepage,
|
||||
.write_begin = ext2_write_begin,
|
||||
.write_end = ext2_write_end,
|
||||
|
@ -981,7 +979,7 @@ const struct address_space_operations ext2_aops = {
|
|||
|
||||
const struct address_space_operations ext2_nobh_aops = {
|
||||
.readpage = ext2_readpage,
|
||||
.readpages = ext2_readpages,
|
||||
.readahead = ext2_readahead,
|
||||
.writepage = ext2_nobh_writepage,
|
||||
.write_begin = ext2_nobh_write_begin,
|
||||
.write_end = nobh_write_end,
|
||||
|
|
|
@ -3317,9 +3317,8 @@ static inline void ext4_set_de_type(struct super_block *sb,
|
|||
}
|
||||
|
||||
/* readpages.c */
|
||||
extern int ext4_mpage_readpages(struct address_space *mapping,
|
||||
struct list_head *pages, struct page *page,
|
||||
unsigned nr_pages, bool is_readahead);
|
||||
extern int ext4_mpage_readpages(struct inode *inode,
|
||||
struct readahead_control *rac, struct page *page);
|
||||
extern int __init ext4_init_post_read_processing(void);
|
||||
extern void ext4_exit_post_read_processing(void);
|
||||
|
||||
|
|
|
@ -3224,23 +3224,20 @@ static int ext4_readpage(struct file *file, struct page *page)
|
|||
ret = ext4_readpage_inline(inode, page);
|
||||
|
||||
if (ret == -EAGAIN)
|
||||
return ext4_mpage_readpages(page->mapping, NULL, page, 1,
|
||||
false);
|
||||
return ext4_mpage_readpages(inode, NULL, page);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
ext4_readpages(struct file *file, struct address_space *mapping,
|
||||
struct list_head *pages, unsigned nr_pages)
|
||||
static void ext4_readahead(struct readahead_control *rac)
|
||||
{
|
||||
struct inode *inode = mapping->host;
|
||||
struct inode *inode = rac->mapping->host;
|
||||
|
||||
/* If the file has inline data, no need to do readpages. */
|
||||
/* If the file has inline data, no need to do readahead. */
|
||||
if (ext4_has_inline_data(inode))
|
||||
return 0;
|
||||
return;
|
||||
|
||||
return ext4_mpage_readpages(mapping, pages, NULL, nr_pages, true);
|
||||
ext4_mpage_readpages(inode, rac, NULL);
|
||||
}
|
||||
|
||||
static void ext4_invalidatepage(struct page *page, unsigned int offset,
|
||||
|
@ -3605,7 +3602,7 @@ static int ext4_set_page_dirty(struct page *page)
|
|||
|
||||
static const struct address_space_operations ext4_aops = {
|
||||
.readpage = ext4_readpage,
|
||||
.readpages = ext4_readpages,
|
||||
.readahead = ext4_readahead,
|
||||
.writepage = ext4_writepage,
|
||||
.writepages = ext4_writepages,
|
||||
.write_begin = ext4_write_begin,
|
||||
|
@ -3622,7 +3619,7 @@ static const struct address_space_operations ext4_aops = {
|
|||
|
||||
static const struct address_space_operations ext4_journalled_aops = {
|
||||
.readpage = ext4_readpage,
|
||||
.readpages = ext4_readpages,
|
||||
.readahead = ext4_readahead,
|
||||
.writepage = ext4_writepage,
|
||||
.writepages = ext4_writepages,
|
||||
.write_begin = ext4_write_begin,
|
||||
|
@ -3638,7 +3635,7 @@ static const struct address_space_operations ext4_journalled_aops = {
|
|||
|
||||
static const struct address_space_operations ext4_da_aops = {
|
||||
.readpage = ext4_readpage,
|
||||
.readpages = ext4_readpages,
|
||||
.readahead = ext4_readahead,
|
||||
.writepage = ext4_writepage,
|
||||
.writepages = ext4_writepages,
|
||||
.write_begin = ext4_da_write_begin,
|
||||
|
|
|
@ -7,8 +7,8 @@
|
|||
*
|
||||
* This was originally taken from fs/mpage.c
|
||||
*
|
||||
* The intent is the ext4_mpage_readpages() function here is intended
|
||||
* to replace mpage_readpages() in the general case, not just for
|
||||
* The ext4_mpage_readpages() function here is intended to
|
||||
* replace mpage_readahead() in the general case, not just for
|
||||
* encrypted files. It has some limitations (see below), where it
|
||||
* will fall back to read_block_full_page(), but these limitations
|
||||
* should only be hit when page_size != block_size.
|
||||
|
@ -221,14 +221,12 @@ static inline loff_t ext4_readpage_limit(struct inode *inode)
|
|||
return i_size_read(inode);
|
||||
}
|
||||
|
||||
int ext4_mpage_readpages(struct address_space *mapping,
|
||||
struct list_head *pages, struct page *page,
|
||||
unsigned nr_pages, bool is_readahead)
|
||||
int ext4_mpage_readpages(struct inode *inode,
|
||||
struct readahead_control *rac, struct page *page)
|
||||
{
|
||||
struct bio *bio = NULL;
|
||||
sector_t last_block_in_bio = 0;
|
||||
|
||||
struct inode *inode = mapping->host;
|
||||
const unsigned blkbits = inode->i_blkbits;
|
||||
const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
|
||||
const unsigned blocksize = 1 << blkbits;
|
||||
|
@ -241,6 +239,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
|
|||
int length;
|
||||
unsigned relative_block = 0;
|
||||
struct ext4_map_blocks map;
|
||||
unsigned int nr_pages = rac ? readahead_count(rac) : 1;
|
||||
|
||||
map.m_pblk = 0;
|
||||
map.m_lblk = 0;
|
||||
|
@ -251,14 +250,9 @@ int ext4_mpage_readpages(struct address_space *mapping,
|
|||
int fully_mapped = 1;
|
||||
unsigned first_hole = blocks_per_page;
|
||||
|
||||
if (pages) {
|
||||
page = lru_to_page(pages);
|
||||
|
||||
if (rac) {
|
||||
page = readahead_page(rac);
|
||||
prefetchw(&page->flags);
|
||||
list_del(&page->lru);
|
||||
if (add_to_page_cache_lru(page, mapping, page->index,
|
||||
readahead_gfp_mask(mapping)))
|
||||
goto next_page;
|
||||
}
|
||||
|
||||
if (page_has_buffers(page))
|
||||
|
@ -381,7 +375,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
|
|||
bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
|
||||
bio->bi_end_io = mpage_end_io;
|
||||
bio_set_op_attrs(bio, REQ_OP_READ,
|
||||
is_readahead ? REQ_RAHEAD : 0);
|
||||
rac ? REQ_RAHEAD : 0);
|
||||
}
|
||||
|
||||
length = first_hole << blkbits;
|
||||
|
@ -406,10 +400,9 @@ int ext4_mpage_readpages(struct address_space *mapping,
|
|||
else
|
||||
unlock_page(page);
|
||||
next_page:
|
||||
if (pages)
|
||||
if (rac)
|
||||
put_page(page);
|
||||
}
|
||||
BUG_ON(pages && !list_empty(pages));
|
||||
if (bio)
|
||||
submit_bio(bio);
|
||||
return 0;
|
||||
|
|
|
@ -342,37 +342,6 @@ static int ext4_get_verity_descriptor(struct inode *inode, void *buf,
|
|||
return desc_size;
|
||||
}
|
||||
|
||||
/*
|
||||
* Prefetch some pages from the file's Merkle tree.
|
||||
*
|
||||
* This is basically a stripped-down version of __do_page_cache_readahead()
|
||||
* which works on pages past i_size.
|
||||
*/
|
||||
static void ext4_merkle_tree_readahead(struct address_space *mapping,
|
||||
pgoff_t start_index, unsigned long count)
|
||||
{
|
||||
LIST_HEAD(pages);
|
||||
unsigned int nr_pages = 0;
|
||||
struct page *page;
|
||||
pgoff_t index;
|
||||
struct blk_plug plug;
|
||||
|
||||
for (index = start_index; index < start_index + count; index++) {
|
||||
page = xa_load(&mapping->i_pages, index);
|
||||
if (!page || xa_is_value(page)) {
|
||||
page = __page_cache_alloc(readahead_gfp_mask(mapping));
|
||||
if (!page)
|
||||
break;
|
||||
page->index = index;
|
||||
list_add(&page->lru, &pages);
|
||||
nr_pages++;
|
||||
}
|
||||
}
|
||||
blk_start_plug(&plug);
|
||||
ext4_mpage_readpages(mapping, &pages, NULL, nr_pages, true);
|
||||
blk_finish_plug(&plug);
|
||||
}
|
||||
|
||||
static struct page *ext4_read_merkle_tree_page(struct inode *inode,
|
||||
pgoff_t index,
|
||||
unsigned long num_ra_pages)
|
||||
|
@ -386,8 +355,8 @@ static struct page *ext4_read_merkle_tree_page(struct inode *inode,
|
|||
if (page)
|
||||
put_page(page);
|
||||
else if (num_ra_pages > 1)
|
||||
ext4_merkle_tree_readahead(inode->i_mapping, index,
|
||||
num_ra_pages);
|
||||
page_cache_readahead_unbounded(inode->i_mapping, NULL,
|
||||
index, num_ra_pages, 0);
|
||||
page = read_mapping_page(inode->i_mapping, index, NULL);
|
||||
}
|
||||
return page;
|
||||
|
|
|
@ -2177,13 +2177,11 @@ out:
|
|||
* use ->readpage() or do the necessary surgery to decouple ->readpages()
|
||||
* from read-ahead.
|
||||
*/
|
||||
int f2fs_mpage_readpages(struct address_space *mapping,
|
||||
struct list_head *pages, struct page *page,
|
||||
unsigned nr_pages, bool is_readahead)
|
||||
static int f2fs_mpage_readpages(struct inode *inode,
|
||||
struct readahead_control *rac, struct page *page)
|
||||
{
|
||||
struct bio *bio = NULL;
|
||||
sector_t last_block_in_bio = 0;
|
||||
struct inode *inode = mapping->host;
|
||||
struct f2fs_map_blocks map;
|
||||
#ifdef CONFIG_F2FS_FS_COMPRESSION
|
||||
struct compress_ctx cc = {
|
||||
|
@ -2197,6 +2195,7 @@ int f2fs_mpage_readpages(struct address_space *mapping,
|
|||
.nr_cpages = 0,
|
||||
};
|
||||
#endif
|
||||
unsigned nr_pages = rac ? readahead_count(rac) : 1;
|
||||
unsigned max_nr_pages = nr_pages;
|
||||
int ret = 0;
|
||||
|
||||
|
@ -2210,15 +2209,9 @@ int f2fs_mpage_readpages(struct address_space *mapping,
|
|||
map.m_may_create = false;
|
||||
|
||||
for (; nr_pages; nr_pages--) {
|
||||
if (pages) {
|
||||
page = list_last_entry(pages, struct page, lru);
|
||||
|
||||
if (rac) {
|
||||
page = readahead_page(rac);
|
||||
prefetchw(&page->flags);
|
||||
list_del(&page->lru);
|
||||
if (add_to_page_cache_lru(page, mapping,
|
||||
page_index(page),
|
||||
readahead_gfp_mask(mapping)))
|
||||
goto next_page;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_F2FS_FS_COMPRESSION
|
||||
|
@ -2228,7 +2221,7 @@ int f2fs_mpage_readpages(struct address_space *mapping,
|
|||
ret = f2fs_read_multi_pages(&cc, &bio,
|
||||
max_nr_pages,
|
||||
&last_block_in_bio,
|
||||
is_readahead, false);
|
||||
rac != NULL, false);
|
||||
f2fs_destroy_compress_ctx(&cc);
|
||||
if (ret)
|
||||
goto set_error_page;
|
||||
|
@ -2251,7 +2244,7 @@ read_single_page:
|
|||
#endif
|
||||
|
||||
ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
|
||||
&bio, &last_block_in_bio, is_readahead);
|
||||
&bio, &last_block_in_bio, rac);
|
||||
if (ret) {
|
||||
#ifdef CONFIG_F2FS_FS_COMPRESSION
|
||||
set_error_page:
|
||||
|
@ -2260,8 +2253,10 @@ set_error_page:
|
|||
zero_user_segment(page, 0, PAGE_SIZE);
|
||||
unlock_page(page);
|
||||
}
|
||||
#ifdef CONFIG_F2FS_FS_COMPRESSION
|
||||
next_page:
|
||||
if (pages)
|
||||
#endif
|
||||
if (rac)
|
||||
put_page(page);
|
||||
|
||||
#ifdef CONFIG_F2FS_FS_COMPRESSION
|
||||
|
@ -2271,16 +2266,15 @@ next_page:
|
|||
ret = f2fs_read_multi_pages(&cc, &bio,
|
||||
max_nr_pages,
|
||||
&last_block_in_bio,
|
||||
is_readahead, false);
|
||||
rac != NULL, false);
|
||||
f2fs_destroy_compress_ctx(&cc);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
BUG_ON(pages && !list_empty(pages));
|
||||
if (bio)
|
||||
__submit_bio(F2FS_I_SB(inode), bio, DATA);
|
||||
return pages ? 0 : ret;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int f2fs_read_data_page(struct file *file, struct page *page)
|
||||
|
@ -2299,28 +2293,24 @@ static int f2fs_read_data_page(struct file *file, struct page *page)
|
|||
if (f2fs_has_inline_data(inode))
|
||||
ret = f2fs_read_inline_data(inode, page);
|
||||
if (ret == -EAGAIN)
|
||||
ret = f2fs_mpage_readpages(page_file_mapping(page),
|
||||
NULL, page, 1, false);
|
||||
ret = f2fs_mpage_readpages(inode, NULL, page);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int f2fs_read_data_pages(struct file *file,
|
||||
struct address_space *mapping,
|
||||
struct list_head *pages, unsigned nr_pages)
|
||||
static void f2fs_readahead(struct readahead_control *rac)
|
||||
{
|
||||
struct inode *inode = mapping->host;
|
||||
struct page *page = list_last_entry(pages, struct page, lru);
|
||||
struct inode *inode = rac->mapping->host;
|
||||
|
||||
trace_f2fs_readpages(inode, page, nr_pages);
|
||||
trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
|
||||
|
||||
if (!f2fs_is_compress_backend_ready(inode))
|
||||
return 0;
|
||||
return;
|
||||
|
||||
/* If the file has inline data, skip readpages */
|
||||
if (f2fs_has_inline_data(inode))
|
||||
return 0;
|
||||
return;
|
||||
|
||||
return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
|
||||
f2fs_mpage_readpages(inode, rac, NULL);
|
||||
}
|
||||
|
||||
int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
|
||||
|
@ -3805,7 +3795,7 @@ static void f2fs_swap_deactivate(struct file *file)
|
|||
|
||||
const struct address_space_operations f2fs_dblock_aops = {
|
||||
.readpage = f2fs_read_data_page,
|
||||
.readpages = f2fs_read_data_pages,
|
||||
.readahead = f2fs_readahead,
|
||||
.writepage = f2fs_write_data_page,
|
||||
.writepages = f2fs_write_data_pages,
|
||||
.write_begin = f2fs_write_begin,
|
||||
|
|
|
@ -3051,19 +3051,12 @@ static inline void f2fs_set_page_private(struct page *page,
|
|||
if (PagePrivate(page))
|
||||
return;
|
||||
|
||||
get_page(page);
|
||||
SetPagePrivate(page);
|
||||
set_page_private(page, data);
|
||||
attach_page_private(page, (void *)data);
|
||||
}
|
||||
|
||||
static inline void f2fs_clear_page_private(struct page *page)
|
||||
{
|
||||
if (!PagePrivate(page))
|
||||
return;
|
||||
|
||||
set_page_private(page, 0);
|
||||
ClearPagePrivate(page);
|
||||
f2fs_put_page(page, 0);
|
||||
detach_page_private(page);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -3373,9 +3366,6 @@ int f2fs_reserve_new_block(struct dnode_of_data *dn);
|
|||
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
|
||||
int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
|
||||
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
|
||||
int f2fs_mpage_readpages(struct address_space *mapping,
|
||||
struct list_head *pages, struct page *page,
|
||||
unsigned nr_pages, bool is_readahead);
|
||||
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
|
||||
int op_flags, bool for_write);
|
||||
struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
|
||||
|
|
|
@ -222,37 +222,6 @@ static int f2fs_get_verity_descriptor(struct inode *inode, void *buf,
|
|||
return size;
|
||||
}
|
||||
|
||||
/*
|
||||
* Prefetch some pages from the file's Merkle tree.
|
||||
*
|
||||
* This is basically a stripped-down version of __do_page_cache_readahead()
|
||||
* which works on pages past i_size.
|
||||
*/
|
||||
static void f2fs_merkle_tree_readahead(struct address_space *mapping,
|
||||
pgoff_t start_index, unsigned long count)
|
||||
{
|
||||
LIST_HEAD(pages);
|
||||
unsigned int nr_pages = 0;
|
||||
struct page *page;
|
||||
pgoff_t index;
|
||||
struct blk_plug plug;
|
||||
|
||||
for (index = start_index; index < start_index + count; index++) {
|
||||
page = xa_load(&mapping->i_pages, index);
|
||||
if (!page || xa_is_value(page)) {
|
||||
page = __page_cache_alloc(readahead_gfp_mask(mapping));
|
||||
if (!page)
|
||||
break;
|
||||
page->index = index;
|
||||
list_add(&page->lru, &pages);
|
||||
nr_pages++;
|
||||
}
|
||||
}
|
||||
blk_start_plug(&plug);
|
||||
f2fs_mpage_readpages(mapping, &pages, NULL, nr_pages, true);
|
||||
blk_finish_plug(&plug);
|
||||
}
|
||||
|
||||
static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
|
||||
pgoff_t index,
|
||||
unsigned long num_ra_pages)
|
||||
|
@ -266,8 +235,8 @@ static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
|
|||
if (page)
|
||||
put_page(page);
|
||||
else if (num_ra_pages > 1)
|
||||
f2fs_merkle_tree_readahead(inode->i_mapping, index,
|
||||
num_ra_pages);
|
||||
page_cache_readahead_unbounded(inode->i_mapping, NULL,
|
||||
index, num_ra_pages, 0);
|
||||
page = read_mapping_page(inode->i_mapping, index, NULL);
|
||||
}
|
||||
return page;
|
||||
|
|
|
@ -210,10 +210,9 @@ static int fat_readpage(struct file *file, struct page *page)
|
|||
return mpage_readpage(page, fat_get_block);
|
||||
}
|
||||
|
||||
static int fat_readpages(struct file *file, struct address_space *mapping,
|
||||
struct list_head *pages, unsigned nr_pages)
|
||||
static void fat_readahead(struct readahead_control *rac)
|
||||
{
|
||||
return mpage_readpages(mapping, pages, nr_pages, fat_get_block);
|
||||
mpage_readahead(rac, fat_get_block);
|
||||
}
|
||||
|
||||
static void fat_write_failed(struct address_space *mapping, loff_t to)
|
||||
|
@ -344,7 +343,7 @@ int fat_block_truncate_page(struct inode *inode, loff_t from)
|
|||
|
||||
static const struct address_space_operations fat_aops = {
|
||||
.readpage = fat_readpage,
|
||||
.readpages = fat_readpages,
|
||||
.readahead = fat_readahead,
|
||||
.writepage = fat_writepage,
|
||||
.writepages = fat_writepages,
|
||||
.write_begin = fat_write_begin,
|
||||
|
|
|
@ -198,6 +198,7 @@ static struct file *alloc_file(const struct path *path, int flags,
|
|||
file->f_inode = path->dentry->d_inode;
|
||||
file->f_mapping = path->dentry->d_inode->i_mapping;
|
||||
file->f_wb_err = filemap_sample_wb_err(file->f_mapping);
|
||||
file->f_sb_err = file_sample_sb_err(file);
|
||||
if ((file->f_mode & FMODE_READ) &&
|
||||
likely(fop->read || fop->read_iter))
|
||||
file->f_mode |= FMODE_CAN_READ;
|
||||
|
|
|
@ -1070,7 +1070,6 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
|
|||
static unsigned long get_nr_dirty_pages(void)
|
||||
{
|
||||
return global_node_page_state(NR_FILE_DIRTY) +
|
||||
global_node_page_state(NR_UNSTABLE_NFS) +
|
||||
get_nr_dirty_inodes();
|
||||
}
|
||||
|
||||
|
|
fs/fuse/file.c
|
@ -915,84 +915,40 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
|
|||
fuse_readpages_end(fc, &ap->args, err);
|
||||
}
|
||||
|
||||
struct fuse_fill_data {
|
||||
struct fuse_io_args *ia;
|
||||
struct file *file;
|
||||
struct inode *inode;
|
||||
unsigned int nr_pages;
|
||||
unsigned int max_pages;
|
||||
};
|
||||
|
||||
static int fuse_readpages_fill(void *_data, struct page *page)
|
||||
static void fuse_readahead(struct readahead_control *rac)
|
||||
{
|
||||
struct fuse_fill_data *data = _data;
|
||||
struct fuse_io_args *ia = data->ia;
|
||||
struct fuse_args_pages *ap = &ia->ap;
|
||||
struct inode *inode = data->inode;
|
||||
struct inode *inode = rac->mapping->host;
|
||||
struct fuse_conn *fc = get_fuse_conn(inode);
|
||||
unsigned int i, max_pages, nr_pages = 0;
|
||||
|
||||
fuse_wait_on_page_writeback(inode, page->index);
|
||||
|
||||
if (ap->num_pages &&
|
||||
(ap->num_pages == fc->max_pages ||
|
||||
(ap->num_pages + 1) * PAGE_SIZE > fc->max_read ||
|
||||
ap->pages[ap->num_pages - 1]->index + 1 != page->index)) {
|
||||
data->max_pages = min_t(unsigned int, data->nr_pages,
|
||||
fc->max_pages);
|
||||
fuse_send_readpages(ia, data->file);
|
||||
data->ia = ia = fuse_io_alloc(NULL, data->max_pages);
|
||||
if (!ia) {
|
||||
unlock_page(page);
|
||||
return -ENOMEM;
|
||||
}
|
||||
ap = &ia->ap;
|
||||
}
|
||||
|
||||
if (WARN_ON(ap->num_pages >= data->max_pages)) {
|
||||
unlock_page(page);
|
||||
fuse_io_free(ia);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
get_page(page);
|
||||
ap->pages[ap->num_pages] = page;
|
||||
ap->descs[ap->num_pages].length = PAGE_SIZE;
|
||||
ap->num_pages++;
|
||||
data->nr_pages--;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int fuse_readpages(struct file *file, struct address_space *mapping,
|
||||
struct list_head *pages, unsigned nr_pages)
|
||||
{
|
||||
struct inode *inode = mapping->host;
|
||||
struct fuse_conn *fc = get_fuse_conn(inode);
|
||||
struct fuse_fill_data data;
|
||||
int err;
|
||||
|
||||
err = -EIO;
|
||||
if (is_bad_inode(inode))
|
||||
goto out;
|
||||
return;
|
||||
|
||||
data.file = file;
|
||||
data.inode = inode;
|
||||
data.nr_pages = nr_pages;
|
||||
data.max_pages = min_t(unsigned int, nr_pages, fc->max_pages);
|
||||
;
|
||||
data.ia = fuse_io_alloc(NULL, data.max_pages);
|
||||
err = -ENOMEM;
|
||||
if (!data.ia)
|
||||
goto out;
|
||||
max_pages = min_t(unsigned int, fc->max_pages,
|
||||
fc->max_read / PAGE_SIZE);
|
||||
|
||||
err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
|
||||
if (!err) {
|
||||
if (data.ia->ap.num_pages)
|
||||
fuse_send_readpages(data.ia, file);
|
||||
else
|
||||
fuse_io_free(data.ia);
|
||||
for (;;) {
|
||||
struct fuse_io_args *ia;
|
||||
struct fuse_args_pages *ap;
|
||||
|
||||
nr_pages = readahead_count(rac) - nr_pages;
|
||||
if (nr_pages > max_pages)
|
||||
nr_pages = max_pages;
|
||||
if (nr_pages == 0)
|
||||
break;
|
||||
ia = fuse_io_alloc(NULL, nr_pages);
|
||||
if (!ia)
|
||||
return;
|
||||
ap = &ia->ap;
|
||||
nr_pages = __readahead_batch(rac, ap->pages, nr_pages);
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
fuse_wait_on_page_writeback(inode,
|
||||
readahead_index(rac) + i);
|
||||
ap->descs[i].length = PAGE_SIZE;
|
||||
}
|
||||
ap->num_pages = nr_pages;
|
||||
fuse_send_readpages(ia, rac->file);
|
||||
}
|
||||
out:
|
||||
return err;
|
||||
}
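Callers that do not need fuse-style batching can also drain a readahead_control one page at a time. A minimal sketch of that loop; read_one_page() is a hypothetical stand-in for a filesystem's own synchronous read helper.

#include <linux/pagemap.h>
#include <linux/mm.h>

/* read_one_page() is hypothetical; it stands for a synchronous per-page read. */
int read_one_page(struct inode *inode, struct page *page);

static void example_readahead_one_by_one(struct readahead_control *rac)
{
	struct page *page;

	/* readahead_page() hands back locked pages already in the page cache
	 * and takes a reference that the consumer must drop. */
	while ((page = readahead_page(rac))) {
		if (read_one_page(rac->mapping->host, page))
			SetPageError(page);
		unlock_page(page);
		put_page(page);
	}
}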
|
||||
|
||||
static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
|
||||
|
@ -3373,10 +3329,10 @@ static const struct file_operations fuse_file_operations = {
|
|||
|
||||
static const struct address_space_operations fuse_file_aops = {
|
||||
.readpage = fuse_readpage,
|
||||
.readahead = fuse_readahead,
|
||||
.writepage = fuse_writepage,
|
||||
.writepages = fuse_writepages,
|
||||
.launder_page = fuse_launder_page,
|
||||
.readpages = fuse_readpages,
|
||||
.set_page_dirty = __set_page_dirty_nobuffers,
|
||||
.bmap = fuse_bmap,
|
||||
.direct_IO = fuse_direct_IO,
|
||||
|
|
|
@ -577,7 +577,7 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
|
|||
}
|
||||
|
||||
/**
|
||||
* gfs2_readpages - Read a bunch of pages at once
|
||||
* gfs2_readahead - Read a bunch of pages at once
|
||||
* @file: The file to read from
|
||||
* @mapping: Address space info
|
||||
* @pages: List of pages to read
|
||||
|
@ -590,31 +590,24 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
|
|||
* obviously not something we'd want to do on too regular a basis.
|
||||
* Any I/O we ignore at this time will be done via readpage later.
|
||||
* 2. We don't handle stuffed files here we let readpage do the honours.
|
||||
* 3. mpage_readpages() does most of the heavy lifting in the common case.
|
||||
* 3. mpage_readahead() does most of the heavy lifting in the common case.
|
||||
* 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
|
||||
*/
|
||||
|
||||
static int gfs2_readpages(struct file *file, struct address_space *mapping,
|
||||
struct list_head *pages, unsigned nr_pages)
|
||||
static void gfs2_readahead(struct readahead_control *rac)
|
||||
{
|
||||
struct inode *inode = mapping->host;
|
||||
struct inode *inode = rac->mapping->host;
|
||||
struct gfs2_inode *ip = GFS2_I(inode);
|
||||
struct gfs2_sbd *sdp = GFS2_SB(inode);
|
||||
struct gfs2_holder gh;
|
||||
int ret;
|
||||
|
||||
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
|
||||
ret = gfs2_glock_nq(&gh);
|
||||
if (unlikely(ret))
|
||||
if (gfs2_glock_nq(&gh))
|
||||
goto out_uninit;
|
||||
if (!gfs2_is_stuffed(ip))
|
||||
ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
|
||||
mpage_readahead(rac, gfs2_block_map);
|
||||
gfs2_glock_dq(&gh);
|
||||
out_uninit:
|
||||
gfs2_holder_uninit(&gh);
|
||||
if (unlikely(gfs2_withdrawn(sdp)))
|
||||
ret = -EIO;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -833,7 +826,7 @@ static const struct address_space_operations gfs2_aops = {
|
|||
.writepage = gfs2_writepage,
|
||||
.writepages = gfs2_writepages,
|
||||
.readpage = gfs2_readpage,
|
||||
.readpages = gfs2_readpages,
|
||||
.readahead = gfs2_readahead,
|
||||
.bmap = gfs2_bmap,
|
||||
.invalidatepage = gfs2_invalidatepage,
|
||||
.releasepage = gfs2_releasepage,
|
||||
|
@ -847,7 +840,7 @@ static const struct address_space_operations gfs2_jdata_aops = {
|
|||
.writepage = gfs2_jdata_writepage,
|
||||
.writepages = gfs2_jdata_writepages,
|
||||
.readpage = gfs2_readpage,
|
||||
.readpages = gfs2_readpages,
|
||||
.readahead = gfs2_readahead,
|
||||
.set_page_dirty = jdata_set_page_dirty,
|
||||
.bmap = gfs2_bmap,
|
||||
.invalidatepage = gfs2_invalidatepage,
|
||||
|
|
|
@ -354,7 +354,7 @@ static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
|
|||
|
||||
hc = kmalloc(hsize, GFP_NOFS | __GFP_NOWARN);
|
||||
if (hc == NULL)
|
||||
hc = __vmalloc(hsize, GFP_NOFS, PAGE_KERNEL);
|
||||
hc = __vmalloc(hsize, GFP_NOFS);
|
||||
|
||||
if (hc == NULL)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
@ -1166,7 +1166,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
|
|||
|
||||
hc2 = kmalloc_array(hsize_bytes, 2, GFP_NOFS | __GFP_NOWARN);
|
||||
if (hc2 == NULL)
|
||||
hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS, PAGE_KERNEL);
|
||||
hc2 = __vmalloc(hsize_bytes * 2, GFP_NOFS);
|
||||
|
||||
if (!hc2)
|
||||
return -ENOMEM;
|
||||
|
@ -1327,7 +1327,7 @@ static void *gfs2_alloc_sort_buffer(unsigned size)
|
|||
if (size < KMALLOC_MAX_SIZE)
|
||||
ptr = kmalloc(size, GFP_NOFS | __GFP_NOWARN);
|
||||
if (!ptr)
|
||||
ptr = __vmalloc(size, GFP_NOFS, PAGE_KERNEL);
|
||||
ptr = __vmalloc(size, GFP_NOFS);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
|
@ -1987,8 +1987,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
|
|||
|
||||
ht = kzalloc(size, GFP_NOFS | __GFP_NOWARN);
|
||||
if (ht == NULL)
|
||||
ht = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO,
|
||||
PAGE_KERNEL);
|
||||
ht = __vmalloc(size, GFP_NOFS | __GFP_NOWARN | __GFP_ZERO);
|
||||
if (!ht)
|
||||
return -ENOMEM;
|
||||
|
||||
|
|
|
@ -1365,7 +1365,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
|
|||
sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
|
||||
if (sdp->sd_quota_bitmap == NULL)
|
||||
sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
|
||||
__GFP_ZERO, PAGE_KERNEL);
|
||||
__GFP_ZERO);
|
||||
if (!sdp->sd_quota_bitmap)
|
||||
return error;
|
||||
|
||||
|
|
|
@ -125,10 +125,9 @@ static int hpfs_writepage(struct page *page, struct writeback_control *wbc)
|
|||
return block_write_full_page(page, hpfs_get_block, wbc);
|
||||
}
|
||||
|
||||
static int hpfs_readpages(struct file *file, struct address_space *mapping,
|
||||
struct list_head *pages, unsigned nr_pages)
|
||||
static void hpfs_readahead(struct readahead_control *rac)
|
||||
{
|
||||
return mpage_readpages(mapping, pages, nr_pages, hpfs_get_block);
|
||||
mpage_readahead(rac, hpfs_get_block);
|
||||
}
|
||||
|
||||
static int hpfs_writepages(struct address_space *mapping,
|
||||
|
@ -198,7 +197,7 @@ static int hpfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
|
|||
const struct address_space_operations hpfs_aops = {
|
||||
.readpage = hpfs_readpage,
|
||||
.writepage = hpfs_writepage,
|
||||
.readpages = hpfs_readpages,
|
||||
.readahead = hpfs_readahead,
|
||||
.writepages = hpfs_writepages,
|
||||
.write_begin = hpfs_write_begin,
|
||||
.write_end = hpfs_write_end,
|
||||
|
|
|
@ -59,24 +59,19 @@ iomap_page_create(struct inode *inode, struct page *page)
|
|||
* migrate_page_move_mapping() assumes that pages with private data have
|
||||
* their count elevated by 1.
|
||||
*/
|
||||
get_page(page);
|
||||
set_page_private(page, (unsigned long)iop);
|
||||
SetPagePrivate(page);
|
||||
attach_page_private(page, iop);
|
||||
return iop;
|
||||
}
|
||||
|
||||
static void
|
||||
iomap_page_release(struct page *page)
|
||||
{
|
||||
struct iomap_page *iop = to_iomap_page(page);
|
||||
struct iomap_page *iop = detach_page_private(page);
|
||||
|
||||
if (!iop)
|
||||
return;
|
||||
WARN_ON_ONCE(atomic_read(&iop->read_count));
|
||||
WARN_ON_ONCE(atomic_read(&iop->write_count));
|
||||
ClearPagePrivate(page);
|
||||
set_page_private(page, 0);
|
||||
put_page(page);
|
||||
kfree(iop);
|
||||
}
|
||||
|
||||
|
@ -214,9 +209,8 @@ iomap_read_end_io(struct bio *bio)
|
|||
struct iomap_readpage_ctx {
|
||||
struct page *cur_page;
|
||||
bool cur_page_in_bio;
|
||||
bool is_readahead;
|
||||
struct bio *bio;
|
||||
struct list_head *pages;
|
||||
struct readahead_control *rac;
|
||||
};
|
||||
|
||||
static void
|
||||
|
@ -308,7 +302,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
|||
if (ctx->bio)
|
||||
submit_bio(ctx->bio);
|
||||
|
||||
if (ctx->is_readahead) /* same as readahead_gfp_mask */
|
||||
if (ctx->rac) /* same as readahead_gfp_mask */
|
||||
gfp |= __GFP_NORETRY | __GFP_NOWARN;
|
||||
ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
|
||||
/*
|
||||
|
@ -319,7 +313,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
|
|||
if (!ctx->bio)
|
||||
ctx->bio = bio_alloc(orig_gfp, 1);
|
||||
ctx->bio->bi_opf = REQ_OP_READ;
|
||||
if (ctx->is_readahead)
|
||||
if (ctx->rac)
|
||||
ctx->bio->bi_opf |= REQ_RAHEAD;
|
||||
ctx->bio->bi_iter.bi_sector = sector;
|
||||
bio_set_dev(ctx->bio, iomap->bdev);
|
||||
|
@ -367,7 +361,7 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
|
|||
}
|
||||
|
||||
/*
|
||||
* Just like mpage_readpages and block_read_full_page we always
|
||||
* Just like mpage_readahead and block_read_full_page we always
|
||||
* return 0 and just mark the page as PageError on errors. This
|
||||
* should be cleaned up all through the stack eventually.
|
||||
*/
|
||||
|
@ -375,36 +369,8 @@ iomap_readpage(struct page *page, const struct iomap_ops *ops)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(iomap_readpage);
|
||||
|
||||
static struct page *
|
||||
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
|
||||
loff_t length, loff_t *done)
|
||||
{
|
||||
while (!list_empty(pages)) {
|
||||
struct page *page = lru_to_page(pages);
|
||||
|
||||
if (page_offset(page) >= (u64)pos + length)
|
||||
break;
|
||||
|
||||
list_del(&page->lru);
|
||||
if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
|
||||
GFP_NOFS))
|
||||
return page;
|
||||
|
||||
/*
|
||||
* If we already have a page in the page cache at index we are
|
||||
* done. Upper layers don't care if it is uptodate after the
|
||||
* readpages call itself as every page gets checked again once
|
||||
* actually needed.
|
||||
*/
|
||||
*done += PAGE_SIZE;
|
||||
put_page(page);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static loff_t
|
||||
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
|
||||
iomap_readahead_actor(struct inode *inode, loff_t pos, loff_t length,
|
||||
void *data, struct iomap *iomap, struct iomap *srcmap)
|
||||
{
|
||||
struct iomap_readpage_ctx *ctx = data;
|
||||
|
@ -418,10 +384,7 @@ iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
|
|||
ctx->cur_page = NULL;
|
||||
}
|
||||
if (!ctx->cur_page) {
|
||||
ctx->cur_page = iomap_next_page(inode, ctx->pages,
|
||||
pos, length, &done);
|
||||
if (!ctx->cur_page)
|
||||
break;
|
||||
ctx->cur_page = readahead_page(ctx->rac);
|
||||
ctx->cur_page_in_bio = false;
|
||||
}
|
||||
ret = iomap_readpage_actor(inode, pos + done, length - done,
|
||||
|
@@ -431,32 +394,43 @@ iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
return done;
}

int
iomap_readpages(struct address_space *mapping, struct list_head *pages,
unsigned nr_pages, const struct iomap_ops *ops)
/**
* iomap_readahead - Attempt to read pages from a file.
* @rac: Describes the pages to be read.
* @ops: The operations vector for the filesystem.
*
* This function is for filesystems to call to implement their readahead
* address_space operation.
*
* Context: The @ops callbacks may submit I/O (eg to read the addresses of
* blocks from disc), and may wait for it. The caller may be trying to
* access a different page, and so sleeping excessively should be avoided.
* It may allocate memory, but should avoid costly allocations. This
* function is called with memalloc_nofs set, so allocations will not cause
* the filesystem to be reentered.
*/
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
struct inode *inode = rac->mapping->host;
|
||||
loff_t pos = readahead_pos(rac);
|
||||
loff_t length = readahead_length(rac);
|
||||
struct iomap_readpage_ctx ctx = {
|
||||
.pages = pages,
|
||||
.is_readahead = true,
|
||||
.rac = rac,
|
||||
};
|
||||
loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
|
||||
loff_t last = page_offset(list_entry(pages->next, struct page, lru));
|
||||
loff_t length = last - pos + PAGE_SIZE, ret = 0;
|
||||
|
||||
trace_iomap_readpages(mapping->host, nr_pages);
|
||||
trace_iomap_readahead(inode, readahead_count(rac));
|
||||
|
||||
while (length > 0) {
|
||||
ret = iomap_apply(mapping->host, pos, length, 0, ops,
|
||||
&ctx, iomap_readpages_actor);
|
||||
loff_t ret = iomap_apply(inode, pos, length, 0, ops,
|
||||
&ctx, iomap_readahead_actor);
|
||||
if (ret <= 0) {
|
||||
WARN_ON_ONCE(ret == 0);
|
||||
goto done;
|
||||
break;
|
||||
}
|
||||
pos += ret;
|
||||
length -= ret;
|
||||
}
|
||||
ret = 0;
|
||||
done:
|
||||
|
||||
if (ctx.bio)
|
||||
submit_bio(ctx.bio);
|
||||
if (ctx.cur_page) {
|
||||
|
@ -464,15 +438,8 @@ done:
|
|||
unlock_page(ctx.cur_page);
|
||||
put_page(ctx.cur_page);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check that we didn't lose a page due to the arcance calling
|
||||
* conventions..
|
||||
*/
|
||||
WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(iomap_readpages);
|
||||
EXPORT_SYMBOL_GPL(iomap_readahead);
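A filesystem built on iomap adopts the new hook by pointing its address_space_operations at a thin wrapper around iomap_readahead(). A sketch under the assumption of an existing example_iomap_ops; the names here are placeholders, not real symbols.

#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>

extern const struct iomap_ops example_iomap_ops;	/* placeholder ops */

static void example_fs_readahead(struct readahead_control *rac)
{
	iomap_readahead(rac, &example_iomap_ops);
}

static const struct address_space_operations example_fs_aops = {
	/* .readpage, .writepages and friends elided for brevity */
	.readahead	= example_fs_readahead,
};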
|
||||
|
||||
/*
|
||||
* iomap_is_partially_uptodate checks whether blocks within a page are
|
||||
|
@ -554,14 +521,8 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
|
|||
if (ret != MIGRATEPAGE_SUCCESS)
|
||||
return ret;
|
||||
|
||||
if (page_has_private(page)) {
|
||||
ClearPagePrivate(page);
|
||||
get_page(newpage);
|
||||
set_page_private(newpage, page_private(page));
|
||||
set_page_private(page, 0);
|
||||
put_page(page);
|
||||
SetPagePrivate(newpage);
|
||||
}
|
||||
if (page_has_private(page))
|
||||
attach_page_private(newpage, detach_page_private(page));
|
||||
|
||||
if (mode != MIGRATE_SYNC_NO_COPY)
|
||||
migrate_page_copy(newpage, page);
|
||||
|
|
|
@ -39,7 +39,7 @@ DEFINE_EVENT(iomap_readpage_class, name, \
|
|||
TP_PROTO(struct inode *inode, int nr_pages), \
|
||||
TP_ARGS(inode, nr_pages))
|
||||
DEFINE_READPAGE_EVENT(iomap_readpage);
|
||||
DEFINE_READPAGE_EVENT(iomap_readpages);
|
||||
DEFINE_READPAGE_EVENT(iomap_readahead);
|
||||
|
||||
DECLARE_EVENT_CLASS(iomap_range_class,
|
||||
TP_PROTO(struct inode *inode, unsigned long off, unsigned int len),
|
||||
|
|
|
@ -1185,10 +1185,9 @@ static int isofs_readpage(struct file *file, struct page *page)
|
|||
return mpage_readpage(page, isofs_get_block);
|
||||
}
|
||||
|
||||
static int isofs_readpages(struct file *file, struct address_space *mapping,
|
||||
struct list_head *pages, unsigned nr_pages)
|
||||
static void isofs_readahead(struct readahead_control *rac)
|
||||
{
|
||||
return mpage_readpages(mapping, pages, nr_pages, isofs_get_block);
|
||||
mpage_readahead(rac, isofs_get_block);
|
||||
}
|
||||
|
||||
static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
|
||||
|
@ -1198,7 +1197,7 @@ static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
|
|||
|
||||
static const struct address_space_operations isofs_aops = {
|
||||
.readpage = isofs_readpage,
|
||||
.readpages = isofs_readpages,
|
||||
.readahead = isofs_readahead,
|
||||
.bmap = _isofs_bmap
|
||||
};
|
||||
|
||||
|
|
|
@ -296,10 +296,9 @@ static int jfs_readpage(struct file *file, struct page *page)
|
|||
return mpage_readpage(page, jfs_get_block);
|
||||
}
|
||||
|
||||
static int jfs_readpages(struct file *file, struct address_space *mapping,
|
||||
struct list_head *pages, unsigned nr_pages)
|
||||
static void jfs_readahead(struct readahead_control *rac)
|
||||
{
|
||||
return mpage_readpages(mapping, pages, nr_pages, jfs_get_block);
|
||||
mpage_readahead(rac, jfs_get_block);
|
||||
}
|
||||
|
||||
static void jfs_write_failed(struct address_space *mapping, loff_t to)
|
||||
|
@ -358,7 +357,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
|
|||
|
||||
const struct address_space_operations jfs_aops = {
|
||||
.readpage = jfs_readpage,
|
||||
.readpages = jfs_readpages,
|
||||
.readahead = jfs_readahead,
|
||||
.writepage = jfs_writepage,
|
||||
.writepages = jfs_writepages,
|
||||
.write_begin = jfs_write_begin,
|
||||
|
|
fs/mpage.c
|
@ -91,7 +91,7 @@ mpage_alloc(struct block_device *bdev,
|
|||
}
|
||||
|
||||
/*
|
||||
* support function for mpage_readpages. The fs supplied get_block might
|
||||
* support function for mpage_readahead. The fs supplied get_block might
|
||||
* return an up to date buffer. This is used to map that buffer into
|
||||
* the page, which allows readpage to avoid triggering a duplicate call
|
||||
* to get_block.
|
||||
|
@@ -338,13 +338,8 @@ confused:
 }

 /**
- * mpage_readpages - populate an address space with some pages & start reads against them
- * @mapping: the address_space
- * @pages: The address of a list_head which contains the target pages.  These
- *   pages have their ->index populated and are otherwise uninitialised.
- *   The page at @pages->prev has the lowest file offset, and reads should be
- *   issued in @pages->prev to @pages->next order.
- * @nr_pages: The number of pages at *@pages
+ * mpage_readahead - start reads against pages
+ * @rac: Describes which pages to read.
  * @get_block: The filesystem's block mapper function.
  *
  * This function walks the pages and the blocks within each page, building and
@@ -381,36 +376,25 @@ confused:
  *
  * This all causes the disk requests to be issued in the correct order.
  */
-int
-mpage_readpages(struct address_space *mapping, struct list_head *pages,
-				unsigned nr_pages, get_block_t get_block)
+void mpage_readahead(struct readahead_control *rac, get_block_t get_block)
 {
+	struct page *page;
 	struct mpage_readpage_args args = {
 		.get_block = get_block,
 		.is_readahead = true,
 	};
-	unsigned page_idx;
-
-	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
-		struct page *page = lru_to_page(pages);

+	while ((page = readahead_page(rac))) {
 		prefetchw(&page->flags);
-		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, mapping,
-					page->index,
-				readahead_gfp_mask(mapping))) {
-			args.page = page;
-			args.nr_pages = nr_pages - page_idx;
-			args.bio = do_mpage_readpage(&args);
-		}
+		args.page = page;
+		args.nr_pages = readahead_count(rac);
+		args.bio = do_mpage_readpage(&args);
 		put_page(page);
 	}
-	BUG_ON(!list_empty(pages));
 	if (args.bio)
 		mpage_bio_submit(REQ_OP_READ, REQ_RAHEAD, args.bio);
-	return 0;
 }
-EXPORT_SYMBOL(mpage_readpages);
+EXPORT_SYMBOL(mpage_readahead);

 /*
  * This isn't called much at all
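Worth noting about the conversion above: with ->readahead, the pages have already been added to the page cache by the caller, which is why the add_to_page_cache_lru()/list_del() bookkeeping disappears. A minimal sketch of the iteration contract assumed here (the behaviour of readahead_page() and readahead_count() is inferred from their use in this hunk, not quoted from the headers):

/* Sketch of a ->readahead consumer in the style of mpage_readahead() above.
 * Assumes readahead_page() returns the next page with a reference held (hence
 * the put_page()) and NULL when the batch is exhausted, and readahead_count()
 * returns how many pages remain in the window. */
static void example_readahead(struct readahead_control *rac)
{
	struct page *page;

	while ((page = readahead_page(rac))) {
		/* queue asynchronous read I/O for 'page' here */
		put_page(page);
	}
}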
@@ -563,7 +547,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
 		 * Page has buffers, but they are all unmapped. The page was
 		 * created by pagein or read over a hole which was handled by
 		 * block_read_full_page().  If this address_space is also
-		 * using mpage_readpages then this can rarely happen.
+		 * using mpage_readahead then this can rarely happen.
 		 */
 		goto confused;
 	}
@@ -582,7 +582,7 @@ retry:
 	if (!arg->layoutupdate_pages)
 		return -ENOMEM;

-	start_p = __vmalloc(buffer_size, GFP_NOFS, PAGE_KERNEL);
+	start_p = __vmalloc(buffer_size, GFP_NOFS);
 	if (!start_p) {
 		kfree(arg->layoutupdate_pages);
 		return -ENOMEM;
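This is one of the call sites touched by the "mm: remove the pgprot argument to __vmalloc" patch in the series: every caller passed PAGE_KERNEL, so the protection argument becomes implicit. A sketch of the prototype change as seen from this call site (assumed, not quoted from include/linux/vmalloc.h):

/* Old form, as removed above: the caller spelled out the mapping protection. */
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);

/* New form, as added above: the mapping is always PAGE_KERNEL. */
void *__vmalloc(unsigned long size, gfp_t gfp_mask);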
@@ -668,7 +668,8 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize)
 }

 /*
- * Record the page as unstable and mark its inode as dirty.
+ * Record the page as unstable (an extra writeback period) and mark its
+ * inode as dirty.
  */
 static inline
 void nfs_mark_page_unstable(struct page *page, struct nfs_commit_info *cinfo)
@@ -676,8 +677,11 @@ void nfs_mark_page_unstable(struct page *page, struct nfs_commit_info *cinfo)
 	if (!cinfo->dreq) {
 		struct inode *inode = page_file_mapping(page)->host;

-		inc_node_page_state(page, NR_UNSTABLE_NFS);
-		inc_wb_stat(&inode_to_bdi(inode)->wb, WB_RECLAIMABLE);
+		/* This page is really still in write-back - just that the
+		 * writeback is happening on the server now.
+		 */
+		inc_node_page_state(page, NR_WRITEBACK);
+		inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
 		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 	}
 }
@@ -946,9 +946,9 @@ nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
 static void
 nfs_clear_page_commit(struct page *page)
 {
-	dec_node_page_state(page, NR_UNSTABLE_NFS);
+	dec_node_page_state(page, NR_WRITEBACK);
 	dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
-			WB_RECLAIMABLE);
+			WB_WRITEBACK);
 }

 /* Called holding the request lock on @req */
@@ -979,12 +979,13 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,

 	if (test_bit(RQ_LOCAL, &rqstp->rq_flags))
 		/*
-		 * We want less throttling in balance_dirty_pages()
-		 * and shrink_inactive_list() so that nfs to
+		 * We want throttling in balance_dirty_pages()
+		 * and shrink_inactive_list() to only consider
+		 * the backingdev we are writing to, so that nfs to
 		 * localhost doesn't cause nfsd to lock up due to all
 		 * the client's dirty pages or its congested queue.
 		 */
-		current->flags |= PF_LESS_THROTTLE;
+		current->flags |= PF_LOCAL_THROTTLE;

 	exp = fhp->fh_export;
 	use_wgather = (rqstp->rq_vers == 2) && EX_WGATHER(exp);
@@ -1037,7 +1038,7 @@ out_nfserr:
 		nfserr = nfserrno(host_err);
 	}
 	if (test_bit(RQ_LOCAL, &rqstp->rq_flags))
-		current_restore_flags(pflags, PF_LESS_THROTTLE);
+		current_restore_flags(pflags, PF_LOCAL_THROTTLE);
 	return nfserr;
 }
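The PF_LESS_THROTTLE to PF_LOCAL_THROTTLE rename keeps the usual save/set/restore pattern; pflags is captured from current->flags earlier in nfsd_vfs_write(), outside this hunk. A minimal sketch of that pattern, with do_local_write() as a hypothetical placeholder for the actual write path between the two hunks:

static void do_local_write(void) { /* hypothetical: the real write happens here */ }

static void write_with_local_throttle(void)
{
	unsigned int pflags = current->flags;	/* snapshot, as nfsd_vfs_write() does */

	current->flags |= PF_LOCAL_THROTTLE;	/* throttle only against our own bdi */
	do_local_write();
	current_restore_flags(pflags, PF_LOCAL_THROTTLE);	/* restore just that bit */
}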
@@ -145,18 +145,9 @@ static int nilfs_readpage(struct file *file, struct page *page)
 	return mpage_readpage(page, nilfs_get_block);
 }

-/**
- * nilfs_readpages() - implement readpages() method of nilfs_aops {}
- * address_space_operations.
- * @file - file struct of the file to be read
- * @mapping - address_space struct used for reading multiple pages
- * @pages - the pages to be read
- * @nr_pages - number of pages to be read
- */
-static int nilfs_readpages(struct file *file, struct address_space *mapping,
-			   struct list_head *pages, unsigned int nr_pages)
+static void nilfs_readahead(struct readahead_control *rac)
 {
-	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
+	mpage_readahead(rac, nilfs_get_block);
 }

 static int nilfs_writepages(struct address_space *mapping,
@@ -308,7 +299,7 @@ const struct address_space_operations nilfs_aops = {
 	.readpage		= nilfs_readpage,
 	.writepages		= nilfs_writepages,
 	.set_page_dirty		= nilfs_set_page_dirty,
-	.readpages		= nilfs_readpages,
+	.readahead		= nilfs_readahead,
 	.write_begin		= nilfs_write_begin,
 	.write_end		= nilfs_write_end,
 	/* .releasepage		= nilfs_releasepage, */
@@ -1732,7 +1732,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
 			bh = bh->b_this_page;
 		} while (bh);
 		tail->b_this_page = head;
-		attach_page_buffers(page, head);
+		attach_page_private(page, head);
 	} else
 		buffers_to_free = bh;
 }
@@ -34,7 +34,7 @@ static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask)
 		/* return (void *)__get_free_page(gfp_mask); */
 	}
 	if (likely((size >> PAGE_SHIFT) < totalram_pages()))
-		return __vmalloc(size, gfp_mask, PAGE_KERNEL);
+		return __vmalloc(size, gfp_mask);
 	return NULL;
 }

@@ -504,7 +504,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
 			bh = bh->b_this_page;
 		} while (bh);
 		tail->b_this_page = head;
-		attach_page_buffers(page, head);
+		attach_page_private(page, head);
 	}
 	bh = head = page_buffers(page);
 	BUG_ON(!bh);
@@ -350,14 +350,11 @@ out:
  * grow out to a tree. If need be, detecting boundary extents could
  * trivially be added in a future version of ocfs2_get_block().
  */
-static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
-			   struct list_head *pages, unsigned nr_pages)
+static void ocfs2_readahead(struct readahead_control *rac)
 {
-	int ret, err = -EIO;
-	struct inode *inode = mapping->host;
+	int ret;
+	struct inode *inode = rac->mapping->host;
 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
-	loff_t start;
-	struct page *last;

 	/*
 	 * Use the nonblocking flag for the dlm code to avoid page
@@ -365,36 +362,31 @@ static int ocfs2_readpages(struct file *filp, struct address_space *mapping,
 	 */
 	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
 	if (ret)
-		return err;
+		return;

-	if (down_read_trylock(&oi->ip_alloc_sem) == 0) {
-		ocfs2_inode_unlock(inode, 0);
-		return err;
-	}
+	if (down_read_trylock(&oi->ip_alloc_sem) == 0)
+		goto out_unlock;

 	/*
 	 * Don't bother with inline-data. There isn't anything
 	 * to read-ahead in that case anyway...
 	 */
 	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
-		goto out_unlock;
+		goto out_up;

 	/*
 	 * Check whether a remote node truncated this file - we just
 	 * drop out in that case as it's not worth handling here.
 	 */
-	last = lru_to_page(pages);
-	start = (loff_t)last->index << PAGE_SHIFT;
-	if (start >= i_size_read(inode))
-		goto out_unlock;
+	if (readahead_pos(rac) >= i_size_read(inode))
+		goto out_up;

-	err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block);
+	mpage_readahead(rac, ocfs2_get_block);

-out_unlock:
+out_up:
 	up_read(&oi->ip_alloc_sem);

+out_unlock:
 	ocfs2_inode_unlock(inode, 0);
-
-	return err;
 }

 /* Note: Because we don't support holes, our allocation has
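readahead_pos() stands in for the old lru_to_page()/index shift when checking against i_size. A hypothetical helper showing the equivalent arithmetic (the real accessor lives in the pagemap headers; this only mirrors the computation removed above):

/* Hypothetical equivalent of the removed check: byte offset of the first
 * page in the readahead window, compared against i_size_read(inode). */
static inline loff_t example_readahead_start(pgoff_t first_index)
{
	return (loff_t)first_index << PAGE_SHIFT;
}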
@@ -2474,7 +2466,7 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)

 const struct address_space_operations ocfs2_aops = {
 	.readpage		= ocfs2_readpage,
-	.readpages		= ocfs2_readpages,
+	.readahead		= ocfs2_readahead,
 	.writepage		= ocfs2_writepage,
 	.write_begin		= ocfs2_write_begin,
 	.write_end		= ocfs2_write_end,
@@ -2760,6 +2760,7 @@ leave:
  * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
  */
 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
+	__must_hold(&dlm->spinlock)
 {
 	int ret;
 	int lock_dropped = 0;
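The __must_hold() added here is a sparse context annotation, not executable code: it documents that dlm_empty_lockres() must be entered and exited with dlm->spinlock held, so the checker can flag imbalanced locking. Roughly the usual definition pattern for such annotations (an assumption about the common scheme, not quoted from the kernel headers):

/* Sketch: context annotations compile away unless sparse (__CHECKER__) runs. */
#ifdef __CHECKER__
# define __must_hold(x)	__attribute__((context(x, 1, 1)))
#else
# define __must_hold(x)
#endif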