[ARM] Resolve fuse and direct-IO failures due to missing cache flushes
fuse does not work on ARM due to cache incoherency issues: fuse uses get_user_pages() to copy data from the current process into kernel space, but because that copy accesses the user's page via the kernel mapping, the kernel mapping can be out of date with respect to data written through the userspace mapping. This can lead to unpredictable behaviour (in the case of fuse) or to data corruption for direct-IO.

This resolves Debian bug #402876.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
commit 6020dff092
parent a6f36be326
2 changed files with 49 additions and 0 deletions
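The incoherency described above bites when a get_user_pages() caller touches the pinned page through the kernel mapping while the process wrote the data through its own userspace mapping; on VIVT and aliasing VIPT caches those are different cache lines. Below is a minimal sketch of the read direction with a hypothetical helper name: only flush_anon_page(), page_address() and memcpy() are real kernel interfaces, the rest is illustration, and it assumes a lowmem page (a real caller would kmap() highmem).

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical helper: copy len bytes that the current process wrote at
 * user address vmaddr out of a page already pinned with get_user_pages().
 */
static void copy_from_pinned_page(struct vm_area_struct *vma,
				  struct page *page, unsigned long vmaddr,
				  void *dst, size_t len)
{
	/*
	 * Write back the user's dirty cache lines and invalidate the
	 * kernel-side lines for this page (a no-op on non-aliasing caches).
	 * Without this, the memcpy() below can read stale data.
	 */
	flush_anon_page(vma, page, vmaddr);

	/* The read through the kernel mapping now sees current data. */
	memcpy(dst, page_address(page) + (vmaddr & ~PAGE_MASK), len);
}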
@@ -202,3 +202,42 @@ void flush_dcache_page(struct page *page)
 	}
 }
 EXPORT_SYMBOL(flush_dcache_page);
+
+/*
+ * Flush an anonymous page so that users of get_user_pages()
+ * can safely access the data.  The expected sequence is:
+ *
+ *  get_user_pages()
+ *    -> flush_anon_page
+ *  memcpy() to/from page
+ *  if written to page, flush_dcache_page()
+ */
+void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+{
+	unsigned long pfn;
+
+	/* VIPT non-aliasing caches need do nothing */
+	if (cache_is_vipt_nonaliasing())
+		return;
+
+	/*
+	 * Write back and invalidate userspace mapping.
+	 */
+	pfn = page_to_pfn(page);
+	if (cache_is_vivt()) {
+		flush_cache_page(vma, vmaddr, pfn);
+	} else {
+		/*
+		 * For aliasing VIPT, we can flush an alias of the
+		 * userspace address only.
+		 */
+		flush_pfn_alias(pfn, vmaddr);
+	}
+
+	/*
+	 * Invalidate kernel mapping.  No data should be contained
+	 * in this mapping of the page.  FIXME: this is overkill
+	 * since we actually ask for a write-back and invalidate.
+	 */
+	__cpuc_flush_dcache_page(page_address(page));
+}
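The comment block in the hunk above (presumably arch/arm/mm/flush.c) also documents the write direction: after copying into the page through the kernel mapping, the caller must call flush_dcache_page() so the new data becomes visible through the user mapping. A hedged counterpart to the earlier sketch, with the same invented helper naming:

/*
 * Hypothetical counterpart: fill a pinned anonymous page through the
 * kernel mapping and make the result visible to the user's mapping,
 * following the sequence documented above.
 */
static void copy_to_pinned_page(struct vm_area_struct *vma,
				struct page *page, unsigned long vmaddr,
				const void *src, size_t len)
{
	/* get_user_pages() -> flush_anon_page: flush before touching the page. */
	flush_anon_page(vma, page, vmaddr);

	/* memcpy() to the page via the kernel mapping. */
	memcpy(page_address(page) + (vmaddr & ~PAGE_MASK), src, len);

	/* "if written to page, flush_dcache_page()" */
	flush_dcache_page(page);
}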
@@ -357,6 +357,16 @@ extern void flush_dcache_page(struct page *);
 
 extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
 
+#define ARCH_HAS_FLUSH_ANON_PAGE
+static inline void flush_anon_page(struct vm_area_struct *vma,
+			 struct page *page, unsigned long vmaddr)
+{
+	extern void __flush_anon_page(struct vm_area_struct *vma,
+				struct page *, unsigned long);
+	if (PageAnon(page))
+		__flush_anon_page(vma, page, vmaddr);
+}
+
 #define flush_dcache_mmap_lock(mapping) \
 	write_lock_irq(&(mapping)->tree_lock)
 #define flush_dcache_mmap_unlock(mapping) \
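Defining ARCH_HAS_FLUSH_ANON_PAGE in this header (presumably include/asm-arm/cacheflush.h) is what makes generic callers pick up the ARM implementation: include/linux/highmem.h is assumed to supply a no-op fallback along the following lines when an architecture does not define the macro. This is a sketch of that pattern, not part of this diff.

/* Sketch of the generic fallback in include/linux/highmem.h. */
#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
				   struct page *page, unsigned long vmaddr)
{
	/* Architectures without aliasing caches need do nothing here. */
}
#endif

The PageAnon() check in the new inline keeps the extra flush off file-backed pages, whose aliases are already handled by the flush_dcache_page() machinery.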