xen/arm/arm64: merge xen/mm32.c into xen/mm.c
Merge xen/mm32.c into xen/mm.c. As a consequence the code gets compiled
on arm64 too.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
commit 5121872afe
parent 3567258d28

4 changed files with 86 additions and 138 deletions
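For context: the code being moved implements the cache-maintenance hooks that
drivers/xen/swiotlb-xen.c invokes around every DMA mapping, via the inline
xen_dma_map_page()/xen_dma_unmap_page() wrappers in asm/xen/page-coherent.h.
A minimal sketch of that call pattern, with a hypothetical example_map()
standing in for the real swiotlb-xen map path:

        /*
         * Illustrative sketch only, not swiotlb-xen code: example_map() is
         * hypothetical. xen_dma_map_page() is the page-coherent.h wrapper
         * that, after this commit, reaches the __xen_dma_map_page() in mm.c
         * on both arm and arm64.
         */
        #include <linux/dma-mapping.h>
        #include <asm/xen/page-coherent.h>

        static dma_addr_t example_map(struct device *hwdev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir,
                                      struct dma_attrs *attrs)
        {
                dma_addr_t dev_addr = page_to_phys(page) + offset;

                /* no-op if the device is coherent or DMA_ATTR_SKIP_CPU_SYNC is set */
                xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
                return dev_addr;
        }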
arch/arm/xen/Makefile
@@ -1 +1 @@
-obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o
+obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
arch/arm/xen/mm.c
@@ -1,6 +1,10 @@
+#include <linux/cpu.h>
+#include <linux/dma-mapping.h>
 #include <linux/bootmem.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
 #include <linux/export.h>
+#include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/dma-mapping.h>
@@ -16,6 +20,86 @@
 #include <asm/xen/hypercall.h>
 #include <asm/xen/interface.h>
 
+enum dma_cache_op {
+        DMA_UNMAP,
+        DMA_MAP,
+};
+
+/* functions called by SWIOTLB */
+
+static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
+        size_t size, enum dma_data_direction dir, enum dma_cache_op op)
+{
+        unsigned long pfn;
+        size_t left = size;
+
+        pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
+        offset %= PAGE_SIZE;
+
+        do {
+                size_t len = left;
+
+                /* TODO: cache flush */
+
+                offset = 0;
+                pfn++;
+                left -= len;
+        } while (left);
+}
+
+static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir)
+{
+        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
+}
+
+static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir)
+{
+        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
+}
+
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+             dma_addr_t dev_addr, unsigned long offset, size_t size,
+             enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+        if (is_device_dma_coherent(hwdev))
+                return;
+        if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+                return;
+
+        __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
+}
+
+void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+                size_t size, enum dma_data_direction dir,
+                struct dma_attrs *attrs)
+
+{
+        if (is_device_dma_coherent(hwdev))
+                return;
+        if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+                return;
+
+        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void __xen_dma_sync_single_for_cpu(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        if (is_device_dma_coherent(hwdev))
+                return;
+        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+}
+
+void __xen_dma_sync_single_for_device(struct device *hwdev,
+                dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+        if (is_device_dma_coherent(hwdev))
+                return;
+        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
+}
+
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                  unsigned int address_bits,
                                  dma_addr_t *dma_handle)
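Note that dma_cache_maint() is still a stub at this point: the flush itself is
left as a TODO, and since len is set to the whole remaining size the do/while
body runs exactly once. The pfn/offset bookkeeping, though, already anticipates
a page-by-page walk; a sketch of the presumable final shape (the function name
and the clamp to the page boundary are assumptions, not part of this commit):

        /*
         * Sketch of the per-page walk the stub anticipates. The name
         * dma_cache_maint_sketch() and the PAGE_SIZE clamp are illustrative
         * assumptions; this commit deliberately leaves the flush as a TODO.
         */
        static void dma_cache_maint_sketch(dma_addr_t handle, unsigned long offset,
                size_t size)
        {
                unsigned long pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
                size_t left = size;

                offset %= PAGE_SIZE;

                do {
                        size_t len = left;

                        if (len + offset > PAGE_SIZE)
                                len = PAGE_SIZE - offset; /* stop at the page boundary */

                        /* clean/invalidate len bytes at (pfn << PAGE_SHIFT) + offset */

                        offset = 0;     /* later pages are walked from their start */
                        pfn++;
                        left -= len;
                } while (left);
        }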
arch/arm/xen/mm32.c (deleted)
@@ -1,94 +0,0 @@
-#include <linux/cpu.h>
-#include <linux/dma-mapping.h>
-#include <linux/gfp.h>
-#include <linux/highmem.h>
-
-#include <xen/features.h>
-enum dma_cache_op {
-        DMA_UNMAP,
-        DMA_MAP,
-};
-
-/* functions called by SWIOTLB */
-
-static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
-        size_t size, enum dma_data_direction dir, enum dma_cache_op op)
-{
-        unsigned long pfn;
-        size_t left = size;
-
-        pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
-        offset %= PAGE_SIZE;
-
-        do {
-                size_t len = left;
-
-                /* TODO: cache flush */
-
-                offset = 0;
-                pfn++;
-                left -= len;
-        } while (left);
-}
-
-static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
-                size_t size, enum dma_data_direction dir)
-{
-        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
-}
-
-static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
-                size_t size, enum dma_data_direction dir)
-{
-        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
-}
-
-void __xen_dma_map_page(struct device *hwdev, struct page *page,
-             dma_addr_t dev_addr, unsigned long offset, size_t size,
-             enum dma_data_direction dir, struct dma_attrs *attrs)
-{
-        if (is_device_dma_coherent(hwdev))
-                return;
-        if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
-                return;
-
-        __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
-}
-
-void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-                size_t size, enum dma_data_direction dir,
-                struct dma_attrs *attrs)
-
-{
-        if (is_device_dma_coherent(hwdev))
-                return;
-        if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
-                return;
-
-        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void __xen_dma_sync_single_for_cpu(struct device *hwdev,
-                dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-        if (is_device_dma_coherent(hwdev))
-                return;
-        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
-}
-
-void __xen_dma_sync_single_for_device(struct device *hwdev,
-                dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-        if (is_device_dma_coherent(hwdev))
-                return;
-        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
-}
-
-int __init xen_mm32_init(void)
-{
-        if (!xen_initial_domain())
-                return 0;
-
-        return 0;
-}
-arch_initcall(xen_mm32_init);
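Nothing from xen_mm32_init() needs to move across: the initcall returned 0
whether or not it ran in the initial domain, so the merged mm.c gains no
replacement initcall and no behaviour is lost.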
arch/arm64/include/asm/xen/page-coherent.h
@@ -1,43 +1 @@
-#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
-#define _ASM_ARM64_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <linux/dma-attrs.h>
-#include <linux/dma-mapping.h>
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-                dma_addr_t *dma_handle, gfp_t flags,
-                struct dma_attrs *attrs)
-{
-        return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-                void *cpu_addr, dma_addr_t dma_handle,
-                struct dma_attrs *attrs)
-{
-        __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-             dma_addr_t dev_addr, unsigned long offset, size_t size,
-             enum dma_data_direction dir, struct dma_attrs *attrs)
-{
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-                size_t size, enum dma_data_direction dir,
-                struct dma_attrs *attrs)
-{
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-                dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-                dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-}
-#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
+#include <../../arm/include/asm/xen/page-coherent.h>
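The shared arm header that arm64 now pulls in is not shown in this diff. Given
the __xen_dma_* functions mm.c exports, its inlines presumably just forward to
them; a sketch of the assumed shape of one wrapper (not the verbatim arm
header):

        /*
         * Assumed shape of the shared arm asm/xen/page-coherent.h wrapper,
         * shown for one hook only: the inline forwards to the out-of-line
         * helper that mm.c now builds for both arm and arm64.
         */
        static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
                     dma_addr_t dev_addr, unsigned long offset, size_t size,
                     enum dma_data_direction dir, struct dma_attrs *attrs)
        {
                __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
        }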