xen: unify foreign GFN map/unmap for auto-xlated physmap guests
Auto-translated physmap guests (arm, arm64 and x86 PVHVM/PVH) map and unmap foreign GFNs using the same method (updating the physmap). Unify the two arm and x86 implementations into one common one. Note that on arm and arm64, the correct error code will be returned (instead of always -EFAULT) and map/unmap failure warnings are no longer printed. These changes are required if the foreign domain is paging (-ENOENT failures are expected and must be propagated up to the caller). Signed-off-by: David Vrabel <david.vrabel@citrix.com> Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
This commit is contained in:
parent
b3b06c7eb7
commit
628c28eefd
6 changed files with 154 additions and 194 deletions
|
@ -53,105 +53,21 @@ EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
|
|||
|
||||
static __read_mostly int xen_events_irq = -1;
|
||||
|
||||
/* map fgmfn of domid to lpfn in the current domain */
|
||||
static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
|
||||
unsigned int domid)
|
||||
{
|
||||
int rc;
|
||||
struct xen_add_to_physmap_range xatp = {
|
||||
.domid = DOMID_SELF,
|
||||
.foreign_domid = domid,
|
||||
.size = 1,
|
||||
.space = XENMAPSPACE_gmfn_foreign,
|
||||
};
|
||||
xen_ulong_t idx = fgmfn;
|
||||
xen_pfn_t gpfn = lpfn;
|
||||
int err = 0;
|
||||
|
||||
set_xen_guest_handle(xatp.idxs, &idx);
|
||||
set_xen_guest_handle(xatp.gpfns, &gpfn);
|
||||
set_xen_guest_handle(xatp.errs, &err);
|
||||
|
||||
rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
|
||||
if (rc || err) {
|
||||
pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
|
||||
rc, err, lpfn, fgmfn);
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct remap_data {
|
||||
xen_pfn_t fgmfn; /* foreign domain's gmfn */
|
||||
pgprot_t prot;
|
||||
domid_t domid;
|
||||
struct vm_area_struct *vma;
|
||||
int index;
|
||||
struct page **pages;
|
||||
struct xen_remap_mfn_info *info;
|
||||
};
|
||||
|
||||
static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
|
||||
void *data)
|
||||
{
|
||||
struct remap_data *info = data;
|
||||
struct page *page = info->pages[info->index++];
|
||||
unsigned long pfn = page_to_pfn(page);
|
||||
pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
|
||||
|
||||
if (map_foreign_page(pfn, info->fgmfn, info->domid))
|
||||
return -EFAULT;
|
||||
set_pte_at(info->vma->vm_mm, addr, ptep, pte);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
|
||||
unsigned long addr,
|
||||
xen_pfn_t mfn, int nr,
|
||||
pgprot_t prot, unsigned domid,
|
||||
struct page **pages)
|
||||
{
|
||||
int err;
|
||||
struct remap_data data;
|
||||
|
||||
/* TBD: Batching, current sole caller only does page at a time */
|
||||
if (nr > 1)
|
||||
return -EINVAL;
|
||||
|
||||
data.fgmfn = mfn;
|
||||
data.prot = prot;
|
||||
data.domid = domid;
|
||||
data.vma = vma;
|
||||
data.index = 0;
|
||||
data.pages = pages;
|
||||
err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
|
||||
remap_pte_fn, &data);
|
||||
return err;
|
||||
return xen_xlate_remap_gfn_range(vma, addr, mfn, nr,
|
||||
prot, domid, pages);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
|
||||
|
||||
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
|
||||
int nr, struct page **pages)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nr; i++) {
|
||||
struct xen_remove_from_physmap xrp;
|
||||
unsigned long rc, pfn;
|
||||
|
||||
pfn = page_to_pfn(pages[i]);
|
||||
|
||||
xrp.domid = DOMID_SELF;
|
||||
xrp.gpfn = pfn;
|
||||
rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
|
||||
if (rc) {
|
||||
pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
|
||||
pfn, rc);
|
||||
return rc;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
return xen_xlate_unmap_gfn_range(vma, nr, pages);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
|
||||
|
||||
|
|
|
@ -2436,95 +2436,6 @@ void __init xen_hvm_init_mmu_ops(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_XEN_PVH
|
||||
/*
|
||||
* Map foreign gfn (fgfn), to local pfn (lpfn). This for the user
|
||||
* space creating new guest on pvh dom0 and needing to map domU pages.
|
||||
*/
|
||||
static int xlate_add_to_p2m(unsigned long lpfn, unsigned long fgfn,
|
||||
unsigned int domid)
|
||||
{
|
||||
int rc, err = 0;
|
||||
xen_pfn_t gpfn = lpfn;
|
||||
xen_ulong_t idx = fgfn;
|
||||
|
||||
struct xen_add_to_physmap_range xatp = {
|
||||
.domid = DOMID_SELF,
|
||||
.foreign_domid = domid,
|
||||
.size = 1,
|
||||
.space = XENMAPSPACE_gmfn_foreign,
|
||||
};
|
||||
set_xen_guest_handle(xatp.idxs, &idx);
|
||||
set_xen_guest_handle(xatp.gpfns, &gpfn);
|
||||
set_xen_guest_handle(xatp.errs, &err);
|
||||
|
||||
rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
return err;
|
||||
}
|
||||
|
||||
static int xlate_remove_from_p2m(unsigned long spfn, int count)
|
||||
{
|
||||
struct xen_remove_from_physmap xrp;
|
||||
int i, rc;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
xrp.domid = DOMID_SELF;
|
||||
xrp.gpfn = spfn+i;
|
||||
rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
|
||||
if (rc)
|
||||
break;
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
|
||||
struct xlate_remap_data {
|
||||
unsigned long fgfn; /* foreign domain's gfn */
|
||||
pgprot_t prot;
|
||||
domid_t domid;
|
||||
int index;
|
||||
struct page **pages;
|
||||
};
|
||||
|
||||
static int xlate_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
|
||||
void *data)
|
||||
{
|
||||
int rc;
|
||||
struct xlate_remap_data *remap = data;
|
||||
unsigned long pfn = page_to_pfn(remap->pages[remap->index++]);
|
||||
pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot));
|
||||
|
||||
rc = xlate_add_to_p2m(pfn, remap->fgfn, remap->domid);
|
||||
if (rc)
|
||||
return rc;
|
||||
native_set_pte(ptep, pteval);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int xlate_remap_gfn_range(struct vm_area_struct *vma,
|
||||
unsigned long addr, unsigned long mfn,
|
||||
int nr, pgprot_t prot, unsigned domid,
|
||||
struct page **pages)
|
||||
{
|
||||
int err;
|
||||
struct xlate_remap_data pvhdata;
|
||||
|
||||
BUG_ON(!pages);
|
||||
|
||||
pvhdata.fgfn = mfn;
|
||||
pvhdata.prot = prot;
|
||||
pvhdata.domid = domid;
|
||||
pvhdata.index = 0;
|
||||
pvhdata.pages = pages;
|
||||
err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
|
||||
xlate_map_pte_fn, &pvhdata);
|
||||
flush_tlb_all();
|
||||
return err;
|
||||
}
|
||||
#endif
|
||||
|
||||
#define REMAP_BATCH_SIZE 16
|
||||
|
||||
struct remap_data {
|
||||
|
@ -2564,8 +2475,8 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
|
|||
if (xen_feature(XENFEAT_auto_translated_physmap)) {
|
||||
#ifdef CONFIG_XEN_PVH
|
||||
/* We need to update the local page tables and the xen HAP */
|
||||
return xlate_remap_gfn_range(vma, addr, mfn, nr, prot,
|
||||
domid, pages);
|
||||
return xen_xlate_remap_gfn_range(vma, addr, mfn, nr, prot,
|
||||
domid, pages);
|
||||
#else
|
||||
return -EINVAL;
|
||||
#endif
|
||||
|
@ -2609,22 +2520,7 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
|
|||
return 0;
|
||||
|
||||
#ifdef CONFIG_XEN_PVH
|
||||
while (numpgs--) {
|
||||
/*
|
||||
* The mmu has already cleaned up the process mmu
|
||||
* resources at this point (lookup_address will return
|
||||
* NULL).
|
||||
*/
|
||||
unsigned long pfn = page_to_pfn(pages[numpgs]);
|
||||
|
||||
xlate_remove_from_p2m(pfn, 1);
|
||||
}
|
||||
/*
|
||||
* We don't need to flush tlbs because as part of
|
||||
* xlate_remove_from_p2m, the hypervisor will do tlb flushes
|
||||
* after removing the p2m entries from the EPT/NPT
|
||||
*/
|
||||
return 0;
|
||||
return xen_xlate_unmap_gfn_range(vma, numpgs, pages);
|
||||
#else
|
||||
return -EINVAL;
|
||||
#endif
|
||||
|
|
|
@ -253,4 +253,10 @@ config XEN_EFI
|
|||
def_bool y
|
||||
depends on X86_64 && EFI
|
||||
|
||||
config XEN_AUTO_XLATE
|
||||
def_bool y
|
||||
depends on ARM || ARM64 || XEN_PVHVM
|
||||
help
|
||||
Support for auto-translated physmap guests.
|
||||
|
||||
endmenu
|
||||
|
|
|
@ -37,6 +37,7 @@ obj-$(CONFIG_XEN_ACPI_HOTPLUG_CPU) += xen-acpi-cpuhotplug.o
|
|||
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
|
||||
obj-$(CONFIG_XEN_EFI) += efi.o
|
||||
obj-$(CONFIG_XEN_SCSI_BACKEND) += xen-scsiback.o
|
||||
obj-$(CONFIG_XEN_AUTO_XLATE) += xlate_mmu.o
|
||||
xen-evtchn-y := evtchn.o
|
||||
xen-gntdev-y := gntdev.o
|
||||
xen-gntalloc-y := gntalloc.o
|
||||
|
|
133
drivers/xen/xlate_mmu.c
Normal file
133
drivers/xen/xlate_mmu.c
Normal file
|
@ -0,0 +1,133 @@
|
|||
/*
|
||||
* MMU operations common to all auto-translated physmap guests.
|
||||
*
|
||||
* Copyright (C) 2015 Citrix Systems R&D Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License version 2
|
||||
* as published by the Free Software Foundation; or, when distributed
|
||||
* separately from the Linux kernel or incorporated into other
|
||||
* software packages, subject to the following license:
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this source file (the "Software"), to deal in the Software without
|
||||
* restriction, including without limitation the rights to use, copy, modify,
|
||||
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
|
||||
* and to permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#include <asm/xen/hypercall.h>
|
||||
#include <asm/xen/hypervisor.h>
|
||||
|
||||
#include <xen/xen.h>
|
||||
#include <xen/page.h>
|
||||
#include <xen/interface/xen.h>
|
||||
#include <xen/interface/memory.h>
|
||||
|
||||
/* map fgmfn of domid to lpfn in the current domain */
|
||||
static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
|
||||
unsigned int domid)
|
||||
{
|
||||
int rc;
|
||||
struct xen_add_to_physmap_range xatp = {
|
||||
.domid = DOMID_SELF,
|
||||
.foreign_domid = domid,
|
||||
.size = 1,
|
||||
.space = XENMAPSPACE_gmfn_foreign,
|
||||
};
|
||||
xen_ulong_t idx = fgmfn;
|
||||
xen_pfn_t gpfn = lpfn;
|
||||
int err = 0;
|
||||
|
||||
set_xen_guest_handle(xatp.idxs, &idx);
|
||||
set_xen_guest_handle(xatp.gpfns, &gpfn);
|
||||
set_xen_guest_handle(xatp.errs, &err);
|
||||
|
||||
rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
|
||||
return rc < 0 ? rc : err;
|
||||
}
|
||||
|
||||
/*
 * Per-call state threaded through apply_to_page_range() by
 * xen_xlate_remap_gfn_range() into remap_pte_fn().
 */
struct remap_data {
	xen_pfn_t fgmfn; /* foreign domain's gmfn */
	pgprot_t prot;              /* protection bits for the new PTEs */
	domid_t domid;              /* foreign domain that owns fgmfn */
	struct vm_area_struct *vma; /* VMA whose mm receives the PTEs */
	int index;                  /* next entry of pages[] to consume */
	struct page **pages;        /* local pages backing each mapping */
	/* NOTE(review): 'info' is never read or written by the code
	 * visible in this file — looks vestigial; confirm before use. */
	struct xen_remap_mfn_info *info;
};
|
||||
|
||||
/*
 * apply_to_page_range() callback: map the next local page onto the
 * foreign gmfn and install the corresponding PTE.
 *
 * Returns 0 on success or the (negative) error from the physmap update,
 * in which case no PTE is written.
 */
static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct remap_data *r = data;
	unsigned long pfn;
	int ret;

	/* Consume the next backing page. */
	pfn = page_to_pfn(r->pages[r->index]);
	r->index++;

	ret = map_foreign_page(pfn, r->fgmfn, r->domid);
	if (ret < 0)
		return ret;

	set_pte_at(r->vma->vm_mm, addr, ptep,
		   pte_mkspecial(pfn_pte(pfn, r->prot)));
	return 0;
}
|
||||
|
||||
int xen_xlate_remap_gfn_range(struct vm_area_struct *vma,
|
||||
unsigned long addr,
|
||||
xen_pfn_t gfn, int nr,
|
||||
pgprot_t prot, unsigned domid,
|
||||
struct page **pages)
|
||||
{
|
||||
int err;
|
||||
struct remap_data data;
|
||||
|
||||
/* TBD: Batching, current sole caller only does page at a time */
|
||||
if (nr > 1)
|
||||
return -EINVAL;
|
||||
|
||||
data.fgmfn = gfn;
|
||||
data.prot = prot;
|
||||
data.domid = domid;
|
||||
data.vma = vma;
|
||||
data.index = 0;
|
||||
data.pages = pages;
|
||||
err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
|
||||
remap_pte_fn, &data);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xen_xlate_remap_gfn_range);
|
||||
|
||||
int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
|
||||
int nr, struct page **pages)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nr; i++) {
|
||||
struct xen_remove_from_physmap xrp;
|
||||
unsigned long pfn;
|
||||
|
||||
pfn = page_to_pfn(pages[i]);
|
||||
|
||||
xrp.domid = DOMID_SELF;
|
||||
xrp.gpfn = pfn;
|
||||
(void)HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range);
|
|
@ -34,6 +34,14 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
|
|||
struct page **pages);
|
||||
int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
|
||||
int numpgs, struct page **pages);
|
||||
int xen_xlate_remap_gfn_range(struct vm_area_struct *vma,
|
||||
unsigned long addr,
|
||||
xen_pfn_t gfn, int nr,
|
||||
pgprot_t prot,
|
||||
unsigned domid,
|
||||
struct page **pages);
|
||||
int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
|
||||
int nr, struct page **pages);
|
||||
|
||||
bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
|
||||
|
||||
|
|
Loading…
Reference in a new issue