IOMMU Updates for Linux v3.14
A few patches have been queued up for this merge window:

* Improvements for the ARM-SMMU driver (IOMMU_EXEC support, IOMMU group support)
* Updates and fixes for the shmobile IOMMU driver
* Various fixes to generic IOMMU code and the Intel IOMMU driver
* Some cleanups in IOMMU drivers (dev_is_pci() usage)

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.11 (GNU/Linux)

iQIcBAABAgAGBQJS6XrCAAoJECvwRC2XARrjgy4P/itemtg2U+603Ldje8WcPo0E
OCO/0VVSmCTYKUJDZY0hiVwqmhe5gFL3Hm/gGwkS0+UJenFXMmi+aVaPp4pCpgH+
dL2HD3dIEvi14bisrdxG/8MdR6mIx0qzKtnZLkKSR4LXwucyLHvC/DaCoOytb7Yk
7s+eEuo0hj0jAkiqSG/zLEtKElTEnoAAkLOjMy46orecJ5q4HusPZekLtWZs2ETe
x3NS63Unb9g1iSQJWIA7HnQlxWIr2+iynoamHHJRiVFzqRF0W0sGvQY3auG0DSCn
70WRNE1rKfEkfXMJxosRQ4394YUQdAkt8MBENNcJcC6E1n5PBi0cEZXH6mCnEIlG
jXzIKUY9fz68ZboaqIxXv4Hb+JLlPXCvPBvQzIQiKRgVxd8nncEjn5I9MHdf+je5
BmJlzJLJvP4cFvW8Hc8k2Oq101b1kEcSCLARWWvE9/bk9xIUyrqBkR4XjC0vb6qq
1HbKVdZ7KFKCkBHy9xMpr7CUjKiDiiLeUmqlhyjcK9spicuNIZQnC11HemL6/USP
oR6Ext9RGhvz+ch656+5+L6f6FURVP8/ywKiJ3RjmvXV5/fCYo3WMitOB2qzlWCy
SYXAczAOMOdOo+1Dxbghrr+7HzUWPqgfPmntZEPGMZhfuZ6xXr+7pGLjAhHb4vcR
SZxqkDo1cprqrR9KFAWC
=YKLk
-----END PGP SIGNATURE-----

Merge tag 'iommu-updates-v3.14' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU Updates from Joerg Roedel:
 "A few patches have been queued up for this merge window:

  - improvements for the ARM-SMMU driver (IOMMU_EXEC support, IOMMU group support)
  - updates and fixes for the shmobile IOMMU driver
  - various fixes to generic IOMMU code and the Intel IOMMU driver
  - some cleanups in IOMMU drivers (dev_is_pci() usage)"

* tag 'iommu-updates-v3.14' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (36 commits)
  iommu/vt-d: Fix signedness bug in alloc_irte()
  iommu/vt-d: free all resources if failed to initialize DMARs
  iommu/vt-d, trivial: clean sparse warnings
  iommu/vt-d: fix wrong return value of dmar_table_init()
  iommu/vt-d: release invalidation queue when destroying IOMMU unit
  iommu/vt-d: fix access after free issue in function free_dmar_iommu()
  iommu/vt-d: keep shared resources when failed to initialize iommu devices
  iommu/vt-d: fix invalid memory access when freeing DMAR irq
  iommu/vt-d, trivial: simplify code with existing macros
  iommu/vt-d, trivial: use defined macro instead of hardcoding
  iommu/vt-d: mark internal functions as static
  iommu/vt-d, trivial: clean up unused code
  iommu/vt-d, trivial: check suitable flag in function detect_intel_iommu()
  iommu/vt-d, trivial: print correct domain id of static identity domain
  iommu/vt-d, trivial: refine support of 64bit guest address
  iommu/vt-d: fix resource leakage on error recovery path in iommu_init_domains()
  iommu/vt-d: fix a race window in allocating domain ID for virtual machines
  iommu/vt-d: fix PCI device reference leakage on error recovery path
  drm/msm: Fix link error with !MSM_IOMMU
  iommu/vt-d: use dedicated bitmap to track remapping entry allocation status
  ...
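Two of the changes pulled here are easiest to see from the API side: include/linux/iommu.h turns the IOMMU_READ/IOMMU_WRITE/IOMMU_CACHE flags into explicit bit positions and adds IOMMU_EXEC, which the ARM-SMMU driver honours by clearing the XN page-table bit only when the flag is passed. The sketch below is illustrative only and is not part of this merge; the function name and parameters are hypothetical, while iommu_domain_alloc(), iommu_map() and the flag names follow the in-kernel IOMMU API of this era.

```c
/*
 * Minimal sketch (not from this merge): request a readable, executable,
 * non-writable IOMMU mapping using the new IOMMU_EXEC flag.
 * example_map_exec() and its parameters are hypothetical.
 */
#include <linux/iommu.h>
#include <linux/errno.h>

static int example_map_exec(struct bus_type *bus, unsigned long iova,
			    phys_addr_t paddr, size_t size)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(bus);	/* e.g. &platform_bus_type */
	if (!domain)
		return -ENOMEM;

	/* Execute permission is opt-in; ARM-SMMU clears XN for this flag. */
	ret = iommu_map(domain, iova, paddr, size, IOMMU_READ | IOMMU_EXEC);
	if (ret)
		iommu_domain_free(domain);

	return ret;
}
```

A driver that never passes IOMMU_EXEC keeps the previous behaviour, since the ARM-SMMU page tables now start out with XN set and only clear it on request.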
commit b3a4bcaa5a
17 changed files with 279 additions and 280 deletions
@@ -4,6 +4,7 @@ config DRM_MSM
depends on DRM
depends on ARCH_MSM
depends on ARCH_MSM8960
depends on MSM_IOMMU
select DRM_KMS_HELPER
select SHMEM
select TMPFS
@@ -207,6 +207,7 @@ config SHMOBILE_IOMMU
bool "IOMMU for Renesas IPMMU/IPMMUI"
default n
depends on ARM
depends on SH_MOBILE || COMPILE_TEST
select IOMMU_API
select ARM_DMA_USE_IOMMU
select SHMOBILE_IPMMU
@@ -248,8 +248,8 @@ static bool check_device(struct device *dev)
if (!dev || !dev->dma_mask)
return false;

/* No device or no PCI device */
if (dev->bus != &pci_bus_type)
/* No PCI device */
if (!dev_is_pci(dev))
return false;

devid = get_device_id(dev);
@@ -24,7 +24,7 @@
* - v7/v8 long-descriptor format
* - Non-secure access to the SMMU
* - 4k and 64k pages, with contiguous pte hints.
* - Up to 39-bit addressing
* - Up to 42-bit addressing (dependent on VA_BITS)
* - Context fault reporting
*/

@@ -61,12 +61,13 @@
#define ARM_SMMU_GR1(smmu) ((smmu)->base + (smmu)->pagesize)

/* Page table bits */
#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0)
#define ARM_SMMU_PTE_XN (((pteval_t)3) << 53)
#define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52)
#define ARM_SMMU_PTE_AF (((pteval_t)1) << 10)
#define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8)
#define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8)
#define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8)
#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0)

#if PAGE_SIZE == SZ_4K
#define ARM_SMMU_PTE_CONT_ENTRIES 16

@@ -1205,7 +1206,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
unsigned long pfn, int flags, int stage)
{
pte_t *pte, *start;
pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;

if (pmd_none(*pmd)) {
/* Allocate a new set of tables */

@@ -1244,7 +1245,9 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
}

/* If no access, create a faulting entry to avoid TLB fills */
if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
if (flags & IOMMU_EXEC)
pteval &= ~ARM_SMMU_PTE_XN;
else if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
pteval &= ~ARM_SMMU_PTE_PAGE;

pteval |= ARM_SMMU_PTE_SH_IS;

@@ -1494,6 +1497,13 @@ static int arm_smmu_add_device(struct device *dev)
{
struct arm_smmu_device *child, *parent, *smmu;
struct arm_smmu_master *master = NULL;
struct iommu_group *group;
int ret;

if (dev->archdata.iommu) {
dev_warn(dev, "IOMMU driver already assigned to device\n");
return -EINVAL;
}

spin_lock(&arm_smmu_devices_lock);
list_for_each_entry(parent, &arm_smmu_devices, list) {

@@ -1526,13 +1536,23 @@ static int arm_smmu_add_device(struct device *dev)
if (!master)
return -ENODEV;

group = iommu_group_alloc();
if (IS_ERR(group)) {
dev_err(dev, "Failed to allocate IOMMU group\n");
return PTR_ERR(group);
}

ret = iommu_group_add_device(group, dev);
iommu_group_put(group);
dev->archdata.iommu = smmu;
return 0;

return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
dev->archdata.iommu = NULL;
iommu_group_remove_device(dev);
}

static struct iommu_ops arm_smmu_ops = {

@@ -1730,7 +1750,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
* allocation (PTRS_PER_PGD).
*/
#ifdef CONFIG_64BIT
/* Current maximum output size of 39 bits */
smmu->s1_output_size = min(39UL, size);
#else
smmu->s1_output_size = min(32UL, size);

@@ -1745,7 +1764,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
} else {
#ifdef CONFIG_64BIT
size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
size = min(39, arm_smmu_id_size_to_bits(size));
size = min(VA_BITS, arm_smmu_id_size_to_bits(size));
#else
size = 32;
#endif
@@ -52,6 +52,9 @@ LIST_HEAD(dmar_drhd_units);
struct acpi_table_header * __initdata dmar_tbl;
static acpi_size dmar_tbl_size;

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
/*

@@ -100,7 +103,6 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
if (!pdev) {
pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
segment, scope->bus, path->device, path->function);
*dev = NULL;
return 0;
}
if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \

@@ -151,7 +153,7 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
ret = dmar_parse_one_dev_scope(scope,
&(*devices)[index], segment);
if (ret) {
kfree(*devices);
dmar_free_dev_scope(devices, cnt);
return ret;
}
index ++;

@@ -162,6 +164,17 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
return 0;
}

void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
{
if (*devices && *cnt) {
while (--*cnt >= 0)
pci_dev_put((*devices)[*cnt]);
kfree(*devices);
*devices = NULL;
*cnt = 0;
}
}

/**
* dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
* structure which uniquely represent one DMA remapping hardware unit

@@ -193,25 +206,28 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
if (dmaru->devices && dmaru->devices_cnt)
dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
if (dmaru->iommu)
free_iommu(dmaru->iommu);
kfree(dmaru);
}

static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
{
struct acpi_dmar_hardware_unit *drhd;
int ret = 0;

drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;

if (dmaru->include_all)
return 0;

ret = dmar_parse_dev_scope((void *)(drhd + 1),
((void *)drhd) + drhd->header.length,
&dmaru->devices_cnt, &dmaru->devices,
drhd->segment);
if (ret) {
list_del(&dmaru->list);
kfree(dmaru);
}
return ret;
return dmar_parse_dev_scope((void *)(drhd + 1),
((void *)drhd) + drhd->header.length,
&dmaru->devices_cnt, &dmaru->devices,
drhd->segment);
}

#ifdef CONFIG_ACPI_NUMA

@@ -423,7 +439,7 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
int __init dmar_dev_scope_init(void)
{
static int dmar_dev_scope_initialized;
struct dmar_drhd_unit *drhd, *drhd_n;
struct dmar_drhd_unit *drhd;
int ret = -ENODEV;

if (dmar_dev_scope_initialized)

@@ -432,7 +448,7 @@ int __init dmar_dev_scope_init(void)
if (list_empty(&dmar_drhd_units))
goto fail;

list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
list_for_each_entry(drhd, &dmar_drhd_units, list) {
ret = dmar_parse_dev(drhd);
if (ret)
goto fail;

@@ -456,24 +472,23 @@ int __init dmar_table_init(void)
static int dmar_table_initialized;
int ret;

if (dmar_table_initialized)
return 0;
if (dmar_table_initialized == 0) {
ret = parse_dmar_table();
if (ret < 0) {
if (ret != -ENODEV)
pr_info("parse DMAR table failure.\n");
} else if (list_empty(&dmar_drhd_units)) {
pr_info("No DMAR devices found\n");
ret = -ENODEV;
}

dmar_table_initialized = 1;

ret = parse_dmar_table();
if (ret) {
if (ret != -ENODEV)
pr_info("parse DMAR table failure.\n");
return ret;
if (ret < 0)
dmar_table_initialized = ret;
else
dmar_table_initialized = 1;
}

if (list_empty(&dmar_drhd_units)) {
pr_info("No DMAR devices found\n");
return -ENODEV;
}

return 0;
return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)

@@ -488,7 +503,7 @@ static void warn_invalid_dmar(u64 addr, const char *message)
dmi_get_system_info(DMI_PRODUCT_VERSION));
}

int __init check_zero_address(void)
static int __init check_zero_address(void)
{
struct acpi_table_dmar *dmar;
struct acpi_dmar_header *entry_header;

@@ -546,14 +561,6 @@ int __init detect_intel_iommu(void)
if (ret)
ret = check_zero_address();
{
struct acpi_table_dmar *dmar;

dmar = (struct acpi_table_dmar *) dmar_tbl;

if (ret && irq_remapping_enabled && cpu_has_x2apic &&
dmar->flags & 0x1)
pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n")

if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
iommu_detected = 1;
/* Make sure ACS will be enabled */

@@ -565,7 +572,7 @@ int __init detect_intel_iommu(void)
x86_init.iommu.iommu_init = intel_iommu_init;
#endif
}
early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
dmar_tbl = NULL;

return ret ? 1 : -ENODEV;

@@ -647,7 +654,7 @@ out:
return err;
}

int alloc_iommu(struct dmar_drhd_unit *drhd)
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
u32 ver, sts;

@@ -721,12 +728,19 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
return err;
}

void free_iommu(struct intel_iommu *iommu)
static void free_iommu(struct intel_iommu *iommu)
{
if (!iommu)
return;
if (iommu->irq) {
free_irq(iommu->irq, iommu);
irq_set_handler_data(iommu->irq, NULL);
destroy_irq(iommu->irq);
}

free_dmar_iommu(iommu);
if (iommu->qi) {
free_page((unsigned long)iommu->qi->desc);
kfree(iommu->qi->desc_status);
kfree(iommu->qi);
}

if (iommu->reg)
unmap_iommu(iommu);

@@ -1050,7 +1064,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
if (!desc_page) {
kfree(qi);
iommu->qi = 0;
iommu->qi = NULL;
return -ENOMEM;
}

@@ -1060,7 +1074,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
if (!qi->desc_status) {
free_page((unsigned long) qi->desc);
kfree(qi);
iommu->qi = 0;
iommu->qi = NULL;
return -ENOMEM;
}

@@ -1111,9 +1125,7 @@ static const char *irq_remap_fault_reasons[] =
"Blocked an interrupt request due to source-id verification failure",
};

#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1)

const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
{
if (fault_reason >= 0x20 && (fault_reason - 0x20 <
ARRAY_SIZE(irq_remap_fault_reasons))) {

@@ -1303,15 +1315,14 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
int __init enable_drhd_fault_handling(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;

/*
* Enable fault control interrupt.
*/
for_each_drhd_unit(drhd) {
int ret;
struct intel_iommu *iommu = drhd->iommu;
for_each_iommu(iommu, drhd) {
u32 fault_status;
ret = dmar_set_interrupt(iommu);
int ret = dmar_set_interrupt(iommu);

if (ret) {
pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",

@@ -1366,4 +1377,22 @@ int __init dmar_ir_support(void)
return 0;
return dmar->flags & 0x1;
}

static int __init dmar_free_unused_resources(void)
{
struct dmar_drhd_unit *dmaru, *dmaru_n;

/* DMAR units are in use */
if (irq_remapping_enabled || intel_iommu_enabled)
return 0;

list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
list_del(&dmaru->list);
dmar_free_drhd(dmaru);
}

return 0;
}

late_initcall(dmar_free_unused_resources);
IOMMU_INIT_POST(detect_intel_iommu);
@@ -691,7 +691,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
* Use LIODN of the PCI controller while attaching a
* PCI device.
*/
if (dev->bus == &pci_bus_type) {
if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
pci_ctl = pci_bus_to_host(pdev->bus);
/*

@@ -729,7 +729,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
* Use LIODN of the PCI controller while detaching a
* PCI device.
*/
if (dev->bus == &pci_bus_type) {
if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
pci_ctl = pci_bus_to_host(pdev->bus);
/*

@@ -1056,7 +1056,7 @@ static int fsl_pamu_add_device(struct device *dev)
* For platform devices we allocate a separate group for
* each of the devices.
*/
if (dev->bus == &pci_bus_type) {
if (dev_is_pci(dev)) {
pdev = to_pci_dev(dev);
/* Don't create device groups for virtual PCI bridges */
if (pdev->subordinate)
@@ -63,6 +63,7 @@
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48

#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)

#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

@@ -106,12 +107,12 @@ static inline int agaw_to_level(int agaw)

static inline int agaw_to_width(int agaw)
{
return 30 + agaw * LEVEL_STRIDE;
return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}

static inline int width_to_agaw(int width)
{
return (width - 30) / LEVEL_STRIDE;
return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}

static inline unsigned int level_to_offset_bits(int level)

@@ -141,7 +142,7 @@ static inline unsigned long align_to_level(unsigned long pfn, int level)

static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
return 1 << ((lvl - 1) * LEVEL_STRIDE);
return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things

@@ -288,26 +289,6 @@ static inline void dma_clear_pte(struct dma_pte *pte)
pte->val = 0;
}

static inline void dma_set_pte_readable(struct dma_pte *pte)
{
pte->val |= DMA_PTE_READ;
}

static inline void dma_set_pte_writable(struct dma_pte *pte)
{
pte->val |= DMA_PTE_WRITE;
}

static inline void dma_set_pte_snp(struct dma_pte *pte)
{
pte->val |= DMA_PTE_SNP;
}

static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
{
pte->val = (pte->val & ~3) | (prot & 3);
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT

@@ -318,11 +299,6 @@ static inline u64 dma_pte_addr(struct dma_pte *pte)
#endif
}

static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
{
pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
return (pte->val & 3) != 0;

@@ -406,7 +382,7 @@ struct device_domain_info {

static void flush_unmaps_timeout(unsigned long data);

DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);

#define HIGH_WATER_MARK 250
struct deferred_flush_tables {

@@ -652,9 +628,7 @@ static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
struct dmar_drhd_unit *drhd = NULL;
int i;

for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;
for_each_active_drhd_unit(drhd) {
if (segment != drhd->segment)
continue;

@@ -865,7 +839,6 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
unsigned int large_page = 1;
struct dma_pte *first_pte, *pte;
int order;

BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);

@@ -890,8 +863,7 @@ static int dma_pte_clear_range(struct dmar_domain *domain,

} while (start_pfn && start_pfn <= last_pfn);

order = (large_page - 1) * 9;
return order;
return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH);
}

static void dma_pte_free_level(struct dmar_domain *domain, int level,

@@ -1255,8 +1227,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
unsigned long nlongs;

ndomains = cap_ndoms(iommu->cap);
pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
ndomains);
pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
iommu->seq_id, ndomains);
nlongs = BITS_TO_LONGS(ndomains);

spin_lock_init(&iommu->lock);

@@ -1266,13 +1238,17 @@ static int iommu_init_domains(struct intel_iommu *iommu)
*/
iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
if (!iommu->domain_ids) {
printk(KERN_ERR "Allocating domain id array failed\n");
pr_err("IOMMU%d: allocating domain id array failed\n",
iommu->seq_id);
return -ENOMEM;
}
iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
GFP_KERNEL);
if (!iommu->domains) {
printk(KERN_ERR "Allocating domain array failed\n");
pr_err("IOMMU%d: allocating domain array failed\n",
iommu->seq_id);
kfree(iommu->domain_ids);
iommu->domain_ids = NULL;
return -ENOMEM;
}

@@ -1289,10 +1265,10 @@ static int iommu_init_domains(struct intel_iommu *iommu)
static void domain_exit(struct dmar_domain *domain);
static void vm_domain_exit(struct dmar_domain *domain);

void free_dmar_iommu(struct intel_iommu *iommu)
static void free_dmar_iommu(struct intel_iommu *iommu)
{
struct dmar_domain *domain;
int i;
int i, count;
unsigned long flags;

if ((iommu->domains) && (iommu->domain_ids)) {

@@ -1301,28 +1277,24 @@ void free_dmar_iommu(struct intel_iommu *iommu)
clear_bit(i, iommu->domain_ids);

spin_lock_irqsave(&domain->iommu_lock, flags);
if (--domain->iommu_count == 0) {
count = --domain->iommu_count;
spin_unlock_irqrestore(&domain->iommu_lock, flags);
if (count == 0) {
if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
vm_domain_exit(domain);
else
domain_exit(domain);
}
spin_unlock_irqrestore(&domain->iommu_lock, flags);
}
}

if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);

if (iommu->irq) {
irq_set_handler_data(iommu->irq, NULL);
/* This will mask the irq */
free_irq(iommu->irq, iommu);
destroy_irq(iommu->irq);
}

kfree(iommu->domains);
kfree(iommu->domain_ids);
iommu->domains = NULL;
iommu->domain_ids = NULL;

g_iommus[iommu->seq_id] = NULL;

@@ -2245,8 +2217,6 @@ static int __init si_domain_init(int hw)
if (!si_domain)
return -EFAULT;

pr_debug("Identity mapping domain is domain %d\n", si_domain->id);

for_each_active_iommu(iommu, drhd) {
ret = iommu_attach_domain(si_domain, iommu);
if (ret) {

@@ -2261,6 +2231,8 @@ static int __init si_domain_init(int hw)
}

si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
pr_debug("IOMMU: identity mapping domain is domain %d\n",
si_domain->id);

if (hw)
return 0;

@@ -2492,11 +2464,7 @@ static int __init init_dmars(void)
goto error;
}

for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;

iommu = drhd->iommu;
for_each_active_iommu(iommu, drhd) {
g_iommus[iommu->seq_id] = iommu;

ret = iommu_init_domains(iommu);

@@ -2520,12 +2488,7 @@ static int __init init_dmars(void)
/*
* Start from the sane iommu hardware state.
*/
for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;

iommu = drhd->iommu;

for_each_active_iommu(iommu, drhd) {
/*
* If the queued invalidation is already initialized by us
* (for example, while enabling interrupt-remapping) then

@@ -2545,12 +2508,7 @@ static int __init init_dmars(void)
dmar_disable_qi(iommu);
}

for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;

iommu = drhd->iommu;

for_each_active_iommu(iommu, drhd) {
if (dmar_enable_qi(iommu)) {
/*
* Queued Invalidate not enabled, use Register Based

@@ -2633,17 +2591,16 @@ static int __init init_dmars(void)
* global invalidate iotlb
* enable translation
*/
for_each_drhd_unit(drhd) {
for_each_iommu(iommu, drhd) {
if (drhd->ignored) {
/*
* we always have to disable PMRs or DMA may fail on
* this device
*/
if (force_on)
iommu_disable_protect_mem_regions(drhd->iommu);
iommu_disable_protect_mem_regions(iommu);
continue;
}
iommu = drhd->iommu;

iommu_flush_write_buffer(iommu);

@@ -2665,12 +2622,9 @@ static int __init init_dmars(void)

return 0;
error:
for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;
iommu = drhd->iommu;
free_iommu(iommu);
}
for_each_active_iommu(iommu, drhd)
free_dmar_iommu(iommu);
kfree(deferred_flush);
kfree(g_iommus);
return ret;
}

@@ -2758,7 +2712,7 @@ static int iommu_no_mapping(struct device *dev)
struct pci_dev *pdev;
int found;

if (unlikely(dev->bus != &pci_bus_type))
if (unlikely(!dev_is_pci(dev)))
return 1;

pdev = to_pci_dev(dev);

@@ -3318,9 +3272,9 @@ static void __init init_no_remapping_devices(void)
}
}

for_each_drhd_unit(drhd) {
for_each_active_drhd_unit(drhd) {
int i;
if (drhd->ignored || drhd->include_all)
if (drhd->include_all)
continue;

for (i = 0; i < drhd->devices_cnt; i++)

@@ -3514,18 +3468,12 @@ static int __init
rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
{
struct acpi_dmar_reserved_memory *rmrr;
int ret;

rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
ret = dmar_parse_dev_scope((void *)(rmrr + 1),
((void *)rmrr) + rmrr->header.length,
&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);

if (ret || (rmrru->devices_cnt == 0)) {
list_del(&rmrru->list);
kfree(rmrru);
}
return ret;
return dmar_parse_dev_scope((void *)(rmrr + 1),
((void *)rmrr) + rmrr->header.length,
&rmrru->devices_cnt, &rmrru->devices,
rmrr->segment);
}

static LIST_HEAD(dmar_atsr_units);

@@ -3550,23 +3498,39 @@ int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)

static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
{
int rc;
struct acpi_dmar_atsr *atsr;

if (atsru->include_all)
return 0;

atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
rc = dmar_parse_dev_scope((void *)(atsr + 1),
(void *)atsr + atsr->header.length,
&atsru->devices_cnt, &atsru->devices,
atsr->segment);
if (rc || !atsru->devices_cnt) {
list_del(&atsru->list);
kfree(atsru);
return dmar_parse_dev_scope((void *)(atsr + 1),
(void *)atsr + atsr->header.length,
&atsru->devices_cnt, &atsru->devices,
atsr->segment);
}

static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
{
dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
kfree(atsru);
}

static void intel_iommu_free_dmars(void)
{
struct dmar_rmrr_unit *rmrru, *rmrr_n;
struct dmar_atsr_unit *atsru, *atsr_n;

list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
list_del(&rmrru->list);
dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
kfree(rmrru);
}

return rc;
list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
list_del(&atsru->list);
intel_iommu_free_atsr(atsru);
}
}

int dmar_find_matched_atsr_unit(struct pci_dev *dev)

@@ -3610,17 +3574,17 @@ found:

int __init dmar_parse_rmrr_atsr_dev(void)
{
struct dmar_rmrr_unit *rmrr, *rmrr_n;
struct dmar_atsr_unit *atsr, *atsr_n;
struct dmar_rmrr_unit *rmrr;
struct dmar_atsr_unit *atsr;
int ret = 0;

list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
list_for_each_entry(rmrr, &dmar_rmrr_units, list) {
ret = rmrr_parse_dev(rmrr);
if (ret)
return ret;
}

list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
list_for_each_entry(atsr, &dmar_atsr_units, list) {
ret = atsr_parse_dev(atsr);
if (ret)
return ret;

@@ -3667,8 +3631,9 @@ static struct notifier_block device_nb = {

int __init intel_iommu_init(void)
{
int ret = 0;
int ret = -ENODEV;
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;

/* VT-d is required for a TXT/tboot launch, so enforce that */
force_on = tboot_force_iommu();

@@ -3676,36 +3641,29 @@ int __init intel_iommu_init(void)
if (dmar_table_init()) {
if (force_on)
panic("tboot: Failed to initialize DMAR table\n");
return -ENODEV;
goto out_free_dmar;
}

/*
* Disable translation if already enabled prior to OS handover.
*/
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu;

if (drhd->ignored)
continue;

iommu = drhd->iommu;
for_each_active_iommu(iommu, drhd)
if (iommu->gcmd & DMA_GCMD_TE)
iommu_disable_translation(iommu);
}

if (dmar_dev_scope_init() < 0) {
if (force_on)
panic("tboot: Failed to initialize DMAR device scope\n");
return -ENODEV;
goto out_free_dmar;
}

if (no_iommu || dmar_disabled)
return -ENODEV;
goto out_free_dmar;

if (iommu_init_mempool()) {
if (force_on)
panic("tboot: Failed to initialize iommu memory\n");
return -ENODEV;
goto out_free_dmar;
}

if (list_empty(&dmar_rmrr_units))

@@ -3717,7 +3675,7 @@ int __init intel_iommu_init(void)
if (dmar_init_reserved_ranges()) {
if (force_on)
panic("tboot: Failed to reserve iommu ranges\n");
return -ENODEV;
goto out_free_mempool;
}

init_no_remapping_devices();

@@ -3727,9 +3685,7 @@ int __init intel_iommu_init(void)
if (force_on)
panic("tboot: Failed to initialize DMARs\n");
printk(KERN_ERR "IOMMU: dmar init failed\n");
put_iova_domain(&reserved_iova_list);
iommu_exit_mempool();
return ret;
goto out_free_reserved_range;
}
printk(KERN_INFO
"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");

@@ -3749,6 +3705,14 @@ int __init intel_iommu_init(void)
intel_iommu_enabled = 1;

return 0;

out_free_reserved_range:
put_iova_domain(&reserved_iova_list);
out_free_mempool:
iommu_exit_mempool();
out_free_dmar:
intel_iommu_free_dmars();
return ret;
}

static void iommu_detach_dependent_devices(struct intel_iommu *iommu,

@@ -3877,7 +3841,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
}

/* domain id for virtual machine, it won't be set in context */
static unsigned long vm_domid;
static atomic_t vm_domid = ATOMIC_INIT(0);

static struct dmar_domain *iommu_alloc_vm_domain(void)
{

@@ -3887,7 +3851,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
if (!domain)
return NULL;

domain->id = vm_domid++;
domain->id = atomic_inc_return(&vm_domid);
domain->nid = -1;
memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;

@@ -3934,11 +3898,7 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
unsigned long i;
unsigned long ndomains;

for_each_drhd_unit(drhd) {
if (drhd->ignored)
continue;
iommu = drhd->iommu;

for_each_active_iommu(iommu, drhd) {
ndomains = cap_ndoms(iommu->cap);
for_each_set_bit(i, iommu->domain_ids, ndomains) {
if (iommu->domains[i] == domain) {
@@ -40,13 +40,15 @@ static int ir_ioapic_num, ir_hpet_num;

static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);

static int __init parse_ioapics_under_ir(void);

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
struct irq_cfg *cfg = irq_get_chip_data(irq);
return cfg ? &cfg->irq_2_iommu : NULL;
}

int get_irte(int irq, struct irte *entry)
static int get_irte(int irq, struct irte *entry)
{
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
unsigned long flags;

@@ -69,19 +71,13 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
struct ir_table *table = iommu->ir_table;
struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
struct irq_cfg *cfg = irq_get_chip_data(irq);
u16 index, start_index;
unsigned int mask = 0;
unsigned long flags;
int i;
int index;

if (!count || !irq_iommu)
return -1;

/*
* start the IRTE search from index 0.
*/
index = start_index = 0;

if (count > 1) {
count = __roundup_pow_of_two(count);
mask = ilog2(count);

@@ -96,32 +92,17 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
}

raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
do {
for (i = index; i < index + count; i++)
if (table->base[i].present)
break;
/* empty index found */
if (i == index + count)
break;

index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

if (index == start_index) {
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
printk(KERN_ERR "can't allocate an IRTE\n");
return -1;
}
} while (1);

for (i = index; i < index + count; i++)
table->base[i].present = 1;

cfg->remapped = 1;
irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
irq_iommu->sub_handle = 0;
irq_iommu->irte_mask = mask;

index = bitmap_find_free_region(table->bitmap,
INTR_REMAP_TABLE_ENTRIES, mask);
if (index < 0) {
pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
} else {
cfg->remapped = 1;
irq_iommu->iommu = iommu;
irq_iommu->irte_index = index;
irq_iommu->sub_handle = 0;
irq_iommu->irte_mask = mask;
}
raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

return index;

@@ -254,6 +235,8 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
set_64bit(&entry->low, 0);
set_64bit(&entry->high, 0);
}
bitmap_release_region(iommu->ir_table->bitmap, index,
irq_iommu->irte_mask);

return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

@@ -336,7 +319,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
return -1;
}

set_irte_sid(irte, 1, 0, sid);
set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

return 0;
}

@@ -453,6 +436,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
{
struct ir_table *ir_table;
struct page *pages;
unsigned long *bitmap;

ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
GFP_ATOMIC);

@@ -464,13 +448,23 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
INTR_REMAP_PAGE_ORDER);

if (!pages) {
printk(KERN_ERR "failed to allocate pages of order %d\n",
INTR_REMAP_PAGE_ORDER);
pr_err("IR%d: failed to allocate pages of order %d\n",
iommu->seq_id, INTR_REMAP_PAGE_ORDER);
kfree(iommu->ir_table);
return -ENOMEM;
}

bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
sizeof(long), GFP_ATOMIC);
if (bitmap == NULL) {
pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
__free_pages(pages, INTR_REMAP_PAGE_ORDER);
kfree(ir_table);
return -ENOMEM;
}

ir_table->base = page_address(pages);
ir_table->bitmap = bitmap;

iommu_set_irq_remapping(iommu, mode);
return 0;

@@ -521,6 +515,7 @@ static int __init dmar_x2apic_optout(void)
static int __init intel_irq_remapping_supported(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;

if (disable_irq_remap)
return 0;

@@ -539,12 +534,9 @@ static int __init intel_irq_remapping_supported(void)
if (!dmar_ir_support())
return 0;

for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;

for_each_iommu(iommu, drhd)
if (!ecap_ir_support(iommu->ecap))
return 0;
}

return 1;
}

@@ -552,6 +544,7 @@ static int __init intel_irq_remapping_supported(void)
static int __init intel_enable_irq_remapping(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
bool x2apic_present;
int setup = 0;
int eim = 0;

@@ -564,6 +557,8 @@ static int __init intel_enable_irq_remapping(void)
}

if (x2apic_present) {
pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

eim = !dmar_x2apic_optout();
if (!eim)
printk(KERN_WARNING

@@ -572,9 +567,7 @@ static int __init intel_enable_irq_remapping(void)
"Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
}

for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;

for_each_iommu(iommu, drhd) {
/*
* If the queued invalidation is already initialized,
* shouldn't disable it.

@@ -599,9 +592,7 @@ static int __init intel_enable_irq_remapping(void)
/*
* check for the Interrupt-remapping support
*/
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;

for_each_iommu(iommu, drhd) {
if (!ecap_ir_support(iommu->ecap))
continue;

@@ -615,10 +606,8 @@ static int __init intel_enable_irq_remapping(void)
/*
* Enable queued invalidation for all the DRHD's.
*/
for_each_drhd_unit(drhd) {
int ret;
struct intel_iommu *iommu = drhd->iommu;
ret = dmar_enable_qi(iommu);
for_each_iommu(iommu, drhd) {
int ret = dmar_enable_qi(iommu);

if (ret) {
printk(KERN_ERR "DRHD %Lx: failed to enable queued, "

@@ -631,9 +620,7 @@ static int __init intel_enable_irq_remapping(void)
/*
* Setup Interrupt-remapping for all the DRHD's now.
*/
for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;

for_each_iommu(iommu, drhd) {
if (!ecap_ir_support(iommu->ecap))
continue;

@@ -774,22 +761,20 @@ static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
* Finds the assocaition between IOAPIC's and its Interrupt-remapping
* hardware unit.
*/
int __init parse_ioapics_under_ir(void)
static int __init parse_ioapics_under_ir(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
int ir_supported = 0;
int ioapic_idx;

for_each_drhd_unit(drhd) {
struct intel_iommu *iommu = drhd->iommu;

for_each_iommu(iommu, drhd)
if (ecap_ir_support(iommu->ecap)) {
if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
return -1;

ir_supported = 1;
}
}

if (!ir_supported)
return 0;

@@ -807,7 +792,7 @@ int __init parse_ioapics_under_ir(void)
return 1;
}

int __init ir_dev_scope_init(void)
static int __init ir_dev_scope_init(void)
{
if (!irq_remapping_enabled)
return 0;
@@ -150,7 +150,7 @@ static int irq_remapping_setup_msi_irqs(struct pci_dev *dev,
return do_setup_msix_irqs(dev, nvec);
}

void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
static void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
{
/*
* Intr-remapping uses pin number as the virtual vector

@@ -295,8 +295,8 @@ int setup_ioapic_remapped_entry(int irq,
vector, attr);
}

int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
static int set_remapped_irq_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
if (!config_enabled(CONFIG_SMP) || !remap_ops ||
!remap_ops->set_affinity)
@@ -20,6 +20,7 @@
#include <linux/export.h>
#include <linux/limits.h>
#include <linux/of.h>
#include <linux/of_iommu.h>

/**
* of_get_dma_window - Parse *dma-window property and returns 0 if found.
@@ -380,14 +380,13 @@ int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
kmem_cache_destroy(l1cache);
return -ENOMEM;
}
archdata = kmalloc(sizeof(*archdata), GFP_KERNEL);
archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
if (!archdata) {
kmem_cache_destroy(l1cache);
kmem_cache_destroy(l2cache);
return -ENOMEM;
}
spin_lock_init(&archdata->attach_lock);
archdata->attached = NULL;
archdata->ipmmu = ipmmu;
ipmmu_archdata = archdata;
bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops);
@@ -35,12 +35,12 @@ void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu)
if (!ipmmu)
return;

mutex_lock(&ipmmu->flush_lock);
spin_lock(&ipmmu->flush_lock);
if (ipmmu->tlb_enabled)
ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN);
else
ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH);
mutex_unlock(&ipmmu->flush_lock);
spin_unlock(&ipmmu->flush_lock);
}

void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,

@@ -49,7 +49,7 @@ void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
if (!ipmmu)
return;

mutex_lock(&ipmmu->flush_lock);
spin_lock(&ipmmu->flush_lock);
switch (size) {
default:
ipmmu->tlb_enabled = 0;

@@ -85,7 +85,7 @@ void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
}
ipmmu_reg_write(ipmmu, IMTTBR, phys);
ipmmu_reg_write(ipmmu, IMASID, asid);
mutex_unlock(&ipmmu->flush_lock);
spin_unlock(&ipmmu->flush_lock);
}

static int ipmmu_probe(struct platform_device *pdev)

@@ -104,7 +104,7 @@ static int ipmmu_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot allocate device data\n");
return -ENOMEM;
}
mutex_init(&ipmmu->flush_lock);
spin_lock_init(&ipmmu->flush_lock);
ipmmu->dev = &pdev->dev;
ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start,
resource_size(res));
@@ -14,7 +14,7 @@ struct shmobile_ipmmu {
struct device *dev;
void __iomem *ipmmu_base;
int tlb_enabled;
struct mutex flush_lock;
spinlock_t flush_lock;
const char * const *dev_names;
unsigned int num_dev_names;
};
@@ -27,7 +27,6 @@ struct root_entry;

#ifdef CONFIG_INTEL_IOMMU
extern void free_dmar_iommu(struct intel_iommu *iommu);
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;

@@ -41,9 +40,6 @@ static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
return 0;
}
static inline void free_dmar_iommu(struct intel_iommu *iommu)
{
}
#define dmar_disabled (1)
#define intel_iommu_enabled (0)
#endif
@@ -33,6 +33,7 @@ struct acpi_dmar_header;
#define DMAR_X2APIC_OPT_OUT 0x2

struct intel_iommu;

#ifdef CONFIG_DMAR_TABLE
extern struct acpi_table_header *dmar_tbl;
struct dmar_drhd_unit {

@@ -52,6 +53,10 @@ extern struct list_head dmar_drhd_units;
#define for_each_drhd_unit(drhd) \
list_for_each_entry(drhd, &dmar_drhd_units, list)

#define for_each_active_drhd_unit(drhd) \
list_for_each_entry(drhd, &dmar_drhd_units, list) \
if (drhd->ignored) {} else

#define for_each_active_iommu(i, drhd) \
list_for_each_entry(drhd, &dmar_drhd_units, list) \
if (i=drhd->iommu, drhd->ignored) {} else

@@ -62,13 +67,13 @@ extern struct list_head dmar_drhd_units;

extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
struct pci_dev ***devices, u16 segment);
extern void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt);

/* Intel IOMMU detection */
extern int detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);

extern int parse_ioapics_under_ir(void);
extern int alloc_iommu(struct dmar_drhd_unit *);
#else
static inline int detect_intel_iommu(void)
{

@@ -157,8 +162,6 @@ struct dmar_atsr_unit {
int dmar_parse_rmrr_atsr_dev(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
struct pci_dev ***devices, u16 segment);
extern int intel_iommu_init(void);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
@@ -288,6 +288,7 @@ struct q_inval {

struct ir_table {
struct irte *base;
unsigned long *bitmap;
};
#endif

@@ -347,8 +348,6 @@ static inline void __iommu_flush_cache(
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);

extern int alloc_iommu(struct dmar_drhd_unit *drhd);
extern void free_iommu(struct intel_iommu *iommu);
extern int dmar_enable_qi(struct intel_iommu *iommu);
extern void dmar_disable_qi(struct intel_iommu *iommu);
extern int dmar_reenable_qi(struct intel_iommu *iommu);
@@ -24,9 +24,10 @@
#include <linux/types.h>
#include <trace/events/iommu.h>

#define IOMMU_READ (1)
#define IOMMU_WRITE (2)
#define IOMMU_CACHE (4) /* DMA cache coherency */
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
#define IOMMU_EXEC (1 << 3)

struct iommu_ops;
struct iommu_group;

@@ -247,6 +248,11 @@ static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
return NULL;
}

static inline struct iommu_group *iommu_group_get_by_id(int id)
{
return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

@@ -291,8 +297,8 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_ad
return 0;
}

static inline int domain_has_cap(struct iommu_domain *domain,
unsigned long cap)
static inline int iommu_domain_has_cap(struct iommu_domain *domain,
unsigned long cap)
{
return 0;
}