iommu/amd: Enable vAPIC interrupt remapping mode by default
Introduce struct iommu_dev_data.use_vapic flag, which the IOMMU driver uses to determine if it should enable vAPIC support, by setting the ga_mode bit in the device's interrupt remapping table entry. Currently, it is enabled for all pass-through devices if vAPIC mode is enabled. Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> Signed-off-by: Joerg Roedel <jroedel@suse.de>
This commit is contained in:
parent
b9fc6b56f4
commit
d98de49a53
3 changed files with 48 additions and 10 deletions
|
@ -137,6 +137,7 @@ struct iommu_dev_data {
|
||||||
bool pri_tlp; /* PASID TLB required for
|
bool pri_tlp; /* PASID TLB required for
|
||||||
PPR completions */
|
PPR completions */
|
||||||
u32 errata; /* Bitmap for errata to apply */
|
u32 errata; /* Bitmap for errata to apply */
|
||||||
|
bool use_vapic; /* Enable device to use vapic mode */
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -3015,6 +3016,12 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
|
||||||
if (!iommu)
|
if (!iommu)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
|
#ifdef CONFIG_IRQ_REMAP
|
||||||
|
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
|
||||||
|
(dom->type == IOMMU_DOMAIN_UNMANAGED))
|
||||||
|
dev_data->use_vapic = 0;
|
||||||
|
#endif
|
||||||
|
|
||||||
iommu_completion_wait(iommu);
|
iommu_completion_wait(iommu);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -3040,6 +3047,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
|
||||||
|
|
||||||
ret = attach_device(dev, domain);
|
ret = attach_device(dev, domain);
|
||||||
|
|
||||||
|
#ifdef CONFIG_IRQ_REMAP
|
||||||
|
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) {
|
||||||
|
if (dom->type == IOMMU_DOMAIN_UNMANAGED)
|
||||||
|
dev_data->use_vapic = 1;
|
||||||
|
else
|
||||||
|
dev_data->use_vapic = 0;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
iommu_completion_wait(iommu);
|
iommu_completion_wait(iommu);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
|
@ -3801,7 +3817,7 @@ static void free_irte(u16 devid, int index)
|
||||||
|
|
||||||
static void irte_prepare(void *entry,
|
static void irte_prepare(void *entry,
|
||||||
u32 delivery_mode, u32 dest_mode,
|
u32 delivery_mode, u32 dest_mode,
|
||||||
u8 vector, u32 dest_apicid)
|
u8 vector, u32 dest_apicid, int devid)
|
||||||
{
|
{
|
||||||
union irte *irte = (union irte *) entry;
|
union irte *irte = (union irte *) entry;
|
||||||
|
|
||||||
|
@ -3815,13 +3831,14 @@ static void irte_prepare(void *entry,
|
||||||
|
|
||||||
static void irte_ga_prepare(void *entry,
|
static void irte_ga_prepare(void *entry,
|
||||||
u32 delivery_mode, u32 dest_mode,
|
u32 delivery_mode, u32 dest_mode,
|
||||||
u8 vector, u32 dest_apicid)
|
u8 vector, u32 dest_apicid, int devid)
|
||||||
{
|
{
|
||||||
struct irte_ga *irte = (struct irte_ga *) entry;
|
struct irte_ga *irte = (struct irte_ga *) entry;
|
||||||
|
struct iommu_dev_data *dev_data = search_dev_data(devid);
|
||||||
|
|
||||||
irte->lo.val = 0;
|
irte->lo.val = 0;
|
||||||
irte->hi.val = 0;
|
irte->hi.val = 0;
|
||||||
irte->lo.fields_remap.guest_mode = 0;
|
irte->lo.fields_remap.guest_mode = dev_data ? dev_data->use_vapic : 0;
|
||||||
irte->lo.fields_remap.int_type = delivery_mode;
|
irte->lo.fields_remap.int_type = delivery_mode;
|
||||||
irte->lo.fields_remap.dm = dest_mode;
|
irte->lo.fields_remap.dm = dest_mode;
|
||||||
irte->hi.fields.vector = vector;
|
irte->hi.fields.vector = vector;
|
||||||
|
@ -3875,11 +3892,14 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
|
||||||
u8 vector, u32 dest_apicid)
|
u8 vector, u32 dest_apicid)
|
||||||
{
|
{
|
||||||
struct irte_ga *irte = (struct irte_ga *) entry;
|
struct irte_ga *irte = (struct irte_ga *) entry;
|
||||||
|
struct iommu_dev_data *dev_data = search_dev_data(devid);
|
||||||
|
|
||||||
irte->hi.fields.vector = vector;
|
if (!dev_data || !dev_data->use_vapic) {
|
||||||
irte->lo.fields_remap.destination = dest_apicid;
|
irte->hi.fields.vector = vector;
|
||||||
irte->lo.fields_remap.guest_mode = 0;
|
irte->lo.fields_remap.destination = dest_apicid;
|
||||||
modify_irte_ga(devid, index, irte, NULL);
|
irte->lo.fields_remap.guest_mode = 0;
|
||||||
|
modify_irte_ga(devid, index, irte, NULL);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#define IRTE_ALLOCATED (~1U)
|
#define IRTE_ALLOCATED (~1U)
|
||||||
|
@ -4022,7 +4042,7 @@ static void irq_remapping_prepare_irte(struct amd_ir_data *data,
|
||||||
data->irq_2_irte.index = index + sub_handle;
|
data->irq_2_irte.index = index + sub_handle;
|
||||||
iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode,
|
iommu->irte_ops->prepare(data->entry, apic->irq_delivery_mode,
|
||||||
apic->irq_dest_mode, irq_cfg->vector,
|
apic->irq_dest_mode, irq_cfg->vector,
|
||||||
irq_cfg->dest_apicid);
|
irq_cfg->dest_apicid, devid);
|
||||||
|
|
||||||
switch (info->type) {
|
switch (info->type) {
|
||||||
case X86_IRQ_ALLOC_TYPE_IOAPIC:
|
case X86_IRQ_ALLOC_TYPE_IOAPIC:
|
||||||
|
@ -4222,6 +4242,14 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
|
||||||
struct amd_ir_data *ir_data = data->chip_data;
|
struct amd_ir_data *ir_data = data->chip_data;
|
||||||
struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
|
struct irte_ga *irte = (struct irte_ga *) ir_data->entry;
|
||||||
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
|
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
|
||||||
|
struct iommu_dev_data *dev_data = search_dev_data(irte_info->devid);
|
||||||
|
|
||||||
|
/* Note:
|
||||||
|
* This device has never been set up for guest mode.
|
||||||
|
* we should not modify the IRTE
|
||||||
|
*/
|
||||||
|
if (!dev_data || !dev_data->use_vapic)
|
||||||
|
return 0;
|
||||||
|
|
||||||
pi_data->ir_data = ir_data;
|
pi_data->ir_data = ir_data;
|
||||||
|
|
||||||
|
|
|
@ -146,7 +146,7 @@ struct ivmd_header {
|
||||||
bool amd_iommu_dump;
|
bool amd_iommu_dump;
|
||||||
bool amd_iommu_irq_remap __read_mostly;
|
bool amd_iommu_irq_remap __read_mostly;
|
||||||
|
|
||||||
int amd_iommu_guest_ir;
|
int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
|
||||||
|
|
||||||
static bool amd_iommu_detected;
|
static bool amd_iommu_detected;
|
||||||
static bool __initdata amd_iommu_disabled;
|
static bool __initdata amd_iommu_disabled;
|
||||||
|
@ -2019,6 +2019,11 @@ static void early_enable_iommus(void)
|
||||||
iommu_enable(iommu);
|
iommu_enable(iommu);
|
||||||
iommu_flush_all_caches(iommu);
|
iommu_flush_all_caches(iommu);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef CONFIG_IRQ_REMAP
|
||||||
|
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
|
||||||
|
amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
static void enable_iommus_v2(void)
|
static void enable_iommus_v2(void)
|
||||||
|
@ -2044,6 +2049,11 @@ static void disable_iommus(void)
|
||||||
|
|
||||||
for_each_iommu(iommu)
|
for_each_iommu(iommu)
|
||||||
iommu_disable(iommu);
|
iommu_disable(iommu);
|
||||||
|
|
||||||
|
#ifdef CONFIG_IRQ_REMAP
|
||||||
|
if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
|
||||||
|
amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
|
||||||
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
@ -815,7 +815,7 @@ struct amd_ir_data {
|
||||||
};
|
};
|
||||||
|
|
||||||
struct amd_irte_ops {
|
struct amd_irte_ops {
|
||||||
void (*prepare)(void *, u32, u32, u8, u32);
|
void (*prepare)(void *, u32, u32, u8, u32, int);
|
||||||
void (*activate)(void *, u16, u16);
|
void (*activate)(void *, u16, u16);
|
||||||
void (*deactivate)(void *, u16, u16);
|
void (*deactivate)(void *, u16, u16);
|
||||||
void (*set_affinity)(void *, u16, u16, u8, u32);
|
void (*set_affinity)(void *, u16, u16, u8, u32);
|
||||||
|
|
Loading…
Reference in a new issue