Merge branch 'lorenzo/pci/dwc-msi'

* lorenzo/pci/dwc-msi:
  PCI: dwc: Expand maximum number of MSI IRQs from 32 to 256
  PCI: dwc: Remove old MSI IRQs API
  PCI: dwc: Move MSI IRQs allocation to IRQ domains hierarchical API
commit 1ad9a8730e
Bjorn Helgaas, 2018-04-04 13:28:45 -05:00 (committed by Bjorn Helgaas)
12 changed files with 289 additions and 403 deletions
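
Taken together, the branch replaces the old struct msi_controller plumbing, which every DWC-based driver wired up by hand, with a two-level hierarchical IRQ domain owned by the DesignWare host core: drivers now only fetch their MSI interrupt, and the core installs a chained handler on it and hands out vectors from the domain. A minimal sketch of the resulting probe-side pattern, distilled from the per-driver hunks below (the foo_pcie names are placeholders, not part of this commit):

/*
 * Sketch only: what a DWC-based driver is left with after this series.
 * foo_pcie is a placeholder for the driver's private state; the shape
 * is distilled from the pci-exynos.c, pci-imx6.c and pcie-qcom.c hunks.
 */
static int foo_pcie_probe(struct platform_device *pdev)
{
	struct foo_pcie *foo = foo_pcie_alloc(pdev);	/* hypothetical helper */
	struct pcie_port *pp = &foo->pci->pp;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq < 0)
			return pp->msi_irq;
		/*
		 * No devm_request_irq() and no private MSI handler any
		 * more: dw_pcie_host_init() installs dw_chained_msi_isr()
		 * on pp->msi_irq and creates the IRQ domains itself.
		 */
	}

	pp->root_bus_nr = -1;
	return dw_pcie_host_init(pp);
}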

drivers/pci/dwc/pci-exynos.c

@@ -294,15 +294,6 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
{
struct exynos_pcie *ep = arg;
struct dw_pcie *pci = ep->pci;
struct pcie_port *pp = &pci->pp;
return dw_handle_msi_irq(pp);
}
static void exynos_pcie_msi_init(struct exynos_pcie *ep)
{
struct dw_pcie *pci = ep->pci;
@@ -428,15 +419,6 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
dev_err(dev, "failed to get msi irq\n");
return pp->msi_irq;
}
ret = devm_request_irq(dev, pp->msi_irq,
exynos_pcie_msi_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"exynos-pcie", ep);
if (ret) {
dev_err(dev, "failed to request msi irq\n");
return ret;
}
}
pp->root_bus_nr = -1;

drivers/pci/dwc/pci-imx6.c

@@ -542,15 +542,6 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
return -EINVAL;
}
static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
{
struct imx6_pcie *imx6_pcie = arg;
struct dw_pcie *pci = imx6_pcie->pci;
struct pcie_port *pp = &pci->pp;
return dw_handle_msi_irq(pp);
}
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -674,15 +665,6 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
dev_err(dev, "failed to get MSI irq\n");
return -ENODEV;
}
ret = devm_request_irq(dev, pp->msi_irq,
imx6_pcie_msi_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"mx6-pcie-msi", imx6_pcie);
if (ret) {
dev_err(dev, "failed to request MSI irq\n");
return ret;
}
}
pp->root_bus_nr = -1;

drivers/pci/dwc/pci-keystone-dw.c

@@ -120,20 +120,15 @@ void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
}
}
static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
void ks_dw_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
{
u32 offset, reg_offset, bit_pos;
u32 reg_offset, bit_pos;
struct keystone_pcie *ks_pcie;
struct msi_desc *msi;
struct pcie_port *pp;
struct dw_pcie *pci;
msi = irq_data_get_msi_desc(d);
pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
pci = to_dw_pcie_from_pp(pp);
ks_pcie = to_keystone_pcie(pci);
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
BIT(bit_pos));
@@ -162,85 +157,9 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
BIT(bit_pos));
}
static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
int ks_dw_pcie_msi_host_init(struct pcie_port *pp)
{
struct msi_desc *msi;
struct pcie_port *pp;
u32 offset;
msi = irq_data_get_msi_desc(d);
pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
/* Mask the end point if PVM implemented */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
if (msi->msi_attrib.maskbit)
pci_msi_mask_irq(d);
}
ks_dw_pcie_msi_clear_irq(pp, offset);
}
static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
{
struct msi_desc *msi;
struct pcie_port *pp;
u32 offset;
msi = irq_data_get_msi_desc(d);
pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
/* Mask the end point if PVM implemented */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
if (msi->msi_attrib.maskbit)
pci_msi_unmask_irq(d);
}
ks_dw_pcie_msi_set_irq(pp, offset);
}
static struct irq_chip ks_dw_pcie_msi_irq_chip = {
.name = "Keystone-PCIe-MSI-IRQ",
.irq_ack = ks_dw_pcie_msi_irq_ack,
.irq_mask = ks_dw_pcie_msi_irq_mask,
.irq_unmask = ks_dw_pcie_msi_irq_unmask,
};
static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
handle_level_irq);
irq_set_chip_data(irq, domain->host_data);
return 0;
}
static const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
.map = ks_dw_pcie_msi_map,
};
int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
struct device *dev = pci->dev;
int i;
pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
MAX_MSI_IRQS,
&ks_dw_pcie_msi_domain_ops,
chip);
if (!pp->irq_domain) {
dev_err(dev, "irq domain init failed\n");
return -ENXIO;
}
for (i = 0; i < MAX_MSI_IRQS; i++)
irq_create_mapping(pp->irq_domain, i);
return 0;
return dw_pcie_allocate_domains(pp);
}
void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
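
The core-side counterpart of the two hunks above appears later, in pcie-designware-host.c; it is quoted here because it explains the signature change: the bottom irq_chip now hands Keystone the raw hwirq and the port directly, so ks_dw_pcie_msi_irq_ack() no longer has to dig both out of the msi_desc with irq_linear_revmap():

/* From the pcie-designware-host.c hunk further down (quoted for context) */
static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct msi_desc *msi = irq_data_get_msi_desc(d);
	struct pcie_port *pp;

	pp = msi_desc_to_pci_sysdata(msi);

	if (pp->ops->msi_irq_ack)
		pp->ops->msi_irq_ack(d->hwirq, pp);	/* -> ks_dw_pcie_msi_irq_ack() */
}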

drivers/pci/dwc/pci-keystone.c

@@ -297,6 +297,7 @@ static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
.msi_clear_irq = ks_dw_pcie_msi_clear_irq,
.get_msi_addr = ks_dw_pcie_get_msi_addr,
.msi_host_init = ks_dw_pcie_msi_host_init,
.msi_irq_ack = ks_dw_pcie_msi_irq_ack,
.scan_bus = ks_dw_pcie_v3_65_scan_bus,
};

drivers/pci/dwc/pci-keystone.h

@@ -49,9 +49,9 @@ int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
unsigned int devfn, int where, int size, u32 *val);
void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie);
void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie);
void ks_dw_pcie_msi_irq_ack(int i, struct pcie_port *pp);
void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq);
void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq);
void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp);
int ks_dw_pcie_msi_host_init(struct pcie_port *pp,
struct msi_controller *chip);
int ks_dw_pcie_msi_host_init(struct pcie_port *pp);
int ks_dw_pcie_link_up(struct dw_pcie *pci);

drivers/pci/dwc/pci-layerscape.c

@@ -182,8 +182,7 @@ static int ls1021_pcie_host_init(struct pcie_port *pp)
return ls_pcie_host_init(pp);
}
static int ls_pcie_msi_host_init(struct pcie_port *pp,
struct msi_controller *chip)
static int ls_pcie_msi_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;

drivers/pci/dwc/pcie-artpec6.c

@@ -383,15 +383,6 @@ static const struct dw_pcie_host_ops artpec6_pcie_host_ops = {
.host_init = artpec6_pcie_host_init,
};
static irqreturn_t artpec6_pcie_msi_handler(int irq, void *arg)
{
struct artpec6_pcie *artpec6_pcie = arg;
struct dw_pcie *pci = artpec6_pcie->pci;
struct pcie_port *pp = &pci->pp;
return dw_handle_msi_irq(pp);
}
static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
struct platform_device *pdev)
{
@@ -406,15 +397,6 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
dev_err(dev, "failed to get MSI irq\n");
return pp->msi_irq;
}
ret = devm_request_irq(dev, pp->msi_irq,
artpec6_pcie_msi_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"artpec6-pcie-msi", artpec6_pcie);
if (ret) {
dev_err(dev, "failed to request MSI irq\n");
return ret;
}
}
pp->root_bus_nr = -1;

drivers/pci/dwc/pcie-designware-host.c

@@ -8,6 +8,7 @@
* Author: Jingoo Han <jg1.han@samsung.com>
*/
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
@@ -42,22 +43,46 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
return dw_pcie_write(pci->dbi_base + where, size, val);
}
static struct irq_chip dw_msi_irq_chip = {
static void dw_msi_ack_irq(struct irq_data *d)
{
irq_chip_ack_parent(d);
}
static void dw_msi_mask_irq(struct irq_data *d)
{
pci_msi_mask_irq(d);
irq_chip_mask_parent(d);
}
static void dw_msi_unmask_irq(struct irq_data *d)
{
pci_msi_unmask_irq(d);
irq_chip_unmask_parent(d);
}
static struct irq_chip dw_pcie_msi_irq_chip = {
.name = "PCI-MSI",
.irq_enable = pci_msi_unmask_irq,
.irq_disable = pci_msi_mask_irq,
.irq_mask = pci_msi_mask_irq,
.irq_unmask = pci_msi_unmask_irq,
.irq_ack = dw_msi_ack_irq,
.irq_mask = dw_msi_mask_irq,
.irq_unmask = dw_msi_unmask_irq,
};
static struct msi_domain_info dw_pcie_msi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
.chip = &dw_pcie_msi_irq_chip,
};
/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
u32 val;
int i, pos, irq;
u32 val, num_ctrls;
irqreturn_t ret = IRQ_NONE;
for (i = 0; i < MAX_MSI_CTRLS; i++) {
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
for (i = 0; i < num_ctrls; i++) {
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
&val);
if (!val)
@@ -78,6 +103,194 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
return ret;
}
/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct pcie_port *pp;
chained_irq_enter(chip, desc);
pp = irq_desc_get_handler_data(desc);
dw_handle_msi_irq(pp);
chained_irq_exit(chip, desc);
}
static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(data);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target;
if (pp->ops->get_msi_addr)
msi_target = pp->ops->get_msi_addr(pp);
else
msi_target = (u64)pp->msi_data;
msg->address_lo = lower_32_bits(msi_target);
msg->address_hi = upper_32_bits(msi_target);
if (pp->ops->get_msi_data)
msg->data = pp->ops->get_msi_data(pp, data->hwirq);
else
msg->data = data->hwirq;
dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
(int)data->hwirq, msg->address_hi, msg->address_lo);
}
static int dw_pci_msi_set_affinity(struct irq_data *irq_data,
const struct cpumask *mask, bool force)
{
return -EINVAL;
}
static void dw_pci_bottom_mask(struct irq_data *data)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(data);
unsigned int res, bit, ctrl;
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
if (pp->ops->msi_clear_irq) {
pp->ops->msi_clear_irq(pp, data->hwirq);
} else {
ctrl = data->hwirq / 32;
res = ctrl * 12;
bit = data->hwirq % 32;
pp->irq_status[ctrl] &= ~(1 << bit);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
pp->irq_status[ctrl]);
}
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void dw_pci_bottom_unmask(struct irq_data *data)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(data);
unsigned int res, bit, ctrl;
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
if (pp->ops->msi_set_irq) {
pp->ops->msi_set_irq(pp, data->hwirq);
} else {
ctrl = data->hwirq / 32;
res = ctrl * 12;
bit = data->hwirq % 32;
pp->irq_status[ctrl] |= 1 << bit;
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4,
pp->irq_status[ctrl]);
}
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void dw_pci_bottom_ack(struct irq_data *d)
{
struct msi_desc *msi = irq_data_get_msi_desc(d);
struct pcie_port *pp;
pp = msi_desc_to_pci_sysdata(msi);
if (pp->ops->msi_irq_ack)
pp->ops->msi_irq_ack(d->hwirq, pp);
}
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
.name = "DWPCI-MSI",
.irq_ack = dw_pci_bottom_ack,
.irq_compose_msi_msg = dw_pci_setup_msi_msg,
.irq_set_affinity = dw_pci_msi_set_affinity,
.irq_mask = dw_pci_bottom_mask,
.irq_unmask = dw_pci_bottom_unmask,
};
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs,
void *args)
{
struct pcie_port *pp = domain->host_data;
unsigned long flags;
u32 i;
int bit;
raw_spin_lock_irqsave(&pp->lock, flags);
bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
order_base_2(nr_irqs));
raw_spin_unlock_irqrestore(&pp->lock, flags);
if (bit < 0)
return -ENOSPC;
for (i = 0; i < nr_irqs; i++)
irq_domain_set_info(domain, virq + i, bit + i,
&dw_pci_msi_bottom_irq_chip,
pp, handle_edge_irq,
NULL, NULL);
return 0;
}
static void dw_pcie_irq_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *data = irq_domain_get_irq_data(domain, virq);
struct pcie_port *pp = irq_data_get_irq_chip_data(data);
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
bitmap_release_region(pp->msi_irq_in_use, data->hwirq,
order_base_2(nr_irqs));
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
.alloc = dw_pcie_irq_domain_alloc,
.free = dw_pcie_irq_domain_free,
};
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
&dw_pcie_msi_domain_ops, pp);
if (!pp->irq_domain) {
dev_err(pci->dev, "failed to create IRQ domain\n");
return -ENOMEM;
}
pp->msi_domain = pci_msi_create_irq_domain(fwnode,
&dw_pcie_msi_domain_info,
pp->irq_domain);
if (!pp->msi_domain) {
dev_err(pci->dev, "failed to create MSI domain\n");
irq_domain_remove(pp->irq_domain);
return -ENOMEM;
}
return 0;
}
void dw_pcie_free_msi(struct pcie_port *pp)
{
irq_set_chained_handler(pp->msi_irq, NULL);
irq_set_handler_data(pp->msi_irq, NULL);
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
}
void dw_pcie_msi_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -96,200 +309,24 @@ void dw_pcie_msi_init(struct pcie_port *pp)
/* program the msi_data */
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
(u32)(msi_target & 0xffffffff));
lower_32_bits(msi_target));
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
(u32)(msi_target >> 32 & 0xffffffff));
upper_32_bits(msi_target));
}
static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
unsigned int res, bit, val;
res = (irq / 32) * 12;
bit = irq % 32;
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
val &= ~(1 << bit);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}
static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
unsigned int nvec, unsigned int pos)
{
unsigned int i;
for (i = 0; i < nvec; i++) {
irq_set_msi_desc_off(irq_base, i, NULL);
/* Disable corresponding interrupt on MSI controller */
if (pp->ops->msi_clear_irq)
pp->ops->msi_clear_irq(pp, pos + i);
else
dw_pcie_msi_clear_irq(pp, pos + i);
}
bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}
static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
unsigned int res, bit, val;
res = (irq / 32) * 12;
bit = irq % 32;
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
val |= 1 << bit;
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
int irq, pos0, i;
struct pcie_port *pp;
pp = (struct pcie_port *)msi_desc_to_pci_sysdata(desc);
pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
order_base_2(no_irqs));
if (pos0 < 0)
goto no_valid_irq;
irq = irq_find_mapping(pp->irq_domain, pos0);
if (!irq)
goto no_valid_irq;
/*
* irq_create_mapping (called from dw_pcie_host_init) pre-allocates
* descs so there is no need to allocate descs here. We can therefore
* assume that if irq_find_mapping above returns non-zero, then the
* descs are also successfully allocated.
*/
for (i = 0; i < no_irqs; i++) {
if (irq_set_msi_desc_off(irq, i, desc) != 0) {
clear_irq_range(pp, irq, i, pos0);
goto no_valid_irq;
}
/*Enable corresponding interrupt in MSI interrupt controller */
if (pp->ops->msi_set_irq)
pp->ops->msi_set_irq(pp, pos0 + i);
else
dw_pcie_msi_set_irq(pp, pos0 + i);
}
*pos = pos0;
desc->nvec_used = no_irqs;
desc->msi_attrib.multiple = order_base_2(no_irqs);
return irq;
no_valid_irq:
*pos = pos0;
return -ENOSPC;
}
static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
{
struct msi_msg msg;
u64 msi_target;
if (pp->ops->get_msi_addr)
msi_target = pp->ops->get_msi_addr(pp);
else
msi_target = (u64)pp->msi_data;
msg.address_lo = (u32)(msi_target & 0xffffffff);
msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);
if (pp->ops->get_msi_data)
msg.data = pp->ops->get_msi_data(pp, pos);
else
msg.data = pos;
pci_write_msi_msg(irq, &msg);
}
static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
struct msi_desc *desc)
{
int irq, pos;
struct pcie_port *pp = pdev->bus->sysdata;
if (desc->msi_attrib.is_msix)
return -EINVAL;
irq = assign_irq(1, desc, &pos);
if (irq < 0)
return irq;
dw_msi_setup_msg(pp, irq, pos);
return 0;
}
static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
int irq, pos;
struct msi_desc *desc;
struct pcie_port *pp = pdev->bus->sysdata;
/* MSI-X interrupts are not supported */
if (type == PCI_CAP_ID_MSIX)
return -EINVAL;
WARN_ON(!list_is_singular(&pdev->dev.msi_list));
desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);
irq = assign_irq(nvec, desc, &pos);
if (irq < 0)
return irq;
dw_msi_setup_msg(pp, irq, pos);
return 0;
#else
return -EINVAL;
#endif
}
static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
struct irq_data *data = irq_get_irq_data(irq);
struct msi_desc *msi = irq_data_get_msi_desc(data);
struct pcie_port *pp = (struct pcie_port *)msi_desc_to_pci_sysdata(msi);
clear_irq_range(pp, irq, 1, data->hwirq);
}
static struct msi_controller dw_pcie_msi_chip = {
.setup_irq = dw_msi_setup_irq,
.setup_irqs = dw_msi_setup_irqs,
.teardown_irq = dw_msi_teardown_irq,
};
static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
irq_set_chip_data(irq, domain->host_data);
return 0;
}
static const struct irq_domain_ops msi_domain_ops = {
.map = dw_pcie_msi_map,
};
int dw_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win, *tmp;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
struct resource *cfg_res;
int i, ret;
struct resource_entry *win, *tmp;
int ret;
raw_spin_lock_init(&pci->pp.lock);
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
@@ -388,20 +425,35 @@ int dw_pcie_host_init(struct pcie_port *pp)
pci->num_viewport = 2;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
if (!pp->ops->msi_host_init) {
pp->irq_domain = irq_domain_add_linear(dev->of_node,
MAX_MSI_IRQS, &msi_domain_ops,
&dw_pcie_msi_chip);
if (!pp->irq_domain) {
dev_err(dev, "irq domain init failed\n");
ret = -ENXIO;
/*
* If a specific SoC driver needs to change the
* default number of vectors, it needs to implement
* the set_num_vectors callback.
*/
if (!pp->ops->set_num_vectors) {
pp->num_vectors = MSI_DEF_NUM_VECTORS;
} else {
pp->ops->set_num_vectors(pp);
if (pp->num_vectors > MAX_MSI_IRQS ||
pp->num_vectors == 0) {
dev_err(dev,
"Invalid number of vectors\n");
goto error;
}
}
for (i = 0; i < MAX_MSI_IRQS; i++)
irq_create_mapping(pp->irq_domain, i);
if (!pp->ops->msi_host_init) {
ret = dw_pcie_allocate_domains(pp);
if (ret)
goto error;
if (pp->msi_irq)
irq_set_chained_handler_and_data(pp->msi_irq,
dw_chained_msi_isr,
pp);
} else {
ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
ret = pp->ops->msi_host_init(pp);
if (ret < 0)
goto error;
}
@@ -421,10 +473,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
bridge->ops = &dw_pcie_ops;
bridge->map_irq = of_irq_parse_and_map_pci;
bridge->swizzle_irq = pci_common_swizzle;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
bridge->msi = &dw_pcie_msi_chip;
dw_pcie_msi_chip.dev = dev;
}
ret = pci_scan_root_bus_bridge(bridge);
if (ret)
@@ -593,11 +641,17 @@ static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val;
u32 val, ctrl, num_ctrls;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
dw_pcie_setup(pci);
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
/* Initialize IRQ Status array */
for (ctrl = 0; ctrl < num_ctrls; ctrl++)
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + (ctrl * 12), 4,
&pp->irq_status[ctrl]);
/* setup RC BARs */
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
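
A note on the arithmetic that recurs in dw_handle_msi_irq(), dw_pci_bottom_mask()/dw_pci_bottom_unmask() and dw_pcie_setup_rc() above: each MSI controller serves 32 vectors, and the register group of controller N sits N * 12 bytes (three u32 registers) past controller 0's. The stand-alone program below (not kernel code) walks that mapping; the 0x828/0x830 bases follow the DWC register map in pcie-designware.h and are quoted only for illustration:

#include <stdio.h>

#define MAX_MSI_IRQS_PER_CTRL	32
#define PCIE_MSI_INTR0_ENABLE	0x828	/* per pcie-designware.h */
#define PCIE_MSI_INTR0_STATUS	0x830

int main(void)
{
	unsigned int hwirq;

	/* Same decomposition as dw_pci_bottom_mask()/unmask() above */
	for (hwirq = 0; hwirq < 256; hwirq += 37) {
		unsigned int ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
		unsigned int res  = ctrl * 12;	/* three u32s per controller */
		unsigned int bit  = hwirq % MAX_MSI_IRQS_PER_CTRL;

		printf("hwirq %3u -> ctrl %u, ENABLE 0x%03x, STATUS 0x%03x, bit %2u\n",
		       hwirq, ctrl,
		       PCIE_MSI_INTR0_ENABLE + res,
		       PCIE_MSI_INTR0_STATUS + res, bit);
	}
	return 0;
}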

drivers/pci/dwc/pcie-designware-plat.c

@@ -25,13 +25,6 @@ struct dw_plat_pcie {
struct dw_pcie *pci;
};
static irqreturn_t dw_plat_pcie_msi_irq_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
return dw_handle_msi_irq(pp);
}
static int dw_plat_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -63,15 +56,6 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
pp->msi_irq = platform_get_irq(pdev, 0);
if (pp->msi_irq < 0)
return pp->msi_irq;
ret = devm_request_irq(dev, pp->msi_irq,
dw_plat_pcie_msi_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"dw-plat-pcie-msi", pp);
if (ret) {
dev_err(dev, "failed to request MSI IRQ\n");
return ret;
}
}
pp->root_bus_nr = -1;

drivers/pci/dwc/pcie-designware.h

@@ -107,13 +107,10 @@
#define MSI_MESSAGE_DATA_32 0x58
#define MSI_MESSAGE_DATA_64 0x5C
/*
* Maximum number of MSI IRQs can be 256 per controller. But keep
* it 32 as of now. Probably we will never need more than 32. If needed,
* then increment it in multiple of 32.
*/
#define MAX_MSI_IRQS 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
#define MSI_DEF_NUM_VECTORS 32
/* Maximum number of inbound/outbound iATUs */
#define MAX_IATU_IN 256
@@ -149,7 +146,9 @@ struct dw_pcie_host_ops {
phys_addr_t (*get_msi_addr)(struct pcie_port *pp);
u32 (*get_msi_data)(struct pcie_port *pp, int pos);
void (*scan_bus)(struct pcie_port *pp);
int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip);
void (*set_num_vectors)(struct pcie_port *pp);
int (*msi_host_init)(struct pcie_port *pp);
void (*msi_irq_ack)(int irq, struct pcie_port *pp);
};
struct pcie_port {
@@ -174,7 +173,11 @@ struct pcie_port {
const struct dw_pcie_host_ops *ops;
int msi_irq;
struct irq_domain *irq_domain;
struct irq_domain *msi_domain;
dma_addr_t msi_data;
u32 num_vectors;
u32 irq_status[MAX_MSI_CTRLS];
raw_spinlock_t lock;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
@@ -316,8 +319,10 @@ static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
#ifdef CONFIG_PCIE_DW_HOST
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
void dw_pcie_msi_init(struct pcie_port *pp);
void dw_pcie_free_msi(struct pcie_port *pp);
void dw_pcie_setup_rc(struct pcie_port *pp);
int dw_pcie_host_init(struct pcie_port *pp);
int dw_pcie_allocate_domains(struct pcie_port *pp);
#else
static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
@@ -328,6 +333,10 @@ static inline void dw_pcie_msi_init(struct pcie_port *pp)
{
}
static inline void dw_pcie_free_msi(struct pcie_port *pp)
{
}
static inline void dw_pcie_setup_rc(struct pcie_port *pp)
{
}
@@ -336,6 +345,11 @@ static inline int dw_pcie_host_init(struct pcie_port *pp)
{
return 0;
}
static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
{
return 0;
}
#endif
#ifdef CONFIG_PCIE_DW_EP
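
With num_vectors defaulting to MSI_DEF_NUM_VECTORS (32), a driver that wants the new 256-vector ceiling opts in through the set_num_vectors callback declared above; dw_pcie_host_init() rejects zero or anything above MAX_MSI_IRQS. A hypothetical example (foo_pcie_* is a placeholder, not from this commit):

static void foo_pcie_set_num_vectors(struct pcie_port *pp)
{
	pp->num_vectors = MAX_MSI_IRQS;	/* 256 after this series */
}

static const struct dw_pcie_host_ops foo_pcie_host_ops = {
	.host_init	 = foo_pcie_host_init,	/* hypothetical */
	.set_num_vectors = foo_pcie_set_num_vectors,
};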

drivers/pci/dwc/pcie-histb.c

@@ -208,13 +208,6 @@ static struct dw_pcie_host_ops histb_pcie_host_ops = {
.host_init = histb_pcie_host_init,
};
static irqreturn_t histb_pcie_msi_irq_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
return dw_handle_msi_irq(pp);
}
static void histb_pcie_host_disable(struct histb_pcie *hipcie)
{
reset_control_assert(hipcie->soft_reset);
@@ -413,14 +406,6 @@ static int histb_pcie_probe(struct platform_device *pdev)
dev_err(dev, "Failed to get MSI IRQ\n");
return pp->msi_irq;
}
ret = devm_request_irq(dev, pp->msi_irq,
histb_pcie_msi_irq_handler,
IRQF_SHARED, "histb-pcie-msi", pp);
if (ret) {
dev_err(dev, "cannot request MSI IRQ\n");
return ret;
}
}
hipcie->phy = devm_phy_get(dev, "phy");

drivers/pci/dwc/pcie-qcom.c

@@ -181,13 +181,6 @@ static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
return dw_handle_msi_irq(pp);
}
static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
struct dw_pcie *pci = pcie->pci;
@@ -1261,15 +1254,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
if (pp->msi_irq < 0)
return pp->msi_irq;
ret = devm_request_irq(dev, pp->msi_irq,
qcom_pcie_msi_irq_handler,
IRQF_SHARED | IRQF_NO_THREAD,
"qcom-pcie-msi", pp);
if (ret) {
dev_err(dev, "cannot request msi irq\n");
return ret;
}
}
ret = phy_init(pcie->phy);