[linux-yocto] [PATCH] drivers/pci: MSI/MSI-X support for Axxia (X9, XLF) PCIe host driver
Daniel Dragomir
daniel.dragomir at windriver.com
Thu Sep 14 08:52:25 PDT 2017
From: Palani <palaniappan.ramanathan at intel.com>
Implement MSI domain ops to support multiple MSI interrupts on
X9 and XLF. Add MSI-X interrupt support for all the PEI (0/1/2)
host controllers on X9.
Signed-off-by: Palani <palaniappan.ramanathan at intel.com>
---
drivers/pci/host/pcie-axxia.c | 465 ++++++++++++++++++++++++++++++------------
drivers/pci/host/pcie-axxia.h | 5 +-
2 files changed, 338 insertions(+), 132 deletions(-)
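
Note (illustrative, not part of the patch): with the generic MSI
domain in place, endpoint drivers reach the new multi-MSI/MSI-X
paths through the standard pci_alloc_irq_vectors() API. A minimal
sketch of such a consumer follows; the handler, device name, and
vector count are hypothetical, and error handling is reduced to
the basics.

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t my_dev_irq_handler(int irq, void *data)
{
	/* Device-specific interrupt handling would go here. */
	return IRQ_HANDLED;
}

static int my_dev_request_vectors(struct pci_dev *pdev)
{
	int nvec, i, err;

	/*
	 * Prefer MSI-X, falling back to multi-MSI. On Axxia,
	 * axxia_check_set_msi_mode() locks the controller into
	 * whichever mode is configured first, so endpoints behind
	 * the same root port cannot mix MSI and MSI-X.
	 */
	nvec = pci_alloc_irq_vectors(pdev, 1, 4,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		err = request_irq(pci_irq_vector(pdev, i),
				  my_dev_irq_handler, 0, "my_dev", pdev);
		if (err)
			goto err_free;
	}
	return 0;

err_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), pdev);
	pci_free_irq_vectors(pdev);
	return err;
}
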
diff --git a/drivers/pci/host/pcie-axxia.c b/drivers/pci/host/pcie-axxia.c
index 99c9020..bab3d77 100644
--- a/drivers/pci/host/pcie-axxia.c
+++ b/drivers/pci/host/pcie-axxia.c
@@ -28,9 +28,12 @@
#include <linux/axxia-pei.h>
#include <linux/time.h>
#include <linux/lsi-ncr.h>
+#include <asm-generic/msi.h>
#include "pcie-axxia.h"
+#define AXXIA_GENERIC_MSI_DOMAIN_IRQ 1
+
#define PEI_GENERAL_CORE_CTL_REG 0x38
#define PEI_SII_PWR_MGMT_REG 0xD4
#define PEI_SII_DBG_0_MON_REG 0xEC
@@ -114,6 +117,9 @@
#define PCIE_MSIX_INTR0_ENABLE 0xb4
#define PCIE_MSIX_INTR0_STATUS 0xb0
+#define AXI_GPREG_EDG_IRQ_STAT_HI 0x3a0
+#define AXI_GPREG_EDG_IRQ_MASK_HI 0x3a4
+#define MSIX_ASSERTED (0x1 << 8)
/* SYSCON */
#define AXXIA_SYSCON_BASE 0x8002C00000
@@ -468,6 +474,222 @@ static struct pci_ops axxia_pciex_pci_ops = {
.write = axxia_pciex_write_config,
};
+static void axxia_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+{
+ unsigned int res, bit, val;
+
+ res = (irq / 32) * 12;
+ bit = irq % 32;
+ axxia_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+ val &= ~(1 << bit);
+ axxia_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+}
+
+static void axxia_dw_pcie_msix_set_irq(struct pcie_port *pp, int irq, int mask)
+{
+ unsigned int res, bit, val;
+
+ res = (irq / 16) * 12;
+ bit = irq % 16;
+ axxia_axi_gpreg_readl(pp, PCIE_MSIX_INTR0_ENABLE + res, &val);
+ if (mask)
+ val |= 1 << bit;
+ else
+ val &= ~(1 << bit);
+ axxia_axi_gpreg_writel(pp, val, PCIE_MSIX_INTR0_ENABLE + res);
+ bit = irq % 32;
+ axxia_axi_gpreg_readl(pp, PEI_MSIX_INTR_ENABLE + res, &val);
+ if (mask)
+ val |= 1 << bit;
+ else
+ val &= ~(1 << bit);
+ axxia_axi_gpreg_writel(pp, val, PEI_MSIX_INTR_ENABLE + res);
+}
+
+static void axxia_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+{
+ unsigned int res, bit, val;
+
+ res = (irq / 32) * 12;
+ bit = irq % 32;
+ axxia_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+ val |= 1 << bit;
+ axxia_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+}
+
+static int axxia_check_set_msi_mode(struct pcie_port *pp, u32 is_msix)
+{
+ u32 val;
+
+ if (pp->msi_mode == AXXIA_MSI_UNCONFIGURED) {
+ if (is_msix) {
+ axxia_axi_gpreg_readl(pp, AXI_GPREG_MSTR, &val);
+ val &= ~CFG_MSI_MODE;
+ axxia_axi_gpreg_writel(pp, val, AXI_GPREG_MSTR);
+ pp->msi_mode = AXXIA_MSIX_MODE;
+ } else {
+ axxia_axi_gpreg_readl(pp, AXI_GPREG_MSTR, &val);
+ val |= CFG_MSI_MODE;
+ axxia_axi_gpreg_writel(pp, val, AXI_GPREG_MSTR);
+ pp->msi_mode = AXXIA_MSI_MODE;
+ }
+ } else {
+ if ((is_msix && (pp->msi_mode == AXXIA_MSI_MODE)) ||
+ ((!is_msix) && (pp->msi_mode == AXXIA_MSIX_MODE))) {
+ dev_info(pp->dev,
+ "Axxia already in %s mode, %s not supported\n",
+ pp->msi_mode == AXXIA_MSI_MODE ? "MSI" : "MSIX",
+ pp->msi_mode == AXXIA_MSI_MODE ? "MSIX" : "MSI");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+
+#ifdef AXXIA_GENERIC_MSI_DOMAIN_IRQ
+static struct irq_chip axxia_msi_top_irq_chip = {
+ .name = "PCI-MSI",
+};
+
+static struct msi_domain_info axxia_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ .chip = &axxia_msi_top_irq_chip,
+};
+
+static void axxia_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct pcie_port *pp = irq_data_get_irq_chip_data(data);
+ u64 msi_target = virt_to_phys((void *)pp->msi_data);
+
+ if (pp->msi_mode == AXXIA_MSIX_MODE) {
+ msi_target = msi_target + (data->hwirq * 4);
+ msg->address_lo = (u32)(msi_target & 0xffffffff);
+ msg->address_hi = (u32)(msi_target >> 32 & 0xffffffff);
+ msg->data = 0x12345678;
+ } else {
+ msg->address_lo = (u32)(msi_target & 0xffffffff);
+ msg->address_hi = (u32)(msi_target >> 32 & 0xffffffff);
+ msg->data = data->hwirq;
+ }
+
+}
+
+static int axxia_irq_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
+{
+ return -EINVAL;
+}
+
+static struct irq_chip axxia_msi_bottom_irq_chip = {
+ .name = "MSI",
+ .irq_compose_msi_msg = axxia_compose_msi_msg,
+ .irq_set_affinity = axxia_irq_set_affinity,
+};
+
+static int axxia_pcie_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs, void *args)
+{
+ struct pcie_port *pp = domain->host_data;
+ int msi_irq;
+	unsigned int i;
+ msi_alloc_info_t *va = args;
+ struct msi_desc *desc = va->desc;
+ int is_msix = 0;
+
+ if (desc) {
+ if (desc->msi_attrib.is_msix)
+ is_msix = 1;
+ else
+ is_msix = 0;
+ } else {
+		dev_err(pp->dev, "msi_desc not set, defaulting to MSI\n");
+ is_msix = 0;
+ }
+
+ mutex_lock(&pp->bitmap_lock);
+
+ msi_irq = bitmap_find_next_zero_area(pp->bitmap, MAX_MSI_IRQS,
+ 0, nr_irqs, 0);
+
+ if (((is_msix == 0) && (msi_irq < 32)) ||
+ (is_msix && (msi_irq < MAX_MSI_IRQS)))
+ bitmap_set(pp->bitmap, msi_irq, nr_irqs);
+ else
+ msi_irq = -ENOSPC;
+
+ mutex_unlock(&pp->bitmap_lock);
+ if (msi_irq < 0)
+ return msi_irq;
+
+ axxia_check_set_msi_mode(pp, is_msix);
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, msi_irq + i,
+ &axxia_msi_bottom_irq_chip,
+ domain->host_data, handle_simple_irq, NULL,
+ NULL);
+ if (is_msix)
+ axxia_dw_pcie_msix_set_irq(pp, msi_irq + i, 1);
+ else
+ axxia_dw_pcie_msi_set_irq(pp, msi_irq + i);
+ }
+ return 0;
+}
+
+static void axxia_pcie_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct pcie_port *pp = domain->host_data;
+ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+ int i;
+
+ mutex_lock(&pp->bitmap_lock);
+ bitmap_clear(pp->bitmap, data->hwirq, nr_irqs);
+ mutex_unlock(&pp->bitmap_lock);
+
+ for (i = 0; i < nr_irqs; i++) {
+ if (pp->msi_mode == AXXIA_MSIX_MODE)
+ axxia_dw_pcie_msix_set_irq(pp, data->hwirq + i, 0);
+ else
+ axxia_dw_pcie_msi_clear_irq(pp, data->hwirq + i);
+ }
+ if (bitmap_empty(pp->bitmap, MAX_MSI_IRQS))
+ pp->msi_mode = AXXIA_MSI_UNCONFIGURED;
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops axxia_msi_domain_ops = {
+ .alloc = axxia_pcie_irq_domain_alloc,
+ .free = axxia_pcie_irq_domain_free,
+};
+
+static int axxia_pcie_allocate_domains(struct pcie_port *pp)
+{
+ pp->irq_domain = irq_domain_add_linear(NULL, MAX_MSI_IRQS,
+ &axxia_msi_domain_ops, pp);
+ if (!pp->irq_domain)
+ return -ENOMEM;
+
+ pp->chip.domain = pci_msi_create_irq_domain(NULL,
+ &axxia_msi_domain_info,
+ pp->irq_domain);
+
+ if (!pp->chip.domain) {
+ irq_domain_remove(pp->irq_domain);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void axxia_free_domains(struct pcie_port *pp)
+{
+ if (pp->chip.domain)
+ irq_domain_remove(pp->chip.domain);
+ if (pp->irq_domain)
+ irq_domain_remove(pp->irq_domain);
+}
+#else
static struct irq_chip axxia_dw_msi_irq_chip = {
.name = "PCI-MSI",
.irq_enable = pci_msi_unmask_irq,
@@ -490,8 +712,7 @@ static int axxia_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
static const struct irq_domain_ops axxia_msi_domain_ops = {
.map = axxia_dw_pcie_msi_map,
};
-
-
+#endif
void axxia_dw_pcie_msi_init(struct pcie_port *pp)
{
u64 msi_target;
@@ -519,66 +740,59 @@ static void axxia_pcie_msi_init(struct pcie_port *pp)
}
/* MSI int handler */
-static int axxia_dw_pcie_handle_msi_irq(struct pcie_port *pp, int offset)
+static int axxia_dw_pcie_handle_msi_irq(struct pcie_port *pp, u32 va)
{
- unsigned long val;
+ unsigned long val = va;
int i, pos, irq;
int ret = 0;
-
- for (i = 0; i < MAX_MSI_CTRLS; i++) {
- axxia_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
- (u32 *)&val);
- if (val) {
- ret = 1;
- pos = 0;
- while ((pos = find_next_bit(&val, 32, pos)) != 32) {
-
- dev_dbg(pp->dev,
- "msi valid i = %d, val = %lx, pos = %d\n",
- i, val, pos);
- irq = irq_find_mapping(pp->irq_domain,
- i * 32 + pos);
- axxia_pcie_wr_own_conf(pp,
- PCIE_MSI_INTR0_STATUS + i * 12,
- 4, 1 << pos);
- generic_handle_irq(irq);
- pos++;
- }
+ i = 0;
+ if (val) {
+ ret = 1;
+ pos = 0;
+ while ((pos = find_next_bit(&val, 32, pos)) != 32) {
+
+ dev_dbg(pp->dev,
+ "msi valid i = %d, val = %lx, pos = %d\n",
+ i, val, pos);
+ irq = irq_find_mapping(pp->irq_domain,
+ i * 32 + pos);
+ axxia_pcie_wr_own_conf(pp,
+ PCIE_MSI_INTR0_STATUS + i * 12,
+ 4, 1 << pos);
+ generic_handle_irq(irq);
+ pos++;
}
}
return ret;
}
/* MSIx int handler */
-static void axxia_dw_pcie_handle_msix_irq(struct pcie_port *pp, int offset)
+int axxia_dw_pcie_handle_msix_irq(struct pcie_port *pp, int offset)
{
- unsigned long val, val1;
+ unsigned long val;
int i, pos, irq;
+	int ret = 0;
- axxia_axi_gpreg_readl(pp, PEI_MSIX_INTR_STATUS,
- (u32 *)&val1);
- if (val1) {
- for (i = 0; i < MAX_MSI_CTRLS*2; i++) {
- axxia_axi_gpreg_readl(pp,
- PCIE_MSIX_INTR0_STATUS + i * 12,
- (u32 *)&val);
-
- if (val) {
- pos = 0;
- while ((pos = find_next_bit(&val, 16, pos))
- != 16) {
- irq = irq_find_mapping(pp->irq_domain,
- i * 16 + pos);
- axxia_axi_gpreg_writel(pp, 1 << pos,
- PCIE_MSIX_INTR0_STATUS + i * 12);
- generic_handle_irq(irq);
- pos++;
- }
- }
+ i = offset;
+ axxia_axi_gpreg_readl(pp,
+ PCIE_MSIX_INTR0_STATUS + i * 12, (u32 *)&val);
+
+ if (val) {
+ ret = 1;
+ pos = 0;
+ while ((pos = find_next_bit(&val, 16, pos))
+ != 16) {
+ irq = irq_find_mapping(pp->irq_domain,
+ i * 16 + pos);
+ axxia_axi_gpreg_writel(pp, 1 << pos,
+ PCIE_MSIX_INTR0_STATUS + i * 12);
+ generic_handle_irq(irq);
+ pos++;
}
-
- axxia_axi_gpreg_writel(pp, val1, PEI_MSIX_INTR_STATUS);
}
+
+ axxia_axi_gpreg_writel(pp, 1 << i, PEI_MSIX_INTR_STATUS);
+ return ret;
}
static void axxia_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
@@ -586,8 +800,9 @@ static void axxia_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
struct pcie_port *pp = irq_desc_get_handler_data(desc);
u32 offset = irq - pp->msi_irqs[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 val1;
- dev_dbg(pp->dev, "%s, irq %d i %d\n", __func__, irq, offset);
+ dev_dbg(pp->dev, "%s, irq %d of=%d\n", __func__, irq, offset);
/*
* The chained irq handler installation would have replaced normal
@@ -595,8 +810,17 @@ static void axxia_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
* ack operation.
*/
chained_irq_enter(chip, desc);
- axxia_dw_pcie_handle_msi_irq(pp, offset);
- axxia_dw_pcie_handle_msix_irq(pp, offset);
+ if (offset == 0) {
+ axxia_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS, 4,
+ (u32 *)&val1);
+ if (val1)
+ axxia_dw_pcie_handle_msi_irq(pp, val1);
+ }
+
+ axxia_axi_gpreg_readl(pp, PEI_MSIX_INTR_STATUS,
+ (u32 *)&val1);
+ if ((val1) & (1 << offset))
+ axxia_dw_pcie_handle_msix_irq(pp, offset);
chained_irq_exit(chip, desc);
}
@@ -620,6 +844,12 @@ static void axxia_pcie_enable_interrupts(struct pcie_port *pp)
val |= MSI_ASSERTED;
axxia_cc_gpreg_writel(pp, val,
CC_GPREG_EDG_IRQ_MASK_HI);
+ axxia_axi_gpreg_readl(pp,
+ AXI_GPREG_EDG_IRQ_MASK_HI, &val);
+ val |= MSIX_ASSERTED;
+ axxia_axi_gpreg_writel(pp, val,
+ AXI_GPREG_EDG_IRQ_MASK_HI);
+
} else {
for (i = 0; i < pp->num_msi_irqs; i++) {
irq_set_chained_handler(pp->msi_irqs[i],
@@ -1058,8 +1288,9 @@ static int axxia_pcie_establish_link(struct pcie_port *pp)
static irqreturn_t axxia_pcie_irq_handler(int irq, void *arg)
{
struct pcie_port *pp = arg;
- u32 val;
- int ret;
+ u32 val, val1;
+ int i;
+ int ret = 0;
u32 offset;
axxia_cc_gpreg_readl(pp, CC_GPREG_EDG_IRQ_STAT, &val);
@@ -1089,49 +1320,39 @@ static irqreturn_t axxia_pcie_irq_handler(int irq, void *arg)
axxia_cc_gpreg_readl(pp,
CC_GPREG_EDG_IRQ_STAT_HI, &val);
if (val & MSI_ASSERTED) {
- ret = axxia_dw_pcie_handle_msi_irq(pp, offset);
+ axxia_pcie_rd_own_conf(pp,
+ PCIE_MSI_INTR0_STATUS, 4, (u32 *)&val1);
+ if (val1)
+ ret = axxia_dw_pcie_handle_msi_irq(pp,
+ val1);
axxia_cc_gpreg_writel(pp, MSI_ASSERTED,
CC_GPREG_EDG_IRQ_STAT_HI);
if (!ret)
return IRQ_NONE;
}
+ axxia_axi_gpreg_readl(pp,
+ AXI_GPREG_EDG_IRQ_STAT_HI, &val);
+ if (val & MSIX_ASSERTED) {
+ axxia_axi_gpreg_readl(pp, PEI_MSIX_INTR_STATUS,
+ (u32 *)&val1);
+ for (i = 0 ; i < 32; i++) {
+ if ((val1) & (1 << i))
+ ret =
+ axxia_dw_pcie_handle_msix_irq(
+ pp, i);
+ }
+ axxia_axi_gpreg_writel(pp, MSIX_ASSERTED,
+ AXI_GPREG_EDG_IRQ_STAT_HI);
+ if (!ret)
+ return IRQ_NONE;
+ }
+
}
}
return IRQ_HANDLED;
}
-static void axxia_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
-{
- unsigned int res, bit, val;
-
- res = (irq / 32) * 12;
- bit = irq % 32;
- axxia_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
- val &= ~(1 << bit);
- axxia_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
-}
-
-static void axxia_dw_pcie_msix_set_irq(struct pcie_port *pp, int irq, int mask)
-{
- unsigned int res, bit, val;
-
- res = (irq / 16) * 12;
- bit = irq % 16;
- axxia_axi_gpreg_readl(pp, PCIE_MSIX_INTR0_ENABLE + res, &val);
- if (mask)
- val |= 1 << bit;
- else
- val &= ~(1 << bit);
- axxia_axi_gpreg_writel(pp, val, PCIE_MSIX_INTR0_ENABLE + res);
- bit = irq % 32;
- axxia_axi_gpreg_readl(pp, PEI_MSIX_INTR_ENABLE + res, &val);
- if (mask)
- val |= 1 << bit;
- else
- val &= ~(1 << bit);
- axxia_axi_gpreg_writel(pp, val, PEI_MSIX_INTR_ENABLE + res);
-}
-
+#ifndef AXXIA_GENERIC_MSI_DOMAIN_IRQ
static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
unsigned int nvec, unsigned int pos, u32 is_msix)
{
@@ -1152,17 +1373,6 @@ static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
pp->msi_mode = AXXIA_MSI_UNCONFIGURED;
}
-static void axxia_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
-{
- unsigned int res, bit, val;
-
- res = (irq / 32) * 12;
- bit = irq % 32;
- axxia_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
- val |= 1 << bit;
- axxia_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
-}
-
static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
@@ -1208,35 +1418,6 @@ no_valid_irq:
return -ENOSPC;
}
-static int axxia_check_set_msi_mode(struct pcie_port *pp, u32 is_msix)
-{
- u32 val;
-
- if (pp->msi_mode == AXXIA_MSI_UNCONFIGURED) {
- if (is_msix) {
- axxia_axi_gpreg_readl(pp, AXI_GPREG_MSTR, &val);
- val &= ~CFG_MSI_MODE;
- axxia_axi_gpreg_writel(pp, val, AXI_GPREG_MSTR);
- pp->msi_mode = AXXIA_MSIX_MODE;
- } else {
- axxia_axi_gpreg_readl(pp, AXI_GPREG_MSTR, &val);
- val |= CFG_MSI_MODE;
- axxia_axi_gpreg_writel(pp, val, AXI_GPREG_MSTR);
- pp->msi_mode = AXXIA_MSI_MODE;
- }
- } else {
- if ((is_msix && (pp->msi_mode == AXXIA_MSI_MODE)) ||
- ((!is_msix) && (pp->msi_mode == AXXIA_MSIX_MODE))) {
- dev_info(pp->dev,
- "Axxia already in %s mode, %s not supported\n",
- pp->msi_mode == AXXIA_MSI_MODE ? "MSI" : "MSIX",
- pp->msi_mode == AXXIA_MSI_MODE ? "MSIX" : "MSI");
- return -EINVAL;
- }
- }
- return 0;
-}
-
static void axxia_msi_setup_msg(struct pcie_port *pp, unsigned int irq,
u32 pos, u32 is_msix)
{
@@ -1296,7 +1477,7 @@ static struct msi_controller axxia_dw_pcie_msi_chip = {
.setup_irq = axxia_dw_msi_setup_irq,
.teardown_irq = axxia_dw_msi_teardown_irq,
};
-
+#endif
int axxia_pcie_host_init(struct pcie_port *pp)
{
struct device_node *np = pp->dev->of_node;
@@ -1427,7 +1608,23 @@ int axxia_pcie_host_init(struct pcie_port *pp)
dev_err(pp->dev, "failed to request irq\n");
return ret;
}
+#ifdef AXXIA_GENERIC_MSI_DOMAIN_IRQ
+ ret = BITS_TO_LONGS(MAX_MSI_IRQS) * sizeof(long);
+ pp->bitmap = kzalloc(ret, GFP_KERNEL);
+ if (!pp->bitmap) {
+ dev_err(pp->dev, "PCIE: Error allocating MSI bitmap\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&pp->bitmap_lock);
+
+ ret = axxia_pcie_allocate_domains(pp);
+ if (ret) {
+ pr_err("PCIE: Failed to create MSI IRQ domain\n");
+ return ret;
+ }
+#else
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
MAX_MSI_IRQS, &axxia_msi_domain_ops,
@@ -1440,23 +1637,31 @@ int axxia_pcie_host_init(struct pcie_port *pp)
for (i = 0; i < MAX_MSI_IRQS; i++)
irq_create_mapping(pp->irq_domain, i);
}
-
+#endif
axxia_pcie_enable_interrupts(pp);
bus = pci_create_root_bus(&pdev->dev, pp->root_bus_nr,
&axxia_pciex_pci_ops, pp, &res);
if (!bus)
- return 1;
+ goto fail_ret;
#ifdef CONFIG_PCI_MSI
pp->msi_mode = AXXIA_MSI_UNCONFIGURED;
+#ifdef AXXIA_GENERIC_MSI_DOMAIN_IRQ
+ bus->msi = &pp->chip;
+#else
bus->msi = &axxia_dw_pcie_msi_chip;
#endif
-
+#endif
pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
pci_bus_add_devices(bus);
return 0;
+fail_ret:
+#ifdef AXXIA_GENERIC_MSI_DOMAIN_IRQ
+ axxia_free_domains(pp);
+#endif
+ return 1;
}
static int axxia_pcie_probe(struct platform_device *pdev)
diff --git a/drivers/pci/host/pcie-axxia.h b/drivers/pci/host/pcie-axxia.h
index 2fa15f6..4624a4d 100644
--- a/drivers/pci/host/pcie-axxia.h
+++ b/drivers/pci/host/pcie-axxia.h
@@ -19,8 +19,7 @@
* it 32 as of now. Probably we will never need more than 32. If needed,
* then increment it in multiple of 32.
*/
-#define MAX_MSI_IRQS 64
-#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
+#define MAX_MSI_IRQS 256
#define AXXIA_MSI_IRQL 32
#define AXXIA_MSI_UNCONFIGURED 0
#define AXXIA_MSI_MODE 1
@@ -62,6 +61,8 @@ struct pcie_port {
u32 msi_mode;
unsigned long msi_data;
struct irq_domain *irq_domain;
+ struct mutex bitmap_lock;
+ unsigned long *bitmap;
struct msi_controller chip;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
#if 0
--
2.7.4