From ca8a8fabb10459753fba73cac0382076f0aab058 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Tue, 17 May 2016 11:13:24 -0600 Subject: x86/PCI: VMD: Select device dma ops to override VMD device doesn't usually have device archdata specific dma_ops, so we need to override the default ops for VMD devices. Signed-off-by: Keith Busch Signed-off-by: Bjorn Helgaas Acked-by: Jon Derrick diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c index 7792aba..b1662bf 100644 --- a/arch/x86/pci/vmd.c +++ b/arch/x86/pci/vmd.c @@ -261,7 +261,7 @@ static struct device *to_vmd_dev(struct device *dev) static struct dma_map_ops *vmd_dma_ops(struct device *dev) { - return to_vmd_dev(dev)->archdata.dma_ops; + return get_dma_ops(to_vmd_dev(dev)); } static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr, @@ -367,7 +367,7 @@ static void vmd_teardown_dma_ops(struct vmd_dev *vmd) { struct dma_domain *domain = &vmd->dma_domain; - if (vmd->dev->dev.archdata.dma_ops) + if (get_dma_ops(&vmd->dev->dev)) del_dma_domain(domain); } @@ -379,7 +379,7 @@ static void vmd_teardown_dma_ops(struct vmd_dev *vmd) static void vmd_setup_dma_ops(struct vmd_dev *vmd) { - const struct dma_map_ops *source = vmd->dev->dev.archdata.dma_ops; + const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev); struct dma_map_ops *dest = &vmd->dma_ops; struct dma_domain *domain = &vmd->dma_domain; -- cgit v0.10.2 From 97e92306357583c1741f0a111c7befe8673b91ee Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Tue, 17 May 2016 11:22:18 -0600 Subject: x86/PCI: VMD: Initialize list item in IRQ disable Multiple calls to disable an IRQ would have caused the driver to dereference a poisoned list item. This re-initializes the list to allow multiple requests to disable the IRQ. Signed-off-by: Keith Busch Signed-off-by: Bjorn Helgaas Acked-by: Jon Derrick diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c index b1662bf..3519a15 100644 --- a/arch/x86/pci/vmd.c +++ b/arch/x86/pci/vmd.c @@ -135,6 +135,7 @@ static void vmd_irq_disable(struct irq_data *data) raw_spin_lock(&list_lock); list_del_rcu(&vmdirq->node); + INIT_LIST_HEAD_RCU(&vmdirq->node); raw_spin_unlock(&list_lock); } -- cgit v0.10.2 From 60fcdac8136b4275da42d6edf9ddb10439350289 Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 30 May 2016 16:17:58 +0200 Subject: PCI: hv: Don't leak buffer in hv_pci_onchannelcallback() We don't free buffer on several code paths in hv_pci_onchannelcallback(), put kfree() to the end of the function to fix the issue. Direct { kfree(); return; } can now be replaced with a simple 'break'; Signed-off-by: Vitaly Kuznetsov Signed-off-by: Bjorn Helgaas Acked-by: Jake Oshins diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 7e9b2de..a68ec49 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -1661,10 +1661,8 @@ static void hv_pci_onchannelcallback(void *context) * All incoming packets must be at least as large as a * response. 
*/ - if (bytes_recvd <= sizeof(struct pci_response)) { - kfree(buffer); - return; - } + if (bytes_recvd <= sizeof(struct pci_response)) + break; desc = (struct vmpacket_descriptor *)buffer; switch (desc->type) { @@ -1679,8 +1677,7 @@ static void hv_pci_onchannelcallback(void *context) comp_packet->completion_func(comp_packet->compl_ctxt, response, bytes_recvd); - kfree(buffer); - return; + break; case VM_PKT_DATA_INBAND: @@ -1729,6 +1726,8 @@ static void hv_pci_onchannelcallback(void *context) } break; } + + kfree(buffer); } /** -- cgit v0.10.2 From 837d741ea2e6bb23da9cad1667776fc6f0cb67dd Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Fri, 17 Jun 2016 12:45:30 -0500 Subject: PCI: hv: Handle all pending messages in hv_pci_onchannelcallback() When we have an interrupt from the host we have a bit set in event page indicating there are messages for the particular channel. We need to read them all as we won't get signaled for what was on the queue before we cleared the bit in vmbus_on_event(). This applies to all Hyper-V drivers and the pass-through driver should do the same. I did not meet any bugs; the issue was found by code inspection. We don't have many events going through hv_pci_onchannelcallback(), which explains why nobody reported the issue before. While on it, fix handling non-zero vmbus_recvpacket_raw() return values by dropping out. If the return value is not zero, it is wrong to inspect buffer or bytes_recvd as these may contain invalid data. Signed-off-by: Vitaly Kuznetsov Signed-off-by: Bjorn Helgaas Acked-by: Jake Oshins diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index a68ec49..7de341d 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -1657,12 +1657,16 @@ static void hv_pci_onchannelcallback(void *context) continue; } + /* Zero length indicates there are no more packets. */ + if (ret || !bytes_recvd) + break; + /* * All incoming packets must be at least as large as a * response. */ if (bytes_recvd <= sizeof(struct pci_response)) - break; + continue; desc = (struct vmpacket_descriptor *)buffer; switch (desc->type) { @@ -1724,7 +1728,6 @@ static void hv_pci_onchannelcallback(void *context) desc->type, req_id, bytes_recvd); break; } - break; } kfree(buffer); -- cgit v0.10.2 From 3f57ff4f9c78ee0fafb010287019126ce8c1fc01 Mon Sep 17 00:00:00 2001 From: Jon Derrick Date: Mon, 20 Jun 2016 09:39:51 -0600 Subject: x86/PCI: VMD: Use lock save/restore in interrupt enable path Enabling interrupts may result in an interrupt raised and serviced while VMD holds a lock, resulting in contention with the spin lock held while enabling interrupts. The solution is to disable preemption and save/restore the state during interrupt enable and disable. Fixes lockdep: ====================================================== [ INFO: HARDIRQ-safe -> HARDIRQ-unsafe lock order detected ] 4.6.0-2016-06-16-lockdep+ #47 Tainted: G E ------------------------------------------------------ kworker/0:1/447 [HC0[0]:SC0[0]:HE0:SE1] is trying to acquire: (list_lock){+.+...}, at: [] vmd_irq_enable+0x3c/0x70 [vmd] and this task is already holding: (&irq_desc_lock_class){-.-...}, at: [] __setup_irq+0xa6/0x610 which would create a new lock dependency: (&irq_desc_lock_class){-.-...} -> (list_lock){+.+...} but this new dependency connects a HARDIRQ-irq-safe lock: (&irq_desc_lock_class){-.-...} ... 
which became HARDIRQ-irq-safe at: [] __lock_acquire+0x981/0xe00 [] lock_acquire+0x119/0x220 [] _raw_spin_lock+0x3d/0x80 [] handle_level_irq+0x24/0x110 [] handle_irq+0x1a/0x30 [] do_IRQ+0x61/0x120 [] ret_from_intr+0x0/0x20 [] _raw_spin_unlock_irqrestore+0x40/0x60 [] __setup_irq+0x29e/0x610 [] setup_irq+0x41/0x90 [] setup_default_timer_irq+0x1e/0x20 [] hpet_time_init+0x17/0x19 [] x86_late_time_init+0xa/0x11 [] start_kernel+0x382/0x436 [] x86_64_start_reservations+0x2a/0x2c [] x86_64_start_kernel+0x13b/0x14a to a HARDIRQ-irq-unsafe lock: (list_lock){+.+...} ... which became HARDIRQ-irq-unsafe at: ... [] __lock_acquire+0x7ee/0xe00 [] lock_acquire+0x119/0x220 [] _raw_spin_lock+0x3d/0x80 [] vmd_msi_init+0x72/0x150 [vmd] [] msi_domain_alloc+0xb7/0x140 [] irq_domain_alloc_irqs_recursive+0x40/0xa0 [] __irq_domain_alloc_irqs+0x14a/0x330 [] msi_domain_alloc_irqs+0x8c/0x1d0 [] pci_msi_setup_msi_irqs+0x43/0x70 [] pci_enable_msi_range+0x131/0x280 [] pcie_port_device_register+0x320/0x4e0 [] pcie_portdrv_probe+0x34/0x60 [] local_pci_probe+0x45/0xa0 [] pci_device_probe+0xdb/0x130 [] driver_probe_device+0x22c/0x440 [] __device_attach_driver+0x94/0x110 [] bus_for_each_drv+0x5d/0x90 [] __device_attach+0xc0/0x140 [] device_attach+0x10/0x20 [] pci_bus_add_device+0x47/0x90 [] pci_bus_add_devices+0x39/0x70 [] pci_rescan_bus+0x27/0x30 [] vmd_probe+0x68f/0x76c [vmd] [] local_pci_probe+0x45/0xa0 [] work_for_cpu_fn+0x14/0x20 [] process_one_work+0x1f4/0x740 [] worker_thread+0x236/0x4f0 [] kthread+0xf2/0x110 [] ret_from_fork+0x22/0x50 other info that might help us debug this: Possible interrupt unsafe locking scenario: CPU0 CPU1 ---- ---- lock(list_lock); local_irq_disable(); lock(&irq_desc_lock_class); lock(list_lock); lock(&irq_desc_lock_class); *** DEADLOCK *** Signed-off-by: Jon Derrick Signed-off-by: Bjorn Helgaas Acked-by: Keith Busch diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c index 3519a15..7aa80dc 100644 --- a/arch/x86/pci/vmd.c +++ b/arch/x86/pci/vmd.c @@ -119,10 +119,11 @@ static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) static void vmd_irq_enable(struct irq_data *data) { struct vmd_irq *vmdirq = data->chip_data; + unsigned long flags; - raw_spin_lock(&list_lock); + raw_spin_lock_irqsave(&list_lock, flags); list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list); - raw_spin_unlock(&list_lock); + raw_spin_unlock_irqrestore(&list_lock, flags); data->chip->irq_unmask(data); } @@ -130,13 +131,14 @@ static void vmd_irq_enable(struct irq_data *data) static void vmd_irq_disable(struct irq_data *data) { struct vmd_irq *vmdirq = data->chip_data; + unsigned long flags; data->chip->irq_mask(data); - raw_spin_lock(&list_lock); + raw_spin_lock_irqsave(&list_lock, flags); list_del_rcu(&vmdirq->node); INIT_LIST_HEAD_RCU(&vmdirq->node); - raw_spin_unlock(&list_lock); + raw_spin_unlock_irqrestore(&list_lock, flags); } /* @@ -170,13 +172,14 @@ static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info, static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd) { int i, best = 0; + unsigned long flags; - raw_spin_lock(&list_lock); + raw_spin_lock_irqsave(&list_lock, flags); for (i = 1; i < vmd->msix_count; i++) if (vmd->irqs[i].count < vmd->irqs[best].count) best = i; vmd->irqs[best].count++; - raw_spin_unlock(&list_lock); + raw_spin_unlock_irqrestore(&list_lock, flags); return &vmd->irqs[best]; } @@ -204,11 +207,12 @@ static void vmd_msi_free(struct irq_domain *domain, struct msi_domain_info *info, unsigned int virq) { struct vmd_irq *vmdirq = irq_get_chip_data(virq); + unsigned 
long flags; /* XXX: Potential optimization to rebalance */ - raw_spin_lock(&list_lock); + raw_spin_lock_irqsave(&list_lock, flags); vmdirq->irq->count--; - raw_spin_unlock(&list_lock); + raw_spin_unlock_irqrestore(&list_lock, flags); kfree_rcu(vmdirq, rcu); } -- cgit v0.10.2 From e382dffc904d14cb6e2c31e2eefebdca41343943 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Mon, 20 Jun 2016 09:39:52 -0600 Subject: x86/PCI: VMD: Use x86_vector_domain as parent domain Otherwise APIC code assumes VMD's IRQ domain can be managed by the APIC, resulting in an invalid cast of irq_data during irq_force_complete_move(). Signed-off-by: Jon Derrick Signed-off-by: Keith Busch Signed-off-by: Bjorn Helgaas diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c index 7aa80dc..0f77cc1 100644 --- a/arch/x86/pci/vmd.c +++ b/arch/x86/pci/vmd.c @@ -599,7 +599,7 @@ static int vmd_enable_domain(struct vmd_dev *vmd) sd->node = pcibus_to_node(vmd->dev->bus); vmd->irq_domain = pci_msi_create_irq_domain(NULL, &vmd_msi_domain_info, - NULL); + x86_vector_domain); if (!vmd->irq_domain) return -ENODEV; -- cgit v0.10.2 From 9c2053040ca7ae52f1143a47ae84502aa7970438 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Mon, 20 Jun 2016 09:39:53 -0600 Subject: x86/PCI: VMD: Separate MSI and MSI-X vector sharing Child devices in a VMD domain that want to use MSI are slowing down MSI-X using devices sharing the same vectors. Move all MSI usage to a single VMD vector, and MSI-X devices can share the rest. Signed-off-by: Keith Busch Signed-off-by: Bjorn Helgaas Acked-by: Jon Derrick diff --git a/arch/x86/pci/vmd.c b/arch/x86/pci/vmd.c index 0f77cc1..fd582ab 100644 --- a/arch/x86/pci/vmd.c +++ b/arch/x86/pci/vmd.c @@ -169,11 +169,14 @@ static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info, * XXX: We can be even smarter selecting the best IRQ once we solve the * affinity problem. */ -static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd) +static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc) { - int i, best = 0; + int i, best = 1; unsigned long flags; + if (!desc->msi_attrib.is_msix || vmd->msix_count == 1) + return &vmd->irqs[0]; + raw_spin_lock_irqsave(&list_lock, flags); for (i = 1; i < vmd->msix_count; i++) if (vmd->irqs[i].count < vmd->irqs[best].count) @@ -188,14 +191,15 @@ static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, unsigned int virq, irq_hw_number_t hwirq, msi_alloc_info_t *arg) { - struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(arg->desc)->bus); + struct msi_desc *desc = arg->desc; + struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus); struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL); if (!vmdirq) return -ENOMEM; INIT_LIST_HEAD(&vmdirq->node); - vmdirq->irq = vmd_next_irq(vmd); + vmdirq->irq = vmd_next_irq(vmd, desc); vmdirq->virq = virq; irq_domain_set_info(domain, virq, vmdirq->irq->vmd_vector, info->chip, -- cgit v0.10.2 From f8be11ae3d2c9a1338da37ff91ff4c65922d21be Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Fri, 22 Jul 2016 15:54:41 -0500 Subject: PCI: altera: Reorder read/write functions Move cra_writel(), cra_readl(), and altera_pcie_link_is_up() so a future patch can use them in altera_pcie_retrain(). No functional change intended. 
Signed-off-by: Bjorn Helgaas diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c index dbac6fb..a1e7822 100644 --- a/drivers/pci/host/pcie-altera.c +++ b/drivers/pci/host/pcie-altera.c @@ -81,6 +81,22 @@ struct tlp_rp_regpair_t { u32 reg1; }; +static inline void cra_writel(struct altera_pcie *pcie, const u32 value, + const u32 reg) +{ + writel_relaxed(value, pcie->cra_base + reg); +} + +static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg) +{ + return readl_relaxed(pcie->cra_base + reg); +} + +static bool altera_pcie_link_is_up(struct altera_pcie *pcie) +{ + return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0); +} + static void altera_pcie_retrain(struct pci_dev *dev) { u16 linkcap, linkstat; @@ -120,17 +136,6 @@ static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn, return false; } -static inline void cra_writel(struct altera_pcie *pcie, const u32 value, - const u32 reg) -{ - writel_relaxed(value, pcie->cra_base + reg); -} - -static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg) -{ - return readl_relaxed(pcie->cra_base + reg); -} - static void tlp_write_tx(struct altera_pcie *pcie, struct tlp_rp_regpair_t *tlp_rp_regdata) { @@ -139,11 +144,6 @@ static void tlp_write_tx(struct altera_pcie *pcie, cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL); } -static bool altera_pcie_link_is_up(struct altera_pcie *pcie) -{ - return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0); -} - static bool altera_pcie_valid_config(struct altera_pcie *pcie, struct pci_bus *bus, int dev) { -- cgit v0.10.2 From c622032ebc538cb3869c312ae3ad235a99da84b6 Mon Sep 17 00:00:00 2001 From: Ley Foon Tan Date: Tue, 21 Jun 2016 16:53:12 +0800 Subject: PCI: altera: Check link status before retrain link Check the link status before retraining. If the link is not up, don't bother trying to retrain it. [bhelgaas: split code move to separate patch, changelog] Signed-off-by: Ley Foon Tan Signed-off-by: Bjorn Helgaas diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c index a1e7822..b61025e 100644 --- a/drivers/pci/host/pcie-altera.c +++ b/drivers/pci/host/pcie-altera.c @@ -100,6 +100,10 @@ static bool altera_pcie_link_is_up(struct altera_pcie *pcie) static void altera_pcie_retrain(struct pci_dev *dev) { u16 linkcap, linkstat; + struct altera_pcie *pcie = dev->bus->sysdata; + + if (!altera_pcie_link_is_up(pcie)) + return; /* * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but -- cgit v0.10.2 From 3a928e98a833e1a470a60d2fedf3c55502185fb7 Mon Sep 17 00:00:00 2001 From: Ley Foon Tan Date: Tue, 21 Jun 2016 16:53:13 +0800 Subject: PCI: altera: Poll for link up status after retraining the link Some PCIe devices take a long time to reach link up state after retrain. Poll for link up status after retraining the link. This is to make sure the link is up before we access configuration space. 
[bhelgaas: changelog] Signed-off-by: Ley Foon Tan Signed-off-by: Bjorn Helgaas diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c index b61025e..e4154b2 100644 --- a/drivers/pci/host/pcie-altera.c +++ b/drivers/pci/host/pcie-altera.c @@ -61,6 +61,8 @@ #define TLP_LOOP 500 #define RP_DEVFN 0 +#define LINK_UP_TIMEOUT 5000 + #define INTX_NUM 4 #define DWORD_MASK 3 @@ -101,6 +103,7 @@ static void altera_pcie_retrain(struct pci_dev *dev) { u16 linkcap, linkstat; struct altera_pcie *pcie = dev->bus->sysdata; + int timeout = 0; if (!altera_pcie_link_is_up(pcie)) return; @@ -115,9 +118,16 @@ static void altera_pcie_retrain(struct pci_dev *dev) return; pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &linkstat); - if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) + if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) { pcie_capability_set_word(dev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL); + while (!altera_pcie_link_is_up(pcie)) { + timeout++; + if (timeout > LINK_UP_TIMEOUT) + break; + udelay(5); + } + } } DECLARE_PCI_FIXUP_EARLY(0x1172, PCI_ANY_ID, altera_pcie_retrain); -- cgit v0.10.2 From cec6dba24b42fec76820d0ad01b6480c9c81d764 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 14 Jul 2016 12:10:46 +0200 Subject: PCI: xilinx: Fix return value in case of error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In xilinx_pcie_init_irq_domain(), the pattern used to check and return error is: if (!var) { dev_err(...); return PTR_ERR(var); } So the returned value in case of error is always 0, which means 'success'. Change it to return -ENODEV instead. Signed-off-by: Christophe JAILLET Signed-off-by: Bjorn Helgaas Acked-by: Sören Brinkmann diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c index 65f0fe0..45413b2 100644 --- a/drivers/pci/host/pcie-xilinx.c +++ b/drivers/pci/host/pcie-xilinx.c @@ -550,7 +550,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) pcie_intc_node = of_get_next_child(node, NULL); if (!pcie_intc_node) { dev_err(dev, "No PCIe Intc node found\n"); - return PTR_ERR(pcie_intc_node); + return -ENODEV; } port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4, @@ -558,7 +558,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) port); if (!port->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); - return PTR_ERR(port->irq_domain); + return -ENODEV; } /* Setup MSI */ @@ -569,7 +569,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) &xilinx_pcie_msi_chip); if (!port->irq_domain) { dev_err(dev, "Failed to get a MSI IRQ domain\n"); - return PTR_ERR(port->irq_domain); + return -ENODEV; } xilinx_pcie_enable_msi(port); -- cgit v0.10.2 From 991bfef82f17194fe2a2f161b9552d9633ff21b9 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 14 Jul 2016 23:18:27 +0200 Subject: PCI: dra7xx: Fix return value in case of error In dra7xx_pcie_init_irq_domain(), the pattern used to check and return error is: if (!var) { dev_err(...); return PTR_ERR(var); } So the returned value in case of error is always 0, which means 'success'. Change it to return -ENODEV instead. 
Signed-off-by: Christophe JAILLET Signed-off-by: Bjorn Helgaas Reviewed-by: Kishon Vijay Abraham I diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c index f441130..81b3949 100644 --- a/drivers/pci/host/pci-dra7xx.c +++ b/drivers/pci/host/pci-dra7xx.c @@ -181,14 +181,14 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) if (!pcie_intc_node) { dev_err(dev, "No PCIe Intc node found\n"); - return PTR_ERR(pcie_intc_node); + return -ENODEV; } pp->irq_domain = irq_domain_add_linear(pcie_intc_node, 4, &intx_domain_ops, pp); if (!pp->irq_domain) { dev_err(dev, "Failed to get a INTx IRQ domain\n"); - return PTR_ERR(pp->irq_domain); + return -ENODEV; } return 0; -- cgit v0.10.2 From 0c6e617f656ec259162a41c0849e3a7557c99d95 Mon Sep 17 00:00:00 2001 From: Cathy Avery Date: Tue, 12 Jul 2016 11:31:24 -0400 Subject: PCI: hv: Fix interrupt cleanup path SR-IOV disabled from the host causes a memory leak. pci-hyperv usually first receives a PCI_EJECT notification and then proceeds to delete the hpdev list entry in hv_eject_device_work(). Later in hv_msi_free() since the device is no longer on the device list hpdev is NULL and hv_msi_free returns without freeing int_desc as part of hv_int_desc_free(). Signed-off-by: Cathy Avery Signed-off-by: Bjorn Helgaas Acked-by: Jake Oshins diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 7de341d..6955ffdb 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -732,16 +732,18 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info, pdev = msi_desc_to_pci_dev(msi); hbus = info->data; - hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); - if (!hpdev) + int_desc = irq_data_get_irq_chip_data(irq_data); + if (!int_desc) return; - int_desc = irq_data_get_irq_chip_data(irq_data); - if (int_desc) { - irq_data->chip_data = NULL; - hv_int_desc_free(hpdev, int_desc); + irq_data->chip_data = NULL; + hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); + if (!hpdev) { + kfree(int_desc); + return; } + hv_int_desc_free(hpdev, int_desc); put_pcichild(hpdev, hv_pcidev_ref_by_slot); } -- cgit v0.10.2 From 0a00ab1204e065bfc2f0ecf4794ad9cbf66e86a7 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Thu, 30 Jun 2016 11:32:30 +0200 Subject: dt-bindings: add DT binding for the Aardvark PCIe controller Add the documentation for the Device Tree binding for the Aardvark PCIe controller, found on Marvell Armada 3700 ARM64 SoCs. Signed-off-by: Thomas Petazzoni Signed-off-by: Bjorn Helgaas diff --git a/Documentation/devicetree/bindings/pci/aardvark-pci.txt b/Documentation/devicetree/bindings/pci/aardvark-pci.txt new file mode 100644 index 0000000..bbcd9f4 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/aardvark-pci.txt @@ -0,0 +1,56 @@ +Aardvark PCIe controller + +This PCIe controller is used on the Marvell Armada 3700 ARM64 SoC. 
+ +The Device Tree node describing an Aardvark PCIe controller must +contain the following properties: + + - compatible: Should be "marvell,armada-3700-pcie" + - reg: range of registers for the PCIe controller + - interrupts: the interrupt line of the PCIe controller + - #address-cells: set to <3> + - #size-cells: set to <2> + - device_type: set to "pci" + - ranges: ranges for the PCI memory and I/O regions + - #interrupt-cells: set to <1> + - msi-controller: indicates that the PCIe controller can itself + handle MSI interrupts + - msi-parent: pointer to the MSI controller to be used + - interrupt-map-mask and interrupt-map: standard PCI properties to + define the mapping of the PCIe interface to interrupt numbers. + - bus-range: PCI bus numbers covered + +In addition, the Device Tree describing an Aardvark PCIe controller +must include a sub-node that describes the legacy interrupt controller +built into the PCIe controller. This sub-node must have the following +properties: + + - interrupt-controller + - #interrupt-cells: set to <1> + +Example: + + pcie0: pcie@d0070000 { + compatible = "marvell,armada-3700-pcie"; + device_type = "pci"; + status = "disabled"; + reg = <0 0xd0070000 0 0x20000>; + #address-cells = <3>; + #size-cells = <2>; + bus-range = <0x00 0xff>; + interrupts = ; + #interrupt-cells = <1>; + msi-controller; + msi-parent = <&pcie0>; + ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x1000000 /* Port 0 MEM */ + 0x81000000 0 0xe9000000 0 0xe9000000 0 0x10000>; /* Port 0 IO*/ + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 1 &pcie_intc 0>, + <0 0 0 2 &pcie_intc 1>, + <0 0 0 3 &pcie_intc 2>, + <0 0 0 4 &pcie_intc 3>; + pcie_intc: interrupt-controller { + interrupt-controller; + #interrupt-cells = <1>; + }; + }; -- cgit v0.10.2 From 8c39d710363c14ecb09219332869707395d1d495 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Thu, 30 Jun 2016 11:32:31 +0200 Subject: PCI: aardvark: Add Aardvark PCI host controller driver Add a driver for the Aardvark PCIe controller used on the Marvell Armada 3700 ARM64 SoC. Based on work done by Hezi Shahmoon and Marcin Wojtas . Signed-off-by: Thomas Petazzoni Signed-off-by: Bjorn Helgaas diff --git a/MAINTAINERS b/MAINTAINERS index ed42cb6..0326ecc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8741,6 +8741,13 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: drivers/pci/host/*mvebu* +PCI DRIVER FOR AARDVARK (Marvell Armada 3700) +M: Thomas Petazzoni +L: linux-pci@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: drivers/pci/host/pci-aardvark.c + PCI DRIVER FOR NVIDIA TEGRA M: Thierry Reding L: linux-tegra@vger.kernel.org diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 5d2374e..c9806c4 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -16,6 +16,15 @@ config PCI_MVEBU depends on ARM depends on OF +config PCI_AARDVARK + bool "Aardvark PCIe controller" + depends on ARCH_MVEBU && ARM64 + depends on OF + depends on PCI_MSI_IRQ_DOMAIN + help + Add support for Aardvark 64bit PCIe Host Controller. This + controller is part of the South Bridge of the Marvel Armada + 3700 SoC. 
config PCIE_XILINX_NWL bool "NWL PCIe Core" diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index 9c8698e..66f45b6 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o obj-$(CONFIG_PCI_IMX6) += pci-imx6.o obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o +obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c new file mode 100644 index 0000000..642583d --- /dev/null +++ b/drivers/pci/host/pci-aardvark.c @@ -0,0 +1,1012 @@ +/* + * Driver for the Aardvark PCIe controller, used on Marvell Armada + * 3700. + * + * Copyright (C) 2016 Marvell + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* PCIe core registers */ +#define PCIE_CORE_CMD_STATUS_REG 0x4 +#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0) +#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1) +#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2) +#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8 +#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4) +#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5 +#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11) +#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12 +#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0 +#define PCIE_CORE_LINK_L0S_ENTRY BIT(0) +#define PCIE_CORE_LINK_TRAINING BIT(5) +#define PCIE_CORE_LINK_WIDTH_SHIFT 20 +#define PCIE_CORE_ERR_CAPCTL_REG 0x118 +#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5) +#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6) +#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7) +#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8) + +/* PIO registers base address and register offsets */ +#define PIO_BASE_ADDR 0x4000 +#define PIO_CTRL (PIO_BASE_ADDR + 0x0) +#define PIO_CTRL_TYPE_MASK GENMASK(3, 0) +#define PIO_CTRL_ADDR_WIN_DISABLE BIT(24) +#define PIO_STAT (PIO_BASE_ADDR + 0x4) +#define PIO_COMPLETION_STATUS_SHIFT 7 +#define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7) +#define PIO_COMPLETION_STATUS_OK 0 +#define PIO_COMPLETION_STATUS_UR 1 +#define PIO_COMPLETION_STATUS_CRS 2 +#define PIO_COMPLETION_STATUS_CA 4 +#define PIO_NON_POSTED_REQ BIT(0) +#define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8) +#define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc) +#define PIO_WR_DATA (PIO_BASE_ADDR + 0x10) +#define PIO_WR_DATA_STRB (PIO_BASE_ADDR + 0x14) +#define PIO_RD_DATA (PIO_BASE_ADDR + 0x18) +#define PIO_START (PIO_BASE_ADDR + 0x1c) +#define PIO_ISR (PIO_BASE_ADDR + 0x20) +#define PIO_ISRM (PIO_BASE_ADDR + 0x24) + +/* Aardvark Control registers */ +#define CONTROL_BASE_ADDR 0x4800 +#define PCIE_CORE_CTRL0_REG (CONTROL_BASE_ADDR + 0x0) +#define PCIE_GEN_SEL_MSK 0x3 +#define PCIE_GEN_SEL_SHIFT 0x0 +#define SPEED_GEN_1 0 +#define SPEED_GEN_2 1 +#define SPEED_GEN_3 2 +#define IS_RC_MSK 1 +#define IS_RC_SHIFT 2 +#define LANE_CNT_MSK 0x18 +#define LANE_CNT_SHIFT 0x3 +#define LANE_COUNT_1 (0 << LANE_CNT_SHIFT) +#define LANE_COUNT_2 (1 << LANE_CNT_SHIFT) +#define LANE_COUNT_4 (2 << LANE_CNT_SHIFT) +#define LANE_COUNT_8 (3 << LANE_CNT_SHIFT) +#define LINK_TRAINING_EN BIT(6) +#define LEGACY_INTA BIT(28) +#define LEGACY_INTB BIT(29) +#define LEGACY_INTC 
BIT(30) +#define LEGACY_INTD BIT(31) +#define PCIE_CORE_CTRL1_REG (CONTROL_BASE_ADDR + 0x4) +#define HOT_RESET_GEN BIT(0) +#define PCIE_CORE_CTRL2_REG (CONTROL_BASE_ADDR + 0x8) +#define PCIE_CORE_CTRL2_RESERVED 0x7 +#define PCIE_CORE_CTRL2_TD_ENABLE BIT(4) +#define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5) +#define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6) +#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10) +#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40) +#define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44) +#define PCIE_ISR0_MSI_INT_PENDING BIT(24) +#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val)) +#define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val)) +#define PCIE_ISR0_ALL_MASK GENMASK(26, 0) +#define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48) +#define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C) +#define PCIE_ISR1_POWER_STATE_CHANGE BIT(4) +#define PCIE_ISR1_FLUSH BIT(5) +#define PCIE_ISR1_ALL_MASK GENMASK(5, 4) +#define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50) +#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) +#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) +#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C) +#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C) + +/* PCIe window configuration */ +#define OB_WIN_BASE_ADDR 0x4c00 +#define OB_WIN_BLOCK_SIZE 0x20 +#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \ + OB_WIN_BLOCK_SIZE * (win) + \ + (offset)) +#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00) +#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04) +#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08) +#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c) +#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10) +#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14) +#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18) + +/* PCIe window types */ +#define OB_PCIE_MEM 0x0 +#define OB_PCIE_IO 0x4 + +/* LMI registers base address and register offsets */ +#define LMI_BASE_ADDR 0x6000 +#define CFG_REG (LMI_BASE_ADDR + 0x0) +#define LTSSM_SHIFT 24 +#define LTSSM_MASK 0x3f +#define LTSSM_L0 0x10 +#define RC_BAR_CONFIG 0x300 + +/* PCIe core controller registers */ +#define CTRL_CORE_BASE_ADDR 0x18000 +#define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0) +#define CTRL_MODE_SHIFT 0x0 +#define CTRL_MODE_MASK 0x1 +#define PCIE_CORE_MODE_DIRECT 0x0 +#define PCIE_CORE_MODE_COMMAND 0x1 + +/* PCIe Central Interrupts Registers */ +#define CENTRAL_INT_BASE_ADDR 0x1b000 +#define HOST_CTRL_INT_STATUS_REG (CENTRAL_INT_BASE_ADDR + 0x0) +#define HOST_CTRL_INT_MASK_REG (CENTRAL_INT_BASE_ADDR + 0x4) +#define PCIE_IRQ_CMDQ_INT BIT(0) +#define PCIE_IRQ_MSI_STATUS_INT BIT(1) +#define PCIE_IRQ_CMD_SENT_DONE BIT(3) +#define PCIE_IRQ_DMA_INT BIT(4) +#define PCIE_IRQ_IB_DXFERDONE BIT(5) +#define PCIE_IRQ_OB_DXFERDONE BIT(6) +#define PCIE_IRQ_OB_RXFERDONE BIT(7) +#define PCIE_IRQ_COMPQ_INT BIT(12) +#define PCIE_IRQ_DIR_RD_DDR_DET BIT(13) +#define PCIE_IRQ_DIR_WR_DDR_DET BIT(14) +#define PCIE_IRQ_CORE_INT BIT(16) +#define PCIE_IRQ_CORE_INT_PIO BIT(17) +#define PCIE_IRQ_DPMU_INT BIT(18) +#define PCIE_IRQ_PCIE_MIS_INT BIT(19) +#define PCIE_IRQ_MSI_INT1_DET BIT(20) +#define PCIE_IRQ_MSI_INT2_DET BIT(21) +#define PCIE_IRQ_RC_DBELL_DET BIT(22) +#define PCIE_IRQ_EP_STATUS BIT(23) +#define PCIE_IRQ_ALL_MASK 0xfff0fb +#define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT + +/* Transaction types */ +#define PCIE_CONFIG_RD_TYPE0 0x8 +#define PCIE_CONFIG_RD_TYPE1 0x9 +#define PCIE_CONFIG_WR_TYPE0 0xa +#define PCIE_CONFIG_WR_TYPE1 0xb + +/* PCI_BDF shifts 8bit, so we need extra 4bit 
shift */ +#define PCIE_BDF(dev) (dev << 4) +#define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20) +#define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15) +#define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12) +#define PCIE_CONF_REG(reg) ((reg) & 0xffc) +#define PCIE_CONF_ADDR(bus, devfn, where) \ + (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \ + PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where)) + +#define PIO_TIMEOUT_MS 1 + +#define LINK_WAIT_MAX_RETRIES 10 +#define LINK_WAIT_USLEEP_MIN 90000 +#define LINK_WAIT_USLEEP_MAX 100000 + +#define LEGACY_IRQ_NUM 4 +#define MSI_IRQ_NUM 32 + +struct advk_pcie { + struct platform_device *pdev; + void __iomem *base; + struct list_head resources; + struct irq_domain *irq_domain; + struct irq_chip irq_chip; + struct msi_controller msi; + struct irq_domain *msi_domain; + struct irq_chip msi_irq_chip; + DECLARE_BITMAP(msi_irq_in_use, MSI_IRQ_NUM); + struct mutex msi_used_lock; + u16 msi_msg; + int root_bus_nr; +}; + +static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg) +{ + writel(val, pcie->base + reg); +} + +static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg) +{ + return readl(pcie->base + reg); +} + +static int advk_pcie_link_up(struct advk_pcie *pcie) +{ + u32 val, ltssm_state; + + val = advk_readl(pcie, CFG_REG); + ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK; + return ltssm_state >= LTSSM_L0; +} + +static int advk_pcie_wait_for_link(struct advk_pcie *pcie) +{ + int retries; + + /* check if the link is up or not */ + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { + if (advk_pcie_link_up(pcie)) { + dev_info(&pcie->pdev->dev, "link up\n"); + return 0; + } + + usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); + } + + dev_err(&pcie->pdev->dev, "link never came up\n"); + + return -ETIMEDOUT; +} + +/* + * Set PCIe address window register which could be used for memory + * mapping. 
+ */ +static void advk_pcie_set_ob_win(struct advk_pcie *pcie, + u32 win_num, u32 match_ms, + u32 match_ls, u32 mask_ms, + u32 mask_ls, u32 remap_ms, + u32 remap_ls, u32 action) +{ + advk_writel(pcie, match_ls, OB_WIN_MATCH_LS(win_num)); + advk_writel(pcie, match_ms, OB_WIN_MATCH_MS(win_num)); + advk_writel(pcie, mask_ms, OB_WIN_MASK_MS(win_num)); + advk_writel(pcie, mask_ls, OB_WIN_MASK_LS(win_num)); + advk_writel(pcie, remap_ms, OB_WIN_REMAP_MS(win_num)); + advk_writel(pcie, remap_ls, OB_WIN_REMAP_LS(win_num)); + advk_writel(pcie, action, OB_WIN_ACTIONS(win_num)); + advk_writel(pcie, match_ls | BIT(0), OB_WIN_MATCH_LS(win_num)); +} + +static void advk_pcie_setup_hw(struct advk_pcie *pcie) +{ + u32 reg; + int i; + + /* Point PCIe unit MBUS decode windows to DRAM space */ + for (i = 0; i < 8; i++) + advk_pcie_set_ob_win(pcie, i, 0, 0, 0, 0, 0, 0, 0); + + /* Set to Direct mode */ + reg = advk_readl(pcie, CTRL_CONFIG_REG); + reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT); + reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT); + advk_writel(pcie, reg, CTRL_CONFIG_REG); + + /* Set PCI global control register to RC mode */ + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); + reg |= (IS_RC_MSK << IS_RC_SHIFT); + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); + + /* Set Advanced Error Capabilities and Control PF0 register */ + reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX | + PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN | + PCIE_CORE_ERR_CAPCTL_ECRC_CHCK | + PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV; + advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG); + + /* Set PCIe Device Control and Status 1 PF0 register */ + reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE | + (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) | + PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE | + PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT; + advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG); + + /* Program PCIe Control 2 to disable strict ordering */ + reg = PCIE_CORE_CTRL2_RESERVED | + PCIE_CORE_CTRL2_TD_ENABLE; + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); + + /* Set GEN2 */ + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); + reg &= ~PCIE_GEN_SEL_MSK; + reg |= SPEED_GEN_2; + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); + + /* Set lane X1 */ + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); + reg &= ~LANE_CNT_MSK; + reg |= LANE_COUNT_1; + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); + + /* Enable link training */ + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); + reg |= LINK_TRAINING_EN; + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); + + /* Enable MSI */ + reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG); + reg |= PCIE_CORE_CTRL2_MSI_ENABLE; + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); + + /* Clear all interrupts */ + advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG); + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); + advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); + + /* Disable All ISR0/1 Sources */ + reg = PCIE_ISR0_ALL_MASK; + reg &= ~PCIE_ISR0_MSI_INT_PENDING; + advk_writel(pcie, reg, PCIE_ISR0_MASK_REG); + + advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); + + /* Unmask all MSI's */ + advk_writel(pcie, 0, PCIE_MSI_MASK_REG); + + /* Enable summary interrupt for GIC SPI source */ + reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK); + advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG); + + reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG); + reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE; + advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); + + /* Bypass the address window mapping for PIO */ + reg = advk_readl(pcie, PIO_CTRL); 
+ reg |= PIO_CTRL_ADDR_WIN_DISABLE; + advk_writel(pcie, reg, PIO_CTRL); + + /* Start link training */ + reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG); + reg |= PCIE_CORE_LINK_TRAINING; + advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG); + + advk_pcie_wait_for_link(pcie); + + reg = PCIE_CORE_LINK_L0S_ENTRY | + (1 << PCIE_CORE_LINK_WIDTH_SHIFT); + advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG); + + reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); + reg |= PCIE_CORE_CMD_MEM_ACCESS_EN | + PCIE_CORE_CMD_IO_ACCESS_EN | + PCIE_CORE_CMD_MEM_IO_REQ_EN; + advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG); +} + +static void advk_pcie_check_pio_status(struct advk_pcie *pcie) +{ + u32 reg; + unsigned int status; + char *strcomp_status, *str_posted; + + reg = advk_readl(pcie, PIO_STAT); + status = (reg & PIO_COMPLETION_STATUS_MASK) >> + PIO_COMPLETION_STATUS_SHIFT; + + if (!status) + return; + + switch (status) { + case PIO_COMPLETION_STATUS_UR: + strcomp_status = "UR"; + break; + case PIO_COMPLETION_STATUS_CRS: + strcomp_status = "CRS"; + break; + case PIO_COMPLETION_STATUS_CA: + strcomp_status = "CA"; + break; + default: + strcomp_status = "Unknown"; + break; + } + + if (reg & PIO_NON_POSTED_REQ) + str_posted = "Non-posted"; + else + str_posted = "Posted"; + + dev_err(&pcie->pdev->dev, "%s PIO Response Status: %s, %#x @ %#x\n", + str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS)); +} + +static int advk_pcie_wait_pio(struct advk_pcie *pcie) +{ + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS); + + while (time_before(jiffies, timeout)) { + u32 start, isr; + + start = advk_readl(pcie, PIO_START); + isr = advk_readl(pcie, PIO_ISR); + if (!start && isr) + return 0; + } + + dev_err(&pcie->pdev->dev, "config read/write timed out\n"); + return -ETIMEDOUT; +} + +static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 *val) +{ + struct advk_pcie *pcie = bus->sysdata; + u32 reg; + int ret; + + if (PCI_SLOT(devfn) != 0) { + *val = 0xffffffff; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + /* Start PIO */ + advk_writel(pcie, 0, PIO_START); + advk_writel(pcie, 1, PIO_ISR); + + /* Program the control register */ + reg = advk_readl(pcie, PIO_CTRL); + reg &= ~PIO_CTRL_TYPE_MASK; + if (bus->number == pcie->root_bus_nr) + reg |= PCIE_CONFIG_RD_TYPE0; + else + reg |= PCIE_CONFIG_RD_TYPE1; + advk_writel(pcie, reg, PIO_CTRL); + + /* Program the address registers */ + reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where); + advk_writel(pcie, reg, PIO_ADDR_LS); + advk_writel(pcie, 0, PIO_ADDR_MS); + + /* Program the data strobe */ + advk_writel(pcie, 0xf, PIO_WR_DATA_STRB); + + /* Start the transfer */ + advk_writel(pcie, 1, PIO_START); + + ret = advk_pcie_wait_pio(pcie); + if (ret < 0) + return PCIBIOS_SET_FAILED; + + advk_pcie_check_pio_status(pcie); + + /* Get the read result */ + *val = advk_readl(pcie, PIO_RD_DATA); + if (size == 1) + *val = (*val >> (8 * (where & 3))) & 0xff; + else if (size == 2) + *val = (*val >> (8 * (where & 3))) & 0xffff; + + return PCIBIOS_SUCCESSFUL; +} + +static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, + int where, int size, u32 val) +{ + struct advk_pcie *pcie = bus->sysdata; + u32 reg; + u32 data_strobe = 0x0; + int offset; + int ret; + + if (PCI_SLOT(devfn) != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (where % size) + return PCIBIOS_SET_FAILED; + + /* Start PIO */ + advk_writel(pcie, 0, PIO_START); + advk_writel(pcie, 1, PIO_ISR); + + /* Program the control register */ + reg = advk_readl(pcie, 
PIO_CTRL); + reg &= ~PIO_CTRL_TYPE_MASK; + if (bus->number == pcie->root_bus_nr) + reg |= PCIE_CONFIG_WR_TYPE0; + else + reg |= PCIE_CONFIG_WR_TYPE1; + advk_writel(pcie, reg, PIO_CTRL); + + /* Program the address registers */ + reg = PCIE_CONF_ADDR(bus->number, devfn, where); + advk_writel(pcie, reg, PIO_ADDR_LS); + advk_writel(pcie, 0, PIO_ADDR_MS); + + /* Calculate the write strobe */ + offset = where & 0x3; + reg = val << (8 * offset); + data_strobe = GENMASK(size - 1, 0) << offset; + + /* Program the data register */ + advk_writel(pcie, reg, PIO_WR_DATA); + + /* Program the data strobe */ + advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB); + + /* Start the transfer */ + advk_writel(pcie, 1, PIO_START); + + ret = advk_pcie_wait_pio(pcie); + if (ret < 0) + return PCIBIOS_SET_FAILED; + + advk_pcie_check_pio_status(pcie); + + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops advk_pcie_ops = { + .read = advk_pcie_rd_conf, + .write = advk_pcie_wr_conf, +}; + +static int advk_pcie_alloc_msi(struct advk_pcie *pcie) +{ + int hwirq; + + mutex_lock(&pcie->msi_used_lock); + hwirq = find_first_zero_bit(pcie->msi_irq_in_use, MSI_IRQ_NUM); + if (hwirq >= MSI_IRQ_NUM) + hwirq = -ENOSPC; + else + set_bit(hwirq, pcie->msi_irq_in_use); + mutex_unlock(&pcie->msi_used_lock); + + return hwirq; +} + +static void advk_pcie_free_msi(struct advk_pcie *pcie, int hwirq) +{ + mutex_lock(&pcie->msi_used_lock); + if (!test_bit(hwirq, pcie->msi_irq_in_use)) + dev_err(&pcie->pdev->dev, "trying to free unused MSI#%d\n", + hwirq); + else + clear_bit(hwirq, pcie->msi_irq_in_use); + mutex_unlock(&pcie->msi_used_lock); +} + +static int advk_pcie_setup_msi_irq(struct msi_controller *chip, + struct pci_dev *pdev, + struct msi_desc *desc) +{ + struct advk_pcie *pcie = pdev->bus->sysdata; + struct msi_msg msg; + int virq, hwirq; + phys_addr_t msi_msg_phys; + + /* We support MSI, but not MSI-X */ + if (desc->msi_attrib.is_msix) + return -EINVAL; + + hwirq = advk_pcie_alloc_msi(pcie); + if (hwirq < 0) + return hwirq; + + virq = irq_create_mapping(pcie->msi_domain, hwirq); + if (!virq) { + advk_pcie_free_msi(pcie, hwirq); + return -EINVAL; + } + + irq_set_msi_desc(virq, desc); + + msi_msg_phys = virt_to_phys(&pcie->msi_msg); + + msg.address_lo = lower_32_bits(msi_msg_phys); + msg.address_hi = upper_32_bits(msi_msg_phys); + msg.data = virq; + + pci_write_msi_msg(virq, &msg); + + return 0; +} + +static void advk_pcie_teardown_msi_irq(struct msi_controller *chip, + unsigned int irq) +{ + struct irq_data *d = irq_get_irq_data(irq); + struct msi_desc *msi = irq_data_get_msi_desc(d); + struct advk_pcie *pcie = msi_desc_to_pci_sysdata(msi); + unsigned long hwirq = d->hwirq; + + irq_dispose_mapping(irq); + advk_pcie_free_msi(pcie, hwirq); +} + +static int advk_pcie_msi_map(struct irq_domain *domain, + unsigned int virq, irq_hw_number_t hw) +{ + struct advk_pcie *pcie = domain->host_data; + + irq_set_chip_and_handler(virq, &pcie->msi_irq_chip, + handle_simple_irq); + + return 0; +} + +static const struct irq_domain_ops advk_pcie_msi_irq_ops = { + .map = advk_pcie_msi_map, +}; + +static void advk_pcie_irq_mask(struct irq_data *d) +{ + struct advk_pcie *pcie = d->domain->host_data; + irq_hw_number_t hwirq = irqd_to_hwirq(d); + u32 mask; + + mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); + mask |= PCIE_ISR0_INTX_ASSERT(hwirq); + advk_writel(pcie, mask, PCIE_ISR0_MASK_REG); +} + +static void advk_pcie_irq_unmask(struct irq_data *d) +{ + struct advk_pcie *pcie = d->domain->host_data; + irq_hw_number_t hwirq = irqd_to_hwirq(d); + u32 mask; + 
+ mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); + mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq); + advk_writel(pcie, mask, PCIE_ISR0_MASK_REG); +} + +static int advk_pcie_irq_map(struct irq_domain *h, + unsigned int virq, irq_hw_number_t hwirq) +{ + struct advk_pcie *pcie = h->host_data; + + advk_pcie_irq_mask(irq_get_irq_data(virq)); + irq_set_status_flags(virq, IRQ_LEVEL); + irq_set_chip_and_handler(virq, &pcie->irq_chip, + handle_level_irq); + irq_set_chip_data(virq, pcie); + + return 0; +} + +static const struct irq_domain_ops advk_pcie_irq_domain_ops = { + .map = advk_pcie_irq_map, + .xlate = irq_domain_xlate_onecell, +}; + +static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + struct device_node *node = dev->of_node; + struct irq_chip *msi_irq_chip; + struct msi_controller *msi; + phys_addr_t msi_msg_phys; + int ret; + + msi_irq_chip = &pcie->msi_irq_chip; + + msi_irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-msi", + dev_name(dev)); + if (!msi_irq_chip->name) + return -ENOMEM; + + msi_irq_chip->irq_enable = pci_msi_unmask_irq; + msi_irq_chip->irq_disable = pci_msi_mask_irq; + msi_irq_chip->irq_mask = pci_msi_mask_irq; + msi_irq_chip->irq_unmask = pci_msi_unmask_irq; + + msi = &pcie->msi; + + msi->setup_irq = advk_pcie_setup_msi_irq; + msi->teardown_irq = advk_pcie_teardown_msi_irq; + msi->of_node = node; + + mutex_init(&pcie->msi_used_lock); + + msi_msg_phys = virt_to_phys(&pcie->msi_msg); + + advk_writel(pcie, lower_32_bits(msi_msg_phys), + PCIE_MSI_ADDR_LOW_REG); + advk_writel(pcie, upper_32_bits(msi_msg_phys), + PCIE_MSI_ADDR_HIGH_REG); + + pcie->msi_domain = + irq_domain_add_linear(NULL, MSI_IRQ_NUM, + &advk_pcie_msi_irq_ops, pcie); + if (!pcie->msi_domain) + return -ENOMEM; + + ret = of_pci_msi_chip_add(msi); + if (ret < 0) { + irq_domain_remove(pcie->msi_domain); + return ret; + } + + return 0; +} + +static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie) +{ + of_pci_msi_chip_remove(&pcie->msi); + irq_domain_remove(pcie->msi_domain); +} + +static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) +{ + struct device *dev = &pcie->pdev->dev; + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node; + struct irq_chip *irq_chip; + + pcie_intc_node = of_get_next_child(node, NULL); + if (!pcie_intc_node) { + dev_err(dev, "No PCIe Intc node found\n"); + return -ENODEV; + } + + irq_chip = &pcie->irq_chip; + + irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq", + dev_name(dev)); + if (!irq_chip->name) { + of_node_put(pcie_intc_node); + return -ENOMEM; + } + + irq_chip->irq_mask = advk_pcie_irq_mask; + irq_chip->irq_mask_ack = advk_pcie_irq_mask; + irq_chip->irq_unmask = advk_pcie_irq_unmask; + + pcie->irq_domain = + irq_domain_add_linear(pcie_intc_node, LEGACY_IRQ_NUM, + &advk_pcie_irq_domain_ops, pcie); + if (!pcie->irq_domain) { + dev_err(dev, "Failed to get a INTx IRQ domain\n"); + of_node_put(pcie_intc_node); + return -ENOMEM; + } + + return 0; +} + +static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie) +{ + irq_domain_remove(pcie->irq_domain); +} + +static void advk_pcie_handle_msi(struct advk_pcie *pcie) +{ + u32 msi_val, msi_mask, msi_status, msi_idx; + u16 msi_data; + + msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG); + msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG); + msi_status = msi_val & ~msi_mask; + + for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) { + if (!(BIT(msi_idx) & msi_status)) + continue; + + advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG); + 
msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & 0xFF; + generic_handle_irq(msi_data); + } + + advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING, + PCIE_ISR0_REG); +} + +static void advk_pcie_handle_int(struct advk_pcie *pcie) +{ + u32 val, mask, status; + int i, virq; + + val = advk_readl(pcie, PCIE_ISR0_REG); + mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); + status = val & ((~mask) & PCIE_ISR0_ALL_MASK); + + if (!status) { + advk_writel(pcie, val, PCIE_ISR0_REG); + return; + } + + /* Process MSI interrupts */ + if (status & PCIE_ISR0_MSI_INT_PENDING) + advk_pcie_handle_msi(pcie); + + /* Process legacy interrupts */ + for (i = 0; i < LEGACY_IRQ_NUM; i++) { + if (!(status & PCIE_ISR0_INTX_ASSERT(i))) + continue; + + advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i), + PCIE_ISR0_REG); + + virq = irq_find_mapping(pcie->irq_domain, i); + generic_handle_irq(virq); + } +} + +static irqreturn_t advk_pcie_irq_handler(int irq, void *arg) +{ + struct advk_pcie *pcie = arg; + u32 status; + + status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG); + if (!(status & PCIE_IRQ_CORE_INT)) + return IRQ_NONE; + + advk_pcie_handle_int(pcie); + + /* Clear interrupt */ + advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG); + + return IRQ_HANDLED; +} + +static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie) +{ + int err, res_valid = 0; + struct device *dev = &pcie->pdev->dev; + struct device_node *np = dev->of_node; + struct resource_entry *win; + resource_size_t iobase; + + INIT_LIST_HEAD(&pcie->resources); + + err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pcie->resources, + &iobase); + if (err) + return err; + + resource_list_for_each_entry(win, &pcie->resources) { + struct resource *parent = NULL; + struct resource *res = win->res; + + switch (resource_type(res)) { + case IORESOURCE_IO: + parent = &ioport_resource; + advk_pcie_set_ob_win(pcie, 1, + upper_32_bits(res->start), + lower_32_bits(res->start), + 0, 0xF8000000, 0, + lower_32_bits(res->start), + OB_PCIE_IO); + err = pci_remap_iospace(res, iobase); + if (err) { + dev_warn(dev, "error %d: failed to map resource %pR\n", + err, res); + continue; + } + break; + case IORESOURCE_MEM: + parent = &iomem_resource; + advk_pcie_set_ob_win(pcie, 0, + upper_32_bits(res->start), + lower_32_bits(res->start), + 0x0, 0xF8000000, 0, + lower_32_bits(res->start), + (2 << 20) | OB_PCIE_MEM); + res_valid |= !(res->flags & IORESOURCE_PREFETCH); + break; + case IORESOURCE_BUS: + pcie->root_bus_nr = res->start; + break; + default: + continue; + } + + if (parent) { + err = devm_request_resource(dev, parent, res); + if (err) + goto out_release_res; + } + } + + if (!res_valid) { + dev_err(dev, "non-prefetchable memory resource required\n"); + err = -EINVAL; + goto out_release_res; + } + + return 0; + +out_release_res: + pci_free_resource_list(&pcie->resources); + return err; +} + +static int advk_pcie_probe(struct platform_device *pdev) +{ + struct advk_pcie *pcie; + struct resource *res; + struct pci_bus *bus, *child; + struct msi_controller *msi; + struct device_node *msi_node; + int ret, irq; + + pcie = devm_kzalloc(&pdev->dev, sizeof(struct advk_pcie), + GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->pdev = pdev; + platform_set_drvdata(pdev, pcie); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pcie->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pcie->base)) { + dev_err(&pdev->dev, "Failed to map registers\n"); + return PTR_ERR(pcie->base); + } + + irq = platform_get_irq(pdev, 0); + ret = devm_request_irq(&pdev->dev, 
irq, advk_pcie_irq_handler, + IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie", + pcie); + if (ret) { + dev_err(&pdev->dev, "Failed to register interrupt\n"); + return ret; + } + + ret = advk_pcie_parse_request_of_pci_ranges(pcie); + if (ret) { + dev_err(&pdev->dev, "Failed to parse resources\n"); + return ret; + } + + advk_pcie_setup_hw(pcie); + + ret = advk_pcie_init_irq_domain(pcie); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize irq\n"); + return ret; + } + + ret = advk_pcie_init_msi_irq_domain(pcie); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize irq\n"); + advk_pcie_remove_irq_domain(pcie); + return ret; + } + + msi_node = of_parse_phandle(pdev->dev.of_node, "msi-parent", 0); + if (msi_node) + msi = of_pci_find_msi_chip_by_node(msi_node); + else + msi = NULL; + + bus = pci_scan_root_bus_msi(&pdev->dev, 0, &advk_pcie_ops, + pcie, &pcie->resources, &pcie->msi); + if (!bus) { + advk_pcie_remove_msi_irq_domain(pcie); + advk_pcie_remove_irq_domain(pcie); + return -ENOMEM; + } + + pci_bus_assign_resources(bus); + + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); + + pci_bus_add_devices(bus); + + return 0; +} + +static const struct of_device_id advk_pcie_of_match_table[] = { + { .compatible = "marvell,armada-3700-pcie", }, + {}, +}; + +static struct platform_driver advk_pcie_driver = { + .driver = { + .name = "advk-pcie", + .of_match_table = advk_pcie_of_match_table, + /* Driver unloading/unbinding currently not supported */ + .suppress_bind_attrs = true, + }, + .probe = advk_pcie_probe, +}; +module_platform_driver(advk_pcie_driver); + +MODULE_AUTHOR("Hezi Shahmoon "); +MODULE_DESCRIPTION("Aardvark PCIe driver"); +MODULE_LICENSE("GPL v2"); -- cgit v0.10.2 From 76f6386b25cc2359a547750b5d128ddab3c43cfb Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Thu, 30 Jun 2016 11:32:32 +0200 Subject: arm64: dts: marvell: Add Aardvark PCIe support for Armada 3700 Add the SoC-level description of the PCIe controller found on the Marvell Armada 3700 and enable this PCIe controller on the development board for this SoC. Signed-off-by: Thomas Petazzoni Signed-off-by: Bjorn Helgaas diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts index 86110a6..1372e9a6 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts @@ -76,3 +76,8 @@ &usb3 { status = "okay"; }; + +/* CON17 (PCIe) / CON12 (mini-PCIe) */ +&pcie0 { + status = "okay"; +}; diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi index 9e2efb8..8a9cae9 100644 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi @@ -141,5 +141,30 @@ <0x1d40000 0x40000>; /* GICR */ }; }; + + pcie0: pcie@d0070000 { + compatible = "marvell,armada-3700-pcie"; + device_type = "pci"; + status = "disabled"; + reg = <0 0xd0070000 0 0x20000>; + #address-cells = <3>; + #size-cells = <2>; + bus-range = <0x00 0xff>; + interrupts = ; + #interrupt-cells = <1>; + msi-parent = <&pcie0>; + msi-controller; + ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x1000000 /* Port 0 MEM */ + 0x81000000 0 0xe9000000 0 0xe9000000 0 0x10000>; /* Port 0 IO*/ + interrupt-map-mask = <0 0 0 7>; + interrupt-map = <0 0 0 1 &pcie_intc 0>, + <0 0 0 2 &pcie_intc 1>, + <0 0 0 3 &pcie_intc 2>, + <0 0 0 4 &pcie_intc 3>; + pcie_intc: interrupt-controller { + interrupt-controller; + #interrupt-cells = <1>; + }; + }; }; }; -- cgit v0.10.2
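Usage note for the Aardvark support added by this series: a kernel configuration fragment along the following lines should be enough to build the new host controller driver into an ARM64 Armada 3700 kernel. This is only a sketch based on the Kconfig entry introduced above (which depends on ARCH_MVEBU, ARM64, OF and PCI_MSI_IRQ_DOMAIN); the remaining platform options are assumed to come from the usual mvebu/ARM64 defconfig and are not spelled out here.

	CONFIG_PCI=y
	CONFIG_PCI_MSI=y
	CONFIG_PCI_AARDVARK=y

With the driver built in, a board only needs to enable the SoC-level node, as the armada-3720-db.dts hunk above does with the '&pcie0 { status = "okay"; };' override.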