author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-03-25 03:53:42 -0300
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-03-25 03:53:42 -0300
commit | 03dd4cb26d967f9588437b0fc9cc0e8353322bb7
tree | fa581f6dc1c0596391690d1f67eceef3af8246dc /drivers/pci/host
parent | d4e493caf788ef44982e131ff9c786546904d934
Linux-libre 4.5-gnu
Diffstat (limited to 'drivers/pci/host')
-rw-r--r-- | drivers/pci/host/Kconfig | 37
-rw-r--r-- | drivers/pci/host/Makefile | 2
-rw-r--r-- | drivers/pci/host/pci-host-generic.c | 9
-rw-r--r-- | drivers/pci/host/pci-imx6.c | 22
-rw-r--r-- | drivers/pci/host/pci-layerscape.c | 21
-rw-r--r-- | drivers/pci/host/pci-rcar-gen2.c | 77
-rw-r--r-- | drivers/pci/host/pci-versatile.c | 5
-rw-r--r-- | drivers/pci/host/pcie-designware.c | 62
-rw-r--r-- | drivers/pci/host/pcie-hisi.c | 76
-rw-r--r-- | drivers/pci/host/pcie-iproc-bcma.c | 1
-rw-r--r-- | drivers/pci/host/pcie-iproc-msi.c | 675
-rw-r--r-- | drivers/pci/host/pcie-iproc-platform.c | 25
-rw-r--r-- | drivers/pci/host/pcie-iproc.c | 221
-rw-r--r-- | drivers/pci/host/pcie-iproc.h | 42
-rw-r--r-- | drivers/pci/host/pcie-qcom.c | 616
-rw-r--r-- | drivers/pci/host/pcie-rcar.c | 200
16 files changed, 1888 insertions, 203 deletions
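For orientation before the diff itself: the pci-imx6.c hunk below replaces the integer-based of_get_named_gpio()/gpio_set_value() handling of the PCIe reset line with GPIO descriptors (devm_gpiod_get_optional() and gpiod_set_value_cansleep()). The following is a minimal, self-contained sketch of that descriptor pattern, not code taken from the commit; the foo_* names are placeholders chosen here for illustration.

/*
 * Illustrative sketch only: optional reset-GPIO handling with descriptors,
 * the pattern the pci-imx6.c change below adopts. foo_priv/foo_probe are
 * placeholder names, not identifiers from the commit.
 */
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
	struct gpio_desc *reset_gpio;	/* NULL when the board has no reset line */
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/*
	 * "reset" matches the reset-gpios/reset-gpio DT property. The
	 * _optional variant returns NULL (not an error) when the property
	 * is absent, so boards without a reset line need no special case.
	 */
	priv->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
						   GPIOD_OUT_LOW);
	if (IS_ERR(priv->reset_gpio))
		return PTR_ERR(priv->reset_gpio);

	/* gpiod_* calls are no-ops on a NULL descriptor, so no validity check */
	gpiod_set_value_cansleep(priv->reset_gpio, 0);
	msleep(100);
	gpiod_set_value_cansleep(priv->reset_gpio, 1);

	return 0;
}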
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index c0ad9aaa1..d1cdd9c99 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -14,6 +14,7 @@ config PCI_DRA7XX config PCI_MVEBU bool "Marvell EBU PCIe controller" depends on ARCH_MVEBU || ARCH_DOVE + depends on ARM depends on OF config PCIE_DW @@ -49,8 +50,7 @@ config PCI_RCAR_GEN2 config PCI_RCAR_GEN2_PCIE bool "Renesas R-Car PCIe controller" - depends on ARM - depends on ARCH_SHMOBILE || COMPILE_TEST + depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST) help Say Y here if you want PCIe controller support on R-Car Gen2 SoCs. @@ -119,13 +119,11 @@ config PCI_VERSATILE depends on ARCH_VERSATILE config PCIE_IPROC - tristate "Broadcom iProc PCIe controller" - depends on OF && (ARM || ARM64) - default n + tristate help This enables the iProc PCIe core controller support for Broadcom's - iProc family of SoCs. An appropriate bus interface driver also needs - to be enabled + iProc family of SoCs. An appropriate bus interface driver needs + to be enabled to select this. config PCIE_IPROC_PLATFORM tristate "Broadcom iProc PCIe platform bus driver" @@ -148,6 +146,16 @@ config PCIE_IPROC_BCMA Say Y here if you want to use the Broadcom iProc PCIe controller through the BCMA bus interface +config PCIE_IPROC_MSI + bool "Broadcom iProc PCIe MSI support" + depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA + depends on PCI_MSI + select PCI_MSI_IRQ_DOMAIN + default ARCH_BCM_IPROC + help + Say Y here if you want to enable MSI support for Broadcom's iProc + PCIe controller + config PCIE_ALTERA bool "Altera PCIe controller" depends on ARM || NIOS2 @@ -167,10 +175,21 @@ config PCIE_ALTERA_MSI config PCI_HISI depends on OF && ARM64 - bool "HiSilicon SoC HIP05 PCIe controller" + bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers" select PCIEPORTBUS select PCIE_DW help - Say Y here if you want PCIe controller support on HiSilicon HIP05 SoC + Say Y here if you want PCIe controller support on HiSilicon + Hip05 and Hip06 SoCs + +config PCIE_QCOM + bool "Qualcomm PCIe controller" + depends on ARCH_QCOM && OF + select PCIE_DW + select PCIEPORTBUS + help + Say Y here to enable PCIe controller support on Qualcomm SoCs. The + PCIe controller uses the Designware core plus Qualcomm-specific + hardware wrappers. endmenu diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index 9d4d3c692..7b2f20c6c 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -15,8 +15,10 @@ obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o +obj-$(CONFIG_PCIE_IPROC_MSI) += pcie-iproc-msi.o obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o obj-$(CONFIG_PCI_HISI) += pcie-hisi.o +obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c index 5434c90db..1652bc70b 100644 --- a/drivers/pci/host/pci-host-generic.c +++ b/drivers/pci/host/pci-host-generic.c @@ -38,16 +38,7 @@ struct gen_pci_cfg_windows { struct gen_pci_cfg_bus_ops *ops; }; -/* - * ARM pcibios functions expect the ARM struct pci_sys_data as the PCI - * sysdata. Add pci_sys_data as the first element in struct gen_pci so - * that when we use a gen_pci pointer as sysdata, it is also a pointer to - * a struct pci_sys_data. 
- */ struct gen_pci { -#ifdef CONFIG_ARM - struct pci_sys_data sys; -#endif struct pci_host_bridge host; struct gen_pci_cfg_windows cfg; struct list_head resources; diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c index 9ce7cd148..fe600964f 100644 --- a/drivers/pci/host/pci-imx6.c +++ b/drivers/pci/host/pci-imx6.c @@ -32,7 +32,7 @@ #define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp) struct imx6_pcie { - int reset_gpio; + struct gpio_desc *reset_gpio; struct clk *pcie_bus; struct clk *pcie_phy; struct clk *pcie; @@ -122,7 +122,7 @@ static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr) } /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ -static int pcie_phy_read(void __iomem *dbi_base, int addr , int *data) +static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data) { u32 val, phy_ctl; int ret; @@ -287,10 +287,10 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp) usleep_range(200, 500); /* Some boards don't have PCIe reset GPIO. */ - if (gpio_is_valid(imx6_pcie->reset_gpio)) { - gpio_set_value(imx6_pcie->reset_gpio, 0); + if (imx6_pcie->reset_gpio) { + gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 0); msleep(100); - gpio_set_value(imx6_pcie->reset_gpio, 1); + gpiod_set_value_cansleep(imx6_pcie->reset_gpio, 1); } return 0; @@ -561,7 +561,6 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) { struct imx6_pcie *imx6_pcie; struct pcie_port *pp; - struct device_node *np = pdev->dev.of_node; struct resource *dbi_base; int ret; @@ -582,15 +581,8 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) return PTR_ERR(pp->dbi_base); /* Fetch GPIOs */ - imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); - if (gpio_is_valid(imx6_pcie->reset_gpio)) { - ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio, - GPIOF_OUT_INIT_LOW, "PCIe reset"); - if (ret) { - dev_err(&pdev->dev, "unable to get reset gpio\n"); - return ret; - } - } + imx6_pcie->reset_gpio = devm_gpiod_get_optional(&pdev->dev, "reset", + GPIOD_OUT_LOW); /* Fetch clocks */ imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy"); diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c index 3923bed93..f39961bcf 100644 --- a/drivers/pci/host/pci-layerscape.c +++ b/drivers/pci/host/pci-layerscape.c @@ -77,6 +77,16 @@ static void ls_pcie_fix_class(struct ls_pcie *pcie) iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE); } +/* Drop MSG TLP except for Vendor MSG */ +static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) +{ + u32 val; + + val = ioread32(pcie->dbi + PCIE_STRFMR1); + val &= 0xDFFFFFFF; + iowrite32(val, pcie->dbi + PCIE_STRFMR1); +} + static int ls1021_pcie_link_up(struct pcie_port *pp) { u32 state; @@ -97,7 +107,7 @@ static int ls1021_pcie_link_up(struct pcie_port *pp) static void ls1021_pcie_host_init(struct pcie_port *pp) { struct ls_pcie *pcie = to_ls_pcie(pp); - u32 val, index[2]; + u32 index[2]; pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node, "fsl,pcie-scfg"); @@ -116,13 +126,7 @@ static void ls1021_pcie_host_init(struct pcie_port *pp) dw_pcie_setup_rc(pp); - /* - * LS1021A Workaround for internal TKT228622 - * to fix the INTx hang issue - */ - val = ioread32(pcie->dbi + PCIE_STRFMR1); - val &= 0xffff; - iowrite32(val, pcie->dbi + PCIE_STRFMR1); + ls_pcie_drop_msg_tlp(pcie); } static int ls_pcie_link_up(struct pcie_port *pp) @@ -147,6 +151,7 @@ static void ls_pcie_host_init(struct pcie_port *pp) iowrite32(1, pcie->dbi + 
PCIE_DBI_RO_WR_EN); ls_pcie_fix_class(pcie); ls_pcie_clear_multifunction(pcie); + ls_pcie_drop_msg_tlp(pcie); iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN); } diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c index c4f64bfee..9980a4bda 100644 --- a/drivers/pci/host/pci-rcar-gen2.c +++ b/drivers/pci/host/pci-rcar-gen2.c @@ -15,6 +15,7 @@ #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/platform_device.h> @@ -102,6 +103,8 @@ struct rcar_pci_priv { unsigned busnr; int irq; unsigned long window_size; + unsigned long window_addr; + unsigned long window_pci; }; /* PCI configuration space operations */ @@ -239,8 +242,8 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) RCAR_PCI_ARBITER_PCIBP_MODE; iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG); - /* PCI-AHB mapping: 0x40000000 base */ - iowrite32(0x40000000 | RCAR_PCIAHB_PREFETCH16, + /* PCI-AHB mapping */ + iowrite32(priv->window_addr | RCAR_PCIAHB_PREFETCH16, reg + RCAR_PCIAHB_WIN1_CTR_REG); /* AHB-PCI mapping: OHCI/EHCI registers */ @@ -251,7 +254,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG, reg + RCAR_AHBPCI_WIN1_CTR_REG); /* Set PCI-AHB Window1 address */ - iowrite32(0x40000000 | PCI_BASE_ADDRESS_MEM_PREFETCH, + iowrite32(priv->window_pci | PCI_BASE_ADDRESS_MEM_PREFETCH, reg + PCI_BASE_ADDRESS_1); /* Set AHB-PCI bridge PCI communication area address */ val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET; @@ -284,6 +287,64 @@ static struct pci_ops rcar_pci_ops = { .write = pci_generic_config_write, }; +static int pci_dma_range_parser_init(struct of_pci_range_parser *parser, + struct device_node *node) +{ + const int na = 3, ns = 2; + int rlen; + + parser->node = node; + parser->pna = of_n_addr_cells(node); + parser->np = parser->pna + na + ns; + + parser->range = of_get_property(node, "dma-ranges", &rlen); + if (!parser->range) + return -ENOENT; + + parser->end = parser->range + rlen / sizeof(__be32); + return 0; +} + +static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci, + struct device_node *np) +{ + struct of_pci_range range; + struct of_pci_range_parser parser; + int index = 0; + + /* Failure to parse is ok as we fall back to defaults */ + if (pci_dma_range_parser_init(&parser, np)) + return 0; + + /* Get the dma-ranges from DT */ + for_each_of_pci_range(&parser, &range) { + /* Hardware only allows one inbound 32-bit range */ + if (index) + return -EINVAL; + + pci->window_addr = (unsigned long)range.cpu_addr; + pci->window_pci = (unsigned long)range.pci_addr; + pci->window_size = (unsigned long)range.size; + + /* Catch HW limitations */ + if (!(range.flags & IORESOURCE_PREFETCH)) { + dev_err(pci->dev, "window must be prefetchable\n"); + return -EINVAL; + } + if (pci->window_addr) { + u32 lowaddr = 1 << (ffs(pci->window_addr) - 1); + + if (lowaddr < pci->window_size) { + dev_err(pci->dev, "invalid window size/addr\n"); + return -EINVAL; + } + } + index++; + } + + return 0; +} + static int rcar_pci_probe(struct platform_device *pdev) { struct resource *cfg_res, *mem_res; @@ -329,6 +390,9 @@ static int rcar_pci_probe(struct platform_device *pdev) return priv->irq; } + /* default window addr and size if not specified in DT */ + priv->window_addr = 0x40000000; + priv->window_pci = 0x40000000; priv->window_size = SZ_1G; if (pdev->dev.of_node) { @@ -344,6 +408,12 @@ static int 
rcar_pci_probe(struct platform_device *pdev) priv->busnr = busnr.start; if (busnr.end != busnr.start) dev_warn(&pdev->dev, "only one bus number supported\n"); + + ret = rcar_pci_parse_map_dma_ranges(priv, pdev->dev.of_node); + if (ret < 0) { + dev_err(&pdev->dev, "failed to parse dma-range\n"); + return ret; + } } else { priv->busnr = pdev->id; } @@ -360,6 +430,7 @@ static int rcar_pci_probe(struct platform_device *pdev) } static struct of_device_id rcar_pci_of_match[] = { + { .compatible = "renesas,pci-rcar-gen2", }, { .compatible = "renesas,pci-r8a7790", }, { .compatible = "renesas,pci-r8a7791", }, { .compatible = "renesas,pci-r8a7794", }, diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c index 0863d9cc2..f843a72dc 100644 --- a/drivers/pci/host/pci-versatile.c +++ b/drivers/pci/host/pci-versatile.c @@ -125,9 +125,6 @@ out_release_res: return err; } -/* Unused, temporary to satisfy ARM arch code */ -struct pci_sys_data sys; - static int versatile_pci_probe(struct platform_device *pdev) { struct resource *res; @@ -208,7 +205,7 @@ static int versatile_pci_probe(struct platform_device *pdev) pci_add_flags(PCI_ENABLE_PROC_DOMAINS); pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC); - bus = pci_scan_root_bus(&pdev->dev, 0, &pci_versatile_ops, &sys, &pci_res); + bus = pci_scan_root_bus(&pdev->dev, 0, &pci_versatile_ops, NULL, &pci_res); if (!bus) return -ENOMEM; diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index 02a7452bd..217168278 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -128,32 +128,26 @@ static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg) static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val) { - int ret; - if (pp->ops->rd_own_conf) - ret = pp->ops->rd_own_conf(pp, where, size, val); - else - ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val); + return pp->ops->rd_own_conf(pp, where, size, val); - return ret; + return dw_pcie_cfg_read(pp->dbi_base + where, size, val); } static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val) { - int ret; - if (pp->ops->wr_own_conf) - ret = pp->ops->wr_own_conf(pp, where, size, val); - else - ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val); + return pp->ops->wr_own_conf(pp, where, size, val); - return ret; + return dw_pcie_cfg_write(pp->dbi_base + where, size, val); } static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index, int type, u64 cpu_addr, u64 pci_addr, u32 size) { + u32 val; + dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index, PCIE_ATU_VIEWPORT); dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE); @@ -164,6 +158,12 @@ static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index, dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET); dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1); dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); + + /* + * Make sure ATU enable takes effect before any subsequent config + * and I/O accesses. 
+ */ + dw_pcie_readl_rc(pp, PCIE_ATU_CR2, &val); } static struct irq_chip dw_msi_irq_chip = { @@ -384,8 +384,8 @@ int dw_pcie_link_up(struct pcie_port *pp) { if (pp->ops->link_up) return pp->ops->link_up(pp); - else - return 0; + + return 0; } static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq, @@ -571,6 +571,9 @@ static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, u64 cpu_addr; void __iomem *va_cfg_base; + if (pp->ops->rd_other_conf) + return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val); + busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | PCIE_ATU_FUNC(PCI_FUNC(devfn)); @@ -605,6 +608,9 @@ static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, u64 cpu_addr; void __iomem *va_cfg_base; + if (pp->ops->wr_other_conf) + return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val); + busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | PCIE_ATU_FUNC(PCI_FUNC(devfn)); @@ -658,46 +664,30 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { struct pcie_port *pp = bus->sysdata; - int ret; if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) { *val = 0xffffffff; return PCIBIOS_DEVICE_NOT_FOUND; } - if (bus->number != pp->root_bus_nr) - if (pp->ops->rd_other_conf) - ret = pp->ops->rd_other_conf(pp, bus, devfn, - where, size, val); - else - ret = dw_pcie_rd_other_conf(pp, bus, devfn, - where, size, val); - else - ret = dw_pcie_rd_own_conf(pp, where, size, val); + if (bus->number == pp->root_bus_nr) + return dw_pcie_rd_own_conf(pp, where, size, val); - return ret; + return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val); } static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { struct pcie_port *pp = bus->sysdata; - int ret; if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) return PCIBIOS_DEVICE_NOT_FOUND; - if (bus->number != pp->root_bus_nr) - if (pp->ops->wr_other_conf) - ret = pp->ops->wr_other_conf(pp, bus, devfn, - where, size, val); - else - ret = dw_pcie_wr_other_conf(pp, bus, devfn, - where, size, val); - else - ret = dw_pcie_wr_own_conf(pp, where, size, val); + if (bus->number == pp->root_bus_nr) + return dw_pcie_wr_own_conf(pp, where, size, val); - return ret; + return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val); } static struct pci_ops dw_pcie_ops = { diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c index 77f7c669a..3e98d4eda 100644 --- a/drivers/pci/host/pcie-hisi.c +++ b/drivers/pci/host/pcie-hisi.c @@ -1,10 +1,11 @@ /* - * PCIe host controller driver for HiSilicon Hip05 SoC + * PCIe host controller driver for HiSilicon SoCs * * Copyright (C) 2015 HiSilicon Co., Ltd. 
http://www.hisilicon.com * - * Author: Zhou Wang <wangzhou1@hisilicon.com> - * Dacai Zhu <zhudacai@hisilicon.com> + * Authors: Zhou Wang <wangzhou1@hisilicon.com> + * Dacai Zhu <zhudacai@hisilicon.com> + * Gabriele Paoloni <gabriele.paoloni@huawei.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -16,21 +17,31 @@ #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/platform_device.h> +#include <linux/of_device.h> #include <linux/regmap.h> #include "pcie-designware.h" -#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818 -#define PCIE_LTSSM_LINKUP_STATE 0x11 -#define PCIE_LTSSM_STATE_MASK 0x3F +#define PCIE_LTSSM_LINKUP_STATE 0x11 +#define PCIE_LTSSM_STATE_MASK 0x3F +#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818 +#define PCIE_SYS_STATE4 0x31c +#define PCIE_HIP06_CTRL_OFF 0x1000 #define to_hisi_pcie(x) container_of(x, struct hisi_pcie, pp) +struct hisi_pcie; + +struct pcie_soc_ops { + int (*hisi_pcie_link_up)(struct hisi_pcie *pcie); +}; + struct hisi_pcie { struct regmap *subctrl; void __iomem *reg_base; u32 port_id; struct pcie_port pp; + struct pcie_soc_ops *soc_ops; }; static inline void hisi_pcie_apb_writel(struct hisi_pcie *pcie, @@ -44,7 +55,7 @@ static inline u32 hisi_pcie_apb_readl(struct hisi_pcie *pcie, u32 reg) return readl(pcie->reg_base + reg); } -/* Hip05 PCIe host only supports 32-bit config access */ +/* HipXX PCIe host only supports 32-bit config access */ static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size, u32 *val) { @@ -69,7 +80,7 @@ static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size, return PCIBIOS_SUCCESSFUL; } -/* Hip05 PCIe host only supports 32-bit config access */ +/* HipXX PCIe host only supports 32-bit config access */ static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size, u32 val) { @@ -96,10 +107,9 @@ static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size, return PCIBIOS_SUCCESSFUL; } -static int hisi_pcie_link_up(struct pcie_port *pp) +static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie) { u32 val; - struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp); regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG + 0x100 * hisi_pcie->port_id, &val); @@ -107,6 +117,23 @@ static int hisi_pcie_link_up(struct pcie_port *pp) return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); } +static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie) +{ + u32 val; + + val = hisi_pcie_apb_readl(hisi_pcie, PCIE_HIP06_CTRL_OFF + + PCIE_SYS_STATE4); + + return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); +} + +static int hisi_pcie_link_up(struct pcie_port *pp) +{ + struct hisi_pcie *hisi_pcie = to_hisi_pcie(pp); + + return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie); +} + static struct pcie_host_ops hisi_pcie_host_ops = { .rd_own_conf = hisi_pcie_cfg_read, .wr_own_conf = hisi_pcie_cfg_write, @@ -145,7 +172,9 @@ static int hisi_pcie_probe(struct platform_device *pdev) { struct hisi_pcie *hisi_pcie; struct pcie_port *pp; + const struct of_device_id *match; struct resource *reg; + struct device_driver *driver; int ret; hisi_pcie = devm_kzalloc(&pdev->dev, sizeof(*hisi_pcie), GFP_KERNEL); @@ -154,6 +183,10 @@ static int hisi_pcie_probe(struct platform_device *pdev) pp = &hisi_pcie->pp; pp->dev = &pdev->dev; + driver = (pdev->dev).driver; + + match = of_match_device(driver->of_match_table, &pdev->dev); + hisi_pcie->soc_ops = (struct pcie_soc_ops *) match->data; 
hisi_pcie->subctrl = syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl"); @@ -182,11 +215,27 @@ static int hisi_pcie_probe(struct platform_device *pdev) return 0; } +static struct pcie_soc_ops hip05_ops = { + &hisi_pcie_link_up_hip05 +}; + +static struct pcie_soc_ops hip06_ops = { + &hisi_pcie_link_up_hip06 +}; + static const struct of_device_id hisi_pcie_of_match[] = { - {.compatible = "hisilicon,hip05-pcie",}, + { + .compatible = "hisilicon,hip05-pcie", + .data = (void *) &hip05_ops, + }, + { + .compatible = "hisilicon,hip06-pcie", + .data = (void *) &hip06_ops, + }, {}, }; + MODULE_DEVICE_TABLE(of, hisi_pcie_of_match); static struct platform_driver hisi_pcie_driver = { @@ -198,3 +247,8 @@ static struct platform_driver hisi_pcie_driver = { }; module_platform_driver(hisi_pcie_driver); + +MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>"); +MODULE_AUTHOR("Dacai Zhu <zhudacai@hisilicon.com>"); +MODULE_AUTHOR("Gabriele Paoloni <gabriele.paoloni@huawei.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c index 96a7d999f..0d7bee4a0 100644 --- a/drivers/pci/host/pcie-iproc-bcma.c +++ b/drivers/pci/host/pcie-iproc-bcma.c @@ -55,6 +55,7 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev) bcma_set_drvdata(bdev, pcie); pcie->base = bdev->io_addr; + pcie->base_addr = bdev->addr; res_mem.start = bdev->addr_s[0]; res_mem.end = bdev->addr_s[0] + SZ_128M - 1; diff --git a/drivers/pci/host/pcie-iproc-msi.c b/drivers/pci/host/pcie-iproc-msi.c new file mode 100644 index 000000000..9a2973bdc --- /dev/null +++ b/drivers/pci/host/pcie-iproc-msi.c @@ -0,0 +1,675 @@ +/* + * Copyright (C) 2015 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/interrupt.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/msi.h> +#include <linux/of_irq.h> +#include <linux/of_pci.h> +#include <linux/pci.h> + +#include "pcie-iproc.h" + +#define IPROC_MSI_INTR_EN_SHIFT 11 +#define IPROC_MSI_INTR_EN BIT(IPROC_MSI_INTR_EN_SHIFT) +#define IPROC_MSI_INT_N_EVENT_SHIFT 1 +#define IPROC_MSI_INT_N_EVENT BIT(IPROC_MSI_INT_N_EVENT_SHIFT) +#define IPROC_MSI_EQ_EN_SHIFT 0 +#define IPROC_MSI_EQ_EN BIT(IPROC_MSI_EQ_EN_SHIFT) + +#define IPROC_MSI_EQ_MASK 0x3f + +/* Max number of GIC interrupts */ +#define NR_HW_IRQS 6 + +/* Number of entries in each event queue */ +#define EQ_LEN 64 + +/* Size of each event queue memory region */ +#define EQ_MEM_REGION_SIZE SZ_4K + +/* Size of each MSI address region */ +#define MSI_MEM_REGION_SIZE SZ_4K + +enum iproc_msi_reg { + IPROC_MSI_EQ_PAGE = 0, + IPROC_MSI_EQ_PAGE_UPPER, + IPROC_MSI_PAGE, + IPROC_MSI_PAGE_UPPER, + IPROC_MSI_CTRL, + IPROC_MSI_EQ_HEAD, + IPROC_MSI_EQ_TAIL, + IPROC_MSI_INTS_EN, + IPROC_MSI_REG_SIZE, +}; + +struct iproc_msi; + +/** + * iProc MSI group + * + * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI + * event queue. 
+ * + * @msi: pointer to iProc MSI data + * @gic_irq: GIC interrupt + * @eq: Event queue number + */ +struct iproc_msi_grp { + struct iproc_msi *msi; + int gic_irq; + unsigned int eq; +}; + +/** + * iProc event queue based MSI + * + * Only meant to be used on platforms without MSI support integrated into the + * GIC. + * + * @pcie: pointer to iProc PCIe data + * @reg_offsets: MSI register offsets + * @grps: MSI groups + * @nr_irqs: number of total interrupts connected to GIC + * @nr_cpus: number of toal CPUs + * @has_inten_reg: indicates the MSI interrupt enable register needs to be + * set explicitly (required for some legacy platforms) + * @bitmap: MSI vector bitmap + * @bitmap_lock: lock to protect access to the MSI bitmap + * @nr_msi_vecs: total number of MSI vectors + * @inner_domain: inner IRQ domain + * @msi_domain: MSI IRQ domain + * @nr_eq_region: required number of 4K aligned memory region for MSI event + * queues + * @nr_msi_region: required number of 4K aligned address region for MSI posted + * writes + * @eq_cpu: pointer to allocated memory region for MSI event queues + * @eq_dma: DMA address of MSI event queues + * @msi_addr: MSI address + */ +struct iproc_msi { + struct iproc_pcie *pcie; + const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE]; + struct iproc_msi_grp *grps; + int nr_irqs; + int nr_cpus; + bool has_inten_reg; + unsigned long *bitmap; + struct mutex bitmap_lock; + unsigned int nr_msi_vecs; + struct irq_domain *inner_domain; + struct irq_domain *msi_domain; + unsigned int nr_eq_region; + unsigned int nr_msi_region; + void *eq_cpu; + dma_addr_t eq_dma; + phys_addr_t msi_addr; +}; + +static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = { + { 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 }, + { 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 }, + { 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 }, + { 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 }, + { 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 }, + { 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 }, +}; + +static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = { + { 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 }, + { 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 }, + { 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 }, + { 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c }, +}; + +static inline u32 iproc_msi_read_reg(struct iproc_msi *msi, + enum iproc_msi_reg reg, + unsigned int eq) +{ + struct iproc_pcie *pcie = msi->pcie; + + return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]); +} + +static inline void iproc_msi_write_reg(struct iproc_msi *msi, + enum iproc_msi_reg reg, + int eq, u32 val) +{ + struct iproc_pcie *pcie = msi->pcie; + + writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]); +} + +static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq) +{ + return (hwirq % msi->nr_irqs); +} + +static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi, + unsigned long hwirq) +{ + if (msi->nr_msi_region > 1) + return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE; + else + return hwirq_to_group(msi, hwirq) * sizeof(u32); +} + +static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq) +{ + if (msi->nr_eq_region > 1) + return eq * EQ_MEM_REGION_SIZE; + else + return eq * EQ_LEN * sizeof(u32); +} + +static struct irq_chip iproc_msi_irq_chip = { + .name = "iProc-MSI", +}; + +static struct msi_domain_info iproc_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | 
MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX, + .chip = &iproc_msi_irq_chip, +}; + +/* + * In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a + * dedicated event queue. Each MSI group can support up to 64 MSI vectors. + * + * The number of MSI groups varies between different iProc SoCs. The total + * number of CPU cores also varies. To support MSI IRQ affinity, we + * distribute GIC interrupts across all available CPUs. MSI vector is moved + * from one GIC interrupt to another to steer to the target CPU. + * + * Assuming: + * - the number of MSI groups is M + * - the number of CPU cores is N + * - M is always a multiple of N + * + * Total number of raw MSI vectors = M * 64 + * Total number of supported MSI vectors = (M * 64) / N + */ +static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq) +{ + return (hwirq % msi->nr_cpus); +} + +static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi, + unsigned long hwirq) +{ + return (hwirq - hwirq_to_cpu(msi, hwirq)); +} + +static int iproc_msi_irq_set_affinity(struct irq_data *data, + const struct cpumask *mask, bool force) +{ + struct iproc_msi *msi = irq_data_get_irq_chip_data(data); + int target_cpu = cpumask_first(mask); + int curr_cpu; + + curr_cpu = hwirq_to_cpu(msi, data->hwirq); + if (curr_cpu == target_cpu) + return IRQ_SET_MASK_OK_DONE; + + /* steer MSI to the target CPU */ + data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu; + + return IRQ_SET_MASK_OK; +} + +static void iproc_msi_irq_compose_msi_msg(struct irq_data *data, + struct msi_msg *msg) +{ + struct iproc_msi *msi = irq_data_get_irq_chip_data(data); + dma_addr_t addr; + + addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq); + msg->address_lo = lower_32_bits(addr); + msg->address_hi = upper_32_bits(addr); + msg->data = data->hwirq; +} + +static struct irq_chip iproc_msi_bottom_irq_chip = { + .name = "MSI", + .irq_set_affinity = iproc_msi_irq_set_affinity, + .irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg, +}; + +static int iproc_msi_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *args) +{ + struct iproc_msi *msi = domain->host_data; + int hwirq; + + mutex_lock(&msi->bitmap_lock); + + /* Allocate 'nr_cpus' number of MSI vectors each time */ + hwirq = bitmap_find_next_zero_area(msi->bitmap, msi->nr_msi_vecs, 0, + msi->nr_cpus, 0); + if (hwirq < msi->nr_msi_vecs) { + bitmap_set(msi->bitmap, hwirq, msi->nr_cpus); + } else { + mutex_unlock(&msi->bitmap_lock); + return -ENOSPC; + } + + mutex_unlock(&msi->bitmap_lock); + + irq_domain_set_info(domain, virq, hwirq, &iproc_msi_bottom_irq_chip, + domain->host_data, handle_simple_irq, NULL, NULL); + + return 0; +} + +static void iproc_msi_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *data = irq_domain_get_irq_data(domain, virq); + struct iproc_msi *msi = irq_data_get_irq_chip_data(data); + unsigned int hwirq; + + mutex_lock(&msi->bitmap_lock); + + hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq); + bitmap_clear(msi->bitmap, hwirq, msi->nr_cpus); + + mutex_unlock(&msi->bitmap_lock); + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +} + +static const struct irq_domain_ops msi_domain_ops = { + .alloc = iproc_msi_irq_domain_alloc, + .free = iproc_msi_irq_domain_free, +}; + +static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head) +{ + u32 *msg, hwirq; + unsigned int offs; + + offs = iproc_msi_eq_offset(msi, eq) + head 
* sizeof(u32); + msg = (u32 *)(msi->eq_cpu + offs); + hwirq = *msg & IPROC_MSI_EQ_MASK; + + /* + * Since we have multiple hwirq mapped to a single MSI vector, + * now we need to derive the hwirq at CPU0. It can then be used to + * mapped back to virq. + */ + return hwirq_to_canonical_hwirq(msi, hwirq); +} + +static void iproc_msi_handler(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + struct iproc_msi_grp *grp; + struct iproc_msi *msi; + struct iproc_pcie *pcie; + u32 eq, head, tail, nr_events; + unsigned long hwirq; + int virq; + + chained_irq_enter(chip, desc); + + grp = irq_desc_get_handler_data(desc); + msi = grp->msi; + pcie = msi->pcie; + eq = grp->eq; + + /* + * iProc MSI event queue is tracked by head and tail pointers. Head + * pointer indicates the next entry (MSI data) to be consumed by SW in + * the queue and needs to be updated by SW. iProc MSI core uses the + * tail pointer as the next data insertion point. + * + * Entries between head and tail pointers contain valid MSI data. MSI + * data is guaranteed to be in the event queue memory before the tail + * pointer is updated by the iProc MSI core. + */ + head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD, + eq) & IPROC_MSI_EQ_MASK; + do { + tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL, + eq) & IPROC_MSI_EQ_MASK; + + /* + * Figure out total number of events (MSI data) to be + * processed. + */ + nr_events = (tail < head) ? + (EQ_LEN - (head - tail)) : (tail - head); + if (!nr_events) + break; + + /* process all outstanding events */ + while (nr_events--) { + hwirq = decode_msi_hwirq(msi, eq, head); + virq = irq_find_mapping(msi->inner_domain, hwirq); + generic_handle_irq(virq); + + head++; + head %= EQ_LEN; + } + + /* + * Now all outstanding events have been processed. Update the + * head pointer. + */ + iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head); + + /* + * Now go read the tail pointer again to see if there are new + * oustanding events that came in during the above window. + */ + } while (true); + + chained_irq_exit(chip, desc); +} + +static void iproc_msi_enable(struct iproc_msi *msi) +{ + int i, eq; + u32 val; + + /* Program memory region for each event queue */ + for (i = 0; i < msi->nr_eq_region; i++) { + dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE); + + iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i, + lower_32_bits(addr)); + iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i, + upper_32_bits(addr)); + } + + /* Program address region for MSI posted writes */ + for (i = 0; i < msi->nr_msi_region; i++) { + phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE); + + iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i, + lower_32_bits(addr)); + iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i, + upper_32_bits(addr)); + } + + for (eq = 0; eq < msi->nr_irqs; eq++) { + /* Enable MSI event queue */ + val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT | + IPROC_MSI_EQ_EN; + iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val); + + /* + * Some legacy platforms require the MSI interrupt enable + * register to be set explicitly. 
+ */ + if (msi->has_inten_reg) { + val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq); + val |= BIT(eq); + iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val); + } + } +} + +static void iproc_msi_disable(struct iproc_msi *msi) +{ + u32 eq, val; + + for (eq = 0; eq < msi->nr_irqs; eq++) { + if (msi->has_inten_reg) { + val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq); + val &= ~BIT(eq); + iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val); + } + + val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq); + val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT | + IPROC_MSI_EQ_EN); + iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val); + } +} + +static int iproc_msi_alloc_domains(struct device_node *node, + struct iproc_msi *msi) +{ + msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs, + &msi_domain_ops, msi); + if (!msi->inner_domain) + return -ENOMEM; + + msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), + &iproc_msi_domain_info, + msi->inner_domain); + if (!msi->msi_domain) { + irq_domain_remove(msi->inner_domain); + return -ENOMEM; + } + + return 0; +} + +static void iproc_msi_free_domains(struct iproc_msi *msi) +{ + if (msi->msi_domain) + irq_domain_remove(msi->msi_domain); + + if (msi->inner_domain) + irq_domain_remove(msi->inner_domain); +} + +static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu) +{ + int i; + + for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) { + irq_set_chained_handler_and_data(msi->grps[i].gic_irq, + NULL, NULL); + } +} + +static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu) +{ + int i, ret; + cpumask_var_t mask; + struct iproc_pcie *pcie = msi->pcie; + + for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) { + irq_set_chained_handler_and_data(msi->grps[i].gic_irq, + iproc_msi_handler, + &msi->grps[i]); + /* Dedicate GIC interrupt to each CPU core */ + if (alloc_cpumask_var(&mask, GFP_KERNEL)) { + cpumask_clear(mask); + cpumask_set_cpu(cpu, mask); + ret = irq_set_affinity(msi->grps[i].gic_irq, mask); + if (ret) + dev_err(pcie->dev, + "failed to set affinity for IRQ%d\n", + msi->grps[i].gic_irq); + free_cpumask_var(mask); + } else { + dev_err(pcie->dev, "failed to alloc CPU mask\n"); + ret = -EINVAL; + } + + if (ret) { + /* Free all configured/unconfigured IRQs */ + iproc_msi_irq_free(msi, cpu); + return ret; + } + } + + return 0; +} + +int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node) +{ + struct iproc_msi *msi; + int i, ret; + unsigned int cpu; + + if (!of_device_is_compatible(node, "brcm,iproc-msi")) + return -ENODEV; + + if (!of_find_property(node, "msi-controller", NULL)) + return -ENODEV; + + if (pcie->msi) + return -EBUSY; + + msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL); + if (!msi) + return -ENOMEM; + + msi->pcie = pcie; + pcie->msi = msi; + msi->msi_addr = pcie->base_addr; + mutex_init(&msi->bitmap_lock); + msi->nr_cpus = num_possible_cpus(); + + msi->nr_irqs = of_irq_count(node); + if (!msi->nr_irqs) { + dev_err(pcie->dev, "found no MSI GIC interrupt\n"); + return -ENODEV; + } + + if (msi->nr_irqs > NR_HW_IRQS) { + dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n", + msi->nr_irqs); + msi->nr_irqs = NR_HW_IRQS; + } + + if (msi->nr_irqs < msi->nr_cpus) { + dev_err(pcie->dev, + "not enough GIC interrupts for MSI affinity\n"); + return -EINVAL; + } + + if (msi->nr_irqs % msi->nr_cpus != 0) { + msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus; + dev_warn(pcie->dev, "Reducing number of interrupts to %d\n", + msi->nr_irqs); + } + + switch (pcie->type) { + 
case IPROC_PCIE_PAXB: + msi->reg_offsets = iproc_msi_reg_paxb; + msi->nr_eq_region = 1; + msi->nr_msi_region = 1; + break; + case IPROC_PCIE_PAXC: + msi->reg_offsets = iproc_msi_reg_paxc; + msi->nr_eq_region = msi->nr_irqs; + msi->nr_msi_region = msi->nr_irqs; + break; + default: + dev_err(pcie->dev, "incompatible iProc PCIe interface\n"); + return -EINVAL; + } + + if (of_find_property(node, "brcm,pcie-msi-inten", NULL)) + msi->has_inten_reg = true; + + msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN; + msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs), + sizeof(*msi->bitmap), GFP_KERNEL); + if (!msi->bitmap) + return -ENOMEM; + + msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps), + GFP_KERNEL); + if (!msi->grps) + return -ENOMEM; + + for (i = 0; i < msi->nr_irqs; i++) { + unsigned int irq = irq_of_parse_and_map(node, i); + + if (!irq) { + dev_err(pcie->dev, "unable to parse/map interrupt\n"); + ret = -ENODEV; + goto free_irqs; + } + msi->grps[i].gic_irq = irq; + msi->grps[i].msi = msi; + msi->grps[i].eq = i; + } + + /* Reserve memory for event queue and make sure memories are zeroed */ + msi->eq_cpu = dma_zalloc_coherent(pcie->dev, + msi->nr_eq_region * EQ_MEM_REGION_SIZE, + &msi->eq_dma, GFP_KERNEL); + if (!msi->eq_cpu) { + ret = -ENOMEM; + goto free_irqs; + } + + ret = iproc_msi_alloc_domains(node, msi); + if (ret) { + dev_err(pcie->dev, "failed to create MSI domains\n"); + goto free_eq_dma; + } + + for_each_online_cpu(cpu) { + ret = iproc_msi_irq_setup(msi, cpu); + if (ret) + goto free_msi_irq; + } + + iproc_msi_enable(msi); + + return 0; + +free_msi_irq: + for_each_online_cpu(cpu) + iproc_msi_irq_free(msi, cpu); + iproc_msi_free_domains(msi); + +free_eq_dma: + dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE, + msi->eq_cpu, msi->eq_dma); + +free_irqs: + for (i = 0; i < msi->nr_irqs; i++) { + if (msi->grps[i].gic_irq) + irq_dispose_mapping(msi->grps[i].gic_irq); + } + pcie->msi = NULL; + return ret; +} +EXPORT_SYMBOL(iproc_msi_init); + +void iproc_msi_exit(struct iproc_pcie *pcie) +{ + struct iproc_msi *msi = pcie->msi; + unsigned int i, cpu; + + if (!msi) + return; + + iproc_msi_disable(msi); + + for_each_online_cpu(cpu) + iproc_msi_irq_free(msi, cpu); + + iproc_msi_free_domains(msi); + + dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE, + msi->eq_cpu, msi->eq_dma); + + for (i = 0; i < msi->nr_irqs; i++) { + if (msi->grps[i].gic_irq) + irq_dispose_mapping(msi->grps[i].gic_irq); + } +} +EXPORT_SYMBOL(iproc_msi_exit); diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c index c9550dc8b..1738c5288 100644 --- a/drivers/pci/host/pcie-iproc-platform.c +++ b/drivers/pci/host/pcie-iproc-platform.c @@ -26,8 +26,21 @@ #include "pcie-iproc.h" +static const struct of_device_id iproc_pcie_of_match_table[] = { + { + .compatible = "brcm,iproc-pcie", + .data = (int *)IPROC_PCIE_PAXB, + }, { + .compatible = "brcm,iproc-pcie-paxc", + .data = (int *)IPROC_PCIE_PAXC, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table); + static int iproc_pcie_pltfm_probe(struct platform_device *pdev) { + const struct of_device_id *of_id; struct iproc_pcie *pcie; struct device_node *np = pdev->dev.of_node; struct resource reg; @@ -35,11 +48,16 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) LIST_HEAD(res); int ret; + of_id = of_match_device(iproc_pcie_of_match_table, &pdev->dev); + if (!of_id) + return -EINVAL; + pcie = devm_kzalloc(&pdev->dev, sizeof(struct 
iproc_pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; pcie->dev = &pdev->dev; + pcie->type = (enum iproc_pcie_type)of_id->data; platform_set_drvdata(pdev, pcie); ret = of_address_to_resource(np, 0, ®); @@ -53,6 +71,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) dev_err(pcie->dev, "unable to map controller registers\n"); return -ENOMEM; } + pcie->base_addr = reg.start; if (of_property_read_bool(np, "brcm,pcie-ob")) { u32 val; @@ -114,12 +133,6 @@ static int iproc_pcie_pltfm_remove(struct platform_device *pdev) return iproc_pcie_remove(pcie); } -static const struct of_device_id iproc_pcie_of_match_table[] = { - { .compatible = "brcm,iproc-pcie", }, - { /* sentinel */ } -}; -MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table); - static struct platform_driver iproc_pcie_pltfm_driver = { .driver = { .name = "iproc-pcie", diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c index eac719af1..a576aeeb2 100644 --- a/drivers/pci/host/pcie-iproc.c +++ b/drivers/pci/host/pcie-iproc.c @@ -30,20 +30,16 @@ #include "pcie-iproc.h" -#define CLK_CONTROL_OFFSET 0x000 #define EP_PERST_SOURCE_SELECT_SHIFT 2 #define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT) #define EP_MODE_SURVIVE_PERST_SHIFT 1 #define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT) #define RC_PCIE_RST_OUTPUT_SHIFT 0 #define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT) +#define PAXC_RESET_MASK 0x7f -#define CFG_IND_ADDR_OFFSET 0x120 #define CFG_IND_ADDR_MASK 0x00001ffc -#define CFG_IND_DATA_OFFSET 0x124 - -#define CFG_ADDR_OFFSET 0x1f8 #define CFG_ADDR_BUS_NUM_SHIFT 20 #define CFG_ADDR_BUS_NUM_MASK 0x0ff00000 #define CFG_ADDR_DEV_NUM_SHIFT 15 @@ -55,12 +51,8 @@ #define CFG_ADDR_CFG_TYPE_SHIFT 0 #define CFG_ADDR_CFG_TYPE_MASK 0x00000003 -#define CFG_DATA_OFFSET 0x1fc - -#define SYS_RC_INTX_EN 0x330 #define SYS_RC_INTX_MASK 0xf -#define PCIE_LINK_STATUS_OFFSET 0xf0c #define PCIE_PHYLINKUP_SHIFT 3 #define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT) #define PCIE_DL_ACTIVE_SHIFT 2 @@ -71,13 +63,54 @@ #define OARR_SIZE_CFG_SHIFT 1 #define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT) -#define OARR_LO(window) (0xd20 + (window) * 8) -#define OARR_HI(window) (0xd24 + (window) * 8) -#define OMAP_LO(window) (0xd40 + (window) * 8) -#define OMAP_HI(window) (0xd44 + (window) * 8) - #define MAX_NUM_OB_WINDOWS 2 +#define IPROC_PCIE_REG_INVALID 0xffff + +enum iproc_pcie_reg { + IPROC_PCIE_CLK_CTRL = 0, + IPROC_PCIE_CFG_IND_ADDR, + IPROC_PCIE_CFG_IND_DATA, + IPROC_PCIE_CFG_ADDR, + IPROC_PCIE_CFG_DATA, + IPROC_PCIE_INTX_EN, + IPROC_PCIE_OARR_LO, + IPROC_PCIE_OARR_HI, + IPROC_PCIE_OMAP_LO, + IPROC_PCIE_OMAP_HI, + IPROC_PCIE_LINK_STATUS, +}; + +/* iProc PCIe PAXB registers */ +static const u16 iproc_pcie_reg_paxb[] = { + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x120, + [IPROC_PCIE_CFG_IND_DATA] = 0x124, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, + [IPROC_PCIE_INTX_EN] = 0x330, + [IPROC_PCIE_OARR_LO] = 0xd20, + [IPROC_PCIE_OARR_HI] = 0xd24, + [IPROC_PCIE_OMAP_LO] = 0xd40, + [IPROC_PCIE_OMAP_HI] = 0xd44, + [IPROC_PCIE_LINK_STATUS] = 0xf0c, +}; + +/* iProc PCIe PAXC v1 registers */ +static const u16 iproc_pcie_reg_paxc[] = { + [IPROC_PCIE_CLK_CTRL] = 0x000, + [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, + [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, + [IPROC_PCIE_CFG_ADDR] = 0x1f8, + [IPROC_PCIE_CFG_DATA] = 0x1fc, + [IPROC_PCIE_INTX_EN] = IPROC_PCIE_REG_INVALID, + [IPROC_PCIE_OARR_LO] = IPROC_PCIE_REG_INVALID, + [IPROC_PCIE_OARR_HI] = IPROC_PCIE_REG_INVALID, + [IPROC_PCIE_OMAP_LO] = 
IPROC_PCIE_REG_INVALID, + [IPROC_PCIE_OMAP_HI] = IPROC_PCIE_REG_INVALID, + [IPROC_PCIE_LINK_STATUS] = IPROC_PCIE_REG_INVALID, +}; + static inline struct iproc_pcie *iproc_data(struct pci_bus *bus) { struct iproc_pcie *pcie; @@ -91,6 +124,51 @@ static inline struct iproc_pcie *iproc_data(struct pci_bus *bus) return pcie; } +static inline bool iproc_pcie_reg_is_invalid(u16 reg_offset) +{ + return !!(reg_offset == IPROC_PCIE_REG_INVALID); +} + +static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie, + enum iproc_pcie_reg reg) +{ + return pcie->reg_offsets[reg]; +} + +static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie, + enum iproc_pcie_reg reg) +{ + u16 offset = iproc_pcie_reg_offset(pcie, reg); + + if (iproc_pcie_reg_is_invalid(offset)) + return 0; + + return readl(pcie->base + offset); +} + +static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie, + enum iproc_pcie_reg reg, u32 val) +{ + u16 offset = iproc_pcie_reg_offset(pcie, reg); + + if (iproc_pcie_reg_is_invalid(offset)) + return; + + writel(val, pcie->base + offset); +} + +static inline void iproc_pcie_ob_write(struct iproc_pcie *pcie, + enum iproc_pcie_reg reg, + unsigned window, u32 val) +{ + u16 offset = iproc_pcie_reg_offset(pcie, reg); + + if (iproc_pcie_reg_is_invalid(offset)) + return; + + writel(val, pcie->base + offset + (window * 8)); +} + /** * Note access to the configuration registers are protected at the higher layer * by 'pci_lock' in drivers/pci/access.c @@ -104,18 +182,29 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus, unsigned fn = PCI_FUNC(devfn); unsigned busno = bus->number; u32 val; + u16 offset; /* root complex access */ if (busno == 0) { - if (slot >= 1) + if (slot > 0 || fn > 0) return NULL; - writel(where & CFG_IND_ADDR_MASK, - pcie->base + CFG_IND_ADDR_OFFSET); - return (pcie->base + CFG_IND_DATA_OFFSET); + + iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR, + where & CFG_IND_ADDR_MASK); + offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA); + if (iproc_pcie_reg_is_invalid(offset)) + return NULL; + else + return (pcie->base + offset); } - if (fn > 1) - return NULL; + /* + * PAXC is connected to an internally emulated EP within the SoC. It + * allows only one device. + */ + if (pcie->type == IPROC_PCIE_PAXC) + if (slot > 0) + return NULL; /* EP device access */ val = (busno << CFG_ADDR_BUS_NUM_SHIFT) | @@ -123,9 +212,12 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus, (fn << CFG_ADDR_FUNC_NUM_SHIFT) | (where & CFG_ADDR_REG_NUM_MASK) | (1 & CFG_ADDR_CFG_TYPE_MASK); - writel(val, pcie->base + CFG_ADDR_OFFSET); - - return (pcie->base + CFG_DATA_OFFSET); + iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val); + offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA); + if (iproc_pcie_reg_is_invalid(offset)) + return NULL; + else + return (pcie->base + offset); } static struct pci_ops iproc_pcie_ops = { @@ -138,18 +230,29 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie) { u32 val; + if (pcie->type == IPROC_PCIE_PAXC) { + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL); + val &= ~PAXC_RESET_MASK; + iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); + udelay(100); + val |= PAXC_RESET_MASK; + iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); + udelay(100); + return; + } + /* * Select perst_b signal as reset source. 
Put the device into reset, * and then bring it out of reset */ - val = readl(pcie->base + CLK_CONTROL_OFFSET); + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL); val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST & ~RC_PCIE_RST_OUTPUT; - writel(val, pcie->base + CLK_CONTROL_OFFSET); + iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); udelay(250); val |= RC_PCIE_RST_OUTPUT; - writel(val, pcie->base + CLK_CONTROL_OFFSET); + iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); msleep(100); } @@ -160,7 +263,14 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus) u16 pos, link_status; bool link_is_active = false; - val = readl(pcie->base + PCIE_LINK_STATUS_OFFSET); + /* + * PAXC connects to emulated endpoint devices directly and does not + * have a Serdes. Therefore skip the link detection logic here. + */ + if (pcie->type == IPROC_PCIE_PAXC) + return 0; + + val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS); if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) { dev_err(pcie->dev, "PHY or data link is INACTIVE!\n"); return -ENODEV; @@ -221,7 +331,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus) static void iproc_pcie_enable(struct iproc_pcie *pcie) { - writel(SYS_RC_INTX_MASK, pcie->base + SYS_RC_INTX_EN); + iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK); } /** @@ -245,7 +355,7 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr, if (size > max_size) { dev_err(pcie->dev, - "res size 0x%pap exceeds max supported size 0x%llx\n", + "res size %pap exceeds max supported size 0x%llx\n", &size, max_size); return -EINVAL; } @@ -272,11 +382,15 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr, axi_addr -= ob->axi_offset; for (i = 0; i < MAX_NUM_OB_WINDOWS; i++) { - writel(lower_32_bits(axi_addr) | OARR_VALID | - (ob->set_oarr_size ? 1 : 0), pcie->base + OARR_LO(i)); - writel(upper_32_bits(axi_addr), pcie->base + OARR_HI(i)); - writel(lower_32_bits(pci_addr), pcie->base + OMAP_LO(i)); - writel(upper_32_bits(pci_addr), pcie->base + OMAP_HI(i)); + iproc_pcie_ob_write(pcie, IPROC_PCIE_OARR_LO, i, + lower_32_bits(axi_addr) | OARR_VALID | + (ob->set_oarr_size ? 
1 : 0)); + iproc_pcie_ob_write(pcie, IPROC_PCIE_OARR_HI, i, + upper_32_bits(axi_addr)); + iproc_pcie_ob_write(pcie, IPROC_PCIE_OMAP_LO, i, + lower_32_bits(pci_addr)); + iproc_pcie_ob_write(pcie, IPROC_PCIE_OMAP_HI, i, + upper_32_bits(pci_addr)); size -= ob->window_size; if (size == 0) @@ -319,6 +433,26 @@ static int iproc_pcie_map_ranges(struct iproc_pcie *pcie, return 0; } +static int iproc_pcie_msi_enable(struct iproc_pcie *pcie) +{ + struct device_node *msi_node; + + msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0); + if (!msi_node) + return -ENODEV; + + /* + * If another MSI controller is being used, the call below should fail + * but that is okay + */ + return iproc_msi_init(pcie, msi_node); +} + +static void iproc_pcie_msi_disable(struct iproc_pcie *pcie) +{ + iproc_msi_exit(pcie); +} + int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) { int ret; @@ -340,6 +474,19 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) goto err_exit_phy; } + switch (pcie->type) { + case IPROC_PCIE_PAXB: + pcie->reg_offsets = iproc_pcie_reg_paxb; + break; + case IPROC_PCIE_PAXC: + pcie->reg_offsets = iproc_pcie_reg_paxc; + break; + default: + dev_err(pcie->dev, "incompatible iProc PCIe interface\n"); + ret = -EINVAL; + goto err_power_off_phy; + } + iproc_pcie_reset(pcie); if (pcie->need_ob_cfg) { @@ -373,6 +520,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) iproc_pcie_enable(pcie); + if (IS_ENABLED(CONFIG_PCI_MSI)) + if (iproc_pcie_msi_enable(pcie)) + dev_info(pcie->dev, "not using iProc MSI\n"); + pci_scan_child_bus(bus); pci_assign_unassigned_bus_resources(bus); pci_fixup_irqs(pci_common_swizzle, pcie->map_irq); @@ -397,6 +548,8 @@ int iproc_pcie_remove(struct iproc_pcie *pcie) pci_stop_root_bus(pcie->root_bus); pci_remove_root_bus(pcie->root_bus); + iproc_pcie_msi_disable(pcie); + phy_power_off(pcie->phy); phy_exit(pcie->phy); diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h index d3dc940f7..e84d93c53 100644 --- a/drivers/pci/host/pcie-iproc.h +++ b/drivers/pci/host/pcie-iproc.h @@ -15,6 +15,20 @@ #define _PCIE_IPROC_H /** + * iProc PCIe interface type + * + * PAXB is the wrapper used in root complex that can be connected to an + * external endpoint device. + * + * PAXC is the wrapper used in root complex dedicated for internal emulated + * endpoint devices. 
+ */ +enum iproc_pcie_type { + IPROC_PCIE_PAXB = 0, + IPROC_PCIE_PAXC, +}; + +/** * iProc PCIe outbound mapping * @set_oarr_size: indicates the OARR size bit needs to be set * @axi_offset: offset from the AXI address to the internal address used by @@ -27,21 +41,30 @@ struct iproc_pcie_ob { resource_size_t window_size; }; +struct iproc_msi; + /** * iProc PCIe device + * * @dev: pointer to device data structure + * @type: iProc PCIe interface type + * @reg_offsets: register offsets * @base: PCIe host controller I/O register base + * @base_addr: PCIe host controller register base physical address * @sysdata: Per PCI controller data (ARM-specific) * @root_bus: pointer to root bus * @phy: optional PHY device that controls the Serdes - * @irqs: interrupt IDs * @map_irq: function callback to map interrupts - * @need_ob_cfg: indidates SW needs to configure the outbound mapping window + * @need_ob_cfg: indicates SW needs to configure the outbound mapping window * @ob: outbound mapping parameters + * @msi: MSI data */ struct iproc_pcie { struct device *dev; + enum iproc_pcie_type type; + const u16 *reg_offsets; void __iomem *base; + phys_addr_t base_addr; #ifdef CONFIG_ARM struct pci_sys_data sysdata; #endif @@ -50,9 +73,24 @@ struct iproc_pcie { int (*map_irq)(const struct pci_dev *, u8, u8); bool need_ob_cfg; struct iproc_pcie_ob ob; + struct iproc_msi *msi; }; int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res); int iproc_pcie_remove(struct iproc_pcie *pcie); +#ifdef CONFIG_PCIE_IPROC_MSI +int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node); +void iproc_msi_exit(struct iproc_pcie *pcie); +#else +static inline int iproc_msi_init(struct iproc_pcie *pcie, + struct device_node *node) +{ + return -ENODEV; +} +static inline void iproc_msi_exit(struct iproc_pcie *pcie) +{ +} +#endif + #endif /* _PCIE_IPROC_H */ diff --git a/drivers/pci/host/pcie-qcom.c b/drivers/pci/host/pcie-qcom.c new file mode 100644 index 000000000..e845fba19 --- /dev/null +++ b/drivers/pci/host/pcie-qcom.c @@ -0,0 +1,616 @@ +/* + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * Copyright 2015 Linaro Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define PCIE20_PARF_PHY_CTRL 0x40
+#define PCIE20_PARF_PHY_REFCLK 0x4C
+#define PCIE20_PARF_DBI_BASE_ADDR 0x168
+#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16c
+#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
+
+#define PCIE20_ELBI_SYS_CTRL 0x04
+#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)
+
+#define PCIE20_CAP 0x70
+
+#define PERST_DELAY_US 1000
+
+struct qcom_pcie_resources_v0 {
+	struct clk *iface_clk;
+	struct clk *core_clk;
+	struct clk *phy_clk;
+	struct reset_control *pci_reset;
+	struct reset_control *axi_reset;
+	struct reset_control *ahb_reset;
+	struct reset_control *por_reset;
+	struct reset_control *phy_reset;
+	struct regulator *vdda;
+	struct regulator *vdda_phy;
+	struct regulator *vdda_refclk;
+};
+
+struct qcom_pcie_resources_v1 {
+	struct clk *iface;
+	struct clk *aux;
+	struct clk *master_bus;
+	struct clk *slave_bus;
+	struct reset_control *core;
+	struct regulator *vdda;
+};
+
+union qcom_pcie_resources {
+	struct qcom_pcie_resources_v0 v0;
+	struct qcom_pcie_resources_v1 v1;
+};
+
+struct qcom_pcie;
+
+struct qcom_pcie_ops {
+	int (*get_resources)(struct qcom_pcie *pcie);
+	int (*init)(struct qcom_pcie *pcie);
+	void (*deinit)(struct qcom_pcie *pcie);
+};
+
+struct qcom_pcie {
+	struct pcie_port pp;
+	struct device *dev;
+	union qcom_pcie_resources res;
+	void __iomem *parf;
+	void __iomem *dbi;
+	void __iomem *elbi;
+	struct phy *phy;
+	struct gpio_desc *reset;
+	struct qcom_pcie_ops *ops;
+};
+
+#define to_qcom_pcie(x) container_of(x, struct qcom_pcie, pp)
+
+static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+{
+	gpiod_set_value(pcie->reset, 1);
+	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
+{
+	gpiod_set_value(pcie->reset, 0);
+	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+}
+
+static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
+{
+	struct pcie_port *pp = arg;
+
+	return dw_handle_msi_irq(pp);
+}
+
+static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
+{
+	struct device *dev = pcie->dev;
+	unsigned int retries = 0;
+	u32 val;
+
+	if (dw_pcie_link_up(&pcie->pp))
+		return 0;
+
+	/* enable link training */
+	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
+	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
+
+	do {
+		if (dw_pcie_link_up(&pcie->pp))
+			return 0;
+		usleep_range(250, 1000);
+	} while (retries < 200);
+
+	dev_warn(dev, "phy link never came up\n");
+
+	return -ETIMEDOUT;
+}
+
+static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
+	struct device *dev = pcie->dev;
+
+	res->vdda = devm_regulator_get(dev, "vdda");
+	if (IS_ERR(res->vdda))
+		return PTR_ERR(res->vdda);
+
+	res->vdda_phy = devm_regulator_get(dev, "vdda_phy");
+	if (IS_ERR(res->vdda_phy))
+		return PTR_ERR(res->vdda_phy);
+
+	res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk");
+	if (IS_ERR(res->vdda_refclk))
+		return PTR_ERR(res->vdda_refclk);
+
+	res->iface_clk = devm_clk_get(dev, "iface");
+	if (IS_ERR(res->iface_clk))
+		return PTR_ERR(res->iface_clk);
+
+	res->core_clk = devm_clk_get(dev, "core");
+	if (IS_ERR(res->core_clk))
+		return PTR_ERR(res->core_clk);
+
+	res->phy_clk = devm_clk_get(dev, "phy");
+	if (IS_ERR(res->phy_clk))
+		return PTR_ERR(res->phy_clk);
+
+	res->pci_reset = devm_reset_control_get(dev, "pci");
+	if (IS_ERR(res->pci_reset))
+		return PTR_ERR(res->pci_reset);
+
+	res->axi_reset = devm_reset_control_get(dev, "axi");
+	if (IS_ERR(res->axi_reset))
+		return PTR_ERR(res->axi_reset);
+
+	res->ahb_reset = devm_reset_control_get(dev, "ahb");
+	if (IS_ERR(res->ahb_reset))
+		return PTR_ERR(res->ahb_reset);
+
+	res->por_reset = devm_reset_control_get(dev, "por");
+	if (IS_ERR(res->por_reset))
+		return PTR_ERR(res->por_reset);
+
+	res->phy_reset = devm_reset_control_get(dev, "phy");
+	if (IS_ERR(res->phy_reset))
+		return PTR_ERR(res->phy_reset);
+
+	return 0;
+}
+
+static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
+	struct device *dev = pcie->dev;
+
+	res->vdda = devm_regulator_get(dev, "vdda");
+	if (IS_ERR(res->vdda))
+		return PTR_ERR(res->vdda);
+
+	res->iface = devm_clk_get(dev, "iface");
+	if (IS_ERR(res->iface))
+		return PTR_ERR(res->iface);
+
+	res->aux = devm_clk_get(dev, "aux");
+	if (IS_ERR(res->aux))
+		return PTR_ERR(res->aux);
+
+	res->master_bus = devm_clk_get(dev, "master_bus");
+	if (IS_ERR(res->master_bus))
+		return PTR_ERR(res->master_bus);
+
+	res->slave_bus = devm_clk_get(dev, "slave_bus");
+	if (IS_ERR(res->slave_bus))
+		return PTR_ERR(res->slave_bus);
+
+	res->core = devm_reset_control_get(dev, "core");
+	if (IS_ERR(res->core))
+		return PTR_ERR(res->core);
+
+	return 0;
+}
+
+static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
+
+	reset_control_assert(res->pci_reset);
+	reset_control_assert(res->axi_reset);
+	reset_control_assert(res->ahb_reset);
+	reset_control_assert(res->por_reset);
+	reset_control_assert(res->pci_reset);
+	clk_disable_unprepare(res->iface_clk);
+	clk_disable_unprepare(res->core_clk);
+	clk_disable_unprepare(res->phy_clk);
+	regulator_disable(res->vdda);
+	regulator_disable(res->vdda_phy);
+	regulator_disable(res->vdda_refclk);
+}
+
+static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
+	struct device *dev = pcie->dev;
+	u32 val;
+	int ret;
+
+	ret = regulator_enable(res->vdda);
+	if (ret) {
+		dev_err(dev, "cannot enable vdda regulator\n");
+		return ret;
+	}
+
+	ret = regulator_enable(res->vdda_refclk);
+	if (ret) {
+		dev_err(dev, "cannot enable vdda_refclk regulator\n");
+		goto err_refclk;
+	}
+
+	ret = regulator_enable(res->vdda_phy);
+	if (ret) {
+		dev_err(dev, "cannot enable vdda_phy regulator\n");
+		goto err_vdda_phy;
+	}
+
+	ret = reset_control_assert(res->ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot assert ahb reset\n");
+		goto err_assert_ahb;
+	}
+
+	ret = clk_prepare_enable(res->iface_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable iface clock\n");
+		goto err_assert_ahb;
+	}
+
+	ret = clk_prepare_enable(res->phy_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable phy clock\n");
+		goto err_clk_phy;
+	}
+
+	ret = clk_prepare_enable(res->core_clk);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable core clock\n");
+		goto err_clk_core;
+	}
+
+	ret = reset_control_deassert(res->ahb_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert ahb reset\n");
+		goto err_deassert_ahb;
+	}
+
+	/* enable PCIe clocks and resets */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+	val &= ~BIT(0);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+
+	/* enable external reference clock */
+	val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
+	val |= BIT(16);
+	writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
+
+	ret = reset_control_deassert(res->phy_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert phy reset\n");
+		return ret;
+	}
+
+	ret = reset_control_deassert(res->pci_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert pci reset\n");
+		return ret;
+	}
+
+	ret = reset_control_deassert(res->por_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert por reset\n");
+		return ret;
+	}
+
+	ret = reset_control_deassert(res->axi_reset);
+	if (ret) {
+		dev_err(dev, "cannot deassert axi reset\n");
+		return ret;
+	}
+
+	/* wait for clock acquisition */
+	usleep_range(1000, 1500);
+
+	return 0;
+
+err_deassert_ahb:
+	clk_disable_unprepare(res->core_clk);
+err_clk_core:
+	clk_disable_unprepare(res->phy_clk);
+err_clk_phy:
+	clk_disable_unprepare(res->iface_clk);
+err_assert_ahb:
+	regulator_disable(res->vdda_phy);
+err_vdda_phy:
+	regulator_disable(res->vdda_refclk);
+err_refclk:
+	regulator_disable(res->vdda);
+
+	return ret;
+}
+
+static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
+
+	reset_control_assert(res->core);
+	clk_disable_unprepare(res->slave_bus);
+	clk_disable_unprepare(res->master_bus);
+	clk_disable_unprepare(res->iface);
+	clk_disable_unprepare(res->aux);
+	regulator_disable(res->vdda);
+}
+
+static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
+	struct device *dev = pcie->dev;
+	int ret;
+
+	ret = reset_control_deassert(res->core);
+	if (ret) {
+		dev_err(dev, "cannot deassert core reset\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(res->aux);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable aux clock\n");
+		goto err_res;
+	}
+
+	ret = clk_prepare_enable(res->iface);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable iface clock\n");
+		goto err_aux;
+	}
+
+	ret = clk_prepare_enable(res->master_bus);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable master_bus clock\n");
+		goto err_iface;
+	}
+
+	ret = clk_prepare_enable(res->slave_bus);
+	if (ret) {
+		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
+		goto err_master;
+	}
+
+	ret = regulator_enable(res->vdda);
+	if (ret) {
+		dev_err(dev, "cannot enable vdda regulator\n");
+		goto err_slave;
+	}
+
+	/* change DBI base address */
+	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+
+		val |= BIT(31);
+		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
+	}
+
+	return 0;
+err_slave:
+	clk_disable_unprepare(res->slave_bus);
+err_master:
+	clk_disable_unprepare(res->master_bus);
+err_iface:
+	clk_disable_unprepare(res->iface);
+err_aux:
+	clk_disable_unprepare(res->aux);
+err_res:
+	reset_control_assert(res->core);
+
+	return ret;
+}
+
+static int qcom_pcie_link_up(struct pcie_port *pp)
+{
+	struct qcom_pcie *pcie = to_qcom_pcie(pp);
+	u16 val = readw(pcie->dbi + PCIE20_CAP + PCI_EXP_LNKSTA);
+
+	return !!(val & PCI_EXP_LNKSTA_DLLLA);
+}
+
+static void qcom_pcie_host_init(struct pcie_port *pp)
+{
+	struct qcom_pcie *pcie = to_qcom_pcie(pp);
+	int ret;
+
+	qcom_ep_reset_assert(pcie);
+
+	ret = pcie->ops->init(pcie);
+	if (ret)
+		goto err_deinit;
+
+	ret = phy_power_on(pcie->phy);
+	if (ret)
+		goto err_deinit;
+
+	dw_pcie_setup_rc(pp);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(pp);
+
+	qcom_ep_reset_deassert(pcie);
+
+	ret = qcom_pcie_establish_link(pcie);
+	if (ret)
+		goto err;
+
+	return;
+err:
+	qcom_ep_reset_assert(pcie);
+	phy_power_off(pcie->phy);
+err_deinit:
+	pcie->ops->deinit(pcie);
+}
+
+static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+				 u32 *val)
+{
+	/* the device class is not reported correctly from the register */
+	if (where == PCI_CLASS_REVISION && size == 4) {
+		*val = readl(pp->dbi_base + PCI_CLASS_REVISION);
+		*val &= 0xff;	/* keep revision id */
+		*val |= PCI_CLASS_BRIDGE_PCI << 16;
+		return PCIBIOS_SUCCESSFUL;
+	}
+
+	return dw_pcie_cfg_read(pp->dbi_base + where, size, val);
+}
+
+static struct pcie_host_ops qcom_pcie_dw_ops = {
+	.link_up = qcom_pcie_link_up,
+	.host_init = qcom_pcie_host_init,
+	.rd_own_conf = qcom_pcie_rd_own_conf,
+};
+
+static const struct qcom_pcie_ops ops_v0 = {
+	.get_resources = qcom_pcie_get_resources_v0,
+	.init = qcom_pcie_init_v0,
+	.deinit = qcom_pcie_deinit_v0,
+};
+
+static const struct qcom_pcie_ops ops_v1 = {
+	.get_resources = qcom_pcie_get_resources_v1,
+	.init = qcom_pcie_init_v1,
+	.deinit = qcom_pcie_deinit_v1,
+};
+
+static int qcom_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct qcom_pcie *pcie;
+	struct pcie_port *pp;
+	int ret;
+
+	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);
+	pcie->dev = dev;
+
+	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
+	if (IS_ERR(pcie->reset))
+		return PTR_ERR(pcie->reset);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
+	pcie->parf = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pcie->parf))
+		return PTR_ERR(pcie->parf);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+	pcie->dbi = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pcie->dbi))
+		return PTR_ERR(pcie->dbi);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+	pcie->elbi = devm_ioremap_resource(dev, res);
+	if (IS_ERR(pcie->elbi))
+		return PTR_ERR(pcie->elbi);
+
+	pcie->phy = devm_phy_optional_get(dev, "pciephy");
+	if (IS_ERR(pcie->phy))
+		return PTR_ERR(pcie->phy);
+
+	ret = pcie->ops->get_resources(pcie);
+	if (ret)
+		return ret;
+
+	pp = &pcie->pp;
+	pp->dev = dev;
+	pp->dbi_base = pcie->dbi;
+	pp->root_bus_nr = -1;
+	pp->ops = &qcom_pcie_dw_ops;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+		if (pp->msi_irq < 0)
+			return pp->msi_irq;
+
+		ret = devm_request_irq(dev, pp->msi_irq,
+				       qcom_pcie_msi_irq_handler,
+				       IRQF_SHARED, "qcom-pcie-msi", pp);
+		if (ret) {
+			dev_err(dev, "cannot request msi irq\n");
+			return ret;
+		}
+	}
+
+	ret = phy_init(pcie->phy);
+	if (ret)
+		return ret;
+
+	ret = dw_pcie_host_init(pp);
+	if (ret) {
+		dev_err(dev, "cannot initialize host\n");
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, pcie);
+
+	return 0;
+}
+
+static int qcom_pcie_remove(struct platform_device *pdev)
+{
+	struct qcom_pcie *pcie = platform_get_drvdata(pdev);
+
+	qcom_ep_reset_assert(pcie);
+	phy_power_off(pcie->phy);
+	phy_exit(pcie->phy);
+	pcie->ops->deinit(pcie);
+
+	return 0;
+}
+
+static const struct of_device_id qcom_pcie_match[] = {
+	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
+	{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
"qcom,pcie-apq8084", .data = &ops_v1 }, + { } +}; +MODULE_DEVICE_TABLE(of, qcom_pcie_match); + +static struct platform_driver qcom_pcie_driver = { + .probe = qcom_pcie_probe, + .remove = qcom_pcie_remove, + .driver = { + .name = "qcom-pcie", + .of_match_table = qcom_pcie_match, + }, +}; + +module_platform_driver(qcom_pcie_driver); + +MODULE_AUTHOR("Stanimir Varbanov <svarbanov@mm-sol.com>"); +MODULE_DESCRIPTION("Qualcomm PCIe root complex driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c index 414c33686..4edb5181f 100644 --- a/drivers/pci/host/pcie-rcar.c +++ b/drivers/pci/host/pcie-rcar.c @@ -26,6 +26,7 @@ #include <linux/of_platform.h> #include <linux/pci.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/slab.h> #define DRV_NAME "rcar-pcie" @@ -94,6 +95,11 @@ #define H1_PCIEPHYDOUTR 0x040014 #define H1_PCIEPHYSR 0x040018 +/* R-Car Gen2 PHY */ +#define GEN2_PCIEPHYADDR 0x780 +#define GEN2_PCIEPHYDATA 0x784 +#define GEN2_PCIEPHYCTRL 0x78c + #define INT_PCI_MSI_NR 32 #define RCONF(x) (PCICONF(0)+(x)) @@ -108,8 +114,6 @@ #define RCAR_PCI_MAX_RESOURCES 4 #define MAX_NR_INBOUND_MAPS 6 -static unsigned long global_io_offset; - struct rcar_msi { DECLARE_BITMAP(used, INT_PCI_MSI_NR); struct irq_domain *domain; @@ -126,20 +130,10 @@ static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip) } /* Structure representing the PCIe interface */ -/* - * ARM pcibios functions expect the ARM struct pci_sys_data as the PCI - * sysdata. Add pci_sys_data as the first element in struct gen_pci so - * that when we use a gen_pci pointer as sysdata, it is also a pointer to - * a struct pci_sys_data. - */ struct rcar_pcie { -#ifdef CONFIG_ARM - struct pci_sys_data sys; -#endif struct device *dev; void __iomem *base; - struct resource res[RCAR_PCI_MAX_RESOURCES]; - struct resource busn; + struct list_head resources; int root_bus_nr; struct clk *clk; struct clk *bus_clk; @@ -323,10 +317,9 @@ static struct pci_ops rcar_pcie_ops = { .write = rcar_pcie_write_conf, }; -static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie) +static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, + struct resource *res) { - struct resource *res = &pcie->res[win]; - /* Setup PCIe address space mappings for each resource */ resource_size_t size; resource_size_t res_start; @@ -359,31 +352,33 @@ static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie) rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win)); } -static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pcie) +static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci) { - struct resource *res; - int i; - - pcie->root_bus_nr = pcie->busn.start; + struct resource_entry *win; + int i = 0; /* Setup PCI resources */ - for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) { + resource_list_for_each_entry(win, &pci->resources) { + struct resource *res = win->res; - res = &pcie->res[i]; if (!res->flags) continue; - rcar_pcie_setup_window(i, pcie); - - if (res->flags & IORESOURCE_IO) { - phys_addr_t io_start = pci_pio_to_address(res->start); - pci_ioremap_io(global_io_offset, io_start); - global_io_offset += SZ_64K; + switch (resource_type(res)) { + case IORESOURCE_IO: + case IORESOURCE_MEM: + rcar_pcie_setup_window(i, pci, res); + i++; + break; + case IORESOURCE_BUS: + pci->root_bus_nr = res->start; + break; + default: + continue; } pci_add_resource(resource, res); } - pci_add_resource(resource, &pcie->busn); return 1; } @@ 
-578,6 +573,26 @@ static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie) return -ETIMEDOUT; } +static int rcar_pcie_hw_init_gen2(struct rcar_pcie *pcie) +{ + /* + * These settings come from the R-Car Series, 2nd Generation User's + * Manual, section 50.3.1 (2) Initialization of the physical layer. + */ + rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR); + rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA); + rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL); + rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL); + + rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR); + /* The following value is for DC connection, no termination resistor */ + rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA); + rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL); + rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL); + + return rcar_pcie_hw_init(pcie); +} + static int rcar_msi_alloc(struct rcar_msi *chip) { int msi; @@ -919,20 +934,71 @@ static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie, static const struct of_device_id rcar_pcie_of_match[] = { { .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 }, - { .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_hw_init }, - { .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_hw_init }, + { .compatible = "renesas,pcie-rcar-gen2", .data = rcar_pcie_hw_init_gen2 }, + { .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_hw_init_gen2 }, + { .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_hw_init_gen2 }, + { .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init }, {}, }; MODULE_DEVICE_TABLE(of, rcar_pcie_of_match); +static void rcar_pcie_release_of_pci_ranges(struct rcar_pcie *pci) +{ + pci_free_resource_list(&pci->resources); +} + +static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci) +{ + int err; + struct device *dev = pci->dev; + struct device_node *np = dev->of_node; + resource_size_t iobase; + struct resource_entry *win; + + err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, &iobase); + if (err) + return err; + + resource_list_for_each_entry(win, &pci->resources) { + struct resource *parent, *res = win->res; + + switch (resource_type(res)) { + case IORESOURCE_IO: + parent = &ioport_resource; + err = pci_remap_iospace(res, iobase); + if (err) { + dev_warn(dev, "error %d: failed to map resource %pR\n", + err, res); + continue; + } + break; + case IORESOURCE_MEM: + parent = &iomem_resource; + break; + + case IORESOURCE_BUS: + default: + continue; + } + + err = devm_request_resource(dev, parent, res); + if (err) + goto out_release_res; + } + + return 0; + +out_release_res: + rcar_pcie_release_of_pci_ranges(pci); + return err; +} + static int rcar_pcie_probe(struct platform_device *pdev) { struct rcar_pcie *pcie; unsigned int data; - struct of_pci_range range; - struct of_pci_range_parser parser; const struct of_device_id *of_id; - int err, win = 0; + int err; int (*hw_init_fn)(struct rcar_pcie *); pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); @@ -942,16 +1008,9 @@ static int rcar_pcie_probe(struct platform_device *pdev) pcie->dev = &pdev->dev; platform_set_drvdata(pdev, pcie); - /* Get the bus range */ - if (of_pci_parse_bus_range(pdev->dev.of_node, &pcie->busn)) { - dev_err(&pdev->dev, "failed to parse bus-range property\n"); - return -EINVAL; - } + INIT_LIST_HEAD(&pcie->resources); - if (of_pci_range_parser_init(&parser, pdev->dev.of_node)) { - dev_err(&pdev->dev, "missing ranges property\n"); - return -EINVAL; - } + 
rcar_pcie_parse_request_of_pci_ranges(pcie); err = rcar_pcie_get_resources(pdev, pcie); if (err < 0) { @@ -959,46 +1018,55 @@ static int rcar_pcie_probe(struct platform_device *pdev) return err; } - for_each_of_pci_range(&parser, &range) { - err = of_pci_range_to_resource(&range, pdev->dev.of_node, - &pcie->res[win++]); - if (err < 0) - return err; - - if (win > RCAR_PCI_MAX_RESOURCES) - break; - } - err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node); if (err) return err; - if (IS_ENABLED(CONFIG_PCI_MSI)) { - err = rcar_pcie_enable_msi(pcie); - if (err < 0) { - dev_err(&pdev->dev, - "failed to enable MSI support: %d\n", - err); - return err; - } - } - of_id = of_match_device(rcar_pcie_of_match, pcie->dev); if (!of_id || !of_id->data) return -EINVAL; hw_init_fn = of_id->data; + pm_runtime_enable(pcie->dev); + err = pm_runtime_get_sync(pcie->dev); + if (err < 0) { + dev_err(pcie->dev, "pm_runtime_get_sync failed\n"); + goto err_pm_disable; + } + /* Failure to get a link might just be that no cards are inserted */ err = hw_init_fn(pcie); if (err) { dev_info(&pdev->dev, "PCIe link down\n"); - return 0; + err = 0; + goto err_pm_put; } data = rcar_pci_read_reg(pcie, MACSR); dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f); - return rcar_pcie_enable(pcie); + if (IS_ENABLED(CONFIG_PCI_MSI)) { + err = rcar_pcie_enable_msi(pcie); + if (err < 0) { + dev_err(&pdev->dev, + "failed to enable MSI support: %d\n", + err); + goto err_pm_put; + } + } + + err = rcar_pcie_enable(pcie); + if (err) + goto err_pm_put; + + return 0; + +err_pm_put: + pm_runtime_put(pcie->dev); + +err_pm_disable: + pm_runtime_disable(pcie->dev); + return err; } static struct platform_driver rcar_pcie_driver = { |