author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-09-11 04:34:46 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-09-11 04:34:46 -0300
commit     863981e96738983919de841ec669e157e6bdaeb0 (patch)
tree       d6d89a12e7eb8017837c057935a2271290907f76 /drivers/pci
parent     8dec7c70575785729a6a9e6719a955e9c545bcab (diff)
Linux-libre 4.7.1-gnupck-4.7.1-gnu
Diffstat (limited to 'drivers/pci')
37 files changed, 1738 insertions, 456 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 209292e06..56389be5d 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -83,6 +83,9 @@ config HT_IRQ config PCI_ATS bool +config PCI_ECAM + bool + config PCI_IOV bool "PCI IOV support" depends on PCI diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 2154092dd..1fa692573 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -55,6 +55,8 @@ obj-$(CONFIG_PCI_SYSCALL) += syscall.o obj-$(CONFIG_PCI_STUB) += pci-stub.o +obj-$(CONFIG_PCI_ECAM) += ecam.o + obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o obj-$(CONFIG_OF) += of.o diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c new file mode 100644 index 000000000..f9832ad8e --- /dev/null +++ b/drivers/pci/ecam.c @@ -0,0 +1,164 @@ +/* + * Copyright 2016 Broadcom + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation (the "GPL"). + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 (GPLv2) for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 (GPLv2) along with this source code. + */ + +#include <linux/device.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/slab.h> + +#include "ecam.h" + +/* + * On 64-bit systems, we do a single ioremap for the whole config space + * since we have enough virtual address range available. On 32-bit, we + * ioremap the config space for each bus individually. 
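As a rough standalone sketch (not part of this commit), the address arithmetic that pci_ecam_map_bus() implements further down in this file looks like the following, assuming the generic bus_shift of 20 (1 MiB per bus, 4 KiB per function); the ecam_offset() helper and its standalone form are hypothetical:

#include <stdint.h>

/* Offset of a function's config space inside an ECAM region whose first
 * bus is bus 0. The driver below computes the same value with the bus
 * number taken relative to cfg->busr.start (and, on 32-bit builds, with
 * the bus term replaced by selecting the per-bus mapping). */
static uint64_t ecam_offset(uint8_t bus, uint8_t devfn, uint16_t where,
                            unsigned int bus_shift)
{
        unsigned int devfn_shift = bus_shift - 8;   /* 12 for bus_shift 20 */

        return ((uint64_t)bus << bus_shift) |
               ((uint64_t)devfn << devfn_shift) |
               where;
}

/* Example: bus 1, device 2, function 0 (devfn = 0x10), where = 0x04
 * with bus_shift = 20 yields offset 0x110004. */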
+ */ +static const bool per_bus_mapping = !config_enabled(CONFIG_64BIT); + +/* + * Create a PCI config space window + * - reserve mem region + * - alloc struct pci_config_window with space for all mappings + * - ioremap the config space + */ +struct pci_config_window *pci_ecam_create(struct device *dev, + struct resource *cfgres, struct resource *busr, + struct pci_ecam_ops *ops) +{ + struct pci_config_window *cfg; + unsigned int bus_range, bus_range_max, bsz; + struct resource *conflict; + int i, err; + + if (busr->start > busr->end) + return ERR_PTR(-EINVAL); + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) + return ERR_PTR(-ENOMEM); + + cfg->ops = ops; + cfg->busr.start = busr->start; + cfg->busr.end = busr->end; + cfg->busr.flags = IORESOURCE_BUS; + bus_range = resource_size(&cfg->busr); + bus_range_max = resource_size(cfgres) >> ops->bus_shift; + if (bus_range > bus_range_max) { + bus_range = bus_range_max; + cfg->busr.end = busr->start + bus_range - 1; + dev_warn(dev, "ECAM area %pR can only accommodate %pR (reduced from %pR desired)\n", + cfgres, &cfg->busr, busr); + } + bsz = 1 << ops->bus_shift; + + cfg->res.start = cfgres->start; + cfg->res.end = cfgres->end; + cfg->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; + cfg->res.name = "PCI ECAM"; + + conflict = request_resource_conflict(&iomem_resource, &cfg->res); + if (conflict) { + err = -EBUSY; + dev_err(dev, "can't claim ECAM area %pR: address conflict with %s %pR\n", + &cfg->res, conflict->name, conflict); + goto err_exit; + } + + if (per_bus_mapping) { + cfg->winp = kcalloc(bus_range, sizeof(*cfg->winp), GFP_KERNEL); + if (!cfg->winp) + goto err_exit_malloc; + for (i = 0; i < bus_range; i++) { + cfg->winp[i] = ioremap(cfgres->start + i * bsz, bsz); + if (!cfg->winp[i]) + goto err_exit_iomap; + } + } else { + cfg->win = ioremap(cfgres->start, bus_range * bsz); + if (!cfg->win) + goto err_exit_iomap; + } + + if (ops->init) { + err = ops->init(dev, cfg); + if (err) + goto err_exit; + } + dev_info(dev, "ECAM at %pR for %pR\n", &cfg->res, &cfg->busr); + return cfg; + +err_exit_iomap: + dev_err(dev, "ECAM ioremap failed\n"); +err_exit_malloc: + err = -ENOMEM; +err_exit: + pci_ecam_free(cfg); + return ERR_PTR(err); +} + +void pci_ecam_free(struct pci_config_window *cfg) +{ + int i; + + if (per_bus_mapping) { + if (cfg->winp) { + for (i = 0; i < resource_size(&cfg->busr); i++) + if (cfg->winp[i]) + iounmap(cfg->winp[i]); + kfree(cfg->winp); + } + } else { + if (cfg->win) + iounmap(cfg->win); + } + if (cfg->res.parent) + release_resource(&cfg->res); + kfree(cfg); +} + +/* + * Function to implement the pci_ops ->map_bus method + */ +void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) +{ + struct pci_config_window *cfg = bus->sysdata; + unsigned int devfn_shift = cfg->ops->bus_shift - 8; + unsigned int busn = bus->number; + void __iomem *base; + + if (busn < cfg->busr.start || busn > cfg->busr.end) + return NULL; + + busn -= cfg->busr.start; + if (per_bus_mapping) + base = cfg->winp[busn]; + else + base = cfg->win + (busn << cfg->ops->bus_shift); + return base + (devfn << devfn_shift) + where; +} + +/* ECAM ops */ +struct pci_ecam_ops pci_generic_ecam_ops = { + .bus_shift = 20, + .pci_ops = { + .map_bus = pci_ecam_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, + } +}; diff --git a/drivers/pci/ecam.h b/drivers/pci/ecam.h new file mode 100644 index 000000000..9878bebd4 --- /dev/null +++ b/drivers/pci/ecam.h @@ -0,0 +1,67 @@ +/* + * Copyright 2016 Broadcom + * + * This 
program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation (the "GPL"). + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 (GPLv2) for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 (GPLv2) along with this source code. + */ +#ifndef DRIVERS_PCI_ECAM_H +#define DRIVERS_PCI_ECAM_H + +#include <linux/kernel.h> +#include <linux/platform_device.h> + +/* + * struct to hold pci ops and bus shift of the config window + * for a PCI controller. + */ +struct pci_config_window; +struct pci_ecam_ops { + unsigned int bus_shift; + struct pci_ops pci_ops; + int (*init)(struct device *, + struct pci_config_window *); +}; + +/* + * struct to hold the mappings of a config space window. This + * is expected to be used as sysdata for PCI controllers that + * use ECAM. + */ +struct pci_config_window { + struct resource res; + struct resource busr; + void *priv; + struct pci_ecam_ops *ops; + union { + void __iomem *win; /* 64-bit single mapping */ + void __iomem **winp; /* 32-bit per-bus mapping */ + }; +}; + +/* create and free pci_config_window */ +struct pci_config_window *pci_ecam_create(struct device *dev, + struct resource *cfgres, struct resource *busr, + struct pci_ecam_ops *ops); +void pci_ecam_free(struct pci_config_window *cfg); + +/* map_bus when ->sysdata is an instance of pci_config_window */ +void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn, + int where); +/* default ECAM ops */ +extern struct pci_ecam_ops pci_generic_ecam_ops; + +#ifdef CONFIG_PCI_HOST_GENERIC +/* for DT-based PCI controllers that support ECAM */ +int pci_host_common_probe(struct platform_device *pdev, + struct pci_ecam_ops *ops); +#endif +#endif diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 7a0780d56..5d2374e4e 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -69,14 +69,17 @@ config PCI_RCAR_GEN2 There are 3 internal PCI controllers available with a single built-in EHCI/OHCI host controller present on each one. -config PCI_RCAR_GEN2_PCIE +config PCIE_RCAR bool "Renesas R-Car PCIe controller" depends on ARCH_RENESAS || (ARM && COMPILE_TEST) + select PCI_MSI + select PCI_MSI_IRQ_DOMAIN help - Say Y here if you want PCIe controller support on R-Car Gen2 SoCs. + Say Y here if you want PCIe controller support on R-Car SoCs. config PCI_HOST_COMMON bool + select PCI_ECAM config PCI_HOST_GENERIC bool "Generic PCI host controller" @@ -231,4 +234,15 @@ config PCI_HOST_THUNDER_ECAM help Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs. +config PCIE_ARMADA_8K + bool "Marvell Armada-8K PCIe controller" + depends on ARCH_MVEBU + select PCIE_DW + select PCIEPORTBUS + help + Say Y here if you want to enable PCIe controller support on + Armada-8K SoCs. The PCIe controller on Armada-8K is based on + Designware hardware and therefore the driver re-uses the + Designware core functions to implement the driver. 
+ endmenu diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index d85b5faf9..9c8698e89 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o -obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o +obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o @@ -28,3 +28,4 @@ obj-$(CONFIG_PCI_HISI) += pcie-hisi.o obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o obj-$(CONFIG_PCI_HOST_THUNDER_ECAM) += pci-thunder-ecam.o obj-$(CONFIG_PCI_HOST_THUNDER_PEM) += pci-thunder-pem.o +obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c index 2ca3a1f30..f44113040 100644 --- a/drivers/pci/host/pci-dra7xx.c +++ b/drivers/pci/host/pci-dra7xx.c @@ -142,13 +142,13 @@ static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp) static void dra7xx_pcie_host_init(struct pcie_port *pp) { - dw_pcie_setup_rc(pp); - pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR; pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR; pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR; pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR; + dw_pcie_setup_rc(pp); + dra7xx_pcie_establish_link(pp); if (IS_ENABLED(CONFIG_PCI_MSI)) dw_pcie_msi_init(pp); diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c index e9f850f07..8cba7ab73 100644 --- a/drivers/pci/host/pci-host-common.c +++ b/drivers/pci/host/pci-host-common.c @@ -22,27 +22,21 @@ #include <linux/of_pci.h> #include <linux/platform_device.h> -#include "pci-host-common.h" +#include "../ecam.h" -static void gen_pci_release_of_pci_ranges(struct gen_pci *pci) -{ - pci_free_resource_list(&pci->resources); -} - -static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci) +static int gen_pci_parse_request_of_pci_ranges(struct device *dev, + struct list_head *resources, struct resource **bus_range) { int err, res_valid = 0; - struct device *dev = pci->host.dev.parent; struct device_node *np = dev->of_node; resource_size_t iobase; struct resource_entry *win; - err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, - &iobase); + err = of_pci_get_host_bridge_resources(np, 0, 0xff, resources, &iobase); if (err) return err; - resource_list_for_each_entry(win, &pci->resources) { + resource_list_for_each_entry(win, resources) { struct resource *parent, *res = win->res; switch (resource_type(res)) { @@ -60,7 +54,7 @@ static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci) res_valid |= !(res->flags & IORESOURCE_PREFETCH); break; case IORESOURCE_BUS: - pci->cfg.bus_range = res; + *bus_range = res; default: continue; } @@ -79,65 +73,60 @@ static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci) return 0; out_release_res: - gen_pci_release_of_pci_ranges(pci); return err; } -static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci) +static void gen_pci_unmap_cfg(void *ptr) +{ + pci_ecam_free((struct pci_config_window *)ptr); +} + +static struct pci_config_window *gen_pci_init(struct device *dev, + struct list_head *resources, struct pci_ecam_ops *ops) { int err; - u8 bus_max; - resource_size_t busn; - struct resource *bus_range; - struct device *dev = pci->host.dev.parent; - struct device_node *np = dev->of_node; - u32 sz = 1 << pci->cfg.ops->bus_shift; + struct resource 
cfgres; + struct resource *bus_range = NULL; + struct pci_config_window *cfg; - err = of_address_to_resource(np, 0, &pci->cfg.res); + /* Parse our PCI ranges and request their resources */ + err = gen_pci_parse_request_of_pci_ranges(dev, resources, &bus_range); + if (err) + goto err_out; + + err = of_address_to_resource(dev->of_node, 0, &cfgres); if (err) { dev_err(dev, "missing \"reg\" property\n"); - return err; + goto err_out; } - /* Limit the bus-range to fit within reg */ - bus_max = pci->cfg.bus_range->start + - (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1; - pci->cfg.bus_range->end = min_t(resource_size_t, - pci->cfg.bus_range->end, bus_max); - - pci->cfg.win = devm_kcalloc(dev, resource_size(pci->cfg.bus_range), - sizeof(*pci->cfg.win), GFP_KERNEL); - if (!pci->cfg.win) - return -ENOMEM; - - /* Map our Configuration Space windows */ - if (!devm_request_mem_region(dev, pci->cfg.res.start, - resource_size(&pci->cfg.res), - "Configuration Space")) - return -ENOMEM; - - bus_range = pci->cfg.bus_range; - for (busn = bus_range->start; busn <= bus_range->end; ++busn) { - u32 idx = busn - bus_range->start; - - pci->cfg.win[idx] = devm_ioremap(dev, - pci->cfg.res.start + idx * sz, - sz); - if (!pci->cfg.win[idx]) - return -ENOMEM; + cfg = pci_ecam_create(dev, &cfgres, bus_range, ops); + if (IS_ERR(cfg)) { + err = PTR_ERR(cfg); + goto err_out; } - return 0; + err = devm_add_action(dev, gen_pci_unmap_cfg, cfg); + if (err) { + gen_pci_unmap_cfg(cfg); + goto err_out; + } + return cfg; + +err_out: + pci_free_resource_list(resources); + return ERR_PTR(err); } int pci_host_common_probe(struct platform_device *pdev, - struct gen_pci *pci) + struct pci_ecam_ops *ops) { - int err; const char *type; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct pci_bus *bus, *child; + struct pci_config_window *cfg; + struct list_head resources; type = of_get_property(np, "device_type", NULL); if (!type || strcmp(type, "pci")) { @@ -147,29 +136,18 @@ int pci_host_common_probe(struct platform_device *pdev, of_pci_check_probe_only(); - pci->host.dev.parent = dev; - INIT_LIST_HEAD(&pci->host.windows); - INIT_LIST_HEAD(&pci->resources); - - /* Parse our PCI ranges and request their resources */ - err = gen_pci_parse_request_of_pci_ranges(pci); - if (err) - return err; - /* Parse and map our Configuration Space windows */ - err = gen_pci_parse_map_cfg_windows(pci); - if (err) { - gen_pci_release_of_pci_ranges(pci); - return err; - } + INIT_LIST_HEAD(&resources); + cfg = gen_pci_init(dev, &resources, ops); + if (IS_ERR(cfg)) + return PTR_ERR(cfg); /* Do not reassign resources if probe only */ if (!pci_has_flag(PCI_PROBE_ONLY)) pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); - - bus = pci_scan_root_bus(dev, pci->cfg.bus_range->start, - &pci->cfg.ops->ops, pci, &pci->resources); + bus = pci_scan_root_bus(dev, cfg->busr.start, &ops->pci_ops, cfg, + &resources); if (!bus) { dev_err(dev, "Scanning rootbus failed"); return -ENODEV; diff --git a/drivers/pci/host/pci-host-common.h b/drivers/pci/host/pci-host-common.h deleted file mode 100644 index 09f3fa0a5..000000000 --- a/drivers/pci/host/pci-host-common.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
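As a hedged illustration of the new interface (not code from this commit): after this refactoring a DT-based host controller driver hands pci_host_common_probe() a struct pci_ecam_ops instead of a driver-private struct gen_pci, exactly as pci-host-generic.c and the Thunder drivers below now do. The my_pcie_* names are hypothetical; a driver with no special config accessors can simply pass &pci_generic_ecam_ops.

static struct pci_ecam_ops my_pcie_ecam_ops = {
        .bus_shift      = 20,           /* standard ECAM layout */
        .pci_ops        = {
                .map_bus        = pci_ecam_map_bus,
                .read           = pci_generic_config_read,
                .write          = pci_generic_config_write,
        }
};

static int my_pcie_probe(struct platform_device *pdev)
{
        /* Parses the DT ranges, maps the ECAM window and scans the root bus. */
        return pci_host_common_probe(pdev, &my_pcie_ecam_ops);
}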
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - * Copyright (C) 2014 ARM Limited - * - * Author: Will Deacon <will.deacon@arm.com> - */ - -#ifndef _PCI_HOST_COMMON_H -#define _PCI_HOST_COMMON_H - -#include <linux/kernel.h> -#include <linux/platform_device.h> - -struct gen_pci_cfg_bus_ops { - u32 bus_shift; - struct pci_ops ops; -}; - -struct gen_pci_cfg_windows { - struct resource res; - struct resource *bus_range; - void __iomem **win; - - struct gen_pci_cfg_bus_ops *ops; -}; - -struct gen_pci { - struct pci_host_bridge host; - struct gen_pci_cfg_windows cfg; - struct list_head resources; -}; - -int pci_host_common_probe(struct platform_device *pdev, - struct gen_pci *pci); - -#endif /* _PCI_HOST_COMMON_H */ diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c index e8aa78faa..6eaceab1b 100644 --- a/drivers/pci/host/pci-host-generic.c +++ b/drivers/pci/host/pci-host-generic.c @@ -25,41 +25,12 @@ #include <linux/of_pci.h> #include <linux/platform_device.h> -#include "pci-host-common.h" +#include "../ecam.h" -static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus, - unsigned int devfn, - int where) -{ - struct gen_pci *pci = bus->sysdata; - resource_size_t idx = bus->number - pci->cfg.bus_range->start; - - return pci->cfg.win[idx] + ((devfn << 8) | where); -} - -static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = { +static struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = { .bus_shift = 16, - .ops = { - .map_bus = gen_pci_map_cfg_bus_cam, - .read = pci_generic_config_read, - .write = pci_generic_config_write, - } -}; - -static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus, - unsigned int devfn, - int where) -{ - struct gen_pci *pci = bus->sysdata; - resource_size_t idx = bus->number - pci->cfg.bus_range->start; - - return pci->cfg.win[idx] + ((devfn << 12) | where); -} - -static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = { - .bus_shift = 20, - .ops = { - .map_bus = gen_pci_map_cfg_bus_ecam, + .pci_ops = { + .map_bus = pci_ecam_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, } @@ -70,25 +41,22 @@ static const struct of_device_id gen_pci_of_match[] = { .data = &gen_pci_cfg_cam_bus_ops }, { .compatible = "pci-host-ecam-generic", - .data = &gen_pci_cfg_ecam_bus_ops }, + .data = &pci_generic_ecam_ops }, { }, }; + MODULE_DEVICE_TABLE(of, gen_pci_of_match); static int gen_pci_probe(struct platform_device *pdev) { - struct device *dev = &pdev->dev; const struct of_device_id *of_id; - struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - - if (!pci) - return -ENOMEM; + struct pci_ecam_ops *ops; - of_id = of_match_node(gen_pci_of_match, dev->of_node); - pci->cfg.ops = (struct gen_pci_cfg_bus_ops *)of_id->data; + of_id = of_match_node(gen_pci_of_match, pdev->dev.of_node); + ops = (struct pci_ecam_ops *)of_id->data; - return pci_host_common_probe(pdev, pci); + return pci_host_common_probe(pdev, ops); } static struct platform_driver gen_pci_driver = { diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index ed651baa7..7e9b2de2a 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -553,6 
+553,8 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where, spin_lock_irqsave(&hpdev->hbus->config_lock, flags); /* Choose the function to be read. (See comment above) */ writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); + /* Make sure the function was chosen before we start reading. */ + mb(); /* Read from that function's config space. */ switch (size) { case 1: @@ -565,6 +567,11 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where, *val = readl(addr); break; } + /* + * Make sure the write was done before we release the spinlock + * allowing consecutive reads/writes. + */ + mb(); spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); } else { dev_err(&hpdev->hbus->hdev->device, @@ -592,6 +599,8 @@ static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where, spin_lock_irqsave(&hpdev->hbus->config_lock, flags); /* Choose the function to be written. (See comment above) */ writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); + /* Make sure the function was chosen before we start writing. */ + wmb(); /* Write to that function's config space. */ switch (size) { case 1: @@ -604,6 +613,11 @@ static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where, writel(val, addr); break; } + /* + * Make sure the write was done before we release the spinlock + * allowing consecutive reads/writes. + */ + mb(); spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); } else { dev_err(&hpdev->hbus->hdev->device, @@ -1795,14 +1809,14 @@ static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus) if (hbus->low_mmio_space && hbus->low_mmio_res) { hbus->low_mmio_res->flags |= IORESOURCE_BUSY; - release_mem_region(hbus->low_mmio_res->start, - resource_size(hbus->low_mmio_res)); + vmbus_free_mmio(hbus->low_mmio_res->start, + resource_size(hbus->low_mmio_res)); } if (hbus->high_mmio_space && hbus->high_mmio_res) { hbus->high_mmio_res->flags |= IORESOURCE_BUSY; - release_mem_region(hbus->high_mmio_res->start, - resource_size(hbus->high_mmio_res)); + vmbus_free_mmio(hbus->high_mmio_res->start, + resource_size(hbus->high_mmio_res)); } } @@ -1880,8 +1894,8 @@ static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus) release_low_mmio: if (hbus->low_mmio_res) { - release_mem_region(hbus->low_mmio_res->start, - resource_size(hbus->low_mmio_res)); + vmbus_free_mmio(hbus->low_mmio_res->start, + resource_size(hbus->low_mmio_res)); } return ret; @@ -1924,7 +1938,7 @@ static int hv_allocate_config_window(struct hv_pcibus_device *hbus) static void hv_free_config_window(struct hv_pcibus_device *hbus) { - release_mem_region(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH); + vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH); } /** @@ -2268,11 +2282,6 @@ static int hv_pci_remove(struct hv_device *hdev) hbus = hv_get_drvdata(hdev); - ret = hv_send_resources_released(hdev); - if (ret) - dev_err(&hdev->device, - "Couldn't send resources released packet(s)\n"); - memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet)); init_completion(&comp_pkt.host_event); pkt.teardown_packet.completion_func = hv_pci_generic_compl; @@ -2295,6 +2304,11 @@ static int hv_pci_remove(struct hv_device *hdev) pci_unlock_rescan_remove(); } + ret = hv_send_resources_released(hdev); + if (ret) + dev_err(&hdev->device, + "Couldn't send resources released packet(s)\n"); + vmbus_close(hdev->channel); /* Delete any children which might still exist. 
*/ diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c index 2f817fa4c..b741a36a6 100644 --- a/drivers/pci/host/pci-imx6.c +++ b/drivers/pci/host/pci-imx6.c @@ -19,6 +19,7 @@ #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> #include <linux/module.h> #include <linux/of_gpio.h> +#include <linux/of_device.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/regmap.h> @@ -31,19 +32,29 @@ #define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp) +enum imx6_pcie_variants { + IMX6Q, + IMX6SX, + IMX6QP, +}; + struct imx6_pcie { int reset_gpio; + bool gpio_active_high; struct clk *pcie_bus; struct clk *pcie_phy; + struct clk *pcie_inbound_axi; struct clk *pcie; struct pcie_port pp; struct regmap *iomuxc_gpr; + enum imx6_pcie_variants variant; void __iomem *mem_base; u32 tx_deemph_gen1; u32 tx_deemph_gen2_3p5db; u32 tx_deemph_gen2_6db; u32 tx_swing_full; u32 tx_swing_low; + int link_gen; }; /* PCIe Root Complex registers (memory-mapped) */ @@ -236,37 +247,93 @@ static int imx6_pcie_assert_core_reset(struct pcie_port *pp) struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); u32 val, gpr1, gpr12; - /* - * If the bootloader already enabled the link we need some special - * handling to get the core back into a state where it is safe to - * touch it for configuration. As there is no dedicated reset signal - * wired up for MX6QDL, we need to manually force LTSSM into "detect" - * state before completely disabling LTSSM, which is a prerequisite - * for core configuration. - * - * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong - * indication that the bootloader activated the link. - */ - regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1); - regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12); + switch (imx6_pcie->variant) { + case IMX6SX: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6SX_GPR12_PCIE_TEST_POWERDOWN, + IMX6SX_GPR12_PCIE_TEST_POWERDOWN); + /* Force PCIe PHY reset */ + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, + IMX6SX_GPR5_PCIE_BTNRST_RESET, + IMX6SX_GPR5_PCIE_BTNRST_RESET); + break; + case IMX6QP: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_SW_RST, + IMX6Q_GPR1_PCIE_SW_RST); + break; + case IMX6Q: + /* + * If the bootloader already enabled the link we need some + * special handling to get the core back into a state where + * it is safe to touch it for configuration. As there is + * no dedicated reset signal wired up for MX6QDL, we need + * to manually force LTSSM into "detect" state before + * completely disabling LTSSM, which is a prerequisite for + * core configuration. + * + * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we + * have a strong indication that the bootloader activated + * the link. 
+ */ + regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1); + regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12); + + if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) && + (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) { + val = readl(pp->dbi_base + PCIE_PL_PFLR); + val &= ~PCIE_PL_PFLR_LINK_STATE_MASK; + val |= PCIE_PL_PFLR_FORCE_LINK; + writel(val, pp->dbi_base + PCIE_PL_PFLR); + + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); + } + + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); + break; + } + + return 0; +} + +static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) +{ + struct pcie_port *pp = &imx6_pcie->pp; + int ret = 0; - if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) && - (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) { - val = readl(pp->dbi_base + PCIE_PL_PFLR); - val &= ~PCIE_PL_PFLR_LINK_STATE_MASK; - val |= PCIE_PL_PFLR_FORCE_LINK; - writel(val, pp->dbi_base + PCIE_PL_PFLR); + switch (imx6_pcie->variant) { + case IMX6SX: + ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi); + if (ret) { + dev_err(pp->dev, "unable to enable pcie_axi clock\n"); + break; + } regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, - IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); + IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0); + break; + case IMX6QP: /* FALLTHROUGH */ + case IMX6Q: + /* power up core phy and enable ref clock */ + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18); + /* + * the async reset input need ref clock to sync internally, + * when the ref clock comes after reset, internal synced + * reset time is too short, cannot meet the requirement. + * add one ~10us delay here. + */ + udelay(10); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); + break; } - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); - - return 0; + return ret; } static int imx6_pcie_deassert_core_reset(struct pcie_port *pp) @@ -292,43 +359,60 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp) goto err_pcie; } - /* power up core phy and enable ref clock */ - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18); - /* - * the async reset input need ref clock to sync internally, - * when the ref clock comes after reset, internal synced - * reset time is too short, cannot meet the requirement. - * add one ~10us delay here. - */ - udelay(10); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, - IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); + ret = imx6_pcie_enable_ref_clk(imx6_pcie); + if (ret) { + dev_err(pp->dev, "unable to enable pcie ref clock\n"); + goto err_ref_clk; + } /* allow the clocks to stabilize */ usleep_range(200, 500); /* Some boards don't have PCIe reset GPIO. 
*/ if (gpio_is_valid(imx6_pcie->reset_gpio)) { - gpio_set_value_cansleep(imx6_pcie->reset_gpio, 0); + gpio_set_value_cansleep(imx6_pcie->reset_gpio, + imx6_pcie->gpio_active_high); msleep(100); - gpio_set_value_cansleep(imx6_pcie->reset_gpio, 1); + gpio_set_value_cansleep(imx6_pcie->reset_gpio, + !imx6_pcie->gpio_active_high); } + + switch (imx6_pcie->variant) { + case IMX6SX: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, + IMX6SX_GPR5_PCIE_BTNRST_RESET, 0); + break; + case IMX6QP: + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, + IMX6Q_GPR1_PCIE_SW_RST, 0); + + usleep_range(200, 500); + break; + case IMX6Q: /* Nothing to do */ + break; + } + return 0; +err_ref_clk: + clk_disable_unprepare(imx6_pcie->pcie); err_pcie: clk_disable_unprepare(imx6_pcie->pcie_bus); err_pcie_bus: clk_disable_unprepare(imx6_pcie->pcie_phy); err_pcie_phy: return ret; - } static void imx6_pcie_init_phy(struct pcie_port *pp) { struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); + if (imx6_pcie->variant == IMX6SX) + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, + IMX6SX_GPR12_PCIE_RX_EQ_MASK, + IMX6SX_GPR12_PCIE_RX_EQ_2); + regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); @@ -417,11 +501,15 @@ static int imx6_pcie_establish_link(struct pcie_port *pp) goto err_reset_phy; } - /* Allow Gen2 mode after the link is up. */ - tmp = readl(pp->dbi_base + PCIE_RC_LCR); - tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; - tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2; - writel(tmp, pp->dbi_base + PCIE_RC_LCR); + if (imx6_pcie->link_gen == 2) { + /* Allow Gen2 mode after the link is up. */ + tmp = readl(pp->dbi_base + PCIE_RC_LCR); + tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; + tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2; + writel(tmp, pp->dbi_base + PCIE_RC_LCR); + } else { + dev_info(pp->dev, "Link: Gen2 disabled\n"); + } /* * Start Directed Speed Change so the best possible speed both link @@ -445,8 +533,7 @@ static int imx6_pcie_establish_link(struct pcie_port *pp) } tmp = readl(pp->dbi_base + PCIE_RC_LCSR); - dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf); - + dev_info(pp->dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf); return 0; err_reset_phy: @@ -535,6 +622,9 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) pp = &imx6_pcie->pp; pp->dev = &pdev->dev; + imx6_pcie->variant = + (enum imx6_pcie_variants)of_device_get_match_data(&pdev->dev); + /* Added for PCI abort handling */ hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0, "imprecise external abort"); @@ -546,9 +636,14 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) /* Fetch GPIOs */ imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); + imx6_pcie->gpio_active_high = of_property_read_bool(np, + "reset-gpio-active-high"); if (gpio_is_valid(imx6_pcie->reset_gpio)) { ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio, - GPIOF_OUT_INIT_LOW, "PCIe reset"); + imx6_pcie->gpio_active_high ? 
+ GPIOF_OUT_INIT_HIGH : + GPIOF_OUT_INIT_LOW, + "PCIe reset"); if (ret) { dev_err(&pdev->dev, "unable to get reset gpio\n"); return ret; @@ -577,6 +672,16 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) return PTR_ERR(imx6_pcie->pcie); } + if (imx6_pcie->variant == IMX6SX) { + imx6_pcie->pcie_inbound_axi = devm_clk_get(&pdev->dev, + "pcie_inbound_axi"); + if (IS_ERR(imx6_pcie->pcie_inbound_axi)) { + dev_err(&pdev->dev, + "pcie_incbound_axi clock missing or invalid\n"); + return PTR_ERR(imx6_pcie->pcie_inbound_axi); + } + } + /* Grab GPR config register range */ imx6_pcie->iomuxc_gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); @@ -606,6 +711,12 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) &imx6_pcie->tx_swing_low)) imx6_pcie->tx_swing_low = 127; + /* Limit link speed */ + ret = of_property_read_u32(pp->dev->of_node, "fsl,max-link-speed", + &imx6_pcie->link_gen); + if (ret) + imx6_pcie->link_gen = 1; + ret = imx6_add_pcie_port(pp, pdev); if (ret < 0) return ret; @@ -623,7 +734,9 @@ static void imx6_pcie_shutdown(struct platform_device *pdev) } static const struct of_device_id imx6_pcie_of_match[] = { - { .compatible = "fsl,imx6q-pcie", }, + { .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, }, + { .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, }, + { .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, }, {}, }; MODULE_DEVICE_TABLE(of, imx6_pcie_of_match); diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c index 6153853ca..41515092e 100644 --- a/drivers/pci/host/pci-keystone-dw.c +++ b/drivers/pci/host/pci-keystone-dw.c @@ -14,6 +14,7 @@ #include <linux/irq.h> #include <linux/irqdomain.h> +#include <linux/irqreturn.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_pci.h> @@ -53,6 +54,21 @@ #define IRQ_STATUS 0x184 #define MSI_IRQ_OFFSET 4 +/* Error IRQ bits */ +#define ERR_AER BIT(5) /* ECRC error */ +#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */ +#define ERR_CORR BIT(3) /* Correctable error */ +#define ERR_NONFATAL BIT(2) /* Non-fatal error */ +#define ERR_FATAL BIT(1) /* Fatal error */ +#define ERR_SYS BIT(0) /* System (fatal, non-fatal, or correctable) */ +#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \ + ERR_NONFATAL | ERR_FATAL | ERR_SYS) +#define ERR_FATAL_IRQ (ERR_FATAL | ERR_AXI) +#define ERR_IRQ_STATUS_RAW 0x1c0 +#define ERR_IRQ_STATUS 0x1c4 +#define ERR_IRQ_ENABLE_SET 0x1c8 +#define ERR_IRQ_ENABLE_CLR 0x1cc + /* Config space registers */ #define DEBUG0 0x728 @@ -243,6 +259,28 @@ void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset) writel(offset, ks_pcie->va_app_base + IRQ_EOI); } +void ks_dw_pcie_enable_error_irq(void __iomem *reg_base) +{ + writel(ERR_IRQ_ALL, reg_base + ERR_IRQ_ENABLE_SET); +} + +irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev, + void __iomem *reg_base) +{ + u32 status; + + status = readl(reg_base + ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL; + if (!status) + return IRQ_NONE; + + if (status & ERR_FATAL_IRQ) + dev_err(dev, "fatal error (status %#010x)\n", status); + + /* Ack the IRQ; status bits are RW1C */ + writel(status, reg_base + ERR_IRQ_STATUS); + return IRQ_HANDLED; +} + static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d) { } diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c index b71f55bb0..6b8301ef2 100644 --- a/drivers/pci/host/pci-keystone.c +++ b/drivers/pci/host/pci-keystone.c @@ -15,6 +15,7 @@ #include <linux/irqchip/chained_irq.h> #include 
<linux/clk.h> #include <linux/delay.h> +#include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/msi.h> @@ -159,7 +160,7 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, char *controller, int *num_irqs) { - int temp, max_host_irqs, legacy = 1, *host_irqs, ret = -EINVAL; + int temp, max_host_irqs, legacy = 1, *host_irqs; struct device *dev = ks_pcie->pp.dev; struct device_node *np_pcie = dev->of_node, **np_temp; @@ -180,11 +181,15 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, *np_temp = of_find_node_by_name(np_pcie, controller); if (!(*np_temp)) { dev_err(dev, "Node for %s is absent\n", controller); - goto out; + return -EINVAL; } + temp = of_irq_count(*np_temp); - if (!temp) - goto out; + if (!temp) { + dev_err(dev, "No IRQ entries in %s\n", controller); + return -EINVAL; + } + if (temp > max_host_irqs) dev_warn(dev, "Too many %s interrupts defined %u\n", (legacy ? "legacy" : "MSI"), temp); @@ -198,12 +203,13 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, if (!host_irqs[temp]) break; } + if (temp) { *num_irqs = temp; - ret = 0; + return 0; } -out: - return ret; + + return -EINVAL; } static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) @@ -226,6 +232,9 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) ks_pcie); } } + + if (ks_pcie->error_irq > 0) + ks_dw_pcie_enable_error_irq(ks_pcie->va_app_base); } /* @@ -289,6 +298,14 @@ static struct pcie_host_ops keystone_pcie_host_ops = { .scan_bus = ks_dw_pcie_v3_65_scan_bus, }; +static irqreturn_t pcie_err_irq_handler(int irq, void *priv) +{ + struct keystone_pcie *ks_pcie = priv; + + return ks_dw_pcie_handle_error_irq(ks_pcie->pp.dev, + ks_pcie->va_app_base); +} + static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, struct platform_device *pdev) { @@ -309,6 +326,22 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, return ret; } + /* + * Index 0 is the platform interrupt for error interrupt + * from RC. This is optional. 
+ */ + ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0); + if (ks_pcie->error_irq <= 0) + dev_info(&pdev->dev, "no error IRQ defined\n"); + else { + if (request_irq(ks_pcie->error_irq, pcie_err_irq_handler, + IRQF_SHARED, "pcie-error-irq", ks_pcie) < 0) { + dev_err(&pdev->dev, "failed to request error IRQ %d\n", + ks_pcie->error_irq); + return ret; + } + } + pp->root_bus_nr = -1; pp->ops = &keystone_pcie_host_ops; ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np); @@ -317,7 +350,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, return ret; } - return ret; + return 0; } static const struct of_device_id ks_pcie_of_match[] = { @@ -346,7 +379,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev) struct resource *res; void __iomem *reg_p; struct phy *phy; - int ret = 0; + int ret; ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie), GFP_KERNEL); @@ -376,6 +409,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev) devm_release_mem_region(dev, res->start, resource_size(res)); pp->dev = dev; + ks_pcie->np = dev->of_node; platform_set_drvdata(pdev, ks_pcie); ks_pcie->clk = devm_clk_get(dev, "pcie"); if (IS_ERR(ks_pcie->clk)) { diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h index f0944e8c4..a5b0cb2ba 100644 --- a/drivers/pci/host/pci-keystone.h +++ b/drivers/pci/host/pci-keystone.h @@ -29,6 +29,9 @@ struct keystone_pcie { int msi_host_irqs[MAX_MSI_HOST_IRQS]; struct device_node *msi_intc_np; struct irq_domain *legacy_irq_domain; + struct device_node *np; + + int error_irq; /* Application register space */ void __iomem *va_app_base; @@ -42,6 +45,9 @@ phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp); /* Keystone specific PCI controller APIs */ void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie); void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset); +void ks_dw_pcie_enable_error_irq(void __iomem *reg_base); +irqreturn_t ks_dw_pcie_handle_error_irq(struct device *dev, + void __iomem *reg_base); int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, struct device_node *msi_intc_np); int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index 53b79c5f0..6b451df65 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -1003,6 +1003,7 @@ static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie) pcie->msi->dev = &pcie->pdev->dev; } +#ifdef CONFIG_PM_SLEEP static int mvebu_pcie_suspend(struct device *dev) { struct mvebu_pcie *pcie; @@ -1031,6 +1032,7 @@ static int mvebu_pcie_resume(struct device *dev) return 0; } +#endif static void mvebu_pcie_port_clk_put(void *data) { @@ -1298,9 +1300,8 @@ static const struct of_device_id mvebu_pcie_of_match_table[] = { }; MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table); -static struct dev_pm_ops mvebu_pcie_pm_ops = { - .suspend_noirq = mvebu_pcie_suspend, - .resume_noirq = mvebu_pcie_resume, +static const struct dev_pm_ops mvebu_pcie_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume) }; static struct platform_driver mvebu_pcie_driver = { diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 68d1f41b3..c388468c2 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c @@ -295,6 +295,7 @@ struct tegra_pcie { struct reset_control *afi_rst; struct reset_control *pcie_xrst; + bool legacy_phy; struct phy *phy; struct tegra_msi msi; @@ -311,11 
+312,14 @@ struct tegra_pcie { struct tegra_pcie_port { struct tegra_pcie *pcie; + struct device_node *np; struct list_head list; struct resource regs; void __iomem *base; unsigned int index; unsigned int lanes; + + struct phy **phys; }; struct tegra_pcie_bus { @@ -860,6 +864,128 @@ static int tegra_pcie_phy_enable(struct tegra_pcie *pcie) return 0; } +static int tegra_pcie_phy_disable(struct tegra_pcie *pcie) +{ + const struct tegra_pcie_soc_data *soc = pcie->soc_data; + u32 value; + + /* disable TX/RX data */ + value = pads_readl(pcie, PADS_CTL); + value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L); + pads_writel(pcie, value, PADS_CTL); + + /* override IDDQ */ + value = pads_readl(pcie, PADS_CTL); + value |= PADS_CTL_IDDQ_1L; + pads_writel(pcie, PADS_CTL, value); + + /* reset PLL */ + value = pads_readl(pcie, soc->pads_pll_ctl); + value &= ~PADS_PLL_CTL_RST_B4SM; + pads_writel(pcie, value, soc->pads_pll_ctl); + + usleep_range(20, 100); + + return 0; +} + +static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port) +{ + struct device *dev = port->pcie->dev; + unsigned int i; + int err; + + for (i = 0; i < port->lanes; i++) { + err = phy_power_on(port->phys[i]); + if (err < 0) { + dev_err(dev, "failed to power on PHY#%u: %d\n", i, + err); + return err; + } + } + + return 0; +} + +static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port) +{ + struct device *dev = port->pcie->dev; + unsigned int i; + int err; + + for (i = 0; i < port->lanes; i++) { + err = phy_power_off(port->phys[i]); + if (err < 0) { + dev_err(dev, "failed to power off PHY#%u: %d\n", i, + err); + return err; + } + } + + return 0; +} + +static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie) +{ + struct tegra_pcie_port *port; + int err; + + if (pcie->legacy_phy) { + if (pcie->phy) + err = phy_power_on(pcie->phy); + else + err = tegra_pcie_phy_enable(pcie); + + if (err < 0) + dev_err(pcie->dev, "failed to power on PHY: %d\n", err); + + return err; + } + + list_for_each_entry(port, &pcie->ports, list) { + err = tegra_pcie_port_phy_power_on(port); + if (err < 0) { + dev_err(pcie->dev, + "failed to power on PCIe port %u PHY: %d\n", + port->index, err); + return err; + } + } + + return 0; +} + +static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie) +{ + struct tegra_pcie_port *port; + int err; + + if (pcie->legacy_phy) { + if (pcie->phy) + err = phy_power_off(pcie->phy); + else + err = tegra_pcie_phy_disable(pcie); + + if (err < 0) + dev_err(pcie->dev, "failed to power off PHY: %d\n", + err); + + return err; + } + + list_for_each_entry(port, &pcie->ports, list) { + err = tegra_pcie_port_phy_power_off(port); + if (err < 0) { + dev_err(pcie->dev, + "failed to power off PCIe port %u PHY: %d\n", + port->index, err); + return err; + } + } + + return 0; +} + static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) { const struct tegra_pcie_soc_data *soc = pcie->soc_data; @@ -899,13 +1025,9 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) afi_writel(pcie, value, AFI_FUSE); } - if (!pcie->phy) - err = tegra_pcie_phy_enable(pcie); - else - err = phy_power_on(pcie->phy); - + err = tegra_pcie_phy_power_on(pcie); if (err < 0) { - dev_err(pcie->dev, "failed to power on PHY: %d\n", err); + dev_err(pcie->dev, "failed to power on PHY(s): %d\n", err); return err; } @@ -942,9 +1064,9 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie) /* TODO: disable and unprepare clocks? 
*/ - err = phy_power_off(pcie->phy); + err = tegra_pcie_phy_power_off(pcie); if (err < 0) - dev_warn(pcie->dev, "failed to power off PHY: %d\n", err); + dev_err(pcie->dev, "failed to power off PHY(s): %d\n", err); reset_control_assert(pcie->pcie_xrst); reset_control_assert(pcie->afi_rst); @@ -1049,6 +1171,100 @@ static int tegra_pcie_resets_get(struct tegra_pcie *pcie) return 0; } +static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie) +{ + int err; + + pcie->phy = devm_phy_optional_get(pcie->dev, "pcie"); + if (IS_ERR(pcie->phy)) { + err = PTR_ERR(pcie->phy); + dev_err(pcie->dev, "failed to get PHY: %d\n", err); + return err; + } + + err = phy_init(pcie->phy); + if (err < 0) { + dev_err(pcie->dev, "failed to initialize PHY: %d\n", err); + return err; + } + + pcie->legacy_phy = true; + + return 0; +} + +static struct phy *devm_of_phy_optional_get_index(struct device *dev, + struct device_node *np, + const char *consumer, + unsigned int index) +{ + struct phy *phy; + char *name; + + name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index); + if (!name) + return ERR_PTR(-ENOMEM); + + phy = devm_of_phy_get(dev, np, name); + kfree(name); + + if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV) + phy = NULL; + + return phy; +} + +static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port) +{ + struct device *dev = port->pcie->dev; + struct phy *phy; + unsigned int i; + int err; + + port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL); + if (!port->phys) + return -ENOMEM; + + for (i = 0; i < port->lanes; i++) { + phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i); + if (IS_ERR(phy)) { + dev_err(dev, "failed to get PHY#%u: %ld\n", i, + PTR_ERR(phy)); + return PTR_ERR(phy); + } + + err = phy_init(phy); + if (err < 0) { + dev_err(dev, "failed to initialize PHY#%u: %d\n", i, + err); + return err; + } + + port->phys[i] = phy; + } + + return 0; +} + +static int tegra_pcie_phys_get(struct tegra_pcie *pcie) +{ + const struct tegra_pcie_soc_data *soc = pcie->soc_data; + struct device_node *np = pcie->dev->of_node; + struct tegra_pcie_port *port; + int err; + + if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL) + return tegra_pcie_phys_get_legacy(pcie); + + list_for_each_entry(port, &pcie->ports, list) { + err = tegra_pcie_port_get_phys(port); + if (err < 0) + return err; + } + + return 0; +} + static int tegra_pcie_get_resources(struct tegra_pcie *pcie) { struct platform_device *pdev = to_platform_device(pcie->dev); @@ -1067,16 +1283,9 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie) return err; } - pcie->phy = devm_phy_optional_get(pcie->dev, "pcie"); - if (IS_ERR(pcie->phy)) { - err = PTR_ERR(pcie->phy); - dev_err(&pdev->dev, "failed to get PHY: %d\n", err); - return err; - } - - err = phy_init(pcie->phy); + err = tegra_pcie_phys_get(pcie); if (err < 0) { - dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err); + dev_err(&pdev->dev, "failed to get PHYs: %d\n", err); return err; } @@ -1752,6 +1961,7 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) rp->index = index; rp->lanes = value; rp->pcie = pcie; + rp->np = port; rp->base = devm_ioremap_resource(pcie->dev, &rp->regs); if (IS_ERR(rp->base)) diff --git a/drivers/pci/host/pci-thunder-ecam.c b/drivers/pci/host/pci-thunder-ecam.c index d71935cb2..540d03061 100644 --- a/drivers/pci/host/pci-thunder-ecam.c +++ b/drivers/pci/host/pci-thunder-ecam.c @@ -13,18 +13,7 @@ #include <linux/of.h> #include <linux/platform_device.h> -#include "pci-host-common.h" - -/* Mapping is standard 
ECAM */ -static void __iomem *thunder_ecam_map_bus(struct pci_bus *bus, - unsigned int devfn, - int where) -{ - struct gen_pci *pci = bus->sysdata; - resource_size_t idx = bus->number - pci->cfg.bus_range->start; - - return pci->cfg.win[idx] + ((devfn << 12) | where); -} +#include "../ecam.h" static void set_val(u32 v, int where, int size, u32 *val) { @@ -99,7 +88,7 @@ static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus, static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { - struct gen_pci *pci = bus->sysdata; + struct pci_config_window *cfg = bus->sysdata; int where_a = where & ~3; void __iomem *addr; u32 node_bits; @@ -129,7 +118,7 @@ static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn, * the config space access window. Since we are working with * the high-order 32 bits, shift everything down by 32 bits. */ - node_bits = (pci->cfg.res.start >> 32) & (1 << 12); + node_bits = (cfg->res.start >> 32) & (1 << 12); v |= node_bits; set_val(v, where, size, val); @@ -358,36 +347,24 @@ static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn, return pci_generic_config_write(bus, devfn, where, size, val); } -static struct gen_pci_cfg_bus_ops thunder_ecam_bus_ops = { +static struct pci_ecam_ops pci_thunder_ecam_ops = { .bus_shift = 20, - .ops = { - .map_bus = thunder_ecam_map_bus, + .pci_ops = { + .map_bus = pci_ecam_map_bus, .read = thunder_ecam_config_read, .write = thunder_ecam_config_write, } }; static const struct of_device_id thunder_ecam_of_match[] = { - { .compatible = "cavium,pci-host-thunder-ecam", - .data = &thunder_ecam_bus_ops }, - + { .compatible = "cavium,pci-host-thunder-ecam" }, { }, }; MODULE_DEVICE_TABLE(of, thunder_ecam_of_match); static int thunder_ecam_probe(struct platform_device *pdev) { - struct device *dev = &pdev->dev; - const struct of_device_id *of_id; - struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - - if (!pci) - return -ENOMEM; - - of_id = of_match_node(thunder_ecam_of_match, dev->of_node); - pci->cfg.ops = (struct gen_pci_cfg_bus_ops *)of_id->data; - - return pci_host_common_probe(pdev, pci); + return pci_host_common_probe(pdev, &pci_thunder_ecam_ops); } static struct platform_driver thunder_ecam_driver = { diff --git a/drivers/pci/host/pci-thunder-pem.c b/drivers/pci/host/pci-thunder-pem.c index cabb92a51..9b8ab94f3 100644 --- a/drivers/pci/host/pci-thunder-pem.c +++ b/drivers/pci/host/pci-thunder-pem.c @@ -20,34 +20,22 @@ #include <linux/of_pci.h> #include <linux/platform_device.h> -#include "pci-host-common.h" +#include "../ecam.h" #define PEM_CFG_WR 0x28 #define PEM_CFG_RD 0x30 struct thunder_pem_pci { - struct gen_pci gen_pci; u32 ea_entry[3]; void __iomem *pem_reg_base; }; -static void __iomem *thunder_pem_map_bus(struct pci_bus *bus, - unsigned int devfn, int where) -{ - struct gen_pci *pci = bus->sysdata; - resource_size_t idx = bus->number - pci->cfg.bus_range->start; - - return pci->cfg.win[idx] + ((devfn << 16) | where); -} - static int thunder_pem_bridge_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { u64 read_val; - struct thunder_pem_pci *pem_pci; - struct gen_pci *pci = bus->sysdata; - - pem_pci = container_of(pci, struct thunder_pem_pci, gen_pci); + struct pci_config_window *cfg = bus->sysdata; + struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv; if (devfn != 0 || where >= 2048) { *val = ~0; @@ -132,17 +120,17 @@ static int thunder_pem_bridge_read(struct pci_bus 
*bus, unsigned int devfn, static int thunder_pem_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { - struct gen_pci *pci = bus->sysdata; + struct pci_config_window *cfg = bus->sysdata; - if (bus->number < pci->cfg.bus_range->start || - bus->number > pci->cfg.bus_range->end) + if (bus->number < cfg->busr.start || + bus->number > cfg->busr.end) return PCIBIOS_DEVICE_NOT_FOUND; /* * The first device on the bus is the PEM PCIe bridge. * Special case its config access. */ - if (bus->number == pci->cfg.bus_range->start) + if (bus->number == cfg->busr.start) return thunder_pem_bridge_read(bus, devfn, where, size, val); return pci_generic_config_read(bus, devfn, where, size, val); @@ -153,11 +141,11 @@ static int thunder_pem_config_read(struct pci_bus *bus, unsigned int devfn, * reserved bits, this makes the code simpler and is OK as the bits * are not affected by writing zeros to them. */ -static u32 thunder_pem_bridge_w1c_bits(int where) +static u32 thunder_pem_bridge_w1c_bits(u64 where_aligned) { u32 w1c_bits = 0; - switch (where & ~3) { + switch (where_aligned) { case 0x04: /* Command/Status */ case 0x1c: /* Base and I/O Limit/Secondary Status */ w1c_bits = 0xff000000; @@ -184,15 +172,36 @@ static u32 thunder_pem_bridge_w1c_bits(int where) return w1c_bits; } +/* Some bits must be written to one so they appear to be read-only. */ +static u32 thunder_pem_bridge_w1_bits(u64 where_aligned) +{ + u32 w1_bits; + + switch (where_aligned) { + case 0x1c: /* I/O Base / I/O Limit, Secondary Status */ + /* Force 32-bit I/O addressing. */ + w1_bits = 0x0101; + break; + case 0x24: /* Prefetchable Memory Base / Prefetchable Memory Limit */ + /* Force 64-bit addressing */ + w1_bits = 0x00010001; + break; + default: + w1_bits = 0; + break; + } + return w1_bits; +} + static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { - struct gen_pci *pci = bus->sysdata; - struct thunder_pem_pci *pem_pci; + struct pci_config_window *cfg = bus->sysdata; + struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv; u64 write_val, read_val; + u64 where_aligned = where & ~3ull; u32 mask = 0; - pem_pci = container_of(pci, struct thunder_pem_pci, gen_pci); if (devfn != 0 || where >= 2048) return PCIBIOS_DEVICE_NOT_FOUND; @@ -205,8 +214,7 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn, */ switch (size) { case 1: - read_val = where & ~3ull; - writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD); + writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD); read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); read_val >>= 32; mask = ~(0xff << (8 * (where & 3))); @@ -215,8 +223,7 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn, val |= (u32)read_val; break; case 2: - read_val = where & ~3ull; - writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD); + writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD); read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); read_val >>= 32; mask = ~(0xffff << (8 * (where & 3))); @@ -244,11 +251,17 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn, } /* + * Some bits must be read-only with value of one. Since the + * access method allows these to be cleared if a zero is + * written, force them to one before writing. + */ + val |= thunder_pem_bridge_w1_bits(where_aligned); + + /* * Low order bits are the config address, the high order 32 * bits are the data to be written. 
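A worked example of the PEM_CFG_WR encoding described in this comment (the values are hypothetical): writing val = 0x00100107 to the aligned config offset 0x04 is issued as a single 64-bit write, matching the write_val construction just below:

write_val = (((u64)0x00100107) << 32) | 0x04;   /* = 0x0010010700000004 */
writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR);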
*/ - write_val = where & ~3ull; - write_val |= (((u64)val) << 32); + write_val = (((u64)val) << 32) | where_aligned; writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR); return PCIBIOS_SUCCESSFUL; } @@ -256,53 +269,38 @@ static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn, static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { - struct gen_pci *pci = bus->sysdata; + struct pci_config_window *cfg = bus->sysdata; - if (bus->number < pci->cfg.bus_range->start || - bus->number > pci->cfg.bus_range->end) + if (bus->number < cfg->busr.start || + bus->number > cfg->busr.end) return PCIBIOS_DEVICE_NOT_FOUND; /* * The first device on the bus is the PEM PCIe bridge. * Special case its config access. */ - if (bus->number == pci->cfg.bus_range->start) + if (bus->number == cfg->busr.start) return thunder_pem_bridge_write(bus, devfn, where, size, val); return pci_generic_config_write(bus, devfn, where, size, val); } -static struct gen_pci_cfg_bus_ops thunder_pem_bus_ops = { - .bus_shift = 24, - .ops = { - .map_bus = thunder_pem_map_bus, - .read = thunder_pem_config_read, - .write = thunder_pem_config_write, - } -}; - -static const struct of_device_id thunder_pem_of_match[] = { - { .compatible = "cavium,pci-host-thunder-pem", - .data = &thunder_pem_bus_ops }, - - { }, -}; -MODULE_DEVICE_TABLE(of, thunder_pem_of_match); - -static int thunder_pem_probe(struct platform_device *pdev) +static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg) { - struct device *dev = &pdev->dev; - const struct of_device_id *of_id; resource_size_t bar4_start; struct resource *res_pem; struct thunder_pem_pci *pem_pci; + struct platform_device *pdev; + + /* Only OF support for now */ + if (!dev->of_node) + return -EINVAL; pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL); if (!pem_pci) return -ENOMEM; - of_id = of_match_node(thunder_pem_of_match, dev->of_node); - pem_pci->gen_pci.cfg.ops = (struct gen_pci_cfg_bus_ops *)of_id->data; + pdev = to_platform_device(dev); /* * The second register range is the PEM bridge to the PCIe @@ -330,7 +328,29 @@ static int thunder_pem_probe(struct platform_device *pdev) pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u; pem_pci->ea_entry[2] = (u32)(bar4_start >> 32); - return pci_host_common_probe(pdev, &pem_pci->gen_pci); + cfg->priv = pem_pci; + return 0; +} + +static struct pci_ecam_ops pci_thunder_pem_ops = { + .bus_shift = 24, + .init = thunder_pem_init, + .pci_ops = { + .map_bus = pci_ecam_map_bus, + .read = thunder_pem_config_read, + .write = thunder_pem_config_write, + } +}; + +static const struct of_device_id thunder_pem_of_match[] = { + { .compatible = "cavium,pci-host-thunder-pem" }, + { }, +}; +MODULE_DEVICE_TABLE(of, thunder_pem_of_match); + +static int thunder_pem_probe(struct platform_device *pdev) +{ + return pci_host_common_probe(pdev, &pci_thunder_pem_ops); } static struct platform_driver thunder_pem_driver = { diff --git a/drivers/pci/host/pcie-armada8k.c b/drivers/pci/host/pcie-armada8k.c new file mode 100644 index 000000000..55723567b --- /dev/null +++ b/drivers/pci/host/pcie-armada8k.c @@ -0,0 +1,262 @@ +/* + * PCIe host controller driver for Marvell Armada-8K SoCs + * + * Armada-8K PCIe Glue Layer Source Code + * + * Copyright (C) 2016 Marvell Technology Group Ltd. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. 
This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/pci.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/resource.h> +#include <linux/of_pci.h> +#include <linux/of_irq.h> + +#include "pcie-designware.h" + +struct armada8k_pcie { + void __iomem *base; + struct clk *clk; + struct pcie_port pp; +}; + +#define PCIE_VENDOR_REGS_OFFSET 0x8000 + +#define PCIE_GLOBAL_CONTROL_REG 0x0 +#define PCIE_APP_LTSSM_EN BIT(2) +#define PCIE_DEVICE_TYPE_SHIFT 4 +#define PCIE_DEVICE_TYPE_MASK 0xF +#define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */ + +#define PCIE_GLOBAL_STATUS_REG 0x8 +#define PCIE_GLB_STS_RDLH_LINK_UP BIT(1) +#define PCIE_GLB_STS_PHY_LINK_UP BIT(9) + +#define PCIE_GLOBAL_INT_CAUSE1_REG 0x1C +#define PCIE_GLOBAL_INT_MASK1_REG 0x20 +#define PCIE_INT_A_ASSERT_MASK BIT(9) +#define PCIE_INT_B_ASSERT_MASK BIT(10) +#define PCIE_INT_C_ASSERT_MASK BIT(11) +#define PCIE_INT_D_ASSERT_MASK BIT(12) + +#define PCIE_ARCACHE_TRC_REG 0x50 +#define PCIE_AWCACHE_TRC_REG 0x54 +#define PCIE_ARUSER_REG 0x5C +#define PCIE_AWUSER_REG 0x60 +/* + * AR/AW Cache defauls: Normal memory, Write-Back, Read / Write + * allocate + */ +#define ARCACHE_DEFAULT_VALUE 0x3511 +#define AWCACHE_DEFAULT_VALUE 0x5311 + +#define DOMAIN_OUTER_SHAREABLE 0x2 +#define AX_USER_DOMAIN_MASK 0x3 +#define AX_USER_DOMAIN_SHIFT 4 + +#define to_armada8k_pcie(x) container_of(x, struct armada8k_pcie, pp) + +static int armada8k_pcie_link_up(struct pcie_port *pp) +{ + struct armada8k_pcie *pcie = to_armada8k_pcie(pp); + u32 reg; + u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP; + + reg = readl(pcie->base + PCIE_GLOBAL_STATUS_REG); + + if ((reg & mask) == mask) + return 1; + + dev_dbg(pp->dev, "No link detected (Global-Status: 0x%08x).\n", reg); + return 0; +} + +static void armada8k_pcie_establish_link(struct pcie_port *pp) +{ + struct armada8k_pcie *pcie = to_armada8k_pcie(pp); + void __iomem *base = pcie->base; + u32 reg; + + if (!dw_pcie_link_up(pp)) { + /* Disable LTSSM state machine to enable configuration */ + reg = readl(base + PCIE_GLOBAL_CONTROL_REG); + reg &= ~(PCIE_APP_LTSSM_EN); + writel(reg, base + PCIE_GLOBAL_CONTROL_REG); + } + + /* Set the device to root complex mode */ + reg = readl(base + PCIE_GLOBAL_CONTROL_REG); + reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT); + reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT; + writel(reg, base + PCIE_GLOBAL_CONTROL_REG); + + /* Set the PCIe master AxCache attributes */ + writel(ARCACHE_DEFAULT_VALUE, base + PCIE_ARCACHE_TRC_REG); + writel(AWCACHE_DEFAULT_VALUE, base + PCIE_AWCACHE_TRC_REG); + + /* Set the PCIe master AxDomain attributes */ + reg = readl(base + PCIE_ARUSER_REG); + reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); + reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; + writel(reg, base + PCIE_ARUSER_REG); + + reg = readl(base + PCIE_AWUSER_REG); + reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); + reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; + writel(reg, base + PCIE_AWUSER_REG); + + /* Enable INT A-D interrupts */ + reg = readl(base + PCIE_GLOBAL_INT_MASK1_REG); + reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK | + PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK; + writel(reg, base + PCIE_GLOBAL_INT_MASK1_REG); + + if (!dw_pcie_link_up(pp)) { + /* 
Configuration done. Start LTSSM */ + reg = readl(base + PCIE_GLOBAL_CONTROL_REG); + reg |= PCIE_APP_LTSSM_EN; + writel(reg, base + PCIE_GLOBAL_CONTROL_REG); + } + + /* Wait until the link becomes active again */ + if (dw_pcie_wait_for_link(pp)) + dev_err(pp->dev, "Link not up after reconfiguration\n"); +} + +static void armada8k_pcie_host_init(struct pcie_port *pp) +{ + dw_pcie_setup_rc(pp); + armada8k_pcie_establish_link(pp); +} + +static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg) +{ + struct pcie_port *pp = arg; + struct armada8k_pcie *pcie = to_armada8k_pcie(pp); + void __iomem *base = pcie->base; + u32 val; + + /* + * Interrupts are directly handled by the device driver of the + * PCI device. However, they are also latched into the PCIe + * controller, so we simply discard them. + */ + val = readl(base + PCIE_GLOBAL_INT_CAUSE1_REG); + writel(val, base + PCIE_GLOBAL_INT_CAUSE1_REG); + + return IRQ_HANDLED; +} + +static struct pcie_host_ops armada8k_pcie_host_ops = { + .link_up = armada8k_pcie_link_up, + .host_init = armada8k_pcie_host_init, +}; + +static int armada8k_add_pcie_port(struct pcie_port *pp, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int ret; + + pp->root_bus_nr = -1; + pp->ops = &armada8k_pcie_host_ops; + + pp->irq = platform_get_irq(pdev, 0); + if (!pp->irq) { + dev_err(dev, "failed to get irq for port\n"); + return -ENODEV; + } + + ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler, + IRQF_SHARED, "armada8k-pcie", pp); + if (ret) { + dev_err(dev, "failed to request irq %d\n", pp->irq); + return ret; + } + + ret = dw_pcie_host_init(pp); + if (ret) { + dev_err(dev, "failed to initialize host: %d\n", ret); + return ret; + } + + return 0; +} + +static int armada8k_pcie_probe(struct platform_device *pdev) +{ + struct armada8k_pcie *pcie; + struct pcie_port *pp; + struct device *dev = &pdev->dev; + struct resource *base; + int ret; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->clk = devm_clk_get(dev, NULL); + if (IS_ERR(pcie->clk)) + return PTR_ERR(pcie->clk); + + clk_prepare_enable(pcie->clk); + + pp = &pcie->pp; + pp->dev = dev; + platform_set_drvdata(pdev, pcie); + + /* Get the dw-pcie unit configuration/control registers base. 
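For reference, armada8k_pcie_link_up() above reports link only when both status bits are set at once. A sketch with purely illustrative register values:

#include <linux/types.h>

/* Illustrative only: same test as armada8k_pcie_link_up(). */
static bool example_link_check(u32 reg)
{
	u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP; /* 0x0202 */

	return (reg & mask) == mask;
}

/*
 * example_link_check(0x00000202) -> true  (hypothetical: RDLH and PHY both up)
 * example_link_check(0x00000200) -> false (hypothetical: PHY up, RDLH down)
 */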
*/ + base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); + pp->dbi_base = devm_ioremap_resource(dev, base); + if (IS_ERR(pp->dbi_base)) { + dev_err(dev, "couldn't remap regs base %p\n", base); + ret = PTR_ERR(pp->dbi_base); + goto fail; + } + + pcie->base = pp->dbi_base + PCIE_VENDOR_REGS_OFFSET; + + ret = armada8k_add_pcie_port(pp, pdev); + if (ret) + goto fail; + + return 0; + +fail: + if (!IS_ERR(pcie->clk)) + clk_disable_unprepare(pcie->clk); + + return ret; +} + +static const struct of_device_id armada8k_pcie_of_match[] = { + { .compatible = "marvell,armada8k-pcie", }, + {}, +}; +MODULE_DEVICE_TABLE(of, armada8k_pcie_of_match); + +static struct platform_driver armada8k_pcie_driver = { + .probe = armada8k_pcie_probe, + .driver = { + .name = "armada8k-pcie", + .of_match_table = of_match_ptr(armada8k_pcie_of_match), + }, +}; + +module_platform_driver(armada8k_pcie_driver); + +MODULE_DESCRIPTION("Armada 8k PCIe host controller driver"); +MODULE_AUTHOR("Yehuda Yitshak <yehuday@marvell.com>"); +MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index a4cccd356..aafd76654 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -434,7 +434,6 @@ int dw_pcie_host_init(struct pcie_port *pp) struct platform_device *pdev = to_platform_device(pp->dev); struct pci_bus *bus, *child; struct resource *cfg_res; - u32 val; int i, ret; LIST_HEAD(res); struct resource_entry *win; @@ -544,25 +543,6 @@ int dw_pcie_host_init(struct pcie_port *pp) if (pp->ops->host_init) pp->ops->host_init(pp); - /* - * If the platform provides ->rd_other_conf, it means the platform - * uses its own address translation component rather than ATU, so - * we should not program the ATU here. - */ - if (!pp->ops->rd_other_conf) - dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, - PCIE_ATU_TYPE_MEM, pp->mem_base, - pp->mem_bus_addr, pp->mem_size); - - dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); - - /* program correct class for RC */ - dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); - - dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); - val |= PORT_LOGIC_SPEED_CHANGE; - dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); - pp->root_bus_nr = pp->busn->start; if (IS_ENABLED(CONFIG_PCI_MSI)) { bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr, @@ -728,8 +708,6 @@ static struct pci_ops dw_pcie_ops = { void dw_pcie_setup_rc(struct pcie_port *pp) { u32 val; - u32 membase; - u32 memlimit; /* set the number of lanes */ dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val); @@ -788,18 +766,31 @@ void dw_pcie_setup_rc(struct pcie_port *pp) val |= 0x00010100; dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS); - /* setup memory base, memory limit */ - membase = ((u32)pp->mem_base & 0xfff00000) >> 16; - memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000; - val = memlimit | membase; - dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE); - /* setup command register */ dw_pcie_readl_rc(pp, PCI_COMMAND, &val); val &= 0xffff0000; val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_SERR; dw_pcie_writel_rc(pp, val, PCI_COMMAND); + + /* + * If the platform provides ->rd_other_conf, it means the platform + * uses its own address translation component rather than ATU, so + * we should not program the ATU here. 
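Because that programming now lives in dw_pcie_setup_rc() (see the hunk that follows), a platform's host_init callback picks it up simply by calling dw_pcie_setup_rc(). A minimal sketch of such a callback, illustrative and not from this patch:

/* Hypothetical host_init, illustrative only: ATU window, BAR0, class code and
 * speed-change programming are all handled inside dw_pcie_setup_rc() after
 * this change. */
static void example_pcie_host_init(struct pcie_port *pp)
{
	dw_pcie_setup_rc(pp);
	/* platform-specific link bring-up would follow here */
}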
+ */ + if (!pp->ops->rd_other_conf) + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, + PCIE_ATU_TYPE_MEM, pp->mem_base, + pp->mem_bus_addr, pp->mem_size); + + dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); + + /* program correct class for RC */ + dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); + + dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); + val |= PORT_LOGIC_SPEED_CHANGE; + dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); } MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c index 5139e6443..3479d30e2 100644 --- a/drivers/pci/host/pcie-xilinx-nwl.c +++ b/drivers/pci/host/pcie-xilinx-nwl.c @@ -819,7 +819,7 @@ static int nwl_pcie_probe(struct platform_device *pdev) err = nwl_pcie_bridge_init(pcie); if (err) { - dev_err(pcie->dev, "HW Initalization failed\n"); + dev_err(pcie->dev, "HW Initialization failed\n"); return err; } diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index 2f6d3a1c1..f6221d739 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c @@ -138,6 +138,8 @@ static union apci_descriptor *ibm_slot_from_id(int id) char *table; size = ibm_get_table_from_acpi(&table); + if (size < 0) + return NULL; des = (union apci_descriptor *)table; if (memcmp(des->header.sig, "aPCI", 4) != 0) goto ibm_slot_done; diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index b46b57d87..dc67f3977 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -175,7 +175,7 @@ static int dlpar_add_pci_slot(char *drc_name, struct device_node *dn) struct pci_dev *dev; struct pci_controller *phb; - if (pcibios_find_pci_bus(dn)) + if (pci_find_bus_by_node(dn)) return -EINVAL; /* Add pci bus */ @@ -212,7 +212,7 @@ static int dlpar_remove_phb(char *drc_name, struct device_node *dn) struct pci_dn *pdn; int rc = 0; - if (!pcibios_find_pci_bus(dn)) + if (!pci_find_bus_by_node(dn)) return -EINVAL; /* If pci slot is hotpluggable, use hotplug to remove it */ @@ -356,7 +356,7 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn) pci_lock_rescan_remove(); - bus = pcibios_find_pci_bus(dn); + bus = pci_find_bus_by_node(dn); if (!bus) { ret = -EINVAL; goto out; @@ -380,7 +380,7 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn) } /* Remove all devices below slot */ - pcibios_remove_pci_devices(bus); + pci_hp_remove_devices(bus); /* Unmap PCI IO space */ if (pcibios_unmap_io_space(bus)) { diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index 611f60562..8d132024f 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c @@ -404,7 +404,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) if (state == PRESENT) { pci_lock_rescan_remove(); - pcibios_add_pci_devices(slot->bus); + pci_hp_add_devices(slot->bus); pci_unlock_rescan_remove(); slot->state = CONFIGURED; } else if (state == EMPTY) { @@ -426,7 +426,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) return -EINVAL; pci_lock_rescan_remove(); - pcibios_remove_pci_devices(slot->bus); + pci_hp_remove_devices(slot->bus); pci_unlock_rescan_remove(); vm_unmap_aliases(); diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c index 7836d6913..ea41ea1d3 100644 --- a/drivers/pci/hotplug/rpaphp_pci.c +++ b/drivers/pci/hotplug/rpaphp_pci.c @@ -93,7 +93,7 @@ 
int rpaphp_enable_slot(struct slot *slot) if (rc) return rc; - bus = pcibios_find_pci_bus(slot->dn); + bus = pci_find_bus_by_node(slot->dn); if (!bus) { err("%s: no pci_bus for dn %s\n", __func__, slot->dn->full_name); return -EINVAL; @@ -116,7 +116,7 @@ int rpaphp_enable_slot(struct slot *slot) } if (list_empty(&bus->devices)) - pcibios_add_pci_devices(bus); + pci_hp_add_devices(bus); if (!list_empty(&bus->devices)) { info->adapter_status = CONFIGURED; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 342b6918b..d319a9ca9 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -1008,6 +1008,9 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, if (i >= PCI_ROM_RESOURCE) return -ENODEV; + if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start)) + return -EINVAL; + if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) { WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n", current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff, @@ -1024,10 +1027,6 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, pci_resource_to_user(pdev, i, res, &start, &end); vma->vm_pgoff += start >> PAGE_SHIFT; mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; - - if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(start)) - return -EINVAL; - return pci_mmap_page_range(pdev, vma, mmap_type, write_combine); } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 25e0327d4..c8b4dbdd1 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -2228,7 +2228,7 @@ void pci_pm_init(struct pci_dev *dev) static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop) { - unsigned long flags = IORESOURCE_PCI_FIXED; + unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI; switch (prop) { case PCI_EA_P_MEM: @@ -2389,7 +2389,7 @@ out: return offset + ent_size; } -/* Enhanced Allocation Initalization */ +/* Enhanced Allocation Initialization */ void pci_ea_init(struct pci_dev *dev) { int ea; @@ -2547,7 +2547,7 @@ void pci_request_acs(void) * pci_std_enable_acs - enable ACS on devices using standard ACS capabilites * @dev: the PCI device */ -static int pci_std_enable_acs(struct pci_dev *dev) +static void pci_std_enable_acs(struct pci_dev *dev) { int pos; u16 cap; @@ -2555,7 +2555,7 @@ static int pci_std_enable_acs(struct pci_dev *dev) pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); if (!pos) - return -ENODEV; + return; pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); @@ -2573,8 +2573,6 @@ static int pci_std_enable_acs(struct pci_dev *dev) ctrl |= (cap & PCI_ACS_UF); pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); - - return 0; } /** @@ -2586,10 +2584,10 @@ void pci_enable_acs(struct pci_dev *dev) if (!pci_acs_enable) return; - if (!pci_std_enable_acs(dev)) + if (!pci_dev_specific_enable_acs(dev)) return; - pci_dev_specific_enable_acs(dev); + pci_std_enable_acs(dev); } static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags) @@ -3021,6 +3019,121 @@ int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name) } EXPORT_SYMBOL(pci_request_regions_exclusive); +#ifdef PCI_IOBASE +struct io_range { + struct list_head list; + phys_addr_t start; + resource_size_t size; +}; + +static LIST_HEAD(io_range_list); +static DEFINE_SPINLOCK(io_range_lock); +#endif + +/* + * Record the PCI IO range (expressed as CPU physical address + size). 
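A worked example of the bookkeeping this implements (addresses and sizes below are made up for illustration): if a host bridge registers two I/O windows, the logical port-I/O tokens are handed out consecutively in registration order, and pci_pio_to_address() walks the list to translate a token back to a CPU physical address.

#include <linux/sizes.h>

/* Illustrative only; the physical addresses are hypothetical. */
static void example_register_pci_io(void)
{
	pci_register_io_range(0x200000000ULL, SZ_64K);	/* tokens 0x00000-0x0ffff */
	pci_register_io_range(0x300000000ULL, SZ_64K);	/* tokens 0x10000-0x1ffff */
}

/*
 * pci_pio_to_address(0x10004)   == 0x300000004  (second range, offset 4)
 * pci_address_to_pio(0x200000010) == 0x10       (first range, offset 0x10)
 */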
+ * Return a negative value if an error has occured, zero otherwise + */ +int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size) +{ + int err = 0; + +#ifdef PCI_IOBASE + struct io_range *range; + resource_size_t allocated_size = 0; + + /* check if the range hasn't been previously recorded */ + spin_lock(&io_range_lock); + list_for_each_entry(range, &io_range_list, list) { + if (addr >= range->start && addr + size <= range->start + size) { + /* range already registered, bail out */ + goto end_register; + } + allocated_size += range->size; + } + + /* range not registed yet, check for available space */ + if (allocated_size + size - 1 > IO_SPACE_LIMIT) { + /* if it's too big check if 64K space can be reserved */ + if (allocated_size + SZ_64K - 1 > IO_SPACE_LIMIT) { + err = -E2BIG; + goto end_register; + } + + size = SZ_64K; + pr_warn("Requested IO range too big, new size set to 64K\n"); + } + + /* add the range to the list */ + range = kzalloc(sizeof(*range), GFP_ATOMIC); + if (!range) { + err = -ENOMEM; + goto end_register; + } + + range->start = addr; + range->size = size; + + list_add_tail(&range->list, &io_range_list); + +end_register: + spin_unlock(&io_range_lock); +#endif + + return err; +} + +phys_addr_t pci_pio_to_address(unsigned long pio) +{ + phys_addr_t address = (phys_addr_t)OF_BAD_ADDR; + +#ifdef PCI_IOBASE + struct io_range *range; + resource_size_t allocated_size = 0; + + if (pio > IO_SPACE_LIMIT) + return address; + + spin_lock(&io_range_lock); + list_for_each_entry(range, &io_range_list, list) { + if (pio >= allocated_size && pio < allocated_size + range->size) { + address = range->start + pio - allocated_size; + break; + } + allocated_size += range->size; + } + spin_unlock(&io_range_lock); +#endif + + return address; +} + +unsigned long __weak pci_address_to_pio(phys_addr_t address) +{ +#ifdef PCI_IOBASE + struct io_range *res; + resource_size_t offset = 0; + unsigned long addr = -1; + + spin_lock(&io_range_lock); + list_for_each_entry(res, &io_range_list, list) { + if (address >= res->start && address < res->start + res->size) { + addr = address - res->start + offset; + break; + } + offset += res->size; + } + spin_unlock(&io_range_lock); + + return addr; +#else + if (address > IO_SPACE_LIMIT) + return (unsigned long)-1; + + return (unsigned long) address; +#endif +} + /** * pci_remap_iospace - Remap the memory mapped I/O space * @res: Resource describing the I/O space @@ -4578,6 +4691,37 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, return 0; } +/** + * pci_add_dma_alias - Add a DMA devfn alias for a device + * @dev: the PCI device for which alias is added + * @devfn: alias slot and function + * + * This helper encodes 8-bit devfn as bit number in dma_alias_mask. + * It should be called early, preferably as PCI fixup header quirk. 
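For example, a header fixup using this helper might look as follows; the vendor/device IDs and the alias devfn are purely illustrative, and the real users added by this patch appear in quirks.c further below:

/* Hypothetical quirk, illustrative only: make requester ID 10.0 an alias. */
static void example_dma_alias_quirk(struct pci_dev *pdev)
{
	pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0));
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0xabcd, example_dma_alias_quirk);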
+ */ +void pci_add_dma_alias(struct pci_dev *dev, u8 devfn) +{ + if (!dev->dma_alias_mask) + dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX), + sizeof(long), GFP_KERNEL); + if (!dev->dma_alias_mask) { + dev_warn(&dev->dev, "Unable to allocate DMA alias mask\n"); + return; + } + + set_bit(devfn, dev->dma_alias_mask); + dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n", + PCI_SLOT(devfn), PCI_FUNC(devfn)); +} + +bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2) +{ + return (dev1->dma_alias_mask && + test_bit(dev2->devfn, dev1->dma_alias_mask)) || + (dev2->dma_alias_mask && + test_bit(dev1->devfn, dev2->dma_alias_mask)); +} + bool pci_device_is_present(struct pci_dev *pdev) { u32 v; diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 72db7f420..22ca6412b 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig @@ -81,3 +81,17 @@ endchoice config PCIE_PME def_bool y depends on PCIEPORTBUS && PM + +config PCIE_DPC + tristate "PCIe Downstream Port Containment support" + depends on PCIEPORTBUS + default n + help + This enables PCI Express Downstream Port Containment (DPC) + driver support. DPC events from Root and Downstream ports + will be handled by the DPC driver. If your system doesn't + have this capability or you do not want to use this feature, + it is safe to answer N. + + To compile this driver as a module, choose M here: the module + will be called pcie-dpc. diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile index 00c62df5a..b24525b3d 100644 --- a/drivers/pci/pcie/Makefile +++ b/drivers/pci/pcie/Makefile @@ -14,3 +14,5 @@ obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o obj-$(CONFIG_PCIEAER) += aer/ obj-$(CONFIG_PCIE_PME) += pme.o + +obj-$(CONFIG_PCIE_DPC) += pcie-dpc.o diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c new file mode 100644 index 000000000..ab552f1bc --- /dev/null +++ b/drivers/pci/pcie/pcie-dpc.c @@ -0,0 +1,163 @@ +/* + * PCI Express Downstream Port Containment services driver + * Copyright (C) 2016 Intel Corp. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
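Before the driver body, a short note on the status decoding used below: the trigger reason lives in bits 2:1 of the DPC Status register. A sketch of the mapping, assuming the same encoding as the interrupt handler that follows; the helper is not part of the driver:

#include <linux/types.h>

/* Illustrative helper only. */
static const char *example_dpc_reason(u16 status)
{
	switch ((status >> 1) & 0x3) {
	case 0:  return "unmasked uncorrectable error";
	case 1:  return "ERR_NONFATAL";
	case 2:  return "ERR_FATAL";
	default: return "extended error";
	}
}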
+ */ + +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/pcieport_if.h> + +struct dpc_dev { + struct pcie_device *dev; + struct work_struct work; + int cap_pos; +}; + +static void dpc_wait_link_inactive(struct pci_dev *pdev) +{ + unsigned long timeout = jiffies + HZ; + u16 lnk_status; + + pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); + while (lnk_status & PCI_EXP_LNKSTA_DLLLA && + !time_after(jiffies, timeout)) { + msleep(10); + pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); + } + if (lnk_status & PCI_EXP_LNKSTA_DLLLA) + dev_warn(&pdev->dev, "Link state not disabled for DPC event"); +} + +static void interrupt_event_handler(struct work_struct *work) +{ + struct dpc_dev *dpc = container_of(work, struct dpc_dev, work); + struct pci_dev *dev, *temp, *pdev = dpc->dev->port; + struct pci_bus *parent = pdev->subordinate; + + pci_lock_rescan_remove(); + list_for_each_entry_safe_reverse(dev, temp, &parent->devices, + bus_list) { + pci_dev_get(dev); + pci_stop_and_remove_bus_device(dev); + pci_dev_put(dev); + } + pci_unlock_rescan_remove(); + + dpc_wait_link_inactive(pdev); + pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, + PCI_EXP_DPC_STATUS_TRIGGER | PCI_EXP_DPC_STATUS_INTERRUPT); +} + +static irqreturn_t dpc_irq(int irq, void *context) +{ + struct dpc_dev *dpc = (struct dpc_dev *)context; + struct pci_dev *pdev = dpc->dev->port; + u16 status, source; + + pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status); + pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_SOURCE_ID, + &source); + if (!status) + return IRQ_NONE; + + dev_info(&dpc->dev->device, "DPC containment event, status:%#06x source:%#06x\n", + status, source); + + if (status & PCI_EXP_DPC_STATUS_TRIGGER) { + u16 reason = (status >> 1) & 0x3; + + dev_warn(&dpc->dev->device, "DPC %s triggered, remove downstream devices\n", + (reason == 0) ? "unmasked uncorrectable error" : + (reason == 1) ? "ERR_NONFATAL" : + (reason == 2) ? "ERR_FATAL" : "extended error"); + schedule_work(&dpc->work); + } + return IRQ_HANDLED; +} + +#define FLAG(x, y) (((x) & (y)) ? 
'+' : '-') +static int dpc_probe(struct pcie_device *dev) +{ + struct dpc_dev *dpc; + struct pci_dev *pdev = dev->port; + int status; + u16 ctl, cap; + + dpc = kzalloc(sizeof(*dpc), GFP_KERNEL); + if (!dpc) + return -ENOMEM; + + dpc->cap_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DPC); + dpc->dev = dev; + INIT_WORK(&dpc->work, interrupt_event_handler); + set_service_data(dev, dpc); + + status = request_irq(dev->irq, dpc_irq, IRQF_SHARED, "pcie-dpc", dpc); + if (status) { + dev_warn(&dev->device, "request IRQ%d failed: %d\n", dev->irq, + status); + goto out; + } + + pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CAP, &cap); + pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl); + + ctl |= PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN; + pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); + + dev_info(&dev->device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", + cap & 0xf, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT), + FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP), + FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), (cap >> 8) & 0xf, + FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE)); + return status; + out: + kfree(dpc); + return status; +} + +static void dpc_remove(struct pcie_device *dev) +{ + struct dpc_dev *dpc = get_service_data(dev); + struct pci_dev *pdev = dev->port; + u16 ctl; + + pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, &ctl); + ctl &= ~(PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN); + pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); + + free_irq(dev->irq, dpc); + kfree(dpc); +} + +static struct pcie_port_service_driver dpcdriver = { + .name = "dpc", + .port_type = PCI_EXP_TYPE_ROOT_PORT | PCI_EXP_TYPE_DOWNSTREAM, + .service = PCIE_PORT_SERVICE_DPC, + .probe = dpc_probe, + .remove = dpc_remove, +}; + +static int __init dpc_service_init(void) +{ + return pcie_port_service_register(&dpcdriver); +} + +static void __exit dpc_service_exit(void) +{ + pcie_port_service_unregister(&dpcdriver); +} + +MODULE_DESCRIPTION("PCI Express Downstream Port Containment driver"); +MODULE_AUTHOR("Keith Busch <keith.busch@intel.com>"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.1"); + +module_init(dpc_service_init); +module_exit(dpc_service_exit); diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index d52554840..587aef360 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -11,14 +11,14 @@ #include <linux/compiler.h> -#define PCIE_PORT_DEVICE_MAXSERVICES 4 +#define PCIE_PORT_DEVICE_MAXSERVICES 5 /* * According to the PCI Express Base Specification 2.0, the indices of * the MSI-X table entries used by port services must not exceed 31 */ #define PCIE_PORT_MAX_MSIX_ENTRIES 32 -#define get_descriptor_id(type, service) (((type - 4) << 4) | service) +#define get_descriptor_id(type, service) (((type - 4) << 8) | service) extern struct bus_type pcie_port_bus_type; int pcie_port_device_register(struct pci_dev *dev); @@ -67,17 +67,14 @@ static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {} #endif /* !CONFIG_PCIE_PME */ #ifdef CONFIG_ACPI -int pcie_port_acpi_setup(struct pci_dev *port, int *mask); +void pcie_port_acpi_setup(struct pci_dev *port, int *mask); -static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask) +static inline void pcie_port_platform_notify(struct pci_dev *port, int *mask) { - return pcie_port_acpi_setup(port, mask); + pcie_port_acpi_setup(port, mask); } #else /* !CONFIG_ACPI */ 
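A note on the descriptor-id change earlier in this file's diff: with a fifth port service the service mask needs five bits, so the old ((type - 4) << 4) | service encoding would let the service field run into the type field; shifting the type by 8 keeps the two apart, and the printed id grows to three hex digits. Illustrative values below; the helper is hypothetical:

#include <linux/types.h>

/* Hypothetical helper, illustrative only: old vs. new encoding. */
static inline u16 descriptor_id(int type, int service, int shift)
{
	return ((type - 4) << shift) | service;
}

/*
 * Root port (type 4) with a fifth service bit such as 0x10:
 *   old: descriptor_id(4, 0x10, 4) == 0x10   (service spills into type bits)
 *   new: descriptor_id(4, 0x10, 8) == 0x010
 * Downstream port (type 6) with the same service:
 *   new: descriptor_id(6, 0x10, 8) == 0x210  (hence the wider "%s:pcie%03x" name)
 */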
-static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask) -{ - return 0; -} +static inline void pcie_port_platform_notify(struct pci_dev *port, int *mask){} #endif /* !CONFIG_ACPI */ #endif /* _PORTDRV_H_ */ diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c index b4d2894ee..6b8c2f1d0 100644 --- a/drivers/pci/pcie/portdrv_acpi.c +++ b/drivers/pci/pcie/portdrv_acpi.c @@ -32,32 +32,30 @@ * NOTE: It turns out that we cannot do that for individual port services * separately, because that would make some systems work incorrectly. */ -int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask) +void pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask) { struct acpi_pci_root *root; acpi_handle handle; u32 flags; if (acpi_pci_disabled) - return 0; + return; handle = acpi_find_root_bridge_handle(port); if (!handle) - return -EINVAL; + return; root = acpi_pci_find_root(handle); if (!root) - return -ENODEV; + return; flags = root->osc_control_set; - *srv_mask = PCIE_PORT_SERVICE_VC; + *srv_mask = PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_DPC; if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL) *srv_mask |= PCIE_PORT_SERVICE_HP; if (flags & OSC_PCI_EXPRESS_PME_CONTROL) *srv_mask |= PCIE_PORT_SERVICE_PME; if (flags & OSC_PCI_EXPRESS_AER_CONTROL) *srv_mask |= PCIE_PORT_SERVICE_AER; - - return 0; } diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 88122dc2e..32d4d0a3d 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -254,38 +254,28 @@ static void cleanup_service_irqs(struct pci_dev *dev) static int get_port_device_capability(struct pci_dev *dev) { int services = 0; - u32 reg32; int cap_mask = 0; - int err; if (pcie_ports_disabled) return 0; cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP - | PCIE_PORT_SERVICE_VC; + | PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_DPC; if (pci_aer_available()) cap_mask |= PCIE_PORT_SERVICE_AER; - if (pcie_ports_auto) { - err = pcie_port_platform_notify(dev, &cap_mask); - if (err) - return 0; - } + if (pcie_ports_auto) + pcie_port_platform_notify(dev, &cap_mask); /* Hot-Plug Capable */ - if ((cap_mask & PCIE_PORT_SERVICE_HP) && - pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT) { - pcie_capability_read_dword(dev, PCI_EXP_SLTCAP, ®32); - if (reg32 & PCI_EXP_SLTCAP_HPC) { - services |= PCIE_PORT_SERVICE_HP; - /* - * Disable hot-plug interrupts in case they have been - * enabled by the BIOS and the hot-plug service driver - * is not loaded. - */ - pcie_capability_clear_word(dev, PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); - } + if ((cap_mask & PCIE_PORT_SERVICE_HP) && dev->is_hotplug_bridge) { + services |= PCIE_PORT_SERVICE_HP; + /* + * Disable hot-plug interrupts in case they have been enabled + * by the BIOS and the hot-plug service driver is not loaded. 
+ */ + pcie_capability_clear_word(dev, PCI_EXP_SLTCTL, + PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); } /* AER capable */ if ((cap_mask & PCIE_PORT_SERVICE_AER) @@ -311,6 +301,8 @@ static int get_port_device_capability(struct pci_dev *dev) */ pcie_pme_interrupt_enable(dev, false); } + if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC)) + services |= PCIE_PORT_SERVICE_DPC; return services; } @@ -338,7 +330,7 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq) device = &pcie->device; device->bus = &pcie_port_bus_type; device->release = release_pcie_device; /* callback to free pcie dev */ - dev_set_name(device, "%s:pcie%02x", + dev_set_name(device, "%s:pcie%03x", pci_name(pdev), get_descriptor_id(pci_pcie_type(pdev), service)); device->parent = &pdev->dev; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index bf8405fb4..8e3ef7209 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1537,6 +1537,7 @@ static void pci_release_dev(struct device *dev) pcibios_release_device(pci_dev); pci_bus_put(pci_dev->bus); kfree(pci_dev->driver_override); + kfree(pci_dev->dma_alias_mask); kfree(pci_dev); } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 8e678027b..ee72ebe18 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3150,6 +3150,39 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169, DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID, quirk_broken_intx_masking); +/* + * Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking, + * DisINTx can be set but the interrupt status bit is non-functional. + */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1572, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1574, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1580, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1581, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1583, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1584, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1585, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1586, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1587, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1588, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1589, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d0, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d1, + quirk_broken_intx_masking); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x37d2, + quirk_broken_intx_masking); + static void quirk_no_bus_reset(struct pci_dev *dev) { dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; @@ -3185,6 +3218,29 @@ static void quirk_no_pm_reset(struct pci_dev *dev) DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset); +/* + * Thunderbolt controllers with broken MSI hotplug signaling: + * Entire 1st generation (Light Ridge, Eagle Ridge, Light Peak) and part + * of the 2nd generation (Cactus Ridge 4C up to revision 1, Port Ridge). 
+ */ +static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev) +{ + if (pdev->is_hotplug_bridge && + (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C || + pdev->revision <= 1)) + pdev->no_msi = 1; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE, + quirk_thunderbolt_hotplug_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE, + quirk_thunderbolt_hotplug_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK, + quirk_thunderbolt_hotplug_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, + quirk_thunderbolt_hotplug_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE, + quirk_thunderbolt_hotplug_msi); + #ifdef CONFIG_ACPI /* * Apple: Shutdown Cactus Ridge Thunderbolt controller. @@ -3232,7 +3288,8 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev) acpi_execute_simple_method(SXIO, NULL, 0); acpi_execute_simple_method(SXLV, NULL, 0); } -DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, 0x1547, +DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, quirk_apple_poweroff_thunderbolt); /* @@ -3266,9 +3323,11 @@ static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev) if (!nhi) goto out; if (nhi->vendor != PCI_VENDOR_ID_INTEL - || (nhi->device != 0x1547 && nhi->device != 0x156c) - || nhi->subsystem_vendor != 0x2222 - || nhi->subsystem_device != 0x1111) + || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE && + nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C && + nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI) + || nhi->subsystem_vendor != 0x2222 + || nhi->subsystem_device != 0x1111) goto out; dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n"); device_pm_wait_for_dev(&dev->dev, &nhi->dev); @@ -3276,9 +3335,14 @@ out: pci_dev_put(nhi); pci_dev_put(sibling); } -DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1547, +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_LIGHT_RIDGE, quirk_apple_wait_for_thunderbolt); -DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x156d, +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, + quirk_apple_wait_for_thunderbolt); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE, quirk_apple_wait_for_thunderbolt); #endif @@ -3610,10 +3674,8 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe) static void quirk_dma_func0_alias(struct pci_dev *dev) { - if (PCI_FUNC(dev->devfn) != 0) { - dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); - dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; - } + if (PCI_FUNC(dev->devfn) != 0) + pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); } /* @@ -3626,10 +3688,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias); static void quirk_dma_func1_alias(struct pci_dev *dev) { - if (PCI_FUNC(dev->devfn) != 1) { - dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 1); - dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; - } + if (PCI_FUNC(dev->devfn) != 1) + pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1)); } /* @@ -3695,13 +3755,8 @@ static void quirk_fixed_dma_alias(struct pci_dev *dev) const struct pci_device_id *id; id = pci_match_id(fixed_dma_alias_tbl, dev); - if (id) { - dev->dma_alias_devfn = id->driver_data; - dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; - dev_info(&dev->dev, 
"Enabling fixed DMA alias to %02x.%d\n", - PCI_SLOT(dev->dma_alias_devfn), - PCI_FUNC(dev->dma_alias_devfn)); - } + if (id) + pci_add_dma_alias(dev, id->driver_data); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias); @@ -3734,6 +3789,21 @@ DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias); DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias); /* + * MIC x200 NTB forwards PCIe traffic using multiple alien RIDs. They have to + * be added as aliases to the DMA device in order to allow buffer access + * when IOMMU is enabled. Following devfns have to match RIT-LUT table + * programmed in the EEPROM. + */ +static void quirk_mic_x200_dma_alias(struct pci_dev *pdev) +{ + pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0)); + pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0)); + pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3)); +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias); + +/* * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero) * class code. Fix it. */ @@ -3936,6 +4006,55 @@ static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags) return acs_flags & ~flags ? 0 : 1; } +/* + * Sunrise Point PCH root ports implement ACS, but unfortunately as shown in + * the datasheet (Intel 100 Series Chipset Family PCH Datasheet, Vol. 2, + * 12.1.46, 12.1.47)[1] this chipset uses dwords for the ACS capability and + * control registers whereas the PCIe spec packs them into words (Rev 3.0, + * 7.16 ACS Extended Capability). The bit definitions are correct, but the + * control register is at offset 8 instead of 6 and we should probably use + * dword accesses to them. This applies to the following PCI Device IDs, as + * found in volume 1 of the datasheet[2]: + * + * 0xa110-0xa11f Sunrise Point-H PCI Express Root Port #{0-16} + * 0xa167-0xa16a Sunrise Point-H PCI Express Root Port #{17-20} + * + * N.B. This doesn't fix what lspci shows. + * + * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html + * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html + */ +static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev) +{ + return pci_is_pcie(dev) && + pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT && + ((dev->device & ~0xf) == 0xa110 || + (dev->device >= 0xa167 && dev->device <= 0xa16a)); +} + +#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4) + +static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags) +{ + int pos; + u32 cap, ctrl; + + if (!pci_quirk_intel_spt_pch_acs_match(dev)) + return -ENOTTY; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + if (!pos) + return -ENOTTY; + + /* see pci_acs_flags_enabled() */ + pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap); + acs_flags &= (cap | PCI_ACS_EC); + + pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl); + + return acs_flags & ~ctrl ? 
0 : 1; +} + static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) { /* @@ -4024,6 +4143,7 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs }, /* Intel PCH root ports */ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs }, + { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs }, { 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */ { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */ /* Cavium ThunderX */ @@ -4159,16 +4279,44 @@ static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev) return 0; } +static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev) +{ + int pos; + u32 cap, ctrl; + + if (!pci_quirk_intel_spt_pch_acs_match(dev)) + return -ENOTTY; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + if (!pos) + return -ENOTTY; + + pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap); + pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl); + + ctrl |= (cap & PCI_ACS_SV); + ctrl |= (cap & PCI_ACS_RR); + ctrl |= (cap & PCI_ACS_CR); + ctrl |= (cap & PCI_ACS_UF); + + pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl); + + dev_info(&dev->dev, "Intel SPT PCH root port ACS workaround enabled\n"); + + return 0; +} + static const struct pci_dev_enable_acs { u16 vendor; u16 device; int (*enable_acs)(struct pci_dev *dev); } pci_dev_enable_acs[] = { { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs }, + { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_spt_pch_acs }, { 0 } }; -void pci_dev_specific_enable_acs(struct pci_dev *dev) +int pci_dev_specific_enable_acs(struct pci_dev *dev) { const struct pci_dev_enable_acs *i; int ret; @@ -4180,9 +4328,11 @@ void pci_dev_specific_enable_acs(struct pci_dev *dev) i->device == (u16)PCI_ANY_ID)) { ret = i->enable_acs(dev); if (ret >= 0) - return; + return ret; } } + + return -ENOTTY; } /* diff --git a/drivers/pci/search.c b/drivers/pci/search.c index a20ce7d5e..33e0f033a 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c @@ -40,11 +40,15 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, * If the device is broken and uses an alias requester ID for * DMA, iterate over that too. */ - if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)) { - ret = fn(pdev, PCI_DEVID(pdev->bus->number, - pdev->dma_alias_devfn), data); - if (ret) - return ret; + if (unlikely(pdev->dma_alias_mask)) { + u8 devfn; + + for_each_set_bit(devfn, pdev->dma_alias_mask, U8_MAX) { + ret = fn(pdev, PCI_DEVID(pdev->bus->number, devfn), + data); + if (ret) + return ret; + } } for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) { |