author:    André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-08-05 17:04:01 -0300
committer: André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-08-05 17:04:01 -0300
commit:    57f0f512b273f60d52568b8c6b77e17f5636edc0 (patch)
tree:      5e910f0e82173f4ef4f51111366a3f1299037a7b /drivers/pci
Initial import
Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/Kconfig | 121
-rw-r--r--  drivers/pci/Makefile | 63
-rw-r--r--  drivers/pci/access.c | 725
-rw-r--r--  drivers/pci/ats.c | 375
-rw-r--r--  drivers/pci/bus.c | 386
-rw-r--r--  drivers/pci/host-bridge.c | 98
-rw-r--r--  drivers/pci/host/Kconfig | 128
-rw-r--r--  drivers/pci/host/Makefile | 17
-rw-r--r--  drivers/pci/host/pci-dra7xx.c | 457
-rw-r--r--  drivers/pci/host/pci-exynos.c | 658
-rw-r--r--  drivers/pci/host/pci-host-generic.c | 276
-rw-r--r--  drivers/pci/host/pci-imx6.c | 661
-rw-r--r--  drivers/pci/host/pci-keystone-dw.c | 517
-rw-r--r--  drivers/pci/host/pci-keystone.c | 413
-rw-r--r--  drivers/pci/host/pci-keystone.h | 58
-rw-r--r--  drivers/pci/host/pci-layerscape.c | 175
-rw-r--r--  drivers/pci/host/pci-mvebu.c | 1122
-rw-r--r--  drivers/pci/host/pci-rcar-gen2.c | 383
-rw-r--r--  drivers/pci/host/pci-tegra.c | 2079
-rw-r--r--  drivers/pci/host/pci-versatile.c | 238
-rw-r--r--  drivers/pci/host/pci-xgene.c | 531
-rw-r--r--  drivers/pci/host/pcie-designware.c | 832
-rw-r--r--  drivers/pci/host/pcie-designware.h | 87
-rw-r--r--  drivers/pci/host/pcie-iproc-platform.c | 108
-rw-r--r--  drivers/pci/host/pcie-iproc.c | 268
-rw-r--r--  drivers/pci/host/pcie-iproc.h | 42
-rw-r--r--  drivers/pci/host/pcie-rcar.c | 991
-rw-r--r--  drivers/pci/host/pcie-spear13xx.c | 391
-rw-r--r--  drivers/pci/host/pcie-xilinx.c | 890
-rw-r--r--  drivers/pci/hotplug-pci.c | 29
-rw-r--r--  drivers/pci/hotplug/Kconfig | 159
-rw-r--r--  drivers/pci/hotplug/Makefile | 72
-rw-r--r--  drivers/pci/hotplug/acpi_pcihp.c | 227
-rw-r--r--  drivers/pci/hotplug/acpiphp.h | 202
-rw-r--r--  drivers/pci/hotplug/acpiphp_core.c | 356
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 1036
-rw-r--r--  drivers/pci/hotplug/acpiphp_ibm.c | 487
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug.h | 110
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_core.c | 731
-rw-r--r--  drivers/pci/hotplug/cpci_hotplug_pci.c | 333
-rw-r--r--  drivers/pci/hotplug/cpcihp_generic.c | 226
-rw-r--r--  drivers/pci/hotplug/cpcihp_zt5550.c | 328
-rw-r--r--  drivers/pci/hotplug/cpcihp_zt5550.h | 79
-rw-r--r--  drivers/pci/hotplug/cpqphp.h | 738
-rw-r--r--  drivers/pci/hotplug/cpqphp_core.c | 1467
-rw-r--r--  drivers/pci/hotplug/cpqphp_ctrl.c | 2971
-rw-r--r--  drivers/pci/hotplug/cpqphp_nvram.c | 667
-rw-r--r--  drivers/pci/hotplug/cpqphp_nvram.h | 57
-rw-r--r--  drivers/pci/hotplug/cpqphp_pci.c | 1564
-rw-r--r--  drivers/pci/hotplug/cpqphp_sysfs.c | 222
-rw-r--r--  drivers/pci/hotplug/ibmphp.h | 760
-rw-r--r--  drivers/pci/hotplug/ibmphp_core.c | 1384
-rw-r--r--  drivers/pci/hotplug/ibmphp_ebda.c | 1211
-rw-r--r--  drivers/pci/hotplug/ibmphp_hpc.c | 1131
-rw-r--r--  drivers/pci/hotplug/ibmphp_pci.c | 1722
-rw-r--r--  drivers/pci/hotplug/ibmphp_res.c | 2158
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c | 555
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 187
-rw-r--r--  drivers/pci/hotplug/pciehp_acpi.c | 137
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 387
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c | 692
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 859
-rw-r--r--  drivers/pci/hotplug/pciehp_pci.c | 128
-rw-r--r--  drivers/pci/hotplug/pcihp_skeleton.c | 365
-rw-r--r--  drivers/pci/hotplug/rpadlpar.h | 24
-rw-r--r--  drivers/pci/hotplug/rpadlpar_core.c | 479
-rw-r--r--  drivers/pci/hotplug/rpadlpar_sysfs.c | 130
-rw-r--r--  drivers/pci/hotplug/rpaphp.h | 103
-rw-r--r--  drivers/pci/hotplug/rpaphp_core.c | 448
-rw-r--r--  drivers/pci/hotplug/rpaphp_pci.c | 135
-rw-r--r--  drivers/pci/hotplug/rpaphp_slot.c | 147
-rw-r--r--  drivers/pci/hotplug/s390_pci_hpc.c | 214
-rw-r--r--  drivers/pci/hotplug/sgi_hotplug.c | 717
-rw-r--r--  drivers/pci/hotplug/shpchp.h | 348
-rw-r--r--  drivers/pci/hotplug/shpchp_core.c | 389
-rw-r--r--  drivers/pci/hotplug/shpchp_ctrl.c | 730
-rw-r--r--  drivers/pci/hotplug/shpchp_hpc.c | 1111
-rw-r--r--  drivers/pci/hotplug/shpchp_pci.c | 116
-rw-r--r--  drivers/pci/hotplug/shpchp_sysfs.c | 96
-rw-r--r--  drivers/pci/htirq.c | 170
-rw-r--r--  drivers/pci/iov.c | 762
-rw-r--r--  drivers/pci/irq.c | 61
-rw-r--r--  drivers/pci/msi.c | 1365
-rw-r--r--  drivers/pci/of.c | 61
-rw-r--r--  drivers/pci/pci-acpi.c | 716
-rw-r--r--  drivers/pci/pci-driver.c | 1415
-rw-r--r--  drivers/pci/pci-label.c | 306
-rw-r--r--  drivers/pci/pci-stub.c | 97
-rw-r--r--  drivers/pci/pci-sysfs.c | 1584
-rw-r--r--  drivers/pci/pci.c | 4635
-rw-r--r--  drivers/pci/pci.h | 328
-rw-r--r--  drivers/pci/pcie/Kconfig | 82
-rw-r--r--  drivers/pci/pcie/Makefile | 16
-rw-r--r--  drivers/pci/pcie/aer/Kconfig | 28
-rw-r--r--  drivers/pci/pcie/aer/Kconfig.debug | 18
-rw-r--r--  drivers/pci/pcie/aer/Makefile | 12
-rw-r--r--  drivers/pci/pcie/aer/aer_inject.c | 536
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c | 434
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.h | 132
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_acpi.c | 141
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 805
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_errprint.c | 262
-rw-r--r--  drivers/pci/pcie/aer/ecrc.c | 131
-rw-r--r--  drivers/pci/pcie/aspm.c | 975
-rw-r--r--  drivers/pci/pcie/pme.c | 481
-rw-r--r--  drivers/pci/pcie/portdrv.h | 83
-rw-r--r--  drivers/pci/pcie/portdrv_acpi.c | 63
-rw-r--r--  drivers/pci/pcie/portdrv_bus.c | 55
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 578
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 364
-rw-r--r--  drivers/pci/probe.c | 2220
-rw-r--r--  drivers/pci/proc.c | 446
-rw-r--r--  drivers/pci/quirks.c | 4015
-rw-r--r--  drivers/pci/remove.c | 162
-rw-r--r--  drivers/pci/rom.c | 230
-rw-r--r--  drivers/pci/search.c | 386
-rw-r--r--  drivers/pci/setup-bus.c | 1819
-rw-r--r--  drivers/pci/setup-irq.c | 68
-rw-r--r--  drivers/pci/setup-res.c | 390
-rw-r--r--  drivers/pci/slot.c | 397
-rw-r--r--  drivers/pci/syscall.c | 136
-rw-r--r--  drivers/pci/vc.c | 434
-rw-r--r--  drivers/pci/vpd.c | 62
-rw-r--r--  drivers/pci/xen-pcifront.c | 1180
124 files changed, 70655 insertions, 0 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
new file mode 100644
index 000000000..73de4efcb
--- /dev/null
+++ b/drivers/pci/Kconfig
@@ -0,0 +1,121 @@
+#
+# PCI configuration
+#
+config PCI_BUS_ADDR_T_64BIT
+ def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
+ depends on PCI
+
+config PCI_MSI
+ bool "Message Signaled Interrupts (MSI and MSI-X)"
+ depends on PCI
+ select GENERIC_MSI_IRQ
+ help
+ This allows device drivers to enable MSI (Message Signaled
+ Interrupts). Message Signaled Interrupts enable a device to
+ generate an interrupt using an inbound Memory Write on its
+ PCI bus instead of asserting a device IRQ pin.
+
+ Use of PCI MSI interrupts can be disabled at kernel boot time
+ by using the 'pci=nomsi' option. This disables MSI for the
+ entire system.
+
+ If you don't know what to do here, say Y.
+
+config PCI_MSI_IRQ_DOMAIN
+ bool
+ depends on PCI_MSI
+ select GENERIC_MSI_IRQ_DOMAIN
+
+config PCI_DEBUG
+ bool "PCI Debugging"
+ depends on PCI && DEBUG_KERNEL
+ help
+ Say Y here if you want the PCI core to produce a bunch of debug
+ messages to the system log. Select this if you are having a
+ problem with PCI support and want to see more of what is going on.
+
+ When in doubt, say N.
+
+config PCI_REALLOC_ENABLE_AUTO
+ bool "Enable PCI resource re-allocation detection"
+ depends on PCI
+ help
+ Say Y here if you want the PCI core to detect if PCI resource
+ re-allocation needs to be enabled. You can always use pci=realloc=on
+ or pci=realloc=off to override it. Note this feature is a no-op
+ unless PCI_IOV support is also enabled; in that case it will
+ automatically re-allocate PCI resources if SR-IOV BARs have not
+ been allocated by the BIOS.
+
+ When in doubt, say N.
+
+config PCI_STUB
+ tristate "PCI Stub driver"
+ depends on PCI
+ help
+ Say Y or M here if you want to be able to reserve a PCI device
+ when it is going to be assigned to a guest operating system.
+
+ When in doubt, say N.
+
+config XEN_PCIDEV_FRONTEND
+ tristate "Xen PCI Frontend"
+ depends on PCI && X86 && XEN
+ select PCI_XEN
+ select XEN_XENBUS_FRONTEND
+ default y
+ help
+ The PCI device frontend driver allows the kernel to import arbitrary
+ PCI devices from a PCI backend to support PCI driver domains.
+
+config HT_IRQ
+ bool "Interrupts on hypertransport devices"
+ default y
+ depends on PCI && X86_LOCAL_APIC
+ help
+ This allows native hypertransport devices to use interrupts.
+
+ If unsure say Y.
+
+config PCI_ATS
+ bool
+
+config PCI_IOV
+ bool "PCI IOV support"
+ depends on PCI
+ select PCI_ATS
+ help
+ I/O Virtualization is a PCI feature supported by some devices
+ which allows them to create virtual devices which share their
+ physical resources.
+
+ If unsure, say N.
+
+config PCI_PRI
+ bool "PCI PRI support"
+ depends on PCI
+ select PCI_ATS
+ help
+ PRI is the PCI Page Request Interface. It allows PCI devices that are
+ behind an IOMMU to recover from page faults.
+
+ If unsure, say N.
+
+config PCI_PASID
+ bool "PCI PASID support"
+ depends on PCI
+ select PCI_ATS
+ help
+ Process Address Space Identifiers (PASIDs) can be used by PCI devices
+ to access more than one IO address space at the same time. To make
+ use of this feature an IOMMU is required which also supports PASIDs.
+ Select this option if you have such an IOMMU and want to compile the
+ driver for it into your kernel.
+
+ If unsure, say N.
+
+config PCI_LABEL
+ def_bool y if (DMI || ACPI)
+ select NLS
+
+source "drivers/pci/host/Kconfig"
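
As a minimal sketch of how a driver consumes the PCI_MSI option above
(all names here are hypothetical, not part of this import; the handler
is requested as shared because the device may fall back to its INTx pin):

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static irqreturn_t mydev_irq_handler(int irq, void *data);

    static int mydev_setup_irq(struct pci_dev *pdev, void *drvdata)
    {
        int ret;

        /* Ask for an MSI vector; fall back to legacy INTx on failure. */
        if (pci_enable_msi(pdev))
            dev_info(&pdev->dev, "MSI unavailable, using INTx\n");

        /* pdev->irq is the MSI vector if enablement succeeded. */
        ret = request_irq(pdev->irq, mydev_irq_handler, IRQF_SHARED,
                          "mydev", drvdata);
        if (ret)
            pci_disable_msi(pdev);
        return ret;
    }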
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
new file mode 100644
index 000000000..73e4af400
--- /dev/null
+++ b/drivers/pci/Makefile
@@ -0,0 +1,63 @@
+#
+# Makefile for the PCI bus specific drivers.
+#
+
+obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \
+ pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
+ irq.o vpd.o setup-bus.o vc.o
+obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_SYSFS) += slot.o
+
+obj-$(CONFIG_PCI_QUIRKS) += quirks.o
+
+# Build PCI Express stuff if needed
+obj-$(CONFIG_PCIEPORTBUS) += pcie/
+
+# Build the PCI Hotplug drivers if we were asked to
+obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
+ifdef CONFIG_HOTPLUG_PCI
+obj-y += hotplug-pci.o
+endif
+
+# Build the PCI MSI interrupt support
+obj-$(CONFIG_PCI_MSI) += msi.o
+
+# Build the Hypertransport interrupt support
+obj-$(CONFIG_HT_IRQ) += htirq.o
+
+obj-$(CONFIG_PCI_ATS) += ats.o
+obj-$(CONFIG_PCI_IOV) += iov.o
+
+#
+# Some architectures use the generic PCI setup functions
+#
+obj-$(CONFIG_ALPHA) += setup-irq.o
+obj-$(CONFIG_ARM) += setup-irq.o
+obj-$(CONFIG_UNICORE32) += setup-irq.o
+obj-$(CONFIG_SUPERH) += setup-irq.o
+obj-$(CONFIG_MIPS) += setup-irq.o
+obj-$(CONFIG_TILE) += setup-irq.o
+obj-$(CONFIG_SPARC_LEON) += setup-irq.o
+obj-$(CONFIG_M68K) += setup-irq.o
+
+#
+# ACPI Related PCI FW Functions
+# ACPI _DSM provided firmware instance and string name
+#
+obj-$(CONFIG_ACPI) += pci-acpi.o
+
+# SMBIOS provided firmware instance and labels
+obj-$(CONFIG_PCI_LABEL) += pci-label.o
+
+obj-$(CONFIG_PCI_SYSCALL) += syscall.o
+
+obj-$(CONFIG_PCI_STUB) += pci-stub.o
+
+obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
+
+obj-$(CONFIG_OF) += of.o
+
+ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
+
+# PCI host controller drivers
+obj-y += host/
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
new file mode 100644
index 000000000..d9b64a175
--- /dev/null
+++ b/drivers/pci/access.c
@@ -0,0 +1,725 @@
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/wait.h>
+
+#include "pci.h"
+
+/*
+ * This interrupt-safe spinlock protects all accesses to PCI
+ * configuration space.
+ */
+
+DEFINE_RAW_SPINLOCK(pci_lock);
+
+/*
+ * Wrappers for all PCI configuration access functions. They just check
+ * alignment, do locking and call the low-level functions pointed to
+ * by pci_dev->ops.
+ */
+
+#define PCI_byte_BAD 0
+#define PCI_word_BAD (pos & 1)
+#define PCI_dword_BAD (pos & 3)
+
+#define PCI_OP_READ(size,type,len) \
+int pci_bus_read_config_##size \
+ (struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
+{ \
+ int res; \
+ unsigned long flags; \
+ u32 data = 0; \
+ if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
+ raw_spin_lock_irqsave(&pci_lock, flags); \
+ res = bus->ops->read(bus, devfn, pos, len, &data); \
+ *value = (type)data; \
+ raw_spin_unlock_irqrestore(&pci_lock, flags); \
+ return res; \
+}
+
+#define PCI_OP_WRITE(size,type,len) \
+int pci_bus_write_config_##size \
+ (struct pci_bus *bus, unsigned int devfn, int pos, type value) \
+{ \
+ int res; \
+ unsigned long flags; \
+ if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
+ raw_spin_lock_irqsave(&pci_lock, flags); \
+ res = bus->ops->write(bus, devfn, pos, len, value); \
+ raw_spin_unlock_irqrestore(&pci_lock, flags); \
+ return res; \
+}
+
+PCI_OP_READ(byte, u8, 1)
+PCI_OP_READ(word, u16, 2)
+PCI_OP_READ(dword, u32, 4)
+PCI_OP_WRITE(byte, u8, 1)
+PCI_OP_WRITE(word, u16, 2)
+PCI_OP_WRITE(dword, u32, 4)
+
+EXPORT_SYMBOL(pci_bus_read_config_byte);
+EXPORT_SYMBOL(pci_bus_read_config_word);
+EXPORT_SYMBOL(pci_bus_read_config_dword);
+EXPORT_SYMBOL(pci_bus_write_config_byte);
+EXPORT_SYMBOL(pci_bus_write_config_word);
+EXPORT_SYMBOL(pci_bus_write_config_dword);
+
+int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ void __iomem *addr;
+
+ addr = bus->ops->map_bus(bus, devfn, where);
+ if (!addr) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ if (size == 1)
+ *val = readb(addr);
+ else if (size == 2)
+ *val = readw(addr);
+ else
+ *val = readl(addr);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(pci_generic_config_read);
+
+int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ void __iomem *addr;
+
+ addr = bus->ops->map_bus(bus, devfn, where);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (size == 1)
+ writeb(val, addr);
+ else if (size == 2)
+ writew(val, addr);
+ else
+ writel(val, addr);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(pci_generic_config_write);
+
+int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ void __iomem *addr;
+
+ addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
+ if (!addr) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ *val = readl(addr);
+
+ if (size <= 2)
+ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(pci_generic_config_read32);
+
+int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ void __iomem *addr;
+ u32 mask, tmp;
+
+ addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
+ if (!addr)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (size == 4) {
+ writel(val, addr);
+ return PCIBIOS_SUCCESSFUL;
+ } else {
+ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
+ }
+
+ tmp = readl(addr) & mask;
+ tmp |= val << ((where & 0x3) * 8);
+ writel(tmp, addr);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+EXPORT_SYMBOL_GPL(pci_generic_config_write32);
+
+/**
+ * pci_bus_set_ops - Set raw operations of pci bus
+ * @bus: pci bus struct
+ * @ops: new raw operations
+ *
+ * Return previous raw operations
+ */
+struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
+{
+ struct pci_ops *old_ops;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pci_lock, flags);
+ old_ops = bus->ops;
+ bus->ops = ops;
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
+ return old_ops;
+}
+EXPORT_SYMBOL(pci_bus_set_ops);
+
+/**
+ * pci_read_vpd - Read one entry from Vital Product Data
+ * @dev: pci device struct
+ * @pos: offset in vpd space
+ * @count: number of bytes to read
+ * @buf: pointer to where to store result
+ *
+ */
+ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
+{
+ if (!dev->vpd || !dev->vpd->ops)
+ return -ENODEV;
+ return dev->vpd->ops->read(dev, pos, count, buf);
+}
+EXPORT_SYMBOL(pci_read_vpd);
+
+/**
+ * pci_write_vpd - Write entry to Vital Product Data
+ * @dev: pci device struct
+ * @pos: offset in vpd space
+ * @count: number of bytes to write
+ * @buf: buffer containing write data
+ *
+ */
+ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
+{
+ if (!dev->vpd || !dev->vpd->ops)
+ return -ENODEV;
+ return dev->vpd->ops->write(dev, pos, count, buf);
+}
+EXPORT_SYMBOL(pci_write_vpd);
+
+/*
+ * The following routines are to prevent the user from accessing PCI config
+ * space when it's unsafe to do so. Some devices require this during BIST and
+ * we're required to prevent it during D-state transitions.
+ *
+ * We have a bit per device to indicate it's blocked and a global wait queue
+ * for callers to sleep on until devices are unblocked.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);
+
+static noinline void pci_wait_cfg(struct pci_dev *dev)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ __add_wait_queue(&pci_cfg_wait, &wait);
+ do {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ raw_spin_unlock_irq(&pci_lock);
+ schedule();
+ raw_spin_lock_irq(&pci_lock);
+ } while (dev->block_cfg_access);
+ __remove_wait_queue(&pci_cfg_wait, &wait);
+}
+
+/* Returns 0 on success, negative values indicate error. */
+#define PCI_USER_READ_CONFIG(size,type) \
+int pci_user_read_config_##size \
+ (struct pci_dev *dev, int pos, type *val) \
+{ \
+ int ret = PCIBIOS_SUCCESSFUL; \
+ u32 data = -1; \
+ if (PCI_##size##_BAD) \
+ return -EINVAL; \
+ raw_spin_lock_irq(&pci_lock); \
+ if (unlikely(dev->block_cfg_access)) \
+ pci_wait_cfg(dev); \
+ ret = dev->bus->ops->read(dev->bus, dev->devfn, \
+ pos, sizeof(type), &data); \
+ raw_spin_unlock_irq(&pci_lock); \
+ *val = (type)data; \
+ return pcibios_err_to_errno(ret); \
+} \
+EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
+
+/* Returns 0 on success, negative values indicate error. */
+#define PCI_USER_WRITE_CONFIG(size,type) \
+int pci_user_write_config_##size \
+ (struct pci_dev *dev, int pos, type val) \
+{ \
+ int ret = PCIBIOS_SUCCESSFUL; \
+ if (PCI_##size##_BAD) \
+ return -EINVAL; \
+ raw_spin_lock_irq(&pci_lock); \
+ if (unlikely(dev->block_cfg_access)) \
+ pci_wait_cfg(dev); \
+ ret = dev->bus->ops->write(dev->bus, dev->devfn, \
+ pos, sizeof(type), val); \
+ raw_spin_unlock_irq(&pci_lock); \
+ return pcibios_err_to_errno(ret); \
+} \
+EXPORT_SYMBOL_GPL(pci_user_write_config_##size);
+
+PCI_USER_READ_CONFIG(byte, u8)
+PCI_USER_READ_CONFIG(word, u16)
+PCI_USER_READ_CONFIG(dword, u32)
+PCI_USER_WRITE_CONFIG(byte, u8)
+PCI_USER_WRITE_CONFIG(word, u16)
+PCI_USER_WRITE_CONFIG(dword, u32)
+
+/* VPD access through PCI 2.2+ VPD capability */
+
+#define PCI_VPD_PCI22_SIZE (PCI_VPD_ADDR_MASK + 1)
+
+struct pci_vpd_pci22 {
+ struct pci_vpd base;
+ struct mutex lock;
+ u16 flag;
+ bool busy;
+ u8 cap;
+};
+
+/*
+ * Wait for last operation to complete.
+ * This code has to spin since there is no other notification from the PCI
+ * hardware. Since the VPD is often implemented by serial attachment to an
+ * EEPROM, it may take many milliseconds to complete.
+ *
+ * Returns 0 on success, negative values indicate error.
+ */
+static int pci_vpd_pci22_wait(struct pci_dev *dev)
+{
+ struct pci_vpd_pci22 *vpd =
+ container_of(dev->vpd, struct pci_vpd_pci22, base);
+ unsigned long timeout = jiffies + HZ/20 + 2;
+ u16 status;
+ int ret;
+
+ if (!vpd->busy)
+ return 0;
+
+ for (;;) {
+ ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
+ &status);
+ if (ret < 0)
+ return ret;
+
+ if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
+ vpd->busy = false;
+ return 0;
+ }
+
+ if (time_after(jiffies, timeout)) {
+ dev_printk(KERN_DEBUG, &dev->dev, "vpd r/w failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
+ return -ETIMEDOUT;
+ }
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (!cond_resched())
+ udelay(10);
+ }
+}
+
+static ssize_t pci_vpd_pci22_read(struct pci_dev *dev, loff_t pos, size_t count,
+ void *arg)
+{
+ struct pci_vpd_pci22 *vpd =
+ container_of(dev->vpd, struct pci_vpd_pci22, base);
+ int ret;
+ loff_t end = pos + count;
+ u8 *buf = arg;
+
+ if (pos < 0 || pos > vpd->base.len || end > vpd->base.len)
+ return -EINVAL;
+
+ if (mutex_lock_killable(&vpd->lock))
+ return -EINTR;
+
+ ret = pci_vpd_pci22_wait(dev);
+ if (ret < 0)
+ goto out;
+
+ while (pos < end) {
+ u32 val;
+ unsigned int i, skip;
+
+ ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
+ pos & ~3);
+ if (ret < 0)
+ break;
+ vpd->busy = true;
+ vpd->flag = PCI_VPD_ADDR_F;
+ ret = pci_vpd_pci22_wait(dev);
+ if (ret < 0)
+ break;
+
+ ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
+ if (ret < 0)
+ break;
+
+ skip = pos & 3;
+ for (i = 0; i < sizeof(u32); i++) {
+ if (i >= skip) {
+ *buf++ = val;
+ if (++pos == end)
+ break;
+ }
+ val >>= 8;
+ }
+ }
+out:
+ mutex_unlock(&vpd->lock);
+ return ret ? ret : count;
+}
+
+static ssize_t pci_vpd_pci22_write(struct pci_dev *dev, loff_t pos, size_t count,
+ const void *arg)
+{
+ struct pci_vpd_pci22 *vpd =
+ container_of(dev->vpd, struct pci_vpd_pci22, base);
+ const u8 *buf = arg;
+ loff_t end = pos + count;
+ int ret = 0;
+
+ if (pos < 0 || (pos & 3) || (count & 3) || end > vpd->base.len)
+ return -EINVAL;
+
+ if (mutex_lock_killable(&vpd->lock))
+ return -EINTR;
+
+ ret = pci_vpd_pci22_wait(dev);
+ if (ret < 0)
+ goto out;
+
+ while (pos < end) {
+ u32 val;
+
+ val = *buf++;
+ val |= *buf++ << 8;
+ val |= *buf++ << 16;
+ val |= *buf++ << 24;
+
+ ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
+ if (ret < 0)
+ break;
+ ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
+ pos | PCI_VPD_ADDR_F);
+ if (ret < 0)
+ break;
+
+ vpd->busy = true;
+ vpd->flag = 0;
+ ret = pci_vpd_pci22_wait(dev);
+ if (ret < 0)
+ break;
+
+ pos += sizeof(u32);
+ }
+out:
+ mutex_unlock(&vpd->lock);
+ return ret ? ret : count;
+}
+
+static void pci_vpd_pci22_release(struct pci_dev *dev)
+{
+ kfree(container_of(dev->vpd, struct pci_vpd_pci22, base));
+}
+
+static const struct pci_vpd_ops pci_vpd_pci22_ops = {
+ .read = pci_vpd_pci22_read,
+ .write = pci_vpd_pci22_write,
+ .release = pci_vpd_pci22_release,
+};
+
+int pci_vpd_pci22_init(struct pci_dev *dev)
+{
+ struct pci_vpd_pci22 *vpd;
+ u8 cap;
+
+ cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
+ if (!cap)
+ return -ENODEV;
+ vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
+ if (!vpd)
+ return -ENOMEM;
+
+ vpd->base.len = PCI_VPD_PCI22_SIZE;
+ vpd->base.ops = &pci_vpd_pci22_ops;
+ mutex_init(&vpd->lock);
+ vpd->cap = cap;
+ vpd->busy = false;
+ dev->vpd = &vpd->base;
+ return 0;
+}
+
+/**
+ * pci_cfg_access_lock - Lock PCI config reads/writes
+ * @dev: pci device struct
+ *
+ * When access is locked, any userspace reads or writes to config
+ * space and concurrent lock requests will sleep until access is
+ * allowed again via pci_cfg_access_unlock().
+ */
+void pci_cfg_access_lock(struct pci_dev *dev)
+{
+ might_sleep();
+
+ raw_spin_lock_irq(&pci_lock);
+ if (dev->block_cfg_access)
+ pci_wait_cfg(dev);
+ dev->block_cfg_access = 1;
+ raw_spin_unlock_irq(&pci_lock);
+}
+EXPORT_SYMBOL_GPL(pci_cfg_access_lock);
+
+/**
+ * pci_cfg_access_trylock - try to lock PCI config reads/writes
+ * @dev: pci device struct
+ *
+ * Same as pci_cfg_access_lock, but will return 0 if access is
+ * already locked, 1 otherwise. This function can be used from
+ * atomic contexts.
+ */
+bool pci_cfg_access_trylock(struct pci_dev *dev)
+{
+ unsigned long flags;
+ bool locked = true;
+
+ raw_spin_lock_irqsave(&pci_lock, flags);
+ if (dev->block_cfg_access)
+ locked = false;
+ else
+ dev->block_cfg_access = 1;
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
+
+ return locked;
+}
+EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);
+
+/**
+ * pci_cfg_access_unlock - Unlock PCI config reads/writes
+ * @dev: pci device struct
+ *
+ * This function allows PCI config accesses to resume.
+ */
+void pci_cfg_access_unlock(struct pci_dev *dev)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&pci_lock, flags);
+
+ /* This indicates a problem in the caller, but we don't need
+ * to kill them, unlike a double-block above. */
+ WARN_ON(!dev->block_cfg_access);
+
+ dev->block_cfg_access = 0;
+ wake_up_all(&pci_cfg_wait);
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
+}
+EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
+
+static inline int pcie_cap_version(const struct pci_dev *dev)
+{
+ return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
+}
+
+bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return type == PCI_EXP_TYPE_ENDPOINT ||
+ type == PCI_EXP_TYPE_LEG_END ||
+ type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_UPSTREAM ||
+ type == PCI_EXP_TYPE_DOWNSTREAM ||
+ type == PCI_EXP_TYPE_PCI_BRIDGE ||
+ type == PCI_EXP_TYPE_PCIE_BRIDGE;
+}
+
+static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return (type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_DOWNSTREAM) &&
+ pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
+}
+
+static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
+{
+ int type = pci_pcie_type(dev);
+
+ return type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_RC_EC;
+}
+
+static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
+{
+ if (!pci_is_pcie(dev))
+ return false;
+
+ switch (pos) {
+ case PCI_EXP_FLAGS:
+ return true;
+ case PCI_EXP_DEVCAP:
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_DEVSTA:
+ return true;
+ case PCI_EXP_LNKCAP:
+ case PCI_EXP_LNKCTL:
+ case PCI_EXP_LNKSTA:
+ return pcie_cap_has_lnkctl(dev);
+ case PCI_EXP_SLTCAP:
+ case PCI_EXP_SLTCTL:
+ case PCI_EXP_SLTSTA:
+ return pcie_cap_has_sltctl(dev);
+ case PCI_EXP_RTCTL:
+ case PCI_EXP_RTCAP:
+ case PCI_EXP_RTSTA:
+ return pcie_cap_has_rtctl(dev);
+ case PCI_EXP_DEVCAP2:
+ case PCI_EXP_DEVCTL2:
+ case PCI_EXP_LNKCAP2:
+ case PCI_EXP_LNKCTL2:
+ case PCI_EXP_LNKSTA2:
+ return pcie_cap_version(dev) > 1;
+ default:
+ return false;
+ }
+}
+
+/*
+ * Note that these accessor functions are only for the "PCI Express
+ * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
+ * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
+ */
+int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
+{
+ int ret;
+
+ *val = 0;
+ if (pos & 1)
+ return -EINVAL;
+
+ if (pcie_capability_reg_implemented(dev, pos)) {
+ ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
+ /*
+ * Reset *val to 0 if pci_read_config_word() fails; it may
+ * have been set to 0xFFFF if a hardware error occurred
+ * during the read.
+ */
+ if (ret)
+ *val = 0;
+ return ret;
+ }
+
+ /*
+ * For Functions that do not implement the Slot Capabilities,
+ * Slot Status, and Slot Control registers, these spaces must
+ * be hardwired to 0b, with the exception of the Presence Detect
+ * State bit in the Slot Status register of Downstream Ports,
+ * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
+ */
+ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+ *val = PCI_EXP_SLTSTA_PDS;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pcie_capability_read_word);
+
+int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
+{
+ int ret;
+
+ *val = 0;
+ if (pos & 3)
+ return -EINVAL;
+
+ if (pcie_capability_reg_implemented(dev, pos)) {
+ ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
+ /*
+ * Reset *val to 0 if pci_read_config_dword() fails; it may
+ * have been set to 0xFFFFFFFF if a hardware error occurred
+ * during the read.
+ */
+ if (ret)
+ *val = 0;
+ return ret;
+ }
+
+ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL &&
+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+ *val = PCI_EXP_SLTSTA_PDS;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pcie_capability_read_dword);
+
+int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
+{
+ if (pos & 1)
+ return -EINVAL;
+
+ if (!pcie_capability_reg_implemented(dev, pos))
+ return 0;
+
+ return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
+}
+EXPORT_SYMBOL(pcie_capability_write_word);
+
+int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
+{
+ if (pos & 3)
+ return -EINVAL;
+
+ if (!pcie_capability_reg_implemented(dev, pos))
+ return 0;
+
+ return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
+}
+EXPORT_SYMBOL(pcie_capability_write_dword);
+
+int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
+ u16 clear, u16 set)
+{
+ int ret;
+ u16 val;
+
+ ret = pcie_capability_read_word(dev, pos, &val);
+ if (!ret) {
+ val &= ~clear;
+ val |= set;
+ ret = pcie_capability_write_word(dev, pos, val);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(pcie_capability_clear_and_set_word);
+
+int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
+ u32 clear, u32 set)
+{
+ int ret;
+ u32 val;
+
+ ret = pcie_capability_read_dword(dev, pos, &val);
+ if (!ret) {
+ val &= ~clear;
+ val |= set;
+ ret = pcie_capability_write_dword(dev, pos, val);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
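
The pcie_capability_*() accessors above exist so callers need not know
which PCI Express Capability registers a given device actually
implements. A minimal sketch of their use (mydev_tune_link is a
hypothetical helper; the register and field constants come from
<uapi/linux/pci_regs.h>):

    static int mydev_tune_link(struct pci_dev *dev)
    {
        u16 lnksta;
        int ret;

        ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
        if (ret)
            return ret;

        dev_info(&dev->dev, "link speed gen%u, width x%u\n",
                 lnksta & PCI_EXP_LNKSTA_CLS,
                 (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT);

        /* Read-modify-write of LNKCTL: clear the ASPM Control field. */
        return pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
                                                  PCI_EXP_LNKCTL_ASPMC, 0);
    }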
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
new file mode 100644
index 000000000..a8099d4d0
--- /dev/null
+++ b/drivers/pci/ats.c
@@ -0,0 +1,375 @@
+/*
+ * drivers/pci/ats.c
+ *
+ * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
+ * Copyright (C) 2011 Advanced Micro Devices,
+ *
+ * PCI Express I/O Virtualization (IOV) support.
+ * Address Translation Service 1.0
+ * Page Request Interface added by Joerg Roedel <joerg.roedel@amd.com>
+ * PASID support added by Joerg Roedel <joerg.roedel@amd.com>
+ */
+
+#include <linux/export.h>
+#include <linux/pci-ats.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "pci.h"
+
+static int ats_alloc_one(struct pci_dev *dev, int ps)
+{
+ int pos;
+ u16 cap;
+ struct pci_ats *ats;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return -ENODEV;
+
+ ats = kzalloc(sizeof(*ats), GFP_KERNEL);
+ if (!ats)
+ return -ENOMEM;
+
+ ats->pos = pos;
+ ats->stu = ps;
+ pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
+ ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
+ PCI_ATS_MAX_QDEP;
+ dev->ats = ats;
+
+ return 0;
+}
+
+static void ats_free_one(struct pci_dev *dev)
+{
+ kfree(dev->ats);
+ dev->ats = NULL;
+}
+
+/**
+ * pci_enable_ats - enable the ATS capability
+ * @dev: the PCI device
+ * @ps: the IOMMU page shift
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int pci_enable_ats(struct pci_dev *dev, int ps)
+{
+ int rc;
+ u16 ctrl;
+
+ BUG_ON(dev->ats && dev->ats->is_enabled);
+
+ if (ps < PCI_ATS_MIN_STU)
+ return -EINVAL;
+
+ if (dev->is_physfn || dev->is_virtfn) {
+ struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+
+ mutex_lock(&pdev->sriov->lock);
+ if (pdev->ats)
+ rc = pdev->ats->stu == ps ? 0 : -EINVAL;
+ else
+ rc = ats_alloc_one(pdev, ps);
+
+ if (!rc)
+ pdev->ats->ref_cnt++;
+ mutex_unlock(&pdev->sriov->lock);
+ if (rc)
+ return rc;
+ }
+
+ if (!dev->is_physfn) {
+ rc = ats_alloc_one(dev, ps);
+ if (rc)
+ return rc;
+ }
+
+ ctrl = PCI_ATS_CTRL_ENABLE;
+ if (!dev->is_virtfn)
+ ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+
+ dev->ats->is_enabled = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_enable_ats);
+
+/**
+ * pci_disable_ats - disable the ATS capability
+ * @dev: the PCI device
+ */
+void pci_disable_ats(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ BUG_ON(!dev->ats || !dev->ats->is_enabled);
+
+ pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
+ ctrl &= ~PCI_ATS_CTRL_ENABLE;
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+
+ dev->ats->is_enabled = 0;
+
+ if (dev->is_physfn || dev->is_virtfn) {
+ struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+
+ mutex_lock(&pdev->sriov->lock);
+ pdev->ats->ref_cnt--;
+ if (!pdev->ats->ref_cnt)
+ ats_free_one(pdev);
+ mutex_unlock(&pdev->sriov->lock);
+ }
+
+ if (!dev->is_physfn)
+ ats_free_one(dev);
+}
+EXPORT_SYMBOL_GPL(pci_disable_ats);
+
+void pci_restore_ats_state(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ if (!pci_ats_enabled(dev))
+ return;
+ if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS))
+ BUG();
+
+ ctrl = PCI_ATS_CTRL_ENABLE;
+ if (!dev->is_virtfn)
+ ctrl |= PCI_ATS_CTRL_STU(dev->ats->stu - PCI_ATS_MIN_STU);
+
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+}
+EXPORT_SYMBOL_GPL(pci_restore_ats_state);
+
+/**
+ * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
+ * @dev: the PCI device
+ *
+ * Returns the queue depth on success, or negative on failure.
+ *
+ * The ATS spec uses 0 in the Invalidate Queue Depth field to
+ * indicate that the function can accept 32 Invalidate Requests.
+ * But here we use the `real' values (i.e. 1~32) for the Queue
+ * Depth; and 0 indicates the function shares the Queue with
+ * other functions (doesn't exclusively own a Queue).
+ */
+int pci_ats_queue_depth(struct pci_dev *dev)
+{
+ int pos;
+ u16 cap;
+
+ if (dev->is_virtfn)
+ return 0;
+
+ if (dev->ats)
+ return dev->ats->qdep;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
+
+ return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
+ PCI_ATS_MAX_QDEP;
+}
+EXPORT_SYMBOL_GPL(pci_ats_queue_depth);
+
+#ifdef CONFIG_PCI_PRI
+/**
+ * pci_enable_pri - Enable PRI capability
+ * @pdev: PCI device structure
+ * @reqs: maximum number of outstanding PRI page requests to allocate
+ *
+ * Returns 0 on success, negative value on error
+ */
+int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
+{
+ u16 control, status;
+ u32 max_requests;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
+ if ((control & PCI_PRI_CTRL_ENABLE) ||
+ !(status & PCI_PRI_STATUS_STOPPED))
+ return -EBUSY;
+
+ pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests);
+ reqs = min(max_requests, reqs);
+ pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
+
+ control |= PCI_PRI_CTRL_ENABLE;
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_enable_pri);
+
+/**
+ * pci_disable_pri - Disable PRI capability
+ * @pdev: PCI device structure
+ *
+ * Only clears the enabled-bit, regardless of its former value
+ */
+void pci_disable_pri(struct pci_dev *pdev)
+{
+ u16 control;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (!pos)
+ return;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ control &= ~PCI_PRI_CTRL_ENABLE;
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+}
+EXPORT_SYMBOL_GPL(pci_disable_pri);
+
+/**
+ * pci_reset_pri - Resets device's PRI state
+ * @pdev: PCI device structure
+ *
+ * The PRI capability must be disabled before this function is called.
+ * Returns 0 on success, negative value on error.
+ */
+int pci_reset_pri(struct pci_dev *pdev)
+{
+ u16 control;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ if (control & PCI_PRI_CTRL_ENABLE)
+ return -EBUSY;
+
+ control |= PCI_PRI_CTRL_RESET;
+
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_reset_pri);
+#endif /* CONFIG_PCI_PRI */
+
+#ifdef CONFIG_PCI_PASID
+/**
+ * pci_enable_pasid - Enable the PASID capability
+ * @pdev: PCI device structure
+ * @features: Features to enable
+ *
+ * Returns 0 on success, negative value on error. This function checks
+ * whether the features are actually supported by the device and returns
+ * an error if not.
+ */
+int pci_enable_pasid(struct pci_dev *pdev, int features)
+{
+ u16 control, supported;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PASID_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+
+ if (control & PCI_PASID_CTRL_ENABLE)
+ return -EINVAL;
+
+ supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
+
+ /* User wants to enable anything unsupported? */
+ if ((supported & features) != features)
+ return -EINVAL;
+
+ control = PCI_PASID_CTRL_ENABLE | features;
+
+ pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_enable_pasid);
+
+/**
+ * pci_disable_pasid - Disable the PASID capability
+ * @pdev: PCI device structure
+ *
+ */
+void pci_disable_pasid(struct pci_dev *pdev)
+{
+ u16 control = 0;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+ if (!pos)
+ return;
+
+ pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+}
+EXPORT_SYMBOL_GPL(pci_disable_pasid);
+
+/**
+ * pci_pasid_features - Check which PASID features are supported
+ * @pdev: PCI device structure
+ *
+ * Returns a negative value when no PASID capability is present.
+ * Otherwise it returns a bitmask with supported features. Current
+ * features reported are:
+ * PCI_PASID_CAP_EXEC - Execute permission supported
+ * PCI_PASID_CAP_PRIV - Privileged mode supported
+ */
+int pci_pasid_features(struct pci_dev *pdev)
+{
+ u16 supported;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+
+ supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
+
+ return supported;
+}
+EXPORT_SYMBOL_GPL(pci_pasid_features);
+
+#define PASID_NUMBER_SHIFT 8
+#define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT)
+/**
+ * pci_max_pasid - Get maximum number of PASIDs supported by device
+ * @pdev: PCI device structure
+ *
+ * Returns negative value when PASID capability is not present.
+ * Otherwise it returns the number of supported PASIDs.
+ */
+int pci_max_pasids(struct pci_dev *pdev)
+{
+ u16 supported;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+
+ supported = (supported & PASID_NUMBER_MASK) >> PASID_NUMBER_SHIFT;
+
+ return (1 << supported);
+}
+EXPORT_SYMBOL_GPL(pci_max_pasids);
+#endif /* CONFIG_PCI_PASID */
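
The ATS, PRI, and PASID helpers above are consumed by IOMMU drivers. A
plausible enable sequence with unwinding on failure is sketched below;
the function name, request count, and page shift are illustrative, and
pci_reset_pri() must run while PRI is still disabled:

    #include <linux/mm.h>          /* PAGE_SHIFT */
    #include <linux/pci-ats.h>

    static int mydev_enable_iommuv2(struct pci_dev *pdev)
    {
        int ret;

        ret = pci_enable_pasid(pdev, 0);
        if (ret)
            return ret;

        ret = pci_reset_pri(pdev);
        if (ret)
            goto out_disable_pasid;

        ret = pci_enable_pri(pdev, 32);    /* up to 32 page requests */
        if (ret)
            goto out_disable_pasid;

        ret = pci_enable_ats(pdev, PAGE_SHIFT);
        if (ret)
            goto out_disable_pri;

        return 0;

    out_disable_pri:
        pci_disable_pri(pdev);
    out_disable_pasid:
        pci_disable_pasid(pdev);
        return ret;
    }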
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
new file mode 100644
index 000000000..6fbd3f2b5
--- /dev/null
+++ b/drivers/pci/bus.c
@@ -0,0 +1,386 @@
+/*
+ * drivers/pci/bus.c
+ *
+ * From setup-res.c, by:
+ * Dave Rusling (david.rusling@reo.mts.dec.com)
+ * David Mosberger (davidm@cs.arizona.edu)
+ * David Miller (davem@redhat.com)
+ * Ivan Kokshaysky (ink@jurassic.park.msu.ru)
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+
+#include "pci.h"
+
+void pci_add_resource_offset(struct list_head *resources, struct resource *res,
+ resource_size_t offset)
+{
+ struct resource_entry *entry;
+
+ entry = resource_list_create_entry(res, 0);
+ if (!entry) {
+ printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res);
+ return;
+ }
+
+ entry->offset = offset;
+ resource_list_add_tail(entry, resources);
+}
+EXPORT_SYMBOL(pci_add_resource_offset);
+
+void pci_add_resource(struct list_head *resources, struct resource *res)
+{
+ pci_add_resource_offset(resources, res, 0);
+}
+EXPORT_SYMBOL(pci_add_resource);
+
+void pci_free_resource_list(struct list_head *resources)
+{
+ resource_list_free(resources);
+}
+EXPORT_SYMBOL(pci_free_resource_list);
+
+void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
+ unsigned int flags)
+{
+ struct pci_bus_resource *bus_res;
+
+ bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL);
+ if (!bus_res) {
+ dev_err(&bus->dev, "can't add %pR resource\n", res);
+ return;
+ }
+
+ bus_res->res = res;
+ bus_res->flags = flags;
+ list_add_tail(&bus_res->list, &bus->resources);
+}
+
+struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n)
+{
+ struct pci_bus_resource *bus_res;
+
+ if (n < PCI_BRIDGE_RESOURCE_NUM)
+ return bus->resource[n];
+
+ n -= PCI_BRIDGE_RESOURCE_NUM;
+ list_for_each_entry(bus_res, &bus->resources, list) {
+ if (n-- == 0)
+ return bus_res->res;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pci_bus_resource_n);
+
+void pci_bus_remove_resources(struct pci_bus *bus)
+{
+ int i;
+ struct pci_bus_resource *bus_res, *tmp;
+
+ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
+ bus->resource[i] = NULL;
+
+ list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
+ list_del(&bus_res->list);
+ kfree(bus_res);
+ }
+}
+
+static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
+#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
+static struct pci_bus_region pci_64_bit = {0,
+ (pci_bus_addr_t) 0xffffffffffffffffULL};
+static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL,
+ (pci_bus_addr_t) 0xffffffffffffffffULL};
+#endif
+
+/*
+ * @res contains CPU addresses. Clip it so the corresponding bus addresses
+ * on @bus are entirely within @region. This is used to control the bus
+ * addresses of resources we allocate, e.g., we may need a resource that
+ * can be mapped by a 32-bit BAR.
+ */
+static void pci_clip_resource_to_region(struct pci_bus *bus,
+ struct resource *res,
+ struct pci_bus_region *region)
+{
+ struct pci_bus_region r;
+
+ pcibios_resource_to_bus(bus, &r, res);
+ if (r.start < region->start)
+ r.start = region->start;
+ if (r.end > region->end)
+ r.end = region->end;
+
+ if (r.end < r.start)
+ res->end = res->start - 1;
+ else
+ pcibios_bus_to_resource(bus, res, &r);
+}
+
+static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
+ resource_size_t size, resource_size_t align,
+ resource_size_t min, unsigned long type_mask,
+ resource_size_t (*alignf)(void *,
+ const struct resource *,
+ resource_size_t,
+ resource_size_t),
+ void *alignf_data,
+ struct pci_bus_region *region)
+{
+ int i, ret;
+ struct resource *r, avail;
+ resource_size_t max;
+
+ type_mask |= IORESOURCE_TYPE_BITS;
+
+ pci_bus_for_each_resource(bus, r, i) {
+ if (!r)
+ continue;
+
+ /* type_mask must match */
+ if ((res->flags ^ r->flags) & type_mask)
+ continue;
+
+ /* We cannot allocate a non-prefetching resource
+ from a pre-fetching area */
+ if ((r->flags & IORESOURCE_PREFETCH) &&
+ !(res->flags & IORESOURCE_PREFETCH))
+ continue;
+
+ avail = *r;
+ pci_clip_resource_to_region(bus, &avail, region);
+
+ /*
+ * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
+ * protect badly documented motherboard resources, but if
+ * this is an already-configured bridge window, its start
+ * overrides "min".
+ */
+ if (avail.start)
+ min = avail.start;
+
+ max = avail.end;
+
+ /* Ok, try it out.. */
+ ret = allocate_resource(r, res, size, min, max,
+ align, alignf, alignf_data);
+ if (ret == 0)
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * pci_bus_alloc_resource - allocate a resource from a parent bus
+ * @bus: PCI bus
+ * @res: resource to allocate
+ * @size: size of resource to allocate
+ * @align: alignment of resource to allocate
+ * @min: minimum /proc/iomem address to allocate
+ * @type_mask: IORESOURCE_* type flags
+ * @alignf: resource alignment function
+ * @alignf_data: data argument for resource alignment function
+ *
+ * Given the PCI bus a device resides on, the size, minimum address,
+ * alignment and type, try to find an acceptable resource allocation
+ * for a specific device resource.
+ */
+int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
+ resource_size_t size, resource_size_t align,
+ resource_size_t min, unsigned long type_mask,
+ resource_size_t (*alignf)(void *,
+ const struct resource *,
+ resource_size_t,
+ resource_size_t),
+ void *alignf_data)
+{
+#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
+ int rc;
+
+ if (res->flags & IORESOURCE_MEM_64) {
+ rc = pci_bus_alloc_from_region(bus, res, size, align, min,
+ type_mask, alignf, alignf_data,
+ &pci_high);
+ if (rc == 0)
+ return 0;
+
+ return pci_bus_alloc_from_region(bus, res, size, align, min,
+ type_mask, alignf, alignf_data,
+ &pci_64_bit);
+ }
+#endif
+
+ return pci_bus_alloc_from_region(bus, res, size, align, min,
+ type_mask, alignf, alignf_data,
+ &pci_32_bit);
+}
+EXPORT_SYMBOL(pci_bus_alloc_resource);
+
+/*
+ * The @idx resource of @dev should be a PCI-PCI bridge window. If this
+ * resource fits inside a window of an upstream bridge, do nothing. If it
+ * overlaps an upstream window but extends outside it, clip the resource so
+ * it fits completely inside.
+ */
+bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
+{
+ struct pci_bus *bus = dev->bus;
+ struct resource *res = &dev->resource[idx];
+ struct resource orig_res = *res;
+ struct resource *r;
+ int i;
+
+ pci_bus_for_each_resource(bus, r, i) {
+ resource_size_t start, end;
+
+ if (!r)
+ continue;
+
+ if (resource_type(res) != resource_type(r))
+ continue;
+
+ start = max(r->start, res->start);
+ end = min(r->end, res->end);
+
+ if (start > end)
+ continue; /* no overlap */
+
+ if (res->start == start && res->end == end)
+ return false; /* no change */
+
+ res->start = start;
+ res->end = end;
+ dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
+ &orig_res, res);
+
+ return true;
+ }
+
+ return false;
+}
+
+void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
+
+/**
+ * pci_bus_add_device - start driver for a single device
+ * @dev: device to add
+ *
+ * This adds sysfs entries and starts the device's driver.
+ */
+void pci_bus_add_device(struct pci_dev *dev)
+{
+ int retval;
+
+ /*
+ * Can not put in pci_device_add yet because resources
+ * are not assigned yet for some devices.
+ */
+ pci_fixup_device(pci_fixup_final, dev);
+ pci_create_sysfs_dev_files(dev);
+ pci_proc_attach_device(dev);
+
+ dev->match_driver = true;
+ retval = device_attach(&dev->dev);
+ WARN_ON(retval < 0);
+
+ dev->is_added = 1;
+}
+EXPORT_SYMBOL_GPL(pci_bus_add_device);
+
+/**
+ * pci_bus_add_devices - start driver for PCI devices
+ * @bus: bus to check for new devices
+ *
+ * Start driver for PCI devices and add some sysfs entries.
+ */
+void pci_bus_add_devices(const struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+ struct pci_bus *child;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ /* Skip already-added devices */
+ if (dev->is_added)
+ continue;
+ pci_bus_add_device(dev);
+ }
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ BUG_ON(!dev->is_added);
+ child = dev->subordinate;
+ if (child)
+ pci_bus_add_devices(child);
+ }
+}
+EXPORT_SYMBOL(pci_bus_add_devices);
+
+/**
+ * pci_walk_bus - walk devices on/under bus, calling callback
+ * @top: bus whose devices should be walked
+ * @cb: callback to be called for each device found
+ * @userdata: arbitrary pointer to be passed to callback
+ *
+ * Walk the given bus, including any bridged devices
+ * on buses under this bus. Call the provided callback
+ * on each device found.
+ *
+ * We check the return of @cb each time. If it returns anything
+ * other than 0, we break out.
+ *
+ */
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ void *userdata)
+{
+ struct pci_dev *dev;
+ struct pci_bus *bus;
+ struct list_head *next;
+ int retval;
+
+ bus = top;
+ down_read(&pci_bus_sem);
+ next = top->devices.next;
+ for (;;) {
+ if (next == &bus->devices) {
+ /* end of this bus, go up or finish */
+ if (bus == top)
+ break;
+ next = bus->self->bus_list.next;
+ bus = bus->self->bus;
+ continue;
+ }
+ dev = list_entry(next, struct pci_dev, bus_list);
+ if (dev->subordinate) {
+ /* this is a pci-pci bridge, do its devices next */
+ next = dev->subordinate->devices.next;
+ bus = dev->subordinate;
+ } else
+ next = dev->bus_list.next;
+
+ retval = cb(dev, userdata);
+ if (retval)
+ break;
+ }
+ up_read(&pci_bus_sem);
+}
+EXPORT_SYMBOL_GPL(pci_walk_bus);
+
+struct pci_bus *pci_bus_get(struct pci_bus *bus)
+{
+ if (bus)
+ get_device(&bus->dev);
+ return bus;
+}
+EXPORT_SYMBOL(pci_bus_get);
+
+void pci_bus_put(struct pci_bus *bus)
+{
+ if (bus)
+ put_device(&bus->dev);
+}
+EXPORT_SYMBOL(pci_bus_put);
+
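
pci_walk_bus() takes the callback described in its kerneldoc above; the
walk stops as soon as the callback returns non-zero. A small sketch
counting bound devices under a bus (names are illustrative):

    static int count_bound(struct pci_dev *dev, void *userdata)
    {
        int *count = userdata;

        if (dev->driver)
            (*count)++;
        return 0;       /* keep walking */
    }

    static int bound_devices_below(struct pci_bus *top)
    {
        int count = 0;

        pci_walk_bus(top, count_bound, &count);
        return count;
    }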
diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c
new file mode 100644
index 000000000..5f4a2e04c
--- /dev/null
+++ b/drivers/pci/host-bridge.c
@@ -0,0 +1,98 @@
+/*
+ * host bridge related code
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+
+#include "pci.h"
+
+static struct pci_bus *find_pci_root_bus(struct pci_bus *bus)
+{
+ while (bus->parent)
+ bus = bus->parent;
+
+ return bus;
+}
+
+struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus)
+{
+ struct pci_bus *root_bus = find_pci_root_bus(bus);
+
+ return to_pci_host_bridge(root_bus->bridge);
+}
+
+struct device *pci_get_host_bridge_device(struct pci_dev *dev)
+{
+ struct pci_bus *root_bus = find_pci_root_bus(dev->bus);
+ struct device *bridge = root_bus->bridge;
+
+ kobject_get(&bridge->kobj);
+ return bridge;
+}
+
+void pci_put_host_bridge_device(struct device *dev)
+{
+ kobject_put(&dev->kobj);
+}
+
+void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
+ void (*release_fn)(struct pci_host_bridge *),
+ void *release_data)
+{
+ bridge->release_fn = release_fn;
+ bridge->release_data = release_data;
+}
+
+void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
+ struct resource *res)
+{
+ struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
+ struct resource_entry *window;
+ resource_size_t offset = 0;
+
+ resource_list_for_each_entry(window, &bridge->windows) {
+ if (resource_contains(window->res, res)) {
+ offset = window->offset;
+ break;
+ }
+ }
+
+ region->start = res->start - offset;
+ region->end = res->end - offset;
+}
+EXPORT_SYMBOL(pcibios_resource_to_bus);
+
+static bool region_contains(struct pci_bus_region *region1,
+ struct pci_bus_region *region2)
+{
+ return region1->start <= region2->start && region1->end >= region2->end;
+}
+
+void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
+ struct pci_bus_region *region)
+{
+ struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
+ struct resource_entry *window;
+ resource_size_t offset = 0;
+
+ resource_list_for_each_entry(window, &bridge->windows) {
+ struct pci_bus_region bus_region;
+
+ if (resource_type(res) != resource_type(window->res))
+ continue;
+
+ bus_region.start = window->res->start - window->offset;
+ bus_region.end = window->res->end - window->offset;
+
+ if (region_contains(&bus_region, region)) {
+ offset = window->offset;
+ break;
+ }
+ }
+
+ res->start = region->start + offset;
+ res->end = region->end + offset;
+}
+EXPORT_SYMBOL(pcibios_bus_to_resource);
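
pcibios_resource_to_bus() applies the host bridge window offset found
above, translating a CPU address range into the addresses devices see on
the bus. A sketch of one common use, computing the bus address of a BAR
(the helper name is hypothetical):

    static pci_bus_addr_t mydev_bus_addr_of_bar(struct pci_dev *pdev, int bar)
    {
        struct pci_bus_region region;

        pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
        return region.start;
    }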
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
new file mode 100644
index 000000000..1dfb567b3
--- /dev/null
+++ b/drivers/pci/host/Kconfig
@@ -0,0 +1,128 @@
+menu "PCI host controller drivers"
+ depends on PCI
+
+config PCI_DRA7XX
+ bool "TI DRA7xx PCIe controller"
+ select PCIE_DW
+ depends on OF && HAS_IOMEM && TI_PIPE3
+ help
+ Enables support for the PCIe controller in the DRA7xx SoC. There
+ are two instances of the PCIe controller in DRA7xx. The controller
+ can act as both EP and RC. This driver reuses the DesignWare core.
+
+config PCI_MVEBU
+ bool "Marvell EBU PCIe controller"
+ depends on ARCH_MVEBU || ARCH_DOVE
+ depends on OF
+
+config PCIE_DW
+ bool
+
+config PCI_EXYNOS
+ bool "Samsung Exynos PCIe controller"
+ depends on SOC_EXYNOS5440
+ select PCIEPORTBUS
+ select PCIE_DW
+
+config PCI_IMX6
+ bool "Freescale i.MX6 PCIe controller"
+ depends on SOC_IMX6Q
+ select PCIEPORTBUS
+ select PCIE_DW
+
+config PCI_TEGRA
+ bool "NVIDIA Tegra PCIe controller"
+ depends on ARCH_TEGRA && !ARM64
+ help
+ Say Y here if you want support for the PCIe host controller found
+ on NVIDIA Tegra SoCs.
+
+config PCI_RCAR_GEN2
+ bool "Renesas R-Car Gen2 Internal PCI controller"
+ depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST)
+ help
+ Say Y here if you want internal PCI support on R-Car Gen2 SoC.
+ There are 3 internal PCI controllers available with a single
+ built-in EHCI/OHCI host controller present on each one.
+
+config PCI_RCAR_GEN2_PCIE
+ bool "Renesas R-Car PCIe controller"
+ depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST)
+ help
+ Say Y here if you want PCIe controller support on R-Car Gen2 SoCs.
+
+config PCI_HOST_GENERIC
+ bool "Generic PCI host controller"
+ depends on ARM && OF
+ help
+ Say Y here if you want to support a simple generic PCI host
+ controller, such as the one emulated by kvmtool.
+
+config PCIE_SPEAR13XX
+ bool "STMicroelectronics SPEAr PCIe controller"
+ depends on ARCH_SPEAR13XX
+ select PCIEPORTBUS
+ select PCIE_DW
+ help
+ Say Y here if you want PCIe support on SPEAr13XX SoCs.
+
+config PCI_KEYSTONE
+ bool "TI Keystone PCIe controller"
+ depends on ARCH_KEYSTONE
+ select PCIE_DW
+ select PCIEPORTBUS
+ help
+ Say Y here if you want to enable PCI controller support on Keystone
+ SoCs. The PCI controller on Keystone is based on DesignWare hardware,
+ so the driver re-uses the DesignWare core functions.
+
+config PCIE_XILINX
+ bool "Xilinx AXI PCIe host bridge support"
+ depends on ARCH_ZYNQ
+ help
+ Say 'Y' here if you want the kernel to support the Xilinx AXI PCIe
+ Host Bridge driver.
+
+config PCI_XGENE
+ bool "X-Gene PCIe controller"
+ depends on ARCH_XGENE
+ depends on OF
+ select PCIEPORTBUS
+ help
+ Say Y here if you want internal PCI support on APM X-Gene SoC.
+ There are 5 internal PCIe ports available. Each port is GEN3 capable
+ and supports link widths from x1 to x8.
+
+config PCI_LAYERSCAPE
+ bool "Freescale Layerscape PCIe controller"
+ depends on OF && ARM
+ select PCIE_DW
+ select MFD_SYSCON
+ help
+ Say Y here if you want PCIe controller support on Layerscape SoCs.
+
+config PCI_VERSATILE
+ bool "ARM Versatile PB PCI controller"
+ depends on ARCH_VERSATILE
+
+config PCIE_IPROC
+ tristate "Broadcom iProc PCIe controller"
+ depends on OF && ARM
+ default n
+ help
+ This enables iProc PCIe core controller support for Broadcom's
+ iProc family of SoCs. An appropriate bus interface driver also needs
+ to be enabled.
+
+config PCIE_IPROC_PLATFORM
+ tristate "Broadcom iProc PCIe platform bus driver"
+ depends on ARCH_BCM_IPROC || (ARM && COMPILE_TEST)
+ depends on OF
+ select PCIE_IPROC
+ default ARCH_BCM_IPROC
+ help
+ Say Y here if you want to use the Broadcom iProc PCIe controller
+ through the generic platform bus interface.
+
+endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
new file mode 100644
index 000000000..f733b4e27
--- /dev/null
+++ b/drivers/pci/host/Makefile
@@ -0,0 +1,17 @@
+obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
+obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
+obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
+obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
+obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
+obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o
+obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
+obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
+obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
+obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
+obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
+obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
+obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
+obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
+obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
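
Each host controller is wired in with exactly one Kconfig entry and one
Makefile rule, as above. A hypothetical new DesignWare-based controller
("FOO" is an invented name, purely for illustration) would follow the
same pattern:

    config PCIE_FOO
            bool "FOO PCIe controller"
            depends on OF && ARM
            select PCIE_DW
            help
              Say Y here if you want PCIe controller support on FOO SoCs.

    obj-$(CONFIG_PCIE_FOO) += pcie-foo.o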
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
new file mode 100644
index 000000000..2d57e19a2
--- /dev/null
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -0,0 +1,457 @@
+/*
+ * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/resource.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/* PCIe controller wrapper DRA7XX configuration registers */
+
+#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN 0x0024
+#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN 0x0028
+#define ERR_SYS BIT(0)
+#define ERR_FATAL BIT(1)
+#define ERR_NONFATAL BIT(2)
+#define ERR_COR BIT(3)
+#define ERR_AXI BIT(4)
+#define ERR_ECRC BIT(5)
+#define PME_TURN_OFF BIT(8)
+#define PME_TO_ACK BIT(9)
+#define PM_PME BIT(10)
+#define LINK_REQ_RST BIT(11)
+#define LINK_UP_EVT BIT(12)
+#define CFG_BME_EVT BIT(13)
+#define CFG_MSE_EVT BIT(14)
+#define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
+ ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
+ LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)
+
+#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI 0x0034
+#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI 0x0038
+#define INTA BIT(0)
+#define INTB BIT(1)
+#define INTC BIT(2)
+#define INTD BIT(3)
+#define MSI BIT(4)
+#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)
+
+#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104
+#define LTSSM_EN 0x1
+
+#define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C
+#define LINK_UP BIT(16)
+
+struct dra7xx_pcie {
+ void __iomem *base;
+ struct phy **phy;
+ int phy_count;
+ struct device *dev;
+ struct pcie_port pp;
+};
+
+#define to_dra7xx_pcie(x) container_of((x), struct dra7xx_pcie, pp)
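+
+/*
+ * Illustrative note (not part of the original driver): because 'pp' is
+ * embedded in struct dra7xx_pcie (a member, not a pointer), the DesignWare
+ * core can hand back a struct pcie_port * and the glue recovers its own
+ * state with container_of():
+ *
+ *   struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+ */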
+
+static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
+{
+ return readl(pcie->base + offset);
+}
+
+static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
+ u32 value)
+{
+ writel(value, pcie->base + offset);
+}
+
+static int dra7xx_pcie_link_up(struct pcie_port *pp)
+{
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+ u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
+
+ return !!(reg & LINK_UP);
+}
+
+static int dra7xx_pcie_establish_link(struct pcie_port *pp)
+{
+ u32 reg;
+ unsigned int retries = 1000;
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+
+ if (dw_pcie_link_up(pp)) {
+ dev_err(pp->dev, "link is already up\n");
+ return 0;
+ }
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+ reg |= LTSSM_EN;
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+
+ while (retries--) {
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
+ if (reg & LINK_UP)
+ break;
+ usleep_range(10, 20);
+ }
+
+ if (!(reg & LINK_UP)) {
+ dev_err(pp->dev, "link is not up\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
+{
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
+ ~INTERRUPTS);
+ dra7xx_pcie_writel(dra7xx,
+ PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, INTERRUPTS);
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
+ ~LEG_EP_INTERRUPTS & ~MSI);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dra7xx_pcie_writel(dra7xx,
+ PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, MSI);
+ else
+ dra7xx_pcie_writel(dra7xx,
+ PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
+ LEG_EP_INTERRUPTS);
+}
+
+static void dra7xx_pcie_host_init(struct pcie_port *pp)
+{
+ dw_pcie_setup_rc(pp);
+ dra7xx_pcie_establish_link(pp);
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+ dra7xx_pcie_enable_interrupts(pp);
+}
+
+static struct pcie_host_ops dra7xx_pcie_host_ops = {
+ .link_up = dra7xx_pcie_link_up,
+ .host_init = dra7xx_pcie_host_init,
+};
+
+static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = dra7xx_pcie_intx_map,
+};
+
+static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
+{
+ struct device *dev = pp->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
+
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ pp->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
+ &intx_domain_ops, pp);
+ if (!pp->irq_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ return PTR_ERR(pp->irq_domain);
+ }
+
+ return 0;
+}
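+
+/*
+ * Illustrative example (not part of the original driver): the 4-entry
+ * linear domain above maps hwirq 0..3 to INTA..INTD, so a latched INTC
+ * (BIT(2)) in IRQSTATUS_MSI resolves to hwirq 2 via
+ *
+ *   virq = irq_find_mapping(pp->irq_domain, ffs(BIT(2)) - 1);
+ *
+ * which is exactly the lookup the interrupt handler below performs.
+ */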
+
+static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+ struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
+ u32 reg;
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
+
+ switch (reg) {
+ case MSI:
+ dw_handle_msi_irq(pp);
+ break;
+ case INTA:
+ case INTB:
+ case INTC:
+ case INTD:
+ generic_handle_irq(irq_find_mapping(pp->irq_domain, ffs(reg) - 1));
+ break;
+ }
+
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
+{
+ struct dra7xx_pcie *dra7xx = arg;
+ u32 reg;
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);
+
+ if (reg & ERR_SYS)
+ dev_dbg(dra7xx->dev, "System Error\n");
+
+ if (reg & ERR_FATAL)
+ dev_dbg(dra7xx->dev, "Fatal Error\n");
+
+ if (reg & ERR_NONFATAL)
+ dev_dbg(dra7xx->dev, "Non Fatal Error\n");
+
+ if (reg & ERR_COR)
+ dev_dbg(dra7xx->dev, "Correctable Error\n");
+
+ if (reg & ERR_AXI)
+ dev_dbg(dra7xx->dev, "AXI tag lookup fatal Error\n");
+
+ if (reg & ERR_ECRC)
+ dev_dbg(dra7xx->dev, "ECRC Error\n");
+
+ if (reg & PME_TURN_OFF)
+ dev_dbg(dra7xx->dev,
+ "Power Management Event Turn-Off message received\n");
+
+ if (reg & PME_TO_ACK)
+ dev_dbg(dra7xx->dev,
+ "Power Management Turn-Off Ack message received\n");
+
+ if (reg & PM_PME)
+ dev_dbg(dra7xx->dev,
+ "PM Power Management Event message received\n");
+
+ if (reg & LINK_REQ_RST)
+ dev_dbg(dra7xx->dev, "Link Request Reset\n");
+
+ if (reg & LINK_UP_EVT)
+ dev_dbg(dra7xx->dev, "Link-up state change\n");
+
+ if (reg & CFG_BME_EVT)
+ dev_dbg(dra7xx->dev, "CFG 'Bus Master Enable' change\n");
+
+ if (reg & CFG_MSE_EVT)
+ dev_dbg(dra7xx->dev, "CFG 'Memory Space Enable' change\n");
+
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);
+
+ return IRQ_HANDLED;
+}
+
+static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
+ struct platform_device *pdev)
+{
+ int ret;
+ struct pcie_port *pp;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+
+ pp = &dra7xx->pp;
+ pp->dev = dev;
+ pp->ops = &dra7xx_pcie_host_ops;
+
+ pp->irq = platform_get_irq(pdev, 1);
+ if (pp->irq < 0) {
+ dev_err(dev, "missing IRQ resource\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(&pdev->dev, pp->irq,
+ dra7xx_pcie_msi_irq_handler, IRQF_SHARED,
+ "dra7-pcie-msi", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ return ret;
+ }
+
+ if (!IS_ENABLED(CONFIG_PCI_MSI)) {
+ ret = dra7xx_pcie_init_irq_domain(pp);
+ if (ret < 0)
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
+ pp->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!pp->dbi_base)
+ return -ENOMEM;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dra7xx->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init dra7xx_pcie_probe(struct platform_device *pdev)
+{
+ u32 reg;
+ int ret;
+ int irq;
+ int i;
+ int phy_count;
+ struct phy **phy;
+ void __iomem *base;
+ struct resource *res;
+ struct dra7xx_pcie *dra7xx;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ char name[10];
+
+ dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
+ if (!dra7xx)
+ return -ENOMEM;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "missing IRQ resource\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
+ IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
+ base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!base)
+ return -ENOMEM;
+
+ phy_count = of_property_count_strings(np, "phy-names");
+ if (phy_count < 0) {
+ dev_err(dev, "unable to find the strings\n");
+ return phy_count;
+ }
+
+ phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ for (i = 0; i < phy_count; i++) {
+ snprintf(name, sizeof(name), "pcie-phy%d", i);
+ phy[i] = devm_phy_get(dev, name);
+ if (IS_ERR(phy[i]))
+ return PTR_ERR(phy[i]);
+
+ ret = phy_init(phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_power_on(phy[i]);
+ if (ret < 0) {
+ phy_exit(phy[i]);
+ goto err_phy;
+ }
+ }
+
+ dra7xx->base = base;
+ dra7xx->phy = phy;
+ dra7xx->dev = dev;
+ dra7xx->phy_count = phy_count;
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_phy;
+ }
+
+ reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
+ reg &= ~LTSSM_EN;
+ dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
+
+ platform_set_drvdata(pdev, dra7xx);
+
+ ret = dra7xx_add_pcie_port(dra7xx, pdev);
+ if (ret < 0)
+ goto err_add_port;
+
+ return 0;
+
+err_add_port:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+err_phy:
+ while (--i >= 0) {
+ phy_power_off(phy[i]);
+ phy_exit(phy[i]);
+ }
+
+ return ret;
+}
+
+static int __exit dra7xx_pcie_remove(struct platform_device *pdev)
+{
+ struct dra7xx_pcie *dra7xx = platform_get_drvdata(pdev);
+ struct pcie_port *pp = &dra7xx->pp;
+ struct device *dev = &pdev->dev;
+ int count = dra7xx->phy_count;
+
+ if (pp->irq_domain)
+ irq_domain_remove(pp->irq_domain);
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ while (count--) {
+ phy_power_off(dra7xx->phy[count]);
+ phy_exit(dra7xx->phy[count]);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id of_dra7xx_pcie_match[] = {
+ { .compatible = "ti,dra7-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_dra7xx_pcie_match);
+
+static struct platform_driver dra7xx_pcie_driver = {
+ .remove = __exit_p(dra7xx_pcie_remove),
+ .driver = {
+ .name = "dra7-pcie",
+ .of_match_table = of_dra7xx_pcie_match,
+ },
+};
+
+module_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
+
+MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
+MODULE_DESCRIPTION("TI PCIe controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
new file mode 100644
index 000000000..c139237e0
--- /dev/null
+++ b/drivers/pci/host/pci-exynos.c
@@ -0,0 +1,658 @@
+/*
+ * PCIe host controller driver for Samsung EXYNOS SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define to_exynos_pcie(x) container_of(x, struct exynos_pcie, pp)
+
+struct exynos_pcie {
+ void __iomem *elbi_base;
+ void __iomem *phy_base;
+ void __iomem *block_base;
+ int reset_gpio;
+ struct clk *clk;
+ struct clk *bus_clk;
+ struct pcie_port pp;
+};
+
+/* PCIe ELBI registers */
+#define PCIE_IRQ_PULSE 0x000
+#define IRQ_INTA_ASSERT (0x1 << 0)
+#define IRQ_INTB_ASSERT (0x1 << 2)
+#define IRQ_INTC_ASSERT (0x1 << 4)
+#define IRQ_INTD_ASSERT (0x1 << 6)
+#define PCIE_IRQ_LEVEL 0x004
+#define PCIE_IRQ_SPECIAL 0x008
+#define PCIE_IRQ_EN_PULSE 0x00c
+#define PCIE_IRQ_EN_LEVEL 0x010
+#define IRQ_MSI_ENABLE (0x1 << 2)
+#define PCIE_IRQ_EN_SPECIAL 0x014
+#define PCIE_PWR_RESET 0x018
+#define PCIE_CORE_RESET 0x01c
+#define PCIE_CORE_RESET_ENABLE (0x1 << 0)
+#define PCIE_STICKY_RESET 0x020
+#define PCIE_NONSTICKY_RESET 0x024
+#define PCIE_APP_INIT_RESET 0x028
+#define PCIE_APP_LTSSM_ENABLE 0x02c
+#define PCIE_ELBI_RDLH_LINKUP 0x064
+#define PCIE_ELBI_LTSSM_ENABLE 0x1
+#define PCIE_ELBI_SLV_AWMISC 0x11c
+#define PCIE_ELBI_SLV_ARMISC 0x120
+#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
+
+/* PCIe Purple registers */
+#define PCIE_PHY_GLOBAL_RESET 0x000
+#define PCIE_PHY_COMMON_RESET 0x004
+#define PCIE_PHY_CMN_REG 0x008
+#define PCIE_PHY_MAC_RESET 0x00c
+#define PCIE_PHY_PLL_LOCKED 0x010
+#define PCIE_PHY_TRSVREG_RESET 0x020
+#define PCIE_PHY_TRSV_RESET 0x024
+
+/* PCIe PHY registers */
+#define PCIE_PHY_IMPEDANCE 0x004
+#define PCIE_PHY_PLL_DIV_0 0x008
+#define PCIE_PHY_PLL_BIAS 0x00c
+#define PCIE_PHY_DCC_FEEDBACK 0x014
+#define PCIE_PHY_PLL_DIV_1 0x05c
+#define PCIE_PHY_COMMON_POWER 0x064
+#define PCIE_PHY_COMMON_PD_CMN (0x1 << 3)
+#define PCIE_PHY_TRSV0_EMP_LVL 0x084
+#define PCIE_PHY_TRSV0_DRV_LVL 0x088
+#define PCIE_PHY_TRSV0_RXCDR 0x0ac
+#define PCIE_PHY_TRSV0_POWER 0x0c4
+#define PCIE_PHY_TRSV0_PD_TSV (0x1 << 7)
+#define PCIE_PHY_TRSV0_LVCC 0x0dc
+#define PCIE_PHY_TRSV1_EMP_LVL 0x144
+#define PCIE_PHY_TRSV1_RXCDR 0x16c
+#define PCIE_PHY_TRSV1_POWER 0x184
+#define PCIE_PHY_TRSV1_PD_TSV (0x1 << 7)
+#define PCIE_PHY_TRSV1_LVCC 0x19c
+#define PCIE_PHY_TRSV2_EMP_LVL 0x204
+#define PCIE_PHY_TRSV2_RXCDR 0x22c
+#define PCIE_PHY_TRSV2_POWER 0x244
+#define PCIE_PHY_TRSV2_PD_TSV (0x1 << 7)
+#define PCIE_PHY_TRSV2_LVCC 0x25c
+#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
+#define PCIE_PHY_TRSV3_RXCDR 0x2ec
+#define PCIE_PHY_TRSV3_POWER 0x304
+#define PCIE_PHY_TRSV3_PD_TSV (0x1 << 7)
+#define PCIE_PHY_TRSV3_LVCC 0x31c
+
+static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+{
+ writel(val, pcie->elbi_base + reg);
+}
+
+static inline u32 exynos_elb_readl(struct exynos_pcie *pcie, u32 reg)
+{
+ return readl(pcie->elbi_base + reg);
+}
+
+static inline void exynos_phy_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+{
+ writel(val, pcie->phy_base + reg);
+}
+
+static inline u32 exynos_phy_readl(struct exynos_pcie *pcie, u32 reg)
+{
+ return readl(pcie->phy_base + reg);
+}
+
+static inline void exynos_blk_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+{
+ writel(val, pcie->block_base + reg);
+}
+
+static inline u32 exynos_blk_readl(struct exynos_pcie *pcie, u32 reg)
+{
+ return readl(pcie->block_base + reg);
+}
+
+static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ if (on) {
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC);
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_AWMISC);
+ } else {
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC);
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_AWMISC);
+ }
+}
+
+static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ if (on) {
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC);
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_ARMISC);
+ } else {
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC);
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_ARMISC);
+ }
+}
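+
+/*
+ * Illustrative note (not part of the original driver): the Exynos wrapper
+ * multiplexes DBI (configuration) space and outbound memory space behind
+ * one slave interface.  Setting PCIE_ELBI_SLV_DBI_ENABLE in AWMISC/ARMISC
+ * steers the following write/read to DBI space, which is why every
+ * own-config and RC register access below is bracketed by these helpers.
+ */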
+
+static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
+ val &= ~PCIE_CORE_RESET_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_CORE_RESET);
+ exynos_elb_writel(exynos_pcie, 0, PCIE_PWR_RESET);
+ exynos_elb_writel(exynos_pcie, 0, PCIE_STICKY_RESET);
+ exynos_elb_writel(exynos_pcie, 0, PCIE_NONSTICKY_RESET);
+}
+
+static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
+ val |= PCIE_CORE_RESET_ENABLE;
+
+ exynos_elb_writel(exynos_pcie, val, PCIE_CORE_RESET);
+ exynos_elb_writel(exynos_pcie, 1, PCIE_STICKY_RESET);
+ exynos_elb_writel(exynos_pcie, 1, PCIE_NONSTICKY_RESET);
+ exynos_elb_writel(exynos_pcie, 1, PCIE_APP_INIT_RESET);
+ exynos_elb_writel(exynos_pcie, 0, PCIE_APP_INIT_RESET);
+ exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_MAC_RESET);
+}
+
+static void exynos_pcie_assert_phy_reset(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_MAC_RESET);
+ exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_GLOBAL_RESET);
+}
+
+static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_GLOBAL_RESET);
+ exynos_elb_writel(exynos_pcie, 1, PCIE_PWR_RESET);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_CMN_REG);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSVREG_RESET);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
+}
+
+static void exynos_pcie_power_on_phy(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
+ val &= ~PCIE_PHY_COMMON_PD_CMN;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
+ val &= ~PCIE_PHY_TRSV0_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
+ val &= ~PCIE_PHY_TRSV1_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
+ val &= ~PCIE_PHY_TRSV2_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
+ val &= ~PCIE_PHY_TRSV3_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
+}
+
+static void exynos_pcie_power_off_phy(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
+ val |= PCIE_PHY_COMMON_PD_CMN;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
+ val |= PCIE_PHY_TRSV0_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
+ val |= PCIE_PHY_TRSV1_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
+ val |= PCIE_PHY_TRSV2_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
+
+ val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
+ val |= PCIE_PHY_TRSV3_PD_TSV;
+ exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
+}
+
+static void exynos_pcie_init_phy(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ /* DCC feedback control off */
+ exynos_phy_writel(exynos_pcie, 0x29, PCIE_PHY_DCC_FEEDBACK);
+
+ /* set TX/RX impedance */
+ exynos_phy_writel(exynos_pcie, 0xd5, PCIE_PHY_IMPEDANCE);
+
+ /* set 50Mhz PHY clock */
+ exynos_phy_writel(exynos_pcie, 0x14, PCIE_PHY_PLL_DIV_0);
+ exynos_phy_writel(exynos_pcie, 0x12, PCIE_PHY_PLL_DIV_1);
+
+ /* set TX Differential output for lane 0 */
+ exynos_phy_writel(exynos_pcie, 0x7f, PCIE_PHY_TRSV0_DRV_LVL);
+
+ /* set TX Pre-emphasis Level Control for lane 0 to minimum */
+ exynos_phy_writel(exynos_pcie, 0x0, PCIE_PHY_TRSV0_EMP_LVL);
+
+ /* set RX clock and data recovery bandwidth */
+ exynos_phy_writel(exynos_pcie, 0xe7, PCIE_PHY_PLL_BIAS);
+ exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV0_RXCDR);
+ exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV1_RXCDR);
+ exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV2_RXCDR);
+ exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV3_RXCDR);
+
+ /* change TX Pre-emphasis Level Control for lanes */
+ exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV0_EMP_LVL);
+ exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV1_EMP_LVL);
+ exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV2_EMP_LVL);
+ exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV3_EMP_LVL);
+
+ /* set LVCC */
+ exynos_phy_writel(exynos_pcie, 0x20, PCIE_PHY_TRSV0_LVCC);
+ exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV1_LVCC);
+ exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV2_LVCC);
+ exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV3_LVCC);
+}
+
+static void exynos_pcie_assert_reset(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ if (exynos_pcie->reset_gpio >= 0)
+ devm_gpio_request_one(pp->dev, exynos_pcie->reset_gpio,
+ GPIOF_OUT_INIT_HIGH, "RESET");
+}
+
+static int exynos_pcie_establish_link(struct pcie_port *pp)
+{
+ u32 val;
+ int count = 0;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ if (dw_pcie_link_up(pp)) {
+ dev_err(pp->dev, "Link already up\n");
+ return 0;
+ }
+
+ /* assert reset signals */
+ exynos_pcie_assert_core_reset(pp);
+ exynos_pcie_assert_phy_reset(pp);
+
+ /* de-assert phy reset */
+ exynos_pcie_deassert_phy_reset(pp);
+
+ /* power on phy */
+ exynos_pcie_power_on_phy(pp);
+
+ /* initialize phy */
+ exynos_pcie_init_phy(pp);
+
+ /* pulse for common reset */
+ exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_COMMON_RESET);
+ udelay(500);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
+
+ /* de-assert core reset */
+ exynos_pcie_deassert_core_reset(pp);
+
+ /* setup root complex */
+ dw_pcie_setup_rc(pp);
+
+ /* assert reset signal */
+ exynos_pcie_assert_reset(pp);
+
+ /* assert LTSSM enable */
+ exynos_elb_writel(exynos_pcie, PCIE_ELBI_LTSSM_ENABLE,
+ PCIE_APP_LTSSM_ENABLE);
+
+ /* check if the link is up or not */
+ while (!dw_pcie_link_up(pp)) {
+ mdelay(100);
+ count++;
+ if (count == 10) {
+ while (exynos_phy_readl(exynos_pcie,
+ PCIE_PHY_PLL_LOCKED) == 0) {
+ val = exynos_blk_readl(exynos_pcie,
+ PCIE_PHY_PLL_LOCKED);
+ dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
+ }
+ /* power off phy */
+ exynos_pcie_power_off_phy(pp);
+
+ dev_err(pp->dev, "PCIe Link Fail\n");
+ return -EINVAL;
+ }
+ }
+
+ dev_info(pp->dev, "Link up\n");
+
+ return 0;
+}
+
+static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_PULSE);
+ exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_PULSE);
+}
+
+static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ /* enable INTX interrupt */
+ val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
+ IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
+ exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_PULSE);
+}
+
+static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ exynos_pcie_clear_irq_pulse(pp);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ return dw_handle_msi_irq(pp);
+}
+
+static void exynos_pcie_msi_init(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ dw_pcie_msi_init(pp);
+
+ /* enable MSI interrupt */
+ val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_EN_LEVEL);
+ val |= IRQ_MSI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL);
+}
+
+static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
+{
+ exynos_pcie_enable_irq_pulse(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ exynos_pcie_msi_init(pp);
+}
+
+static inline void exynos_pcie_readl_rc(struct pcie_port *pp,
+ void __iomem *dbi_base, u32 *val)
+{
+ exynos_pcie_sideband_dbi_r_mode(pp, true);
+ *val = readl(dbi_base);
+ exynos_pcie_sideband_dbi_r_mode(pp, false);
+}
+
+static inline void exynos_pcie_writel_rc(struct pcie_port *pp,
+ u32 val, void __iomem *dbi_base)
+{
+ exynos_pcie_sideband_dbi_w_mode(pp, true);
+ writel(val, dbi_base);
+ exynos_pcie_sideband_dbi_w_mode(pp, false);
+}
+
+static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+ u32 *val)
+{
+ int ret;
+
+ exynos_pcie_sideband_dbi_r_mode(pp, true);
+ ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
+ exynos_pcie_sideband_dbi_r_mode(pp, false);
+ return ret;
+}
+
+static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+ u32 val)
+{
+ int ret;
+
+ exynos_pcie_sideband_dbi_w_mode(pp, true);
+ ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3),
+ where, size, val);
+ exynos_pcie_sideband_dbi_w_mode(pp, false);
+ return ret;
+}
+
+static int exynos_pcie_link_up(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+ u32 val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP);
+
+ if (val == PCIE_ELBI_LTSSM_ENABLE)
+ return 1;
+
+ return 0;
+}
+
+static void exynos_pcie_host_init(struct pcie_port *pp)
+{
+ exynos_pcie_establish_link(pp);
+ exynos_pcie_enable_interrupts(pp);
+}
+
+static struct pcie_host_ops exynos_pcie_host_ops = {
+ .readl_rc = exynos_pcie_readl_rc,
+ .writel_rc = exynos_pcie_writel_rc,
+ .rd_own_conf = exynos_pcie_rd_own_conf,
+ .wr_own_conf = exynos_pcie_wr_own_conf,
+ .link_up = exynos_pcie_link_up,
+ .host_init = exynos_pcie_host_init,
+};
+
+static int __init exynos_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ pp->irq = platform_get_irq(pdev, 1);
+ if (pp->irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return pp->irq;
+ }
+ ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler,
+ IRQF_SHARED, "exynos-pcie", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ return ret;
+ }
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq(pdev, 0);
+ if (pp->msi_irq < 0) {
+ dev_err(&pdev->dev, "failed to get msi irq\n");
+ return pp->msi_irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ exynos_pcie_msi_irq_handler,
+ IRQF_SHARED, "exynos-pcie", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request msi irq\n");
+ return ret;
+ }
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &exynos_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init exynos_pcie_probe(struct platform_device *pdev)
+{
+ struct exynos_pcie *exynos_pcie;
+ struct pcie_port *pp;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *elbi_base;
+ struct resource *phy_base;
+ struct resource *block_base;
+ int ret;
+
+ exynos_pcie = devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie),
+ GFP_KERNEL);
+ if (!exynos_pcie)
+ return -ENOMEM;
+
+ pp = &exynos_pcie->pp;
+
+ pp->dev = &pdev->dev;
+
+ exynos_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+
+ exynos_pcie->clk = devm_clk_get(&pdev->dev, "pcie");
+ if (IS_ERR(exynos_pcie->clk)) {
+ dev_err(&pdev->dev, "Failed to get pcie rc clock\n");
+ return PTR_ERR(exynos_pcie->clk);
+ }
+ ret = clk_prepare_enable(exynos_pcie->clk);
+ if (ret)
+ return ret;
+
+ exynos_pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
+ if (IS_ERR(exynos_pcie->bus_clk)) {
+ dev_err(&pdev->dev, "Failed to get pcie bus clock\n");
+ ret = PTR_ERR(exynos_pcie->bus_clk);
+ goto fail_clk;
+ }
+ ret = clk_prepare_enable(exynos_pcie->bus_clk);
+ if (ret)
+ goto fail_clk;
+
+ elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
+ if (IS_ERR(exynos_pcie->elbi_base)) {
+ ret = PTR_ERR(exynos_pcie->elbi_base);
+ goto fail_bus_clk;
+ }
+
+ phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
+ if (IS_ERR(exynos_pcie->phy_base)) {
+ ret = PTR_ERR(exynos_pcie->phy_base);
+ goto fail_bus_clk;
+ }
+
+ block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base);
+ if (IS_ERR(exynos_pcie->block_base)) {
+ ret = PTR_ERR(exynos_pcie->block_base);
+ goto fail_bus_clk;
+ }
+
+ ret = exynos_add_pcie_port(pp, pdev);
+ if (ret < 0)
+ goto fail_bus_clk;
+
+ platform_set_drvdata(pdev, exynos_pcie);
+ return 0;
+
+fail_bus_clk:
+ clk_disable_unprepare(exynos_pcie->bus_clk);
+fail_clk:
+ clk_disable_unprepare(exynos_pcie->clk);
+ return ret;
+}
+
+static int __exit exynos_pcie_remove(struct platform_device *pdev)
+{
+ struct exynos_pcie *exynos_pcie = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(exynos_pcie->bus_clk);
+ clk_disable_unprepare(exynos_pcie->clk);
+
+ return 0;
+}
+
+static const struct of_device_id exynos_pcie_of_match[] = {
+ { .compatible = "samsung,exynos5440-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
+
+static struct platform_driver exynos_pcie_driver = {
+ .remove = __exit_p(exynos_pcie_remove),
+ .driver = {
+ .name = "exynos-pcie",
+ .of_match_table = exynos_pcie_of_match,
+ },
+};
+
+/* Exynos PCIe driver does not allow module unload */
+
+static int __init exynos_pcie_init(void)
+{
+ return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
+}
+subsys_initcall(exynos_pcie_init);
+
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("Samsung PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
new file mode 100644
index 000000000..ba46e581d
--- /dev/null
+++ b/drivers/pci/host/pci-host-generic.c
@@ -0,0 +1,276 @@
+/*
+ * Simple, generic PCI host controller driver targeting firmware-initialised
+ * systems and virtual machines (e.g. the PCI emulation provided by kvmtool).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+
+struct gen_pci_cfg_bus_ops {
+ u32 bus_shift;
+ void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int);
+};
+
+struct gen_pci_cfg_windows {
+ struct resource res;
+ struct resource *bus_range;
+ void __iomem **win;
+
+ const struct gen_pci_cfg_bus_ops *ops;
+};
+
+struct gen_pci {
+ struct pci_host_bridge host;
+ struct gen_pci_cfg_windows cfg;
+ struct list_head resources;
+};
+
+static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ struct pci_sys_data *sys = bus->sysdata;
+ struct gen_pci *pci = sys->private_data;
+ resource_size_t idx = bus->number - pci->cfg.bus_range->start;
+
+ return pci->cfg.win[idx] + ((devfn << 8) | where);
+}
+
+static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = {
+ .bus_shift = 16,
+ .map_bus = gen_pci_map_cfg_bus_cam,
+};
+
+static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ struct pci_sys_data *sys = bus->sysdata;
+ struct gen_pci *pci = sys->private_data;
+ resource_size_t idx = bus->number - pci->cfg.bus_range->start;
+
+ return pci->cfg.win[idx] + ((devfn << 12) | where);
+}
+
+static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = {
+ .bus_shift = 20,
+ .map_bus = gen_pci_map_cfg_bus_ecam,
+};
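+
+/*
+ * Illustrative example (not part of the original driver): ECAM gives each
+ * bus a 1 MiB (1 << 20) window and each devfn a 4 KiB slice, so bus 2,
+ * device 3, function 1, register 0x10 resolves as
+ *
+ *   devfn = PCI_DEVFN(3, 1);                        [0x19]
+ *   addr  = cfg.win[2 - bus_range->start] + ((devfn << 12) | 0x10);
+ *
+ * CAM (bus_shift 16) packs the same devfn into a 64 KiB per-bus window
+ * with 256 bytes per function, hence the (devfn << 8) variant above.
+ */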
+
+static struct pci_ops gen_pci_ops = {
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
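+
+/*
+ * Illustrative note (not part of the original driver):
+ * pci_generic_config_read/write dispatch through gen_pci_ops.map_bus,
+ * which gen_pci_probe() points at the CAM or ECAM mapper selected by
+ * the device tree compatible string.
+ */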
+
+static const struct of_device_id gen_pci_of_match[] = {
+ { .compatible = "pci-host-cam-generic",
+ .data = &gen_pci_cfg_cam_bus_ops },
+
+ { .compatible = "pci-host-ecam-generic",
+ .data = &gen_pci_cfg_ecam_bus_ops },
+
+ { },
+};
+MODULE_DEVICE_TABLE(of, gen_pci_of_match);
+
+static void gen_pci_release_of_pci_ranges(struct gen_pci *pci)
+{
+ pci_free_resource_list(&pci->resources);
+}
+
+static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci)
+{
+ int err, res_valid = 0;
+ struct device *dev = pci->host.dev.parent;
+ struct device_node *np = dev->of_node;
+ resource_size_t iobase;
+ struct resource_entry *win;
+
+ err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources,
+ &iobase);
+ if (err)
+ return err;
+
+ resource_list_for_each_entry(win, &pci->resources) {
+ struct resource *parent, *res = win->res;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ parent = &ioport_resource;
+ err = pci_remap_iospace(res, iobase);
+ if (err) {
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ err, res);
+ continue;
+ }
+ break;
+ case IORESOURCE_MEM:
+ parent = &iomem_resource;
+ res_valid |= !(res->flags & IORESOURCE_PREFETCH);
+ break;
+ case IORESOURCE_BUS:
+ pci->cfg.bus_range = res;
+ default:
+ continue;
+ }
+
+ err = devm_request_resource(dev, parent, res);
+ if (err)
+ goto out_release_res;
+ }
+
+ if (!res_valid) {
+ dev_err(dev, "non-prefetchable memory resource required\n");
+ err = -EINVAL;
+ goto out_release_res;
+ }
+
+ return 0;
+
+out_release_res:
+ gen_pci_release_of_pci_ranges(pci);
+ return err;
+}
+
+static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci)
+{
+ int err;
+ u8 bus_max;
+ resource_size_t busn;
+ struct resource *bus_range;
+ struct device *dev = pci->host.dev.parent;
+ struct device_node *np = dev->of_node;
+
+ err = of_address_to_resource(np, 0, &pci->cfg.res);
+ if (err) {
+ dev_err(dev, "missing \"reg\" property\n");
+ return err;
+ }
+
+ /* Limit the bus-range to fit within reg */
+ bus_max = pci->cfg.bus_range->start +
+ (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1;
+ pci->cfg.bus_range->end = min_t(resource_size_t,
+ pci->cfg.bus_range->end, bus_max);
+
+ pci->cfg.win = devm_kcalloc(dev, resource_size(pci->cfg.bus_range),
+ sizeof(*pci->cfg.win), GFP_KERNEL);
+ if (!pci->cfg.win)
+ return -ENOMEM;
+
+ /* Map our Configuration Space windows */
+ if (!devm_request_mem_region(dev, pci->cfg.res.start,
+ resource_size(&pci->cfg.res),
+ "Configuration Space"))
+ return -ENOMEM;
+
+ bus_range = pci->cfg.bus_range;
+ for (busn = bus_range->start; busn <= bus_range->end; ++busn) {
+ u32 idx = busn - bus_range->start;
+ u32 sz = 1 << pci->cfg.ops->bus_shift;
+
+ pci->cfg.win[idx] = devm_ioremap(dev,
+ pci->cfg.res.start + busn * sz,
+ sz);
+ if (!pci->cfg.win[idx])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
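+
+/*
+ * Illustrative example (not part of the original driver): the bus range
+ * is clamped so every bus fits inside the "reg" window.  With ECAM
+ * (bus_shift = 20), a 16 MiB reg property covers 16 buses, so a DT
+ * bus-range of <0 255> is trimmed to <0 15> by the code above.
+ */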
+
+static int gen_pci_setup(int nr, struct pci_sys_data *sys)
+{
+ struct gen_pci *pci = sys->private_data;
+ list_splice_init(&pci->resources, &sys->resources);
+ return 1;
+}
+
+static int gen_pci_probe(struct platform_device *pdev)
+{
+ int err;
+ const char *type;
+ const struct of_device_id *of_id;
+ const int *prop;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
+ struct hw_pci hw = {
+ .nr_controllers = 1,
+ .private_data = (void **)&pci,
+ .setup = gen_pci_setup,
+ .map_irq = of_irq_parse_and_map_pci,
+ .ops = &gen_pci_ops,
+ };
+
+ if (!pci)
+ return -ENOMEM;
+
+ type = of_get_property(np, "device_type", NULL);
+ if (!type || strcmp(type, "pci")) {
+ dev_err(dev, "invalid \"device_type\" %s\n", type);
+ return -EINVAL;
+ }
+
+ prop = of_get_property(of_chosen, "linux,pci-probe-only", NULL);
+ if (prop) {
+ if (*prop)
+ pci_add_flags(PCI_PROBE_ONLY);
+ else
+ pci_clear_flags(PCI_PROBE_ONLY);
+ }
+
+ of_id = of_match_node(gen_pci_of_match, np);
+ pci->cfg.ops = of_id->data;
+ gen_pci_ops.map_bus = pci->cfg.ops->map_bus;
+ pci->host.dev.parent = dev;
+ INIT_LIST_HEAD(&pci->host.windows);
+ INIT_LIST_HEAD(&pci->resources);
+
+ /* Parse our PCI ranges and request their resources */
+ err = gen_pci_parse_request_of_pci_ranges(pci);
+ if (err)
+ return err;
+
+ /* Parse and map our Configuration Space windows */
+ err = gen_pci_parse_map_cfg_windows(pci);
+ if (err) {
+ gen_pci_release_of_pci_ranges(pci);
+ return err;
+ }
+
+ pci_common_init_dev(dev, &hw);
+ return 0;
+}
+
+static struct platform_driver gen_pci_driver = {
+ .driver = {
+ .name = "pci-host-generic",
+ .of_match_table = gen_pci_of_match,
+ },
+ .probe = gen_pci_probe,
+};
+module_platform_driver(gen_pci_driver);
+
+MODULE_DESCRIPTION("Generic PCI host driver");
+MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
new file mode 100644
index 000000000..fdb953677
--- /dev/null
+++ b/drivers/pci/host/pci-imx6.c
@@ -0,0 +1,661 @@
+/*
+ * PCIe host controller driver for Freescale i.MX6 SoCs
+ *
+ * Copyright (C) 2013 Kosagi
+ * http://www.kosagi.com
+ *
+ * Author: Sean Cross <xobs@kosagi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+
+#include "pcie-designware.h"
+
+#define to_imx6_pcie(x) container_of(x, struct imx6_pcie, pp)
+
+struct imx6_pcie {
+ int reset_gpio;
+ struct clk *pcie_bus;
+ struct clk *pcie_phy;
+ struct clk *pcie;
+ struct pcie_port pp;
+ struct regmap *iomuxc_gpr;
+ void __iomem *mem_base;
+};
+
+/* PCIe Root Complex registers (memory-mapped) */
+#define PCIE_RC_LCR 0x7c
+#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
+#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
+#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf
+
+/* PCIe Port Logic registers (memory-mapped) */
+#define PL_OFFSET 0x700
+#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
+#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16)
+#define PCIE_PL_PFLR_FORCE_LINK (1 << 15)
+#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
+#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
+#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
+#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4)
+
+#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
+#define PCIE_PHY_CTRL_DATA_LOC 0
+#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
+#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
+#define PCIE_PHY_CTRL_WR_LOC 18
+#define PCIE_PHY_CTRL_RD_LOC 19
+
+#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
+#define PCIE_PHY_STAT_ACK_LOC 16
+
+#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
+#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
+
+/* PHY registers (not memory-mapped) */
+#define PCIE_PHY_RX_ASIC_OUT 0x100D
+
+#define PHY_RX_OVRD_IN_LO 0x1005
+#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
+#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
+
+static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
+{
+ u32 val;
+ u32 max_iterations = 10;
+ u32 wait_counter = 0;
+
+ do {
+ val = readl(dbi_base + PCIE_PHY_STAT);
+ val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
+ wait_counter++;
+
+ if (val == exp_val)
+ return 0;
+
+ udelay(1);
+ } while (wait_counter < max_iterations);
+
+ return -ETIMEDOUT;
+}
+
+static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
+{
+ u32 val;
+ int ret;
+
+ val = addr << PCIE_PHY_CTRL_DATA_LOC;
+ writel(val, dbi_base + PCIE_PHY_CTRL);
+
+ val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
+ writel(val, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 1);
+ if (ret)
+ return ret;
+
+ val = addr << PCIE_PHY_CTRL_DATA_LOC;
+ writel(val, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
+static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
+{
+ u32 val, phy_ctl;
+ int ret;
+
+ ret = pcie_phy_wait_ack(dbi_base, addr);
+ if (ret)
+ return ret;
+
+ /* assert Read signal */
+ phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
+ writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 1);
+ if (ret)
+ return ret;
+
+ val = readl(dbi_base + PCIE_PHY_STAT);
+ *data = val & 0xffff;
+
+ /* deassert Read signal */
+ writel(0x00, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
+{
+ u32 var;
+ int ret;
+
+ /* write addr */
+ /* cap addr */
+ ret = pcie_phy_wait_ack(dbi_base, addr);
+ if (ret)
+ return ret;
+
+ var = data << PCIE_PHY_CTRL_DATA_LOC;
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ /* capture data */
+ var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ ret = pcie_phy_poll_ack(dbi_base, 1);
+ if (ret)
+ return ret;
+
+ /* deassert cap data */
+ var = data << PCIE_PHY_CTRL_DATA_LOC;
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ /* wait for ack de-assertion */
+ ret = pcie_phy_poll_ack(dbi_base, 0);
+ if (ret)
+ return ret;
+
+ /* assert wr signal */
+ var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ /* wait for ack */
+ ret = pcie_phy_poll_ack(dbi_base, 1);
+ if (ret)
+ return ret;
+
+ /* deassert wr signal */
+ var = data << PCIE_PHY_CTRL_DATA_LOC;
+ writel(var, dbi_base + PCIE_PHY_CTRL);
+
+ /* wait for ack de-assertion */
+ ret = pcie_phy_poll_ack(dbi_base, 0);
+ if (ret)
+ return ret;
+
+ writel(0x0, dbi_base + PCIE_PHY_CTRL);
+
+ return 0;
+}
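+
+/*
+ * Illustrative summary (not part of the original driver): every PHY
+ * register access is a strobed handshake over PCIE_PHY_CTRL/STAT --
+ * present the address and pulse cap_adr, present data and pulse cap_dat
+ * (writes only), pulse rd/wr, polling the ack bit after each phase.
+ * A caller might use it as:
+ *
+ *   int val;
+ *
+ *   if (!pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &val))
+ *       dev_dbg(pp->dev, "RX_OVRD_IN_LO = 0x%04x\n", val);
+ */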
+
+/* Added for PCI abort handling */
+static int imx6q_pcie_abort_handler(unsigned long addr,
+ unsigned int fsr, struct pt_regs *regs)
+{
+ return 0;
+}
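+
+/*
+ * Illustrative note (not part of the original driver): returning 0 tells
+ * the ARM fault code the abort was handled.  On i.MX6, config accesses
+ * to a link that never came up can be signalled as imprecise external
+ * aborts, so probe() hooks fault code 16 + 6 to swallow them instead of
+ * letting them take down the kernel.
+ */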
+
+static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
+{
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ u32 val, gpr1, gpr12;
+
+ /*
+ * If the bootloader already enabled the link we need some special
+ * handling to get the core back into a state where it is safe to
+ * touch it for configuration. As there is no dedicated reset signal
+ * wired up for MX6QDL, we need to manually force LTSSM into "detect"
+ * state before completely disabling LTSSM, which is a prerequisite
+ * for core configuration.
+ *
+ * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
+ * indication that the bootloader activated the link.
+ */
+ regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
+ regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
+
+ if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
+ (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
+ val = readl(pp->dbi_base + PCIE_PL_PFLR);
+ val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
+ val |= PCIE_PL_PFLR_FORCE_LINK;
+ writel(val, pp->dbi_base + PCIE_PL_PFLR);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+ }
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+
+ return 0;
+}
+
+static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
+{
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ int ret;
+
+ ret = clk_prepare_enable(imx6_pcie->pcie_phy);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable pcie_phy clock\n");
+ goto err_pcie_phy;
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->pcie_bus);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable pcie_bus clock\n");
+ goto err_pcie_bus;
+ }
+
+ ret = clk_prepare_enable(imx6_pcie->pcie);
+ if (ret) {
+ dev_err(pp->dev, "unable to enable pcie clock\n");
+ goto err_pcie;
+ }
+
+ /* power up core phy and enable ref clock */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+ /*
+ * the async reset input need ref clock to sync internally,
+ * when the ref clock comes after reset, internal synced
+ * reset time is too short, cannot meet the requirement.
+ * add one ~10us delay here.
+ */
+ udelay(10);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+ IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+
+ /* allow the clocks to stabilize */
+ usleep_range(200, 500);
+
+ /* Some boards don't have PCIe reset GPIO. */
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ gpio_set_value(imx6_pcie->reset_gpio, 0);
+ msleep(100);
+ gpio_set_value(imx6_pcie->reset_gpio, 1);
+ }
+ return 0;
+
+err_pcie:
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+err_pcie_bus:
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+err_pcie_phy:
+ return ret;
+
+}
+
+static void imx6_pcie_init_phy(struct pcie_port *pp)
+{
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+ /* configure constant input signal to the pcie ctrl and phy */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN1, 0 << 0);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 0 << 6);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 20 << 12);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_FULL, 127 << 18);
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+ IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
+}
+
+static int imx6_pcie_wait_for_link(struct pcie_port *pp)
+{
+ int count = 200;
+
+ while (!dw_pcie_link_up(pp)) {
+ usleep_range(100, 1000);
+ if (--count)
+ continue;
+
+ dev_err(pp->dev, "phy link never came up\n");
+ dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
+ readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ return dw_handle_msi_irq(pp);
+}
+
+static int imx6_pcie_start_link(struct pcie_port *pp)
+{
+ struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+ u32 tmp;
+ int ret, count;
+
+ /*
+ * Force Gen1 operation when starting the link. In case the link is
+ * started in Gen2 mode, there is a possibility the devices on the
+ * bus will not be detected at all. This happens with PCIe switches.
+ */
+ tmp = readl(pp->dbi_base + PCIE_RC_LCR);
+ tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
+ tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
+ writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+
+ /* Start LTSSM. */
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+
+ ret = imx6_pcie_wait_for_link(pp);
+ if (ret)
+ return ret;
+
+ /* Allow Gen2 mode after the link is up. */
+ tmp = readl(pp->dbi_base + PCIE_RC_LCR);
+ tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
+ tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
+ writel(tmp, pp->dbi_base + PCIE_RC_LCR);
+
+ /*
+ * Start Directed Speed Change so the best possible speed both link
+ * partners support can be negotiated.
+ */
+ tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+ tmp |= PORT_LOGIC_SPEED_CHANGE;
+ writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+
+ count = 200;
+ while (count--) {
+ tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+ /* Test if the speed change finished. */
+ if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
+ break;
+ usleep_range(100, 1000);
+ }
+
+ /* Make sure link training is finished as well! */
+ if (count)
+ ret = imx6_pcie_wait_for_link(pp);
+ else
+ ret = -EINVAL;
+
+ if (ret) {
+ dev_err(pp->dev, "Failed to bring link up!\n");
+ } else {
+ tmp = readl(pp->dbi_base + 0x80);
+ dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
+ }
+
+ return ret;
+}
+
+static void imx6_pcie_host_init(struct pcie_port *pp)
+{
+ imx6_pcie_assert_core_reset(pp);
+
+ imx6_pcie_init_phy(pp);
+
+ imx6_pcie_deassert_core_reset(pp);
+
+ dw_pcie_setup_rc(pp);
+
+ imx6_pcie_start_link(pp);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ dw_pcie_msi_init(pp);
+}
+
+static void imx6_pcie_reset_phy(struct pcie_port *pp)
+{
+ int temp;
+
+ pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
+ temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+ PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
+
+ usleep_range(2000, 3000);
+
+ pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
+ temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+ PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+ pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp);
+}
+
+static int imx6_pcie_link_up(struct pcie_port *pp)
+{
+ u32 rc, debug_r0;
+ int rx_valid;
+ int count = 5;
+
+ /*
+ * Test if the PHY reports that the link is up and also that the LTSSM
+ * training finished. There are three possible states of the link when
+ * this code is called:
+ * 1) The link is DOWN (unlikely)
+ * The link didn't come up yet for some reason. This usually means
+ * we have a real problem somewhere. Reset the PHY and exit. This
+ * state calls for inspection of the DEBUG registers.
+ * 2) The link is UP, but still in LTSSM training
+ * Wait for the training to finish, which should take a very short
+ * time. If the training does not finish, we have a problem and we
+ * need to inspect the DEBUG registers. If the training does finish,
+ * the link is up and operating correctly.
+ * 3) The link is UP and no longer in LTSSM training
+ * The link is up and operating correctly.
+ */
+ while (1) {
+ rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
+ if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP))
+ break;
+ if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
+ return 1;
+ if (!count--)
+ break;
+ dev_dbg(pp->dev, "Link is up, but still in training\n");
+ /*
+ * Wait a little bit, then re-check if the link finished
+ * the training.
+ */
+ usleep_range(1000, 2000);
+ }
+ /*
+ * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
+ * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
+ * If (MAC/LTSSM.state == Recovery.RcvrLock)
+ * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
+ * to gen2 is stuck
+ */
+ pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
+ debug_r0 = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0);
+
+ if (rx_valid & 0x01)
+ return 0;
+
+ if ((debug_r0 & 0x3f) != 0x0d)
+ return 0;
+
+ dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
+ dev_dbg(pp->dev, "debug_r0=%08x debug_r1=%08x\n", debug_r0, rc);
+
+ imx6_pcie_reset_phy(pp);
+
+ return 0;
+}
+
+static struct pcie_host_ops imx6_pcie_host_ops = {
+ .link_up = imx6_pcie_link_up,
+ .host_init = imx6_pcie_host_init,
+};
+
+static int __init imx6_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
+{
+ int ret;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ pp->msi_irq = platform_get_irq_byname(pdev, "msi");
+ if (pp->msi_irq <= 0) {
+ dev_err(&pdev->dev, "failed to get MSI irq\n");
+ return -ENODEV;
+ }
+
+ ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ imx6_pcie_msi_handler,
+ IRQF_SHARED, "mx6-pcie-msi", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request MSI irq\n");
+ return -ENODEV;
+ }
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &imx6_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init imx6_pcie_probe(struct platform_device *pdev)
+{
+ struct imx6_pcie *imx6_pcie;
+ struct pcie_port *pp;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *dbi_base;
+ int ret;
+
+ imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
+ if (!imx6_pcie)
+ return -ENOMEM;
+
+ pp = &imx6_pcie->pp;
+ pp->dev = &pdev->dev;
+
+ /* Added for PCI abort handling */
+ hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
+ "imprecise external abort");
+
+ dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+ if (IS_ERR(pp->dbi_base))
+ return PTR_ERR(pp->dbi_base);
+
+ /* Fetch GPIOs */
+ imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ if (gpio_is_valid(imx6_pcie->reset_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev, imx6_pcie->reset_gpio,
+ GPIOF_OUT_INIT_LOW, "PCIe reset");
+ if (ret) {
+ dev_err(&pdev->dev, "unable to get reset gpio\n");
+ return ret;
+ }
+ }
+
+ /* Fetch clocks */
+ imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy");
+ if (IS_ERR(imx6_pcie->pcie_phy)) {
+ dev_err(&pdev->dev,
+ "pcie_phy clock source missing or invalid\n");
+ return PTR_ERR(imx6_pcie->pcie_phy);
+ }
+
+ imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus");
+ if (IS_ERR(imx6_pcie->pcie_bus)) {
+ dev_err(&pdev->dev,
+ "pcie_bus clock source missing or invalid\n");
+ return PTR_ERR(imx6_pcie->pcie_bus);
+ }
+
+ imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie");
+ if (IS_ERR(imx6_pcie->pcie)) {
+ dev_err(&pdev->dev,
+ "pcie clock source missing or invalid\n");
+ return PTR_ERR(imx6_pcie->pcie);
+ }
+
+ /* Grab GPR config register range */
+ imx6_pcie->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+ dev_err(&pdev->dev, "unable to find iomuxc registers\n");
+ return PTR_ERR(imx6_pcie->iomuxc_gpr);
+ }
+
+ ret = imx6_add_pcie_port(pp, pdev);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, imx6_pcie);
+ return 0;
+}
+
+static void imx6_pcie_shutdown(struct platform_device *pdev)
+{
+ struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+
+ /* bring down link, so bootloader gets clean state in case of reboot */
+ imx6_pcie_assert_core_reset(&imx6_pcie->pp);
+}
+
+static const struct of_device_id imx6_pcie_of_match[] = {
+ { .compatible = "fsl,imx6q-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);
+
+static struct platform_driver imx6_pcie_driver = {
+ .driver = {
+ .name = "imx6q-pcie",
+ .of_match_table = imx6_pcie_of_match,
+ },
+ .shutdown = imx6_pcie_shutdown,
+};
+
+/* Freescale PCIe driver does not allow module unload */
+
+static int __init imx6_pcie_init(void)
+{
+ return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
+}
+module_init(imx6_pcie_init);
+
+MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
+MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
new file mode 100644
index 000000000..f34892e0e
--- /dev/null
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -0,0 +1,517 @@
+/*
+ * Designware application register space functions for Keystone PCI controller
+ *
+ * Copyright (C) 2013-2014 Texas Instruments, Ltd.
+ * http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "pcie-designware.h"
+#include "pci-keystone.h"
+
+/* Application register defines */
+#define LTSSM_EN_VAL 1
+#define LTSSM_STATE_MASK 0x1f
+#define LTSSM_STATE_L0 0x11
+#define DBI_CS2_EN_VAL 0x20
+#define OB_XLAT_EN_VAL 2
+
+/* Application registers */
+#define CMD_STATUS 0x004
+#define CFG_SETUP 0x008
+#define OB_SIZE 0x030
+#define CFG_PCIM_WIN_SZ_IDX 3
+#define CFG_PCIM_WIN_CNT 32
+#define SPACE0_REMOTE_CFG_OFFSET 0x1000
+#define OB_OFFSET_INDEX(n) (0x200 + (8 * n))
+#define OB_OFFSET_HI(n) (0x204 + (8 * n))
+
+/* IRQ register defines */
+#define IRQ_EOI 0x050
+#define IRQ_STATUS 0x184
+#define IRQ_ENABLE_SET 0x188
+#define IRQ_ENABLE_CLR 0x18c
+
+#define MSI_IRQ 0x054
+#define MSI0_IRQ_STATUS 0x104
+#define MSI0_IRQ_ENABLE_SET 0x108
+#define MSI0_IRQ_ENABLE_CLR 0x10c
+#define MSI_IRQ_OFFSET 4
+
+/* Config space registers */
+#define DEBUG0 0x728
+
+#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
+
+static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
+ u32 *bit_pos)
+{
+ *reg_offset = offset % 8;
+ *bit_pos = offset >> 3;
+}
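+
+/*
+ * Illustrative example (not part of the original driver): MSI vectors
+ * are spread across eight status registers of four bits each, with
+ * vector = reg + (bit << 3).  Decoding vector 17:
+ *
+ *   update_reg_offset_bit_pos(17, &reg_offset, &bit_pos);
+ *   [reg_offset = 17 % 8 = 1, bit_pos = 17 >> 3 = 2]
+ *
+ * i.e. status register 1 (MSI0_IRQ_STATUS + (1 << 4)), bit 2, matching
+ * the vector layout described in ks_dw_pcie_handle_msi_irq() below.
+ */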
+
+u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+ return ks_pcie->app.start + MSI_IRQ;
+}
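+
+/*
+ * Illustrative note (not part of the original driver): this address is
+ * what gets programmed into an endpoint's MSI capability.  The endpoint
+ * signals an MSI by posting a write to the MSI_IRQ application register,
+ * and the hardware latches the vector into one of the per-register
+ * status words serviced by ks_dw_pcie_handle_msi_irq().
+ */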
+
+void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
+{
+ struct pcie_port *pp = &ks_pcie->pp;
+ u32 pending, vector;
+ int src, virq;
+
+ pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));
+
+ /*
+ * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
+ * shows 1, 9, 17, 25 and so forth
+ */
+ for (src = 0; src < 4; src++) {
+ if (BIT(src) & pending) {
+ vector = offset + (src << 3);
+ virq = irq_linear_revmap(pp->irq_domain, vector);
+ dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
+ src, vector, virq);
+ generic_handle_irq(virq);
+ }
+ }
+}
+
+static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
+{
+ u32 offset, reg_offset, bit_pos;
+ struct keystone_pcie *ks_pcie;
+ unsigned int irq = d->irq;
+ struct msi_desc *msi;
+ struct pcie_port *pp;
+
+ msi = irq_get_msi_desc(irq);
+ pp = sys_to_pcie(msi->dev->bus->sysdata);
+ ks_pcie = to_keystone_pcie(pp);
+ offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+ update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
+
+ writel(BIT(bit_pos),
+ ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
+ writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
+}
+
+void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+{
+ u32 reg_offset, bit_pos;
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+ update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+ writel(BIT(bit_pos),
+ ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
+}
+
+void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+{
+ u32 reg_offset, bit_pos;
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+ update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+ writel(BIT(bit_pos),
+ ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
+}
+
+static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
+{
+ struct keystone_pcie *ks_pcie;
+ unsigned int irq = d->irq;
+ struct msi_desc *msi;
+ struct pcie_port *pp;
+ u32 offset;
+
+ msi = irq_get_msi_desc(irq);
+ pp = sys_to_pcie(msi->dev->bus->sysdata);
+ ks_pcie = to_keystone_pcie(pp);
+ offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+
+ /* Mask the end point if PVM implemented */
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ if (msi->msi_attrib.maskbit)
+ pci_msi_mask_irq(d);
+ }
+
+ ks_dw_pcie_msi_clear_irq(pp, offset);
+}
+
+static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
+{
+ struct keystone_pcie *ks_pcie;
+ unsigned int irq = d->irq;
+ struct msi_desc *msi;
+ struct pcie_port *pp;
+ u32 offset;
+
+ msi = irq_get_msi_desc(irq);
+ pp = sys_to_pcie(msi->dev->bus->sysdata);
+ ks_pcie = to_keystone_pcie(pp);
+ offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+
+	/* Unmask the end point if PVM implemented */
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ if (msi->msi_attrib.maskbit)
+ pci_msi_unmask_irq(d);
+ }
+
+ ks_dw_pcie_msi_set_irq(pp, offset);
+}
+
+static struct irq_chip ks_dw_pcie_msi_irq_chip = {
+ .name = "Keystone-PCIe-MSI-IRQ",
+ .irq_ack = ks_dw_pcie_msi_irq_ack,
+ .irq_mask = ks_dw_pcie_msi_irq_mask,
+ .irq_unmask = ks_dw_pcie_msi_irq_unmask,
+};
+
+static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
+ .map = ks_dw_pcie_msi_map,
+};
+
+int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+ int i;
+
+ pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
+ MAX_MSI_IRQS,
+ &ks_dw_pcie_msi_domain_ops,
+ chip);
+ if (!pp->irq_domain) {
+ dev_err(pp->dev, "irq domain init failed\n");
+ return -ENXIO;
+ }
+
+ for (i = 0; i < MAX_MSI_IRQS; i++)
+ irq_create_mapping(pp->irq_domain, i);
+
+ return 0;
+}
+
+void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
+{
+ int i;
+
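+	/* Each legacy INTA-INTD host interrupt has its own 0x10-wide register bank */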
+ for (i = 0; i < MAX_LEGACY_IRQS; i++)
+ writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
+}
+
+void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
+{
+ struct pcie_port *pp = &ks_pcie->pp;
+ u32 pending;
+ int virq;
+
+ pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));
+
+ if (BIT(0) & pending) {
+ virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
+ dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n", offset,
+ virq);
+ generic_handle_irq(virq);
+ }
+
+ /* EOI the INTx interrupt */
+ writel(offset, ks_pcie->va_app_base + IRQ_EOI);
+}
+
+static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
+ .name = "Keystone-PCI-Legacy-IRQ",
+ .irq_ack = ks_dw_pcie_ack_legacy_irq,
+ .irq_mask = ks_dw_pcie_mask_legacy_irq,
+ .irq_unmask = ks_dw_pcie_unmask_legacy_irq,
+};
+
+static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hw_irq)
+{
+ irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, d->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
+ .map = ks_dw_pcie_init_legacy_irq_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
+ * registers
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
+{
+ u32 val;
+
+ writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS),
+ reg_virt + CMD_STATUS);
+
+ do {
+ val = readl(reg_virt + CMD_STATUS);
+ } while (!(val & DBI_CS2_EN_VAL));
+}
+
+/**
+ * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
+{
+ u32 val;
+
+ writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS),
+ reg_virt + CMD_STATUS);
+
+ do {
+ val = readl(reg_virt + CMD_STATUS);
+ } while (val & DBI_CS2_EN_VAL);
+}
+
+void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+{
+ struct pcie_port *pp = &ks_pcie->pp;
+ u32 start = pp->mem.start, end = pp->mem.end;
+ int i, tr_size;
+
+ /* Disable BARs for inbound access */
+ ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
+ writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
+ writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1);
+ ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
+
+ /* Set outbound translation size per window division */
+ writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);
+
+ tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
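+	/*
+	 * With CFG_PCIM_WIN_SZ_IDX = 3 each outbound window spans 2^3 MB =
+	 * 8 MB, so the 32 windows cover at most 256 MB of PCI memory space.
+	 */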
+
+ /* Using Direct 1:1 mapping of RC <-> PCI memory space */
+ for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
+ writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
+ writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
+ start += tr_size;
+ }
+
+ /* Enable OB translation */
+ writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
+ ks_pcie->va_app_base + CMD_STATUS);
+}
+
+/**
+ * ks_pcie_cfg_setup() - Set up configuration space address for a device
+ *
+ * @ks_pcie: ptr to keystone_pcie structure
+ * @bus: Bus number the device is residing on
+ * @devfn: device, function number info
+ *
+ * Forms and returns the address of configuration space mapped in PCIESS
+ * address space 0. Also configures CFG_SETUP for remote configuration space
+ * access.
+ *
+ * The address space has two regions for configuration accesses: local and
+ * remote. The local region is used for bus 0, where the RC itself sits; the
+ * remote region is used for all other buses, with TYPE 1 accesses when
+ * bus > 1. Devices on bus 1 sit on our (logical) secondary bus and therefore
+ * get TYPE 0 accesses. CFG_SETUP is needed only for remote configuration
+ * accesses.
+ */
+static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
+ unsigned int devfn)
+{
+ u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
+ struct pcie_port *pp = &ks_pcie->pp;
+ u32 regval;
+
+ if (bus == 0)
+ return pp->dbi_base;
+
+ regval = (bus << 16) | (device << 8) | function;
+
+ /*
+	 * Bus 1 is our (logical) secondary bus and gets TYPE 0 accesses
+	 * only; for any bus beyond it, request TYPE 1 configuration
+	 * accesses by setting BIT(24).
+ */
+ if (bus != 1)
+ regval |= BIT(24);
+
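+	/*
+	 * Example: bus 2, device 1, function 0 yields
+	 * regval = (2 << 16) | (1 << 8) | BIT(24) = 0x01020100 (a TYPE 1 access).
+	 */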
+ writel(regval, ks_pcie->va_app_base + CFG_SETUP);
+ return pp->va_cfg0_base;
+}
+
+int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 *val)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+ u8 bus_num = bus->number;
+ void __iomem *addr;
+
+ addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
+
+ return dw_pcie_cfg_read(addr + (where & ~0x3), where, size, val);
+}
+
+int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 val)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+ u8 bus_num = bus->number;
+ void __iomem *addr;
+
+ addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
+
+ return dw_pcie_cfg_write(addr + (where & ~0x3), where, size, val);
+}
+
+/**
+ * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
+ *
+ * This sets BAR0 to enable inbound access to the MSI_IRQ register.
+ */
+void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+ /* Configure and set up BAR0 */
+ ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
+
+ /* Enable BAR0 */
+ writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0);
+ writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0);
+
+ ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
+
+ /*
+ * For BAR0, just setting bus address for inbound writes (MSI) should
+ * be sufficient. Use physical address to avoid any conflicts.
+ */
+ writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0);
+}
+
+/**
+ * ks_dw_pcie_link_up() - Check if link up
+ */
+int ks_dw_pcie_link_up(struct pcie_port *pp)
+{
+ u32 val = readl(pp->dbi_base + DEBUG0);
+
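+	/* LTSSM state 0x11 is L0: link trained and operational */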
+ return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
+}
+
+void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ /* Disable Link training */
+ val = readl(ks_pcie->va_app_base + CMD_STATUS);
+ val &= ~LTSSM_EN_VAL;
+	writel(val, ks_pcie->va_app_base + CMD_STATUS);
+
+ /* Initiate Link Training */
+ val = readl(ks_pcie->va_app_base + CMD_STATUS);
+ writel(LTSSM_EN_VAL | val, ks_pcie->va_app_base + CMD_STATUS);
+}
+
+/**
+ * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
+ *
+ * Ioremap the register resources, initialize the legacy irq domain
+ * and call dw_pcie_host_init() to initialize the Keystone
+ * PCIe host controller.
+ */
+int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
+ struct device_node *msi_intc_np)
+{
+ struct pcie_port *pp = &ks_pcie->pp;
+ struct platform_device *pdev = to_platform_device(pp->dev);
+ struct resource *res;
+
+ /* Index 0 is the config reg. space address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pp->dbi_base = devm_ioremap_resource(pp->dev, res);
+ if (IS_ERR(pp->dbi_base))
+ return PTR_ERR(pp->dbi_base);
+
+ /*
+	 * Both remote config space bases are set to the same region; they
+	 * are used by the pcie rd/wr_other_conf functions
+ */
+ pp->va_cfg0_base = pp->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
+ pp->va_cfg1_base = pp->va_cfg0_base;
+
+ /* Index 1 is the application reg. space address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res);
+ if (IS_ERR(ks_pcie->va_app_base))
+ return PTR_ERR(ks_pcie->va_app_base);
+
+ ks_pcie->app = *res;
+
+ /* Create legacy IRQ domain */
+ ks_pcie->legacy_irq_domain =
+ irq_domain_add_linear(ks_pcie->legacy_intc_np,
+ MAX_LEGACY_IRQS,
+ &ks_dw_pcie_legacy_irq_domain_ops,
+ NULL);
+ if (!ks_pcie->legacy_irq_domain) {
+ dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n");
+ return -EINVAL;
+ }
+
+ return dw_pcie_host_init(pp);
+}
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
new file mode 100644
index 000000000..75333b0c4
--- /dev/null
+++ b/drivers/pci/host/pci-keystone.c
@@ -0,0 +1,413 @@
+/*
+ * PCIe host controller driver for Texas Instruments Keystone SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ * http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ * Implementation based on pci-exynos.c and pcie-designware.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/irqchip/chained_irq.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+
+#include "pcie-designware.h"
+#include "pci-keystone.h"
+
+#define DRIVER_NAME "keystone-pcie"
+
+/* driver specific constants */
+#define MAX_MSI_HOST_IRQS 8
+#define MAX_LEGACY_HOST_IRQS 4
+
+/* DEV_STAT_CTRL */
+#define PCIE_CAP_BASE 0x70
+
+/* PCIE controller device IDs */
+#define PCIE_RC_K2HK 0xb008
+#define PCIE_RC_K2E 0xb009
+#define PCIE_RC_K2L 0xb00a
+
+#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
+
+static void quirk_limit_mrrs(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->bus;
+ struct pci_dev *bridge = bus->self;
+ static const struct pci_device_id rc_pci_devids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { 0, },
+ };
+
+ if (pci_is_root_bus(bus))
+ return;
+
+ /* look for the host bridge */
+ while (!pci_is_root_bus(bus)) {
+ bridge = bus->self;
+ bus = bus->parent;
+ }
+
+ if (bridge) {
+ /*
+		 * The Keystone PCI controller has a h/w limitation of a
+		 * 256 byte maximum read request size. It can't handle
+		 * anything larger than this, so force this limit on
+		 * all downstream devices.
+ */
+ if (pci_match_id(rc_pci_devids, bridge)) {
+ if (pcie_get_readrq(dev) > 256) {
+ dev_info(&dev->dev, "limiting MRRS to 256\n");
+ pcie_set_readrq(dev, 256);
+ }
+ }
+ }
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
+
+static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
+{
+ struct pcie_port *pp = &ks_pcie->pp;
+ int count = 200;
+
+ dw_pcie_setup_rc(pp);
+
+ if (dw_pcie_link_up(pp)) {
+ dev_err(pp->dev, "Link already up\n");
+ return 0;
+ }
+
+ ks_dw_pcie_initiate_link_train(ks_pcie);
+ /* check if the link is up or not */
+ while (!dw_pcie_link_up(pp)) {
+ usleep_range(100, 1000);
+ if (--count) {
+ ks_dw_pcie_initiate_link_train(ks_pcie);
+ continue;
+ }
+ dev_err(pp->dev, "phy link never came up\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+ u32 offset = irq - ks_pcie->msi_host_irqs[0];
+ struct pcie_port *pp = &ks_pcie->pp;
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ dev_dbg(pp->dev, "%s, irq %d\n", __func__, irq);
+
+ /*
+	 * The chained irq handler installation would have replaced the normal
+	 * interrupt driver handler, so we need to take care of the mask/unmask
+	 * and ack operations ourselves.
+ */
+ chained_irq_enter(chip, desc);
+ ks_dw_pcie_handle_msi_irq(ks_pcie, offset);
+ chained_irq_exit(chip, desc);
+}
+
+/**
+ * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
+ * @irq: IRQ line for legacy interrupts
+ * @desc: Pointer to irq descriptor
+ *
+ * Traverse through pending legacy interrupts and invoke handler for each. Also
+ * takes care of interrupt controller level mask/ack operation.
+ */
+static void ks_pcie_legacy_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+ struct pcie_port *pp = &ks_pcie->pp;
+ u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+
+ dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq);
+
+ /*
+	 * The chained irq handler installation would have replaced the normal
+	 * interrupt driver handler, so we need to take care of the mask/unmask
+	 * and ack operations ourselves.
+ */
+ chained_irq_enter(chip, desc);
+ ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+ chained_irq_exit(chip, desc);
+}
+
+static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
+ char *controller, int *num_irqs)
+{
+ int temp, max_host_irqs, legacy = 1, *host_irqs, ret = -EINVAL;
+ struct device *dev = ks_pcie->pp.dev;
+ struct device_node *np_pcie = dev->of_node, **np_temp;
+
+ if (!strcmp(controller, "msi-interrupt-controller"))
+ legacy = 0;
+
+ if (legacy) {
+ np_temp = &ks_pcie->legacy_intc_np;
+ max_host_irqs = MAX_LEGACY_HOST_IRQS;
+ host_irqs = &ks_pcie->legacy_host_irqs[0];
+ } else {
+ np_temp = &ks_pcie->msi_intc_np;
+ max_host_irqs = MAX_MSI_HOST_IRQS;
+ host_irqs = &ks_pcie->msi_host_irqs[0];
+ }
+
+ /* interrupt controller is in a child node */
+ *np_temp = of_find_node_by_name(np_pcie, controller);
+ if (!(*np_temp)) {
+ dev_err(dev, "Node for %s is absent\n", controller);
+ goto out;
+ }
+ temp = of_irq_count(*np_temp);
+ if (!temp)
+ goto out;
+ if (temp > max_host_irqs)
+ dev_warn(dev, "Too many %s interrupts defined %u\n",
+ (legacy ? "legacy" : "MSI"), temp);
+
+ /*
+	 * Support up to max_host_irqs; in the DT these are indices 0 to 3
+	 * (legacy) or 0 to 7 (MSI).
+ */
+ for (temp = 0; temp < max_host_irqs; temp++) {
+ host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
+ if (!host_irqs[temp])
+ break;
+ }
+ if (temp) {
+ *num_irqs = temp;
+ ret = 0;
+ }
+out:
+ return ret;
+}
+
+static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
+{
+ int i;
+
+ /* Legacy IRQ */
+ for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
+ irq_set_handler_data(ks_pcie->legacy_host_irqs[i], ks_pcie);
+ irq_set_chained_handler(ks_pcie->legacy_host_irqs[i],
+ ks_pcie_legacy_irq_handler);
+ }
+ ks_dw_pcie_enable_legacy_irqs(ks_pcie);
+
+ /* MSI IRQ */
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
+ irq_set_chained_handler(ks_pcie->msi_host_irqs[i],
+ ks_pcie_msi_irq_handler);
+ irq_set_handler_data(ks_pcie->msi_host_irqs[i],
+ ks_pcie);
+ }
+ }
+}
+
+/*
+ * When a PCI device does not exist during config cycles, the Keystone host
+ * gets a bus error instead of returning 0xffffffff. This handler always
+ * returns 0 for such faults.
+ */
+static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ unsigned long instr = *(unsigned long *) instruction_pointer(regs);
+
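+	/*
+	 * The mask below appears to match ARM load (LDRH-class) encodings;
+	 * bits [15:12] name the destination register, which is faked to -1
+	 * (0xffffffff) before stepping past the faulting instruction.
+	 */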
+ if ((instr & 0x0e100090) == 0x00100090) {
+ int reg = (instr >> 12) & 15;
+
+ regs->uregs[reg] = -1;
+ regs->ARM_pc += 4;
+ }
+
+ return 0;
+}
+
+static void __init ks_pcie_host_init(struct pcie_port *pp)
+{
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+ u32 val;
+
+ ks_pcie_establish_link(ks_pcie);
+ ks_dw_pcie_setup_rc_app_regs(ks_pcie);
+ ks_pcie_setup_interrupts(ks_pcie);
+ writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
+ pp->dbi_base + PCI_IO_BASE);
+
+	/* update the Device ID */
+ writew(ks_pcie->device_id, pp->dbi_base + PCI_DEVICE_ID);
+
+	/* update DEV_STAT_CTRL to publish the right MRRS */
+ val = readl(pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+	/* set the MRRS to 256 bytes */
+ val |= BIT(12);
+ writel(val, pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
+
+ /*
+	 * PCIe access errors that result in OCP errors are caught by ARM as
+ * "External aborts"
+ */
+ hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
+ "Asynchronous external abort");
+}
+
+static struct pcie_host_ops keystone_pcie_host_ops = {
+ .rd_other_conf = ks_dw_pcie_rd_other_conf,
+ .wr_other_conf = ks_dw_pcie_wr_other_conf,
+ .link_up = ks_dw_pcie_link_up,
+ .host_init = ks_pcie_host_init,
+ .msi_set_irq = ks_dw_pcie_msi_set_irq,
+ .msi_clear_irq = ks_dw_pcie_msi_clear_irq,
+ .get_msi_addr = ks_dw_pcie_get_msi_addr,
+ .msi_host_init = ks_dw_pcie_msi_host_init,
+ .scan_bus = ks_dw_pcie_v3_65_scan_bus,
+};
+
+static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
+ struct platform_device *pdev)
+{
+ struct pcie_port *pp = &ks_pcie->pp;
+ int ret;
+
+ ret = ks_pcie_get_irq_controller_info(ks_pcie,
+ "legacy-interrupt-controller",
+ &ks_pcie->num_legacy_host_irqs);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ ret = ks_pcie_get_irq_controller_info(ks_pcie,
+ "msi-interrupt-controller",
+ &ks_pcie->num_msi_host_irqs);
+ if (ret)
+ return ret;
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &keystone_pcie_host_ops;
+ ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static const struct of_device_id ks_pcie_of_match[] = {
+ {
+ .type = "pci",
+ .compatible = "ti,keystone-pcie",
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ks_pcie_of_match);
+
+static int __exit ks_pcie_remove(struct platform_device *pdev)
+{
+ struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(ks_pcie->clk);
+
+ return 0;
+}
+
+static int __init ks_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct keystone_pcie *ks_pcie;
+ struct pcie_port *pp;
+ struct resource *res;
+ void __iomem *reg_p;
+ struct phy *phy;
+ int ret = 0;
+
+ ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
+ GFP_KERNEL);
+ if (!ks_pcie)
+ return -ENOMEM;
+
+ pp = &ks_pcie->pp;
+
+ /* initialize SerDes Phy if present */
+ phy = devm_phy_get(dev, "pcie-phy");
+ if (!IS_ERR_OR_NULL(phy)) {
+ ret = phy_init(phy);
+ if (ret < 0)
+ return ret;
+ }
+
+	/* Index 2 is used to read the PCI DEVICE_ID */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ reg_p = devm_ioremap_resource(dev, res);
+ if (IS_ERR(reg_p))
+ return PTR_ERR(reg_p);
+ ks_pcie->device_id = readl(reg_p) >> 16;
+ devm_iounmap(dev, reg_p);
+ devm_release_mem_region(dev, res->start, resource_size(res));
+
+ pp->dev = dev;
+ platform_set_drvdata(pdev, ks_pcie);
+ ks_pcie->clk = devm_clk_get(dev, "pcie");
+ if (IS_ERR(ks_pcie->clk)) {
+ dev_err(dev, "Failed to get pcie rc clock\n");
+ return PTR_ERR(ks_pcie->clk);
+ }
+ ret = clk_prepare_enable(ks_pcie->clk);
+ if (ret)
+ return ret;
+
+ ret = ks_add_pcie_port(ks_pcie, pdev);
+ if (ret < 0)
+ goto fail_clk;
+
+ return 0;
+fail_clk:
+ clk_disable_unprepare(ks_pcie->clk);
+
+ return ret;
+}
+
+static struct platform_driver ks_pcie_driver __refdata = {
+ .probe = ks_pcie_probe,
+ .remove = __exit_p(ks_pcie_remove),
+ .driver = {
+ .name = "keystone-pcie",
+ .of_match_table = of_match_ptr(ks_pcie_of_match),
+ },
+};
+
+module_platform_driver(ks_pcie_driver);
+
+MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
+MODULE_DESCRIPTION("Keystone PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h
new file mode 100644
index 000000000..478d932b6
--- /dev/null
+++ b/drivers/pci/host/pci-keystone.h
@@ -0,0 +1,58 @@
+/*
+ * Keystone PCI Controller's common includes
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ * http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define MAX_LEGACY_IRQS 4
+#define MAX_MSI_HOST_IRQS 8
+#define MAX_LEGACY_HOST_IRQS 4
+
+struct keystone_pcie {
+ struct clk *clk;
+ struct pcie_port pp;
+ /* PCI Device ID */
+ u32 device_id;
+ int num_legacy_host_irqs;
+ int legacy_host_irqs[MAX_LEGACY_HOST_IRQS];
+ struct device_node *legacy_intc_np;
+
+ int num_msi_host_irqs;
+ int msi_host_irqs[MAX_MSI_HOST_IRQS];
+ struct device_node *msi_intc_np;
+ struct irq_domain *legacy_irq_domain;
+
+ /* Application register space */
+ void __iomem *va_app_base;
+ struct resource app;
+};
+
+/* Keystone DW specific MSI controller APIs/definitions */
+void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset);
+u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
+
+/* Keystone specific PCI controller APIs */
+void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
+void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
+int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
+ struct device_node *msi_intc_np);
+int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 val);
+int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 *val);
+void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie);
+int ks_dw_pcie_link_up(struct pcie_port *pp);
+void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie);
+void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq);
+void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq);
+void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp);
+int ks_dw_pcie_msi_host_init(struct pcie_port *pp,
+ struct msi_controller *chip);
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
new file mode 100644
index 000000000..4a6e62f67
--- /dev/null
+++ b/drivers/pci/host/pci-layerscape.c
@@ -0,0 +1,175 @@
+/*
+ * PCIe host controller driver for Freescale Layerscape SoCs
+ *
+ * Copyright (C) 2014 Freescale Semiconductor.
+ *
+ * Author: Minghuan Lian <Minghuan.Lian@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pcie-designware.h"
+
+/* PEX1/2 Misc Ports Status Register */
+#define SCFG_PEXMSCPORTSR(pex_idx) (0x94 + (pex_idx) * 4)
+#define LTSSM_STATE_SHIFT 20
+#define LTSSM_STATE_MASK 0x3f
+#define LTSSM_PCIE_L0 0x11 /* L0 state */
+
+/* Symbol Timer Register and Filter Mask Register 1 */
+#define PCIE_STRFMR1 0x71c
+
+struct ls_pcie {
+ struct list_head node;
+ struct device *dev;
+ struct pci_bus *bus;
+ void __iomem *dbi;
+ struct regmap *scfg;
+ struct pcie_port pp;
+ int index;
+ int msi_irq;
+};
+
+#define to_ls_pcie(x) container_of(x, struct ls_pcie, pp)
+
+static int ls_pcie_link_up(struct pcie_port *pp)
+{
+ u32 state;
+ struct ls_pcie *pcie = to_ls_pcie(pp);
+
+ regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state);
+ state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
+
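+	/* Any LTSSM state at or beyond L0 (0x11) is treated as link up */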
+ if (state < LTSSM_PCIE_L0)
+ return 0;
+
+ return 1;
+}
+
+static void ls_pcie_host_init(struct pcie_port *pp)
+{
+ struct ls_pcie *pcie = to_ls_pcie(pp);
+ int count = 0;
+ u32 val;
+
+ dw_pcie_setup_rc(pp);
+
+ while (!ls_pcie_link_up(pp)) {
+ usleep_range(100, 1000);
+ count++;
+ if (count >= 200) {
+ dev_err(pp->dev, "phy link never came up\n");
+ return;
+ }
+ }
+
+ /*
+	 * LS1021A workaround for internal erratum TKT228622: clear the
+	 * upper 16 bits of PCIE_STRFMR1 to fix the INTx hang issue
+ */
+ val = ioread32(pcie->dbi + PCIE_STRFMR1);
+ val &= 0xffff;
+ iowrite32(val, pcie->dbi + PCIE_STRFMR1);
+}
+
+static struct pcie_host_ops ls_pcie_host_ops = {
+ .link_up = ls_pcie_link_up,
+ .host_init = ls_pcie_host_init,
+};
+
+static int ls_add_pcie_port(struct ls_pcie *pcie)
+{
+ struct pcie_port *pp;
+ int ret;
+
+ pp = &pcie->pp;
+ pp->dev = pcie->dev;
+ pp->dbi_base = pcie->dbi;
+ pp->root_bus_nr = -1;
+ pp->ops = &ls_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(pp->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init ls_pcie_probe(struct platform_device *pdev)
+{
+ struct ls_pcie *pcie;
+ struct resource *dbi_base;
+ u32 index[2];
+ int ret;
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->dev = &pdev->dev;
+
+ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base);
+ if (IS_ERR(pcie->dbi)) {
+ dev_err(&pdev->dev, "missing *regs* space\n");
+ return PTR_ERR(pcie->dbi);
+ }
+
+ pcie->scfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "fsl,pcie-scfg");
+ if (IS_ERR(pcie->scfg)) {
+ dev_err(&pdev->dev, "No syscfg phandle specified\n");
+ return PTR_ERR(pcie->scfg);
+ }
+
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ "fsl,pcie-scfg", index, 2);
+ if (ret)
+ return ret;
+ pcie->index = index[1];
+
+ ret = ls_add_pcie_port(pcie);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, pcie);
+
+ return 0;
+}
+
+static const struct of_device_id ls_pcie_of_match[] = {
+ { .compatible = "fsl,ls1021a-pcie" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
+
+static struct platform_driver ls_pcie_driver = {
+ .driver = {
+ .name = "layerscape-pcie",
+ .of_match_table = ls_pcie_of_match,
+ },
+};
+
+module_platform_driver_probe(ls_pcie_driver, ls_pcie_probe);
+
+MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@freescale.com>");
+MODULE_DESCRIPTION("Freescale Layerscape PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
new file mode 100644
index 000000000..1ab863551
--- /dev/null
+++ b/drivers/pci/host/pci-mvebu.c
@@ -0,0 +1,1122 @@
+/*
+ * PCIe driver for Marvell Armada 370 and Armada XP SoCs
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/mbus.h>
+#include <linux/msi.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+
+/*
+ * PCIe unit register offsets.
+ */
+#define PCIE_DEV_ID_OFF 0x0000
+#define PCIE_CMD_OFF 0x0004
+#define PCIE_DEV_REV_OFF 0x0008
+#define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3))
+#define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3))
+#define PCIE_HEADER_LOG_4_OFF 0x0128
+#define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4))
+#define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4))
+#define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4))
+#define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4))
+#define PCIE_WIN5_CTRL_OFF 0x1880
+#define PCIE_WIN5_BASE_OFF 0x1884
+#define PCIE_WIN5_REMAP_OFF 0x188c
+#define PCIE_CONF_ADDR_OFF 0x18f8
+#define PCIE_CONF_ADDR_EN 0x80000000
+#define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc))
+#define PCIE_CONF_BUS(b) (((b) & 0xff) << 16)
+#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11)
+#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8)
+#define PCIE_CONF_ADDR(bus, devfn, where) \
+ (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
+ PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
+ PCIE_CONF_ADDR_EN)
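+/*
+ * Example: bus 1, slot 2, function 0, where = 0x104 (extended config space)
+ * gives PCIE_CONF_REG(0x104) = 0x01000004, so the value written to
+ * PCIE_CONF_ADDR_OFF is 0x80000000 | 0x00010000 | 0x00001000 | 0x01000004
+ * = 0x81011004.
+ */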
+#define PCIE_CONF_DATA_OFF 0x18fc
+#define PCIE_MASK_OFF 0x1910
+#define PCIE_MASK_ENABLE_INTS 0x0f000000
+#define PCIE_CTRL_OFF 0x1a00
+#define PCIE_CTRL_X1_MODE 0x0001
+#define PCIE_STAT_OFF 0x1a04
+#define PCIE_STAT_BUS 0xff00
+#define PCIE_STAT_DEV 0x1f0000
+#define PCIE_STAT_LINK_DOWN BIT(0)
+#define PCIE_DEBUG_CTRL 0x1a60
+#define PCIE_DEBUG_SOFT_RESET BIT(20)
+
+/* PCI configuration space of a PCI-to-PCI bridge */
+struct mvebu_sw_pci_bridge {
+ u16 vendor;
+ u16 device;
+ u16 command;
+ u16 class;
+ u8 interface;
+ u8 revision;
+ u8 bist;
+ u8 header_type;
+ u8 latency_timer;
+ u8 cache_line_size;
+ u32 bar[2];
+ u8 primary_bus;
+ u8 secondary_bus;
+ u8 subordinate_bus;
+ u8 secondary_latency_timer;
+ u8 iobase;
+ u8 iolimit;
+ u16 secondary_status;
+ u16 membase;
+ u16 memlimit;
+ u16 iobaseupper;
+ u16 iolimitupper;
+ u8 cappointer;
+ u8 reserved1;
+ u16 reserved2;
+ u32 romaddr;
+ u8 intline;
+ u8 intpin;
+ u16 bridgectrl;
+};
+
+struct mvebu_pcie_port;
+
+/* Structure representing all PCIe interfaces */
+struct mvebu_pcie {
+ struct platform_device *pdev;
+ struct mvebu_pcie_port *ports;
+ struct msi_controller *msi;
+ struct resource io;
+ struct resource realio;
+ struct resource mem;
+ struct resource busn;
+ int nports;
+};
+
+/* Structure representing one PCIe interface */
+struct mvebu_pcie_port {
+ char *name;
+ void __iomem *base;
+ u32 port;
+ u32 lane;
+ int devfn;
+ unsigned int mem_target;
+ unsigned int mem_attr;
+ unsigned int io_target;
+ unsigned int io_attr;
+ struct clk *clk;
+ int reset_gpio;
+ int reset_active_low;
+ char *reset_name;
+ struct mvebu_sw_pci_bridge bridge;
+ struct device_node *dn;
+ struct mvebu_pcie *pcie;
+ phys_addr_t memwin_base;
+ size_t memwin_size;
+ phys_addr_t iowin_base;
+ size_t iowin_size;
+ u32 saved_pcie_stat;
+};
+
+static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
+{
+ writel(val, port->base + reg);
+}
+
+static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
+{
+ return readl(port->base + reg);
+}
+
+static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
+{
+ return port->io_target != -1 && port->io_attr != -1;
+}
+
+static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
+{
+ return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
+}
+
+static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
+{
+ u32 stat;
+
+ stat = mvebu_readl(port, PCIE_STAT_OFF);
+ stat &= ~PCIE_STAT_BUS;
+ stat |= nr << 8;
+ mvebu_writel(port, stat, PCIE_STAT_OFF);
+}
+
+static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
+{
+ u32 stat;
+
+ stat = mvebu_readl(port, PCIE_STAT_OFF);
+ stat &= ~PCIE_STAT_DEV;
+ stat |= nr << 16;
+ mvebu_writel(port, stat, PCIE_STAT_OFF);
+}
+
+/*
+ * Set up PCIe BARs and address decode windows:
+ * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks
+ * WIN[0-3] -> DRAM bank[0-3]
+ */
+static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
+{
+ const struct mbus_dram_target_info *dram;
+ u32 size;
+ int i;
+
+ dram = mv_mbus_dram_info();
+
+ /* First, disable and clear BARs and windows. */
+ for (i = 1; i < 3; i++) {
+ mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
+ mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
+ mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
+ }
+
+ for (i = 0; i < 5; i++) {
+ mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
+ }
+
+ mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
+ mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
+ mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
+
+ /* Setup windows for DDR banks. Count total DDR size on the fly. */
+ size = 0;
+ for (i = 0; i < dram->num_cs; i++) {
+ const struct mbus_dram_window *cs = dram->cs + i;
+
+ mvebu_writel(port, cs->base & 0xffff0000,
+ PCIE_WIN04_BASE_OFF(i));
+ mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
+ mvebu_writel(port,
+ ((cs->size - 1) & 0xffff0000) |
+ (cs->mbus_attr << 8) |
+ (dram->mbus_dram_target_id << 4) | 1,
+ PCIE_WIN04_CTRL_OFF(i));
+
+ size += cs->size;
+ }
+
+ /* Round up 'size' to the nearest power of two. */
+ if ((size & (size - 1)) != 0)
+ size = 1 << fls(size);
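+	/* e.g. 1 GB + 512 MB of DRAM rounds up to a single 2 GB BAR */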
+
+ /* Setup BAR[1] to all DRAM banks. */
+ mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
+ mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
+ mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
+ PCIE_BAR_CTRL_OFF(1));
+}
+
+static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
+{
+ u32 cmd, mask;
+
+ /* Point PCIe unit MBUS decode windows to DRAM space. */
+ mvebu_pcie_setup_wins(port);
+
+ /* Master + slave enable. */
+ cmd = mvebu_readl(port, PCIE_CMD_OFF);
+ cmd |= PCI_COMMAND_IO;
+ cmd |= PCI_COMMAND_MEMORY;
+ cmd |= PCI_COMMAND_MASTER;
+ mvebu_writel(port, cmd, PCIE_CMD_OFF);
+
+ /* Enable interrupt lines A-D. */
+ mask = mvebu_readl(port, PCIE_MASK_OFF);
+ mask |= PCIE_MASK_ENABLE_INTS;
+ mvebu_writel(port, mask, PCIE_MASK_OFF);
+}
+
+static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
+ struct pci_bus *bus,
+ u32 devfn, int where, int size, u32 *val)
+{
+ mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
+ PCIE_CONF_ADDR_OFF);
+
+ *val = mvebu_readl(port, PCIE_CONF_DATA_OFF);
+
+ if (size == 1)
+ *val = (*val >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (8 * (where & 3))) & 0xffff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
+ struct pci_bus *bus,
+ u32 devfn, int where, int size, u32 val)
+{
+ u32 _val, shift = 8 * (where & 3);
+
+ mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
+ PCIE_CONF_ADDR_OFF);
+ _val = mvebu_readl(port, PCIE_CONF_DATA_OFF);
+
+ if (size == 4)
+ _val = val;
+ else if (size == 2)
+ _val = (_val & ~(0xffff << shift)) | ((val & 0xffff) << shift);
+ else if (size == 1)
+ _val = (_val & ~(0xff << shift)) | ((val & 0xff) << shift);
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ mvebu_writel(port, _val, PCIE_CONF_DATA_OFF);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * Remove windows, starting from the largest ones to the smallest
+ * ones.
+ */
+static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
+ phys_addr_t base, size_t size)
+{
+ while (size) {
+ size_t sz = 1 << (fls(size) - 1);
+
+ mvebu_mbus_del_window(base, sz);
+ base += sz;
+ size -= sz;
+ }
+}
+
+/*
+ * MBus windows can only have a power of two size, but PCI BARs do not
+ * have this constraint. Therefore, we have to split the PCI BAR into
+ * areas each having a power of two size. We start from the largest
+ * one (i.e. the highest order bit set in the size).
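+ * For example, a 9 MB region is covered by an 8 MB window followed by a
+ * 1 MB window.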
+ */
+static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
+ unsigned int target, unsigned int attribute,
+ phys_addr_t base, size_t size,
+ phys_addr_t remap)
+{
+ size_t size_mapped = 0;
+
+ while (size) {
+ size_t sz = 1 << (fls(size) - 1);
+ int ret;
+
+ ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
+ sz, remap);
+ if (ret) {
+ phys_addr_t end = base + sz - 1;
+
+ dev_err(&port->pcie->pdev->dev,
+ "Could not create MBus window at [mem %pa-%pa]: %d\n",
+ &base, &end, ret);
+ mvebu_pcie_del_windows(port, base - size_mapped,
+ size_mapped);
+ return;
+ }
+
+ size -= sz;
+ size_mapped += sz;
+ base += sz;
+ if (remap != MVEBU_MBUS_NO_REMAP)
+ remap += sz;
+ }
+}
+
+static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
+{
+ phys_addr_t iobase;
+
+ /* Are the new iobase/iolimit values invalid? */
+ if (port->bridge.iolimit < port->bridge.iobase ||
+ port->bridge.iolimitupper < port->bridge.iobaseupper ||
+ !(port->bridge.command & PCI_COMMAND_IO)) {
+
+ /* If a window was configured, remove it */
+ if (port->iowin_base) {
+ mvebu_pcie_del_windows(port, port->iowin_base,
+ port->iowin_size);
+ port->iowin_base = 0;
+ port->iowin_size = 0;
+ }
+
+ return;
+ }
+
+ if (!mvebu_has_ioport(port)) {
+ dev_WARN(&port->pcie->pdev->dev,
+ "Attempt to set IO when IO is disabled\n");
+ return;
+ }
+
+ /*
+ * We read the PCI-to-PCI bridge emulated registers, and
+ * calculate the base address and size of the address decoding
+ * window to setup, according to the PCI-to-PCI bridge
+ * specifications. iobase is the bus address, port->iowin_base
+ * is the CPU address.
+ */
+ iobase = ((port->bridge.iobase & 0xF0) << 8) |
+ (port->bridge.iobaseupper << 16);
+ port->iowin_base = port->pcie->io.start + iobase;
+ port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
+ (port->bridge.iolimitupper << 16)) -
+ iobase) + 1;
+
+ mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
+ port->iowin_base, port->iowin_size,
+ iobase);
+}
+
+static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
+{
+ /* Are the new membase/memlimit values invalid? */
+ if (port->bridge.memlimit < port->bridge.membase ||
+ !(port->bridge.command & PCI_COMMAND_MEMORY)) {
+
+ /* If a window was configured, remove it */
+ if (port->memwin_base) {
+ mvebu_pcie_del_windows(port, port->memwin_base,
+ port->memwin_size);
+ port->memwin_base = 0;
+ port->memwin_size = 0;
+ }
+
+ return;
+ }
+
+ /*
+ * We read the PCI-to-PCI bridge emulated registers, and
+ * calculate the base address and size of the address decoding
+ * window to setup, according to the PCI-to-PCI bridge
+ * specifications.
+ */
+ port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16);
+ port->memwin_size =
+ (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+ port->memwin_base + 1;
+
+ mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
+ port->memwin_base, port->memwin_size,
+ MVEBU_MBUS_NO_REMAP);
+}
+
+/*
+ * Initialize the configuration space of the PCI-to-PCI bridge
+ * associated with the given PCIe interface.
+ */
+static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
+{
+ struct mvebu_sw_pci_bridge *bridge = &port->bridge;
+
+ memset(bridge, 0, sizeof(struct mvebu_sw_pci_bridge));
+
+ bridge->class = PCI_CLASS_BRIDGE_PCI;
+ bridge->vendor = PCI_VENDOR_ID_MARVELL;
+ bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
+ bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
+ bridge->header_type = PCI_HEADER_TYPE_BRIDGE;
+ bridge->cache_line_size = 0x10;
+
+	/* We support 32-bit I/O addressing */
+ bridge->iobase = PCI_IO_RANGE_TYPE_32;
+ bridge->iolimit = PCI_IO_RANGE_TYPE_32;
+}
+
+/*
+ * Read the configuration space of the PCI-to-PCI bridge associated to
+ * the given PCIe interface.
+ */
+static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
+ unsigned int where, int size, u32 *value)
+{
+ struct mvebu_sw_pci_bridge *bridge = &port->bridge;
+
+ switch (where & ~3) {
+ case PCI_VENDOR_ID:
+ *value = bridge->device << 16 | bridge->vendor;
+ break;
+
+ case PCI_COMMAND:
+ *value = bridge->command;
+ break;
+
+ case PCI_CLASS_REVISION:
+ *value = bridge->class << 16 | bridge->interface << 8 |
+ bridge->revision;
+ break;
+
+ case PCI_CACHE_LINE_SIZE:
+ *value = bridge->bist << 24 | bridge->header_type << 16 |
+ bridge->latency_timer << 8 | bridge->cache_line_size;
+ break;
+
+ case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
+ *value = bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4];
+ break;
+
+ case PCI_PRIMARY_BUS:
+ *value = (bridge->secondary_latency_timer << 24 |
+ bridge->subordinate_bus << 16 |
+ bridge->secondary_bus << 8 |
+ bridge->primary_bus);
+ break;
+
+ case PCI_IO_BASE:
+ if (!mvebu_has_ioport(port))
+ *value = bridge->secondary_status << 16;
+ else
+ *value = (bridge->secondary_status << 16 |
+ bridge->iolimit << 8 |
+ bridge->iobase);
+ break;
+
+ case PCI_MEMORY_BASE:
+ *value = (bridge->memlimit << 16 | bridge->membase);
+ break;
+
+ case PCI_PREF_MEMORY_BASE:
+ *value = 0;
+ break;
+
+ case PCI_IO_BASE_UPPER16:
+ *value = (bridge->iolimitupper << 16 | bridge->iobaseupper);
+ break;
+
+ case PCI_ROM_ADDRESS1:
+ *value = 0;
+ break;
+
+ case PCI_INTERRUPT_LINE:
+ /* LINE PIN MIN_GNT MAX_LAT */
+ *value = 0;
+ break;
+
+ default:
+ *value = 0xffffffff;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ if (size == 2)
+ *value = (*value >> (8 * (where & 3))) & 0xffff;
+ else if (size == 1)
+ *value = (*value >> (8 * (where & 3))) & 0xff;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* Write to the PCI-to-PCI bridge configuration space */
+static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
+ unsigned int where, int size, u32 value)
+{
+ struct mvebu_sw_pci_bridge *bridge = &port->bridge;
+ u32 mask, reg;
+ int err;
+
+ if (size == 4)
+ mask = 0x0;
+ else if (size == 2)
+ mask = ~(0xffff << ((where & 3) * 8));
+ else if (size == 1)
+ mask = ~(0xff << ((where & 3) * 8));
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ err = mvebu_sw_pci_bridge_read(port, where & ~3, 4, &reg);
+ if (err)
+ return err;
+
+ value = (reg & mask) | value << ((where & 3) * 8);
+
+ switch (where & ~3) {
+ case PCI_COMMAND:
+ {
+ u32 old = bridge->command;
+
+ if (!mvebu_has_ioport(port))
+ value &= ~PCI_COMMAND_IO;
+
+ bridge->command = value & 0xffff;
+ if ((old ^ bridge->command) & PCI_COMMAND_IO)
+ mvebu_pcie_handle_iobase_change(port);
+ if ((old ^ bridge->command) & PCI_COMMAND_MEMORY)
+ mvebu_pcie_handle_membase_change(port);
+ break;
+ }
+
+ case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
+ bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value;
+ break;
+
+ case PCI_IO_BASE:
+ /*
+		 * We also keep the low range-type bits set
+		 * (PCI_IO_RANGE_TYPE_32); they are read-only and indicate
+		 * that we support 32-bit I/O addressing
+ */
+ bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32;
+ bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32;
+ mvebu_pcie_handle_iobase_change(port);
+ break;
+
+ case PCI_MEMORY_BASE:
+ bridge->membase = value & 0xffff;
+ bridge->memlimit = value >> 16;
+ mvebu_pcie_handle_membase_change(port);
+ break;
+
+ case PCI_IO_BASE_UPPER16:
+ bridge->iobaseupper = value & 0xffff;
+ bridge->iolimitupper = value >> 16;
+ mvebu_pcie_handle_iobase_change(port);
+ break;
+
+ case PCI_PRIMARY_BUS:
+ bridge->primary_bus = value & 0xff;
+ bridge->secondary_bus = (value >> 8) & 0xff;
+ bridge->subordinate_bus = (value >> 16) & 0xff;
+ bridge->secondary_latency_timer = (value >> 24) & 0xff;
+ mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus);
+ break;
+
+ default:
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
+ struct pci_bus *bus,
+ int devfn)
+{
+ int i;
+
+ for (i = 0; i < pcie->nports; i++) {
+ struct mvebu_pcie_port *port = &pcie->ports[i];
+
+ if (bus->number == 0 && port->devfn == devfn)
+ return port;
+ if (bus->number != 0 &&
+ bus->number >= port->bridge.secondary_bus &&
+ bus->number <= port->bridge.subordinate_bus)
+ return port;
+ }
+
+ return NULL;
+}
+
+/* PCI configuration space write function */
+static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct mvebu_pcie_port *port;
+ int ret;
+
+ port = mvebu_pcie_find_port(pcie, bus, devfn);
+ if (!port)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Access the emulated PCI-to-PCI bridge */
+ if (bus->number == 0)
+ return mvebu_sw_pci_bridge_write(port, where, size, val);
+
+ if (!mvebu_pcie_link_up(port))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /*
+	 * On the secondary bus, we don't want to expose any device
+	 * other than the one physically connected in the PCIe slot,
+	 * visible in slot 0. In slot 1, there's a special Marvell
+	 * device that only makes sense when the Armada is used as a
+	 * PCIe endpoint.
+ */
+ if (bus->number == port->bridge.secondary_bus &&
+ PCI_SLOT(devfn) != 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Access the real PCIe interface */
+ ret = mvebu_pcie_hw_wr_conf(port, bus, devfn,
+ where, size, val);
+
+ return ret;
+}
+
+/* PCI configuration space read function */
+static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
+ struct mvebu_pcie_port *port;
+ int ret;
+
+ port = mvebu_pcie_find_port(pcie, bus, devfn);
+ if (!port) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ /* Access the emulated PCI-to-PCI bridge */
+ if (bus->number == 0)
+ return mvebu_sw_pci_bridge_read(port, where, size, val);
+
+ if (!mvebu_pcie_link_up(port)) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ /*
+	 * On the secondary bus, we don't want to expose any device
+	 * other than the one physically connected in the PCIe slot,
+	 * visible in slot 0. In slot 1, there's a special Marvell
+	 * device that only makes sense when the Armada is used as a
+	 * PCIe endpoint.
+ */
+ if (bus->number == port->bridge.secondary_bus &&
+ PCI_SLOT(devfn) != 0) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ /* Access the real PCIe interface */
+ ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
+ where, size, val);
+
+ return ret;
+}
+
+static struct pci_ops mvebu_pcie_ops = {
+ .read = mvebu_pcie_rd_conf,
+ .write = mvebu_pcie_wr_conf,
+};
+
+static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct mvebu_pcie *pcie = sys_to_pcie(sys);
+ int i;
+
+ pcie->mem.name = "PCI MEM";
+ pcie->realio.name = "PCI I/O";
+
+ if (request_resource(&iomem_resource, &pcie->mem))
+ return 0;
+
+ if (resource_size(&pcie->realio) != 0) {
+ if (request_resource(&ioport_resource, &pcie->realio)) {
+ release_resource(&pcie->mem);
+ return 0;
+ }
+ pci_add_resource_offset(&sys->resources, &pcie->realio,
+ sys->io_offset);
+ }
+ pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
+ pci_add_resource(&sys->resources, &pcie->busn);
+
+ for (i = 0; i < pcie->nports; i++) {
+ struct mvebu_pcie_port *port = &pcie->ports[i];
+
+ if (!port->base)
+ continue;
+ mvebu_pcie_setup_hw(port);
+ }
+
+ return 1;
+}
+
+static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+{
+ struct mvebu_pcie *pcie = sys_to_pcie(sys);
+ struct pci_bus *bus;
+
+ bus = pci_create_root_bus(&pcie->pdev->dev, sys->busnr,
+ &mvebu_pcie_ops, sys, &sys->resources);
+ if (!bus)
+ return NULL;
+
+ pci_scan_child_bus(bus);
+
+ return bus;
+}
+
+static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
+ const struct resource *res,
+ resource_size_t start,
+ resource_size_t size,
+ resource_size_t align)
+{
+ if (dev->bus->number != 0)
+ return start;
+
+ /*
+ * On the PCI-to-PCI bridge side, the I/O windows must have at
+ * least a 64 KB size and the memory windows must have at
+ * least a 1 MB size. Moreover, MBus windows need to have a
+ * base address aligned on their size, and their size must be
+ * a power of two. This means that if the BAR doesn't have a
+ * power of two size, several MBus windows will actually be
+ * created. We need to ensure that the biggest MBus window
+ * (which will be the first one) is aligned on its size, which
+ * explains the rounddown_pow_of_two() being done here.
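+	 * For example, a 96 KB I/O BAR is aligned to 64 KB, its largest
+	 * power-of-two component.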
+ */
+ if (res->flags & IORESOURCE_IO)
+ return round_up(start, max_t(resource_size_t, SZ_64K,
+ rounddown_pow_of_two(size)));
+ else if (res->flags & IORESOURCE_MEM)
+ return round_up(start, max_t(resource_size_t, SZ_1M,
+ rounddown_pow_of_two(size)));
+ else
+ return start;
+}
+
+static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
+{
+ struct hw_pci hw;
+
+ memset(&hw, 0, sizeof(hw));
+
+#ifdef CONFIG_PCI_MSI
+ hw.msi_ctrl = pcie->msi;
+#endif
+
+ hw.nr_controllers = 1;
+ hw.private_data = (void **)&pcie;
+ hw.setup = mvebu_pcie_setup;
+ hw.scan = mvebu_pcie_scan_bus;
+ hw.map_irq = of_irq_parse_and_map_pci;
+ hw.ops = &mvebu_pcie_ops;
+ hw.align_resource = mvebu_pcie_align_resource;
+
+ pci_common_init(&hw);
+}
+
+/*
+ * Looks up the list of register addresses encoded into the reg =
+ * <...> property for one that matches the given port/lane. Once
+ * found, maps it.
+ */
+static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
+ struct device_node *np,
+ struct mvebu_pcie_port *port)
+{
+ struct resource regs;
+ int ret = 0;
+
+ ret = of_address_to_resource(np, 0, &regs);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return devm_ioremap_resource(&pdev->dev, &regs);
+}
+
+#define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03)
+#define DT_TYPE_IO 0x1
+#define DT_TYPE_MEM32 0x2
+#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
+#define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF)
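+/*
+ * In the "ranges" property the parent (CPU) address encodes the MBus target
+ * ID in bits [63:56] and the window attribute in bits [55:48], which the
+ * DT_CPUADDR_* macros above extract.
+ */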
+
+static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
+ unsigned long type,
+ unsigned int *tgt,
+ unsigned int *attr)
+{
+ const int na = 3, ns = 2;
+ const __be32 *range;
+ int rlen, nranges, rangesz, pna, i;
+
+ *tgt = -1;
+ *attr = -1;
+
+ range = of_get_property(np, "ranges", &rlen);
+ if (!range)
+ return -EINVAL;
+
+ pna = of_n_addr_cells(np);
+ rangesz = pna + na + ns;
+ nranges = rlen / sizeof(__be32) / rangesz;
+
+ for (i = 0; i < nranges; i++, range += rangesz) {
+ u32 flags = of_read_number(range, 1);
+ u32 slot = of_read_number(range + 1, 1);
+ u64 cpuaddr = of_read_number(range + na, pna);
+ unsigned long rtype;
+
+ if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
+ rtype = IORESOURCE_IO;
+ else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
+ rtype = IORESOURCE_MEM;
+ else
+ continue;
+
+ if (slot == PCI_SLOT(devfn) && type == rtype) {
+ *tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
+ *attr = DT_CPUADDR_TO_ATTR(cpuaddr);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie)
+{
+ struct device_node *msi_node;
+
+ msi_node = of_parse_phandle(pcie->pdev->dev.of_node,
+ "msi-parent", 0);
+ if (!msi_node)
+ return;
+
+ pcie->msi = of_pci_find_msi_chip_by_node(msi_node);
+
+ if (pcie->msi)
+ pcie->msi->dev = &pcie->pdev->dev;
+}
+
+static int mvebu_pcie_suspend(struct device *dev)
+{
+ struct mvebu_pcie *pcie;
+ int i;
+
+ pcie = dev_get_drvdata(dev);
+ for (i = 0; i < pcie->nports; i++) {
+ struct mvebu_pcie_port *port = pcie->ports + i;
+ port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
+ }
+
+ return 0;
+}
+
+static int mvebu_pcie_resume(struct device *dev)
+{
+ struct mvebu_pcie *pcie;
+ int i;
+
+ pcie = dev_get_drvdata(dev);
+ for (i = 0; i < pcie->nports; i++) {
+ struct mvebu_pcie_port *port = pcie->ports + i;
+ mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
+ mvebu_pcie_setup_hw(port);
+ }
+
+ return 0;
+}
+
+static int mvebu_pcie_probe(struct platform_device *pdev)
+{
+ struct mvebu_pcie *pcie;
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *child;
+ int i, ret;
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pcie),
+ GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->pdev = pdev;
+ platform_set_drvdata(pdev, pcie);
+
+ /* Get the PCIe memory and I/O aperture */
+ mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
+ if (resource_size(&pcie->mem) == 0) {
+ dev_err(&pdev->dev, "invalid memory aperture size\n");
+ return -EINVAL;
+ }
+
+ mvebu_mbus_get_pcie_io_aperture(&pcie->io);
+
+ if (resource_size(&pcie->io) != 0) {
+ pcie->realio.flags = pcie->io.flags;
+ pcie->realio.start = PCIBIOS_MIN_IO;
+ pcie->realio.end = min_t(resource_size_t,
+ IO_SPACE_LIMIT,
+ resource_size(&pcie->io));
+ } else
+ pcie->realio = pcie->io;
+
+ /* Get the bus range */
+ ret = of_pci_parse_bus_range(np, &pcie->busn);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse bus-range property: %d\n",
+ ret);
+ return ret;
+ }
+
+ i = 0;
+ for_each_child_of_node(pdev->dev.of_node, child) {
+ if (!of_device_is_available(child))
+ continue;
+ i++;
+ }
+
+ pcie->ports = devm_kzalloc(&pdev->dev, i *
+ sizeof(struct mvebu_pcie_port),
+ GFP_KERNEL);
+ if (!pcie->ports)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_child_of_node(pdev->dev.of_node, child) {
+ struct mvebu_pcie_port *port = &pcie->ports[i];
+ enum of_gpio_flags flags;
+
+ if (!of_device_is_available(child))
+ continue;
+
+ port->pcie = pcie;
+
+ if (of_property_read_u32(child, "marvell,pcie-port",
+ &port->port)) {
+ dev_warn(&pdev->dev,
+ "ignoring PCIe DT node, missing pcie-port property\n");
+ continue;
+ }
+
+ if (of_property_read_u32(child, "marvell,pcie-lane",
+ &port->lane))
+ port->lane = 0;
+
+ port->name = kasprintf(GFP_KERNEL, "pcie%d.%d",
+ port->port, port->lane);
+
+ port->devfn = of_pci_get_devfn(child);
+ if (port->devfn < 0)
+ continue;
+
+ ret = mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_MEM,
+ &port->mem_target, &port->mem_attr);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "PCIe%d.%d: cannot get tgt/attr for mem window\n",
+ port->port, port->lane);
+ continue;
+ }
+
+ if (resource_size(&pcie->io) != 0)
+ mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_IO,
+ &port->io_target, &port->io_attr);
+ else {
+ port->io_target = -1;
+ port->io_attr = -1;
+ }
+
+ port->reset_gpio = of_get_named_gpio_flags(child,
+ "reset-gpios", 0, &flags);
+ if (gpio_is_valid(port->reset_gpio)) {
+ u32 reset_udelay = 20000;
+
+ port->reset_active_low = flags & OF_GPIO_ACTIVE_LOW;
+ port->reset_name = kasprintf(GFP_KERNEL,
+ "pcie%d.%d-reset", port->port, port->lane);
+ of_property_read_u32(child, "reset-delay-us",
+ &reset_udelay);
+
+ ret = devm_gpio_request_one(&pdev->dev,
+ port->reset_gpio, GPIOF_DIR_OUT, port->reset_name);
+ if (ret) {
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ continue;
+ }
+
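+ /*
+ * Drive the GPIO to its deasserted level and give the device
+ * reset-delay-us (20 ms by default) to come out of reset.
+ */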
+ gpio_set_value(port->reset_gpio,
+ (port->reset_active_low) ? 1 : 0);
+ msleep(reset_udelay / 1000);
+ }
+
+ port->clk = of_clk_get_by_name(child, NULL);
+ if (IS_ERR(port->clk)) {
+ dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
+ port->port, port->lane);
+ continue;
+ }
+
+ ret = clk_prepare_enable(port->clk);
+ if (ret)
+ continue;
+
+ port->base = mvebu_pcie_map_registers(pdev, child, port);
+ if (IS_ERR(port->base)) {
+ dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n",
+ port->port, port->lane);
+ port->base = NULL;
+ clk_disable_unprepare(port->clk);
+ continue;
+ }
+
+ mvebu_pcie_set_local_dev_nr(port, 1);
+
+ port->dn = child;
+ mvebu_sw_pci_bridge_init(port);
+ i++;
+ }
+
+ pcie->nports = i;
+
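+ /*
+ * Map the PCI I/O space into the kernel's virtual address space,
+ * one 64 KiB window per pci_ioremap_io() call.
+ */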
+ for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K)
+ pci_ioremap_io(i, pcie->io.start + i);
+
+ mvebu_pcie_msi_enable(pcie);
+ mvebu_pcie_enable(pcie);
+
+
+ return 0;
+}
+
+static const struct of_device_id mvebu_pcie_of_match_table[] = {
+ { .compatible = "marvell,armada-xp-pcie", },
+ { .compatible = "marvell,armada-370-pcie", },
+ { .compatible = "marvell,dove-pcie", },
+ { .compatible = "marvell,kirkwood-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
+
+static const struct dev_pm_ops mvebu_pcie_pm_ops = {
+ .suspend_noirq = mvebu_pcie_suspend,
+ .resume_noirq = mvebu_pcie_resume,
+};
+
+static struct platform_driver mvebu_pcie_driver = {
+ .driver = {
+ .name = "mvebu-pcie",
+ .of_match_table = mvebu_pcie_of_match_table,
+ /* driver unloading/unbinding currently not supported */
+ .suppress_bind_attrs = true,
+ .pm = &mvebu_pcie_pm_ops,
+ },
+ .probe = mvebu_pcie_probe,
+};
+module_platform_driver(mvebu_pcie_driver);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell EBU PCIe driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
new file mode 100644
index 000000000..367e28fa7
--- /dev/null
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -0,0 +1,383 @@
+/*
+ * pci-rcar-gen2: internal PCI bus support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+/* AHB-PCI Bridge PCI communication registers */
+#define RCAR_AHBPCI_PCICOM_OFFSET 0x800
+
+#define RCAR_PCIAHB_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x00)
+#define RCAR_PCIAHB_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x04)
+#define RCAR_PCIAHB_PREFETCH0 0x0
+#define RCAR_PCIAHB_PREFETCH4 0x1
+#define RCAR_PCIAHB_PREFETCH8 0x2
+#define RCAR_PCIAHB_PREFETCH16 0x3
+
+#define RCAR_AHBPCI_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x10)
+#define RCAR_AHBPCI_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x14)
+#define RCAR_AHBPCI_WIN_CTR_MEM (3 << 1)
+#define RCAR_AHBPCI_WIN_CTR_CFG (5 << 1)
+#define RCAR_AHBPCI_WIN1_HOST (1 << 30)
+#define RCAR_AHBPCI_WIN1_DEVICE (1 << 31)
+
+#define RCAR_PCI_INT_ENABLE_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x20)
+#define RCAR_PCI_INT_STATUS_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x24)
+#define RCAR_PCI_INT_SIGTABORT (1 << 0)
+#define RCAR_PCI_INT_SIGRETABORT (1 << 1)
+#define RCAR_PCI_INT_REMABORT (1 << 2)
+#define RCAR_PCI_INT_PERR (1 << 3)
+#define RCAR_PCI_INT_SIGSERR (1 << 4)
+#define RCAR_PCI_INT_RESERR (1 << 5)
+#define RCAR_PCI_INT_WIN1ERR (1 << 12)
+#define RCAR_PCI_INT_WIN2ERR (1 << 13)
+#define RCAR_PCI_INT_A (1 << 16)
+#define RCAR_PCI_INT_B (1 << 17)
+#define RCAR_PCI_INT_PME (1 << 19)
+#define RCAR_PCI_INT_ALLERRORS (RCAR_PCI_INT_SIGTABORT | \
+ RCAR_PCI_INT_SIGRETABORT | \
+ RCAR_PCI_INT_REMABORT | \
+ RCAR_PCI_INT_PERR | \
+ RCAR_PCI_INT_SIGSERR | \
+ RCAR_PCI_INT_RESERR | \
+ RCAR_PCI_INT_WIN1ERR | \
+ RCAR_PCI_INT_WIN2ERR)
+
+#define RCAR_AHB_BUS_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x30)
+#define RCAR_AHB_BUS_MMODE_HTRANS (1 << 0)
+#define RCAR_AHB_BUS_MMODE_BYTE_BURST (1 << 1)
+#define RCAR_AHB_BUS_MMODE_WR_INCR (1 << 2)
+#define RCAR_AHB_BUS_MMODE_HBUS_REQ (1 << 7)
+#define RCAR_AHB_BUS_SMODE_READYCTR (1 << 17)
+#define RCAR_AHB_BUS_MODE (RCAR_AHB_BUS_MMODE_HTRANS | \
+ RCAR_AHB_BUS_MMODE_BYTE_BURST | \
+ RCAR_AHB_BUS_MMODE_WR_INCR | \
+ RCAR_AHB_BUS_MMODE_HBUS_REQ | \
+ RCAR_AHB_BUS_SMODE_READYCTR)
+
+#define RCAR_USBCTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x34)
+#define RCAR_USBCTR_USBH_RST (1 << 0)
+#define RCAR_USBCTR_PCICLK_MASK (1 << 1)
+#define RCAR_USBCTR_PLL_RST (1 << 2)
+#define RCAR_USBCTR_DIRPD (1 << 8)
+#define RCAR_USBCTR_PCIAHB_WIN2_EN (1 << 9)
+#define RCAR_USBCTR_PCIAHB_WIN1_256M (0 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_512M (1 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_1G (2 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_2G (3 << 10)
+#define RCAR_USBCTR_PCIAHB_WIN1_MASK (3 << 10)
+
+#define RCAR_PCI_ARBITER_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x40)
+#define RCAR_PCI_ARBITER_PCIREQ0 (1 << 0)
+#define RCAR_PCI_ARBITER_PCIREQ1 (1 << 1)
+#define RCAR_PCI_ARBITER_PCIBP_MODE (1 << 12)
+
+#define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48)
+
+struct rcar_pci_priv {
+ struct device *dev;
+ void __iomem *reg;
+ struct resource io_res;
+ struct resource mem_res;
+ struct resource *cfg_res;
+ unsigned int busnr;
+ int irq;
+ unsigned long window_size;
+};
+
+/* PCI configuration space operations */
+static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct pci_sys_data *sys = bus->sysdata;
+ struct rcar_pci_priv *priv = sys->private_data;
+ int slot, val;
+
+ if (sys->busnr != bus->number || PCI_FUNC(devfn))
+ return NULL;
+
+ /* Only one EHCI/OHCI device built-in */
+ slot = PCI_SLOT(devfn);
+ if (slot > 2)
+ return NULL;
+
+ /* bridge logic only has registers to 0x40 */
+ if (slot == 0x0 && where >= 0x40)
+ return NULL;
+
+ val = slot ? RCAR_AHBPCI_WIN1_DEVICE | RCAR_AHBPCI_WIN_CTR_CFG :
+ RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG;
+
+ iowrite32(val, priv->reg + RCAR_AHBPCI_WIN1_CTR_REG);
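+ /*
+ * The write above points config window 1 at either the host bridge
+ * (slot 0) or the external device; slots 0 and 1 are then reached at
+ * offset 0 and slot 2 at offset 0x100 of the register space.
+ */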
+ return priv->reg + (slot >> 1) * 0x100 + where;
+}
+
+/* PCI interrupt mapping */
+static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct pci_sys_data *sys = dev->bus->sysdata;
+ struct rcar_pci_priv *priv = sys->private_data;
+ int irq;
+
+ irq = of_irq_parse_and_map_pci(dev, slot, pin);
+ if (!irq)
+ irq = priv->irq;
+
+ return irq;
+}
+
+#ifdef CONFIG_PCI_DEBUG
+/* If debugging is enabled, attach an error handler IRQ to the bridge */
+
+static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
+{
+ struct rcar_pci_priv *priv = pw;
+ u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG);
+
+ if (status & RCAR_PCI_INT_ALLERRORS) {
+ dev_err(priv->dev, "error irq: status %08x\n", status);
+
+ /* clear the error(s) */
+ iowrite32(status & RCAR_PCI_INT_ALLERRORS,
+ priv->reg + RCAR_PCI_INT_STATUS_REG);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv)
+{
+ int ret;
+ u32 val;
+
+ ret = devm_request_irq(priv->dev, priv->irq, rcar_pci_err_irq,
+ IRQF_SHARED, "error irq", priv);
+ if (ret) {
+ dev_err(priv->dev, "cannot claim IRQ for error handling\n");
+ return;
+ }
+
+ val = ioread32(priv->reg + RCAR_PCI_INT_ENABLE_REG);
+ val |= RCAR_PCI_INT_ALLERRORS;
+ iowrite32(val, priv->reg + RCAR_PCI_INT_ENABLE_REG);
+}
+#else
+static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { }
+#endif
+
+/* PCI host controller setup */
+static int rcar_pci_setup(int nr, struct pci_sys_data *sys)
+{
+ struct rcar_pci_priv *priv = sys->private_data;
+ void __iomem *reg = priv->reg;
+ u32 val;
+
+ pm_runtime_enable(priv->dev);
+ pm_runtime_get_sync(priv->dev);
+
+ val = ioread32(reg + RCAR_PCI_UNIT_REV_REG);
+ dev_info(priv->dev, "PCI: bus%u revision %x\n", sys->busnr, val);
+
+ /* Disable Direct Power Down State and assert reset */
+ val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD;
+ val |= RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST;
+ iowrite32(val, reg + RCAR_USBCTR_REG);
+ udelay(4);
+
+ /* De-assert reset and reset PCIAHB window1 size */
+ val &= ~(RCAR_USBCTR_PCIAHB_WIN1_MASK | RCAR_USBCTR_PCICLK_MASK |
+ RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST);
+
+ /* Set up PCIAHB window 1 size */
+ switch (priv->window_size) {
+ case SZ_2G:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_2G;
+ break;
+ case SZ_1G:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_1G;
+ break;
+ case SZ_512M:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_512M;
+ break;
+ default:
+ pr_warn("unknown window size %ld - defaulting to 256M\n",
+ priv->window_size);
+ priv->window_size = SZ_256M;
+ /* fall-through */
+ case SZ_256M:
+ val |= RCAR_USBCTR_PCIAHB_WIN1_256M;
+ break;
+ }
+ iowrite32(val, reg + RCAR_USBCTR_REG);
+
+ /* Configure AHB master and slave modes */
+ iowrite32(RCAR_AHB_BUS_MODE, reg + RCAR_AHB_BUS_CTR_REG);
+
+ /* Configure PCI arbiter */
+ val = ioread32(reg + RCAR_PCI_ARBITER_CTR_REG);
+ val |= RCAR_PCI_ARBITER_PCIREQ0 | RCAR_PCI_ARBITER_PCIREQ1 |
+ RCAR_PCI_ARBITER_PCIBP_MODE;
+ iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG);
+
+ /* PCI-AHB mapping: 0x40000000 base */
+ iowrite32(0x40000000 | RCAR_PCIAHB_PREFETCH16,
+ reg + RCAR_PCIAHB_WIN1_CTR_REG);
+
+ /* AHB-PCI mapping: OHCI/EHCI registers */
+ val = priv->mem_res.start | RCAR_AHBPCI_WIN_CTR_MEM;
+ iowrite32(val, reg + RCAR_AHBPCI_WIN2_CTR_REG);
+
+ /* Enable AHB-PCI bridge PCI configuration access */
+ iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG,
+ reg + RCAR_AHBPCI_WIN1_CTR_REG);
+ /* Set PCI-AHB Window1 address */
+ iowrite32(0x40000000 | PCI_BASE_ADDRESS_MEM_PREFETCH,
+ reg + PCI_BASE_ADDRESS_1);
+ /* Set AHB-PCI bridge PCI communication area address */
+ val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET;
+ iowrite32(val, reg + PCI_BASE_ADDRESS_0);
+
+ val = ioread32(reg + PCI_COMMAND);
+ val |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
+ PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+ iowrite32(val, reg + PCI_COMMAND);
+
+ /* Enable PCI interrupts */
+ iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME,
+ reg + RCAR_PCI_INT_ENABLE_REG);
+
+ if (priv->irq > 0)
+ rcar_pci_setup_errirq(priv);
+
+ /* Add PCI resources */
+ pci_add_resource(&sys->resources, &priv->io_res);
+ pci_add_resource(&sys->resources, &priv->mem_res);
+
+ /* Set up the bus number from the platform device ID or the DT bus-range */
+ sys->busnr = priv->busnr;
+ return 1;
+}
+
+static struct pci_ops rcar_pci_ops = {
+ .map_bus = rcar_pci_cfg_base,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static int rcar_pci_probe(struct platform_device *pdev)
+{
+ struct resource *cfg_res, *mem_res;
+ struct rcar_pci_priv *priv;
+ void __iomem *reg;
+ struct hw_pci hw;
+ void *hw_private[1];
+
+ cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg = devm_ioremap_resource(&pdev->dev, cfg_res);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!mem_res || !mem_res->start)
+ return -ENODEV;
+
+ if (mem_res->start & 0xFFFF)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev,
+ sizeof(struct rcar_pci_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->mem_res = *mem_res;
+ /*
+ * The controller does not support/use port I/O,
+ * so set up a dummy port I/O region here.
+ */
+ priv->io_res.start = priv->mem_res.start;
+ priv->io_res.end = priv->mem_res.end;
+ priv->io_res.flags = IORESOURCE_IO;
+
+ priv->cfg_res = cfg_res;
+
+ priv->irq = platform_get_irq(pdev, 0);
+ priv->reg = reg;
+ priv->dev = &pdev->dev;
+
+ if (priv->irq < 0) {
+ dev_err(&pdev->dev, "no valid irq found\n");
+ return priv->irq;
+ }
+
+ priv->window_size = SZ_1G;
+
+ if (pdev->dev.of_node) {
+ struct resource busnr;
+ int ret;
+
+ ret = of_pci_parse_bus_range(pdev->dev.of_node, &busnr);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse bus-range\n");
+ return ret;
+ }
+
+ priv->busnr = busnr.start;
+ if (busnr.end != busnr.start)
+ dev_warn(&pdev->dev, "only one bus number supported\n");
+ } else {
+ priv->busnr = pdev->id;
+ }
+
+ hw_private[0] = priv;
+ memset(&hw, 0, sizeof(hw));
+ hw.nr_controllers = ARRAY_SIZE(hw_private);
+ hw.private_data = hw_private;
+ hw.map_irq = rcar_pci_map_irq;
+ hw.ops = &rcar_pci_ops;
+ hw.setup = rcar_pci_setup;
+ pci_common_init_dev(&pdev->dev, &hw);
+ return 0;
+}
+
+static const struct of_device_id rcar_pci_of_match[] = {
+ { .compatible = "renesas,pci-r8a7790", },
+ { .compatible = "renesas,pci-r8a7791", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, rcar_pci_of_match);
+
+static struct platform_driver rcar_pci_driver = {
+ .driver = {
+ .name = "pci-rcar-gen2",
+ .suppress_bind_attrs = true,
+ .of_match_table = rcar_pci_of_match,
+ },
+ .probe = rcar_pci_probe,
+};
+
+module_platform_driver(rcar_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Renesas R-Car Gen2 internal PCI");
+MODULE_AUTHOR("Valentine Barshak <valentine.barshak@cogentembedded.com>");
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
new file mode 100644
index 000000000..00e92720d
--- /dev/null
+++ b/drivers/pci/host/pci-tegra.c
@@ -0,0 +1,2079 @@
+/*
+ * PCIe host controller driver for Tegra SoCs
+ *
+ * Copyright (c) 2010, CompuLab, Ltd.
+ * Author: Mike Rapoport <mike@compulab.co.il>
+ *
+ * Based on NVIDIA PCIe driver
+ * Copyright (c) 2008-2009, NVIDIA Corporation.
+ *
+ * Bits taken from arch/arm/mach-dove/pcie.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/regulator/consumer.h>
+
+#include <soc/tegra/cpuidle.h>
+#include <soc/tegra/pmc.h>
+
+#include <asm/mach/irq.h>
+#include <asm/mach/map.h>
+#include <asm/mach/pci.h>
+
+#define INT_PCI_MSI_NR (8 * 32)
+
+/* register definitions */
+
+#define AFI_AXI_BAR0_SZ 0x00
+#define AFI_AXI_BAR1_SZ 0x04
+#define AFI_AXI_BAR2_SZ 0x08
+#define AFI_AXI_BAR3_SZ 0x0c
+#define AFI_AXI_BAR4_SZ 0x10
+#define AFI_AXI_BAR5_SZ 0x14
+
+#define AFI_AXI_BAR0_START 0x18
+#define AFI_AXI_BAR1_START 0x1c
+#define AFI_AXI_BAR2_START 0x20
+#define AFI_AXI_BAR3_START 0x24
+#define AFI_AXI_BAR4_START 0x28
+#define AFI_AXI_BAR5_START 0x2c
+
+#define AFI_FPCI_BAR0 0x30
+#define AFI_FPCI_BAR1 0x34
+#define AFI_FPCI_BAR2 0x38
+#define AFI_FPCI_BAR3 0x3c
+#define AFI_FPCI_BAR4 0x40
+#define AFI_FPCI_BAR5 0x44
+
+#define AFI_CACHE_BAR0_SZ 0x48
+#define AFI_CACHE_BAR0_ST 0x4c
+#define AFI_CACHE_BAR1_SZ 0x50
+#define AFI_CACHE_BAR1_ST 0x54
+
+#define AFI_MSI_BAR_SZ 0x60
+#define AFI_MSI_FPCI_BAR_ST 0x64
+#define AFI_MSI_AXI_BAR_ST 0x68
+
+#define AFI_MSI_VEC0 0x6c
+#define AFI_MSI_VEC1 0x70
+#define AFI_MSI_VEC2 0x74
+#define AFI_MSI_VEC3 0x78
+#define AFI_MSI_VEC4 0x7c
+#define AFI_MSI_VEC5 0x80
+#define AFI_MSI_VEC6 0x84
+#define AFI_MSI_VEC7 0x88
+
+#define AFI_MSI_EN_VEC0 0x8c
+#define AFI_MSI_EN_VEC1 0x90
+#define AFI_MSI_EN_VEC2 0x94
+#define AFI_MSI_EN_VEC3 0x98
+#define AFI_MSI_EN_VEC4 0x9c
+#define AFI_MSI_EN_VEC5 0xa0
+#define AFI_MSI_EN_VEC6 0xa4
+#define AFI_MSI_EN_VEC7 0xa8
+
+#define AFI_CONFIGURATION 0xac
+#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
+
+#define AFI_FPCI_ERROR_MASKS 0xb0
+
+#define AFI_INTR_MASK 0xb4
+#define AFI_INTR_MASK_INT_MASK (1 << 0)
+#define AFI_INTR_MASK_MSI_MASK (1 << 8)
+
+#define AFI_INTR_CODE 0xb8
+#define AFI_INTR_CODE_MASK 0xf
+#define AFI_INTR_INI_SLAVE_ERROR 1
+#define AFI_INTR_INI_DECODE_ERROR 2
+#define AFI_INTR_TARGET_ABORT 3
+#define AFI_INTR_MASTER_ABORT 4
+#define AFI_INTR_INVALID_WRITE 5
+#define AFI_INTR_LEGACY 6
+#define AFI_INTR_FPCI_DECODE_ERROR 7
+#define AFI_INTR_AXI_DECODE_ERROR 8
+#define AFI_INTR_FPCI_TIMEOUT 9
+#define AFI_INTR_PE_PRSNT_SENSE 10
+#define AFI_INTR_PE_CLKREQ_SENSE 11
+#define AFI_INTR_CLKCLAMP_SENSE 12
+#define AFI_INTR_RDY4PD_SENSE 13
+#define AFI_INTR_P2P_ERROR 14
+
+#define AFI_INTR_SIGNATURE 0xbc
+#define AFI_UPPER_FPCI_ADDRESS 0xc0
+#define AFI_SM_INTR_ENABLE 0xc4
+#define AFI_SM_INTR_INTA_ASSERT (1 << 0)
+#define AFI_SM_INTR_INTB_ASSERT (1 << 1)
+#define AFI_SM_INTR_INTC_ASSERT (1 << 2)
+#define AFI_SM_INTR_INTD_ASSERT (1 << 3)
+#define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
+#define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
+#define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
+#define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
+
+#define AFI_AFI_INTR_ENABLE 0xc8
+#define AFI_INTR_EN_INI_SLVERR (1 << 0)
+#define AFI_INTR_EN_INI_DECERR (1 << 1)
+#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
+#define AFI_INTR_EN_TGT_DECERR (1 << 3)
+#define AFI_INTR_EN_TGT_WRERR (1 << 4)
+#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
+#define AFI_INTR_EN_AXI_DECERR (1 << 6)
+#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
+#define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
+
+#define AFI_PCIE_CONFIG 0x0f8
+#define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
+#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
+#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
+
+#define AFI_FUSE 0x104
+#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
+
+#define AFI_PEX0_CTRL 0x110
+#define AFI_PEX1_CTRL 0x118
+#define AFI_PEX2_CTRL 0x128
+#define AFI_PEX_CTRL_RST (1 << 0)
+#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
+#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
+#define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)
+
+#define AFI_PLLE_CONTROL 0x160
+#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
+#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
+
+#define AFI_PEXBIAS_CTRL_0 0x168
+
+#define RP_VEND_XP 0x00000F00
+#define RP_VEND_XP_DL_UP (1 << 30)
+
+#define RP_PRIV_MISC 0x00000FE0
+#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
+#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
+
+#define RP_LINK_CONTROL_STATUS 0x00000090
+#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
+#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
+
+#define PADS_CTL_SEL 0x0000009C
+
+#define PADS_CTL 0x000000A0
+#define PADS_CTL_IDDQ_1L (1 << 0)
+#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
+#define PADS_CTL_RX_DATA_EN_1L (1 << 10)
+
+#define PADS_PLL_CTL_TEGRA20 0x000000B8
+#define PADS_PLL_CTL_TEGRA30 0x000000B4
+#define PADS_PLL_CTL_RST_B4SM (1 << 1)
+#define PADS_PLL_CTL_LOCKDET (1 << 8)
+#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
+#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
+#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
+#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
+#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
+#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
+#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
+#define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
+
+#define PADS_REFCLK_CFG0 0x000000C8
+#define PADS_REFCLK_CFG1 0x000000CC
+#define PADS_REFCLK_BIAS 0x000000D0
+
+/*
+ * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
+ * entries, one entry per PCIe port. These field definitions and desired
+ * values aren't in the TRM, but do come from NVIDIA.
+ */
+#define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */
+#define PADS_REFCLK_CFG_E_TERM_SHIFT 7
+#define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */
+#define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
+
+/* Default value provided by HW engineering is 0xfa5c */
+#define PADS_REFCLK_CFG_VALUE \
+ ( \
+ (0x17 << PADS_REFCLK_CFG_TERM_SHIFT) | \
+ (0 << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
+ (0xa << PADS_REFCLK_CFG_PREDI_SHIFT) | \
+ (0xf << PADS_REFCLK_CFG_DRVI_SHIFT) \
+ )
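+
+/*
+ * Sanity check: (0xf << 12) | (0xa << 8) | (0 << 7) | (0x17 << 2) == 0xfa5c,
+ * matching the default quoted above.
+ */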
+
+struct tegra_msi {
+ struct msi_controller chip;
+ DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ struct irq_domain *domain;
+ unsigned long pages;
+ struct mutex lock;
+ int irq;
+};
+
+/* used to differentiate between Tegra SoC generations */
+struct tegra_pcie_soc_data {
+ unsigned int num_ports;
+ unsigned int msi_base_shift;
+ u32 pads_pll_ctl;
+ u32 tx_ref_sel;
+ bool has_pex_clkreq_en;
+ bool has_pex_bias_ctrl;
+ bool has_intr_prsnt_sense;
+ bool has_cml_clk;
+ bool has_gen2;
+};
+
+static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
+{
+ return container_of(chip, struct tegra_msi, chip);
+}
+
+struct tegra_pcie {
+ struct device *dev;
+
+ void __iomem *pads;
+ void __iomem *afi;
+ int irq;
+
+ struct list_head buses;
+ struct resource *cs;
+
+ struct resource all;
+ struct resource io;
+ struct resource pio;
+ struct resource mem;
+ struct resource prefetch;
+ struct resource busn;
+
+ struct clk *pex_clk;
+ struct clk *afi_clk;
+ struct clk *pll_e;
+ struct clk *cml_clk;
+
+ struct reset_control *pex_rst;
+ struct reset_control *afi_rst;
+ struct reset_control *pcie_xrst;
+
+ struct phy *phy;
+
+ struct tegra_msi msi;
+
+ struct list_head ports;
+ unsigned int num_ports;
+ u32 xbar_config;
+
+ struct regulator_bulk_data *supplies;
+ unsigned int num_supplies;
+
+ const struct tegra_pcie_soc_data *soc_data;
+ struct dentry *debugfs;
+};
+
+struct tegra_pcie_port {
+ struct tegra_pcie *pcie;
+ struct list_head list;
+ struct resource regs;
+ void __iomem *base;
+ unsigned int index;
+ unsigned int lanes;
+};
+
+struct tegra_pcie_bus {
+ struct vm_struct *area;
+ struct list_head list;
+ unsigned int nr;
+};
+
+static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
+ unsigned long offset)
+{
+ writel(value, pcie->afi + offset);
+}
+
+static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
+{
+ return readl(pcie->afi + offset);
+}
+
+static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
+ unsigned long offset)
+{
+ writel(value, pcie->pads + offset);
+}
+
+static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
+{
+ return readl(pcie->pads + offset);
+}
+
+/*
+ * The configuration space mapping on Tegra is somewhat similar to the ECAM
+ * defined by PCIe. However it deviates a bit in how the 4 bits for extended
+ * register accesses are mapped:
+ *
+ * [27:24] extended register number
+ * [23:16] bus number
+ * [15:11] device number
+ * [10: 8] function number
+ * [ 7: 0] register number
+ *
+ * Mapping the whole extended configuration space would require 256 MiB of
+ * virtual address space, only a small part of which will actually be used.
+ * To work around this, 1 MiB of virtual address space is allocated per bus
+ * when the bus is first accessed. When the physical range is mapped, the
+ * bus number bits are hidden so that the extended register number bits
+ * appear as bits [19:16]. Therefore the virtual mapping looks like this:
+ *
+ * [19:16] extended register number
+ * [15:11] device number
+ * [10: 8] function number
+ * [ 7: 0] register number
+ *
+ * This is achieved by stitching together 16 chunks of 64 KiB of physical
+ * address space via the MMU.
+ */
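+/*
+ * Worked example (illustrative only): devfn = PCI_DEVFN(2, 1), where = 0x104
+ * gives ((0x104 & 0xf00) << 8) | (2 << 11) | (1 << 8) | (0x104 & 0xfc) =
+ * 0x10000 | 0x1000 | 0x100 | 0x4 = 0x11104, i.e. extended register 1,
+ * device 2, function 1, register 0x04 in the layout above.
+ */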
+static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
+{
+ return ((where & 0xf00) << 8) | (PCI_SLOT(devfn) << 11) |
+ (PCI_FUNC(devfn) << 8) | (where & 0xfc);
+}
+
+static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
+ unsigned int busnr)
+{
+ pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
+ L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
+ phys_addr_t cs = pcie->cs->start;
+ struct tegra_pcie_bus *bus;
+ unsigned int i;
+ int err;
+
+ bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&bus->list);
+ bus->nr = busnr;
+
+ /* allocate 1 MiB of virtual address space */
+ bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
+ if (!bus->area) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ /* map the 16 chunks of 64 KiB each */
+ for (i = 0; i < 16; i++) {
+ unsigned long virt = (unsigned long)bus->area->addr +
+ i * SZ_64K;
+ phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;
+
+ err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
+ if (err < 0) {
+ dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
+ err);
+ goto unmap;
+ }
+ }
+
+ return bus;
+
+unmap:
+ vunmap(bus->area->addr);
+free:
+ kfree(bus);
+ return ERR_PTR(err);
+}
+
+/*
+ * Look up a virtual address mapping for the specified bus number. If no such
+ * mapping exists, try to create one.
+ */
+static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
+ unsigned int busnr)
+{
+ struct tegra_pcie_bus *bus;
+
+ list_for_each_entry(bus, &pcie->buses, list)
+ if (bus->nr == busnr)
+ return (void __iomem *)bus->area->addr;
+
+ bus = tegra_pcie_bus_alloc(pcie, busnr);
+ if (IS_ERR(bus))
+ return NULL;
+
+ list_add_tail(&bus->list, &pcie->buses);
+
+ return (void __iomem *)bus->area->addr;
+}
+
+static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
+ void __iomem *addr = NULL;
+
+ if (bus->number == 0) {
+ unsigned int slot = PCI_SLOT(devfn);
+ struct tegra_pcie_port *port;
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ if (port->index + 1 == slot) {
+ addr = port->base + (where & ~3);
+ break;
+ }
+ }
+ } else {
+ addr = tegra_pcie_bus_map(pcie, bus->number);
+ if (!addr) {
+ dev_err(pcie->dev,
+ "failed to map cfg. space for bus %u\n",
+ bus->number);
+ return NULL;
+ }
+
+ addr += tegra_pcie_conf_offset(devfn, where);
+ }
+
+ return addr;
+}
+
+static struct pci_ops tegra_pcie_ops = {
+ .map_bus = tegra_pcie_conf_address,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+};
+
+static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
+{
+ unsigned long ret = 0;
+
+ switch (port->index) {
+ case 0:
+ ret = AFI_PEX0_CTRL;
+ break;
+
+ case 1:
+ ret = AFI_PEX1_CTRL;
+ break;
+
+ case 2:
+ ret = AFI_PEX2_CTRL;
+ break;
+ }
+
+ return ret;
+}
+
+static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
+{
+ unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+ unsigned long value;
+
+ /* pulse reset signal */
+ value = afi_readl(port->pcie, ctrl);
+ value &= ~AFI_PEX_CTRL_RST;
+ afi_writel(port->pcie, value, ctrl);
+
+ usleep_range(1000, 2000);
+
+ value = afi_readl(port->pcie, ctrl);
+ value |= AFI_PEX_CTRL_RST;
+ afi_writel(port->pcie, value, ctrl);
+}
+
+static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
+{
+ const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
+ unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+ unsigned long value;
+
+ /* enable reference clock */
+ value = afi_readl(port->pcie, ctrl);
+ value |= AFI_PEX_CTRL_REFCLK_EN;
+
+ if (soc->has_pex_clkreq_en)
+ value |= AFI_PEX_CTRL_CLKREQ_EN;
+
+ value |= AFI_PEX_CTRL_OVERRIDE_EN;
+
+ afi_writel(port->pcie, value, ctrl);
+
+ tegra_pcie_port_reset(port);
+}
+
+static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
+{
+ const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
+ unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
+ unsigned long value;
+
+ /* assert port reset */
+ value = afi_readl(port->pcie, ctrl);
+ value &= ~AFI_PEX_CTRL_RST;
+ afi_writel(port->pcie, value, ctrl);
+
+ /* disable reference clock */
+ value = afi_readl(port->pcie, ctrl);
+
+ if (soc->has_pex_clkreq_en)
+ value &= ~AFI_PEX_CTRL_CLKREQ_EN;
+
+ value &= ~AFI_PEX_CTRL_REFCLK_EN;
+ afi_writel(port->pcie, value, ctrl);
+}
+
+static void tegra_pcie_port_free(struct tegra_pcie_port *port)
+{
+ struct tegra_pcie *pcie = port->pcie;
+
+ devm_iounmap(pcie->dev, port->base);
+ devm_release_mem_region(pcie->dev, port->regs.start,
+ resource_size(&port->regs));
+ list_del(&port->list);
+ devm_kfree(pcie->dev, port);
+}
+
+/* Tegra PCIE root complex wrongly reports device class */
+static void tegra_pcie_fixup_class(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
+
+/* Tegra PCIE requires relaxed ordering */
+static void tegra_pcie_relax_enable(struct pci_dev *dev)
+{
+ pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
+
+static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct tegra_pcie *pcie = sys_to_pcie(sys);
+ int err;
+
+ err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
+ if (err < 0)
+ return err;
+
+ err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
+ if (err)
+ return err;
+
+ pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
+ pci_add_resource_offset(&sys->resources, &pcie->prefetch,
+ sys->mem_offset);
+ pci_add_resource(&sys->resources, &pcie->busn);
+
+ pci_ioremap_io(pcie->pio.start, pcie->io.start);
+
+ return 1;
+}
+
+static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
+{
+ struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
+ int irq;
+
+ tegra_cpuidle_pcie_irqs_in_use();
+
+ irq = of_irq_parse_and_map_pci(pdev, slot, pin);
+ if (!irq)
+ irq = pcie->irq;
+
+ return irq;
+}
+
+static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+{
+ struct tegra_pcie *pcie = sys_to_pcie(sys);
+ struct pci_bus *bus;
+
+ bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
+ &sys->resources);
+ if (!bus)
+ return NULL;
+
+ pci_scan_child_bus(bus);
+
+ return bus;
+}
+
+static irqreturn_t tegra_pcie_isr(int irq, void *arg)
+{
+ static const char * const err_msg[] = {
+ "Unknown",
+ "AXI slave error",
+ "AXI decode error",
+ "Target abort",
+ "Master abort",
+ "Invalid write",
+ "Legacy interrupt",
+ "Response decoding error",
+ "AXI response decoding error",
+ "Transaction timeout",
+ "Slot present pin change",
+ "Slot clock request change",
+ "TMS clock ramp change",
+ "TMS ready for power down",
+ "Peer2Peer error",
+ };
+ struct tegra_pcie *pcie = arg;
+ u32 code, signature;
+
+ code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
+ signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
+ afi_writel(pcie, 0, AFI_INTR_CODE);
+
+ if (code == AFI_INTR_LEGACY)
+ return IRQ_NONE;
+
+ if (code >= ARRAY_SIZE(err_msg))
+ code = 0;
+
+ /*
+ * do not pollute kernel log with master abort reports since they
+ * happen a lot during enumeration
+ */
+ if (code == AFI_INTR_MASTER_ABORT)
+ dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
+ signature);
+ else
+ dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
+ signature);
+
+ if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
+ code == AFI_INTR_FPCI_DECODE_ERROR) {
+ u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
+ u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
+
+ if (code == AFI_INTR_MASTER_ABORT)
+ dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
+ else
+ dev_err(pcie->dev, " FPCI address: %10llx\n", address);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * FPCI map is as follows:
+ * - 0xfdfc000000: I/O space
+ * - 0xfdfe000000: type 0 configuration space
+ * - 0xfdff000000: type 1 configuration space
+ * - 0xfe00000000: type 0 extended configuration space
+ * - 0xfe10000000: type 1 extended configuration space
+ */
+static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
+{
+ u32 fpci_bar, size, axi_address;
+
+ /* Bar 0: type 1 extended configuration space */
+ fpci_bar = 0xfe100000;
+ size = resource_size(pcie->cs);
+ axi_address = pcie->cs->start;
+ afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);
+
+ /* Bar 1: downstream I/O BAR */
+ fpci_bar = 0xfdfc0000;
+ size = resource_size(&pcie->io);
+ axi_address = pcie->io.start;
+ afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
+
+ /* Bar 2: prefetchable memory BAR */
+ fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
+ size = resource_size(&pcie->prefetch);
+ axi_address = pcie->prefetch.start;
+ afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
+
+ /* Bar 3: non-prefetchable memory BAR */
+ fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
+ size = resource_size(&pcie->mem);
+ axi_address = pcie->mem.start;
+ afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
+ afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
+ afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
+
+ /* NULL out the remaining BARs as they are not used */
+ afi_writel(pcie, 0, AFI_AXI_BAR4_START);
+ afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
+ afi_writel(pcie, 0, AFI_FPCI_BAR4);
+
+ afi_writel(pcie, 0, AFI_AXI_BAR5_START);
+ afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
+ afi_writel(pcie, 0, AFI_FPCI_BAR5);
+
+ /* map all upstream transactions as uncached */
+ afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
+ afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
+ afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
+ afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
+
+ /* MSI translations are setup only when needed */
+ afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
+ afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
+ afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
+}
+
+static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
+{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ u32 value;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_before(jiffies, timeout)) {
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ if (value & PADS_PLL_CTL_LOCKDET)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ u32 value;
+ int err;
+
+ /* initialize internal PHY, enable up to 16 PCIE lanes */
+ pads_writel(pcie, 0x0, PADS_CTL_SEL);
+
+ /* override IDDQ to 1 on all 4 lanes */
+ value = pads_readl(pcie, PADS_CTL);
+ value |= PADS_CTL_IDDQ_1L;
+ pads_writel(pcie, value, PADS_CTL);
+
+ /*
+ * Set up PHY PLL inputs select PLLE output as refclock,
+ * set TX ref sel to div10 (not div5).
+ */
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
+ value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
+ pads_writel(pcie, value, soc->pads_pll_ctl);
+
+ /* reset PLL */
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ value &= ~PADS_PLL_CTL_RST_B4SM;
+ pads_writel(pcie, value, soc->pads_pll_ctl);
+
+ usleep_range(20, 100);
+
+ /* take PLL out of reset */
+ value = pads_readl(pcie, soc->pads_pll_ctl);
+ value |= PADS_PLL_CTL_RST_B4SM;
+ pads_writel(pcie, value, soc->pads_pll_ctl);
+
+ /* Configure the reference clock driver */
+ value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
+ pads_writel(pcie, value, PADS_REFCLK_CFG0);
+ if (soc->num_ports > 2)
+ pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);
+
+ /* wait for the PLL to lock */
+ err = tegra_pcie_pll_wait(pcie, 500);
+ if (err < 0) {
+ dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
+ return err;
+ }
+
+ /* turn off IDDQ override */
+ value = pads_readl(pcie, PADS_CTL);
+ value &= ~PADS_CTL_IDDQ_1L;
+ pads_writel(pcie, value, PADS_CTL);
+
+ /* enable TX/RX data */
+ value = pads_readl(pcie, PADS_CTL);
+ value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
+ pads_writel(pcie, value, PADS_CTL);
+
+ return 0;
+}
+
+static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ struct tegra_pcie_port *port;
+ unsigned long value;
+ int err;
+
+ /* enable PLL power down */
+ if (pcie->phy) {
+ value = afi_readl(pcie, AFI_PLLE_CONTROL);
+ value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
+ value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
+ afi_writel(pcie, value, AFI_PLLE_CONTROL);
+ }
+
+ /* power down PCIe slot clock bias pad */
+ if (soc->has_pex_bias_ctrl)
+ afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
+
+ /* configure mode and disable all ports */
+ value = afi_readl(pcie, AFI_PCIE_CONFIG);
+ value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
+ value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
+
+ list_for_each_entry(port, &pcie->ports, list)
+ value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+
+ afi_writel(pcie, value, AFI_PCIE_CONFIG);
+
+ if (soc->has_gen2) {
+ value = afi_readl(pcie, AFI_FUSE);
+ value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
+ afi_writel(pcie, value, AFI_FUSE);
+ } else {
+ value = afi_readl(pcie, AFI_FUSE);
+ value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
+ afi_writel(pcie, value, AFI_FUSE);
+ }
+
+ if (!pcie->phy)
+ err = tegra_pcie_phy_enable(pcie);
+ else
+ err = phy_power_on(pcie->phy);
+
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
+ return err;
+ }
+
+ /* take the PCIe interface module out of reset */
+ reset_control_deassert(pcie->pcie_xrst);
+
+ /* finally enable PCIe */
+ value = afi_readl(pcie, AFI_CONFIGURATION);
+ value |= AFI_CONFIGURATION_EN_FPCI;
+ afi_writel(pcie, value, AFI_CONFIGURATION);
+
+ value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
+ AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
+ AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;
+
+ if (soc->has_intr_prsnt_sense)
+ value |= AFI_INTR_EN_PRSNT_SENSE;
+
+ afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
+ afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);
+
+ /* don't enable MSI for now, only when needed */
+ afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
+
+ /* disable all exceptions */
+ afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
+
+ return 0;
+}
+
+static void tegra_pcie_power_off(struct tegra_pcie *pcie)
+{
+ int err;
+
+ /* TODO: disable and unprepare clocks? */
+
+ err = phy_power_off(pcie->phy);
+ if (err < 0)
+ dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);
+
+ reset_control_assert(pcie->pcie_xrst);
+ reset_control_assert(pcie->afi_rst);
+ reset_control_assert(pcie->pex_rst);
+
+ tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+
+ err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
+ if (err < 0)
+ dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
+}
+
+static int tegra_pcie_power_on(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ int err;
+
+ reset_control_assert(pcie->pcie_xrst);
+ reset_control_assert(pcie->afi_rst);
+ reset_control_assert(pcie->pex_rst);
+
+ tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
+
+ /* enable regulators */
+ err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
+ if (err < 0)
+ dev_err(pcie->dev, "failed to enable regulators: %d\n", err);
+
+ err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
+ pcie->pex_clk,
+ pcie->pex_rst);
+ if (err) {
+ dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
+ return err;
+ }
+
+ reset_control_deassert(pcie->afi_rst);
+
+ err = clk_prepare_enable(pcie->afi_clk);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
+ return err;
+ }
+
+ if (soc->has_cml_clk) {
+ err = clk_prepare_enable(pcie->cml_clk);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to enable CML clock: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ err = clk_prepare_enable(pcie->pll_e);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+
+ pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
+ if (IS_ERR(pcie->pex_clk))
+ return PTR_ERR(pcie->pex_clk);
+
+ pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
+ if (IS_ERR(pcie->afi_clk))
+ return PTR_ERR(pcie->afi_clk);
+
+ pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
+ if (IS_ERR(pcie->pll_e))
+ return PTR_ERR(pcie->pll_e);
+
+ if (soc->has_cml_clk) {
+ pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
+ if (IS_ERR(pcie->cml_clk))
+ return PTR_ERR(pcie->cml_clk);
+ }
+
+ return 0;
+}
+
+static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
+{
+ pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
+ if (IS_ERR(pcie->pex_rst))
+ return PTR_ERR(pcie->pex_rst);
+
+ pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
+ if (IS_ERR(pcie->afi_rst))
+ return PTR_ERR(pcie->afi_rst);
+
+ pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
+ if (IS_ERR(pcie->pcie_xrst))
+ return PTR_ERR(pcie->pcie_xrst);
+
+ return 0;
+}
+
+static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
+{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct resource *pads, *afi, *res;
+ int err;
+
+ err = tegra_pcie_clocks_get(pcie);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
+ return err;
+ }
+
+ err = tegra_pcie_resets_get(pcie);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get resets: %d\n", err);
+ return err;
+ }
+
+ pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
+ if (IS_ERR(pcie->phy)) {
+ err = PTR_ERR(pcie->phy);
+ dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
+ return err;
+ }
+
+ err = phy_init(pcie->phy);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
+ return err;
+ }
+
+ err = tegra_pcie_power_on(pcie);
+ if (err) {
+ dev_err(&pdev->dev, "failed to power up: %d\n", err);
+ return err;
+ }
+
+ pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
+ pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
+ if (IS_ERR(pcie->pads)) {
+ err = PTR_ERR(pcie->pads);
+ goto poweroff;
+ }
+
+ afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
+ pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
+ if (IS_ERR(pcie->afi)) {
+ err = PTR_ERR(pcie->afi);
+ goto poweroff;
+ }
+
+ /* request configuration space, but remap later, on demand */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
+ if (!res) {
+ err = -EADDRNOTAVAIL;
+ goto poweroff;
+ }
+
+ pcie->cs = devm_request_mem_region(pcie->dev, res->start,
+ resource_size(res), res->name);
+ if (!pcie->cs) {
+ err = -EADDRNOTAVAIL;
+ goto poweroff;
+ }
+
+ /* request interrupt */
+ err = platform_get_irq_byname(pdev, "intr");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+ goto poweroff;
+ }
+
+ pcie->irq = err;
+
+ err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
+ goto poweroff;
+ }
+
+ return 0;
+
+poweroff:
+ tegra_pcie_power_off(pcie);
+ return err;
+}
+
+static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
+{
+ int err;
+
+ if (pcie->irq > 0)
+ free_irq(pcie->irq, pcie);
+
+ tegra_pcie_power_off(pcie);
+
+ err = phy_exit(pcie->phy);
+ if (err < 0)
+ dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
+
+ return 0;
+}
+
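+/*
+ * Allocate a free MSI vector from the bitmap; returns the vector number,
+ * or -ENOSPC once all INT_PCI_MSI_NR vectors are in use.
+ */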
+static int tegra_msi_alloc(struct tegra_msi *chip)
+{
+ int msi;
+
+ mutex_lock(&chip->lock);
+
+ msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
+ if (msi < INT_PCI_MSI_NR)
+ set_bit(msi, chip->used);
+ else
+ msi = -ENOSPC;
+
+ mutex_unlock(&chip->lock);
+
+ return msi;
+}
+
+static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
+{
+ struct device *dev = chip->chip.dev;
+
+ mutex_lock(&chip->lock);
+
+ if (!test_bit(irq, chip->used))
+ dev_err(dev, "trying to free unused MSI#%lu\n", irq);
+ else
+ clear_bit(irq, chip->used);
+
+ mutex_unlock(&chip->lock);
+}
+
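+/*
+ * Demultiplex the shared MSI interrupt: scan the eight AFI_MSI_VEC
+ * registers, acknowledge each pending bit and dispatch the Linux IRQ
+ * mapped to it.
+ */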
+static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
+{
+ struct tegra_pcie *pcie = data;
+ struct tegra_msi *msi = &pcie->msi;
+ unsigned int i, processed = 0;
+
+ for (i = 0; i < 8; i++) {
+ unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
+
+ while (reg) {
+ unsigned int offset = find_first_bit(&reg, 32);
+ unsigned int index = i * 32 + offset;
+ unsigned int irq;
+
+ /* clear the interrupt */
+ afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
+
+ irq = irq_find_mapping(msi->domain, index);
+ if (irq) {
+ if (test_bit(index, msi->used))
+ generic_handle_irq(irq);
+ else
+ dev_info(pcie->dev, "unhandled MSI\n");
+ } else {
+ /*
+ * That's weird: who triggered this?
+ * It was already cleared above, so just report it.
+ */
+ dev_info(pcie->dev, "unexpected MSI\n");
+ }
+
+ /* see if there's any more pending in this vector */
+ reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
+
+ processed++;
+ }
+ }
+
+ return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int tegra_msi_setup_irq(struct msi_controller *chip,
+ struct pci_dev *pdev, struct msi_desc *desc)
+{
+ struct tegra_msi *msi = to_tegra_msi(chip);
+ struct msi_msg msg;
+ unsigned int irq;
+ int hwirq;
+
+ hwirq = tegra_msi_alloc(msi);
+ if (hwirq < 0)
+ return hwirq;
+
+ irq = irq_create_mapping(msi->domain, hwirq);
+ if (!irq) {
+ tegra_msi_free(msi, hwirq);
+ return -EINVAL;
+ }
+
+ irq_set_msi_desc(irq, desc);
+
+ msg.address_lo = virt_to_phys((void *)msi->pages);
+ /* 32 bit address only */
+ msg.address_hi = 0;
+ msg.data = hwirq;
+
+ pci_write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+static void tegra_msi_teardown_irq(struct msi_controller *chip,
+ unsigned int irq)
+{
+ struct tegra_msi *msi = to_tegra_msi(chip);
+ struct irq_data *d = irq_get_irq_data(irq);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ irq_dispose_mapping(irq);
+ tegra_msi_free(msi, hwirq);
+}
+
+static struct irq_chip tegra_msi_irq_chip = {
+ .name = "Tegra PCIe MSI",
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ tegra_cpuidle_pcie_irqs_in_use();
+
+ return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = tegra_msi_map,
+};
+
+static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
+{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ struct tegra_msi *msi = &pcie->msi;
+ unsigned long base;
+ int err;
+ u32 reg;
+
+ mutex_init(&msi->lock);
+
+ msi->chip.dev = pcie->dev;
+ msi->chip.setup_irq = tegra_msi_setup_irq;
+ msi->chip.teardown_irq = tegra_msi_teardown_irq;
+
+ msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
+ &msi_domain_ops, &msi->chip);
+ if (!msi->domain) {
+ dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ err = platform_get_irq_byname(pdev, "msi");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+ goto err;
+ }
+
+ msi->irq = err;
+
+ err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
+ tegra_msi_irq_chip.name, pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ goto err;
+ }
+
+ /* set up the AFI/FPCI range */
+ msi->pages = __get_free_pages(GFP_KERNEL, 0);
+ base = virt_to_phys((void *)msi->pages);
+
+ afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
+ afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
+ /* this register is in 4K increments */
+ afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
+
+ /* enable all MSI vectors */
+ afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
+ afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
+ afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
+ afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
+ afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
+ afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
+ afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
+ afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
+
+ /* and unmask the MSI interrupt */
+ reg = afi_readl(pcie, AFI_INTR_MASK);
+ reg |= AFI_INTR_MASK_MSI_MASK;
+ afi_writel(pcie, reg, AFI_INTR_MASK);
+
+ return 0;
+
+err:
+ irq_domain_remove(msi->domain);
+ return err;
+}
+
+static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
+{
+ struct tegra_msi *msi = &pcie->msi;
+ unsigned int i, irq;
+ u32 value;
+
+ /* mask the MSI interrupt */
+ value = afi_readl(pcie, AFI_INTR_MASK);
+ value &= ~AFI_INTR_MASK_MSI_MASK;
+ afi_writel(pcie, value, AFI_INTR_MASK);
+
+ /* disable all MSI vectors */
+ afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
+ afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
+ afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
+ afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
+ afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
+ afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
+ afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
+ afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
+
+ free_pages(msi->pages, 0);
+
+ if (msi->irq > 0)
+ free_irq(msi->irq, pcie);
+
+ for (i = 0; i < INT_PCI_MSI_NR; i++) {
+ irq = irq_find_mapping(msi->domain, i);
+ if (irq > 0)
+ irq_dispose_mapping(irq);
+ }
+
+ irq_domain_remove(msi->domain);
+
+ return 0;
+}
+
+static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
+ u32 *xbar)
+{
+ struct device_node *np = pcie->dev->of_node;
+
+ if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+ switch (lanes) {
+ case 0x0000104:
+ dev_info(pcie->dev, "4x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
+ return 0;
+
+ case 0x0000102:
+ dev_info(pcie->dev, "2x1, 1x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
+ return 0;
+ }
+ } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
+ switch (lanes) {
+ case 0x00000204:
+ dev_info(pcie->dev, "4x1, 2x1 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
+ return 0;
+
+ case 0x00020202:
+ dev_info(pcie->dev, "2x3 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
+ return 0;
+
+ case 0x00010104:
+ dev_info(pcie->dev, "4x1, 1x2 configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
+ return 0;
+ }
+ } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
+ switch (lanes) {
+ case 0x00000004:
+ dev_info(pcie->dev, "single-mode configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
+ return 0;
+
+ case 0x00000202:
+ dev_info(pcie->dev, "dual-mode configuration\n");
+ *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * Check whether a given set of supplies is available in a device tree node.
+ * This is used to check whether the new or the legacy device tree bindings
+ * should be used.
+ */
+static bool of_regulator_bulk_available(struct device_node *np,
+ struct regulator_bulk_data *supplies,
+ unsigned int num_supplies)
+{
+ char property[32];
+ unsigned int i;
+
+ for (i = 0; i < num_supplies; i++) {
+ snprintf(property, 32, "%s-supply", supplies[i].supply);
+
+ if (of_find_property(np, property, NULL) == NULL)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Old versions of the device tree binding for this device used a set of power
+ * supplies that didn't match the hardware inputs. This happened to work for a
+ * number of cases but is not future-proof. However, to preserve backwards-
+ * compatibility with old device trees, this function will try to use the old
+ * set of supplies.
+ */
+static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
+{
+ struct device_node *np = pcie->dev->of_node;
+
+ if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
+ pcie->num_supplies = 3;
+ else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
+ pcie->num_supplies = 2;
+
+ if (pcie->num_supplies == 0) {
+ dev_err(pcie->dev, "device %s not supported in legacy mode\n",
+ np->full_name);
+ return -ENODEV;
+ }
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[0].supply = "pex-clk";
+ pcie->supplies[1].supply = "vdd";
+
+ if (pcie->num_supplies > 2)
+ pcie->supplies[2].supply = "avdd";
+
+ return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
+ pcie->supplies);
+}
+
+/*
+ * Obtains the list of regulators required for a particular generation of the
+ * IP block.
+ *
+ * This would've been nice to do simply by providing static tables for use
+ * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
+ * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
+ * and either seems to be optional depending on which ports are being used.
+ */
+static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
+{
+ struct device_node *np = pcie->dev->of_node;
+ unsigned int i = 0;
+
+ if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+ pcie->num_supplies = 7;
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[i++].supply = "avddio-pex";
+ pcie->supplies[i++].supply = "dvddio-pex";
+ pcie->supplies[i++].supply = "avdd-pex-pll";
+ pcie->supplies[i++].supply = "hvdd-pex";
+ pcie->supplies[i++].supply = "hvdd-pex-pll-e";
+ pcie->supplies[i++].supply = "vddio-pex-ctl";
+ pcie->supplies[i++].supply = "avdd-pll-erefe";
+ } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
+ bool need_pexa = false, need_pexb = false;
+
+ /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
+ if (lane_mask & 0x0f)
+ need_pexa = true;
+
+ /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
+ if (lane_mask & 0x30)
+ need_pexb = true;
+
+ pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
+ (need_pexb ? 2 : 0);
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[i++].supply = "avdd-pex-pll";
+ pcie->supplies[i++].supply = "hvdd-pex";
+ pcie->supplies[i++].supply = "vddio-pex-ctl";
+ pcie->supplies[i++].supply = "avdd-plle";
+
+ if (need_pexa) {
+ pcie->supplies[i++].supply = "avdd-pexa";
+ pcie->supplies[i++].supply = "vdd-pexa";
+ }
+
+ if (need_pexb) {
+ pcie->supplies[i++].supply = "avdd-pexb";
+ pcie->supplies[i++].supply = "vdd-pexb";
+ }
+ } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
+ pcie->num_supplies = 5;
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[0].supply = "avdd-pex";
+ pcie->supplies[1].supply = "vdd-pex";
+ pcie->supplies[2].supply = "avdd-pex-pll";
+ pcie->supplies[3].supply = "avdd-plle";
+ pcie->supplies[4].supply = "vddio-pex-clk";
+ }
+
+ if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
+ pcie->num_supplies))
+ return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
+ pcie->supplies);
+
+ /*
+ * If not all regulators are available for this new scheme, assume
+ * that the device tree complies with an older version of the device
+ * tree binding.
+ */
+ dev_info(pcie->dev, "using legacy DT binding for power supplies\n");
+
+ devm_kfree(pcie->dev, pcie->supplies);
+ pcie->num_supplies = 0;
+
+ return tegra_pcie_get_legacy_regulators(pcie);
+}
+
+static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
+{
+ const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+ struct device_node *np = pcie->dev->of_node, *port;
+ struct of_pci_range_parser parser;
+ struct of_pci_range range;
+ u32 lanes = 0, mask = 0;
+ unsigned int lane = 0;
+ struct resource res;
+ int err;
+
+ memset(&pcie->all, 0, sizeof(pcie->all));
+ pcie->all.flags = IORESOURCE_MEM;
+ pcie->all.name = np->full_name;
+ pcie->all.start = ~0;
+ pcie->all.end = 0;
+
+ if (of_pci_range_parser_init(&parser, np)) {
+ dev_err(pcie->dev, "missing \"ranges\" property\n");
+ return -EINVAL;
+ }
+
+ for_each_of_pci_range(&parser, &range) {
+ err = of_pci_range_to_resource(&range, np, &res);
+ if (err < 0)
+ return err;
+
+ switch (res.flags & IORESOURCE_TYPE_BITS) {
+ case IORESOURCE_IO:
+ memcpy(&pcie->pio, &res, sizeof(res));
+ pcie->pio.name = np->full_name;
+
+ /*
+ * The Tegra PCIe host bridge uses this to program the
+ * mapping of the I/O space to the physical address,
+			 * so here we override the .start and .end fields that
+ * of_pci_range_to_resource() converted to I/O space.
+ * We also set the IORESOURCE_MEM type to clarify that
+ * the resource is in the physical memory space.
+ */
+ pcie->io.start = range.cpu_addr;
+ pcie->io.end = range.cpu_addr + range.size - 1;
+ pcie->io.flags = IORESOURCE_MEM;
+ pcie->io.name = "I/O";
+
+ memcpy(&res, &pcie->io, sizeof(res));
+ break;
+
+ case IORESOURCE_MEM:
+ if (res.flags & IORESOURCE_PREFETCH) {
+ memcpy(&pcie->prefetch, &res, sizeof(res));
+ pcie->prefetch.name = "prefetchable";
+ } else {
+ memcpy(&pcie->mem, &res, sizeof(res));
+ pcie->mem.name = "non-prefetchable";
+ }
+ break;
+ }
+
+ if (res.start <= pcie->all.start)
+ pcie->all.start = res.start;
+
+ if (res.end >= pcie->all.end)
+ pcie->all.end = res.end;
+ }
+
+ err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
+ if (err < 0)
+ return err;
+
+ err = of_pci_parse_bus_range(np, &pcie->busn);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to parse ranges property: %d\n",
+ err);
+ pcie->busn.name = np->name;
+ pcie->busn.start = 0;
+ pcie->busn.end = 0xff;
+ pcie->busn.flags = IORESOURCE_BUS;
+ }
+
+ /* parse root ports */
+ for_each_child_of_node(np, port) {
+ struct tegra_pcie_port *rp;
+ unsigned int index;
+ u32 value;
+
+ err = of_pci_get_devfn(port);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to parse address: %d\n",
+ err);
+ return err;
+ }
+
+ index = PCI_SLOT(err);
+
+ if (index < 1 || index > soc->num_ports) {
+ dev_err(pcie->dev, "invalid port number: %d\n", index);
+ return -EINVAL;
+ }
+
+ index--;
+
+ err = of_property_read_u32(port, "nvidia,num-lanes", &value);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
+ err);
+ return err;
+ }
+
+ if (value > 16) {
+ dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
+ return -EINVAL;
+ }
+
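+		/* pack each port's lane count into its own byte of "lanes" */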
+ lanes |= value << (index << 3);
+
+ if (!of_device_is_available(port)) {
+ lane += value;
+ continue;
+ }
+
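+		/* mark the lanes occupied by this enabled port in the mask */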
+ mask |= ((1 << value) - 1) << lane;
+ lane += value;
+
+ rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ err = of_address_to_resource(port, 0, &rp->regs);
+ if (err < 0) {
+ dev_err(pcie->dev, "failed to parse address: %d\n",
+ err);
+ return err;
+ }
+
+ INIT_LIST_HEAD(&rp->list);
+ rp->index = index;
+ rp->lanes = value;
+ rp->pcie = pcie;
+
+ rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
+ if (IS_ERR(rp->base))
+ return PTR_ERR(rp->base);
+
+ list_add_tail(&rp->list, &pcie->ports);
+ }
+
+ err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
+ if (err < 0) {
+ dev_err(pcie->dev, "invalid lane configuration\n");
+ return err;
+ }
+
+ err = tegra_pcie_get_regulators(pcie, mask);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/*
+ * FIXME: If no PCIe cards are attached, calling this function can
+ * significantly increase boot time because of its long timeout loops.
+ */
+#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
+static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
+{
+ unsigned int retries = 3;
+ unsigned long value;
+
+ /* override presence detection */
+ value = readl(port->base + RP_PRIV_MISC);
+ value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
+ value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
+ writel(value, port->base + RP_PRIV_MISC);
+
+ do {
+ unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
+
+ do {
+ value = readl(port->base + RP_VEND_XP);
+
+ if (value & RP_VEND_XP_DL_UP)
+ break;
+
+ usleep_range(1000, 2000);
+ } while (--timeout);
+
+ if (!timeout) {
+ dev_err(port->pcie->dev, "link %u down, retrying\n",
+ port->index);
+ goto retry;
+ }
+
+ timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
+
+ do {
+ value = readl(port->base + RP_LINK_CONTROL_STATUS);
+
+ if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
+ return true;
+
+ usleep_range(1000, 2000);
+ } while (--timeout);
+
+retry:
+ tegra_pcie_port_reset(port);
+ } while (--retries);
+
+ return false;
+}
+
+static int tegra_pcie_enable(struct tegra_pcie *pcie)
+{
+ struct tegra_pcie_port *port, *tmp;
+ struct hw_pci hw;
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ dev_info(pcie->dev, "probing port %u, using %u lanes\n",
+ port->index, port->lanes);
+
+ tegra_pcie_port_enable(port);
+
+ if (tegra_pcie_port_check_link(port))
+ continue;
+
+ dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
+
+ tegra_pcie_port_disable(port);
+ tegra_pcie_port_free(port);
+ }
+
+ memset(&hw, 0, sizeof(hw));
+
+#ifdef CONFIG_PCI_MSI
+ hw.msi_ctrl = &pcie->msi.chip;
+#endif
+
+ hw.nr_controllers = 1;
+ hw.private_data = (void **)&pcie;
+ hw.setup = tegra_pcie_setup;
+ hw.map_irq = tegra_pcie_map_irq;
+ hw.scan = tegra_pcie_scan_bus;
+ hw.ops = &tegra_pcie_ops;
+
+ pci_common_init_dev(pcie->dev, &hw);
+
+ return 0;
+}
+
+static const struct tegra_pcie_soc_data tegra20_pcie_data = {
+ .num_ports = 2,
+ .msi_base_shift = 0,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
+ .has_pex_clkreq_en = false,
+ .has_pex_bias_ctrl = false,
+ .has_intr_prsnt_sense = false,
+ .has_cml_clk = false,
+ .has_gen2 = false,
+};
+
+static const struct tegra_pcie_soc_data tegra30_pcie_data = {
+ .num_ports = 3,
+ .msi_base_shift = 8,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_intr_prsnt_sense = true,
+ .has_cml_clk = true,
+ .has_gen2 = false,
+};
+
+static const struct tegra_pcie_soc_data tegra124_pcie_data = {
+ .num_ports = 2,
+ .msi_base_shift = 8,
+ .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+ .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+ .has_pex_clkreq_en = true,
+ .has_pex_bias_ctrl = true,
+ .has_intr_prsnt_sense = true,
+ .has_cml_clk = true,
+ .has_gen2 = true,
+};
+
+static const struct of_device_id tegra_pcie_of_match[] = {
+ { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
+ { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
+ { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
+
+static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct tegra_pcie *pcie = s->private;
+
+ if (list_empty(&pcie->ports))
+ return NULL;
+
+ seq_printf(s, "Index Status\n");
+
+ return seq_list_start(&pcie->ports, *pos);
+}
+
+static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct tegra_pcie *pcie = s->private;
+
+ return seq_list_next(v, &pcie->ports, pos);
+}
+
+static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
+{
+}
+
+static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
+{
+ bool up = false, active = false;
+ struct tegra_pcie_port *port;
+ unsigned int value;
+
+ port = list_entry(v, struct tegra_pcie_port, list);
+
+ value = readl(port->base + RP_VEND_XP);
+
+ if (value & RP_VEND_XP_DL_UP)
+ up = true;
+
+ value = readl(port->base + RP_LINK_CONTROL_STATUS);
+
+ if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
+ active = true;
+
+ seq_printf(s, "%2u ", port->index);
+
+ if (up)
+ seq_printf(s, "up");
+
+ if (active) {
+ if (up)
+ seq_printf(s, ", ");
+
+ seq_printf(s, "active");
+ }
+
+ seq_printf(s, "\n");
+ return 0;
+}
+
+static const struct seq_operations tegra_pcie_ports_seq_ops = {
+ .start = tegra_pcie_ports_seq_start,
+ .next = tegra_pcie_ports_seq_next,
+ .stop = tegra_pcie_ports_seq_stop,
+ .show = tegra_pcie_ports_seq_show,
+};
+
+static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
+{
+ struct tegra_pcie *pcie = inode->i_private;
+ struct seq_file *s;
+ int err;
+
+ err = seq_open(file, &tegra_pcie_ports_seq_ops);
+ if (err)
+ return err;
+
+ s = file->private_data;
+ s->private = pcie;
+
+ return 0;
+}
+
+static const struct file_operations tegra_pcie_ports_ops = {
+ .owner = THIS_MODULE,
+ .open = tegra_pcie_ports_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
+{
+ struct dentry *file;
+
+ pcie->debugfs = debugfs_create_dir("pcie", NULL);
+ if (!pcie->debugfs)
+ return -ENOMEM;
+
+ file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
+ pcie, &tegra_pcie_ports_ops);
+ if (!file)
+ goto remove;
+
+ return 0;
+
+remove:
+ debugfs_remove_recursive(pcie->debugfs);
+ pcie->debugfs = NULL;
+ return -ENOMEM;
+}
+
+static int tegra_pcie_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct tegra_pcie *pcie;
+ int err;
+
+ match = of_match_device(tegra_pcie_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pcie->buses);
+ INIT_LIST_HEAD(&pcie->ports);
+ pcie->soc_data = match->data;
+ pcie->dev = &pdev->dev;
+
+ err = tegra_pcie_parse_dt(pcie);
+ if (err < 0)
+ return err;
+
+ pcibios_min_mem = 0;
+
+ err = tegra_pcie_get_resources(pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request resources: %d\n", err);
+ return err;
+ }
+
+ err = tegra_pcie_enable_controller(pcie);
+ if (err)
+ goto put_resources;
+
+ /* setup the AFI address translations */
+ tegra_pcie_setup_translations(pcie);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ err = tegra_pcie_enable_msi(pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to enable MSI support: %d\n",
+ err);
+ goto put_resources;
+ }
+ }
+
+ err = tegra_pcie_enable(pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
+ goto disable_msi;
+ }
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_pcie_debugfs_init(pcie);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
+ err);
+ }
+
+ platform_set_drvdata(pdev, pcie);
+ return 0;
+
+disable_msi:
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ tegra_pcie_disable_msi(pcie);
+put_resources:
+ tegra_pcie_put_resources(pcie);
+ return err;
+}
+
+static struct platform_driver tegra_pcie_driver = {
+ .driver = {
+ .name = "tegra-pcie",
+ .of_match_table = tegra_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tegra_pcie_probe,
+};
+module_platform_driver(tegra_pcie_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
new file mode 100644
index 000000000..0863d9cc2
--- /dev/null
+++ b/drivers/pci/host/pci-versatile.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2004 Koninklijke Philips Electronics NV
+ *
+ * Conversion to platform driver and DT:
+ * Copyright 2014 Linaro Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * 14/04/2005 Initial version, colin.king@philips.com
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+static void __iomem *versatile_pci_base;
+static void __iomem *versatile_cfg_base[2];
+
+#define PCI_IMAP(m) (versatile_pci_base + ((m) * 4))
+#define PCI_SMAP(m) (versatile_pci_base + 0x14 + ((m) * 4))
+#define PCI_SELFID (versatile_pci_base + 0xc)
+
+#define VP_PCI_DEVICE_ID 0x030010ee
+#define VP_PCI_CLASS_ID 0x0b400000
+
+static u32 pci_slot_ignore;
+
+static int __init versatile_pci_slot_ignore(char *str)
+{
+ int retval;
+ int slot;
+
+ while ((retval = get_option(&str, &slot))) {
+ if ((slot < 0) || (slot > 31))
+ pr_err("Illegal slot value: %d\n", slot);
+ else
+ pci_slot_ignore |= (1 << slot);
+ }
+ return 1;
+}
+__setup("pci_slot_ignore=", versatile_pci_slot_ignore);
+
+
+static void __iomem *versatile_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int offset)
+{
+ unsigned int busnr = bus->number;
+
+ if (pci_slot_ignore & (1 << PCI_SLOT(devfn)))
+ return NULL;
+
+ return versatile_cfg_base[1] + ((busnr << 16) | (devfn << 8) | offset);
+}
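+
+/*
+ * Example (illustrative): bus 0, devfn PCI_DEVFN(2, 0), offset 0x10 maps
+ * to versatile_cfg_base[1] + 0x1010.
+ */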
+
+static struct pci_ops pci_versatile_ops = {
+ .map_bus = versatile_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write,
+};
+
+static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
+ struct list_head *res)
+{
+ int err, mem = 1, res_valid = 0;
+ struct device_node *np = dev->of_node;
+ resource_size_t iobase;
+ struct resource_entry *win;
+
+ err = of_pci_get_host_bridge_resources(np, 0, 0xff, res, &iobase);
+ if (err)
+ return err;
+
+ resource_list_for_each_entry(win, res) {
+ struct resource *parent, *res = win->res;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+ parent = &ioport_resource;
+ err = pci_remap_iospace(res, iobase);
+ if (err) {
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ err, res);
+ continue;
+ }
+ break;
+ case IORESOURCE_MEM:
+ parent = &iomem_resource;
+ res_valid |= !(res->flags & IORESOURCE_PREFETCH);
+
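+			/* window addresses are programmed in 256 MiB (1 << 28) units */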
+ writel(res->start >> 28, PCI_IMAP(mem));
+ writel(PHYS_OFFSET >> 28, PCI_SMAP(mem));
+ mem++;
+
+ break;
+ case IORESOURCE_BUS:
+ default:
+ continue;
+ }
+
+ err = devm_request_resource(dev, parent, res);
+ if (err)
+ goto out_release_res;
+ }
+
+ if (!res_valid) {
+ dev_err(dev, "non-prefetchable memory resource required\n");
+ err = -EINVAL;
+ goto out_release_res;
+ }
+
+ return 0;
+
+out_release_res:
+ pci_free_resource_list(res);
+ return err;
+}
+
+/* Unused, temporary to satisfy ARM arch code */
+struct pci_sys_data sys;
+
+static int versatile_pci_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret, i, myslot = -1;
+ u32 val;
+ void __iomem *local_pci_cfg_base;
+ struct pci_bus *bus;
+ LIST_HEAD(pci_res);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ versatile_pci_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(versatile_pci_base))
+ return PTR_ERR(versatile_pci_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ versatile_cfg_base[0] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(versatile_cfg_base[0]))
+ return PTR_ERR(versatile_cfg_base[0]);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ versatile_cfg_base[1] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(versatile_cfg_base[1]))
+ return PTR_ERR(versatile_cfg_base[1]);
+
+ ret = versatile_pci_parse_request_of_pci_ranges(&pdev->dev, &pci_res);
+ if (ret)
+ return ret;
+
+ /*
+	 * We need to discover the PCI core first so that it can configure
+	 * itself before the main PCI probing is performed.
+ */
+ for (i = 0; i < 32; i++) {
+ if ((readl(versatile_cfg_base[0] + (i << 11) + PCI_VENDOR_ID) == VP_PCI_DEVICE_ID) &&
+ (readl(versatile_cfg_base[0] + (i << 11) + PCI_CLASS_REVISION) == VP_PCI_CLASS_ID)) {
+ myslot = i;
+ break;
+ }
+ }
+ if (myslot == -1) {
+ dev_err(&pdev->dev, "Cannot find PCI core!\n");
+ return -EIO;
+ }
+	/* do not map the Versatile FPGA PCI device into memory space */
+ pci_slot_ignore |= (1 << myslot);
+
+ dev_info(&pdev->dev, "PCI core found (slot %d)\n", myslot);
+
+ writel(myslot, PCI_SELFID);
+ local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11);
+
+ val = readl(local_pci_cfg_base + PCI_COMMAND);
+ val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
+ writel(val, local_pci_cfg_base + PCI_COMMAND);
+
+ /*
+ * Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM
+ */
+ writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0);
+ writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1);
+ writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
+
+ /*
+ * For many years the kernel and QEMU were symbiotically buggy
+ * in that they both assumed the same broken IRQ mapping.
+ * QEMU therefore attempts to auto-detect old broken kernels
+ * so that they still work on newer QEMU as they did on old
+ * QEMU. Since we now use the correct (i.e. matching-hardware)
+ * IRQ mapping we write a definitely different value to a
+ * PCI_INTERRUPT_LINE register to tell QEMU that we expect
+ * real hardware behaviour and it need not be backwards
+ * compatible for us. This write is harmless on real hardware.
+ */
+ writel(0, versatile_cfg_base[0] + PCI_INTERRUPT_LINE);
+
+ pci_add_flags(PCI_ENABLE_PROC_DOMAINS);
+ pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC);
+
+ bus = pci_scan_root_bus(&pdev->dev, 0, &pci_versatile_ops, &sys, &pci_res);
+ if (!bus)
+ return -ENOMEM;
+
+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+ pci_assign_unassigned_bus_resources(bus);
+ pci_bus_add_devices(bus);
+
+ return 0;
+}
+
+static const struct of_device_id versatile_pci_of_match[] = {
+ { .compatible = "arm,versatile-pci", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, versatile_pci_of_match);
+
+static struct platform_driver versatile_pci_driver = {
+ .driver = {
+ .name = "versatile-pci",
+ .of_match_table = versatile_pci_of_match,
+ },
+ .probe = versatile_pci_probe,
+};
+module_platform_driver(versatile_pci_driver);
+
+MODULE_DESCRIPTION("Versatile PCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
new file mode 100644
index 000000000..ee082c036
--- /dev/null
+++ b/drivers/pci/host/pci-xgene.c
@@ -0,0 +1,531 @@
+/*
+ * APM X-Gene PCIe Driver
+ *
+ * Copyright (c) 2014 Applied Micro Circuits Corporation.
+ *
+ * Author: Tanmay Inamdar <tinamdar@apm.com>.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define PCIECORE_CTLANDSTATUS 0x50
+#define PIM1_1L 0x80
+#define IBAR2 0x98
+#define IR2MSK 0x9c
+#define PIM2_1L 0xa0
+#define IBAR3L 0xb4
+#define IR3MSKL 0xbc
+#define PIM3_1L 0xc4
+#define OMR1BARL 0x100
+#define OMR2BARL 0x118
+#define OMR3BARL 0x130
+#define CFGBARL 0x154
+#define CFGBARH 0x158
+#define CFGCTL 0x15c
+#define RTDID 0x160
+#define BRIDGE_CFG_0 0x2000
+#define BRIDGE_CFG_4 0x2010
+#define BRIDGE_STATUS_0 0x2600
+
+#define LINK_UP_MASK 0x00000100
+#define AXI_EP_CFG_ACCESS 0x10000
+#define EN_COHERENCY 0xF0000000
+#define EN_REG 0x00000001
+#define OB_LO_IO 0x00000002
+#define XGENE_PCIE_VENDORID 0x10E8
+#define XGENE_PCIE_DEVICEID 0xE004
+#define SZ_1T (SZ_1G*1024ULL)
+#define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe)
+
+struct xgene_pcie_port {
+ struct device_node *node;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *csr_base;
+ void __iomem *cfg_base;
+ unsigned long cfg_addr;
+ bool link_up;
+};
+
+static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
+{
+ return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
+}
+
+/*
+ * When address bits [17:16] are 2'b01, the configuration access is
+ * treated as Type 1 and forwarded to the external PCIe device.
+ */
+static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
+{
+ struct xgene_pcie_port *port = bus->sysdata;
+
+ if (bus->number >= (bus->primary + 1))
+ return port->cfg_base + AXI_EP_CFG_ACCESS;
+
+ return port->cfg_base;
+}
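+
+/*
+ * Note: AXI_EP_CFG_ACCESS (0x10000) sets address bit 16, i.e. bits [17:16]
+ * = 2'b01, so config accesses to downstream buses go out as Type 1 requests.
+ */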
+
+/*
+ * For a configuration request, the RTDID register supplies the bus,
+ * device and function numbers of the header fields.
+ */
+static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
+{
+ struct xgene_pcie_port *port = bus->sysdata;
+ unsigned int b, d, f;
+ u32 rtdid_val = 0;
+
+ b = bus->number;
+ d = PCI_SLOT(devfn);
+ f = PCI_FUNC(devfn);
+
+ if (!pci_is_root_bus(bus))
+ rtdid_val = (b << 8) | (d << 3) | f;
+
+ writel(rtdid_val, port->csr_base + RTDID);
+ /* read the register back to ensure flush */
+ readl(port->csr_base + RTDID);
+}
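+
+/*
+ * Example (illustrative): bus 1, device 2, function 0 gives
+ * RTDID = (1 << 8) | (2 << 3) | 0 = 0x110.
+ */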
+
+/*
+ * The X-Gene PCIe port uses BAR0-BAR1 of the RC's configuration space
+ * for the translation from the PCI bus to the native bus. The entire
+ * DDR region is mapped into PCIe space using these registers, so that
+ * it can be reached by DMA from EP devices. BAR0/1 of the bridge should
+ * be hidden during enumeration to avoid sizing and resource allocation
+ * by the PCI core.
+ */
+static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset)
+{
+ if (pci_is_root_bus(bus) && ((offset == PCI_BASE_ADDRESS_0) ||
+ (offset == PCI_BASE_ADDRESS_1)))
+ return true;
+
+ return false;
+}
+
+static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
+ int offset)
+{
+ struct xgene_pcie_port *port = bus->sysdata;
+
+ if ((pci_is_root_bus(bus) && devfn != 0) || !port->link_up ||
+ xgene_pcie_hide_rc_bars(bus, offset))
+ return NULL;
+
+ xgene_pcie_set_rtdid_reg(bus, devfn);
+ return xgene_pcie_get_cfg_base(bus) + offset;
+}
+
+static struct pci_ops xgene_pcie_ops = {
+ .map_bus = xgene_pcie_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+};
+
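+/*
+ * The 64-bit inbound mask is stored at a 16-bit skew: successive 16-bit
+ * chunks of the mask land in the upper half of the register at @addr,
+ * both halves of @addr + 0x04, and the lower half of @addr + 0x08.
+ */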
+static u64 xgene_pcie_set_ib_mask(void __iomem *csr_base, u32 addr,
+ u32 flags, u64 size)
+{
+ u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
+ u32 val32 = 0;
+ u32 val;
+
+ val32 = readl(csr_base + addr);
+ val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16);
+ writel(val, csr_base + addr);
+
+ val32 = readl(csr_base + addr + 0x04);
+ val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16);
+ writel(val, csr_base + addr + 0x04);
+
+ val32 = readl(csr_base + addr + 0x04);
+ val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16);
+ writel(val, csr_base + addr + 0x04);
+
+ val32 = readl(csr_base + addr + 0x08);
+ val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16);
+ writel(val, csr_base + addr + 0x08);
+
+ return mask;
+}
+
+static void xgene_pcie_linkup(struct xgene_pcie_port *port,
+ u32 *lanes, u32 *speed)
+{
+ void __iomem *csr_base = port->csr_base;
+ u32 val32;
+
+ port->link_up = false;
+ val32 = readl(csr_base + PCIECORE_CTLANDSTATUS);
+ if (val32 & LINK_UP_MASK) {
+ port->link_up = true;
+ *speed = PIPE_PHY_RATE_RD(val32);
+ val32 = readl(csr_base + BRIDGE_STATUS_0);
+ *lanes = val32 >> 26;
+ }
+}
+
+static int xgene_pcie_init_port(struct xgene_pcie_port *port)
+{
+ int rc;
+
+ port->clk = clk_get(port->dev, NULL);
+ if (IS_ERR(port->clk)) {
+ dev_err(port->dev, "clock not available\n");
+ return -ENODEV;
+ }
+
+ rc = clk_prepare_enable(port->clk);
+ if (rc) {
+ dev_err(port->dev, "clock enable failed\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr");
+ port->csr_base = devm_ioremap_resource(port->dev, res);
+ if (IS_ERR(port->csr_base))
+ return PTR_ERR(port->csr_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
+ port->cfg_base = devm_ioremap_resource(port->dev, res);
+ if (IS_ERR(port->cfg_base))
+ return PTR_ERR(port->cfg_base);
+ port->cfg_addr = res->start;
+
+ return 0;
+}
+
+static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
+ struct resource *res, u32 offset,
+ u64 cpu_addr, u64 pci_addr)
+{
+ void __iomem *base = port->csr_base + offset;
+ resource_size_t size = resource_size(res);
+ u64 restype = resource_type(res);
+ u64 mask = 0;
+ u32 min_size;
+ u32 flag = EN_REG;
+
+ if (restype == IORESOURCE_MEM) {
+ min_size = SZ_128M;
+ } else {
+ min_size = 128;
+ flag |= OB_LO_IO;
+ }
+
+ if (size >= min_size)
+ mask = ~(size - 1) | flag;
+ else
+ dev_warn(port->dev, "res size 0x%llx less than minimum 0x%x\n",
+ (u64)size, min_size);
+
+ writel(lower_32_bits(cpu_addr), base);
+ writel(upper_32_bits(cpu_addr), base + 0x04);
+ writel(lower_32_bits(mask), base + 0x08);
+ writel(upper_32_bits(mask), base + 0x0c);
+ writel(lower_32_bits(pci_addr), base + 0x10);
+ writel(upper_32_bits(pci_addr), base + 0x14);
+}
+
+static void xgene_pcie_setup_cfg_reg(void __iomem *csr_base, u64 addr)
+{
+ writel(lower_32_bits(addr), csr_base + CFGBARL);
+ writel(upper_32_bits(addr), csr_base + CFGBARH);
+ writel(EN_REG, csr_base + CFGCTL);
+}
+
+static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
+ struct list_head *res,
+ resource_size_t io_base)
+{
+ struct resource_entry *window;
+ struct device *dev = port->dev;
+ int ret;
+
+ resource_list_for_each_entry(window, res) {
+ struct resource *res = window->res;
+ u64 restype = resource_type(res);
+
+ dev_dbg(port->dev, "%pR\n", res);
+
+ switch (restype) {
+ case IORESOURCE_IO:
+ xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
+ res->start - window->offset);
+ ret = pci_remap_iospace(res, io_base);
+ if (ret < 0)
+ return ret;
+ break;
+ case IORESOURCE_MEM:
+ xgene_pcie_setup_ob_reg(port, res, OMR1BARL, res->start,
+ res->start - window->offset);
+ break;
+ case IORESOURCE_BUS:
+ break;
+ default:
+ dev_err(dev, "invalid resource %pR\n", res);
+ return -EINVAL;
+ }
+ }
+ xgene_pcie_setup_cfg_reg(port->csr_base, port->cfg_addr);
+
+ return 0;
+}
+
+static void xgene_pcie_setup_pims(void *addr, u64 pim, u64 size)
+{
+ writel(lower_32_bits(pim), addr);
+ writel(upper_32_bits(pim) | EN_COHERENCY, addr + 0x04);
+ writel(lower_32_bits(size), addr + 0x10);
+ writel(upper_32_bits(size), addr + 0x14);
+}
+
+/*
+ * The X-Gene PCIe controller supports a maximum of 3 inbound memory
+ * regions. This function selects a region based on its size.
+ */
+static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
+{
+ if ((size > 4) && (size < SZ_16M) && !(*ib_reg_mask & (1 << 1))) {
+ *ib_reg_mask |= (1 << 1);
+ return 1;
+ }
+
+ if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) {
+ *ib_reg_mask |= (1 << 0);
+ return 0;
+ }
+
+ if ((size > SZ_1M) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 2))) {
+ *ib_reg_mask |= (1 << 2);
+ return 2;
+ }
+
+ return -EINVAL;
+}
+
+static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
+ struct of_pci_range *range, u8 *ib_reg_mask)
+{
+ void __iomem *csr_base = port->csr_base;
+ void __iomem *cfg_base = port->cfg_base;
+ void *bar_addr;
+ void *pim_addr;
+ u64 cpu_addr = range->cpu_addr;
+ u64 pci_addr = range->pci_addr;
+ u64 size = range->size;
+ u64 mask = ~(size - 1) | EN_REG;
+ u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64;
+ u32 bar_low;
+ int region;
+
+ region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size);
+ if (region < 0) {
+ dev_warn(port->dev, "invalid pcie dma-range config\n");
+ return;
+ }
+
+ if (range->flags & IORESOURCE_PREFETCH)
+ flags |= PCI_BASE_ADDRESS_MEM_PREFETCH;
+
+ bar_low = pcie_bar_low_val((u32)cpu_addr, flags);
+ switch (region) {
+ case 0:
+ xgene_pcie_set_ib_mask(csr_base, BRIDGE_CFG_4, flags, size);
+ bar_addr = cfg_base + PCI_BASE_ADDRESS_0;
+ writel(bar_low, bar_addr);
+ writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
+ pim_addr = csr_base + PIM1_1L;
+ break;
+ case 1:
+ bar_addr = csr_base + IBAR2;
+ writel(bar_low, bar_addr);
+ writel(lower_32_bits(mask), csr_base + IR2MSK);
+ pim_addr = csr_base + PIM2_1L;
+ break;
+ case 2:
+ bar_addr = csr_base + IBAR3L;
+ writel(bar_low, bar_addr);
+ writel(upper_32_bits(cpu_addr), bar_addr + 0x4);
+ writel(lower_32_bits(mask), csr_base + IR3MSKL);
+ writel(upper_32_bits(mask), csr_base + IR3MSKL + 0x4);
+ pim_addr = csr_base + PIM3_1L;
+ break;
+ }
+
+ xgene_pcie_setup_pims(pim_addr, pci_addr, ~(size - 1));
+}
+
+static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
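+	/* PCI "dma-ranges": 3 PCI address cells, parent address cells, 2 size cells */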
+ const int na = 3, ns = 2;
+ int rlen;
+
+ parser->node = node;
+ parser->pna = of_n_addr_cells(node);
+ parser->np = parser->pna + na + ns;
+
+ parser->range = of_get_property(node, "dma-ranges", &rlen);
+ if (!parser->range)
+ return -ENOENT;
+ parser->end = parser->range + rlen / sizeof(__be32);
+
+ return 0;
+}
+
+static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
+{
+ struct device_node *np = port->node;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ struct device *dev = port->dev;
+ u8 ib_reg_mask = 0;
+
+ if (pci_dma_range_parser_init(&parser, np)) {
+ dev_err(dev, "missing dma-ranges property\n");
+ return -EINVAL;
+ }
+
+ /* Get the dma-ranges from DT */
+ for_each_of_pci_range(&parser, &range) {
+ u64 end = range.cpu_addr + range.size - 1;
+
+ dev_dbg(port->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
+ range.flags, range.cpu_addr, end, range.pci_addr);
+ xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask);
+ }
+ return 0;
+}
+
+/* clear BAR configuration which was done by firmware */
+static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
+{
+ int i;
+
+ for (i = PIM1_1L; i <= CFGCTL; i += 4)
+ writel(0x0, port->csr_base + i);
+}
+
+static int xgene_pcie_setup(struct xgene_pcie_port *port,
+ struct list_head *res,
+ resource_size_t io_base)
+{
+ u32 val, lanes = 0, speed = 0;
+ int ret;
+
+ xgene_pcie_clear_config(port);
+
+ /* setup the vendor and device IDs correctly */
+ val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID;
+ writel(val, port->csr_base + BRIDGE_CFG_0);
+
+ ret = xgene_pcie_map_ranges(port, res, io_base);
+ if (ret)
+ return ret;
+
+ ret = xgene_pcie_parse_map_dma_ranges(port);
+ if (ret)
+ return ret;
+
+ xgene_pcie_linkup(port, &lanes, &speed);
+ if (!port->link_up)
+ dev_info(port->dev, "(rc) link down\n");
+ else
+ dev_info(port->dev, "(rc) x%d gen-%d link up\n",
+ lanes, speed + 1);
+ return 0;
+}
+
+static int xgene_pcie_probe_bridge(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ struct xgene_pcie_port *port;
+ resource_size_t iobase = 0;
+ struct pci_bus *bus;
+ int ret;
+ LIST_HEAD(res);
+
+ port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+ port->node = of_node_get(pdev->dev.of_node);
+ port->dev = &pdev->dev;
+
+ ret = xgene_pcie_map_reg(port, pdev);
+ if (ret)
+ return ret;
+
+ ret = xgene_pcie_init_port(port);
+ if (ret)
+ return ret;
+
+ ret = of_pci_get_host_bridge_resources(dn, 0, 0xff, &res, &iobase);
+ if (ret)
+ return ret;
+
+ ret = xgene_pcie_setup(port, &res, iobase);
+ if (ret)
+ return ret;
+
+ bus = pci_create_root_bus(&pdev->dev, 0,
+ &xgene_pcie_ops, port, &res);
+ if (!bus)
+ return -ENOMEM;
+
+ pci_scan_child_bus(bus);
+ pci_assign_unassigned_bus_resources(bus);
+ pci_bus_add_devices(bus);
+
+ platform_set_drvdata(pdev, port);
+ return 0;
+}
+
+static const struct of_device_id xgene_pcie_match_table[] = {
+ {.compatible = "apm,xgene-pcie",},
+ {},
+};
+
+static struct platform_driver xgene_pcie_driver = {
+ .driver = {
+ .name = "xgene-pcie",
+ .of_match_table = of_match_ptr(xgene_pcie_match_table),
+ },
+ .probe = xgene_pcie_probe_bridge,
+};
+module_platform_driver(xgene_pcie_driver);
+
+MODULE_AUTHOR("Tanmay Inamdar <tinamdar@apm.com>");
+MODULE_DESCRIPTION("APM X-Gene PCIe driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
new file mode 100644
index 000000000..2e9f84fdd
--- /dev/null
+++ b/drivers/pci/host/pcie-designware.c
@@ -0,0 +1,832 @@
+/*
+ * Synopsys Designware PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+/* Synopsys-specific PCIe configuration registers */
+#define PCIE_PORT_LINK_CONTROL 0x710
+#define PORT_LINK_MODE_MASK (0x3f << 16)
+#define PORT_LINK_MODE_1_LANES (0x1 << 16)
+#define PORT_LINK_MODE_2_LANES (0x3 << 16)
+#define PORT_LINK_MODE_4_LANES (0x7 << 16)
+
+#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
+#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
+#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8)
+#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
+#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
+#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
+
+#define PCIE_MSI_ADDR_LO 0x820
+#define PCIE_MSI_ADDR_HI 0x824
+#define PCIE_MSI_INTR0_ENABLE 0x828
+#define PCIE_MSI_INTR0_MASK 0x82C
+#define PCIE_MSI_INTR0_STATUS 0x830
+
+#define PCIE_ATU_VIEWPORT 0x900
+#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
+#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
+#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
+#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
+#define PCIE_ATU_CR1 0x904
+#define PCIE_ATU_TYPE_MEM (0x0 << 0)
+#define PCIE_ATU_TYPE_IO (0x2 << 0)
+#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
+#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
+#define PCIE_ATU_CR2 0x908
+#define PCIE_ATU_ENABLE (0x1 << 31)
+#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
+#define PCIE_ATU_LOWER_BASE 0x90C
+#define PCIE_ATU_UPPER_BASE 0x910
+#define PCIE_ATU_LIMIT 0x914
+#define PCIE_ATU_LOWER_TARGET 0x918
+#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
+#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
+#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
+#define PCIE_ATU_UPPER_TARGET 0x91C
+
+static struct hw_pci dw_pci;
+
+static unsigned long global_io_offset;
+
+static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
+{
+ BUG_ON(!sys->private_data);
+
+ return sys->private_data;
+}
+
+int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val)
+{
+ *val = readl(addr);
+
+ if (size == 1)
+ *val = (*val >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (8 * (where & 3))) & 0xffff;
+ else if (size != 4)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
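+
+/*
+ * Example (illustrative): a one-byte read at where = 2 extracts bits
+ * [23:16] of the word, i.e. (readl(addr) >> 16) & 0xff.
+ */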
+
+int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val)
+{
+ if (size == 4)
+ writel(val, addr);
+ else if (size == 2)
+ writew(val, addr + (where & 2));
+ else if (size == 1)
+ writeb(val, addr + (where & 3));
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline void dw_pcie_readl_rc(struct pcie_port *pp, u32 reg, u32 *val)
+{
+ if (pp->ops->readl_rc)
+ pp->ops->readl_rc(pp, pp->dbi_base + reg, val);
+ else
+ *val = readl(pp->dbi_base + reg);
+}
+
+static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
+{
+ if (pp->ops->writel_rc)
+ pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
+ else
+ writel(val, pp->dbi_base + reg);
+}
+
+static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+ u32 *val)
+{
+ int ret;
+
+ if (pp->ops->rd_own_conf)
+ ret = pp->ops->rd_own_conf(pp, where, size, val);
+ else
+ ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where,
+ size, val);
+
+ return ret;
+}
+
+static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+ u32 val)
+{
+ int ret;
+
+ if (pp->ops->wr_own_conf)
+ ret = pp->ops->wr_own_conf(pp, where, size, val);
+ else
+ ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), where,
+ size, val);
+
+ return ret;
+}
+
+static struct irq_chip dw_msi_irq_chip = {
+ .name = "PCI-MSI",
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+/* MSI interrupt handler */
+irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+{
+ unsigned long val;
+ int i, pos, irq;
+ irqreturn_t ret = IRQ_NONE;
+
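+	/* register banks for each group of 32 MSI vectors are 12 bytes apart */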
+ for (i = 0; i < MAX_MSI_CTRLS; i++) {
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
+ (u32 *)&val);
+ if (val) {
+ ret = IRQ_HANDLED;
+ pos = 0;
+ while ((pos = find_next_bit(&val, 32, pos)) != 32) {
+ irq = irq_find_mapping(pp->irq_domain,
+ i * 32 + pos);
+ dw_pcie_wr_own_conf(pp,
+ PCIE_MSI_INTR0_STATUS + i * 12,
+ 4, 1 << pos);
+ generic_handle_irq(irq);
+ pos++;
+ }
+ }
+ }
+
+ return ret;
+}
+
+void dw_pcie_msi_init(struct pcie_port *pp)
+{
+ pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
+
+ /* program the msi_data */
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
+ virt_to_phys((void *)pp->msi_data));
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
+}
+
+static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+{
+ unsigned int res, bit, val;
+
+ res = (irq / 32) * 12;
+ bit = irq % 32;
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+ val &= ~(1 << bit);
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+}
+
+static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
+ unsigned int nvec, unsigned int pos)
+{
+ unsigned int i;
+
+ for (i = 0; i < nvec; i++) {
+ irq_set_msi_desc_off(irq_base, i, NULL);
+ /* Disable corresponding interrupt on MSI controller */
+ if (pp->ops->msi_clear_irq)
+ pp->ops->msi_clear_irq(pp, pos + i);
+ else
+ dw_pcie_msi_clear_irq(pp, pos + i);
+ }
+
+ bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
+}
+
+static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+{
+ unsigned int res, bit, val;
+
+ res = (irq / 32) * 12;
+ bit = irq % 32;
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+ val |= 1 << bit;
+ dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+}
+
+static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
+{
+ int irq, pos0, i;
+ struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
+
+ pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
+ order_base_2(no_irqs));
+ if (pos0 < 0)
+ goto no_valid_irq;
+
+ irq = irq_find_mapping(pp->irq_domain, pos0);
+ if (!irq)
+ goto no_valid_irq;
+
+ /*
+ * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
+ * descs so there is no need to allocate descs here. We can therefore
+ * assume that if irq_find_mapping above returns non-zero, then the
+ * descs are also successfully allocated.
+ */
+
+ for (i = 0; i < no_irqs; i++) {
+ if (irq_set_msi_desc_off(irq, i, desc) != 0) {
+ clear_irq_range(pp, irq, i, pos0);
+ goto no_valid_irq;
+ }
+		/* Enable the corresponding interrupt in the MSI interrupt controller */
+ if (pp->ops->msi_set_irq)
+ pp->ops->msi_set_irq(pp, pos0 + i);
+ else
+ dw_pcie_msi_set_irq(pp, pos0 + i);
+ }
+
+ *pos = pos0;
+ return irq;
+
+no_valid_irq:
+ *pos = pos0;
+ return -ENOSPC;
+}
+
+static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ int irq, pos;
+ struct msi_msg msg;
+ struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
+
+ if (desc->msi_attrib.is_msix)
+ return -EINVAL;
+
+ irq = assign_irq(1, desc, &pos);
+ if (irq < 0)
+ return irq;
+
+ if (pp->ops->get_msi_addr)
+ msg.address_lo = pp->ops->get_msi_addr(pp);
+ else
+ msg.address_lo = virt_to_phys((void *)pp->msi_data);
+ msg.address_hi = 0x0;
+
+ if (pp->ops->get_msi_data)
+ msg.data = pp->ops->get_msi_data(pp, pos);
+ else
+ msg.data = pos;
+
+ pci_write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+{
+ struct irq_data *data = irq_get_irq_data(irq);
+ struct msi_desc *msi = irq_data_get_msi(data);
+ struct pcie_port *pp = sys_to_pcie(msi->dev->bus->sysdata);
+
+ clear_irq_range(pp, irq, 1, data->hwirq);
+}
+
+static struct msi_controller dw_pcie_msi_chip = {
+ .setup_irq = dw_msi_setup_irq,
+ .teardown_irq = dw_msi_teardown_irq,
+};
+
+int dw_pcie_link_up(struct pcie_port *pp)
+{
+ if (pp->ops->link_up)
+ return pp->ops->link_up(pp);
+ else
+ return 0;
+}
+
+static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = dw_pcie_msi_map,
+};
+
+int dw_pcie_host_init(struct pcie_port *pp)
+{
+ struct device_node *np = pp->dev->of_node;
+ struct platform_device *pdev = to_platform_device(pp->dev);
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ struct resource *cfg_res;
+ u32 val, na, ns;
+ const __be32 *addrp;
+ int i, index, ret;
+
+	/*
+	 * Find the address cell size and the number of cells in order to get
+	 * the untranslated address.
+	 */
+ of_property_read_u32(np, "#address-cells", &na);
+ ns = of_n_size_cells(np);
+
+ cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ if (cfg_res) {
+ pp->cfg0_size = resource_size(cfg_res)/2;
+ pp->cfg1_size = resource_size(cfg_res)/2;
+ pp->cfg0_base = cfg_res->start;
+ pp->cfg1_base = cfg_res->start + pp->cfg0_size;
+
+ /* Find the untranslated configuration space address */
+ index = of_property_match_string(np, "reg-names", "config");
+ addrp = of_get_address(np, index, NULL, NULL);
+ pp->cfg0_mod_base = of_read_number(addrp, ns);
+ pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size;
+ } else {
+ dev_err(pp->dev, "missing *config* reg space\n");
+ }
+
+ if (of_pci_range_parser_init(&parser, np)) {
+ dev_err(pp->dev, "missing ranges property\n");
+ return -EINVAL;
+ }
+
+ /* Get the I/O and memory ranges from DT */
+ for_each_of_pci_range(&parser, &range) {
+ unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;
+
+ if (restype == IORESOURCE_IO) {
+ of_pci_range_to_resource(&range, np, &pp->io);
+ pp->io.name = "I/O";
+ pp->io.start = max_t(resource_size_t,
+ PCIBIOS_MIN_IO,
+ range.pci_addr + global_io_offset);
+ pp->io.end = min_t(resource_size_t,
+ IO_SPACE_LIMIT,
+ range.pci_addr + range.size
+ + global_io_offset - 1);
+ pp->io_size = resource_size(&pp->io);
+ pp->io_bus_addr = range.pci_addr;
+ pp->io_base = range.cpu_addr;
+
+ /* Find the untranslated IO space address */
+ pp->io_mod_base = of_read_number(parser.range -
+ parser.np + na, ns);
+ }
+ if (restype == IORESOURCE_MEM) {
+ of_pci_range_to_resource(&range, np, &pp->mem);
+ pp->mem.name = "MEM";
+ pp->mem_size = resource_size(&pp->mem);
+ pp->mem_bus_addr = range.pci_addr;
+
+ /* Find the untranslated MEM space address */
+ pp->mem_mod_base = of_read_number(parser.range -
+ parser.np + na, ns);
+ }
+ if (restype == 0) {
+ of_pci_range_to_resource(&range, np, &pp->cfg);
+ pp->cfg0_size = resource_size(&pp->cfg)/2;
+ pp->cfg1_size = resource_size(&pp->cfg)/2;
+ pp->cfg0_base = pp->cfg.start;
+ pp->cfg1_base = pp->cfg.start + pp->cfg0_size;
+
+ /* Find the untranslated configuration space address */
+ pp->cfg0_mod_base = of_read_number(parser.range -
+ parser.np + na, ns);
+ pp->cfg1_mod_base = pp->cfg0_mod_base +
+ pp->cfg0_size;
+ }
+ }
+
+ ret = of_pci_parse_bus_range(np, &pp->busn);
+ if (ret < 0) {
+ pp->busn.name = np->name;
+ pp->busn.start = 0;
+ pp->busn.end = 0xff;
+ pp->busn.flags = IORESOURCE_BUS;
+ dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n",
+ ret, &pp->busn);
+ }
+
+ if (!pp->dbi_base) {
+ pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
+ resource_size(&pp->cfg));
+ if (!pp->dbi_base) {
+ dev_err(pp->dev, "error with ioremap\n");
+ return -ENOMEM;
+ }
+ }
+
+ pp->mem_base = pp->mem.start;
+
+ if (!pp->va_cfg0_base) {
+ pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
+ pp->cfg0_size);
+ if (!pp->va_cfg0_base) {
+ dev_err(pp->dev, "error with ioremap in function\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (!pp->va_cfg1_base) {
+ pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
+ pp->cfg1_size);
+ if (!pp->va_cfg1_base) {
+ dev_err(pp->dev, "error with ioremap\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
+ dev_err(pp->dev, "Failed to parse the number of lanes\n");
+ return -EINVAL;
+ }
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ if (!pp->ops->msi_host_init) {
+ pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
+ MAX_MSI_IRQS, &msi_domain_ops,
+ &dw_pcie_msi_chip);
+ if (!pp->irq_domain) {
+ dev_err(pp->dev, "irq domain init failed\n");
+ return -ENXIO;
+ }
+
+ for (i = 0; i < MAX_MSI_IRQS; i++)
+ irq_create_mapping(pp->irq_domain, i);
+ } else {
+ ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (pp->ops->host_init)
+ pp->ops->host_init(pp);
+
+ dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
+
+ /* program correct class for RC */
+ dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
+
+ dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
+
+#ifdef CONFIG_PCI_MSI
+ dw_pcie_msi_chip.dev = pp->dev;
+ dw_pci.msi_ctrl = &dw_pcie_msi_chip;
+#endif
+
+ dw_pci.nr_controllers = 1;
+ dw_pci.private_data = (void **)&pp;
+
+ pci_common_init_dev(pp->dev, &dw_pci);
+
+ return 0;
+}
+
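+/*
+ * The iATU is programmed through a viewport: select a region in
+ * PCIE_ATU_VIEWPORT, program the CPU base and limit addresses, the PCI
+ * target address and the TLP type, then enable the region via PCIE_ATU_CR2.
+ */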
+static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
+{
+ /* Program viewport 0 : OUTBOUND : CFG0 */
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
+static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
+{
+ /* Program viewport 1 : OUTBOUND : CFG1 */
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
+static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
+{
+ /* Program viewport 0 : OUTBOUND : MEM */
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr),
+ PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
+static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
+{
+ /* Program viewport 1 : OUTBOUND : IO */
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr),
+ PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+
+static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ u32 devfn, int where, int size, u32 *val)
+{
+ int ret = PCIBIOS_SUCCESSFUL;
+ u32 address, busdev;
+
+ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+ PCIE_ATU_FUNC(PCI_FUNC(devfn));
+ address = where & ~0x3;
+
+ if (bus->parent->number == pp->root_bus_nr) {
+ dw_pcie_prog_viewport_cfg0(pp, busdev);
+ ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size,
+ val);
+ dw_pcie_prog_viewport_mem_outbound(pp);
+ } else {
+ dw_pcie_prog_viewport_cfg1(pp, busdev);
+ ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size,
+ val);
+ dw_pcie_prog_viewport_io_outbound(pp);
+ }
+
+ return ret;
+}
+
+static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ u32 devfn, int where, int size, u32 val)
+{
+ int ret = PCIBIOS_SUCCESSFUL;
+ u32 address, busdev;
+
+ busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
+ PCIE_ATU_FUNC(PCI_FUNC(devfn));
+ address = where & ~0x3;
+
+ if (bus->parent->number == pp->root_bus_nr) {
+ dw_pcie_prog_viewport_cfg0(pp, busdev);
+ ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size,
+ val);
+ dw_pcie_prog_viewport_mem_outbound(pp);
+ } else {
+ dw_pcie_prog_viewport_cfg1(pp, busdev);
+ ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size,
+ val);
+ dw_pcie_prog_viewport_io_outbound(pp);
+ }
+
+ return ret;
+}
+
+static int dw_pcie_valid_config(struct pcie_port *pp,
+ struct pci_bus *bus, int dev)
+{
+ /* If there is no link, then there is no device */
+ if (bus->number != pp->root_bus_nr) {
+ if (!dw_pcie_link_up(pp))
+ return 0;
+ }
+
+ /* access only one slot on each root port */
+ if (bus->number == pp->root_bus_nr && dev > 0)
+ return 0;
+
+ /*
+	 * Do not read more than one device on the bus directly attached
+	 * to the RC's (virtual bridge's) downstream side.
+ */
+ if (bus->primary == pp->root_bus_nr && dev > 0)
+ return 0;
+
+ return 1;
+}
+
+static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct pcie_port *pp = sys_to_pcie(bus->sysdata);
+ int ret;
+
+ if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ if (bus->number != pp->root_bus_nr)
+ if (pp->ops->rd_other_conf)
+ ret = pp->ops->rd_other_conf(pp, bus, devfn,
+ where, size, val);
+ else
+ ret = dw_pcie_rd_other_conf(pp, bus, devfn,
+ where, size, val);
+ else
+ ret = dw_pcie_rd_own_conf(pp, where, size, val);
+
+ return ret;
+}
+
+static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ struct pcie_port *pp = sys_to_pcie(bus->sysdata);
+ int ret;
+
+ if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (bus->number != pp->root_bus_nr)
+ if (pp->ops->wr_other_conf)
+ ret = pp->ops->wr_other_conf(pp, bus, devfn,
+ where, size, val);
+ else
+ ret = dw_pcie_wr_other_conf(pp, bus, devfn,
+ where, size, val);
+ else
+ ret = dw_pcie_wr_own_conf(pp, where, size, val);
+
+ return ret;
+}
+
+static struct pci_ops dw_pcie_ops = {
+ .read = dw_pcie_rd_conf,
+ .write = dw_pcie_wr_conf,
+};
+
+static int dw_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct pcie_port *pp;
+
+ pp = sys_to_pcie(sys);
+
+ if (global_io_offset < SZ_1M && pp->io_size > 0) {
+ sys->io_offset = global_io_offset - pp->io_bus_addr;
+ pci_ioremap_io(global_io_offset, pp->io_base);
+ global_io_offset += SZ_64K;
+ pci_add_resource_offset(&sys->resources, &pp->io,
+ sys->io_offset);
+ }
+
+ sys->mem_offset = pp->mem.start - pp->mem_bus_addr;
+ pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
+ pci_add_resource(&sys->resources, &pp->busn);
+
+ return 1;
+}
+
+static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+{
+ struct pci_bus *bus;
+ struct pcie_port *pp = sys_to_pcie(sys);
+
+ pp->root_bus_nr = sys->busnr;
+ bus = pci_create_root_bus(pp->dev, sys->busnr,
+ &dw_pcie_ops, sys, &sys->resources);
+ if (!bus)
+ return NULL;
+
+ pci_scan_child_bus(bus);
+
+ if (bus && pp->ops->scan_bus)
+ pp->ops->scan_bus(pp);
+
+ return bus;
+}
+
+static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
+ int irq;
+
+ irq = of_irq_parse_and_map_pci(dev, slot, pin);
+ if (!irq)
+ irq = pp->irq;
+
+ return irq;
+}
+
+static struct hw_pci dw_pci = {
+ .setup = dw_pcie_setup,
+ .scan = dw_pcie_scan_bus,
+ .map_irq = dw_pcie_map_irq,
+};
+
+void dw_pcie_setup_rc(struct pcie_port *pp)
+{
+ u32 val;
+ u32 membase;
+ u32 memlimit;
+
+ /* set the number of lanes */
+ dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val);
+ val &= ~PORT_LINK_MODE_MASK;
+ switch (pp->lanes) {
+ case 1:
+ val |= PORT_LINK_MODE_1_LANES;
+ break;
+ case 2:
+ val |= PORT_LINK_MODE_2_LANES;
+ break;
+ case 4:
+ val |= PORT_LINK_MODE_4_LANES;
+ break;
+ }
+ dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
+
+ /* set link width speed control register */
+ dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
+ val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
+ switch (pp->lanes) {
+ case 1:
+ val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+ break;
+ case 2:
+ val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
+ break;
+ case 4:
+ val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
+ break;
+ }
+ dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);
+
+ /* setup RC BARs */
+ dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
+ dw_pcie_writel_rc(pp, 0x00000000, PCI_BASE_ADDRESS_1);
+
+ /* setup interrupt pins */
+ dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val);
+ val &= 0xffff00ff;
+ val |= 0x00000100;
+ dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);
+
+ /* setup bus numbers */
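+	/* 0x00010100: primary bus 0, secondary bus 1, subordinate bus 1 */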
+ dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS, &val);
+ val &= 0xff000000;
+ val |= 0x00010100;
+ dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
+
+ /* setup memory base, memory limit */
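+	/* both fields hold address bits [31:20], i.e. 1 MiB granularity */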
+ membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
+ memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
+ val = memlimit | membase;
+ dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);
+
+ /* setup command register */
+ dw_pcie_readl_rc(pp, PCI_COMMAND, &val);
+ val &= 0xffff0000;
+ val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
+ dw_pcie_writel_rc(pp, val, PCI_COMMAND);
+}
+
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("Designware PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
new file mode 100644
index 000000000..d0bbd2768
--- /dev/null
+++ b/drivers/pci/host/pcie-designware.h
@@ -0,0 +1,87 @@
+/*
+ * Synopsys Designware PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _PCIE_DESIGNWARE_H
+#define _PCIE_DESIGNWARE_H
+
+/*
+ * The maximum number of MSI IRQs is 256 per controller, but keep it at
+ * 32 for now; we will probably never need more than 32. If needed,
+ * increase it in multiples of 32.
+ */
+#define MAX_MSI_IRQS 32
+#define MAX_MSI_CTRLS (MAX_MSI_IRQS / 32)
+
+struct pcie_port {
+ struct device *dev;
+ u8 root_bus_nr;
+ void __iomem *dbi_base;
+ u64 cfg0_base;
+ u64 cfg0_mod_base;
+ void __iomem *va_cfg0_base;
+ u32 cfg0_size;
+ u64 cfg1_base;
+ u64 cfg1_mod_base;
+ void __iomem *va_cfg1_base;
+ u32 cfg1_size;
+ u64 io_base;
+ u64 io_mod_base;
+ phys_addr_t io_bus_addr;
+ u32 io_size;
+ u64 mem_base;
+ u64 mem_mod_base;
+ phys_addr_t mem_bus_addr;
+ u32 mem_size;
+ struct resource cfg;
+ struct resource io;
+ struct resource mem;
+ struct resource busn;
+ int irq;
+ u32 lanes;
+ struct pcie_host_ops *ops;
+ int msi_irq;
+ struct irq_domain *irq_domain;
+ unsigned long msi_data;
+ DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
+};
+
+struct pcie_host_ops {
+ void (*readl_rc)(struct pcie_port *pp,
+ void __iomem *dbi_base, u32 *val);
+ void (*writel_rc)(struct pcie_port *pp,
+ u32 val, void __iomem *dbi_base);
+ int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
+ int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
+ int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 *val);
+ int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size, u32 val);
+ int (*link_up)(struct pcie_port *pp);
+ void (*host_init)(struct pcie_port *pp);
+ void (*msi_set_irq)(struct pcie_port *pp, int irq);
+ void (*msi_clear_irq)(struct pcie_port *pp, int irq);
+ u32 (*get_msi_addr)(struct pcie_port *pp);
+ u32 (*get_msi_data)(struct pcie_port *pp, int pos);
+ void (*scan_bus)(struct pcie_port *pp);
+ int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip);
+};
+
+int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
+int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val);
+irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
+void dw_pcie_msi_init(struct pcie_port *pp);
+int dw_pcie_link_up(struct pcie_port *pp);
+void dw_pcie_setup_rc(struct pcie_port *pp);
+int dw_pcie_host_init(struct pcie_port *pp);
+
+#endif /* _PCIE_DESIGNWARE_H */
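
A glue driver binds to this core by filling in a pcie_host_ops and calling
dw_pcie_host_init(); a minimal port typically needs little more than link_up
and host_init. A sketch of that wiring (the foo_* names are hypothetical; the
spear13xx driver further down in this series is a real instance):

    static int foo_pcie_link_up(struct pcie_port *pp)
    {
        /* poll a SoC-specific link-status register here */
        return 1;
    }

    static void foo_pcie_host_init(struct pcie_port *pp)
    {
        dw_pcie_setup_rc(pp);   /* lanes, BARs, bus numbers */
    }

    static struct pcie_host_ops foo_pcie_host_ops = {
        .link_up   = foo_pcie_link_up,
        .host_init = foo_pcie_host_init,
    };

    /* in probe: pp->ops = &foo_pcie_host_ops; then dw_pcie_host_init(pp); */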
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
new file mode 100644
index 000000000..afad6c21f
--- /dev/null
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+
+#include "pcie-iproc.h"
+
+static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
+{
+ struct iproc_pcie *pcie;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource reg;
+ resource_size_t iobase = 0;
+ LIST_HEAD(res);
+ int ret;
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(struct iproc_pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pcie);
+
+ ret = of_address_to_resource(np, 0, &reg);
+ if (ret < 0) {
+ dev_err(pcie->dev, "unable to obtain controller resources\n");
+ return ret;
+ }
+
+ pcie->base = devm_ioremap(pcie->dev, reg.start, resource_size(&reg));
+ if (!pcie->base) {
+ dev_err(pcie->dev, "unable to map controller registers\n");
+ return -ENOMEM;
+ }
+
+ /* PHY use is optional */
+ pcie->phy = devm_phy_get(&pdev->dev, "pcie-phy");
+ if (IS_ERR(pcie->phy)) {
+ if (PTR_ERR(pcie->phy) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ pcie->phy = NULL;
+ }
+
+ ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase);
+ if (ret) {
+ dev_err(pcie->dev,
+ "unable to get PCI host bridge resources\n");
+ return ret;
+ }
+
+ pcie->resources = &res;
+
+ ret = iproc_pcie_setup(pcie);
+ if (ret) {
+ dev_err(pcie->dev, "PCIe controller setup failed\n");
+ return ret;
+ }
+
+ return 0;
+}
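
The PHY handling in the probe above is the usual optional-resource idiom:
-EPROBE_DEFER is propagated (the PHY driver may simply not be bound yet),
while any other error demotes the PHY to absent. The same pattern in
isolation, with dev and the "pcie-phy" name as in the probe above:

    struct phy *phy = devm_phy_get(dev, "pcie-phy");

    if (IS_ERR(phy)) {
        if (PTR_ERR(phy) == -EPROBE_DEFER)
            return -EPROBE_DEFER;   /* try again later */
        phy = NULL;                 /* PHY is optional; carry on */
    }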
+
+static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
+{
+ struct iproc_pcie *pcie = platform_get_drvdata(pdev);
+
+ return iproc_pcie_remove(pcie);
+}
+
+static const struct of_device_id iproc_pcie_of_match_table[] = {
+ { .compatible = "brcm,iproc-pcie", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table);
+
+static struct platform_driver iproc_pcie_pltfm_driver = {
+ .driver = {
+ .name = "iproc-pcie",
+ .of_match_table = of_match_ptr(iproc_pcie_of_match_table),
+ },
+ .probe = iproc_pcie_pltfm_probe,
+ .remove = iproc_pcie_pltfm_remove,
+};
+module_platform_driver(iproc_pcie_pltfm_driver);
+
+MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom iPROC PCIe platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
new file mode 100644
index 000000000..329e1b545
--- /dev/null
+++ b/drivers/pci/host/pcie-iproc.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2014 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/mbus.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+
+#include "pcie-iproc.h"
+
+#define CLK_CONTROL_OFFSET 0x000
+#define EP_MODE_SURVIVE_PERST_SHIFT 1
+#define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT)
+#define RC_PCIE_RST_OUTPUT_SHIFT 0
+#define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT)
+
+#define CFG_IND_ADDR_OFFSET 0x120
+#define CFG_IND_ADDR_MASK 0x00001ffc
+
+#define CFG_IND_DATA_OFFSET 0x124
+
+#define CFG_ADDR_OFFSET 0x1f8
+#define CFG_ADDR_BUS_NUM_SHIFT 20
+#define CFG_ADDR_BUS_NUM_MASK 0x0ff00000
+#define CFG_ADDR_DEV_NUM_SHIFT 15
+#define CFG_ADDR_DEV_NUM_MASK 0x000f8000
+#define CFG_ADDR_FUNC_NUM_SHIFT 12
+#define CFG_ADDR_FUNC_NUM_MASK 0x00007000
+#define CFG_ADDR_REG_NUM_SHIFT 2
+#define CFG_ADDR_REG_NUM_MASK 0x00000ffc
+#define CFG_ADDR_CFG_TYPE_SHIFT 0
+#define CFG_ADDR_CFG_TYPE_MASK 0x00000003
+
+#define CFG_DATA_OFFSET 0x1fc
+
+#define SYS_RC_INTX_EN 0x330
+#define SYS_RC_INTX_MASK 0xf
+
+static inline struct iproc_pcie *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+/*
+ * Note: access to the configuration registers is protected at the
+ * higher layer by 'pci_lock' in drivers/pci/access.c.
+ */
+static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
+ unsigned int devfn,
+ int where)
+{
+ struct pci_sys_data *sys = bus->sysdata;
+ struct iproc_pcie *pcie = sys_to_pcie(sys);
+ unsigned slot = PCI_SLOT(devfn);
+ unsigned fn = PCI_FUNC(devfn);
+ unsigned busno = bus->number;
+ u32 val;
+
+ /* root complex access */
+ if (busno == 0) {
+ if (slot >= 1)
+ return NULL;
+ writel(where & CFG_IND_ADDR_MASK,
+ pcie->base + CFG_IND_ADDR_OFFSET);
+ return (pcie->base + CFG_IND_DATA_OFFSET);
+ }
+
+ if (fn > 1)
+ return NULL;
+
+ /* EP device access */
+ val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
+ (slot << CFG_ADDR_DEV_NUM_SHIFT) |
+ (fn << CFG_ADDR_FUNC_NUM_SHIFT) |
+ (where & CFG_ADDR_REG_NUM_MASK) |
+ (1 & CFG_ADDR_CFG_TYPE_MASK);
+ writel(val, pcie->base + CFG_ADDR_OFFSET);
+
+ return (pcie->base + CFG_DATA_OFFSET);
+}
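
For the EP path above, the bus/device/function/offset tuple is packed into
CFG_ADDR exactly as the shift and mask macros describe. A standalone check
with illustrative values (bus 1, device 0, function 0, offset 0x10, type-1
access):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t busno = 1, slot = 0, fn = 0, where = 0x10;
        uint32_t val = (busno << 20) | (slot << 15) | (fn << 12) |
                       (where & 0x00000ffc) | (1 & 0x00000003);

        printf("CFG_ADDR = 0x%08x\n", val); /* prints 0x00100011 */
        return 0;
    }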
+
+static struct pci_ops iproc_pcie_ops = {
+ .map_bus = iproc_pcie_map_cfg_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+};
+
+static void iproc_pcie_reset(struct iproc_pcie *pcie)
+{
+ u32 val;
+
+ /*
+ * Configure the PCIe controller as root complex and send a downstream
+ * reset
+ */
+ val = EP_MODE_SURVIVE_PERST | RC_PCIE_RST_OUTPUT;
+ writel(val, pcie->base + CLK_CONTROL_OFFSET);
+ udelay(250);
+ val &= ~EP_MODE_SURVIVE_PERST;
+ writel(val, pcie->base + CLK_CONTROL_OFFSET);
+ msleep(250);
+}
+
+static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus)
+{
+ u8 hdr_type;
+ u32 link_ctrl;
+ u16 pos, link_status;
+ int link_is_active = 0;
+
+ /* make sure we are not in EP mode */
+ pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type);
+ if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) {
+ dev_err(pcie->dev, "in EP mode, hdr=%#02x\n", hdr_type);
+ return -EFAULT;
+ }
+
+ /* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */
+ pci_bus_write_config_word(bus, 0, PCI_CLASS_DEVICE,
+ PCI_CLASS_BRIDGE_PCI);
+
+ /* check link status to see if link is active */
+ pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
+ pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status);
+ if (link_status & PCI_EXP_LNKSTA_NLW)
+ link_is_active = 1;
+
+ if (!link_is_active) {
+ /* try GEN 1 link speed */
+#define PCI_LINK_STATUS_CTRL_2_OFFSET 0x0dc
+#define PCI_TARGET_LINK_SPEED_MASK 0xf
+#define PCI_TARGET_LINK_SPEED_GEN2 0x2
+#define PCI_TARGET_LINK_SPEED_GEN1 0x1
+ pci_bus_read_config_dword(bus, 0,
+ PCI_LINK_STATUS_CTRL_2_OFFSET,
+ &link_ctrl);
+ if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) ==
+ PCI_TARGET_LINK_SPEED_GEN2) {
+ link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK;
+ link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1;
+ pci_bus_write_config_dword(bus, 0,
+ PCI_LINK_STATUS_CTRL_2_OFFSET,
+ link_ctrl);
+ msleep(100);
+
+ pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP);
+ pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA,
+ &link_status);
+ if (link_status & PCI_EXP_LNKSTA_NLW)
+ link_is_active = 1;
+ }
+ }
+
+ dev_info(pcie->dev, "link: %s\n", link_is_active ? "UP" : "DOWN");
+
+ return link_is_active ? 0 : -ENODEV;
+}
+
+static void iproc_pcie_enable(struct iproc_pcie *pcie)
+{
+ writel(SYS_RC_INTX_MASK, pcie->base + SYS_RC_INTX_EN);
+}
+
+int iproc_pcie_setup(struct iproc_pcie *pcie)
+{
+ int ret;
+ struct pci_bus *bus;
+
+ if (!pcie || !pcie->dev || !pcie->base)
+ return -EINVAL;
+
+ if (pcie->phy) {
+ ret = phy_init(pcie->phy);
+ if (ret) {
+ dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
+ return ret;
+ }
+
+ ret = phy_power_on(pcie->phy);
+ if (ret) {
+ dev_err(pcie->dev, "unable to power on PCIe PHY\n");
+ goto err_exit_phy;
+ }
+
+ }
+
+ iproc_pcie_reset(pcie);
+
+ pcie->sysdata.private_data = pcie;
+
+ bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops,
+ &pcie->sysdata, pcie->resources);
+ if (!bus) {
+ dev_err(pcie->dev, "unable to create PCI root bus\n");
+ ret = -ENOMEM;
+ goto err_power_off_phy;
+ }
+ pcie->root_bus = bus;
+
+ ret = iproc_pcie_check_link(pcie, bus);
+ if (ret) {
+ dev_err(pcie->dev, "no PCIe EP device detected\n");
+ goto err_rm_root_bus;
+ }
+
+ iproc_pcie_enable(pcie);
+
+ pci_scan_child_bus(bus);
+ pci_assign_unassigned_bus_resources(bus);
+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+ pci_bus_add_devices(bus);
+
+ return 0;
+
+err_rm_root_bus:
+ pci_stop_root_bus(bus);
+ pci_remove_root_bus(bus);
+
+err_power_off_phy:
+ if (pcie->phy)
+ phy_power_off(pcie->phy);
+err_exit_phy:
+ if (pcie->phy)
+ phy_exit(pcie->phy);
+
+ return ret;
+}
+EXPORT_SYMBOL(iproc_pcie_setup);
+
+int iproc_pcie_remove(struct iproc_pcie *pcie)
+{
+ pci_stop_root_bus(pcie->root_bus);
+ pci_remove_root_bus(pcie->root_bus);
+
+ if (pcie->phy) {
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(iproc_pcie_remove);
+
+MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
new file mode 100644
index 000000000..e28075ed1
--- /dev/null
+++ b/drivers/pci/host/pcie-iproc.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _PCIE_IPROC_H
+#define _PCIE_IPROC_H
+
+#define IPROC_PCIE_MAX_NUM_IRQS 6
+
+/**
+ * iProc PCIe device
+ * @dev: pointer to device data structure
+ * @base: PCIe host controller I/O register base
+ * @resources: linked list of all PCI resources
+ * @sysdata: Per PCI controller data
+ * @root_bus: pointer to root bus
+ * @phy: optional PHY device that controls the Serdes
+ * @irqs: interrupt IDs
+ */
+struct iproc_pcie {
+ struct device *dev;
+ void __iomem *base;
+ struct list_head *resources;
+ struct pci_sys_data sysdata;
+ struct pci_bus *root_bus;
+ struct phy *phy;
+ int irqs[IPROC_PCIE_MAX_NUM_IRQS];
+};
+
+int iproc_pcie_setup(struct iproc_pcie *pcie);
+int iproc_pcie_remove(struct iproc_pcie *pcie);
+
+#endif /* _PCIE_IPROC_H */
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
new file mode 100644
index 000000000..c086210f2
--- /dev/null
+++ b/drivers/pci/host/pcie-rcar.c
@@ -0,0 +1,991 @@
+/*
+ * PCIe driver for Renesas R-Car SoCs
+ * Copyright (C) 2014 Renesas Electronics Europe Ltd
+ *
+ * Based on:
+ * arch/sh/drivers/pci/pcie-sh7786.c
+ * arch/sh/drivers/pci/ops-sh7786.c
+ * Copyright (C) 2009 - 2011 Paul Mundt
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define DRV_NAME "rcar-pcie"
+
+#define PCIECAR 0x000010
+#define PCIECCTLR 0x000018
+#define CONFIG_SEND_ENABLE (1 << 31)
+#define TYPE0 (0 << 8)
+#define TYPE1 (1 << 8)
+#define PCIECDR 0x000020
+#define PCIEMSR 0x000028
+#define PCIEINTXR 0x000400
+#define PCIEMSITXR 0x000840
+
+/* Transfer control */
+#define PCIETCTLR 0x02000
+#define CFINIT 1
+#define PCIETSTR 0x02004
+#define DATA_LINK_ACTIVE 1
+#define PCIEERRFR 0x02020
+#define UNSUPPORTED_REQUEST (1 << 4)
+#define PCIEMSIFR 0x02044
+#define PCIEMSIALR 0x02048
+#define MSIFE 1
+#define PCIEMSIAUR 0x0204c
+#define PCIEMSIIER 0x02050
+
+/* root port address */
+#define PCIEPRAR(x) (0x02080 + ((x) * 0x4))
+
+/* local address reg & mask */
+#define PCIELAR(x) (0x02200 + ((x) * 0x20))
+#define PCIELAMR(x) (0x02208 + ((x) * 0x20))
+#define LAM_PREFETCH (1 << 3)
+#define LAM_64BIT (1 << 2)
+#define LAR_ENABLE (1 << 1)
+
+/* PCIe address reg & mask */
+#define PCIEPALR(x) (0x03400 + ((x) * 0x20))
+#define PCIEPAUR(x) (0x03404 + ((x) * 0x20))
+#define PCIEPAMR(x) (0x03408 + ((x) * 0x20))
+#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20))
+#define PAR_ENABLE (1 << 31)
+#define IO_SPACE (1 << 8)
+
+/* Configuration */
+#define PCICONF(x) (0x010000 + ((x) * 0x4))
+#define PMCAP(x) (0x010040 + ((x) * 0x4))
+#define EXPCAP(x) (0x010070 + ((x) * 0x4))
+#define VCCAP(x) (0x010100 + ((x) * 0x4))
+
+/* link layer */
+#define IDSETR1 0x011004
+#define TLCTLR 0x011048
+#define MACSR 0x011054
+#define MACCTLR 0x011058
+#define SCRAMBLE_DISABLE (1 << 27)
+
+/* R-Car H1 PHY */
+#define H1_PCIEPHYADRR 0x04000c
+#define WRITE_CMD (1 << 16)
+#define PHY_ACK (1 << 24)
+#define RATE_POS 12
+#define LANE_POS 8
+#define ADR_POS 0
+#define H1_PCIEPHYDOUTR 0x040014
+#define H1_PCIEPHYSR 0x040018
+
+#define INT_PCI_MSI_NR 32
+
+#define RCONF(x) (PCICONF(0)+(x))
+#define RPMCAP(x) (PMCAP(0)+(x))
+#define REXPCAP(x) (EXPCAP(0)+(x))
+#define RVCCAP(x) (VCCAP(0)+(x))
+
+#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
+#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
+#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)
+
+#define RCAR_PCI_MAX_RESOURCES 4
+#define MAX_NR_INBOUND_MAPS 6
+
+struct rcar_msi {
+ DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ struct irq_domain *domain;
+ struct msi_controller chip;
+ unsigned long pages;
+ struct mutex lock;
+ int irq1;
+ int irq2;
+};
+
+static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
+{
+ return container_of(chip, struct rcar_msi, chip);
+}
+
+/* Structure representing the PCIe interface */
+struct rcar_pcie {
+ struct device *dev;
+ void __iomem *base;
+ struct resource res[RCAR_PCI_MAX_RESOURCES];
+ struct resource busn;
+ int root_bus_nr;
+ struct clk *clk;
+ struct clk *bus_clk;
+ struct rcar_msi msi;
+};
+
+static inline struct rcar_pcie *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
+ unsigned long reg)
+{
+ writel(val, pcie->base + reg);
+}
+
+static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie,
+ unsigned long reg)
+{
+ return readl(pcie->base + reg);
+}
+
+enum {
+ RCAR_PCI_ACCESS_READ,
+ RCAR_PCI_ACCESS_WRITE,
+};
+
+static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
+{
+ int shift = 8 * (where & 3);
+ u32 val = rcar_pci_read_reg(pcie, where & ~3);
+
+ val &= ~(mask << shift);
+ val |= data << shift;
+ rcar_pci_write_reg(pcie, val, where & ~3);
+}
+
+static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
+{
+ int shift = 8 * (where & 3);
+ u32 val = rcar_pci_read_reg(pcie, where & ~3);
+
+ return val >> shift;
+}
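
rcar_rmw32() above emulates sub-dword config writes on a register file that is
only dword-addressable; 'where & 3' selects the byte lane. A worked example
with a made-up register value, writing 0x01 to offset 0x19 (PCI_SECONDARY_BUS):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int where = 0x19;
        uint32_t mask = 0xff, data = 0x01;
        uint32_t reg = 0xaabbccdd;          /* pretend dword at 0x18 */
        int shift = 8 * (where & 3);        /* 8: byte lane 1 */

        reg &= ~(mask << shift);
        reg |= data << shift;
        printf("dword @0x18 = 0x%08x\n", reg);  /* 0xaabb01dd */
        return 0;
    }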
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rcar_pcie_config_access(struct rcar_pcie *pcie,
+ unsigned char access_type, struct pci_bus *bus,
+ unsigned int devfn, int where, u32 *data)
+{
+ int dev, func, reg, index;
+
+ dev = PCI_SLOT(devfn);
+ func = PCI_FUNC(devfn);
+ reg = where & ~3;
+ index = reg / 4;
+
+ /*
+ * While each channel has its own memory-mapped extended config
+ * space, it's generally only accessible when in endpoint mode.
+ * When in root complex mode, the controller is unable to target
+ * itself with either type 0 or type 1 accesses, and indeed, any
+ * controller-initiated target transfer to its own config space
+ * results in a completer abort.
+ *
+ * Each channel effectively only supports a single device, but as
+ * the same channel <-> device access works for any PCI_SLOT()
+ * value, we cheat a bit here and bind the controller's config
+ * space to devfn 0 in order to enable self-enumeration. In this
+ * case the regular ECAR/ECDR path is sidelined and the mangled
+ * config access itself is initiated as an internal bus transaction.
+ */
+ if (pci_is_root_bus(bus)) {
+ if (dev != 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (access_type == RCAR_PCI_ACCESS_READ) {
+ *data = rcar_pci_read_reg(pcie, PCICONF(index));
+ } else {
+ /* Keep an eye out for changes to the root bus number */
+ if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
+ pcie->root_bus_nr = *data & 0xff;
+
+ rcar_pci_write_reg(pcie, *data, PCICONF(index));
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ if (pcie->root_bus_nr < 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Clear errors */
+ rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
+
+ /* Set the PIO address */
+ rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
+ PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
+
+ /* Enable the configuration access */
+ if (bus->parent->number == pcie->root_bus_nr)
+ rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
+ else
+ rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
+
+ /* Check for errors */
+ if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ /* Check for master and target aborts */
+ if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
+ (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (access_type == RCAR_PCI_ACCESS_READ)
+ *data = rcar_pci_read_reg(pcie, PCIECDR);
+ else
+ rcar_pci_write_reg(pcie, *data, PCIECDR);
+
+ /* Disable the configuration access */
+ rcar_pci_write_reg(pcie, 0, PCIECCTLR);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
+ int ret;
+
+ ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
+ bus, devfn, where, val);
+ if (ret != PCIBIOS_SUCCESSFUL) {
+ *val = 0xffffffff;
+ return ret;
+ }
+
+ if (size == 1)
+ *val = (*val >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *val = (*val >> (8 * (where & 2))) & 0xffff;
+
+ dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
+ bus->number, devfn, where, size, (unsigned long)*val);
+
+ return ret;
+}
+
+/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
+static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
+ int shift, ret;
+ u32 data;
+
+ ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
+ bus, devfn, where, &data);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
+ bus->number, devfn, where, size, (unsigned long)val);
+
+ if (size == 1) {
+ shift = 8 * (where & 3);
+ data &= ~(0xff << shift);
+ data |= ((val & 0xff) << shift);
+ } else if (size == 2) {
+ shift = 8 * (where & 2);
+ data &= ~(0xffff << shift);
+ data |= ((val & 0xffff) << shift);
+ } else
+ data = val;
+
+ ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE,
+ bus, devfn, where, &data);
+
+ return ret;
+}
+
+static struct pci_ops rcar_pcie_ops = {
+ .read = rcar_pcie_read_conf,
+ .write = rcar_pcie_write_conf,
+};
+
+static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie)
+{
+ struct resource *res = &pcie->res[win];
+
+ /* Setup PCIe address space mappings for each resource */
+ resource_size_t size;
+ resource_size_t res_start;
+ u32 mask;
+
+ rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
+
+ /*
+ * The PAMR mask is calculated in units of 128 bytes, which
+ * keeps things pretty simple.
+ */
+ size = resource_size(res);
+ mask = (roundup_pow_of_two(size) / SZ_128) - 1;
+ rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
+
+ if (res->flags & IORESOURCE_IO)
+ res_start = pci_pio_to_address(res->start);
+ else
+ res_start = res->start;
+
+ rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win));
+ rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F,
+ PCIEPALR(win));
+
+ /* First resource is for IO */
+ mask = PAR_ENABLE;
+ if (res->flags & IORESOURCE_IO)
+ mask |= IO_SPACE;
+
+ rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
+}
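
For the PAMR write above, the mask is the window size in 128-byte units minus
one, placed in bits 30:7. Worked through for an illustrative 512 MiB window
(already a power of two):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t size = 0x20000000;         /* 512 MiB */
        uint32_t mask = (size / 128) - 1;   /* 0x003fffff */

        printf("PCIEPAMR = 0x%08x\n", mask << 7);   /* 0x1fffff80 */
        return 0;
    }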
+
+static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct rcar_pcie *pcie = sys_to_pcie(sys);
+ struct resource *res;
+ int i;
+
+ pcie->root_bus_nr = -1;
+
+ /* Setup PCI resources */
+ for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) {
+
+ res = &pcie->res[i];
+ if (!res->flags)
+ continue;
+
+ rcar_pcie_setup_window(i, pcie);
+
+ if (res->flags & IORESOURCE_IO) {
+ phys_addr_t io_start = pci_pio_to_address(res->start);
+ pci_ioremap_io(nr * SZ_64K, io_start);
+ } else
+ pci_add_resource(&sys->resources, res);
+ }
+ pci_add_resource(&sys->resources, &pcie->busn);
+
+ return 1;
+}
+
+static struct hw_pci rcar_pci = {
+ .setup = rcar_pcie_setup,
+ .map_irq = of_irq_parse_and_map_pci,
+ .ops = &rcar_pcie_ops,
+};
+
+static void rcar_pcie_enable(struct rcar_pcie *pcie)
+{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
+
+ rcar_pci.nr_controllers = 1;
+ rcar_pci.private_data = (void **)&pcie;
+#ifdef CONFIG_PCI_MSI
+ rcar_pci.msi_ctrl = &pcie->msi.chip;
+#endif
+
+ pci_common_init_dev(&pdev->dev, &rcar_pci);
+}
+
+static int phy_wait_for_ack(struct rcar_pcie *pcie)
+{
+ unsigned int timeout = 100;
+
+ while (timeout--) {
+ if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
+ return 0;
+
+ udelay(100);
+ }
+
+ dev_err(pcie->dev, "Access to PCIe phy timed out\n");
+
+ return -ETIMEDOUT;
+}
+
+static void phy_write_reg(struct rcar_pcie *pcie,
+ unsigned int rate, unsigned int addr,
+ unsigned int lane, unsigned int data)
+{
+ unsigned long phyaddr;
+
+ phyaddr = WRITE_CMD |
+ ((rate & 1) << RATE_POS) |
+ ((lane & 0xf) << LANE_POS) |
+ ((addr & 0xff) << ADR_POS);
+
+ /* Set write data */
+ rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
+ rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
+
+ /* Ignore errors as they will be dealt with if the data link is down */
+ phy_wait_for_ack(pcie);
+
+ /* Clear command */
+ rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
+ rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
+
+ /* Ignore errors as they will be dealt with if the data link is down */
+ phy_wait_for_ack(pcie);
+}
+
+static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
+{
+ unsigned int timeout = 10;
+
+ while (timeout--) {
+ if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+ return 0;
+
+ msleep(5);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
+{
+ int err;
+
+ /* Begin initialization */
+ rcar_pci_write_reg(pcie, 0, PCIETCTLR);
+
+ /* Set mode */
+ rcar_pci_write_reg(pcie, 1, PCIEMSR);
+
+ /*
+ * Initial header for port config space is type 1, set the device
+ * class to match. Hardware takes care of propagating the IDSETR
+ * settings, so there is no need to bother with a quirk.
+ */
+ rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
+
+ /*
+ * Setup Secondary Bus Number & Subordinate Bus Number, even though
+ * they aren't used, to avoid the bridge being detected as broken.
+ */
+ rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
+ rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
+
+ /* Initialize default capabilities. */
+ rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
+ PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
+ rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
+ PCI_HEADER_TYPE_BRIDGE);
+
+ /* Enable data link layer active state reporting */
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
+ PCI_EXP_LNKCAP_DLLLARC);
+
+ /* Write out the physical slot number = 0 */
+ rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
+
+ /* Set the completion timer timeout to the maximum 50ms. */
+ rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
+
+ /* Terminate list of capabilities (Next Capability Offset=0) */
+ rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
+
+ /* Enable MSI */
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
+
+ /* Finish initialization - establish a PCI Express link */
+ rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
+
+ /* This will timeout if we don't have a link. */
+ err = rcar_pcie_wait_for_dl(pcie);
+ if (err)
+ return err;
+
+ /* Enable INTx interrupts */
+ rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
+
+ wmb();
+
+ return 0;
+}
+
+static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
+{
+ unsigned int timeout = 10;
+
+ /* Initialize the phy */
+ phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
+ phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
+ phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
+ phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
+ phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
+ phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
+ phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
+ phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
+ phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
+ phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
+ phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
+ phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);
+
+ phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
+ phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
+ phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
+
+ while (timeout--) {
+ if (rcar_pci_read_reg(pcie, H1_PCIEPHYSR))
+ return rcar_pcie_hw_init(pcie);
+
+ msleep(5);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int rcar_msi_alloc(struct rcar_msi *chip)
+{
+ int msi;
+
+ mutex_lock(&chip->lock);
+
+ msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
+ if (msi < INT_PCI_MSI_NR)
+ set_bit(msi, chip->used);
+ else
+ msi = -ENOSPC;
+
+ mutex_unlock(&chip->lock);
+
+ return msi;
+}
+
+static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
+{
+ mutex_lock(&chip->lock);
+ clear_bit(irq, chip->used);
+ mutex_unlock(&chip->lock);
+}
+
+static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
+{
+ struct rcar_pcie *pcie = data;
+ struct rcar_msi *msi = &pcie->msi;
+ unsigned long reg;
+
+ reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
+
+ /* MSI & INTx share an interrupt - we only handle MSI here */
+ if (!reg)
+ return IRQ_NONE;
+
+ while (reg) {
+ unsigned int index = find_first_bit(&reg, 32);
+ unsigned int irq;
+
+ /* clear the interrupt */
+ rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);
+
+ irq = irq_find_mapping(msi->domain, index);
+ if (irq) {
+ if (test_bit(index, msi->used))
+ generic_handle_irq(irq);
+ else
+ dev_info(pcie->dev, "unhandled MSI\n");
+ } else {
+ /* Unknown MSI, just clear it */
+ dev_dbg(pcie->dev, "unexpected MSI\n");
+ }
+
+ /* see if there's any more pending in this vector */
+ reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
+ }
+
+ return IRQ_HANDLED;
+}
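
The handler above drains PCIEMSIFR one set bit at a time, clearing each vector
by writing its bit back before re-reading the register. The dispatch order can
be modelled with plain bit arithmetic; the register value 0x12 (vectors 1 and
4 pending) is illustrative:

    #include <stdio.h>

    int main(void)
    {
        unsigned long reg = 0x12;   /* pretend PCIEMSIFR snapshot */

        while (reg) {
            unsigned int index = __builtin_ctzl(reg);

            printf("handle MSI vector %u\n", index);
            reg &= ~(1UL << index); /* models the write-to-clear */
        }
        return 0;
    }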
+
+static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ struct rcar_msi *msi = to_rcar_msi(chip);
+ struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
+ struct msi_msg msg;
+ unsigned int irq;
+ int hwirq;
+
+ hwirq = rcar_msi_alloc(msi);
+ if (hwirq < 0)
+ return hwirq;
+
+ irq = irq_create_mapping(msi->domain, hwirq);
+ if (!irq) {
+ rcar_msi_free(msi, hwirq);
+ return -EINVAL;
+ }
+
+ irq_set_msi_desc(irq, desc);
+
+ msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+ msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
+ msg.data = hwirq;
+
+ pci_write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+{
+ struct rcar_msi *msi = to_rcar_msi(chip);
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ rcar_msi_free(msi, d->hwirq);
+}
+
+static struct irq_chip rcar_msi_irq_chip = {
+ .name = "R-Car PCIe MSI",
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = rcar_msi_map,
+};
+
+static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
+{
+ struct platform_device *pdev = to_platform_device(pcie->dev);
+ struct rcar_msi *msi = &pcie->msi;
+ unsigned long base;
+ int err;
+
+ mutex_init(&msi->lock);
+
+ msi->chip.dev = pcie->dev;
+ msi->chip.setup_irq = rcar_msi_setup_irq;
+ msi->chip.teardown_irq = rcar_msi_teardown_irq;
+
+ msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
+ &msi_domain_ops, &msi->chip);
+ if (!msi->domain) {
+ dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ /* Two irqs are for MSI, but they are also used for non-MSI irqs */
+ err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
+ IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ goto err;
+ }
+
+ err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
+ IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ goto err;
+ }
+
+ /* setup MSI data target */
+ msi->pages = __get_free_pages(GFP_KERNEL, 0);
+ base = virt_to_phys((void *)msi->pages);
+
+ rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
+ rcar_pci_write_reg(pcie, 0, PCIEMSIAUR);
+
+ /* enable all MSI interrupts */
+ rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
+
+ return 0;
+
+err:
+ irq_domain_remove(msi->domain);
+ return err;
+}
+
+static int rcar_pcie_get_resources(struct platform_device *pdev,
+ struct rcar_pcie *pcie)
+{
+ struct resource res;
+ int err, i;
+
+ err = of_address_to_resource(pdev->dev.of_node, 0, &res);
+ if (err)
+ return err;
+
+ pcie->clk = devm_clk_get(&pdev->dev, "pcie");
+ if (IS_ERR(pcie->clk)) {
+ dev_err(pcie->dev, "cannot get platform clock\n");
+ return PTR_ERR(pcie->clk);
+ }
+ err = clk_prepare_enable(pcie->clk);
+ if (err)
+ goto fail_clk;
+
+ pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
+ if (IS_ERR(pcie->bus_clk)) {
+ dev_err(pcie->dev, "cannot get pcie bus clock\n");
+ err = PTR_ERR(pcie->bus_clk);
+ goto fail_clk;
+ }
+ err = clk_prepare_enable(pcie->bus_clk);
+ if (err)
+ goto err_map_reg;
+
+ i = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!i) {
+ dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
+ err = -ENOENT;
+ goto err_map_reg;
+ }
+ pcie->msi.irq1 = i;
+
+ i = irq_of_parse_and_map(pdev->dev.of_node, 1);
+ if (!i) {
+ dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
+ err = -ENOENT;
+ goto err_map_reg;
+ }
+ pcie->msi.irq2 = i;
+
+ pcie->base = devm_ioremap_resource(&pdev->dev, &res);
+ if (IS_ERR(pcie->base)) {
+ err = PTR_ERR(pcie->base);
+ goto err_map_reg;
+ }
+
+ return 0;
+
+err_map_reg:
+ clk_disable_unprepare(pcie->bus_clk);
+fail_clk:
+ clk_disable_unprepare(pcie->clk);
+
+ return err;
+}
+
+static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
+ struct of_pci_range *range,
+ int *index)
+{
+ u64 restype = range->flags;
+ u64 cpu_addr = range->cpu_addr;
+ u64 cpu_end = range->cpu_addr + range->size;
+ u64 pci_addr = range->pci_addr;
+ u32 flags = LAM_64BIT | LAR_ENABLE;
+ u64 mask;
+ u64 size;
+ int idx = *index;
+
+ if (restype & IORESOURCE_PREFETCH)
+ flags |= LAM_PREFETCH;
+
+ /*
+ * If the size of the range is larger than the alignment of the start
+ * address, we have to use multiple entries to perform the mapping.
+ */
+ if (cpu_addr > 0) {
+ unsigned long nr_zeros = __ffs64(cpu_addr);
+ u64 alignment = 1ULL << nr_zeros;
+
+ size = min(range->size, alignment);
+ } else {
+ size = range->size;
+ }
+ /* Hardware supports max 4GiB inbound region */
+ size = min(size, 1ULL << 32);
+
+ mask = roundup_pow_of_two(size) - 1;
+ mask &= ~0xf;
+
+ while (cpu_addr < cpu_end) {
+ /*
+ * Set up 64-bit inbound regions as the range parser doesn't
+ * distinguish between 32 and 64-bit types.
+ */
+ rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx));
+ rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
+ rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx));
+
+ rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1));
+ rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1));
+ rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));
+
+ pci_addr += size;
+ cpu_addr += size;
+ idx += 2;
+
+ if (idx > MAX_NR_INBOUND_MAPS) {
+ dev_err(pcie->dev, "Failed to map inbound regions!\n");
+ return -EINVAL;
+ }
+ }
+ *index = idx;
+
+ return 0;
+}
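
The chunking above caps each inbound entry at the alignment of the start
address (and at the 4 GiB hardware limit), consuming two PCIEPRAR/PCIELAR
slots per chunk. A standalone walk-through with illustrative numbers: a 1 GiB
range at 0x20000000 ends up as two 512 MiB entries.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t cpu_addr = 0x20000000, size = 0x40000000;
        uint64_t cpu_end = cpu_addr + size;
        uint64_t align = 1ULL << __builtin_ctzll(cpu_addr);
        uint64_t chunk = size < align ? size : align;
        int idx = 0;

        for (; cpu_addr < cpu_end; cpu_addr += chunk, idx += 2)
            printf("entry %d: cpu 0x%llx, size 0x%llx\n", idx,
                   (unsigned long long)cpu_addr,
                   (unsigned long long)chunk);
        return 0;
    }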
+
+static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
+ struct device_node *node)
+{
+ const int na = 3, ns = 2;
+ int rlen;
+
+ parser->node = node;
+ parser->pna = of_n_addr_cells(node);
+ parser->np = parser->pna + na + ns;
+
+ parser->range = of_get_property(node, "dma-ranges", &rlen);
+ if (!parser->range)
+ return -ENOENT;
+
+ parser->end = parser->range + rlen / sizeof(__be32);
+ return 0;
+}
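
Each dma-ranges entry parsed above spans pna + na + ns cells: a 3-cell PCI
address, a 2-cell size, plus the parent's address cells. Assuming a parent
with #address-cells = 2 (an illustrative value), that is 7 u32 cells, or 28
bytes, per entry:

    #include <stdio.h>

    int main(void)
    {
        const int na = 3, ns = 2;
        const int pna = 2;  /* assumed parent #address-cells */
        int np = pna + na + ns;

        printf("%d cells (%d bytes) per dma-ranges entry\n", np, np * 4);
        return 0;
    }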
+
+static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
+ struct device_node *np)
+{
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ int index = 0;
+ int err;
+
+ if (pci_dma_range_parser_init(&parser, np))
+ return -EINVAL;
+
+ /* Get the dma-ranges from DT */
+ for_each_of_pci_range(&parser, &range) {
+ u64 end = range.cpu_addr + range.size - 1;
+ dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
+ range.flags, range.cpu_addr, end, range.pci_addr);
+
+ err = rcar_pcie_inbound_ranges(pcie, &range, &index);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id rcar_pcie_of_match[] = {
+ { .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
+ { .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_hw_init },
+ { .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_hw_init },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rcar_pcie_of_match);
+
+static int rcar_pcie_probe(struct platform_device *pdev)
+{
+ struct rcar_pcie *pcie;
+ unsigned int data;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ const struct of_device_id *of_id;
+ int err, win = 0;
+ int (*hw_init_fn)(struct rcar_pcie *);
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->dev = &pdev->dev;
+ platform_set_drvdata(pdev, pcie);
+
+ /* Get the bus range */
+ if (of_pci_parse_bus_range(pdev->dev.of_node, &pcie->busn)) {
+ dev_err(&pdev->dev, "failed to parse bus-range property\n");
+ return -EINVAL;
+ }
+
+ if (of_pci_range_parser_init(&parser, pdev->dev.of_node)) {
+ dev_err(&pdev->dev, "missing ranges property\n");
+ return -EINVAL;
+ }
+
+ err = rcar_pcie_get_resources(pdev, pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request resources: %d\n", err);
+ return err;
+ }
+
+ for_each_of_pci_range(&parser, &range) {
+ err = of_pci_range_to_resource(&range, pdev->dev.of_node,
+ &pcie->res[win++]);
+ if (err < 0)
+ return err;
+
+ if (win > RCAR_PCI_MAX_RESOURCES)
+ break;
+ }
+
+ err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
+ if (err)
+ return err;
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ err = rcar_pcie_enable_msi(pcie);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to enable MSI support: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ of_id = of_match_device(rcar_pcie_of_match, pcie->dev);
+ if (!of_id || !of_id->data)
+ return -EINVAL;
+ hw_init_fn = of_id->data;
+
+ /* Failure to get a link might just be that no cards are inserted */
+ err = hw_init_fn(pcie);
+ if (err) {
+ dev_info(&pdev->dev, "PCIe link down\n");
+ return 0;
+ }
+
+ data = rcar_pci_read_reg(pcie, MACSR);
+ dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
+
+ rcar_pcie_enable(pcie);
+
+ return 0;
+}
+
+static struct platform_driver rcar_pcie_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = rcar_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = rcar_pcie_probe,
+};
+module_platform_driver(rcar_pcie_driver);
+
+MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
+MODULE_DESCRIPTION("Renesas R-Car PCIe driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
new file mode 100644
index 000000000..020d78890
--- /dev/null
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -0,0 +1,391 @@
+/*
+ * PCIe host controller driver for ST Microelectronics SPEAr13xx SoCs
+ *
+ * SPEAr13xx PCIe Glue Layer Source Code
+ *
+ * Copyright (C) 2010-2014 ST Microelectronics
+ * Pratyush Anand <pratyush.anand@st.com>
+ * Mohit Kumar <mohit.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+
+#include "pcie-designware.h"
+
+struct spear13xx_pcie {
+ void __iomem *app_base;
+ struct phy *phy;
+ struct clk *clk;
+ struct pcie_port pp;
+ bool is_gen1;
+};
+
+struct pcie_app_reg {
+ u32 app_ctrl_0; /* cr0 */
+ u32 app_ctrl_1; /* cr1 */
+ u32 app_status_0; /* cr2 */
+ u32 app_status_1; /* cr3 */
+ u32 msg_status; /* cr4 */
+ u32 msg_payload; /* cr5 */
+ u32 int_sts; /* cr6 */
+ u32 int_clr; /* cr7 */
+ u32 int_mask; /* cr8 */
+ u32 mst_bmisc; /* cr9 */
+ u32 phy_ctrl; /* cr10 */
+ u32 phy_status; /* cr11 */
+ u32 cxpl_debug_info_0; /* cr12 */
+ u32 cxpl_debug_info_1; /* cr13 */
+ u32 ven_msg_ctrl_0; /* cr14 */
+ u32 ven_msg_ctrl_1; /* cr15 */
+ u32 ven_msg_data_0; /* cr16 */
+ u32 ven_msg_data_1; /* cr17 */
+ u32 ven_msi_0; /* cr18 */
+ u32 ven_msi_1; /* cr19 */
+ u32 mst_rmisc; /* cr20 */
+};
+
+/* CR0 ID */
+#define RX_LANE_FLIP_EN_ID 0
+#define TX_LANE_FLIP_EN_ID 1
+#define SYS_AUX_PWR_DET_ID 2
+#define APP_LTSSM_ENABLE_ID 3
+#define SYS_ATTEN_BUTTON_PRESSED_ID 4
+#define SYS_MRL_SENSOR_STATE_ID 5
+#define SYS_PWR_FAULT_DET_ID 6
+#define SYS_MRL_SENSOR_CHGED_ID 7
+#define SYS_PRE_DET_CHGED_ID 8
+#define SYS_CMD_CPLED_INT_ID 9
+#define APP_INIT_RST_0_ID 11
+#define APP_REQ_ENTR_L1_ID 12
+#define APP_READY_ENTR_L23_ID 13
+#define APP_REQ_EXIT_L1_ID 14
+#define DEVICE_TYPE_EP (0 << 25)
+#define DEVICE_TYPE_LEP (1 << 25)
+#define DEVICE_TYPE_RC (4 << 25)
+#define SYS_INT_ID 29
+#define MISCTRL_EN_ID 30
+#define REG_TRANSLATION_ENABLE 31
+
+/* CR1 ID */
+#define APPS_PM_XMT_TURNOFF_ID 2
+#define APPS_PM_XMT_PME_ID 5
+
+/* CR3 ID */
+#define XMLH_LTSSM_STATE_DETECT_QUIET 0x00
+#define XMLH_LTSSM_STATE_DETECT_ACT 0x01
+#define XMLH_LTSSM_STATE_POLL_ACTIVE 0x02
+#define XMLH_LTSSM_STATE_POLL_COMPLIANCE 0x03
+#define XMLH_LTSSM_STATE_POLL_CONFIG 0x04
+#define XMLH_LTSSM_STATE_PRE_DETECT_QUIET 0x05
+#define XMLH_LTSSM_STATE_DETECT_WAIT 0x06
+#define XMLH_LTSSM_STATE_CFG_LINKWD_START 0x07
+#define XMLH_LTSSM_STATE_CFG_LINKWD_ACEPT 0x08
+#define XMLH_LTSSM_STATE_CFG_LANENUM_WAIT 0x09
+#define XMLH_LTSSM_STATE_CFG_LANENUM_ACEPT 0x0A
+#define XMLH_LTSSM_STATE_CFG_COMPLETE 0x0B
+#define XMLH_LTSSM_STATE_CFG_IDLE 0x0C
+#define XMLH_LTSSM_STATE_RCVRY_LOCK 0x0D
+#define XMLH_LTSSM_STATE_RCVRY_SPEED 0x0E
+#define XMLH_LTSSM_STATE_RCVRY_RCVRCFG 0x0F
+#define XMLH_LTSSM_STATE_RCVRY_IDLE 0x10
+#define XMLH_LTSSM_STATE_L0 0x11
+#define XMLH_LTSSM_STATE_L0S 0x12
+#define XMLH_LTSSM_STATE_L123_SEND_EIDLE 0x13
+#define XMLH_LTSSM_STATE_L1_IDLE 0x14
+#define XMLH_LTSSM_STATE_L2_IDLE 0x15
+#define XMLH_LTSSM_STATE_L2_WAKE 0x16
+#define XMLH_LTSSM_STATE_DISABLED_ENTRY 0x17
+#define XMLH_LTSSM_STATE_DISABLED_IDLE 0x18
+#define XMLH_LTSSM_STATE_DISABLED 0x19
+#define XMLH_LTSSM_STATE_LPBK_ENTRY 0x1A
+#define XMLH_LTSSM_STATE_LPBK_ACTIVE 0x1B
+#define XMLH_LTSSM_STATE_LPBK_EXIT 0x1C
+#define XMLH_LTSSM_STATE_LPBK_EXIT_TIMEOUT 0x1D
+#define XMLH_LTSSM_STATE_HOT_RESET_ENTRY 0x1E
+#define XMLH_LTSSM_STATE_HOT_RESET 0x1F
+#define XMLH_LTSSM_STATE_MASK 0x3F
+#define XMLH_LINK_UP (1 << 6)
+
+/* CR4 ID */
+#define CFG_MSI_EN_ID 18
+
+/* CR6 */
+#define INTA_CTRL_INT (1 << 7)
+#define INTB_CTRL_INT (1 << 8)
+#define INTC_CTRL_INT (1 << 9)
+#define INTD_CTRL_INT (1 << 10)
+#define MSI_CTRL_INT (1 << 26)
+
+/* CR19 ID */
+#define VEN_MSI_REQ_ID 11
+#define VEN_MSI_FUN_NUM_ID 8
+#define VEN_MSI_TC_ID 5
+#define VEN_MSI_VECTOR_ID 0
+#define VEN_MSI_REQ_EN ((u32)0x1 << VEN_MSI_REQ_ID)
+#define VEN_MSI_FUN_NUM_MASK ((u32)0x7 << VEN_MSI_FUN_NUM_ID)
+#define VEN_MSI_TC_MASK ((u32)0x7 << VEN_MSI_TC_ID)
+#define VEN_MSI_VECTOR_MASK ((u32)0x1F << VEN_MSI_VECTOR_ID)
+
+#define EXP_CAP_ID_OFFSET 0x70
+
+#define to_spear13xx_pcie(x) container_of(x, struct spear13xx_pcie, pp)
+
+static int spear13xx_pcie_establish_link(struct pcie_port *pp)
+{
+ u32 val;
+ int count = 0;
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ u32 exp_cap_off = EXP_CAP_ID_OFFSET;
+
+ if (dw_pcie_link_up(pp)) {
+ dev_err(pp->dev, "link already up\n");
+ return 0;
+ }
+
+ dw_pcie_setup_rc(pp);
+
+ /*
+ * This controller supports only a 128-byte read request size, but
+ * its default value in the capability register is 512 bytes, so
+ * force it to 128 here.
+ */
+ dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_DEVCTL, 4, &val);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+ dw_pcie_cfg_write(pp->dbi_base, exp_cap_off + PCI_EXP_DEVCTL, 4, val);
+
+ dw_pcie_cfg_write(pp->dbi_base, PCI_VENDOR_ID, 2, 0x104A);
+ dw_pcie_cfg_write(pp->dbi_base, PCI_DEVICE_ID, 2, 0xCD80);
+
+ /*
+ * If is_gen1 is set, limit the link to Gen1 speed so that some
+ * buggy cards also work.
+ */
+ if (spear13xx_pcie->is_gen1) {
+ dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_LNKCAP, 4,
+ &val);
+ if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
+ val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ val |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_cfg_write(pp->dbi_base, exp_cap_off +
+ PCI_EXP_LNKCAP, 4, val);
+ }
+
+ dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_LNKCTL2, 4,
+ &val);
+ if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
+ val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ val |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_cfg_write(pp->dbi_base, exp_cap_off +
+ PCI_EXP_LNKCTL2, 4, val);
+ }
+ }
+
+ /* enable ltssm */
+ writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)
+ | (1 << APP_LTSSM_ENABLE_ID)
+ | ((u32)1 << REG_TRANSLATION_ENABLE),
+ &app_reg->app_ctrl_0);
+
+ /* check if the link is up or not */
+ while (!dw_pcie_link_up(pp)) {
+ mdelay(100);
+ count++;
+ if (count == 10) {
+ dev_err(pp->dev, "link Fail\n");
+ return -EINVAL;
+ }
+ }
+ dev_info(pp->dev, "link up\n");
+
+ return 0;
+}
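
The Gen1 clamp above only rewrites the Supported Link Speeds field, the low
nibble of LNKCAP and LNKCTL2, and preserves the rest of each register. The
same bit surgery on a made-up Gen2-capable LNKCAP value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t val = 0x0003f442;      /* pretend LNKCAP: x4, 5 GT/s */

        if ((val & 0xf) != 0x1) {       /* PCI_EXP_LNKCAP_SLS_2_5GB */
            val &= ~(uint32_t)0xf;      /* PCI_EXP_LNKCAP_SLS */
            val |= 0x1;
        }
        printf("LNKCAP = 0x%08x\n", val);   /* 0x0003f441 */
        return 0;
    }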
+
+static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ unsigned int status;
+
+ status = readl(&app_reg->int_sts);
+
+ if (status & MSI_CTRL_INT) {
+ if (!IS_ENABLED(CONFIG_PCI_MSI))
+ BUG();
+ dw_handle_msi_irq(pp);
+ }
+
+ writel(status, &app_reg->int_clr);
+
+ return IRQ_HANDLED;
+}
+
+static void spear13xx_pcie_enable_interrupts(struct pcie_port *pp)
+{
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+
+ /* Enable MSI interrupt */
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ dw_pcie_msi_init(pp);
+ writel(readl(&app_reg->int_mask) |
+ MSI_CTRL_INT, &app_reg->int_mask);
+ }
+}
+
+static int spear13xx_pcie_link_up(struct pcie_port *pp)
+{
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+
+ if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
+ return 1;
+
+ return 0;
+}
+
+static void spear13xx_pcie_host_init(struct pcie_port *pp)
+{
+ spear13xx_pcie_establish_link(pp);
+ spear13xx_pcie_enable_interrupts(pp);
+}
+
+static struct pcie_host_ops spear13xx_pcie_host_ops = {
+ .link_up = spear13xx_pcie_link_up,
+ .host_init = spear13xx_pcie_host_init,
+};
+
+static int spear13xx_add_pcie_port(struct pcie_port *pp,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (!pp->irq) {
+ dev_err(dev, "failed to get irq\n");
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
+ IRQF_SHARED, "spear1340-pcie", pp);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d\n", pp->irq);
+ return ret;
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &spear13xx_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int spear13xx_pcie_probe(struct platform_device *pdev)
+{
+ struct spear13xx_pcie *spear13xx_pcie;
+ struct pcie_port *pp;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *dbi_base;
+ int ret;
+
+ spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL);
+ if (!spear13xx_pcie)
+ return -ENOMEM;
+
+ spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(spear13xx_pcie->phy)) {
+ ret = PTR_ERR(spear13xx_pcie->phy);
+ if (ret == -EPROBE_DEFER)
+ dev_info(dev, "probe deferred\n");
+ else
+ dev_err(dev, "couldn't get pcie-phy\n");
+ return ret;
+ }
+
+ phy_init(spear13xx_pcie->phy);
+
+ spear13xx_pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(spear13xx_pcie->clk)) {
+ dev_err(dev, "couldn't get clk for pcie\n");
+ return PTR_ERR(spear13xx_pcie->clk);
+ }
+ ret = clk_prepare_enable(spear13xx_pcie->clk);
+ if (ret) {
+ dev_err(dev, "couldn't enable clk for pcie\n");
+ return ret;
+ }
+
+ pp = &spear13xx_pcie->pp;
+
+ pp->dev = dev;
+
+ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
+ if (IS_ERR(pp->dbi_base)) {
+ dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
+ ret = PTR_ERR(pp->dbi_base);
+ goto fail_clk;
+ }
+ spear13xx_pcie->app_base = pp->dbi_base + 0x2000;
+
+ if (of_property_read_bool(np, "st,pcie-is-gen1"))
+ spear13xx_pcie->is_gen1 = true;
+
+ ret = spear13xx_add_pcie_port(pp, pdev);
+ if (ret < 0)
+ goto fail_clk;
+
+ platform_set_drvdata(pdev, spear13xx_pcie);
+ return 0;
+
+fail_clk:
+ clk_disable_unprepare(spear13xx_pcie->clk);
+
+ return ret;
+}
+
+static const struct of_device_id spear13xx_pcie_of_match[] = {
+ { .compatible = "st,spear1340-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, spear13xx_pcie_of_match);
+
+static struct platform_driver spear13xx_pcie_driver = {
+ .probe = spear13xx_pcie_probe,
+ .driver = {
+ .name = "spear-pcie",
+ .of_match_table = of_match_ptr(spear13xx_pcie_of_match),
+ },
+};
+
+/* SPEAr13xx PCIe driver does not allow module unload */
+
+static int __init spear13xx_pcie_init(void)
+{
+ return platform_driver_register(&spear13xx_pcie_driver);
+}
+module_init(spear13xx_pcie_init);
+
+MODULE_DESCRIPTION("ST Microelectronics SPEAr13xx PCIe host controller driver");
+MODULE_AUTHOR("Pratyush Anand <pratyush.anand@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
new file mode 100644
index 000000000..f1a06a091
--- /dev/null
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -0,0 +1,890 @@
+/*
+ * PCIe host controller driver for Xilinx AXI PCIe Bridge
+ *
+ * Copyright (c) 2012 - 2014 Xilinx, Inc.
+ *
+ * Based on the Tegra PCIe driver
+ *
+ * Bits taken from Synopsys Designware Host controller driver and
+ * ARM PCI Host generic driver.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+/* Register definitions */
+#define XILINX_PCIE_REG_BIR 0x00000130
+#define XILINX_PCIE_REG_IDR 0x00000138
+#define XILINX_PCIE_REG_IMR 0x0000013c
+#define XILINX_PCIE_REG_PSCR 0x00000144
+#define XILINX_PCIE_REG_RPSC 0x00000148
+#define XILINX_PCIE_REG_MSIBASE1 0x0000014c
+#define XILINX_PCIE_REG_MSIBASE2 0x00000150
+#define XILINX_PCIE_REG_RPEFR 0x00000154
+#define XILINX_PCIE_REG_RPIFR1 0x00000158
+#define XILINX_PCIE_REG_RPIFR2 0x0000015c
+
+/* Interrupt registers definitions */
+#define XILINX_PCIE_INTR_LINK_DOWN BIT(0)
+#define XILINX_PCIE_INTR_ECRC_ERR BIT(1)
+#define XILINX_PCIE_INTR_STR_ERR BIT(2)
+#define XILINX_PCIE_INTR_HOT_RESET BIT(3)
+#define XILINX_PCIE_INTR_CFG_TIMEOUT BIT(8)
+#define XILINX_PCIE_INTR_CORRECTABLE BIT(9)
+#define XILINX_PCIE_INTR_NONFATAL BIT(10)
+#define XILINX_PCIE_INTR_FATAL BIT(11)
+#define XILINX_PCIE_INTR_INTX BIT(16)
+#define XILINX_PCIE_INTR_MSI BIT(17)
+#define XILINX_PCIE_INTR_SLV_UNSUPP BIT(20)
+#define XILINX_PCIE_INTR_SLV_UNEXP BIT(21)
+#define XILINX_PCIE_INTR_SLV_COMPL BIT(22)
+#define XILINX_PCIE_INTR_SLV_ERRP BIT(23)
+#define XILINX_PCIE_INTR_SLV_CMPABT BIT(24)
+#define XILINX_PCIE_INTR_SLV_ILLBUR BIT(25)
+#define XILINX_PCIE_INTR_MST_DECERR BIT(26)
+#define XILINX_PCIE_INTR_MST_SLVERR BIT(27)
+#define XILINX_PCIE_INTR_MST_ERRP BIT(28)
+#define XILINX_PCIE_IMR_ALL_MASK 0x1FF30FED
+#define XILINX_PCIE_IDR_ALL_MASK 0xFFFFFFFF
+
+/* Root Port Error FIFO Read Register definitions */
+#define XILINX_PCIE_RPEFR_ERR_VALID BIT(18)
+#define XILINX_PCIE_RPEFR_REQ_ID GENMASK(15, 0)
+#define XILINX_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF
+
+/* Root Port Interrupt FIFO Read Register 1 definitions */
+#define XILINX_PCIE_RPIFR1_INTR_VALID BIT(31)
+#define XILINX_PCIE_RPIFR1_MSI_INTR BIT(30)
+#define XILINX_PCIE_RPIFR1_INTR_MASK GENMASK(28, 27)
+#define XILINX_PCIE_RPIFR1_ALL_MASK 0xFFFFFFFF
+#define XILINX_PCIE_RPIFR1_INTR_SHIFT 27
+
+/* Bridge Info Register definitions */
+#define XILINX_PCIE_BIR_ECAM_SZ_MASK GENMASK(18, 16)
+#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT 16
+
+/* Root Port Interrupt FIFO Read Register 2 definitions */
+#define XILINX_PCIE_RPIFR2_MSG_DATA GENMASK(15, 0)
+
+/* Root Port Status/control Register definitions */
+#define XILINX_PCIE_REG_RPSC_BEN BIT(0)
+
+/* Phy Status/Control Register definitions */
+#define XILINX_PCIE_REG_PSCR_LNKUP BIT(11)
+
+/* ECAM definitions */
+#define ECAM_BUS_NUM_SHIFT 20
+#define ECAM_DEV_NUM_SHIFT 12
+
+/* Number of MSI IRQs */
+#define XILINX_NUM_MSI_IRQS 128
+
+/* Number of Memory Resources */
+#define XILINX_MAX_NUM_RESOURCES 3
+
+/**
+ * struct xilinx_pcie_port - PCIe port information
+ * @reg_base: IO Mapped Register Base
+ * @irq: Interrupt number
+ * @msi_pages: MSI pages
+ * @root_busno: Root Bus number
+ * @dev: Device pointer
+ * @irq_domain: IRQ domain pointer
+ * @bus_range: Bus range
+ * @resources: Bus Resources
+ */
+struct xilinx_pcie_port {
+ void __iomem *reg_base;
+ u32 irq;
+ unsigned long msi_pages;
+ u8 root_busno;
+ struct device *dev;
+ struct irq_domain *irq_domain;
+ struct resource bus_range;
+ struct list_head resources;
+};
+
+static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
+
+static inline struct xilinx_pcie_port *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
+{
+ return readl(port->reg_base + reg);
+}
+
+static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
+{
+ writel(val, port->reg_base + reg);
+}
+
+static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
+{
+ return (pcie_read(port, XILINX_PCIE_REG_PSCR) &
+ XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
+}
+
+/**
+ * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
+{
+ unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
+
+ if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
+ dev_dbg(port->dev, "Requester ID %lu\n",
+ val & XILINX_PCIE_RPEFR_REQ_ID);
+ pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
+ XILINX_PCIE_REG_RPEFR);
+ }
+}
+
+/**
+ * xilinx_pcie_valid_device - Check if a valid device is present on bus
+ * @bus: PCI Bus structure
+ * @devfn: device/function
+ *
+ * Return: 'true' if a valid device is present, 'false' otherwise
+ */
+static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+ struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
+
+ /* Check if link is up when trying to access downstream ports */
+ if (bus->number != port->root_busno)
+ if (!xilinx_pcie_link_is_up(port))
+ return false;
+
+ /* Only one device down on each root port */
+ if (bus->number == port->root_busno && devfn > 0)
+ return false;
+
+ /*
+ * Do not read more than one device on the bus directly attached
+ * to RC.
+ */
+ if (bus->primary == port->root_busno && devfn > 0)
+ return false;
+
+ return true;
+}
+
+/**
+ * xilinx_pcie_map_bus - Get configuration base
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ *
+ * Return: Base address of the configuration space to be accessed.
+ */
+static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus,
+ unsigned int devfn, int where)
+{
+ struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
+ int relbus;
+
+ if (!xilinx_pcie_valid_device(bus, devfn))
+ return NULL;
+
+ relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
+ (devfn << ECAM_DEV_NUM_SHIFT);
+
+ return port->reg_base + relbus + where;
+}
+
+/* PCIe operations */
+static struct pci_ops xilinx_pcie_ops = {
+ .map_bus = xilinx_pcie_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+/* MSI functions */
+
+/**
+ * xilinx_pcie_destroy_msi - Free MSI number
+ * @irq: IRQ to be freed
+ */
+static void xilinx_pcie_destroy_msi(unsigned int irq)
+{
+ struct irq_desc *desc;
+ struct msi_desc *msi;
+ struct xilinx_pcie_port *port;
+
+ desc = irq_to_desc(irq);
+ msi = irq_desc_get_msi_desc(desc);
+ port = sys_to_pcie(msi->dev->bus->sysdata);
+
+ if (!test_bit(irq, msi_irq_in_use))
+ dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
+ else
+ clear_bit(irq, msi_irq_in_use);
+}
+
+/**
+ * xilinx_pcie_assign_msi - Allocate MSI number
+ * @port: PCIe port structure
+ *
+ * Return: A valid IRQ on success and error value on failure.
+ */
+static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port)
+{
+ int pos;
+
+ pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
+ if (pos < XILINX_NUM_MSI_IRQS)
+ set_bit(pos, msi_irq_in_use);
+ else
+ return -ENOSPC;
+
+ return pos;
+}
+
+/**
+ * xilinx_msi_teardown_irq - Destroy the MSI
+ * @chip: MSI Chip descriptor
+ * @irq: MSI IRQ to destroy
+ */
+static void xilinx_msi_teardown_irq(struct msi_controller *chip,
+ unsigned int irq)
+{
+ xilinx_pcie_destroy_msi(irq);
+}
+
+/**
+ * xilinx_pcie_msi_setup_irq - Setup MSI request
+ * @chip: MSI chip pointer
+ * @pdev: PCIe device pointer
+ * @desc: MSI descriptor pointer
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip,
+ struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ struct xilinx_pcie_port *port = sys_to_pcie(pdev->bus->sysdata);
+ unsigned int irq;
+ int hwirq;
+ struct msi_msg msg;
+ phys_addr_t msg_addr;
+
+ hwirq = xilinx_pcie_assign_msi(port);
+ if (hwirq < 0)
+ return hwirq;
+
+ irq = irq_create_mapping(port->irq_domain, hwirq);
+ if (!irq)
+ return -EINVAL;
+
+ irq_set_msi_desc(irq, desc);
+
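+ /*
+ * Use the physical address of the page reserved in
+ * xilinx_pcie_enable_msi() as the MSI target, and the virtual IRQ
+ * number as the MSI data so the interrupt handler can dispatch it
+ * straight to generic_handle_irq().
+ */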
+ msg_addr = virt_to_phys((void *)port->msi_pages);
+
+ msg.address_hi = 0;
+ msg.address_lo = msg_addr;
+ msg.data = irq;
+
+ pci_write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+/* MSI Chip Descriptor */
+static struct msi_controller xilinx_pcie_msi_chip = {
+ .setup_irq = xilinx_pcie_msi_setup_irq,
+ .teardown_irq = xilinx_msi_teardown_irq,
+};
+
+/* HW Interrupt Chip Descriptor */
+static struct irq_chip xilinx_msi_irq_chip = {
+ .name = "Xilinx PCIe MSI",
+ .irq_enable = pci_msi_unmask_irq,
+ .irq_disable = pci_msi_mask_irq,
+ .irq_mask = pci_msi_mask_irq,
+ .irq_unmask = pci_msi_unmask_irq,
+};
+
+/**
+ * xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+/* IRQ Domain operations */
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = xilinx_pcie_msi_map,
+};
+
+/**
+ * xilinx_pcie_enable_msi - Enable MSI support
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
+{
+ phys_addr_t msg_addr;
+
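+ /*
+ * Reserve a page to act as the MSI target address; endpoint writes to
+ * it are decoded by the bridge and raise an interrupt, the page
+ * contents themselves are never read.
+ */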
+ port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
+ msg_addr = virt_to_phys((void *)port->msi_pages);
+ pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
+ pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
+}
+
+/* INTx Functions */
+
+/**
+ * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+/* INTx IRQ Domain operations */
+static const struct irq_domain_ops intx_domain_ops = {
+ .map = xilinx_pcie_intx_map,
+};
+
+/* PCIe HW Functions */
+
+/**
+ * xilinx_pcie_intr_handler - Interrupt Service Handler
+ * @irq: IRQ number
+ * @data: PCIe port information
+ *
+ * Return: IRQ_HANDLED if the interrupt was serviced, IRQ_NONE otherwise
+ */
+static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
+{
+ struct xilinx_pcie_port *port = data;
+ u32 val, mask, status, msi_data;
+
+ /* Read interrupt decode and mask registers */
+ val = pcie_read(port, XILINX_PCIE_REG_IDR);
+ mask = pcie_read(port, XILINX_PCIE_REG_IMR);
+
+ status = val & mask;
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & XILINX_PCIE_INTR_LINK_DOWN)
+ dev_warn(port->dev, "Link Down\n");
+
+ if (status & XILINX_PCIE_INTR_ECRC_ERR)
+ dev_warn(port->dev, "ECRC failed\n");
+
+ if (status & XILINX_PCIE_INTR_STR_ERR)
+ dev_warn(port->dev, "Streaming error\n");
+
+ if (status & XILINX_PCIE_INTR_HOT_RESET)
+ dev_info(port->dev, "Hot reset\n");
+
+ if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
+ dev_warn(port->dev, "ECAM access timeout\n");
+
+ if (status & XILINX_PCIE_INTR_CORRECTABLE) {
+ dev_warn(port->dev, "Correctable error message\n");
+ xilinx_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_PCIE_INTR_NONFATAL) {
+ dev_warn(port->dev, "Non fatal error message\n");
+ xilinx_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_PCIE_INTR_FATAL) {
+ dev_warn(port->dev, "Fatal error message\n");
+ xilinx_pcie_clear_err_interrupts(port);
+ }
+
+ if (status & XILINX_PCIE_INTR_INTX) {
+ /* INTx interrupt received */
+ val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
+
+ /* Check whether interrupt valid */
+ if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
+ dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+ return IRQ_HANDLED;
+ }
+
+ /* Clear interrupt FIFO register 1 */
+ pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+ XILINX_PCIE_REG_RPIFR1);
+
+ /* Handle INTx Interrupt */
+ val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
+ XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
+ generic_handle_irq(irq_find_mapping(port->irq_domain, val));
+ }
+
+ if (status & XILINX_PCIE_INTR_MSI) {
+ /* MSI Interrupt */
+ val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
+
+ if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
+ dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+ return IRQ_HANDLED;
+ }
+
+ if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
+ msi_data = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
+ XILINX_PCIE_RPIFR2_MSG_DATA;
+
+ /* Clear interrupt FIFO register 1 */
+ pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+ XILINX_PCIE_REG_RPIFR1);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ /* Handle MSI Interrupt */
+ generic_handle_irq(msi_data);
+ }
+ }
+ }
+
+ if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
+ dev_warn(port->dev, "Slave unsupported request\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_UNEXP)
+ dev_warn(port->dev, "Slave unexpected completion\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_COMPL)
+ dev_warn(port->dev, "Slave completion timeout\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_ERRP)
+ dev_warn(port->dev, "Slave Error Poison\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_CMPABT)
+ dev_warn(port->dev, "Slave Completer Abort\n");
+
+ if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
+ dev_warn(port->dev, "Slave Illegal Burst\n");
+
+ if (status & XILINX_PCIE_INTR_MST_DECERR)
+ dev_warn(port->dev, "Master decode error\n");
+
+ if (status & XILINX_PCIE_INTR_MST_SLVERR)
+ dev_warn(port->dev, "Master slave error\n");
+
+ if (status & XILINX_PCIE_INTR_MST_ERRP)
+ dev_warn(port->dev, "Master error poison\n");
+
+ /* Clear the Interrupt Decode register */
+ pcie_write(port, status, XILINX_PCIE_REG_IDR);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_pcie_free_irq_domain - Free IRQ domain
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_free_irq_domain(struct xilinx_pcie_port *port)
+{
+ int i;
+ u32 irq, num_irqs;
+
+ /* Free IRQ Domain */
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ free_pages(port->msi_pages, 0);
+ num_irqs = XILINX_NUM_MSI_IRQS;
+ } else {
+ /* INTx */
+ num_irqs = 4;
+ }
+
+ for (i = 0; i < num_irqs; i++) {
+ irq = irq_find_mapping(port->irq_domain, i);
+ if (irq > 0)
+ irq_dispose_mapping(irq);
+ }
+
+ irq_domain_remove(port->irq_domain);
+}
+
+/**
+ * xilinx_pcie_init_irq_domain - Initialize IRQ domain
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct device_node *pcie_intc_node;
+
+ /* Setup INTx */
+ pcie_intc_node = of_get_next_child(node, NULL);
+ if (!pcie_intc_node) {
+ dev_err(dev, "No PCIe Intc node found\n");
+ return -ENODEV;
+ }
+
+ port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
+ &intx_domain_ops,
+ port);
+ if (!port->irq_domain) {
+ dev_err(dev, "Failed to get an INTx IRQ domain\n");
+ return -ENODEV;
+ }
+
+ /* Setup MSI */
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ port->irq_domain = irq_domain_add_linear(node,
+ XILINX_NUM_MSI_IRQS,
+ &msi_domain_ops,
+ &xilinx_pcie_msi_chip);
+ if (!port->irq_domain) {
+ dev_err(dev, "Failed to get an MSI IRQ domain\n");
+ return -ENODEV;
+ }
+
+ xilinx_pcie_enable_msi(port);
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_pcie_init_port - Initialize hardware
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
+{
+ if (xilinx_pcie_link_is_up(port))
+ dev_info(port->dev, "PCIe Link is UP\n");
+ else
+ dev_info(port->dev, "PCIe Link is DOWN\n");
+
+ /* Disable all interrupts */
+ pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
+ XILINX_PCIE_REG_IMR);
+
+ /* Clear pending interrupts */
+ pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) &
+ XILINX_PCIE_IMR_ALL_MASK,
+ XILINX_PCIE_REG_IDR);
+
+ /* Enable all interrupts */
+ pcie_write(port, XILINX_PCIE_IMR_ALL_MASK, XILINX_PCIE_REG_IMR);
+
+ /* Enable the Bridge enable bit */
+ pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) |
+ XILINX_PCIE_REG_RPSC_BEN,
+ XILINX_PCIE_REG_RPSC);
+}
+
+/**
+ * xilinx_pcie_setup - Setup memory resources
+ * @nr: Bus number
+ * @sys: Per controller structure
+ *
+ * Return: '1' always; a non-zero value tells the ARM PCI core to scan this bus
+ */
+static int xilinx_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct xilinx_pcie_port *port = sys_to_pcie(sys);
+
+ list_splice_init(&port->resources, &sys->resources);
+
+ return 1;
+}
+
+/**
+ * xilinx_pcie_scan_bus - Scan PCIe bus for devices
+ * @nr: Bus number
+ * @sys: Per controller structure
+ *
+ * Return: Valid Bus pointer on success and NULL on failure
+ */
+static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+{
+ struct xilinx_pcie_port *port = sys_to_pcie(sys);
+ struct pci_bus *bus;
+
+ port->root_busno = sys->busnr;
+ bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops,
+ sys, &sys->resources);
+
+ return bus;
+}
+
+/**
+ * xilinx_pcie_parse_and_add_res - Add resources by parsing ranges
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *mem;
+ resource_size_t offset;
+ struct of_pci_range_parser parser;
+ struct of_pci_range range;
+ struct resource_entry *win;
+ int err = 0, mem_resno = 0;
+
+ /* Get the ranges */
+ if (of_pci_range_parser_init(&parser, node)) {
+ dev_err(dev, "missing \"ranges\" property\n");
+ return -EINVAL;
+ }
+
+ /* Parse the ranges and add the resources found to the list */
+ for_each_of_pci_range(&parser, &range) {
+ if (mem_resno >= XILINX_MAX_NUM_RESOURCES) {
+ dev_err(dev, "Maximum memory resources exceeded\n");
+ return -EINVAL;
+ }
+
+ mem = devm_kmalloc(dev, sizeof(*mem), GFP_KERNEL);
+ if (!mem) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
+
+ of_pci_range_to_resource(&range, node, mem);
+
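+ /* Only memory ranges are handled; any other range type is rejected below */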
+ switch (mem->flags & IORESOURCE_TYPE_BITS) {
+ case IORESOURCE_MEM:
+ offset = range.cpu_addr - range.pci_addr;
+ mem_resno++;
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err < 0) {
+ dev_warn(dev, "Invalid resource found %pR\n", mem);
+ continue;
+ }
+
+ err = request_resource(&iomem_resource, mem);
+ if (err)
+ goto free_resources;
+
+ pci_add_resource_offset(&port->resources, mem, offset);
+ }
+
+ /* Get the bus range */
+ if (of_pci_parse_bus_range(node, &port->bus_range)) {
+ u32 val = pcie_read(port, XILINX_PCIE_REG_BIR);
+ u8 last;
+
+ last = (val & XILINX_PCIE_BIR_ECAM_SZ_MASK) >>
+ XILINX_PCIE_BIR_ECAM_SZ_SHIFT;
+
+ port->bus_range = (struct resource) {
+ .name = node->name,
+ .start = 0,
+ .end = last,
+ .flags = IORESOURCE_BUS,
+ };
+ }
+
+ /* Register bus resource */
+ pci_add_resource(&port->resources, &port->bus_range);
+
+ return 0;
+
+free_resources:
+ release_child_resources(&iomem_resource);
+ resource_list_for_each_entry(win, &port->resources)
+ devm_kfree(dev, win->res);
+ pci_free_resource_list(&port->resources);
+
+ return err;
+}
+
+/**
+ * xilinx_pcie_parse_dt - Parse Device tree
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
+{
+ struct device *dev = port->dev;
+ struct device_node *node = dev->of_node;
+ struct resource regs;
+ const char *type;
+ int err;
+
+ type = of_get_property(node, "device_type", NULL);
+ if (!type || strcmp(type, "pci")) {
+ dev_err(dev, "invalid \"device_type\" %s\n", type);
+ return -EINVAL;
+ }
+
+ err = of_address_to_resource(node, 0, &regs);
+ if (err) {
+ dev_err(dev, "missing \"reg\" property\n");
+ return err;
+ }
+
+ port->reg_base = devm_ioremap_resource(dev, &regs);
+ if (IS_ERR(port->reg_base))
+ return PTR_ERR(port->reg_base);
+
+ port->irq = irq_of_parse_and_map(node, 0);
+ err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
+ IRQF_SHARED, "xilinx-pcie", port);
+ if (err) {
+ dev_err(dev, "unable to request irq %d\n", port->irq);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * xilinx_pcie_probe - Probe function
+ * @pdev: Platform device pointer
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_probe(struct platform_device *pdev)
+{
+ struct xilinx_pcie_port *port;
+ struct hw_pci hw;
+ struct device *dev = &pdev->dev;
+ int err;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->dev = dev;
+
+ err = xilinx_pcie_parse_dt(port);
+ if (err) {
+ dev_err(dev, "Parsing DT failed\n");
+ return err;
+ }
+
+ xilinx_pcie_init_port(port);
+
+ err = xilinx_pcie_init_irq_domain(port);
+ if (err) {
+ dev_err(dev, "Failed creating IRQ Domain\n");
+ return err;
+ }
+
+ /*
+ * Parse PCI ranges, configuration bus range and
+ * request their resources
+ */
+ INIT_LIST_HEAD(&port->resources);
+ err = xilinx_pcie_parse_and_add_res(port);
+ if (err) {
+ dev_err(dev, "Failed adding resources\n");
+ return err;
+ }
+
+ platform_set_drvdata(pdev, port);
+
+ /* Register the device */
+ hw = (struct hw_pci) {
+ .nr_controllers = 1,
+ .private_data = (void **)&port,
+ .setup = xilinx_pcie_setup,
+ .map_irq = of_irq_parse_and_map_pci,
+ .scan = xilinx_pcie_scan_bus,
+ .ops = &xilinx_pcie_ops,
+ };
+
+#ifdef CONFIG_PCI_MSI
+ xilinx_pcie_msi_chip.dev = port->dev;
+ hw.msi_ctrl = &xilinx_pcie_msi_chip;
+#endif
+ pci_common_init_dev(dev, &hw);
+
+ return 0;
+}
+
+/**
+ * xilinx_pcie_remove - Remove function
+ * @pdev: Platform device pointer
+ *
+ * Return: '0' always
+ */
+static int xilinx_pcie_remove(struct platform_device *pdev)
+{
+ struct xilinx_pcie_port *port = platform_get_drvdata(pdev);
+
+ xilinx_pcie_free_irq_domain(port);
+
+ return 0;
+}
+
+static const struct of_device_id xilinx_pcie_of_match[] = {
+ { .compatible = "xlnx,axi-pcie-host-1.00.a", },
+ {}
+};
+
+static struct platform_driver xilinx_pcie_driver = {
+ .driver = {
+ .name = "xilinx-pcie",
+ .of_match_table = xilinx_pcie_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = xilinx_pcie_probe,
+ .remove = xilinx_pcie_remove,
+};
+module_platform_driver(xilinx_pcie_driver);
+
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("Xilinx AXI PCIe driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c
new file mode 100644
index 000000000..c68366cee
--- /dev/null
+++ b/drivers/pci/hotplug-pci.c
@@ -0,0 +1,29 @@
+/* Core PCI functionality used only by PCI hotplug */
+
+#include <linux/pci.h>
+#include <linux/export.h>
+#include "pci.h"
+
+int pci_hp_add_bridge(struct pci_dev *dev)
+{
+ struct pci_bus *parent = dev->bus;
+ int pass, busnr, start = parent->busn_res.start;
+ int end = parent->busn_res.end;
+
+ for (busnr = start; busnr <= end; busnr++) {
+ if (!pci_find_bus(pci_domain_nr(parent), busnr))
+ break;
+ }
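+ /*
+ * If no free number was found, busnr is end + 1 and the test below
+ * fails the hot-add. Otherwise the post-decrement hands
+ * pci_scan_bridge() the value just below the free number, since the
+ * child bus is allocated at max + 1.
+ */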
+ if (busnr-- > end) {
+ printk(KERN_ERR "No bus number available for hot-added bridge %s\n",
+ pci_name(dev));
+ return -1;
+ }
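+ /*
+ * Two passes: pass 0 picks up bridges already configured by
+ * firmware, pass 1 configures the rest.
+ */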
+ for (pass = 0; pass < 2; pass++)
+ busnr = pci_scan_bridge(parent, dev, busnr, pass);
+ if (!dev->subordinate)
+ return -1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_hp_add_bridge);
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
new file mode 100644
index 000000000..df8caec59
--- /dev/null
+++ b/drivers/pci/hotplug/Kconfig
@@ -0,0 +1,159 @@
+#
+# PCI Hotplug support
+#
+
+menuconfig HOTPLUG_PCI
+ bool "Support for PCI Hotplug"
+ depends on PCI && SYSFS
+ ---help---
+ Say Y here if you have a motherboard with a PCI Hotplug controller.
+ This allows you to add and remove PCI cards while the machine is
+ powered up and running.
+
+ When in doubt, say N.
+
+if HOTPLUG_PCI
+
+config HOTPLUG_PCI_COMPAQ
+ tristate "Compaq PCI Hotplug driver"
+ depends on X86 && PCI_BIOS
+ help
+ Say Y here if you have a motherboard with a Compaq PCI Hotplug
+ controller.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpqphp.
+
+ When in doubt, say N.
+
+config HOTPLUG_PCI_COMPAQ_NVRAM
+ bool "Save configuration into NVRAM on Compaq servers"
+ depends on HOTPLUG_PCI_COMPAQ
+ help
+ Say Y here if you have a Compaq server that has a PCI Hotplug
+ controller. This will allow the PCI Hotplug driver to store the PCI
+ system configuration options in NVRAM.
+
+ When in doubt, say N.
+
+config HOTPLUG_PCI_IBM
+ tristate "IBM PCI Hotplug driver"
+ depends on X86_IO_APIC && X86 && PCI_BIOS
+ help
+ Say Y here if you have a motherboard with an IBM PCI Hotplug
+ controller.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ibmphp.
+
+ When in doubt, say N.
+
+config HOTPLUG_PCI_ACPI
+ bool "ACPI PCI Hotplug driver"
+ depends on HOTPLUG_PCI=y && ((!ACPI_DOCK && ACPI) || (ACPI_DOCK))
+ help
+ Say Y here if you have a system that supports PCI Hotplug using
+ ACPI.
+
+ When in doubt, say N.
+
+config HOTPLUG_PCI_ACPI_IBM
+ tristate "ACPI PCI Hotplug driver IBM extensions"
+ depends on HOTPLUG_PCI_ACPI
+ help
+ Say Y here if you have an IBM system that supports PCI Hotplug using
+ ACPI.
+
+ To compile this driver as a module, choose M here: the
+ module will be called acpiphp_ibm.
+
+ When in doubt, say N.
+
+config HOTPLUG_PCI_CPCI
+ bool "CompactPCI Hotplug driver"
+ help
+ Say Y here if you have a CompactPCI system card with CompactPCI
+ hotswap support per the PICMG 2.1 specification.
+
+ When in doubt, say N.
+
+config HOTPLUG_PCI_CPCI_ZT5550
+ tristate "Ziatech ZT5550 CompactPCI Hotplug driver"
+ depends on HOTPLUG_PCI_CPCI && X86
+ help
+ Say Y here if you have a Performance Technologies (formerly Intel,
+ formerly Ziatech) ZT5550 CompactPCI system card.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpcihp_zt5550.
+
+ When in doubt, say N.
+
+config HOTPLUG_PCI_CPCI_GENERIC
+ tristate "Generic port I/O CompactPCI Hotplug driver"
+ depends on HOTPLUG_PCI_CPCI && X86
+ help
+ Say Y here if you have a CompactPCI system card that exposes the #ENUM
+ hotswap signal as a bit in a system register that can be read through
+ standard port I/O.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpcihp_generic.
+
+ When in doubt, say N.
+
+config HOTPLUG_PCI_SHPC
+ tristate "SHPC PCI Hotplug driver"
+ help
+ Say Y here if you have a motherboard with a SHPC PCI Hotplug
+ controller.
+
+ To compile this driver as a module, choose M here: the
+ module will be called shpchp.
+
+ When in doubt, say N.
+
+config HOTPLUG_PCI_RPA
+ tristate "RPA PCI Hotplug driver"
+ depends on PPC_PSERIES && EEH
+ help
+ Say Y here if you have a RPA system that supports PCI Hotplug.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rpaphp.
+
+ When in doubt, say N.
+
+config HOTPLUG_PCI_RPA_DLPAR
+ tristate "RPA Dynamic Logical Partitioning for I/O slots"
+ depends on HOTPLUG_PCI_RPA
+ help
+ Say Y here if your system supports Dynamic Logical Partitioning
+ for I/O slots.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rpadlpar_io.
+
+ When in doubt, say N.
+
+config HOTPLUG_PCI_SGI
+ tristate "SGI PCI Hotplug Support"
+ depends on IA64_SGI_SN2 || IA64_GENERIC
+ help
+ Say Y here if you want to use the SGI Altix Hotplug
+ Driver for PCI devices.
+
+ When in doubt, say N.
+
+config HOTPLUG_PCI_S390
+ bool "System z PCI Hotplug Support"
+ depends on S390 && 64BIT
+ help
+ Say Y here if you want to use the System z PCI Hotplug
+ driver for PCI devices. Without this driver it is not
+ possible to access standby PCI functions or to deconfigure
+ PCI functions.
+
+ When in doubt, say Y.
+
+endif # HOTPLUG_PCI
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
new file mode 100644
index 000000000..4a9aa08b0
--- /dev/null
+++ b/drivers/pci/hotplug/Makefile
@@ -0,0 +1,72 @@
+#
+# Makefile for the Linux kernel pci hotplug controller drivers.
+#
+
+obj-$(CONFIG_HOTPLUG_PCI) += pci_hotplug.o
+obj-$(CONFIG_HOTPLUG_PCI_COMPAQ) += cpqphp.o
+obj-$(CONFIG_HOTPLUG_PCI_IBM) += ibmphp.o
+
+# native drivers should be linked before acpiphp in order to allow the
+# native driver to attempt to bind first. We can then fall back to
+# generic support.
+
+obj-$(CONFIG_HOTPLUG_PCI_PCIE) += pciehp.o
+obj-$(CONFIG_HOTPLUG_PCI_CPCI_ZT5550) += cpcihp_zt5550.o
+obj-$(CONFIG_HOTPLUG_PCI_CPCI_GENERIC) += cpcihp_generic.o
+obj-$(CONFIG_HOTPLUG_PCI_SHPC) += shpchp.o
+obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o
+obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o
+obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o
+obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o
+obj-$(CONFIG_HOTPLUG_PCI_S390) += s390_pci_hpc.o
+
+# acpiphp_ibm extends acpiphp, so should be linked afterwards.
+
+obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM) += acpiphp_ibm.o
+
+pci_hotplug-objs := pci_hotplug_core.o
+
+ifdef CONFIG_HOTPLUG_PCI_CPCI
+pci_hotplug-objs += cpci_hotplug_core.o \
+ cpci_hotplug_pci.o
+endif
+ifdef CONFIG_ACPI
+pci_hotplug-objs += acpi_pcihp.o
+endif
+
+cpqphp-objs := cpqphp_core.o \
+ cpqphp_ctrl.o \
+ cpqphp_sysfs.o \
+ cpqphp_pci.o
+cpqphp-$(CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM) += cpqphp_nvram.o
+cpqphp-objs += $(cpqphp-y)
+
+ibmphp-objs := ibmphp_core.o \
+ ibmphp_ebda.o \
+ ibmphp_pci.o \
+ ibmphp_res.o \
+ ibmphp_hpc.o
+
+acpiphp-objs := acpiphp_core.o \
+ acpiphp_glue.o
+
+rpaphp-objs := rpaphp_core.o \
+ rpaphp_pci.o \
+ rpaphp_slot.o
+
+rpadlpar_io-objs := rpadlpar_core.o \
+ rpadlpar_sysfs.o
+
+pciehp-objs := pciehp_core.o \
+ pciehp_ctrl.o \
+ pciehp_pci.o \
+ pciehp_hpc.o
+ifdef CONFIG_ACPI
+pciehp-objs += pciehp_acpi.o
+endif
+
+shpchp-objs := shpchp_core.o \
+ shpchp_ctrl.o \
+ shpchp_pci.o \
+ shpchp_sysfs.o \
+ shpchp_hpc.o
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
new file mode 100644
index 000000000..876ccc620
--- /dev/null
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -0,0 +1,227 @@
+/*
+ * Common ACPI functions for hot plug platforms
+ *
+ * Copyright (C) 2006 Intel Corporation
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <kristen.c.accardi@intel.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
+#include <linux/slab.h>
+
+#define MY_NAME "acpi_pcihp"
+
+#define dbg(fmt, arg...) do { if (debug_acpi) printk(KERN_DEBUG "%s: %s: " fmt , MY_NAME , __func__ , ## arg); } while (0)
+#define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg)
+#define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg)
+#define warn(format, arg...) printk(KERN_WARNING "%s: " format , MY_NAME , ## arg)
+
+#define METHOD_NAME__SUN "_SUN"
+#define METHOD_NAME_OSHP "OSHP"
+
+static bool debug_acpi;
+
+/**
+ * acpi_run_oshp - get control of hotplug from the firmware
+ * @handle: the handle of the hotplug controller
+ */
+static acpi_status acpi_run_oshp(acpi_handle handle)
+{
+ acpi_status status;
+ struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
+
+ /* run OSHP */
+ status = acpi_evaluate_object(handle, METHOD_NAME_OSHP, NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND)
+ printk(KERN_ERR "%s:%s OSHP fails=0x%x\n",
+ __func__, (char *)string.pointer, status);
+ else
+ dbg("%s:%s OSHP not found\n",
+ __func__, (char *)string.pointer);
+ } else {
+ pr_debug("%s:%s OSHP passes\n", __func__,
+ (char *)string.pointer);
+ }
+
+ kfree(string.pointer);
+ return status;
+}
+
+/**
+ * acpi_get_hp_hw_control_from_firmware - take hotplug control from firmware
+ * @pdev: the pci_dev of the bridge that has a hotplug controller
+ * @flags: requested control bits for _OSC
+ *
+ * Attempt to take hotplug control from firmware.
+ */
+int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
+{
+ acpi_status status;
+ acpi_handle chandle, handle;
+ struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
+
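+ /* Only the SHPC native hotplug control bit is relevant for this helper */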
+ flags &= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
+ if (!flags) {
+ err("Invalid flags %u specified!\n", flags);
+ return -EINVAL;
+ }
+
+ /*
+ * Per the PCI firmware specification, we should run the ACPI _OSC
+ * method to get control of hotplug hardware before using it. If
+ * _OSC is missing, we look for OSHP to do the same thing. To cope
+ * with differing BIOS behavior, we look for _OSC on a root bridge
+ * first (as the PCI firmware spec prefers) and only later for OSHP
+ * within the scope of the hotplug controller and its parents, up to
+ * the host bridge under which this controller exists.
+ */
+ handle = acpi_find_root_bridge_handle(pdev);
+ if (handle) {
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
+ dbg("Trying to get hotplug control for %s\n",
+ (char *)string.pointer);
+ status = acpi_pci_osc_control_set(handle, &flags, flags);
+ if (ACPI_SUCCESS(status))
+ goto got_one;
+ if (status == AE_SUPPORT)
+ goto no_control;
+ kfree(string.pointer);
+ string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
+ }
+
+ handle = ACPI_HANDLE(&pdev->dev);
+ if (!handle) {
+ /*
+ * This hotplug controller was not listed in the ACPI name
+ * space at all. Try to get acpi handle of parent pci bus.
+ */
+ struct pci_bus *pbus;
+ for (pbus = pdev->bus; pbus; pbus = pbus->parent) {
+ handle = acpi_pci_get_bridge_handle(pbus);
+ if (handle)
+ break;
+ }
+ }
+
+ while (handle) {
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
+ dbg("Trying to get hotplug control for %s \n",
+ (char *)string.pointer);
+ status = acpi_run_oshp(handle);
+ if (ACPI_SUCCESS(status))
+ goto got_one;
+ if (acpi_is_root_bridge(handle))
+ break;
+ chandle = handle;
+ status = acpi_get_parent(chandle, &handle);
+ if (ACPI_FAILURE(status))
+ break;
+ }
+no_control:
+ dbg("Cannot get control of hotplug hardware for pci %s\n",
+ pci_name(pdev));
+ kfree(string.pointer);
+ return -ENODEV;
+got_one:
+ dbg("Gained control for hotplug HW for pci %s (%s)\n",
+ pci_name(pdev), (char *)string.pointer);
+ kfree(string.pointer);
+ return 0;
+}
+EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
+
+static int pcihp_is_ejectable(acpi_handle handle)
+{
+ acpi_status status;
+ unsigned long long removable;
+
+ if (!acpi_has_method(handle, "_ADR"))
+ return 0;
+ if (acpi_has_method(handle, "_EJ0"))
+ return 1;
+ status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable);
+ if (ACPI_SUCCESS(status) && removable)
+ return 1;
+ return 0;
+}
+
+/**
+ * acpi_pci_check_ejectable - check if handle is ejectable ACPI PCI slot
+ * @pbus: the PCI bus of the PCI slot corresponding to 'handle'
+ * @handle: ACPI handle to check
+ *
+ * Return 1 if handle is ejectable PCI slot, 0 otherwise.
+ */
+int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle)
+{
+ acpi_handle bridge_handle, parent_handle;
+
+ bridge_handle = acpi_pci_get_bridge_handle(pbus);
+ if (!bridge_handle)
+ return 0;
+ if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle))))
+ return 0;
+ if (bridge_handle != parent_handle)
+ return 0;
+ return pcihp_is_ejectable(handle);
+}
+EXPORT_SYMBOL_GPL(acpi_pci_check_ejectable);
+
+static acpi_status
+check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ int *found = (int *)context;
+
+ if (pcihp_is_ejectable(handle)) {
+ *found = 1;
+ return AE_CTRL_TERMINATE;
+ }
+ return AE_OK;
+}
+
+/**
+ * acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots
+ * @handle: handle of the PCI bus to scan
+ *
+ * Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise.
+ */
+int acpi_pci_detect_ejectable(acpi_handle handle)
+{
+ int found = 0;
+
+ if (!handle)
+ return found;
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ check_hotplug, NULL, (void *)&found, NULL);
+ return found;
+}
+EXPORT_SYMBOL_GPL(acpi_pci_detect_ejectable);
+
+module_param(debug_acpi, bool, 0644);
+MODULE_PARM_DESC(debug_acpi, "Debugging mode for ACPI enabled or not");
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
new file mode 100644
index 000000000..b0e61bf26
--- /dev/null
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -0,0 +1,202 @@
+/*
+ * ACPI PCI Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ * Copyright (C) 2002 Hiroshi Aono (h-aono@ap.jp.nec.com)
+ * Copyright (C) 2002,2003 Takayoshi Kochi (t-kochi@bq.jp.nec.com)
+ * Copyright (C) 2002,2003 NEC Corporation
+ * Copyright (C) 2003-2005 Matthew Wilcox (matthew.wilcox@hp.com)
+ * Copyright (C) 2003-2005 Hewlett Packard
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <gregkh@us.ibm.com>,
+ * <t-kochi@bq.jp.nec.com>
+ *
+ */
+
+#ifndef _ACPIPHP_H
+#define _ACPIPHP_H
+
+#include <linux/acpi.h>
+#include <linux/mutex.h>
+#include <linux/pci_hotplug.h>
+
+struct acpiphp_context;
+struct acpiphp_bridge;
+struct acpiphp_slot;
+
+/*
+ * struct slot - slot information for each *physical* slot
+ */
+struct slot {
+ struct hotplug_slot *hotplug_slot;
+ struct acpiphp_slot *acpi_slot;
+ struct hotplug_slot_info info;
+ unsigned int sun; /* ACPI _SUN (Slot User Number) value */
+};
+
+static inline const char *slot_name(struct slot *slot)
+{
+ return hotplug_slot_name(slot->hotplug_slot);
+}
+
+/*
+ * struct acpiphp_bridge - PCI bridge information
+ *
+ * for each bridge device in ACPI namespace
+ */
+struct acpiphp_bridge {
+ struct list_head list;
+ struct list_head slots;
+ struct kref ref;
+
+ struct acpiphp_context *context;
+
+ int nr_slots;
+
+ /* This bus (host bridge) or Secondary bus (PCI-to-PCI bridge) */
+ struct pci_bus *pci_bus;
+
+ /* PCI-to-PCI bridge device */
+ struct pci_dev *pci_dev;
+
+ bool is_going_away;
+};
+
+
+/*
+ * struct acpiphp_slot - PCI slot information
+ *
+ * PCI slot information for each *physical* PCI slot
+ */
+struct acpiphp_slot {
+ struct list_head node;
+ struct pci_bus *bus;
+ struct list_head funcs; /* one slot may have several functions,
+ each with its own ACPI object */
+ struct slot *slot;
+
+ u8 device; /* pci device# */
+ u32 flags; /* see below */
+};
+
+
+/*
+ * struct acpiphp_func - PCI function information
+ *
+ * PCI function information for each object in the ACPI namespace;
+ * typically eight objects per slot (one per PCI function)
+ */
+struct acpiphp_func {
+ struct acpiphp_bridge *parent;
+ struct acpiphp_slot *slot;
+
+ struct list_head sibling;
+
+ u8 function; /* pci function# */
+ u32 flags; /* see below */
+};
+
+struct acpiphp_context {
+ struct acpi_hotplug_context hp;
+ struct acpiphp_func func;
+ struct acpiphp_bridge *bridge;
+ unsigned int refcount;
+};
+
+static inline struct acpiphp_context *to_acpiphp_context(struct acpi_hotplug_context *hp)
+{
+ return container_of(hp, struct acpiphp_context, hp);
+}
+
+static inline struct acpiphp_context *func_to_context(struct acpiphp_func *func)
+{
+ return container_of(func, struct acpiphp_context, func);
+}
+
+static inline struct acpi_device *func_to_acpi_device(struct acpiphp_func *func)
+{
+ return func_to_context(func)->hp.self;
+}
+
+static inline acpi_handle func_to_handle(struct acpiphp_func *func)
+{
+ return func_to_acpi_device(func)->handle;
+}
+
+struct acpiphp_root_context {
+ struct acpi_hotplug_context hp;
+ struct acpiphp_bridge *root_bridge;
+};
+
+static inline struct acpiphp_root_context *to_acpiphp_root_context(struct acpi_hotplug_context *hp)
+{
+ return container_of(hp, struct acpiphp_root_context, hp);
+}
+
+/*
+ * struct acpiphp_attention_info - device specific attention registration
+ *
+ * ACPI has no generic method of setting/getting attention status
+ * this allows for device specific driver registration
+ */
+struct acpiphp_attention_info
+{
+ int (*set_attn)(struct hotplug_slot *slot, u8 status);
+ int (*get_attn)(struct hotplug_slot *slot, u8 *status);
+ struct module *owner;
+};
+
+/* ACPI _STA method value (ignore bit 4; battery present) */
+#define ACPI_STA_ALL (0x0000000f)
+
+/* slot flags */
+
+#define SLOT_ENABLED (0x00000001)
+#define SLOT_IS_GOING_AWAY (0x00000002)
+
+/* function flags */
+
+#define FUNC_HAS_STA (0x00000001)
+#define FUNC_HAS_EJ0 (0x00000002)
+
+/* function prototypes */
+
+/* acpiphp_core.c */
+int acpiphp_register_attention(struct acpiphp_attention_info *info);
+int acpiphp_unregister_attention(struct acpiphp_attention_info *info);
+int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot, unsigned int sun);
+void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot);
+
+/* acpiphp_glue.c */
+typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data);
+
+int acpiphp_enable_slot(struct acpiphp_slot *slot);
+int acpiphp_disable_slot(struct acpiphp_slot *slot);
+u8 acpiphp_get_power_status(struct acpiphp_slot *slot);
+u8 acpiphp_get_attention_status(struct acpiphp_slot *slot);
+u8 acpiphp_get_latch_status(struct acpiphp_slot *slot);
+u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot);
+
+/* variables */
+extern bool acpiphp_disabled;
+
+#endif /* _ACPIPHP_H */
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
new file mode 100644
index 000000000..e291efcd0
--- /dev/null
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -0,0 +1,356 @@
+/*
+ * ACPI PCI Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ * Copyright (C) 2002 Hiroshi Aono (h-aono@ap.jp.nec.com)
+ * Copyright (C) 2002,2003 Takayoshi Kochi (t-kochi@bq.jp.nec.com)
+ * Copyright (C) 2002,2003 NEC Corporation
+ * Copyright (C) 2003-2005 Matthew Wilcox (matthew.wilcox@hp.com)
+ * Copyright (C) 2003-2005 Hewlett Packard
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <kristen.c.accardi@intel.com>
+ *
+ */
+
+#define pr_fmt(fmt) "acpiphp: " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/pci_hotplug.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include "acpiphp.h"
+
+/* name size which is used for entries in pcihpfs */
+#define SLOT_NAME_SIZE 21 /* {_SUN} */
+
+bool acpiphp_disabled;
+
+/* local variables */
+static struct acpiphp_attention_info *attention_info;
+
+#define DRIVER_VERSION "0.5"
+#define DRIVER_AUTHOR "Greg Kroah-Hartman <gregkh@us.ibm.com>, Takayoshi Kochi <t-kochi@bq.jp.nec.com>, Matthew Wilcox <willy@hp.com>"
+#define DRIVER_DESC "ACPI Hot Plug PCI Controller Driver"
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_PARM_DESC(disable, "disable acpiphp driver");
+module_param_named(disable, acpiphp_disabled, bool, 0444);
+
+static int enable_slot(struct hotplug_slot *slot);
+static int disable_slot(struct hotplug_slot *slot);
+static int set_attention_status(struct hotplug_slot *slot, u8 value);
+static int get_power_status(struct hotplug_slot *slot, u8 *value);
+static int get_attention_status(struct hotplug_slot *slot, u8 *value);
+static int get_latch_status(struct hotplug_slot *slot, u8 *value);
+static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
+
+static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
+ .enable_slot = enable_slot,
+ .disable_slot = disable_slot,
+ .set_attention_status = set_attention_status,
+ .get_power_status = get_power_status,
+ .get_attention_status = get_attention_status,
+ .get_latch_status = get_latch_status,
+ .get_adapter_status = get_adapter_status,
+};
+
+/**
+ * acpiphp_register_attention - set attention LED callback
+ * @info: must be completely filled with LED callbacks
+ *
+ * Description: This is used to register a hardware specific ACPI
+ * driver that manipulates the attention LED. All the fields in
+ * info must be set.
+ */
+int acpiphp_register_attention(struct acpiphp_attention_info *info)
+{
+ int retval = -EINVAL;
+
+ if (info && info->owner && info->set_attn &&
+ info->get_attn && !attention_info) {
+ retval = 0;
+ attention_info = info;
+ }
+ return retval;
+}
+EXPORT_SYMBOL_GPL(acpiphp_register_attention);
+
+
+/**
+ * acpiphp_unregister_attention - unset attention LED callback
+ * @info: must match the pointer used to register
+ *
+ * Description: This is used to un-register a hardware specific acpi
+ * driver that manipulates the attention LED. The pointer to the
+ * info struct must be the same as the one used to set it.
+ */
+int acpiphp_unregister_attention(struct acpiphp_attention_info *info)
+{
+ int retval = -EINVAL;
+
+ if (info && attention_info == info) {
+ attention_info = NULL;
+ retval = 0;
+ }
+ return retval;
+}
+EXPORT_SYMBOL_GPL(acpiphp_unregister_attention);
+
+
+/**
+ * enable_slot - power on and enable a slot
+ * @hotplug_slot: slot to enable
+ *
+ * Actual tasks are done in acpiphp_enable_slot()
+ */
+static int enable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
+
+ /* enable the specified slot */
+ return acpiphp_enable_slot(slot->acpi_slot);
+}
+
+
+/**
+ * disable_slot - disable and power off a slot
+ * @hotplug_slot: slot to disable
+ *
+ * Actual tasks are done in acpiphp_disable_slot()
+ */
+static int disable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
+
+ /* disable the specified slot */
+ return acpiphp_disable_slot(slot->acpi_slot);
+}
+
+
+/**
+ * set_attention_status - set attention LED
+ * @hotplug_slot: slot to set attention LED on
+ * @status: value to set attention LED to (0 or 1)
+ *
+ * ACPI has no generic method to set the attention status LED, so we
+ * use a callback that was registered with us. This allows hardware
+ * specific ACPI implementations to blink the LED for us.
+ */
+static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
+{
+ int retval = -ENODEV;
+
+ pr_debug("%s - physical_slot = %s\n", __func__,
+ hotplug_slot_name(hotplug_slot));
+
+ if (attention_info && try_module_get(attention_info->owner)) {
+ retval = attention_info->set_attn(hotplug_slot, status);
+ module_put(attention_info->owner);
+ } else
+ attention_info = NULL;
+ return retval;
+}
+
+
+/**
+ * get_power_status - get power status of a slot
+ * @hotplug_slot: slot to get status
+ * @value: pointer to store status
+ *
+ * Some platforms may not implement _STA method properly.
+ * In that case, the value returned may not be reliable.
+ */
+static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
+
+ *value = acpiphp_get_power_status(slot->acpi_slot);
+
+ return 0;
+}
+
+
+/**
+ * get_attention_status - get attention LED status
+ * @hotplug_slot: slot to get status from
+ * @value: returns with value of attention LED
+ *
+ * ACPI doesn't have a known method to determine the state
+ * of the attention status LED, so we use a callback that
+ * was registered with us. This allows hardware specific
+ * ACPI implementations to determine its state.
+ */
+static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ int retval = -EINVAL;
+
+ pr_debug("%s - physical_slot = %s\n", __func__,
+ hotplug_slot_name(hotplug_slot));
+
+ if (attention_info && try_module_get(attention_info->owner)) {
+ retval = attention_info->get_attn(hotplug_slot, value);
+ module_put(attention_info->owner);
+ } else
+ attention_info = NULL;
+ return retval;
+}
+
+
+/**
+ * get_latch_status - get latch status of a slot
+ * @hotplug_slot: slot to get status
+ * @value: pointer to store status
+ *
+ * ACPI doesn't provide any formal means to access latch status.
+ * Instead, we fake latch status from _STA.
+ */
+static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
+
+ *value = acpiphp_get_latch_status(slot->acpi_slot);
+
+ return 0;
+}
+
+
+/**
+ * get_adapter_status - get adapter status of a slot
+ * @hotplug_slot: slot to get status
+ * @value: pointer to store status
+ *
+ * ACPI doesn't provide any formal means to access adapter status.
+ * Instead, we fake adapter status from _STA.
+ */
+static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
+
+ *value = acpiphp_get_adapter_status(slot->acpi_slot);
+
+ return 0;
+}
+
+/**
+ * release_slot - free up the memory used by a slot
+ * @hotplug_slot: slot to free
+ */
+static void release_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
+
+ kfree(slot->hotplug_slot);
+ kfree(slot);
+}
+
+/* callback routine to initialize 'struct slot' for each slot */
+int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
+ unsigned int sun)
+{
+ struct slot *slot;
+ int retval = -ENOMEM;
+ char name[SLOT_NAME_SIZE];
+
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ goto error;
+
+ slot->hotplug_slot = kzalloc(sizeof(*slot->hotplug_slot), GFP_KERNEL);
+ if (!slot->hotplug_slot)
+ goto error_slot;
+
+ slot->hotplug_slot->info = &slot->info;
+
+ slot->hotplug_slot->private = slot;
+ slot->hotplug_slot->release = &release_slot;
+ slot->hotplug_slot->ops = &acpi_hotplug_slot_ops;
+
+ slot->acpi_slot = acpiphp_slot;
+ slot->hotplug_slot->info->power_status = acpiphp_get_power_status(slot->acpi_slot);
+ slot->hotplug_slot->info->attention_status = 0;
+ slot->hotplug_slot->info->latch_status = acpiphp_get_latch_status(slot->acpi_slot);
+ slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot);
+
+ acpiphp_slot->slot = slot;
+ slot->sun = sun;
+ snprintf(name, SLOT_NAME_SIZE, "%u", sun);
+
+ retval = pci_hp_register(slot->hotplug_slot, acpiphp_slot->bus,
+ acpiphp_slot->device, name);
+ if (retval == -EBUSY)
+ goto error_hpslot;
+ if (retval) {
+ pr_err("pci_hp_register failed with error %d\n", retval);
+ goto error_hpslot;
+ }
+
+ pr_info("Slot [%s] registered\n", slot_name(slot));
+
+ return 0;
+error_hpslot:
+ kfree(slot->hotplug_slot);
+error_slot:
+ kfree(slot);
+error:
+ return retval;
+}
+
+
+void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
+{
+ struct slot *slot = acpiphp_slot->slot;
+ int retval = 0;
+
+ pr_info("Slot [%s] unregistered\n", slot_name(slot));
+
+ retval = pci_hp_deregister(slot->hotplug_slot);
+ if (retval)
+ pr_err("pci_hp_deregister failed with error %d\n", retval);
+}
+
+
+void __init acpiphp_init(void)
+{
+ pr_info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n",
+ acpiphp_disabled ? ", disabled by user; please report a bug"
+ : "");
+}
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
new file mode 100644
index 000000000..bcb90e488
--- /dev/null
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -0,0 +1,1036 @@
+/*
+ * ACPI PCI HotPlug glue functions to ACPI CA subsystem
+ *
+ * Copyright (C) 2002,2003 Takayoshi Kochi (t-kochi@bq.jp.nec.com)
+ * Copyright (C) 2002 Hiroshi Aono (h-aono@ap.jp.nec.com)
+ * Copyright (C) 2002,2003 NEC Corporation
+ * Copyright (C) 2003-2005 Matthew Wilcox (matthew.wilcox@hp.com)
+ * Copyright (C) 2003-2005 Hewlett Packard
+ * Copyright (C) 2005 Rajesh Shah (rajesh.shah@intel.com)
+ * Copyright (C) 2005 Intel Corporation
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <kristen.c.accardi@intel.com>
+ *
+ */
+
+/*
+ * Lifetime rules for pci_dev:
+ * - The one in acpiphp_bridge has its refcount elevated by pci_get_slot()
+ * when the bridge is scanned and it loses a refcount when the bridge
+ * is removed.
+ * - When a P2P bridge is present, we elevate the refcount on the subordinate
+ * bus. It loses the refcount when the driver unloads.
+ */
+
+#define pr_fmt(fmt) "acpiphp_glue: " fmt
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/pci-acpi.h>
+#include <linux/pm_runtime.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+
+#include "../pci.h"
+#include "acpiphp.h"
+
+static LIST_HEAD(bridge_list);
+static DEFINE_MUTEX(bridge_mutex);
+
+static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type);
+static void acpiphp_post_dock_fixup(struct acpi_device *adev);
+static void acpiphp_sanitize_bus(struct pci_bus *bus);
+static void hotplug_event(u32 type, struct acpiphp_context *context);
+static void free_bridge(struct kref *kref);
+
+/**
+ * acpiphp_init_context - Create hotplug context and grab a reference to it.
+ * @adev: ACPI device object to create the context for.
+ *
+ * Call under acpi_hp_context_lock.
+ */
+static struct acpiphp_context *acpiphp_init_context(struct acpi_device *adev)
+{
+ struct acpiphp_context *context;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return NULL;
+
+ context->refcount = 1;
+ context->hp.notify = acpiphp_hotplug_notify;
+ context->hp.fixup = acpiphp_post_dock_fixup;
+ acpi_set_hp_context(adev, &context->hp);
+ return context;
+}
+
+/**
+ * acpiphp_get_context - Get hotplug context and grab a reference to it.
+ * @adev: ACPI device object to get the context for.
+ *
+ * Call under acpi_hp_context_lock.
+ */
+static struct acpiphp_context *acpiphp_get_context(struct acpi_device *adev)
+{
+ struct acpiphp_context *context;
+
+ if (!adev->hp)
+ return NULL;
+
+ context = to_acpiphp_context(adev->hp);
+ context->refcount++;
+ return context;
+}
+
+/**
+ * acpiphp_put_context - Drop a reference to ACPI hotplug context.
+ * @context: ACPI hotplug context to drop a reference to.
+ *
+ * The context object is removed if there are no more references to it.
+ *
+ * Call under acpi_hp_context_lock.
+ */
+static void acpiphp_put_context(struct acpiphp_context *context)
+{
+ if (--context->refcount)
+ return;
+
+ WARN_ON(context->bridge);
+ context->hp.self->hp = NULL;
+ kfree(context);
+}
+
+static inline void get_bridge(struct acpiphp_bridge *bridge)
+{
+ kref_get(&bridge->ref);
+}
+
+static inline void put_bridge(struct acpiphp_bridge *bridge)
+{
+ kref_put(&bridge->ref, free_bridge);
+}
+
+static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev)
+{
+ struct acpiphp_context *context;
+
+ acpi_lock_hp_context();
+ context = acpiphp_get_context(adev);
+ if (!context || context->func.parent->is_going_away) {
+ acpi_unlock_hp_context();
+ return NULL;
+ }
+ get_bridge(context->func.parent);
+ acpiphp_put_context(context);
+ acpi_unlock_hp_context();
+ return context;
+}
+
+static void acpiphp_let_context_go(struct acpiphp_context *context)
+{
+ put_bridge(context->func.parent);
+}
+
+static void free_bridge(struct kref *kref)
+{
+ struct acpiphp_context *context;
+ struct acpiphp_bridge *bridge;
+ struct acpiphp_slot *slot, *next;
+ struct acpiphp_func *func, *tmp;
+
+ acpi_lock_hp_context();
+
+ bridge = container_of(kref, struct acpiphp_bridge, ref);
+
+ list_for_each_entry_safe(slot, next, &bridge->slots, node) {
+ list_for_each_entry_safe(func, tmp, &slot->funcs, sibling)
+ acpiphp_put_context(func_to_context(func));
+
+ kfree(slot);
+ }
+
+ context = bridge->context;
+ /* Root bridges will not have hotplug context. */
+ if (context) {
+ /* Release the reference taken by acpiphp_enumerate_slots(). */
+ put_bridge(context->func.parent);
+ context->bridge = NULL;
+ acpiphp_put_context(context);
+ }
+
+ put_device(&bridge->pci_bus->dev);
+ pci_dev_put(bridge->pci_dev);
+ kfree(bridge);
+
+ acpi_unlock_hp_context();
+}
+
+/**
+ * acpiphp_post_dock_fixup - Post-dock fixups for PCI devices.
+ * @adev: ACPI device object corresponding to a PCI device.
+ *
+ * TBD - figure out a way to only call fixups for systems that require them.
+ */
+static void acpiphp_post_dock_fixup(struct acpi_device *adev)
+{
+ struct acpiphp_context *context = acpiphp_grab_context(adev);
+ struct pci_bus *bus;
+ u32 buses;
+
+ if (!context)
+ return;
+
+ bus = context->func.slot->bus;
+ if (!bus->self)
+ goto out;
+
+ /*
+ * Fix up a bad _DCK implementation that rewrites the secondary
+ * bridge configuration on the slot.
+ */
+ pci_read_config_dword(bus->self, PCI_PRIMARY_BUS, &buses);
+
+ if (((buses >> 8) & 0xff) != bus->busn_res.start) {
+ buses = (buses & 0xff000000)
+ | ((unsigned int)(bus->primary) << 0)
+ | ((unsigned int)(bus->busn_res.start) << 8)
+ | ((unsigned int)(bus->busn_res.end) << 16);
+ pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses);
+ }
+
+ out:
+ acpiphp_let_context_go(context);
+}
+
+/* Check whether the PCI device is managed by native PCIe hotplug driver */
+static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
+{
+ u32 reg32;
+ acpi_handle tmp;
+ struct acpi_pci_root *root;
+
+ /* Check whether the PCIe port supports native PCIe hotplug */
+ if (pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32))
+ return false;
+ if (!(reg32 & PCI_EXP_SLTCAP_HPC))
+ return false;
+
+ /*
+ * Check whether native PCIe hotplug has been enabled for
+ * this PCIe hierarchy.
+ */
+ tmp = acpi_find_root_bridge_handle(pdev);
+ if (!tmp)
+ return false;
+ root = acpi_pci_find_root(tmp);
+ if (!root)
+ return false;
+ if (!(root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
+ return false;
+
+ return true;
+}
+
+/**
+ * acpiphp_add_context - Add ACPIPHP context to an ACPI device object.
+ * @handle: ACPI handle of the object to add a context to.
+ * @lvl: Not used.
+ * @data: The object's parent ACPIPHP bridge.
+ * @rv: Not used.
+ */
+static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
+ void **rv)
+{
+ struct acpiphp_bridge *bridge = data;
+ struct acpiphp_context *context;
+ struct acpi_device *adev;
+ struct acpiphp_slot *slot;
+ struct acpiphp_func *newfunc;
+ acpi_status status = AE_OK;
+ unsigned long long adr;
+ int device, function;
+ struct pci_bus *pbus = bridge->pci_bus;
+ struct pci_dev *pdev = bridge->pci_dev;
+ u32 val;
+
+ status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND)
+ acpi_handle_warn(handle,
+ "can't evaluate _ADR (%#x)\n", status);
+ return AE_OK;
+ }
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
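+ /*
+ * For PCI, _ADR encodes the device number in the high word and the
+ * function number in the low word.
+ */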
+ device = (adr >> 16) & 0xffff;
+ function = adr & 0xffff;
+
+ acpi_lock_hp_context();
+ context = acpiphp_init_context(adev);
+ if (!context) {
+ acpi_unlock_hp_context();
+ acpi_handle_err(handle, "No hotplug context\n");
+ return AE_NOT_EXIST;
+ }
+ newfunc = &context->func;
+ newfunc->function = function;
+ newfunc->parent = bridge;
+ acpi_unlock_hp_context();
+
+ /*
+ * If this is a dock device, its _EJ0 should be executed by the dock
+ * notify handler after calling _DCK.
+ */
+ if (!is_dock_device(adev) && acpi_has_method(handle, "_EJ0"))
+ newfunc->flags = FUNC_HAS_EJ0;
+
+ if (acpi_has_method(handle, "_STA"))
+ newfunc->flags |= FUNC_HAS_STA;
+
+ /* search for objects that share the same slot */
+ list_for_each_entry(slot, &bridge->slots, node)
+ if (slot->device == device)
+ goto slot_found;
+
+ slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL);
+ if (!slot) {
+ acpi_lock_hp_context();
+ acpiphp_put_context(context);
+ acpi_unlock_hp_context();
+ return AE_NO_MEMORY;
+ }
+
+ slot->bus = bridge->pci_bus;
+ slot->device = device;
+ INIT_LIST_HEAD(&slot->funcs);
+
+ list_add_tail(&slot->node, &bridge->slots);
+
+ /*
+ * Expose slots to user space for functions that have _EJ0 or _RMV or
+ * are located in dock stations. Do not expose them for devices handled
+	 * by the native PCIe hotplug (PCIeHP), because that code is supposed to
+ * expose slots to user space in those cases.
+ */
+ if ((acpi_pci_check_ejectable(pbus, handle) || is_dock_device(adev))
+ && !(pdev && device_is_managed_by_native_pciehp(pdev))) {
+ unsigned long long sun;
+ int retval;
+
+ bridge->nr_slots++;
+ status = acpi_evaluate_integer(handle, "_SUN", NULL, &sun);
+ if (ACPI_FAILURE(status))
+ sun = bridge->nr_slots;
+
+ pr_debug("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
+ sun, pci_domain_nr(pbus), pbus->number, device);
+
+ retval = acpiphp_register_hotplug_slot(slot, sun);
+ if (retval) {
+ slot->slot = NULL;
+ bridge->nr_slots--;
+ if (retval == -EBUSY)
+ pr_warn("Slot %llu already registered by another hotplug driver\n", sun);
+ else
+ pr_warn("acpiphp_register_hotplug_slot failed (err code = 0x%x)\n", retval);
+ }
+ /* Even if the slot registration fails, we can still use it. */
+ }
+
+ slot_found:
+ newfunc->slot = slot;
+ list_add_tail(&newfunc->sibling, &slot->funcs);
+
+ if (pci_bus_read_dev_vendor_id(pbus, PCI_DEVFN(device, function),
+ &val, 60*1000))
+ slot->flags |= SLOT_ENABLED;
+
+ return AE_OK;
+}
+
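+/*
+ * cleanup_bridge - detach hotplug handling from a bridge's slots.
+ *
+ * Clear the notify/fixup callbacks of every function's ACPI device, mark
+ * each slot as going away (unregistering it from the hotplug core if it
+ * was exposed to user space), and take the bridge off the bridge list.
+ */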
+static void cleanup_bridge(struct acpiphp_bridge *bridge)
+{
+ struct acpiphp_slot *slot;
+ struct acpiphp_func *func;
+
+ list_for_each_entry(slot, &bridge->slots, node) {
+ list_for_each_entry(func, &slot->funcs, sibling) {
+ struct acpi_device *adev = func_to_acpi_device(func);
+
+ acpi_lock_hp_context();
+ adev->hp->notify = NULL;
+ adev->hp->fixup = NULL;
+ acpi_unlock_hp_context();
+ }
+ slot->flags |= SLOT_IS_GOING_AWAY;
+ if (slot->slot)
+ acpiphp_unregister_hotplug_slot(slot);
+ }
+
+ mutex_lock(&bridge_mutex);
+ list_del(&bridge->list);
+ mutex_unlock(&bridge_mutex);
+
+ acpi_lock_hp_context();
+ bridge->is_going_away = true;
+ acpi_unlock_hp_context();
+}
+
+/**
+ * acpiphp_max_busnr - return the highest reserved bus number under the given bus.
+ * @bus: bus to start search with
+ */
+static unsigned char acpiphp_max_busnr(struct pci_bus *bus)
+{
+ struct pci_bus *tmp;
+ unsigned char max, n;
+
+	/*
+	 * pci_bus_max_busnr() returns the highest reserved bus number
+	 * under each child bus, which is equivalent to that child's
+	 * bus->subordinate value.  We don't use the parent's
+	 * bus->subordinate value directly because it may contain
+	 * padding.
+	 */
+ max = bus->busn_res.start;
+
+ list_for_each_entry(tmp, &bus->children, node) {
+ n = pci_bus_max_busnr(tmp);
+ if (n > max)
+ max = n;
+ }
+ return max;
+}
+
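+/*
+ * acpiphp_set_acpi_region - run _REG for each function in the slot.
+ *
+ * Notify the AML code that PCI config space (ACPI_ADR_SPACE_PCI_CONFIG)
+ * is accessible for the slot's functions (connection status 1).
+ */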
+static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
+{
+ struct acpiphp_func *func;
+ union acpi_object params[2];
+ struct acpi_object_list arg_list;
+
+ list_for_each_entry(func, &slot->funcs, sibling) {
+ arg_list.count = 2;
+ arg_list.pointer = params;
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = ACPI_ADR_SPACE_PCI_CONFIG;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = 1;
+		/* _REG is optional, so we don't care if it fails */
+ acpi_evaluate_object(func_to_handle(func), "_REG", &arg_list,
+ NULL);
+ }
+}
+
+static void check_hotplug_bridge(struct acpiphp_slot *slot, struct pci_dev *dev)
+{
+ struct acpiphp_func *func;
+
+	/* a quirk or the PCIe port driver may have set it already */
+ if (dev->is_hotplug_bridge)
+ return;
+
+ list_for_each_entry(func, &slot->funcs, sibling) {
+ if (PCI_FUNC(dev->devfn) == func->function) {
+ dev->is_hotplug_bridge = 1;
+ break;
+ }
+ }
+}
+
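+/*
+ * acpiphp_rescan_slot - rescan the ACPI and PCI sides of a slot.
+ *
+ * Re-run the ACPI namespace scan for each function's companion device,
+ * put whatever was enumerated into D0, and then rescan the PCI slot
+ * itself.  Returns the number of new functions found by pci_scan_slot().
+ */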
+static int acpiphp_rescan_slot(struct acpiphp_slot *slot)
+{
+ struct acpiphp_func *func;
+
+ list_for_each_entry(func, &slot->funcs, sibling) {
+ struct acpi_device *adev = func_to_acpi_device(func);
+
+ acpi_bus_scan(adev->handle);
+ if (acpi_device_enumerated(adev))
+ acpi_device_set_power(adev, ACPI_STATE_D0);
+ }
+ return pci_scan_slot(slot->bus, PCI_DEVFN(slot->device, 0));
+}
+
+/**
+ * enable_slot - enable, configure a slot
+ * @slot: slot to be enabled
+ *
+ * This function should be called per *physical slot*,
+ * not per each slot object in ACPI namespace.
+ */
+static void enable_slot(struct acpiphp_slot *slot)
+{
+ struct pci_dev *dev;
+ struct pci_bus *bus = slot->bus;
+ struct acpiphp_func *func;
+ int max, pass;
+ LIST_HEAD(add_list);
+
+ acpiphp_rescan_slot(slot);
+ max = acpiphp_max_busnr(bus);
+ for (pass = 0; pass < 2; pass++) {
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != slot->device)
+ continue;
+
+ if (pci_is_bridge(dev)) {
+ max = pci_scan_bridge(bus, dev, max, pass);
+ if (pass && dev->subordinate) {
+ check_hotplug_bridge(slot, dev);
+ pcibios_resource_survey_bus(dev->subordinate);
+ __pci_bus_size_bridges(dev->subordinate,
+ &add_list);
+ }
+ }
+ }
+ }
+ __pci_bus_assign_resources(bus, &add_list, NULL);
+
+ acpiphp_sanitize_bus(bus);
+ pcie_bus_configure_settings(bus);
+ acpiphp_set_acpi_region(slot);
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ /* Assume that newly added devices are powered on already. */
+ if (!dev->is_added)
+ dev->current_state = PCI_D0;
+ }
+
+ pci_bus_add_devices(bus);
+
+ slot->flags |= SLOT_ENABLED;
+ list_for_each_entry(func, &slot->funcs, sibling) {
+ dev = pci_get_slot(bus, PCI_DEVFN(slot->device,
+ func->function));
+ if (!dev) {
+			/*
+			 * Do not set the SLOT_ENABLED flag if some
+			 * functions are not added.
+			 */
+			slot->flags &= ~SLOT_ENABLED;
+ continue;
+ }
+ }
+}
+
+/**
+ * disable_slot - disable a slot
+ * @slot: ACPI PHP slot
+ */
+static void disable_slot(struct acpiphp_slot *slot)
+{
+ struct pci_bus *bus = slot->bus;
+ struct pci_dev *dev, *prev;
+ struct acpiphp_func *func;
+
+ /*
+ * enable_slot() enumerates all functions in this device via
+ * pci_scan_slot(), whether they have associated ACPI hotplug
+ * methods (_EJ0, etc.) or not. Therefore, we remove all functions
+ * here.
+ */
+ list_for_each_entry_safe_reverse(dev, prev, &bus->devices, bus_list)
+ if (PCI_SLOT(dev->devfn) == slot->device)
+ pci_stop_and_remove_bus_device(dev);
+
+ list_for_each_entry(func, &slot->funcs, sibling)
+ acpi_bus_trim(func_to_acpi_device(func));
+
+	slot->flags &= ~SLOT_ENABLED;
+}
+
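+/* Return true if any function in the slot has hotplug events ignored. */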
+static bool slot_no_hotplug(struct acpiphp_slot *slot)
+{
+ struct pci_bus *bus = slot->bus;
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) == slot->device && dev->ignore_hotplug)
+ return true;
+ }
+ return false;
+}
+
+/**
+ * get_slot_status - get ACPI slot status
+ * @slot: ACPI PHP slot
+ *
+ * If a slot has _STA for each function and if any one of them
+ * returned non-zero status, return it.
+ *
+ * If a slot doesn't have _STA and if any one of its functions'
+ * configuration space is configured, return 0x0f as a _STA.
+ *
+ * Otherwise return 0.
+ */
+static unsigned int get_slot_status(struct acpiphp_slot *slot)
+{
+ unsigned long long sta = 0;
+ struct acpiphp_func *func;
+
+ list_for_each_entry(func, &slot->funcs, sibling) {
+ if (func->flags & FUNC_HAS_STA) {
+ acpi_status status;
+
+ status = acpi_evaluate_integer(func_to_handle(func),
+ "_STA", NULL, &sta);
+ if (ACPI_SUCCESS(status) && sta)
+ break;
+ } else {
+ u32 dvid;
+
+ pci_bus_read_config_dword(slot->bus,
+ PCI_DEVFN(slot->device,
+ func->function),
+ PCI_VENDOR_ID, &dvid);
+ if (dvid != 0xffffffff) {
+ sta = ACPI_STA_ALL;
+ break;
+ }
+ }
+ }
+
+ return (unsigned int)sta;
+}
+
+static inline bool device_status_valid(unsigned int sta)
+{
+ /*
+ * ACPI spec says that _STA may return bit 0 clear with bit 3 set
+ * if the device is valid but does not require a device driver to be
+ * loaded (Section 6.3.7 of ACPI 5.0A).
+ */
+ unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING;
+ return (sta & mask) == mask;
+}
+
+/**
+ * trim_stale_devices - remove PCI devices that are not responding.
+ * @dev: PCI device to start walking the hierarchy from.
+ */
+static void trim_stale_devices(struct pci_dev *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+ struct pci_bus *bus = dev->subordinate;
+ bool alive = false;
+
+ if (adev) {
+ acpi_status status;
+ unsigned long long sta;
+
+ status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
+ alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
+ || dev->ignore_hotplug;
+ }
+ if (!alive)
+ alive = pci_device_is_present(dev);
+
+ if (!alive) {
+ pci_stop_and_remove_bus_device(dev);
+ if (adev)
+ acpi_bus_trim(adev);
+ } else if (bus) {
+ struct pci_dev *child, *tmp;
+
+		/* The device is a bridge, so check the bus below it. */
+ pm_runtime_get_sync(&dev->dev);
+ list_for_each_entry_safe_reverse(child, tmp, &bus->devices, bus_list)
+ trim_stale_devices(child);
+
+ pm_runtime_put(&dev->dev);
+ }
+}
+
+/**
+ * acpiphp_check_bridge - re-enumerate devices
+ * @bridge: where to begin re-enumeration
+ *
+ * Iterate over all slots under this bridge and make sure that slots with
+ * a card present are enabled and slots without one are disabled.
+ */
+static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
+{
+ struct acpiphp_slot *slot;
+
+ /* Bail out if the bridge is going away. */
+ if (bridge->is_going_away)
+ return;
+
+ list_for_each_entry(slot, &bridge->slots, node) {
+ struct pci_bus *bus = slot->bus;
+ struct pci_dev *dev, *tmp;
+
+ if (slot_no_hotplug(slot)) {
+ ; /* do nothing */
+ } else if (device_status_valid(get_slot_status(slot))) {
+ /* remove stale devices if any */
+ list_for_each_entry_safe_reverse(dev, tmp,
+ &bus->devices, bus_list)
+ if (PCI_SLOT(dev->devfn) == slot->device)
+ trim_stale_devices(dev);
+
+ /* configure all functions */
+ enable_slot(slot);
+ } else {
+ disable_slot(slot);
+ }
+ }
+}
+
+/*
+ * Remove devices for which we could not assign resources, call
+ * arch specific code to fix-up the bus
+ */
+static void acpiphp_sanitize_bus(struct pci_bus *bus)
+{
+ struct pci_dev *dev, *tmp;
+ int i;
+ unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;
+
+ list_for_each_entry_safe_reverse(dev, tmp, &bus->devices, bus_list) {
+		for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
+			struct resource *res = &dev->resource[i];
+			if ((res->flags & type_mask) && !res->start &&
+			    res->end) {
+				/*
+				 * Could not assign the required resources
+				 * for this device, so remove it.
+				 */
+ pci_stop_and_remove_bus_device(dev);
+ break;
+ }
+ }
+ }
+}
+
+/*
+ * ACPI event handlers
+ */
+
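+/*
+ * acpiphp_check_host_bridge - re-enumerate devices below a host bridge.
+ * @adev: ACPI device object of the host bridge.
+ *
+ * If a root bridge context is attached to @adev, take a reference to its
+ * bridge and re-check all of its slots under the rescan/remove lock.
+ */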
+void acpiphp_check_host_bridge(struct acpi_device *adev)
+{
+ struct acpiphp_bridge *bridge = NULL;
+
+ acpi_lock_hp_context();
+ if (adev->hp) {
+ bridge = to_acpiphp_root_context(adev->hp)->root_bridge;
+ if (bridge)
+ get_bridge(bridge);
+ }
+ acpi_unlock_hp_context();
+ if (bridge) {
+ pci_lock_rescan_remove();
+
+ acpiphp_check_bridge(bridge);
+
+ pci_unlock_rescan_remove();
+ put_bridge(bridge);
+ }
+}
+
+static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot);
+
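+/*
+ * hotplug_event - handle an ACPI hotplug notification for one context.
+ *
+ * A bus check re-enumerates the bridge if the notification came in for
+ * one, or just enables the slot otherwise; a device check rescans the
+ * slot and re-checks the parent bridge if anything changed; an eject
+ * request powers off and ejects the slot.
+ */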
+static void hotplug_event(u32 type, struct acpiphp_context *context)
+{
+ acpi_handle handle = context->hp.self->handle;
+ struct acpiphp_func *func = &context->func;
+ struct acpiphp_slot *slot = func->slot;
+ struct acpiphp_bridge *bridge;
+
+ acpi_lock_hp_context();
+ bridge = context->bridge;
+ if (bridge)
+ get_bridge(bridge);
+
+ acpi_unlock_hp_context();
+
+ pci_lock_rescan_remove();
+
+ switch (type) {
+ case ACPI_NOTIFY_BUS_CHECK:
+ /* bus re-enumerate */
+ acpi_handle_debug(handle, "Bus check in %s()\n", __func__);
+ if (bridge)
+ acpiphp_check_bridge(bridge);
+ else if (!(slot->flags & SLOT_IS_GOING_AWAY))
+ enable_slot(slot);
+
+ break;
+
+ case ACPI_NOTIFY_DEVICE_CHECK:
+ /* device check */
+ acpi_handle_debug(handle, "Device check in %s()\n", __func__);
+ if (bridge) {
+ acpiphp_check_bridge(bridge);
+ } else if (!(slot->flags & SLOT_IS_GOING_AWAY)) {
+ /*
+ * Check if anything has changed in the slot and rescan
+ * from the parent if that's the case.
+ */
+ if (acpiphp_rescan_slot(slot))
+ acpiphp_check_bridge(func->parent);
+ }
+ break;
+
+ case ACPI_NOTIFY_EJECT_REQUEST:
+ /* request device eject */
+ acpi_handle_debug(handle, "Eject request in %s()\n", __func__);
+ acpiphp_disable_and_eject_slot(slot);
+ break;
+ }
+
+ pci_unlock_rescan_remove();
+ if (bridge)
+ put_bridge(bridge);
+}
+
+static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type)
+{
+ struct acpiphp_context *context;
+
+ context = acpiphp_grab_context(adev);
+ if (!context)
+ return -ENODATA;
+
+ hotplug_event(type, context);
+ acpiphp_let_context_go(context);
+ return 0;
+}
+
+/**
+ * acpiphp_enumerate_slots - Enumerate PCI slots for a given bus.
+ * @bus: PCI bus to enumerate the slots for.
+ *
+ * A "slot" is an object associated with a PCI device number. All functions
+ * (PCI devices) with the same bus and device number belong to the same slot.
+ */
+void acpiphp_enumerate_slots(struct pci_bus *bus)
+{
+ struct acpiphp_bridge *bridge;
+ struct acpi_device *adev;
+ acpi_handle handle;
+ acpi_status status;
+
+ if (acpiphp_disabled)
+ return;
+
+ adev = ACPI_COMPANION(bus->bridge);
+ if (!adev)
+ return;
+
+ handle = adev->handle;
+ bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
+ if (!bridge) {
+ acpi_handle_err(handle, "No memory for bridge object\n");
+ return;
+ }
+
+ INIT_LIST_HEAD(&bridge->slots);
+ kref_init(&bridge->ref);
+ bridge->pci_dev = pci_dev_get(bus->self);
+ bridge->pci_bus = bus;
+
+ /*
+ * Grab a ref to the subordinate PCI bus in case the bus is
+ * removed via PCI core logical hotplug. The ref pins the bus
+ * (which we access during module unload).
+ */
+ get_device(&bus->dev);
+
+ acpi_lock_hp_context();
+ if (pci_is_root_bus(bridge->pci_bus)) {
+ struct acpiphp_root_context *root_context;
+
+ root_context = kzalloc(sizeof(*root_context), GFP_KERNEL);
+ if (!root_context)
+ goto err;
+
+ root_context->root_bridge = bridge;
+ acpi_set_hp_context(adev, &root_context->hp);
+ } else {
+ struct acpiphp_context *context;
+
+ /*
+ * This bridge should have been registered as a hotplug function
+ * under its parent, so the context should be there, unless the
+ * parent is going to be handled by pciehp, in which case this
+ * bridge is not interesting to us either.
+ */
+ context = acpiphp_get_context(adev);
+ if (!context)
+ goto err;
+
+ bridge->context = context;
+ context->bridge = bridge;
+ /* Get a reference to the parent bridge. */
+ get_bridge(context->func.parent);
+ }
+ acpi_unlock_hp_context();
+
+ /* Must be added to the list prior to calling acpiphp_add_context(). */
+ mutex_lock(&bridge_mutex);
+ list_add(&bridge->list, &bridge_list);
+ mutex_unlock(&bridge_mutex);
+
+ /* register all slot objects under this bridge */
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpiphp_add_context, NULL, bridge, NULL);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to register slots\n");
+ cleanup_bridge(bridge);
+ put_bridge(bridge);
+ }
+ return;
+
+ err:
+ acpi_unlock_hp_context();
+ put_device(&bus->dev);
+ pci_dev_put(bridge->pci_dev);
+ kfree(bridge);
+}
+
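+/*
+ * acpiphp_drop_bridge - undo acpiphp_enumerate_slots() for one bridge.
+ *
+ * For a root bus, also detach and free the root context installed on the
+ * bridge's ACPI companion before cleaning up the bridge itself.
+ */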
+static void acpiphp_drop_bridge(struct acpiphp_bridge *bridge)
+{
+ if (pci_is_root_bus(bridge->pci_bus)) {
+ struct acpiphp_root_context *root_context;
+ struct acpi_device *adev;
+
+ acpi_lock_hp_context();
+ adev = ACPI_COMPANION(bridge->pci_bus->bridge);
+ root_context = to_acpiphp_root_context(adev->hp);
+ adev->hp = NULL;
+ acpi_unlock_hp_context();
+ kfree(root_context);
+ }
+ cleanup_bridge(bridge);
+ put_bridge(bridge);
+}
+
+/**
+ * acpiphp_remove_slots - Remove slot objects associated with a given bus.
+ * @bus: PCI bus to remove the slot objects for.
+ */
+void acpiphp_remove_slots(struct pci_bus *bus)
+{
+ struct acpiphp_bridge *bridge;
+
+ if (acpiphp_disabled)
+ return;
+
+ mutex_lock(&bridge_mutex);
+ list_for_each_entry(bridge, &bridge_list, list)
+ if (bridge->pci_bus == bus) {
+ mutex_unlock(&bridge_mutex);
+ acpiphp_drop_bridge(bridge);
+ return;
+ }
+
+ mutex_unlock(&bridge_mutex);
+}
+
+/**
+ * acpiphp_enable_slot - power on slot
+ * @slot: ACPI PHP slot
+ */
+int acpiphp_enable_slot(struct acpiphp_slot *slot)
+{
+ pci_lock_rescan_remove();
+
+	if (slot->flags & SLOT_IS_GOING_AWAY) {
+		pci_unlock_rescan_remove();
+		return -ENODEV;
+	}
+
+ /* configure all functions */
+ if (!(slot->flags & SLOT_ENABLED))
+ enable_slot(slot);
+
+ pci_unlock_rescan_remove();
+ return 0;
+}
+
+/**
+ * acpiphp_disable_and_eject_slot - power off and eject slot
+ * @slot: ACPI PHP slot
+ */
+static int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot)
+{
+ struct acpiphp_func *func;
+
+ if (slot->flags & SLOT_IS_GOING_AWAY)
+ return -ENODEV;
+
+ /* unconfigure all functions */
+ disable_slot(slot);
+
+ list_for_each_entry(func, &slot->funcs, sibling)
+ if (func->flags & FUNC_HAS_EJ0) {
+ acpi_handle handle = func_to_handle(func);
+
+ if (ACPI_FAILURE(acpi_evaluate_ej0(handle)))
+ acpi_handle_err(handle, "_EJ0 failed\n");
+
+ break;
+ }
+
+ return 0;
+}
+
+int acpiphp_disable_slot(struct acpiphp_slot *slot)
+{
+ int ret;
+
+ /*
+ * Acquire acpi_scan_lock to ensure that the execution of _EJ0 in
+ * acpiphp_disable_and_eject_slot() will be synchronized properly.
+ */
+ acpi_scan_lock_acquire();
+ pci_lock_rescan_remove();
+ ret = acpiphp_disable_and_eject_slot(slot);
+ pci_unlock_rescan_remove();
+ acpi_scan_lock_release();
+ return ret;
+}
+
+/*
+ * slot enabled: 1
+ * slot disabled: 0
+ */
+u8 acpiphp_get_power_status(struct acpiphp_slot *slot)
+{
+ return (slot->flags & SLOT_ENABLED);
+}
+
+/*
+ * latch open: 1
+ * latch closed: 0
+ */
+u8 acpiphp_get_latch_status(struct acpiphp_slot *slot)
+{
+ return !(get_slot_status(slot) & ACPI_STA_DEVICE_UI);
+}
+
+/*
+ * adapter presence : 1
+ * absence : 0
+ */
+u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot)
+{
+ return !!get_slot_status(slot);
+}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
new file mode 100644
index 000000000..6ca23998e
--- /dev/null
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -0,0 +1,487 @@
+/*
+ * ACPI PCI Hot Plug IBM Extension
+ *
+ * Copyright (C) 2004 Vernon Mauery <vernux@us.ibm.com>
+ * Copyright (C) 2004 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <vernux@us.ibm.com>
+ *
+ */
+
+#define pr_fmt(fmt) "acpiphp_ibm: " fmt
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <asm/uaccess.h>
+
+#include "acpiphp.h"
+#include "../pci.h"
+
+#define DRIVER_VERSION "1.0.1"
+#define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>"
+#define DRIVER_DESC "ACPI Hot Plug PCI Controller Driver IBM extension"
+
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+#define FOUND_APCI 0x61504349
+/* these are the names for the IBM ACPI pseudo-device */
+#define IBM_HARDWARE_ID1 "IBM37D0"
+#define IBM_HARDWARE_ID2 "IBM37D4"
+
+#define hpslot_to_sun(A) (((struct slot *)((A)->private))->sun)
+
+/* union apci_descriptor - allows access to the
+ * various device descriptors that are embedded in the
+ * aPCI table
+ */
+union apci_descriptor {
+ struct {
+ char sig[4];
+ u8 len;
+ } header;
+ struct {
+ u8 type;
+ u8 len;
+ u16 slot_id;
+ u8 bus_id;
+ u8 dev_num;
+ u8 slot_num;
+ u8 slot_attr[2];
+ u8 attn;
+ u8 status[2];
+ u8 sun;
+ u8 res[3];
+ } slot;
+ struct {
+ u8 type;
+ u8 len;
+ } generic;
+};
+
+/* struct notification - keeps info about the device
+ * that cause the ACPI notification event
+ */
+struct notification {
+ struct acpi_device *device;
+ u8 event;
+};
+
+static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status);
+static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status);
+static void ibm_handle_events(acpi_handle handle, u32 event, void *context);
+static int ibm_get_table_from_acpi(char **bufp);
+static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t size);
+static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
+ u32 lvl, void *context, void **rv);
+static int __init ibm_acpiphp_init(void);
+static void __exit ibm_acpiphp_exit(void);
+
+static acpi_handle ibm_acpi_handle;
+static struct notification ibm_note;
+static struct bin_attribute ibm_apci_table_attr = {
+ .attr = {
+ .name = "apci_table",
+ .mode = S_IRUGO,
+ },
+ .read = ibm_read_apci_table,
+ .write = NULL,
+};
+static struct acpiphp_attention_info ibm_attention_info =
+{
+ .set_attn = ibm_set_attention_status,
+ .get_attn = ibm_get_attention_status,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * ibm_slot_from_id - workaround for bad ibm hardware
+ * @id: the slot number that linux refers to the slot by
+ *
+ * Description: This method returns the aPCI slot descriptor
+ * corresponding to the Linux slot number. This descriptor
+ * has info about the aPCI slot id and attention status.
+ * This descriptor must be freed using kfree when done.
+ */
+static union apci_descriptor *ibm_slot_from_id(int id)
+{
+ int ind = 0, size;
+ union apci_descriptor *ret = NULL, *des;
+ char *table;
+
+	size = ibm_get_table_from_acpi(&table);
+	if (size < 0)
+		return NULL;
+	des = (union apci_descriptor *)table;
+ if (memcmp(des->header.sig, "aPCI", 4) != 0)
+ goto ibm_slot_done;
+
+ des = (union apci_descriptor *)&table[ind += des->header.len];
+ while (ind < size && (des->generic.type != 0x82 ||
+ des->slot.slot_num != id)) {
+ des = (union apci_descriptor *)&table[ind += des->generic.len];
+ }
+
+ if (ind < size && des->slot.slot_num == id)
+ ret = des;
+
+ibm_slot_done:
+	if (ret) {
+		ret = kmalloc(sizeof(union apci_descriptor), GFP_KERNEL);
+		if (ret)
+			memcpy(ret, des, sizeof(union apci_descriptor));
+	}
+ kfree(table);
+ return ret;
+}
+
+/**
+ * ibm_set_attention_status - callback method to set the attention LED
+ * @slot: the hotplug_slot to work with
+ * @status: what to set the LED to (0 or 1)
+ *
+ * Description: This method is registered with the acpiphp module as a
+ * callback to do the device specific task of setting the LED status.
+ */
+static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
+{
+ union acpi_object args[2];
+ struct acpi_object_list params = { .pointer = args, .count = 2 };
+ acpi_status stat;
+ unsigned long long rc;
+ union apci_descriptor *ibm_slot;
+
+	ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot));
+	if (!ibm_slot)
+		return -ENODEV;
+
+ pr_debug("%s: set slot %d (%d) attention status to %d\n", __func__,
+ ibm_slot->slot.slot_num, ibm_slot->slot.slot_id,
+ (status ? 1 : 0));
+
+ args[0].type = ACPI_TYPE_INTEGER;
+ args[0].integer.value = ibm_slot->slot.slot_id;
+ args[1].type = ACPI_TYPE_INTEGER;
+ args[1].integer.value = (status) ? 1 : 0;
+
+ kfree(ibm_slot);
+
+ stat = acpi_evaluate_integer(ibm_acpi_handle, "APLS", &params, &rc);
+ if (ACPI_FAILURE(stat)) {
+ pr_err("APLS evaluation failed: 0x%08x\n", stat);
+ return -ENODEV;
+ } else if (!rc) {
+ pr_err("APLS method failed: 0x%08llx\n", rc);
+ return -ERANGE;
+ }
+ return 0;
+}
+
+/**
+ * ibm_get_attention_status - callback method to get attention LED status
+ * @slot: the hotplug_slot to work with
+ * @status: returns what the LED is set to (0 or 1)
+ *
+ * Description: This method is registered with the acpiphp module as a
+ * callback to do the device specific task of getting the LED status.
+ *
+ * Because there is no direct method of getting the LED status directly
+ * from an ACPI call, we read the aPCI table and parse out our
+ * slot descriptor to read the status from that.
+ */
+static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status)
+{
+ union apci_descriptor *ibm_slot;
+
+	ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot));
+	if (!ibm_slot)
+		return -ENODEV;
+
+ if (ibm_slot->slot.attn & 0xa0 || ibm_slot->slot.status[1] & 0x08)
+ *status = 1;
+ else
+ *status = 0;
+
+ pr_debug("%s: get slot %d (%d) attention status is %d\n", __func__,
+ ibm_slot->slot.slot_num, ibm_slot->slot.slot_id,
+ *status);
+
+ kfree(ibm_slot);
+ return 0;
+}
+
+/**
+ * ibm_handle_events - listens for ACPI events for the IBM37D0 device
+ * @handle: an ACPI handle to the device that caused the event
+ * @event: the event info (device specific)
+ * @context: passed context (our notification struct)
+ *
+ * Description: This method is registered as a callback with the ACPI
+ * subsystem; it is called when this device has an event to notify the OS of.
+ *
+ * The events actually come from the device as two events that get
+ * synthesized into one event with data by this function. The event
+ * ID comes first and then the slot number that caused it. We report
+ * this as one event to the OS.
+ *
+ * From section 5.6.2.2 of the ACPI 2.0 spec, I understand that the OSPM will
+ * only re-enable the interrupt that causes this event AFTER this method
+ * has returned, thereby enforcing serial access for the notification struct.
+ */
+static void ibm_handle_events(acpi_handle handle, u32 event, void *context)
+{
+ u8 detail = event & 0x0f;
+ u8 subevent = event & 0xf0;
+ struct notification *note = context;
+
+ pr_debug("%s: Received notification %02x\n", __func__, event);
+
+ if (subevent == 0x80) {
+ pr_debug("%s: generating bus event\n", __func__);
+ acpi_bus_generate_netlink_event(note->device->pnp.device_class,
+ dev_name(&note->device->dev),
+ note->event, detail);
+ } else
+ note->event = event;
+}
+
+/**
+ * ibm_get_table_from_acpi - reads the APLS buffer from ACPI
+ * @bufp: address to pointer to allocate for the table
+ *
+ * Description: This method reads the APLS buffer in from ACPI and
+ * stores the "stripped" table into a single buffer
+ * it allocates and passes the address back in bufp.
+ *
+ * If NULL is passed in as buffer, this method only calculates
+ * the size of the table and returns that without filling
+ * in the buffer.
+ *
+ * Returns < 0 on error or the size of the table on success.
+ */
+static int ibm_get_table_from_acpi(char **bufp)
+{
+ union acpi_object *package;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ char *lbuf = NULL;
+ int i, size = -EIO;
+
+ status = acpi_evaluate_object(ibm_acpi_handle, "APCI", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ pr_err("%s: APCI evaluation failed\n", __func__);
+ return -ENODEV;
+ }
+
+ package = (union acpi_object *) buffer.pointer;
+ if (!(package) ||
+ (package->type != ACPI_TYPE_PACKAGE) ||
+ !(package->package.elements)) {
+ pr_err("%s: Invalid APCI object\n", __func__);
+ goto read_table_done;
+ }
+
+ for (size = 0, i = 0; i < package->package.count; i++) {
+ if (package->package.elements[i].type != ACPI_TYPE_BUFFER) {
+ pr_err("%s: Invalid APCI element %d\n", __func__, i);
+ goto read_table_done;
+ }
+ size += package->package.elements[i].buffer.length;
+ }
+
+ if (bufp == NULL)
+ goto read_table_done;
+
+ lbuf = kzalloc(size, GFP_KERNEL);
+ pr_debug("%s: element count: %i, ASL table size: %i, &table = 0x%p\n",
+ __func__, package->package.count, size, lbuf);
+
+ if (lbuf) {
+ *bufp = lbuf;
+ } else {
+ size = -ENOMEM;
+ goto read_table_done;
+ }
+
+ size = 0;
+ for (i=0; i<package->package.count; i++) {
+ memcpy(&lbuf[size],
+ package->package.elements[i].buffer.pointer,
+ package->package.elements[i].buffer.length);
+ size += package->package.elements[i].buffer.length;
+ }
+
+read_table_done:
+ kfree(buffer.pointer);
+ return size;
+}
+
+/**
+ * ibm_read_apci_table - callback for the sysfs apci_table file
+ * @filp: the open sysfs file
+ * @kobj: the kobject this binary attribute is a part of
+ * @bin_attr: struct bin_attribute for this file
+ * @buffer: the kernel space buffer to fill
+ * @pos: the offset into the file
+ * @size: the number of bytes requested
+ *
+ * Description: Gets registered with sysfs as the reader callback
+ * to be executed when /sys/bus/pci/slots/apci_table gets read.
+ *
+ * Since we don't get notified on open and close for this file,
+ * things get really tricky here...
+ * Our solution is to only allow the table to be read all at once.
+ */
+static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t size)
+{
+ int bytes_read = -EINVAL;
+ char *table = NULL;
+
+ pr_debug("%s: pos = %d, size = %zd\n", __func__, (int)pos, size);
+
+ if (pos == 0) {
+ bytes_read = ibm_get_table_from_acpi(&table);
+ if (bytes_read > 0 && bytes_read <= size)
+ memcpy(buffer, table, bytes_read);
+ kfree(table);
+ }
+ return bytes_read;
+}
+
+/**
+ * ibm_find_acpi_device - callback to find our ACPI device
+ * @handle: the ACPI handle of the device we are inspecting
+ * @lvl: depth into the namespace tree
+ * @context: a pointer to our handle to fill when we find the device
+ * @rv: a return value to fill if desired
+ *
+ * Description: Used as a callback when calling acpi_walk_namespace
+ * to find our device. When this method returns non-zero
+ * acpi_walk_namespace quits its search and returns our value.
+ */
+static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
+ u32 lvl, void *context, void **rv)
+{
+ acpi_handle *phandle = (acpi_handle *)context;
+ acpi_status status;
+ struct acpi_device_info *info;
+ int retval = 0;
+
+ status = acpi_get_object_info(handle, &info);
+ if (ACPI_FAILURE(status)) {
+ pr_err("%s: Failed to get device information status=0x%x\n",
+ __func__, status);
+ return retval;
+ }
+
+ if (info->current_status && (info->valid & ACPI_VALID_HID) &&
+ (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
+ !strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) {
+ pr_debug("found hardware: %s, handle: %p\n",
+ info->hardware_id.string, handle);
+ *phandle = handle;
+		/*
+		 * Returning non-zero stops the search and propagates this
+		 * value to the caller of acpi_walk_namespace(), but it
+		 * also makes the ACPI debug code print some warnings.
+		 */
+ retval = FOUND_APCI;
+ }
+ kfree(info);
+ return retval;
+}
+
+static int __init ibm_acpiphp_init(void)
+{
+ int retval = 0;
+ acpi_status status;
+ struct acpi_device *device;
+ struct kobject *sysdir = &pci_slots_kset->kobj;
+
+ pr_debug("%s\n", __func__);
+
+ if (acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, ibm_find_acpi_device, NULL,
+ &ibm_acpi_handle, NULL) != FOUND_APCI) {
+ pr_err("%s: acpi_walk_namespace failed\n", __func__);
+ retval = -ENODEV;
+ goto init_return;
+ }
+ pr_debug("%s: found IBM aPCI device\n", __func__);
+ if (acpi_bus_get_device(ibm_acpi_handle, &device)) {
+ pr_err("%s: acpi_bus_get_device failed\n", __func__);
+ retval = -ENODEV;
+ goto init_return;
+ }
+ if (acpiphp_register_attention(&ibm_attention_info)) {
+ retval = -ENODEV;
+ goto init_return;
+ }
+
+ ibm_note.device = device;
+ status = acpi_install_notify_handler(ibm_acpi_handle,
+ ACPI_DEVICE_NOTIFY, ibm_handle_events,
+ &ibm_note);
+ if (ACPI_FAILURE(status)) {
+ pr_err("%s: Failed to register notification handler\n",
+ __func__);
+ retval = -EBUSY;
+ goto init_cleanup;
+ }
+
+ ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
+ retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
+
+ return retval;
+
+init_cleanup:
+ acpiphp_unregister_attention(&ibm_attention_info);
+init_return:
+ return retval;
+}
+
+static void __exit ibm_acpiphp_exit(void)
+{
+ acpi_status status;
+ struct kobject *sysdir = &pci_slots_kset->kobj;
+
+ pr_debug("%s\n", __func__);
+
+ if (acpiphp_unregister_attention(&ibm_attention_info))
+		pr_err("%s: attention info deregistration failed\n", __func__);
+
+ status = acpi_remove_notify_handler(
+ ibm_acpi_handle,
+ ACPI_DEVICE_NOTIFY,
+ ibm_handle_events);
+ if (ACPI_FAILURE(status))
+ pr_err("%s: Notification handler removal failed\n", __func__);
+ /* remove the /sys entries */
+ sysfs_remove_bin_file(sysdir, &ibm_apci_table_attr);
+}
+
+module_init(ibm_acpiphp_init);
+module_exit(ibm_acpiphp_exit);
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
new file mode 100644
index 000000000..6a0ddf757
--- /dev/null
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -0,0 +1,110 @@
+/*
+ * CompactPCI Hot Plug Core Functions
+ *
+ * Copyright (C) 2002 SOMA Networks, Inc.
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <scottm@somanetworks.com>
+ */
+
+#ifndef _CPCI_HOTPLUG_H
+#define _CPCI_HOTPLUG_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+
+/* PICMG 2.1 R2.0 HS CSR bits: */
+#define HS_CSR_INS 0x0080
+#define HS_CSR_EXT 0x0040
+#define HS_CSR_PI 0x0030
+#define HS_CSR_LOO 0x0008
+#define HS_CSR_PIE 0x0004
+#define HS_CSR_EIM 0x0002
+#define HS_CSR_DHA 0x0001
+
+struct slot {
+ u8 number;
+ unsigned int devfn;
+ struct pci_bus *bus;
+ struct pci_dev *dev;
+ unsigned int extracting;
+ struct hotplug_slot *hotplug_slot;
+ struct list_head slot_list;
+};
+
+struct cpci_hp_controller_ops {
+ int (*query_enum) (void);
+ int (*enable_irq) (void);
+ int (*disable_irq) (void);
+ int (*check_irq) (void *dev_id);
+ int (*hardware_test) (struct slot *slot, u32 value);
+ u8 (*get_power) (struct slot *slot);
+ int (*set_power) (struct slot *slot, int value);
+};
+
+struct cpci_hp_controller {
+ unsigned int irq;
+ unsigned long irq_flags;
+ char *devname;
+ void *dev_id;
+ char *name;
+ struct cpci_hp_controller_ops *ops;
+};
+
+static inline const char *slot_name(struct slot *slot)
+{
+ return hotplug_slot_name(slot->hotplug_slot);
+}
+
+int cpci_hp_register_controller(struct cpci_hp_controller *controller);
+int cpci_hp_unregister_controller(struct cpci_hp_controller *controller);
+int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last);
+int cpci_hp_unregister_bus(struct pci_bus *bus);
+int cpci_hp_start(void);
+int cpci_hp_stop(void);
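+
+/*
+ * Typical use by a board/chassis driver (a sketch only; the callback and
+ * variable names below are illustrative, not part of this API):
+ *
+ *	static struct cpci_hp_controller_ops my_ops = {
+ *		.query_enum = my_query_enum,
+ *		.enable_irq = my_enable_irq,
+ *		.disable_irq = my_disable_irq,
+ *	};
+ *	static struct cpci_hp_controller my_controller = {
+ *		.irq = MY_IRQ,
+ *		.ops = &my_ops,
+ *	};
+ *
+ *	cpci_hp_register_controller(&my_controller);
+ *	cpci_hp_register_bus(bus, first_slot, last_slot);
+ *	cpci_hp_start();
+ */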
+
+/*
+ * Internal function prototypes, these functions should not be used by
+ * board/chassis drivers.
+ */
+u8 cpci_get_attention_status(struct slot *slot);
+u8 cpci_get_latch_status(struct slot *slot);
+u8 cpci_get_adapter_status(struct slot *slot);
+u16 cpci_get_hs_csr(struct slot *slot);
+int cpci_set_attention_status(struct slot *slot, int status);
+int cpci_check_and_clear_ins(struct slot *slot);
+int cpci_check_ext(struct slot *slot);
+int cpci_clear_ext(struct slot *slot);
+int cpci_led_on(struct slot *slot);
+int cpci_led_off(struct slot *slot);
+int cpci_configure_slot(struct slot *slot);
+int cpci_unconfigure_slot(struct slot *slot);
+
+#ifdef CONFIG_HOTPLUG_PCI_CPCI
+int cpci_hotplug_init(int debug);
+void cpci_hotplug_exit(void);
+#else
+static inline int cpci_hotplug_init(int debug) { return 0; }
+static inline void cpci_hotplug_exit(void) { }
+#endif
+
+#endif /* _CPCI_HOTPLUG_H */
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
new file mode 100644
index 000000000..46db29395
--- /dev/null
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -0,0 +1,731 @@
+/*
+ * CompactPCI Hot Plug Driver
+ *
+ * Copyright (C) 2002,2005 SOMA Networks, Inc.
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <scottm@somanetworks.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include "cpci_hotplug.h"
+
+#define DRIVER_AUTHOR "Scott Murray <scottm@somanetworks.com>"
+#define DRIVER_DESC "CompactPCI Hot Plug Core"
+
+#define MY_NAME "cpci_hotplug"
+
+#define dbg(format, arg...) \
+ do { \
+ if (cpci_debug) \
+			printk(KERN_DEBUG "%s: " format "\n",	\
+				MY_NAME, ## arg);		\
+	} while (0)
+#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME, ## arg)
+#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME, ## arg)
+#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME, ## arg)
+
+/* local variables */
+static DECLARE_RWSEM(list_rwsem);
+static LIST_HEAD(slot_list);
+static int slots;
+static atomic_t extracting;
+int cpci_debug;
+static struct cpci_hp_controller *controller;
+static struct task_struct *cpci_thread;
+static int thread_finished;
+
+static int enable_slot(struct hotplug_slot *slot);
+static int disable_slot(struct hotplug_slot *slot);
+static int set_attention_status(struct hotplug_slot *slot, u8 value);
+static int get_power_status(struct hotplug_slot *slot, u8 *value);
+static int get_attention_status(struct hotplug_slot *slot, u8 *value);
+static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
+static int get_latch_status(struct hotplug_slot *slot, u8 *value);
+
+static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
+ .enable_slot = enable_slot,
+ .disable_slot = disable_slot,
+ .set_attention_status = set_attention_status,
+ .get_power_status = get_power_status,
+ .get_attention_status = get_attention_status,
+ .get_adapter_status = get_adapter_status,
+ .get_latch_status = get_latch_status,
+};
+
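+/*
+ * The two helpers below push a new latch/adapter status value into the
+ * hotplug core's copy of the slot info so that sysfs reflects reality.
+ */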
+static int
+update_latch_status(struct hotplug_slot *hotplug_slot, u8 value)
+{
+ struct hotplug_slot_info info;
+
+ memcpy(&info, hotplug_slot->info, sizeof(struct hotplug_slot_info));
+ info.latch_status = value;
+ return pci_hp_change_slot_info(hotplug_slot, &info);
+}
+
+static int
+update_adapter_status(struct hotplug_slot *hotplug_slot, u8 value)
+{
+ struct hotplug_slot_info info;
+
+ memcpy(&info, hotplug_slot->info, sizeof(struct hotplug_slot_info));
+ info.adapter_status = value;
+ return pci_hp_change_slot_info(hotplug_slot, &info);
+}
+
+static int
+enable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+ int retval = 0;
+
+ dbg("%s - physical_slot = %s", __func__, slot_name(slot));
+
+ if (controller->ops->set_power)
+ retval = controller->ops->set_power(slot, 1);
+ return retval;
+}
+
+static int
+disable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+ int retval = 0;
+
+ dbg("%s - physical_slot = %s", __func__, slot_name(slot));
+
+ down_write(&list_rwsem);
+
+ /* Unconfigure device */
+ dbg("%s - unconfiguring slot %s", __func__, slot_name(slot));
+ retval = cpci_unconfigure_slot(slot);
+ if (retval) {
+ err("%s - could not unconfigure slot %s",
+ __func__, slot_name(slot));
+ goto disable_error;
+ }
+ dbg("%s - finished unconfiguring slot %s", __func__, slot_name(slot));
+
+ /* Clear EXT (by setting it) */
+ if (cpci_clear_ext(slot)) {
+ err("%s - could not clear EXT for slot %s",
+ __func__, slot_name(slot));
+ retval = -ENODEV;
+ goto disable_error;
+ }
+ cpci_led_on(slot);
+
+ if (controller->ops->set_power) {
+ retval = controller->ops->set_power(slot, 0);
+ if (retval)
+ goto disable_error;
+ }
+
+ if (update_adapter_status(slot->hotplug_slot, 0))
+ warn("failure to update adapter file");
+
+ if (slot->extracting) {
+ slot->extracting = 0;
+ atomic_dec(&extracting);
+ }
+disable_error:
+ up_write(&list_rwsem);
+ return retval;
+}
+
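+/* Slots are assumed powered on when the controller has no get_power callback. */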
+static u8
+cpci_get_power_status(struct slot *slot)
+{
+ u8 power = 1;
+
+ if (controller->ops->get_power)
+ power = controller->ops->get_power(slot);
+ return power;
+}
+
+static int
+get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ *value = cpci_get_power_status(slot);
+ return 0;
+}
+
+static int
+get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ *value = cpci_get_attention_status(slot);
+ return 0;
+}
+
+static int
+set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
+{
+ return cpci_set_attention_status(hotplug_slot->private, status);
+}
+
+static int
+get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ *value = hotplug_slot->info->adapter_status;
+ return 0;
+}
+
+static int
+get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ *value = hotplug_slot->info->latch_status;
+ return 0;
+}
+
+static void release_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ kfree(slot->hotplug_slot->info);
+ kfree(slot->hotplug_slot);
+ pci_dev_put(slot->dev);
+ kfree(slot);
+}
+
+#define SLOT_NAME_SIZE 6
+
+int
+cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
+{
+ struct slot *slot;
+ struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot_info *info;
+ char name[SLOT_NAME_SIZE];
+ int status;
+ int i;
+
+ if (!(controller && bus))
+ return -ENODEV;
+
+ /*
+ * Create a structure for each slot, and register that slot
+ * with the pci_hotplug subsystem.
+ */
+ for (i = first; i <= last; ++i) {
+		slot = kzalloc(sizeof(struct slot), GFP_KERNEL);
+ if (!slot) {
+ status = -ENOMEM;
+ goto error;
+ }
+
+		hotplug_slot =
+			kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
+ if (!hotplug_slot) {
+ status = -ENOMEM;
+ goto error_slot;
+ }
+ slot->hotplug_slot = hotplug_slot;
+
+		info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
+ if (!info) {
+ status = -ENOMEM;
+ goto error_hpslot;
+ }
+ hotplug_slot->info = info;
+
+ slot->bus = bus;
+ slot->number = i;
+ slot->devfn = PCI_DEVFN(i, 0);
+
+ snprintf(name, SLOT_NAME_SIZE, "%02x:%02x", bus->number, i);
+
+ hotplug_slot->private = slot;
+ hotplug_slot->release = &release_slot;
+ hotplug_slot->ops = &cpci_hotplug_slot_ops;
+
+ /*
+ * Initialize the slot info structure with some known
+ * good values.
+ */
+ dbg("initializing slot %s", name);
+ info->power_status = cpci_get_power_status(slot);
+ info->attention_status = cpci_get_attention_status(slot);
+
+ dbg("registering slot %s", name);
+ status = pci_hp_register(slot->hotplug_slot, bus, i, name);
+ if (status) {
+ err("pci_hp_register failed with error %d", status);
+ goto error_info;
+ }
+ dbg("slot registered with name: %s", slot_name(slot));
+
+ /* Add slot to our internal list */
+ down_write(&list_rwsem);
+ list_add(&slot->slot_list, &slot_list);
+ slots++;
+ up_write(&list_rwsem);
+ }
+ return 0;
+error_info:
+ kfree(info);
+error_hpslot:
+ kfree(hotplug_slot);
+error_slot:
+ kfree(slot);
+error:
+ return status;
+}
+EXPORT_SYMBOL_GPL(cpci_hp_register_bus);
+
+int
+cpci_hp_unregister_bus(struct pci_bus *bus)
+{
+ struct slot *slot;
+ struct slot *tmp;
+ int status = 0;
+
+ down_write(&list_rwsem);
+ if (!slots) {
+ up_write(&list_rwsem);
+ return -1;
+ }
+ list_for_each_entry_safe(slot, tmp, &slot_list, slot_list) {
+ if (slot->bus == bus) {
+ list_del(&slot->slot_list);
+ slots--;
+
+ dbg("deregistering slot %s", slot_name(slot));
+ status = pci_hp_deregister(slot->hotplug_slot);
+ if (status) {
+ err("pci_hp_deregister failed with error %d",
+ status);
+ break;
+ }
+ }
+ }
+ up_write(&list_rwsem);
+ return status;
+}
+EXPORT_SYMBOL_GPL(cpci_hp_unregister_bus);
+
+/* This is the interrupt mode interrupt handler */
+static irqreturn_t
+cpci_hp_intr(int irq, void *data)
+{
+ dbg("entered cpci_hp_intr");
+
+ /* Check to see if it was our interrupt */
+ if ((controller->irq_flags & IRQF_SHARED) &&
+ !controller->ops->check_irq(controller->dev_id)) {
+ dbg("exited cpci_hp_intr, not our interrupt");
+ return IRQ_NONE;
+ }
+
+ /* Disable ENUM interrupt */
+ controller->ops->disable_irq();
+
+ /* Trigger processing by the event thread */
+ wake_up_process(cpci_thread);
+ return IRQ_HANDLED;
+}
+
+/*
+ * According to PICMG 2.1 R2.0, section 6.3.2, upon
+ * initialization, the system driver shall clear the
+ * INS bits of the cold-inserted devices.
+ */
+static int
+init_slots(int clear_ins)
+{
+ struct slot *slot;
+ struct pci_dev *dev;
+
+ dbg("%s - enter", __func__);
+ down_read(&list_rwsem);
+ if (!slots) {
+ up_read(&list_rwsem);
+ return -1;
+ }
+ list_for_each_entry(slot, &slot_list, slot_list) {
+ dbg("%s - looking at slot %s", __func__, slot_name(slot));
+ if (clear_ins && cpci_check_and_clear_ins(slot))
+ dbg("%s - cleared INS for slot %s",
+ __func__, slot_name(slot));
+ dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0));
+ if (dev) {
+ if (update_adapter_status(slot->hotplug_slot, 1))
+ warn("failure to update adapter file");
+ if (update_latch_status(slot->hotplug_slot, 1))
+ warn("failure to update latch file");
+ slot->dev = dev;
+ }
+ }
+ up_read(&list_rwsem);
+ dbg("%s - exit", __func__);
+ return 0;
+}
+
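+/*
+ * check_slots - process pending insertions and extractions.
+ *
+ * Walk every registered slot: a set INS bit means a board was inserted,
+ * so configure it; a set EXT bit means an operator requested extraction,
+ * so flag the slot and give user space a chance to react; an all-ones
+ * HS_CSR on an extracting slot means the board is already gone.  Returns
+ * the number of extraction requests seen on this pass, -1 on fatal
+ * conditions, and 0 otherwise.
+ */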
+static int
+check_slots(void)
+{
+ struct slot *slot;
+ int extracted;
+ int inserted;
+ u16 hs_csr;
+
+ down_read(&list_rwsem);
+ if (!slots) {
+ up_read(&list_rwsem);
+ err("no slots registered, shutting down");
+ return -1;
+ }
+ extracted = inserted = 0;
+ list_for_each_entry(slot, &slot_list, slot_list) {
+ dbg("%s - looking at slot %s", __func__, slot_name(slot));
+ if (cpci_check_and_clear_ins(slot)) {
+ /*
+ * Some broken hardware (e.g. PLX 9054AB) asserts
+ * ENUM# twice...
+ */
+ if (slot->dev) {
+ warn("slot %s already inserted",
+ slot_name(slot));
+ inserted++;
+ continue;
+ }
+
+ /* Process insertion */
+ dbg("%s - slot %s inserted", __func__, slot_name(slot));
+
+ /* GSM, debug */
+ hs_csr = cpci_get_hs_csr(slot);
+ dbg("%s - slot %s HS_CSR (1) = %04x",
+ __func__, slot_name(slot), hs_csr);
+
+ /* Configure device */
+ dbg("%s - configuring slot %s",
+ __func__, slot_name(slot));
+ if (cpci_configure_slot(slot)) {
+ err("%s - could not configure slot %s",
+ __func__, slot_name(slot));
+ continue;
+ }
+ dbg("%s - finished configuring slot %s",
+ __func__, slot_name(slot));
+
+ /* GSM, debug */
+ hs_csr = cpci_get_hs_csr(slot);
+ dbg("%s - slot %s HS_CSR (2) = %04x",
+ __func__, slot_name(slot), hs_csr);
+
+ if (update_latch_status(slot->hotplug_slot, 1))
+ warn("failure to update latch file");
+
+ if (update_adapter_status(slot->hotplug_slot, 1))
+ warn("failure to update adapter file");
+
+ cpci_led_off(slot);
+
+ /* GSM, debug */
+ hs_csr = cpci_get_hs_csr(slot);
+ dbg("%s - slot %s HS_CSR (3) = %04x",
+ __func__, slot_name(slot), hs_csr);
+
+ inserted++;
+ } else if (cpci_check_ext(slot)) {
+ /* Process extraction request */
+ dbg("%s - slot %s extracted",
+ __func__, slot_name(slot));
+
+ /* GSM, debug */
+ hs_csr = cpci_get_hs_csr(slot);
+ dbg("%s - slot %s HS_CSR = %04x",
+ __func__, slot_name(slot), hs_csr);
+
+ if (!slot->extracting) {
+ if (update_latch_status(slot->hotplug_slot, 0))
+ warn("failure to update latch file");
+
+ slot->extracting = 1;
+ atomic_inc(&extracting);
+ }
+ extracted++;
+ } else if (slot->extracting) {
+ hs_csr = cpci_get_hs_csr(slot);
+ if (hs_csr == 0xffff) {
+ /*
+ * Hmmm, we're likely hosed at this point, should we
+ * bother trying to tell the driver or not?
+ */
+ err("card in slot %s was improperly removed",
+ slot_name(slot));
+ if (update_adapter_status(slot->hotplug_slot, 0))
+ warn("failure to update adapter file");
+ slot->extracting = 0;
+ atomic_dec(&extracting);
+ }
+ }
+ }
+ up_read(&list_rwsem);
+ dbg("inserted=%d, extracted=%d, extracting=%d",
+ inserted, extracted, atomic_read(&extracting));
+ if (inserted || extracted)
+ return extracted;
+ else if (!atomic_read(&extracting)) {
+ err("cannot find ENUM# source, shutting down");
+ return -1;
+ }
+ return 0;
+}
+
+/* This is the interrupt mode worker thread body */
+static int
+event_thread(void *data)
+{
+ int rc;
+
+ dbg("%s - event thread started", __func__);
+ while (1) {
+ dbg("event thread sleeping");
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ if (kthread_should_stop())
+ break;
+ do {
+ rc = check_slots();
+ if (rc > 0) {
+ /* Give userspace a chance to handle extraction */
+ msleep(500);
+ } else if (rc < 0) {
+ dbg("%s - error checking slots", __func__);
+ thread_finished = 1;
+ goto out;
+ }
+ } while (atomic_read(&extracting) && !kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
+ /* Re-enable ENUM# interrupt */
+ dbg("%s - re-enabling irq", __func__);
+ controller->ops->enable_irq();
+ }
+ out:
+ return 0;
+}
+
+/* This is the polling mode worker thread body */
+static int
+poll_thread(void *data)
+{
+ int rc;
+
+ while (1) {
+ if (kthread_should_stop() || signal_pending(current))
+ break;
+ if (controller->ops->query_enum()) {
+ do {
+ rc = check_slots();
+ if (rc > 0) {
+ /* Give userspace a chance to handle extraction */
+ msleep(500);
+ } else if (rc < 0) {
+ dbg("%s - error checking slots", __func__);
+ thread_finished = 1;
+ goto out;
+ }
+ } while (atomic_read(&extracting) && !kthread_should_stop());
+ }
+ msleep(100);
+ }
+ out:
+ return 0;
+}
+
+static int
+cpci_start_thread(void)
+{
+ if (controller->irq)
+ cpci_thread = kthread_run(event_thread, NULL, "cpci_hp_eventd");
+ else
+ cpci_thread = kthread_run(poll_thread, NULL, "cpci_hp_polld");
+ if (IS_ERR(cpci_thread)) {
+ err("Can't start up our thread");
+ return PTR_ERR(cpci_thread);
+ }
+ thread_finished = 0;
+ return 0;
+}
+
+static void
+cpci_stop_thread(void)
+{
+ kthread_stop(cpci_thread);
+ thread_finished = 1;
+}
+
+int
+cpci_hp_register_controller(struct cpci_hp_controller *new_controller)
+{
+ int status = 0;
+
+ if (controller)
+ return -1;
+ if (!(new_controller && new_controller->ops))
+ return -EINVAL;
+	if (new_controller->irq) {
+		if (!(new_controller->ops->enable_irq &&
+		      new_controller->ops->disable_irq))
+			status = -EINVAL;
+		else if (request_irq(new_controller->irq,
+				cpci_hp_intr,
+				new_controller->irq_flags,
+				MY_NAME,
+				new_controller->dev_id)) {
+			err("Can't get irq %d for the hotplug cPCI controller",
+			    new_controller->irq);
+			status = -ENODEV;
+		} else
+			dbg("%s - acquired controller irq %d",
+			    __func__, new_controller->irq);
+	}
+ if (!status)
+ controller = new_controller;
+ return status;
+}
+EXPORT_SYMBOL_GPL(cpci_hp_register_controller);
+
+static void
+cleanup_slots(void)
+{
+ struct slot *slot;
+ struct slot *tmp;
+
+ /*
+ * Unregister all of our slots with the pci_hotplug subsystem,
+ * and free up all memory that we had allocated.
+ */
+ down_write(&list_rwsem);
+ if (!slots)
+ goto cleanup_null;
+ list_for_each_entry_safe(slot, tmp, &slot_list, slot_list) {
+ list_del(&slot->slot_list);
+ pci_hp_deregister(slot->hotplug_slot);
+ }
+cleanup_null:
+ up_write(&list_rwsem);
+ return;
+}
+
+int
+cpci_hp_unregister_controller(struct cpci_hp_controller *old_controller)
+{
+ int status = 0;
+
+ if (controller) {
+ if (!thread_finished)
+ cpci_stop_thread();
+ if (controller->irq)
+ free_irq(controller->irq, controller->dev_id);
+ controller = NULL;
+ cleanup_slots();
+ } else
+ status = -ENODEV;
+ return status;
+}
+EXPORT_SYMBOL_GPL(cpci_hp_unregister_controller);
+
+int
+cpci_hp_start(void)
+{
+ static int first = 1;
+ int status;
+
+ dbg("%s - enter", __func__);
+ if (!controller)
+ return -ENODEV;
+
+ down_read(&list_rwsem);
+ if (list_empty(&slot_list)) {
+ up_read(&list_rwsem);
+ return -ENODEV;
+ }
+ up_read(&list_rwsem);
+
+ status = init_slots(first);
+ if (first)
+ first = 0;
+ if (status)
+ return status;
+
+ status = cpci_start_thread();
+ if (status)
+ return status;
+ dbg("%s - thread started", __func__);
+
+ if (controller->irq) {
+ /* Start enum interrupt processing */
+ dbg("%s - enabling irq", __func__);
+ controller->ops->enable_irq();
+ }
+ dbg("%s - exit", __func__);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpci_hp_start);
+
+int
+cpci_hp_stop(void)
+{
+ if (!controller)
+ return -ENODEV;
+ if (controller->irq) {
+ /* Stop enum interrupt processing */
+ dbg("%s - disabling irq", __func__);
+ controller->ops->disable_irq();
+ }
+ cpci_stop_thread();
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpci_hp_stop);
+
+int __init
+cpci_hotplug_init(int debug)
+{
+ cpci_debug = debug;
+ return 0;
+}
+
+void __exit
+cpci_hotplug_exit(void)
+{
+ /*
+ * Clean everything up.
+ */
+ cpci_hp_stop();
+ cpci_hp_unregister_controller(controller);
+}
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
new file mode 100644
index 000000000..788db48db
--- /dev/null
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -0,0 +1,333 @@
+/*
+ * CompactPCI Hot Plug Driver PCI functions
+ *
+ * Copyright (C) 2002,2005 by SOMA Networks, Inc.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <scottm@somanetworks.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/proc_fs.h>
+#include "../pci.h"
+#include "cpci_hotplug.h"
+
+#define MY_NAME "cpci_hotplug"
+
+extern int cpci_debug;
+
+#define dbg(format, arg...) \
+ do { \
+ if (cpci_debug) \
+			printk(KERN_DEBUG "%s: " format "\n",	\
+				MY_NAME, ## arg);		\
+	} while (0)
+#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME, ## arg)
+#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME, ## arg)
+#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME, ## arg)
+
+
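+/*
+ * All of the helpers below operate on the Hot Swap Control/Status
+ * Register (HS_CSR), found two bytes into the CompactPCI hot swap
+ * capability (PCI_CAP_ID_CHSWP).  Note that the INS and EXT bits are
+ * write-one-to-clear, hence the "clear by setting" idiom.
+ */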
+u8 cpci_get_attention_status(struct slot *slot)
+{
+ int hs_cap;
+ u16 hs_csr;
+
+ hs_cap = pci_bus_find_capability(slot->bus,
+ slot->devfn,
+ PCI_CAP_ID_CHSWP);
+ if (!hs_cap)
+ return 0;
+
+ if (pci_bus_read_config_word(slot->bus,
+ slot->devfn,
+ hs_cap + 2,
+ &hs_csr))
+ return 0;
+
+ return hs_csr & 0x0008 ? 1 : 0;
+}
+
+int cpci_set_attention_status(struct slot *slot, int status)
+{
+ int hs_cap;
+ u16 hs_csr;
+
+ hs_cap = pci_bus_find_capability(slot->bus,
+ slot->devfn,
+ PCI_CAP_ID_CHSWP);
+ if (!hs_cap)
+ return 0;
+ if (pci_bus_read_config_word(slot->bus,
+ slot->devfn,
+ hs_cap + 2,
+ &hs_csr))
+ return 0;
+ if (status)
+ hs_csr |= HS_CSR_LOO;
+ else
+ hs_csr &= ~HS_CSR_LOO;
+ if (pci_bus_write_config_word(slot->bus,
+ slot->devfn,
+ hs_cap + 2,
+ hs_csr))
+ return 0;
+ return 1;
+}
+
+u16 cpci_get_hs_csr(struct slot *slot)
+{
+ int hs_cap;
+ u16 hs_csr;
+
+ hs_cap = pci_bus_find_capability(slot->bus,
+ slot->devfn,
+ PCI_CAP_ID_CHSWP);
+ if (!hs_cap)
+ return 0xFFFF;
+ if (pci_bus_read_config_word(slot->bus,
+ slot->devfn,
+ hs_cap + 2,
+ &hs_csr))
+ return 0xFFFF;
+ return hs_csr;
+}
+
+int cpci_check_and_clear_ins(struct slot *slot)
+{
+ int hs_cap;
+ u16 hs_csr;
+ int ins = 0;
+
+ hs_cap = pci_bus_find_capability(slot->bus,
+ slot->devfn,
+ PCI_CAP_ID_CHSWP);
+ if (!hs_cap)
+ return 0;
+ if (pci_bus_read_config_word(slot->bus,
+ slot->devfn,
+ hs_cap + 2,
+ &hs_csr))
+ return 0;
+ if (hs_csr & HS_CSR_INS) {
+ /* Clear INS (by setting it) */
+ if (pci_bus_write_config_word(slot->bus,
+ slot->devfn,
+ hs_cap + 2,
+ hs_csr))
+ ins = 0;
+ else
+ ins = 1;
+ }
+ return ins;
+}
+
+int cpci_check_ext(struct slot *slot)
+{
+ int hs_cap;
+ u16 hs_csr;
+ int ext = 0;
+
+ hs_cap = pci_bus_find_capability(slot->bus,
+ slot->devfn,
+ PCI_CAP_ID_CHSWP);
+ if (!hs_cap)
+ return 0;
+ if (pci_bus_read_config_word(slot->bus,
+ slot->devfn,
+ hs_cap + 2,
+ &hs_csr))
+ return 0;
+ if (hs_csr & HS_CSR_EXT)
+ ext = 1;
+ return ext;
+}
+
+int cpci_clear_ext(struct slot *slot)
+{
+ int hs_cap;
+ u16 hs_csr;
+
+ hs_cap = pci_bus_find_capability(slot->bus,
+ slot->devfn,
+ PCI_CAP_ID_CHSWP);
+ if (!hs_cap)
+ return -ENODEV;
+ if (pci_bus_read_config_word(slot->bus,
+ slot->devfn,
+ hs_cap + 2,
+ &hs_csr))
+ return -ENODEV;
+ if (hs_csr & HS_CSR_EXT) {
+ /* Clear EXT (by setting it) */
+ if (pci_bus_write_config_word(slot->bus,
+ slot->devfn,
+ hs_cap + 2,
+ hs_csr))
+ return -ENODEV;
+ }
+ return 0;
+}
+
+int cpci_led_on(struct slot *slot)
+{
+ int hs_cap;
+ u16 hs_csr;
+
+ hs_cap = pci_bus_find_capability(slot->bus,
+ slot->devfn,
+ PCI_CAP_ID_CHSWP);
+ if (!hs_cap)
+ return -ENODEV;
+ if (pci_bus_read_config_word(slot->bus,
+ slot->devfn,
+ hs_cap + 2,
+ &hs_csr))
+ return -ENODEV;
+ if ((hs_csr & HS_CSR_LOO) != HS_CSR_LOO) {
+ hs_csr |= HS_CSR_LOO;
+ if (pci_bus_write_config_word(slot->bus,
+ slot->devfn,
+ hs_cap + 2,
+ hs_csr)) {
+ err("Could not set LOO for slot %s",
+ hotplug_slot_name(slot->hotplug_slot));
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+int cpci_led_off(struct slot *slot)
+{
+ int hs_cap;
+ u16 hs_csr;
+
+ hs_cap = pci_bus_find_capability(slot->bus,
+ slot->devfn,
+ PCI_CAP_ID_CHSWP);
+ if (!hs_cap)
+ return -ENODEV;
+ if (pci_bus_read_config_word(slot->bus,
+ slot->devfn,
+ hs_cap + 2,
+ &hs_csr))
+ return -ENODEV;
+ if (hs_csr & HS_CSR_LOO) {
+ hs_csr &= ~HS_CSR_LOO;
+ if (pci_bus_write_config_word(slot->bus,
+ slot->devfn,
+ hs_cap + 2,
+ hs_csr)) {
+ err("Could not clear LOO for slot %s",
+ hotplug_slot_name(slot->hotplug_slot));
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+
+/*
+ * Device configuration functions
+ */
+
+int cpci_configure_slot(struct slot *slot)
+{
+ struct pci_dev *dev;
+ struct pci_bus *parent;
+ int ret = 0;
+
+ dbg("%s - enter", __func__);
+
+ pci_lock_rescan_remove();
+
+ if (slot->dev == NULL) {
+ dbg("pci_dev null, finding %02x:%02x:%x",
+ slot->bus->number, PCI_SLOT(slot->devfn), PCI_FUNC(slot->devfn));
+ slot->dev = pci_get_slot(slot->bus, slot->devfn);
+ }
+
+ /* Still NULL? Well then scan for it! */
+ if (slot->dev == NULL) {
+ int n;
+ dbg("pci_dev still null");
+
+ /*
+ * This will generate pci_dev structures for all functions, but
+ * we will only call this case when lookup fails.
+ */
+ n = pci_scan_slot(slot->bus, slot->devfn);
+ dbg("%s: pci_scan_slot returned %d", __func__, n);
+ slot->dev = pci_get_slot(slot->bus, slot->devfn);
+ if (slot->dev == NULL) {
+ err("Could not find PCI device for slot %02x", slot->number);
+ ret = -ENODEV;
+ goto out;
+ }
+ }
+ parent = slot->dev->bus;
+
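+ /*
+ * Scan any bridges in the slot first so that their secondary buses
+ * exist before bridge resources are assigned below.
+ */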
+ list_for_each_entry(dev, &parent->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
+ continue;
+ if (pci_is_bridge(dev))
+ pci_hp_add_bridge(dev);
+ }
+
+ pci_assign_unassigned_bridge_resources(parent->self);
+
+ pci_bus_add_devices(parent);
+
+ out:
+ pci_unlock_rescan_remove();
+ dbg("%s - exit", __func__);
+ return ret;
+}
+
+int cpci_unconfigure_slot(struct slot *slot)
+{
+ struct pci_dev *dev, *temp;
+
+ dbg("%s - enter", __func__);
+ if (!slot->dev) {
+ err("No device for slot %02x\n", slot->number);
+ return -ENODEV;
+ }
+
+ pci_lock_rescan_remove();
+
+ list_for_each_entry_safe(dev, temp, &slot->bus->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn))
+ continue;
+ pci_dev_get(dev);
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
+ }
+ pci_dev_put(slot->dev);
+ slot->dev = NULL;
+
+ pci_unlock_rescan_remove();
+
+ dbg("%s - exit", __func__);
+ return 0;
+}
diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
new file mode 100644
index 000000000..66b7bbebe
--- /dev/null
+++ b/drivers/pci/hotplug/cpcihp_generic.c
@@ -0,0 +1,226 @@
+/*
+ * cpcihp_generic.c
+ *
+ * Generic port I/O CompactPCI driver
+ *
+ * Copyright 2002 SOMA Networks, Inc.
+ * Copyright 2001 Intel San Luis Obispo
+ * Copyright 2000,2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * This generic CompactPCI hotplug driver should allow using the PCI hotplug
+ * mechanism on any CompactPCI board that exposes the #ENUM signal as a bit
+ * in a system register that can be read through standard port I/O.
+ *
+ * Send feedback to <scottm@somanetworks.com>
+ */
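+
+/*
+ * Example invocation (hypothetical values; see the module parameter
+ * descriptions at the bottom of this file):
+ *
+ *   modprobe cpcihp_generic bridge=00:0f first_slot=2 last_slot=8 \
+ *            port=0xe1 enum_bit=6
+ *
+ * bridge is <bus>:<slot> in hexadecimal; enum_bit selects which bit of
+ * the byte read from port carries the #ENUM signal.
+ */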
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include "cpci_hotplug.h"
+
+#define DRIVER_VERSION "0.1"
+#define DRIVER_AUTHOR "Scott Murray <scottm@somanetworks.com>"
+#define DRIVER_DESC "Generic port I/O CompactPCI Hot Plug Driver"
+
+#if !defined(MODULE)
+#define MY_NAME "cpcihp_generic"
+#else
+#define MY_NAME THIS_MODULE->name
+#endif
+
+#define dbg(format, arg...) \
+ do { \
+ if (debug) \
+ printk (KERN_DEBUG "%s: " format "\n", \
+ MY_NAME , ## arg); \
+ } while (0)
+#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
+#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
+#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
+
+/* local variables */
+static bool debug;
+static char *bridge;
+static u8 bridge_busnr;
+static u8 bridge_slot;
+static struct pci_bus *bus;
+static u8 first_slot;
+static u8 last_slot;
+static u16 port;
+static unsigned int enum_bit;
+static u8 enum_mask;
+
+static struct cpci_hp_controller_ops generic_hpc_ops;
+static struct cpci_hp_controller generic_hpc;
+
+static int __init validate_parameters(void)
+{
+ char *str;
+ char *p;
+ unsigned long tmp;
+
+ if (!bridge) {
+ info("not configured, disabling.");
+ return -EINVAL;
+ }
+ str = bridge;
+ if (!*str)
+ return -EINVAL;
+
+ tmp = simple_strtoul(str, &p, 16);
+ if (p == str || tmp > 0xff) {
+ err("Invalid hotplug bus bridge device bus number");
+ return -EINVAL;
+ }
+ bridge_busnr = (u8) tmp;
+ dbg("bridge_busnr = 0x%02x", bridge_busnr);
+ if (*p != ':') {
+ err("Invalid hotplug bus bridge device");
+ return -EINVAL;
+ }
+ str = p + 1;
+ tmp = simple_strtoul(str, &p, 16);
+ if (p == str || tmp > 0x1f) {
+ err("Invalid hotplug bus bridge device slot number");
+ return -EINVAL;
+ }
+ bridge_slot = (u8) tmp;
+ dbg("bridge_slot = 0x%02x", bridge_slot);
+
+ dbg("first_slot = 0x%02x", first_slot);
+ dbg("last_slot = 0x%02x", last_slot);
+ if (!(first_slot && last_slot)) {
+ err("Need to specify first_slot and last_slot");
+ return -EINVAL;
+ }
+ if (last_slot < first_slot) {
+ err("first_slot must be less than last_slot");
+ return -EINVAL;
+ }
+
+ dbg("port = 0x%04x", port);
+ dbg("enum_bit = 0x%02x", enum_bit);
+ if (enum_bit > 7) {
+ err("Invalid #ENUM bit");
+ return -EINVAL;
+ }
+ enum_mask = 1 << enum_bit;
+ return 0;
+}
+
+static int query_enum(void)
+{
+ u8 value;
+
+ value = inb_p(port);
+ return ((value & enum_mask) == enum_mask);
+}
+
+static int __init cpcihp_generic_init(void)
+{
+ int status;
+ struct resource *r;
+ struct pci_dev *dev;
+
+ info(DRIVER_DESC " version: " DRIVER_VERSION);
+ status = validate_parameters();
+ if (status)
+ return status;
+
+ r = request_region(port, 1, "#ENUM hotswap signal register");
+ if (!r)
+ return -EBUSY;
+
+ dev = pci_get_domain_bus_and_slot(0, bridge_busnr,
+ PCI_DEVFN(bridge_slot, 0));
+ if (!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
+ err("Invalid bridge device %s", bridge);
+ pci_dev_put(dev);
+ status = -EINVAL;
+ goto init_region_error;
+ }
+ bus = dev->subordinate;
+ pci_dev_put(dev);
+
+ memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
+ generic_hpc_ops.query_enum = query_enum;
+ generic_hpc.ops = &generic_hpc_ops;
+
+ status = cpci_hp_register_controller(&generic_hpc);
+ if (status != 0) {
+ err("Could not register cPCI hotplug controller");
+ status = -ENODEV;
+ goto init_region_error;
+ }
+ dbg("registered controller");
+
+ status = cpci_hp_register_bus(bus, first_slot, last_slot);
+ if (status != 0) {
+ err("Could not register cPCI hotplug bus");
+ goto init_bus_register_error;
+ }
+ dbg("registered bus");
+
+ status = cpci_hp_start();
+ if (status != 0) {
+ err("Could not started cPCI hotplug system");
+ goto init_start_error;
+ }
+ dbg("started cpci hp system");
+ return 0;
+init_start_error:
+ cpci_hp_unregister_bus(bus);
+init_bus_register_error:
+ cpci_hp_unregister_controller(&generic_hpc);
+init_region_error:
+ release_region(port, 1);
+ err("status = %d", status);
+ return status;
+}
+
+static void __exit cpcihp_generic_exit(void)
+{
+ cpci_hp_stop();
+ cpci_hp_unregister_bus(bus);
+ cpci_hp_unregister_controller(&generic_hpc);
+ release_region(port, 1);
+}
+
+module_init(cpcihp_generic_init);
+module_exit(cpcihp_generic_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+module_param(debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
+module_param(bridge, charp, 0);
+MODULE_PARM_DESC(bridge, "Hotswap bus bridge device, <bus>:<slot> (bus and slot are in hexadecimal)");
+module_param(first_slot, byte, 0);
+MODULE_PARM_DESC(first_slot, "Hotswap bus first slot number");
+module_param(last_slot, byte, 0);
+MODULE_PARM_DESC(last_slot, "Hotswap bus last slot number");
+module_param(port, ushort, 0);
+MODULE_PARM_DESC(port, "#ENUM signal I/O port");
+module_param(enum_bit, uint, 0);
+MODULE_PARM_DESC(enum_bit, "#ENUM signal bit (0-7)");
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
new file mode 100644
index 000000000..7ecf34e76
--- /dev/null
+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
@@ -0,0 +1,328 @@
+/*
+ * cpcihp_zt5550.c
+ *
+ * Intel/Ziatech ZT5550 CompactPCI Host Controller driver
+ *
+ * Copyright 2002 SOMA Networks, Inc.
+ * Copyright 2001 Intel San Luis Obispo
+ * Copyright 2000,2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <scottm@somanetworks.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/signal.h> /* IRQF_SHARED */
+#include "cpci_hotplug.h"
+#include "cpcihp_zt5550.h"
+
+#define DRIVER_VERSION "0.2"
+#define DRIVER_AUTHOR "Scott Murray <scottm@somanetworks.com>"
+#define DRIVER_DESC "ZT5550 CompactPCI Hot Plug Driver"
+
+#define MY_NAME "cpcihp_zt5550"
+
+#define dbg(format, arg...) \
+ do { \
+ if (debug) \
+ printk (KERN_DEBUG "%s: " format "\n", \
+ MY_NAME , ## arg); \
+ } while (0)
+#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
+#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
+#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
+
+/* local variables */
+static bool debug;
+static bool poll;
+static struct cpci_hp_controller_ops zt5550_hpc_ops;
+static struct cpci_hp_controller zt5550_hpc;
+
+/* Primary cPCI bus bridge device */
+static struct pci_dev *bus0_dev;
+static struct pci_bus *bus0;
+
+/* Host controller device */
+static struct pci_dev *hc_dev;
+
+/* Host controller register addresses */
+static void __iomem *hc_registers;
+static void __iomem *csr_hc_index;
+static void __iomem *csr_hc_data;
+static void __iomem *csr_int_status;
+static void __iomem *csr_int_mask;
+
+
+static int zt5550_hc_config(struct pci_dev *pdev)
+{
+ int ret;
+
+ /* Since we know that no boards exist with two HC chips, treat it as an error */
+ if (hc_dev) {
+ err("too many host controller devices?");
+ return -EBUSY;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ err("cannot enable %s\n", pci_name(pdev));
+ return ret;
+ }
+
+ hc_dev = pdev;
+ dbg("hc_dev = %p", hc_dev);
+ dbg("pci resource start %llx", (unsigned long long)pci_resource_start(hc_dev, 1));
+ dbg("pci resource len %llx", (unsigned long long)pci_resource_len(hc_dev, 1));
+
+ if (!request_mem_region(pci_resource_start(hc_dev, 1),
+ pci_resource_len(hc_dev, 1), MY_NAME)) {
+ err("cannot reserve MMIO region");
+ ret = -ENOMEM;
+ goto exit_disable_device;
+ }
+
+ hc_registers =
+ ioremap(pci_resource_start(hc_dev, 1), pci_resource_len(hc_dev, 1));
+ if (!hc_registers) {
+ err("cannot remap MMIO region %llx @ %llx",
+ (unsigned long long)pci_resource_len(hc_dev, 1),
+ (unsigned long long)pci_resource_start(hc_dev, 1));
+ ret = -ENODEV;
+ goto exit_release_region;
+ }
+
+ csr_hc_index = hc_registers + CSR_HCINDEX;
+ csr_hc_data = hc_registers + CSR_HCDATA;
+ csr_int_status = hc_registers + CSR_INTSTAT;
+ csr_int_mask = hc_registers + CSR_INTMASK;
+
+ /*
+ * Disable host control, fault and serial interrupts
+ */
+ dbg("disabling host control, fault and serial interrupts");
+ writeb((u8) HC_INT_MASK_REG, csr_hc_index);
+ writeb((u8) ALL_INDEXED_INTS_MASK, csr_hc_data);
+ dbg("disabled host control, fault and serial interrupts");
+
+ /*
+ * Disable timer0, timer1 and ENUM interrupts
+ */
+ dbg("disabling timer0, timer1 and ENUM interrupts");
+ writeb((u8) ALL_DIRECT_INTS_MASK, csr_int_mask);
+ dbg("disabled timer0, timer1 and ENUM interrupts");
+ return 0;
+
+exit_release_region:
+ release_mem_region(pci_resource_start(hc_dev, 1),
+ pci_resource_len(hc_dev, 1));
+exit_disable_device:
+ pci_disable_device(hc_dev);
+ return ret;
+}
+
+static int zt5550_hc_cleanup(void)
+{
+ if (!hc_dev)
+ return -ENODEV;
+
+ iounmap(hc_registers);
+ release_mem_region(pci_resource_start(hc_dev, 1),
+ pci_resource_len(hc_dev, 1));
+ pci_disable_device(hc_dev);
+ return 0;
+}
+
+static int zt5550_hc_query_enum(void)
+{
+ u8 value;
+
+ value = inb_p(ENUM_PORT);
+ return ((value & ENUM_MASK) == ENUM_MASK);
+}
+
+static int zt5550_hc_check_irq(void *dev_id)
+{
+ int ret;
+ u8 reg;
+
+ ret = 0;
+ if (dev_id == zt5550_hpc.dev_id) {
+ reg = readb(csr_int_status);
+ if (reg)
+ ret = 1;
+ }
+ return ret;
+}
+
+static int zt5550_hc_enable_irq(void)
+{
+ u8 reg;
+
+ if (hc_dev == NULL)
+ return -ENODEV;
+
+ reg = readb(csr_int_mask);
+ reg = reg & ~ENUM_INT_MASK;
+ writeb(reg, csr_int_mask);
+ return 0;
+}
+
+static int zt5550_hc_disable_irq(void)
+{
+ u8 reg;
+
+ if (hc_dev == NULL)
+ return -ENODEV;
+
+ reg = readb(csr_int_mask);
+ reg = reg | ENUM_INT_MASK;
+ writeb(reg, csr_int_mask);
+ return 0;
+}
+
+static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int status;
+
+ status = zt5550_hc_config(pdev);
+ if (status != 0)
+ return status;
+
+ dbg("returned from zt5550_hc_config");
+
+ memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
+ zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
+ zt5550_hpc.ops = &zt5550_hpc_ops;
+ if (!poll) {
+ zt5550_hpc.irq = hc_dev->irq;
+ zt5550_hpc.irq_flags = IRQF_SHARED;
+ zt5550_hpc.dev_id = hc_dev;
+
+ zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
+ zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
+ zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
+ } else {
+ info("using ENUM# polling mode");
+ }
+
+ status = cpci_hp_register_controller(&zt5550_hpc);
+ if (status != 0) {
+ err("could not register cPCI hotplug controller");
+ goto init_hc_error;
+ }
+ dbg("registered controller");
+
+ /* Look for first device matching cPCI bus's bridge vendor and device IDs */
+ bus0_dev = pci_get_device(PCI_VENDOR_ID_DEC,
+ PCI_DEVICE_ID_DEC_21154, NULL);
+ if (!bus0_dev) {
+ status = -ENODEV;
+ goto init_register_error;
+ }
+ bus0 = bus0_dev->subordinate;
+ pci_dev_put(bus0_dev);
+
+ status = cpci_hp_register_bus(bus0, 0x0a, 0x0f);
+ if (status != 0) {
+ err("could not register cPCI hotplug bus");
+ goto init_register_error;
+ }
+ dbg("registered bus");
+
+ status = cpci_hp_start();
+ if (status != 0) {
+ err("could not started cPCI hotplug system");
+ cpci_hp_unregister_bus(bus0);
+ goto init_register_error;
+ }
+ dbg("started cpci hp system");
+
+ return 0;
+init_register_error:
+ cpci_hp_unregister_controller(&zt5550_hpc);
+init_hc_error:
+ err("status = %d", status);
+ zt5550_hc_cleanup();
+ return status;
+}
+
+static void zt5550_hc_remove_one(struct pci_dev *pdev)
+{
+ cpci_hp_stop();
+ cpci_hp_unregister_bus(bus0);
+ cpci_hp_unregister_controller(&zt5550_hpc);
+ zt5550_hc_cleanup();
+}
+
+
+static struct pci_device_id zt5550_hc_pci_tbl[] = {
+ { PCI_VENDOR_ID_ZIATECH, PCI_DEVICE_ID_ZIATECH_5550_HC, PCI_ANY_ID, PCI_ANY_ID, },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, zt5550_hc_pci_tbl);
+
+static struct pci_driver zt5550_hc_driver = {
+ .name = "zt5550_hc",
+ .id_table = zt5550_hc_pci_tbl,
+ .probe = zt5550_hc_init_one,
+ .remove = zt5550_hc_remove_one,
+};
+
+static int __init zt5550_init(void)
+{
+ struct resource *r;
+ int rc;
+
+ info(DRIVER_DESC " version: " DRIVER_VERSION);
+ r = request_region(ENUM_PORT, 1, "#ENUM hotswap signal register");
+ if (!r)
+ return -EBUSY;
+
+ rc = pci_register_driver(&zt5550_hc_driver);
+ if (rc < 0)
+ release_region(ENUM_PORT, 1);
+ return rc;
+}
+
+static void __exit
+zt5550_exit(void)
+{
+ pci_unregister_driver(&zt5550_hc_driver);
+ release_region(ENUM_PORT, 1);
+}
+
+module_init(zt5550_init);
+module_exit(zt5550_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+module_param(debug, bool, 0644);
+MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
+module_param(poll, bool, 0644);
+MODULE_PARM_DESC(poll, "#ENUM polling mode enabled or not");
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.h b/drivers/pci/hotplug/cpcihp_zt5550.h
new file mode 100644
index 000000000..9a57fda53
--- /dev/null
+++ b/drivers/pci/hotplug/cpcihp_zt5550.h
@@ -0,0 +1,79 @@
+/*
+ * cpcihp_zt5550.h
+ *
+ * Intel/Ziatech ZT5550 CompactPCI Host Controller driver definitions
+ *
+ * Copyright 2002 SOMA Networks, Inc.
+ * Copyright 2001 Intel San Luis Obispo
+ * Copyright 2000,2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <scottm@somanetworks.com>
+ */
+
+#ifndef _CPCIHP_ZT5550_H
+#define _CPCIHP_ZT5550_H
+
+/* Direct registers */
+#define CSR_HCINDEX 0x00
+#define CSR_HCDATA 0x04
+#define CSR_INTSTAT 0x08
+#define CSR_INTMASK 0x09
+#define CSR_CNT0CMD 0x0C
+#define CSR_CNT1CMD 0x0E
+#define CSR_CNT0 0x10
+#define CSR_CNT1 0x14
+
+/* Masks for interrupt bits in CSR_INTMASK direct register */
+#define CNT0_INT_MASK 0x01
+#define CNT1_INT_MASK 0x02
+#define ENUM_INT_MASK 0x04
+#define ALL_DIRECT_INTS_MASK 0x07
+
+/* Indexed registers (through CSR_INDEX, CSR_DATA) */
+#define HC_INT_MASK_REG 0x04
+#define HC_STATUS_REG 0x08
+#define HC_CMD_REG 0x0C
+#define ARB_CONFIG_GNT_REG 0x10
+#define ARB_CONFIG_CFG_REG 0x12
+#define ARB_CONFIG_REG 0x10
+#define ISOL_CONFIG_REG 0x18
+#define FAULT_STATUS_REG 0x20
+#define FAULT_CONFIG_REG 0x24
+#define WD_CONFIG_REG 0x2C
+#define HC_DIAG_REG 0x30
+#define SERIAL_COMM_REG 0x34
+#define SERIAL_OUT_REG 0x38
+#define SERIAL_IN_REG 0x3C
+
+/* Masks for interrupt bits in HC_INT_MASK_REG indexed register */
+#define SERIAL_INT_MASK 0x01
+#define FAULT_INT_MASK 0x02
+#define HCF_INT_MASK 0x04
+#define ALL_INDEXED_INTS_MASK 0x07
+
+/* Digital I/O port storing ENUM# */
+#define ENUM_PORT 0xE1
+/* Mask to get to the ENUM# bit on the bus */
+#define ENUM_MASK 0x40
+
+#endif /* _CPCIHP_ZT5550_H */
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
new file mode 100644
index 000000000..b28b2d218
--- /dev/null
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -0,0 +1,738 @@
+/*
+ * Compaq Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>
+ *
+ */
+#ifndef _CPQPHP_H
+#define _CPQPHP_H
+
+#include <linux/interrupt.h>
+#include <asm/io.h> /* for read? and write? functions */
+#include <linux/delay.h> /* for delays */
+#include <linux/mutex.h>
+#include <linux/sched.h> /* for signal_pending() */
+
+#define MY_NAME "cpqphp"
+
+#define dbg(fmt, arg...) do { if (cpqhp_debug) printk(KERN_DEBUG "%s: " fmt , MY_NAME , ## arg); } while (0)
+#define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg)
+#define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg)
+#define warn(format, arg...) printk(KERN_WARNING "%s: " format , MY_NAME , ## arg)
+
+
+
+struct smbios_system_slot {
+ u8 type;
+ u8 length;
+ u16 handle;
+ u8 name_string_num;
+ u8 slot_type;
+ u8 slot_width;
+ u8 slot_current_usage;
+ u8 slot_length;
+ u16 slot_number;
+ u8 properties1;
+ u8 properties2;
+} __attribute__ ((packed));
+
+/* offsets to the smbios generic type based on the above structure layout */
+enum smbios_system_slot_offsets {
+ SMBIOS_SLOT_GENERIC_TYPE = offsetof(struct smbios_system_slot, type),
+ SMBIOS_SLOT_GENERIC_LENGTH = offsetof(struct smbios_system_slot, length),
+ SMBIOS_SLOT_GENERIC_HANDLE = offsetof(struct smbios_system_slot, handle),
+ SMBIOS_SLOT_NAME_STRING_NUM = offsetof(struct smbios_system_slot, name_string_num),
+ SMBIOS_SLOT_TYPE = offsetof(struct smbios_system_slot, slot_type),
+ SMBIOS_SLOT_WIDTH = offsetof(struct smbios_system_slot, slot_width),
+ SMBIOS_SLOT_CURRENT_USAGE = offsetof(struct smbios_system_slot, slot_current_usage),
+ SMBIOS_SLOT_LENGTH = offsetof(struct smbios_system_slot, slot_length),
+ SMBIOS_SLOT_NUMBER = offsetof(struct smbios_system_slot, slot_number),
+ SMBIOS_SLOT_PROPERTIES1 = offsetof(struct smbios_system_slot, properties1),
+ SMBIOS_SLOT_PROPERTIES2 = offsetof(struct smbios_system_slot, properties2),
+};
+
+struct smbios_generic {
+ u8 type;
+ u8 length;
+ u16 handle;
+} __attribute__ ((packed));
+
+/* offsets to the smbios generic type based on the above structure layout */
+enum smbios_generic_offsets {
+ SMBIOS_GENERIC_TYPE = offsetof(struct smbios_generic, type),
+ SMBIOS_GENERIC_LENGTH = offsetof(struct smbios_generic, length),
+ SMBIOS_GENERIC_HANDLE = offsetof(struct smbios_generic, handle),
+};
+
+struct smbios_entry_point {
+ char anchor[4];
+ u8 ep_checksum;
+ u8 ep_length;
+ u8 major_version;
+ u8 minor_version;
+ u16 max_size_entry;
+ u8 ep_rev;
+ u8 reserved[5];
+ char int_anchor[5];
+ u8 int_checksum;
+ u16 st_length;
+ u32 st_address;
+ u16 number_of_entrys;
+ u8 bcd_rev;
+} __attribute__ ((packed));
+
+/* offsets to the smbios entry point based on the above structure layout */
+enum smbios_entry_point_offsets {
+ ANCHOR = offsetof(struct smbios_entry_point, anchor[0]),
+ EP_CHECKSUM = offsetof(struct smbios_entry_point, ep_checksum),
+ EP_LENGTH = offsetof(struct smbios_entry_point, ep_length),
+ MAJOR_VERSION = offsetof(struct smbios_entry_point, major_version),
+ MINOR_VERSION = offsetof(struct smbios_entry_point, minor_version),
+ MAX_SIZE_ENTRY = offsetof(struct smbios_entry_point, max_size_entry),
+ EP_REV = offsetof(struct smbios_entry_point, ep_rev),
+ INT_ANCHOR = offsetof(struct smbios_entry_point, int_anchor[0]),
+ INT_CHECKSUM = offsetof(struct smbios_entry_point, int_checksum),
+ ST_LENGTH = offsetof(struct smbios_entry_point, st_length),
+ ST_ADDRESS = offsetof(struct smbios_entry_point, st_address),
+ NUMBER_OF_ENTRYS = offsetof(struct smbios_entry_point, number_of_entrys),
+ BCD_REV = offsetof(struct smbios_entry_point, bcd_rev),
+};
+
+struct ctrl_reg { /* offset */
+ u8 slot_RST; /* 0x00 */
+ u8 slot_enable; /* 0x01 */
+ u16 misc; /* 0x02 */
+ u32 led_control; /* 0x04 */
+ u32 int_input_clear; /* 0x08 */
+ u32 int_mask; /* 0x0c */
+ u8 reserved0; /* 0x10 */
+ u8 reserved1; /* 0x11 */
+ u8 reserved2; /* 0x12 */
+ u8 gen_output_AB; /* 0x13 */
+ u32 non_int_input; /* 0x14 */
+ u32 reserved3; /* 0x18 */
+ u32 reserved4; /* 0x1c */
+ u32 reserved5; /* 0x20 */
+ u8 reserved6; /* 0x24 */
+ u8 reserved7; /* 0x25 */
+ u16 reserved8; /* 0x26 */
+ u8 slot_mask; /* 0x28 */
+ u8 reserved9; /* 0x29 */
+ u8 reserved10; /* 0x2a */
+ u8 reserved11; /* 0x2b */
+ u8 slot_SERR; /* 0x2c */
+ u8 slot_power; /* 0x2d */
+ u8 reserved12; /* 0x2e */
+ u8 reserved13; /* 0x2f */
+ u8 next_curr_freq; /* 0x30 */
+ u8 reset_freq_mode; /* 0x31 */
+} __attribute__ ((packed));
+
+/* offsets to the controller registers based on the above structure layout */
+enum ctrl_offsets {
+ SLOT_RST = offsetof(struct ctrl_reg, slot_RST),
+ SLOT_ENABLE = offsetof(struct ctrl_reg, slot_enable),
+ MISC = offsetof(struct ctrl_reg, misc),
+ LED_CONTROL = offsetof(struct ctrl_reg, led_control),
+ INT_INPUT_CLEAR = offsetof(struct ctrl_reg, int_input_clear),
+ INT_MASK = offsetof(struct ctrl_reg, int_mask),
+ CTRL_RESERVED0 = offsetof(struct ctrl_reg, reserved0),
+ CTRL_RESERVED1 = offsetof(struct ctrl_reg, reserved1),
+ CTRL_RESERVED2 = offsetof(struct ctrl_reg, reserved2),
+ GEN_OUTPUT_AB = offsetof(struct ctrl_reg, gen_output_AB),
+ NON_INT_INPUT = offsetof(struct ctrl_reg, non_int_input),
+ CTRL_RESERVED3 = offsetof(struct ctrl_reg, reserved3),
+ CTRL_RESERVED4 = offsetof(struct ctrl_reg, reserved4),
+ CTRL_RESERVED5 = offsetof(struct ctrl_reg, reserved5),
+ CTRL_RESERVED6 = offsetof(struct ctrl_reg, reserved6),
+ CTRL_RESERVED7 = offsetof(struct ctrl_reg, reserved7),
+ CTRL_RESERVED8 = offsetof(struct ctrl_reg, reserved8),
+ SLOT_MASK = offsetof(struct ctrl_reg, slot_mask),
+ CTRL_RESERVED9 = offsetof(struct ctrl_reg, reserved9),
+ CTRL_RESERVED10 = offsetof(struct ctrl_reg, reserved10),
+ CTRL_RESERVED11 = offsetof(struct ctrl_reg, reserved11),
+ SLOT_SERR = offsetof(struct ctrl_reg, slot_SERR),
+ SLOT_POWER = offsetof(struct ctrl_reg, slot_power),
+ NEXT_CURR_FREQ = offsetof(struct ctrl_reg, next_curr_freq),
+ RESET_FREQ_MODE = offsetof(struct ctrl_reg, reset_freq_mode),
+};
+
+struct hrt {
+ char sig0;
+ char sig1;
+ char sig2;
+ char sig3;
+ u16 unused_IRQ;
+ u16 PCIIRQ;
+ u8 number_of_entries;
+ u8 revision;
+ u16 reserved1;
+ u32 reserved2;
+} __attribute__ ((packed));
+
+/* offsets to the hotplug resource table registers based on the above
+ * structure layout
+ */
+enum hrt_offsets {
+ SIG0 = offsetof(struct hrt, sig0),
+ SIG1 = offsetof(struct hrt, sig1),
+ SIG2 = offsetof(struct hrt, sig2),
+ SIG3 = offsetof(struct hrt, sig3),
+ UNUSED_IRQ = offsetof(struct hrt, unused_IRQ),
+ PCIIRQ = offsetof(struct hrt, PCIIRQ),
+ NUMBER_OF_ENTRIES = offsetof(struct hrt, number_of_entries),
+ REVISION = offsetof(struct hrt, revision),
+ HRT_RESERVED1 = offsetof(struct hrt, reserved1),
+ HRT_RESERVED2 = offsetof(struct hrt, reserved2),
+};
+
+struct slot_rt {
+ u8 dev_func;
+ u8 primary_bus;
+ u8 secondary_bus;
+ u8 max_bus;
+ u16 io_base;
+ u16 io_length;
+ u16 mem_base;
+ u16 mem_length;
+ u16 pre_mem_base;
+ u16 pre_mem_length;
+} __attribute__ ((packed));
+
+/* offsets to the hotplug slot resource table registers based on the above
+ * structure layout
+ */
+enum slot_rt_offsets {
+ DEV_FUNC = offsetof(struct slot_rt, dev_func),
+ PRIMARY_BUS = offsetof(struct slot_rt, primary_bus),
+ SECONDARY_BUS = offsetof(struct slot_rt, secondary_bus),
+ MAX_BUS = offsetof(struct slot_rt, max_bus),
+ IO_BASE = offsetof(struct slot_rt, io_base),
+ IO_LENGTH = offsetof(struct slot_rt, io_length),
+ MEM_BASE = offsetof(struct slot_rt, mem_base),
+ MEM_LENGTH = offsetof(struct slot_rt, mem_length),
+ PRE_MEM_BASE = offsetof(struct slot_rt, pre_mem_base),
+ PRE_MEM_LENGTH = offsetof(struct slot_rt, pre_mem_length),
+};
+
+struct pci_func {
+ struct pci_func *next;
+ u8 bus;
+ u8 device;
+ u8 function;
+ u8 is_a_board;
+ u16 status;
+ u8 configured;
+ u8 switch_save;
+ u8 presence_save;
+ u32 base_length[0x06];
+ u8 base_type[0x06];
+ u16 reserved2;
+ u32 config_space[0x20];
+ struct pci_resource *mem_head;
+ struct pci_resource *p_mem_head;
+ struct pci_resource *io_head;
+ struct pci_resource *bus_head;
+ struct timer_list *p_task_event;
+ struct pci_dev *pci_dev;
+};
+
+struct slot {
+ struct slot *next;
+ u8 bus;
+ u8 device;
+ u8 number;
+ u8 is_a_board;
+ u8 configured;
+ u8 state;
+ u8 switch_save;
+ u8 presence_save;
+ u32 capabilities;
+ u16 reserved2;
+ struct timer_list task_event;
+ u8 hp_slot;
+ struct controller *ctrl;
+ void __iomem *p_sm_slot;
+ struct hotplug_slot *hotplug_slot;
+};
+
+struct pci_resource {
+ struct pci_resource *next;
+ u32 base;
+ u32 length;
+};
+
+struct event_info {
+ u32 event_type;
+ u8 hp_slot;
+};
+
+struct controller {
+ struct controller *next;
+ u32 ctrl_int_comp;
+ struct mutex crit_sect; /* critical section mutex */
+ void __iomem *hpc_reg; /* cookie for our pci controller location */
+ struct pci_resource *mem_head;
+ struct pci_resource *p_mem_head;
+ struct pci_resource *io_head;
+ struct pci_resource *bus_head;
+ struct pci_dev *pci_dev;
+ struct pci_bus *pci_bus;
+ struct event_info event_queue[10];
+ struct slot *slot;
+ u8 next_event;
+ u8 interrupt;
+ u8 cfgspc_irq;
+ u8 bus; /* bus number for the pci hotplug controller */
+ u8 rev;
+ u8 slot_device_offset;
+ u8 first_slot;
+ u8 add_support;
+ u8 push_flag;
+ u8 push_button; /* 0 = no pushbutton, 1 = pushbutton present */
+ u8 slot_switch_type; /* 0 = no switch, 1 = switch present */
+ u8 defeature_PHP; /* 0 = PHP not supported, 1 = PHP supported */
+ u8 alternate_base_address; /* 0 = not supported, 1 = supported */
+ u8 pci_config_space; /* Index/data access to working registers 0 = not supported, 1 = supported */
+ u8 pcix_speed_capability; /* PCI-X */
+ u8 pcix_support; /* PCI-X */
+ u16 vendor_id;
+ struct work_struct int_task_event;
+ wait_queue_head_t queue; /* sleep & wake process */
+ struct dentry *dentry; /* debugfs dentry */
+};
+
+struct irq_mapping {
+ u8 barber_pole;
+ u8 valid_INT;
+ u8 interrupt[4];
+};
+
+struct resource_lists {
+ struct pci_resource *mem_head;
+ struct pci_resource *p_mem_head;
+ struct pci_resource *io_head;
+ struct pci_resource *bus_head;
+ struct irq_mapping *irqs;
+};
+
+#define ROM_PHY_ADDR 0x0F0000
+#define ROM_PHY_LEN 0x00ffff
+
+#define PCI_HPC_ID 0xA0F7
+#define PCI_SUB_HPC_ID 0xA2F7
+#define PCI_SUB_HPC_ID2 0xA2F8
+#define PCI_SUB_HPC_ID3 0xA2F9
+#define PCI_SUB_HPC_ID_INTC 0xA2FA
+#define PCI_SUB_HPC_ID4 0xA2FD
+
+#define INT_BUTTON_IGNORE 0
+#define INT_PRESENCE_ON 1
+#define INT_PRESENCE_OFF 2
+#define INT_SWITCH_CLOSE 3
+#define INT_SWITCH_OPEN 4
+#define INT_POWER_FAULT 5
+#define INT_POWER_FAULT_CLEAR 6
+#define INT_BUTTON_PRESS 7
+#define INT_BUTTON_RELEASE 8
+#define INT_BUTTON_CANCEL 9
+
+#define STATIC_STATE 0
+#define BLINKINGON_STATE 1
+#define BLINKINGOFF_STATE 2
+#define POWERON_STATE 3
+#define POWEROFF_STATE 4
+
+#define PCISLOT_INTERLOCK_CLOSED 0x00000001
+#define PCISLOT_ADAPTER_PRESENT 0x00000002
+#define PCISLOT_POWERED 0x00000004
+#define PCISLOT_66_MHZ_OPERATION 0x00000008
+#define PCISLOT_64_BIT_OPERATION 0x00000010
+#define PCISLOT_REPLACE_SUPPORTED 0x00000020
+#define PCISLOT_ADD_SUPPORTED 0x00000040
+#define PCISLOT_INTERLOCK_SUPPORTED 0x00000080
+#define PCISLOT_66_MHZ_SUPPORTED 0x00000100
+#define PCISLOT_64_BIT_SUPPORTED 0x00000200
+
+#define PCI_TO_PCI_BRIDGE_CLASS 0x00060400
+
+#define INTERLOCK_OPEN 0x00000002
+#define ADD_NOT_SUPPORTED 0x00000003
+#define CARD_FUNCTIONING 0x00000005
+#define ADAPTER_NOT_SAME 0x00000006
+#define NO_ADAPTER_PRESENT 0x00000009
+#define NOT_ENOUGH_RESOURCES 0x0000000B
+#define DEVICE_TYPE_NOT_SUPPORTED 0x0000000C
+#define POWER_FAILURE 0x0000000E
+
+#define REMOVE_NOT_SUPPORTED 0x00000003
+
+
+/*
+ * Error messages
+ */
+#define msg_initialization_err "Initialization failure, error=%d\n"
+#define msg_HPC_rev_error "Unsupported revision of the PCI hot plug controller found.\n"
+#define msg_HPC_non_compaq_or_intel "The PCI hot plug controller is not supported by this driver.\n"
+#define msg_HPC_not_supported "this system is not supported by this version of cpqphpd. Upgrade to a newer version of cpqphpd\n"
+#define msg_unable_to_save "unable to store PCI hot plug add resource information. This system must be rebooted before adding any PCI devices.\n"
+#define msg_button_on "PCI slot #%d - powering on due to button press.\n"
+#define msg_button_off "PCI slot #%d - powering off due to button press.\n"
+#define msg_button_cancel "PCI slot #%d - action canceled due to button press.\n"
+#define msg_button_ignore "PCI slot #%d - button press ignored. (action in progress...)\n"
+
+
+/* debugfs functions for the hotplug controller info */
+void cpqhp_initialize_debugfs(void);
+void cpqhp_shutdown_debugfs(void);
+void cpqhp_create_debugfs_files(struct controller *ctrl);
+void cpqhp_remove_debugfs_files(struct controller *ctrl);
+
+/* controller functions */
+void cpqhp_pushbutton_thread(unsigned long event_pointer);
+irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
+int cpqhp_find_available_resources(struct controller *ctrl,
+ void __iomem *rom_start);
+int cpqhp_event_start_thread(void);
+void cpqhp_event_stop_thread(void);
+struct pci_func *cpqhp_slot_create(unsigned char busnumber);
+struct pci_func *cpqhp_slot_find(unsigned char bus, unsigned char device,
+ unsigned char index);
+int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func);
+int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func);
+int cpqhp_hardware_test(struct controller *ctrl, int test_num);
+
+/* resource functions */
+int cpqhp_resource_sort_and_combine (struct pci_resource **head);
+
+/* pci functions */
+int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
+int cpqhp_get_bus_dev(struct controller *ctrl, u8 *bus_num, u8 *dev_num,
+ u8 slot);
+int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug);
+int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func *func);
+int cpqhp_save_used_resources(struct controller *ctrl, struct pci_func *func);
+int cpqhp_configure_board(struct controller *ctrl, struct pci_func *func);
+int cpqhp_save_slot_config(struct controller *ctrl, struct pci_func *new_slot);
+int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func);
+void cpqhp_destroy_board_resources(struct pci_func *func);
+int cpqhp_return_board_resources(struct pci_func *func,
+ struct resource_lists *resources);
+void cpqhp_destroy_resource_list(struct resource_lists *resources);
+int cpqhp_configure_device(struct controller *ctrl, struct pci_func *func);
+int cpqhp_unconfigure_device(struct pci_func *func);
+
+/* Global variables */
+extern int cpqhp_debug;
+extern int cpqhp_legacy_mode;
+extern struct controller *cpqhp_ctrl_list;
+extern struct pci_func *cpqhp_slot_list[256];
+extern struct irq_routing_table *cpqhp_routing_table;
+
+/* these can be gotten rid of, but for debugging they are purty */
+extern u8 cpqhp_nic_irq;
+extern u8 cpqhp_disk_irq;
+
+
+/* inline functions */
+
+static inline const char *slot_name(struct slot *slot)
+{
+ return hotplug_slot_name(slot->hotplug_slot);
+}
+
+/*
+ * return_resource
+ *
+ * Puts node back in the resource list pointed to by head
+ */
+static inline void return_resource(struct pci_resource **head,
+ struct pci_resource *node)
+{
+ if (!node || !head)
+ return;
+ node->next = *head;
+ *head = node;
+}
+
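+/*
+ * Setting bit 0 of MISC appears to start the controller's serial
+ * output cycle ("SOGO"), shifting the updated LED and slot state out
+ * to the hardware (bit 2 is cleared in the same write); callers then
+ * wait for the busy/SOBS indication to clear via wait_for_ctrl_irq().
+ */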
+static inline void set_SOGO(struct controller *ctrl)
+{
+ u16 misc;
+
+ misc = readw(ctrl->hpc_reg + MISC);
+ misc = (misc | 0x0001) & 0xFFFB;
+ writew(misc, ctrl->hpc_reg + MISC);
+}
+
+
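+/*
+ * LED helpers: judging by the masks below, LED_CONTROL holds two
+ * control bits per slot for each LED, with bits 0-15 driving the
+ * green (power) LEDs and bits 16-31 the amber (attention) LEDs;
+ * green_LED_blink() sets only the low bit of a green pair to make
+ * that LED blink.
+ */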
+static inline void amber_LED_on(struct controller *ctrl, u8 slot)
+{
+ u32 led_control;
+
+ led_control = readl(ctrl->hpc_reg + LED_CONTROL);
+ led_control |= (0x01010000L << slot);
+ writel(led_control, ctrl->hpc_reg + LED_CONTROL);
+}
+
+
+static inline void amber_LED_off(struct controller *ctrl, u8 slot)
+{
+ u32 led_control;
+
+ led_control = readl(ctrl->hpc_reg + LED_CONTROL);
+ led_control &= ~(0x01010000L << slot);
+ writel(led_control, ctrl->hpc_reg + LED_CONTROL);
+}
+
+
+static inline int read_amber_LED(struct controller *ctrl, u8 slot)
+{
+ u32 led_control;
+
+ led_control = readl(ctrl->hpc_reg + LED_CONTROL);
+ led_control &= (0x01010000L << slot);
+
+ return led_control ? 1 : 0;
+}
+
+
+static inline void green_LED_on(struct controller *ctrl, u8 slot)
+{
+ u32 led_control;
+
+ led_control = readl(ctrl->hpc_reg + LED_CONTROL);
+ led_control |= 0x0101L << slot;
+ writel(led_control, ctrl->hpc_reg + LED_CONTROL);
+}
+
+static inline void green_LED_off(struct controller *ctrl, u8 slot)
+{
+ u32 led_control;
+
+ led_control = readl(ctrl->hpc_reg + LED_CONTROL);
+ led_control &= ~(0x0101L << slot);
+ writel(led_control, ctrl->hpc_reg + LED_CONTROL);
+}
+
+
+static inline void green_LED_blink(struct controller *ctrl, u8 slot)
+{
+ u32 led_control;
+
+ led_control = readl(ctrl->hpc_reg + LED_CONTROL);
+ led_control &= ~(0x0101L << slot);
+ led_control |= (0x0001L << slot);
+ writel(led_control, ctrl->hpc_reg + LED_CONTROL);
+}
+
+
+static inline void slot_disable(struct controller *ctrl, u8 slot)
+{
+ u8 slot_enable;
+
+ slot_enable = readb(ctrl->hpc_reg + SLOT_ENABLE);
+ slot_enable &= ~(0x01 << slot);
+ writeb(slot_enable, ctrl->hpc_reg + SLOT_ENABLE);
+}
+
+
+static inline void slot_enable(struct controller *ctrl, u8 slot)
+{
+ u8 slot_enable;
+
+ slot_enable = readb(ctrl->hpc_reg + SLOT_ENABLE);
+ slot_enable |= (0x01 << slot);
+ writeb(slot_enable, ctrl->hpc_reg + SLOT_ENABLE);
+}
+
+
+static inline u8 is_slot_enabled(struct controller *ctrl, u8 slot)
+{
+ u8 slot_enable;
+
+ slot_enable = readb(ctrl->hpc_reg + SLOT_ENABLE);
+ slot_enable &= (0x01 << slot);
+ return slot_enable ? 1 : 0;
+}
+
+
+static inline u8 read_slot_enable(struct controller *ctrl)
+{
+ return readb(ctrl->hpc_reg + SLOT_ENABLE);
+}
+
+
+/**
+ * get_controller_speed - find the current frequency/mode of controller.
+ *
+ * @ctrl: controller to get frequency/mode for.
+ *
+ * Returns controller speed.
+ */
+static inline u8 get_controller_speed(struct controller *ctrl)
+{
+ u8 curr_freq;
+ u16 misc;
+
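+ /*
+ * NEXT_CURR_FREQ appears to report PCI-X operation in bit 7, with
+ * bits 5:4 selecting the speed; bit 4 alone indicates conventional
+ * 66MHz PCI.
+ */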
+ if (ctrl->pcix_support) {
+ curr_freq = readb(ctrl->hpc_reg + NEXT_CURR_FREQ);
+ if ((curr_freq & 0xB0) == 0xB0)
+ return PCI_SPEED_133MHz_PCIX;
+ if ((curr_freq & 0xA0) == 0xA0)
+ return PCI_SPEED_100MHz_PCIX;
+ if ((curr_freq & 0x90) == 0x90)
+ return PCI_SPEED_66MHz_PCIX;
+ if (curr_freq & 0x10)
+ return PCI_SPEED_66MHz;
+
+ return PCI_SPEED_33MHz;
+ }
+
+ misc = readw(ctrl->hpc_reg + MISC);
+ return (misc & 0x0800) ? PCI_SPEED_66MHz : PCI_SPEED_33MHz;
+}
+
+
+/**
+ * get_adapter_speed - find the max supported frequency/mode of adapter.
+ *
+ * @ctrl: hotplug controller.
+ * @hp_slot: hotplug slot where adapter is installed.
+ *
+ * Returns adapter speed.
+ */
+static inline u8 get_adapter_speed(struct controller *ctrl, u8 hp_slot)
+{
+ u32 temp_dword = readl(ctrl->hpc_reg + NON_INT_INPUT);
+ dbg("slot: %d, PCIXCAP: %8x\n", hp_slot, temp_dword);
+ if (ctrl->pcix_support) {
+ if (temp_dword & (0x10000 << hp_slot))
+ return PCI_SPEED_133MHz_PCIX;
+ if (temp_dword & (0x100 << hp_slot))
+ return PCI_SPEED_66MHz_PCIX;
+ }
+
+ if (temp_dword & (0x01 << hp_slot))
+ return PCI_SPEED_66MHz;
+
+ return PCI_SPEED_33MHz;
+}
+
+static inline void enable_slot_power(struct controller *ctrl, u8 slot)
+{
+ u8 slot_power;
+
+ slot_power = readb(ctrl->hpc_reg + SLOT_POWER);
+ slot_power |= (0x01 << slot);
+ writeb(slot_power, ctrl->hpc_reg + SLOT_POWER);
+}
+
+static inline void disable_slot_power(struct controller *ctrl, u8 slot)
+{
+ u8 slot_power;
+
+ slot_power = readb(ctrl->hpc_reg + SLOT_POWER);
+ slot_power &= ~(0x01 << slot);
+ writeb(slot_power, ctrl->hpc_reg + SLOT_POWER);
+}
+
+
+static inline int cpq_get_attention_status(struct controller *ctrl, struct slot *slot)
+{
+ u8 hp_slot;
+
+ hp_slot = slot->device - ctrl->slot_device_offset;
+
+ return read_amber_LED(ctrl, hp_slot);
+}
+
+
+static inline int get_slot_enabled(struct controller *ctrl, struct slot *slot)
+{
+ u8 hp_slot;
+
+ hp_slot = slot->device - ctrl->slot_device_offset;
+
+ return is_slot_enabled(ctrl, hp_slot);
+}
+
+
+static inline int cpq_get_latch_status(struct controller *ctrl,
+ struct slot *slot)
+{
+ u32 status;
+ u8 hp_slot;
+
+ hp_slot = slot->device - ctrl->slot_device_offset;
+ dbg("%s: slot->device = %d, ctrl->slot_device_offset = %d \n",
+ __func__, slot->device, ctrl->slot_device_offset);
+
+ status = (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot));
+
+ return (status == 0) ? 1 : 0;
+}
+
+
+static inline int get_presence_status(struct controller *ctrl,
+ struct slot *slot)
+{
+ int presence_save = 0;
+ u8 hp_slot;
+ u32 tempdword;
+
+ hp_slot = slot->device - ctrl->slot_device_offset;
+
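+ /*
+ * INT_INPUT_CLEAR appears to carry two active-low presence-detect
+ * bits per slot, at bit positions hp_slot+16 and hp_slot+24; the
+ * shifts below fold them into bit 1 (0x02) of the result.
+ */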
+ tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
+ presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15))
+ >> hp_slot) & 0x02;
+
+ return presence_save;
+}
+
+static inline int wait_for_ctrl_irq(struct controller *ctrl)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int retval = 0;
+
+ dbg("%s - start\n", __func__);
+ add_wait_queue(&ctrl->queue, &wait);
+ /* Sleep for up to 1 second to wait for the LED to change. */
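+ /*
+ * Being on ctrl->queue lets the controller interrupt handler's
+ * wake_up() end this sleep early once the operation completes.
+ */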
+ msleep_interruptible(1000);
+ remove_wait_queue(&ctrl->queue, &wait);
+ if (signal_pending(current))
+ retval = -EINTR;
+
+ dbg("%s - end\n", __func__);
+ return retval;
+}
+
+#include <asm/pci_x86.h>
+static inline int cpqhp_routing_table_length(void)
+{
+ BUG_ON(cpqhp_routing_table == NULL);
+ return ((cpqhp_routing_table->size - sizeof(struct irq_routing_table)) /
+ sizeof(struct irq_info));
+}
+
+#endif
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
new file mode 100644
index 000000000..a53084ddc
--- /dev/null
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -0,0 +1,1467 @@
+/*
+ * Compaq Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2001 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>
+ *
+ * Jan 12, 2003 - Added 66/100/133MHz PCI-X support,
+ * Torben Mathiasen <torben.mathiasen@hp.com>
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include <asm/uaccess.h>
+
+#include "cpqphp.h"
+#include "cpqphp_nvram.h"
+
+
+/* Global variables */
+int cpqhp_debug;
+int cpqhp_legacy_mode;
+struct controller *cpqhp_ctrl_list; /* = NULL */
+struct pci_func *cpqhp_slot_list[256];
+struct irq_routing_table *cpqhp_routing_table;
+
+/* local variables */
+static void __iomem *smbios_table;
+static void __iomem *smbios_start;
+static void __iomem *cpqhp_rom_start;
+static bool power_mode;
+static bool debug;
+static int initialized;
+
+#define DRIVER_VERSION "0.9.8"
+#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>"
+#define DRIVER_DESC "Compaq Hot Plug PCI Controller Driver"
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+module_param(power_mode, bool, 0644);
+MODULE_PARM_DESC(power_mode, "Power mode enabled or not");
+
+module_param(debug, bool, 0644);
+MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
+
+#define CPQHPC_MODULE_MINOR 208
+
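+/*
+ * SMBIOS type 9 (system slot) encodings: a slot width of 0x06 means
+ * 64-bit, and a slot type of 0x0E means "PCI 66MHz capable".
+ */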
+static inline int is_slot64bit(struct slot *slot)
+{
+ return (readb(slot->p_sm_slot + SMBIOS_SLOT_WIDTH) == 0x06) ? 1 : 0;
+}
+
+static inline int is_slot66mhz(struct slot *slot)
+{
+ return (readb(slot->p_sm_slot + SMBIOS_SLOT_TYPE) == 0x0E) ? 1 : 0;
+}
+
+/**
+ * detect_SMBIOS_pointer - find the System Management BIOS Table in mem region.
+ * @begin: begin pointer for region to be scanned.
+ * @end: end pointer for region to be scanned.
+ *
+ * Returns pointer to the head of the SMBIOS tables (or %NULL).
+ */
+static void __iomem *detect_SMBIOS_pointer(void __iomem *begin, void __iomem *end)
+{
+ void __iomem *fp;
+ void __iomem *endp;
+ u8 temp1, temp2, temp3, temp4;
+ int status = 0;
+
+ endp = (end - sizeof(u32) + 1);
+
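+ /* The "_SM_" entry point anchor is paragraph (16-byte) aligned */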
+ for (fp = begin; fp <= endp; fp += 16) {
+ temp1 = readb(fp);
+ temp2 = readb(fp+1);
+ temp3 = readb(fp+2);
+ temp4 = readb(fp+3);
+ if (temp1 == '_' &&
+ temp2 == 'S' &&
+ temp3 == 'M' &&
+ temp4 == '_') {
+ status = 1;
+ break;
+ }
+ }
+
+ if (!status)
+ fp = NULL;
+
+ dbg("Discovered SMBIOS Entry point at %p\n", fp);
+
+ return fp;
+}
+
+/**
+ * init_SERR - initialize per-slot SERR generation
+ * @ctrl: controller to use
+ *
+ * Clears the per-slot SERR latches used to flag unexpected switch opens.
+ */
+static int init_SERR(struct controller *ctrl)
+{
+ u32 number_of_slots;
+
+ if (!ctrl)
+ return 1;
+
+ number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
+ /* Loop through the slots */
+ while (number_of_slots--)
+ writeb(0, ctrl->hpc_reg + SLOT_SERR);
+
+ return 0;
+}
+
+static int init_cpqhp_routing_table(void)
+{
+ int len;
+
+ cpqhp_routing_table = pcibios_get_irq_routing_table();
+ if (cpqhp_routing_table == NULL)
+ return -ENOMEM;
+
+ len = cpqhp_routing_table_length();
+ if (len == 0) {
+ kfree(cpqhp_routing_table);
+ cpqhp_routing_table = NULL;
+ return -1;
+ }
+
+ return 0;
+}
+
+/* nice debugging output */
+static void pci_print_IRQ_route(void)
+{
+ int len;
+ int loop;
+ u8 tbus, tdevice, tslot;
+
+ len = cpqhp_routing_table_length();
+
+ dbg("bus dev func slot\n");
+ for (loop = 0; loop < len; ++loop) {
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn;
+ tslot = cpqhp_routing_table->slots[loop].slot;
+ dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot);
+ }
+}
+
+
+/**
+ * get_subsequent_smbios_entry - get the next entry from the SMBIOS table
+ * @smbios_start: where to start in the SMBIOS table
+ * @smbios_table: location of the SMBIOS table
+ * @curr: pointer to a previously returned structure
+ *
+ * Skips past @curr's formatted area and its trailing string-set
+ * (terminated by a double NULL) to reach the next structure.
+ *
+ * Returns a pointer to an SMBIOS structure or NULL if none found.
+ */
+static void __iomem *get_subsequent_smbios_entry(void __iomem *smbios_start,
+ void __iomem *smbios_table,
+ void __iomem *curr)
+{
+ u8 bail = 0;
+ u8 previous_byte = 1;
+ void __iomem *p_temp;
+ void __iomem *p_max;
+
+ if (!smbios_table || !curr)
+ return NULL;
+
+ /* set p_max to the end of the table */
+ p_max = smbios_start + readw(smbios_table + ST_LENGTH);
+
+ p_temp = curr;
+ p_temp += readb(curr + SMBIOS_GENERIC_LENGTH);
+
+ while ((p_temp < p_max) && !bail) {
+ /* Look for the double NULL terminator
+ * The first condition is the previous byte
+ * and the second is the curr
+ */
+ if (!previous_byte && !(readb(p_temp)))
+ bail = 1;
+
+ previous_byte = readb(p_temp);
+ p_temp++;
+ }
+
+ if (p_temp < p_max)
+ return p_temp;
+ else
+ return NULL;
+}
+
+
+/**
+ * get_SMBIOS_entry - return the requested SMBIOS entry or %NULL
+ * @smbios_start: where to start in the SMBIOS table
+ * @smbios_table: location of the SMBIOS table
+ * @type: SMBIOS structure type to be returned
+ * @previous: %NULL or pointer to previously returned structure
+ *
+ * Gets the first entry of the specified type if @previous == %NULL;
+ * otherwise, returns the next entry of the given type.
+ * Uses get_subsequent_smbios_entry() to walk the table.
+ *
+ * Returns a pointer to an SMBIOS structure or %NULL if none found.
+ */
+static void __iomem *get_SMBIOS_entry(void __iomem *smbios_start,
+ void __iomem *smbios_table,
+ u8 type,
+ void __iomem *previous)
+{
+ if (!smbios_table)
+ return NULL;
+
+ if (!previous)
+ previous = smbios_start;
+ else
+ previous = get_subsequent_smbios_entry(smbios_start,
+ smbios_table, previous);
+
+ while (previous)
+ if (readb(previous + SMBIOS_GENERIC_TYPE) != type)
+ previous = get_subsequent_smbios_entry(smbios_start,
+ smbios_table, previous);
+ else
+ break;
+
+ return previous;
+}
+
+static void release_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+
+ kfree(slot->hotplug_slot->info);
+ kfree(slot->hotplug_slot);
+ kfree(slot);
+}
+
+static int ctrl_slot_cleanup (struct controller *ctrl)
+{
+ struct slot *old_slot, *next_slot;
+
+ old_slot = ctrl->slot;
+ ctrl->slot = NULL;
+
+ while (old_slot) {
+ /* memory will be freed by the release_slot callback */
+ next_slot = old_slot->next;
+ pci_hp_deregister (old_slot->hotplug_slot);
+ old_slot = next_slot;
+ }
+
+ cpqhp_remove_debugfs_files(ctrl);
+
+ /* Free IRQ associated with hot plug device */
+ free_irq(ctrl->interrupt, ctrl);
+ /* Unmap the memory */
+ iounmap(ctrl->hpc_reg);
+ /* Finally reclaim PCI mem */
+ release_mem_region(pci_resource_start(ctrl->pci_dev, 0),
+ pci_resource_len(ctrl->pci_dev, 0));
+
+ return 0;
+}
+
+
+/**
+ * get_slot_mapping - determine logical slot mapping for PCI device
+ * @bus: PCI bus to probe through
+ * @bus_num: bus number of PCI device
+ * @dev_num: device number of PCI device
+ * @slot: pointer to u8 where slot number will be returned
+ *
+ * Won't work for more than one PCI-PCI bridge in a slot.
+ *
+ * Returns 0 on success or -1 on failure.
+ */
+static int
+get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
+{
+ u32 work;
+ long len;
+ long loop;
+
+ u8 tbus, tdevice, tslot, bridgeSlot;
+
+ dbg("%s: %p, %d, %d, %p\n", __func__, bus, bus_num, dev_num, slot);
+
+ bridgeSlot = 0xFF;
+
+ len = cpqhp_routing_table_length();
+ for (loop = 0; loop < len; ++loop) {
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn >> 3;
+ tslot = cpqhp_routing_table->slots[loop].slot;
+
+ if ((tbus == bus_num) && (tdevice == dev_num)) {
+ *slot = tslot;
+ return 0;
+ } else {
+ /* Did not get a match on the target PCI device. Check
+ * if the current IRQ table entry is a PCI-to-PCI
+ * bridge device. If so, and it's secondary bus
+ * matches the bus number for the target device, I need
+ * to save the bridge's slot number. If I can not find
+ * an entry for the target device, I will have to
+ * assume it's on the other side of the bridge, and
+ * assign it the bridge's slot.
+ */
+ bus->number = tbus;
+ pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0),
+ PCI_CLASS_REVISION, &work);
+
+ if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
+ pci_bus_read_config_dword(bus,
+ PCI_DEVFN(tdevice, 0),
+ PCI_PRIMARY_BUS, &work);
+ /* See if the bridge's secondary bus matches the target bus */
+ if (((work >> 8) & 0x000000FF) == (long) bus_num)
+ bridgeSlot = tslot;
+ }
+ }
+
+ }
+
+ /* If we got here, we didn't find an entry in the IRQ mapping table for
+ * the target PCI device. If we did determine that the target device
+ * is on the other side of a PCI-to-PCI bridge, return the slot number
+ * for the bridge.
+ */
+ if (bridgeSlot != 0xFF) {
+ *slot = bridgeSlot;
+ return 0;
+ }
+ /* Couldn't find an entry in the routing table for this PCI device */
+ return -1;
+}
+
+
+/**
+ * cpqhp_set_attention_status - Turns the Amber LED for a slot on or off
+ * @ctrl: struct controller to use
+ * @func: PCI device/function info
+ * @status: LED control flag: 1 = LED on, 0 = LED off
+ */
+static int
+cpqhp_set_attention_status(struct controller *ctrl, struct pci_func *func,
+ u32 status)
+{
+ u8 hp_slot;
+
+ if (func == NULL)
+ return 1;
+
+ hp_slot = func->device - ctrl->slot_device_offset;
+
+ /* Wait for exclusive access to hardware */
+ mutex_lock(&ctrl->crit_sect);
+
+ if (status == 1)
+ amber_LED_on (ctrl, hp_slot);
+ else if (status == 0)
+ amber_LED_off (ctrl, hp_slot);
+ else {
+ /* Done with exclusive hardware access */
+ mutex_unlock(&ctrl->crit_sect);
+ return 1;
+ }
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+
+ /* Done with exclusive hardware access */
+ mutex_unlock(&ctrl->crit_sect);
+
+ return 0;
+}
+
+
+/**
+ * set_attention_status - Turns the Amber LED for a slot on or off
+ * @hotplug_slot: slot to change LED on
+ * @status: LED control flag
+ */
+static int set_attention_status (struct hotplug_slot *hotplug_slot, u8 status)
+{
+ struct pci_func *slot_func;
+ struct slot *slot = hotplug_slot->private;
+ struct controller *ctrl = slot->ctrl;
+ u8 bus;
+ u8 devfn;
+ u8 device;
+ u8 function;
+
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+
+ if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
+ return -ENODEV;
+
+ device = devfn >> 3;
+ function = devfn & 0x7;
+ dbg("bus, dev, fn = %d, %d, %d\n", bus, device, function);
+
+ slot_func = cpqhp_slot_find(bus, device, function);
+ if (!slot_func)
+ return -ENODEV;
+
+ return cpqhp_set_attention_status(ctrl, slot_func, status);
+}
+
+
+static int process_SI(struct hotplug_slot *hotplug_slot)
+{
+ struct pci_func *slot_func;
+ struct slot *slot = hotplug_slot->private;
+ struct controller *ctrl = slot->ctrl;
+ u8 bus;
+ u8 devfn;
+ u8 device;
+ u8 function;
+
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+
+ if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
+ return -ENODEV;
+
+ device = devfn >> 3;
+ function = devfn & 0x7;
+ dbg("bus, dev, fn = %d, %d, %d\n", bus, device, function);
+
+ slot_func = cpqhp_slot_find(bus, device, function);
+ if (!slot_func)
+ return -ENODEV;
+
+ slot_func->bus = bus;
+ slot_func->device = device;
+ slot_func->function = function;
+ slot_func->configured = 0;
+ dbg("board_added(%p, %p)\n", slot_func, ctrl);
+ return cpqhp_process_SI(ctrl, slot_func);
+}
+
+
+static int process_SS(struct hotplug_slot *hotplug_slot)
+{
+ struct pci_func *slot_func;
+ struct slot *slot = hotplug_slot->private;
+ struct controller *ctrl = slot->ctrl;
+ u8 bus;
+ u8 devfn;
+ u8 device;
+ u8 function;
+
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+
+ if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
+ return -ENODEV;
+
+ device = devfn >> 3;
+ function = devfn & 0x7;
+ dbg("bus, dev, fn = %d, %d, %d\n", bus, device, function);
+
+ slot_func = cpqhp_slot_find(bus, device, function);
+ if (!slot_func)
+ return -ENODEV;
+
+ dbg("In %s, slot_func = %p, ctrl = %p\n", __func__, slot_func, ctrl);
+ return cpqhp_process_SS(ctrl, slot_func);
+}
+
+
+static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
+{
+ struct slot *slot = hotplug_slot->private;
+ struct controller *ctrl = slot->ctrl;
+
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+
+ return cpqhp_hardware_test(ctrl, value);
+}
+
+
+static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+ struct controller *ctrl = slot->ctrl;
+
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+
+ *value = get_slot_enabled(ctrl, slot);
+ return 0;
+}
+
+static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+ struct controller *ctrl = slot->ctrl;
+
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+
+ *value = cpq_get_attention_status(ctrl, slot);
+ return 0;
+}
+
+static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+ struct controller *ctrl = slot->ctrl;
+
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+
+ *value = cpq_get_latch_status(ctrl, slot);
+
+ return 0;
+}
+
+static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+ struct controller *ctrl = slot->ctrl;
+
+ dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+
+ *value = get_presence_status(ctrl, slot);
+
+ return 0;
+}
+
+static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
+ .set_attention_status = set_attention_status,
+ .enable_slot = process_SI,
+ .disable_slot = process_SS,
+ .hardware_test = hardware_test,
+ .get_power_status = get_power_status,
+ .get_attention_status = get_attention_status,
+ .get_latch_status = get_latch_status,
+ .get_adapter_status = get_adapter_status,
+};
+
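+/* These ops are invoked by the generic pci_hotplug core: e.g. writing 1
+ * or 0 to /sys/bus/pci/slots/<name>/power ends up in process_SI() or
+ * process_SS() respectively, and a write to .../attention lands in
+ * set_attention_status() (paths as created by pci_hotplug_core for each
+ * registered slot).
+ */
+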
+#define SLOT_NAME_SIZE 10
+
+static int ctrl_slot_setup(struct controller *ctrl,
+ void __iomem *smbios_start,
+ void __iomem *smbios_table)
+{
+ struct slot *slot;
+ struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot_info *hotplug_slot_info;
+ struct pci_bus *bus = ctrl->pci_bus;
+ u8 number_of_slots;
+ u8 slot_device;
+ u8 slot_number;
+ u8 ctrl_slot;
+ u32 tempdword;
+ char name[SLOT_NAME_SIZE];
+	void __iomem *slot_entry = NULL;
+ int result;
+
+ dbg("%s\n", __func__);
+
+ tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
+
+ number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
+ slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
+ slot_number = ctrl->first_slot;
+
+ while (number_of_slots) {
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot) {
+ result = -ENOMEM;
+ goto error;
+ }
+
+ slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
+ GFP_KERNEL);
+ if (!slot->hotplug_slot) {
+ result = -ENOMEM;
+ goto error_slot;
+ }
+ hotplug_slot = slot->hotplug_slot;
+
+ hotplug_slot->info = kzalloc(sizeof(*(hotplug_slot->info)),
+ GFP_KERNEL);
+ if (!hotplug_slot->info) {
+ result = -ENOMEM;
+ goto error_hpslot;
+ }
+ hotplug_slot_info = hotplug_slot->info;
+
+ slot->ctrl = ctrl;
+ slot->bus = ctrl->bus;
+ slot->device = slot_device;
+ slot->number = slot_number;
+ dbg("slot->number = %u\n", slot->number);
+
+ slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
+ slot_entry);
+
+ while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
+ slot->number)) {
+ slot_entry = get_SMBIOS_entry(smbios_start,
+ smbios_table, 9, slot_entry);
+ }
+
+ slot->p_sm_slot = slot_entry;
+
+ init_timer(&slot->task_event);
+ slot->task_event.expires = jiffies + 5 * HZ;
+ slot->task_event.function = cpqhp_pushbutton_thread;
+
+ /*FIXME: these capabilities aren't used but if they are
+ * they need to be correctly implemented
+ */
+ slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
+ slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
+
+ if (is_slot64bit(slot))
+ slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
+ if (is_slot66mhz(slot))
+ slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
+ if (bus->cur_bus_speed == PCI_SPEED_66MHz)
+ slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
+
+ ctrl_slot =
+ slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
+
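+		/* tempdword is the INT_INPUT_CLEAR snapshot read above; the
+		 * masks below appear to decode it as active-low presence
+		 * bits (bits 16-23 and 24-31) and switch state (bits 0-7),
+		 * one bit per physical slot.
+		 */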
+ /* Check presence */
+ slot->capabilities |=
+ ((((~tempdword) >> 23) |
+ ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
+ /* Check the switch state */
+ slot->capabilities |=
+ ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
+ /* Check the slot enable */
+ slot->capabilities |=
+ ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
+
+ /* register this slot with the hotplug pci core */
+ hotplug_slot->release = &release_slot;
+ hotplug_slot->private = slot;
+ snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
+ hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
+
+ hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
+ hotplug_slot_info->attention_status =
+ cpq_get_attention_status(ctrl, slot);
+ hotplug_slot_info->latch_status =
+ cpq_get_latch_status(ctrl, slot);
+ hotplug_slot_info->adapter_status =
+ get_presence_status(ctrl, slot);
+
+ dbg("registering bus %d, dev %d, number %d, ctrl->slot_device_offset %d, slot %d\n",
+ slot->bus, slot->device,
+ slot->number, ctrl->slot_device_offset,
+ slot_number);
+ result = pci_hp_register(hotplug_slot,
+ ctrl->pci_dev->bus,
+ slot->device,
+ name);
+ if (result) {
+ err("pci_hp_register failed with error %d\n", result);
+ goto error_info;
+ }
+
+ slot->next = ctrl->slot;
+ ctrl->slot = slot;
+
+ number_of_slots--;
+ slot_device++;
+ slot_number++;
+ }
+
+ return 0;
+error_info:
+ kfree(hotplug_slot_info);
+error_hpslot:
+ kfree(hotplug_slot);
+error_slot:
+ kfree(slot);
+error:
+ return result;
+}
+
+static int one_time_init(void)
+{
+ int loop;
+ int retval = 0;
+
+ if (initialized)
+ return 0;
+
+ power_mode = 0;
+
+ retval = init_cpqhp_routing_table();
+ if (retval)
+ goto error;
+
+ if (cpqhp_debug)
+ pci_print_IRQ_route();
+
+	dbg("Initialize + Start the notification mechanism\n");
+
+ retval = cpqhp_event_start_thread();
+ if (retval)
+ goto error;
+
+ dbg("Initialize slot lists\n");
+ for (loop = 0; loop < 256; loop++)
+ cpqhp_slot_list[loop] = NULL;
+
+ /* FIXME: We also need to hook the NMI handler eventually.
+ * this also needs to be worked with Christoph
+ * register_NMI_handler();
+ */
+ /* Map rom address */
+ cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
+ if (!cpqhp_rom_start) {
+		err("Could not ioremap memory region for ROM\n");
+ retval = -EIO;
+ goto error;
+ }
+
+ /* Now, map the int15 entry point if we are on compaq specific
+ * hardware
+ */
+ compaq_nvram_init(cpqhp_rom_start);
+
+ /* Map smbios table entry point structure */
+ smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
+ cpqhp_rom_start + ROM_PHY_LEN);
+ if (!smbios_table) {
+		err("Could not find the SMBIOS pointer in memory\n");
+ retval = -EIO;
+ goto error_rom_start;
+ }
+
+ smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
+ readw(smbios_table + ST_LENGTH));
+ if (!smbios_start) {
+		err("Could not ioremap memory region taken from SMBIOS values\n");
+ retval = -EIO;
+ goto error_smbios_start;
+ }
+
+ initialized = 1;
+
+ return retval;
+
+error_smbios_start:
+ iounmap(smbios_start);
+error_rom_start:
+ iounmap(cpqhp_rom_start);
+error:
+ return retval;
+}
+
+static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ u8 num_of_slots = 0;
+ u8 hp_slot = 0;
+ u8 device;
+ u8 bus_cap;
+ u16 temp_word;
+ u16 vendor_id;
+ u16 subsystem_vid;
+ u16 subsystem_deviceid;
+ u32 rc;
+ struct controller *ctrl;
+ struct pci_func *func;
+ struct pci_bus *bus;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR MY_NAME ": cannot enable PCI device %s (%d)\n",
+ pci_name(pdev), err);
+ return err;
+ }
+
+ bus = pdev->subordinate;
+ if (!bus) {
+ dev_notice(&pdev->dev, "the device is not a bridge, skipping\n");
+ rc = -ENODEV;
+ goto err_disable_device;
+ }
+
+	/* Need to read the VID early because it is used to differentiate
+	 * CPQ and INTC discovery
+	 */
+ vendor_id = pdev->vendor;
+ if ((vendor_id != PCI_VENDOR_ID_COMPAQ) &&
+ (vendor_id != PCI_VENDOR_ID_INTEL)) {
+ err(msg_HPC_non_compaq_or_intel);
+ rc = -ENODEV;
+ goto err_disable_device;
+ }
+ dbg("Vendor ID: %x\n", vendor_id);
+
+ dbg("revision: %d\n", pdev->revision);
+ if ((vendor_id == PCI_VENDOR_ID_COMPAQ) && (!pdev->revision)) {
+ err(msg_HPC_rev_error);
+ rc = -ENODEV;
+ goto err_disable_device;
+ }
+
+ /* Check for the proper subsystem IDs
+ * Intel uses a different SSID programming model than Compaq.
+ * For Intel, each SSID bit identifies a PHP capability.
+ * Also Intel HPCs may have RID=0.
+ */
+	if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
+		err(msg_HPC_not_supported);
+		rc = -ENODEV;
+		goto err_disable_device;
+	}
+
+ /* TODO: This code can be made to support non-Compaq or Intel
+ * subsystem IDs
+ */
+ subsystem_vid = pdev->subsystem_vendor;
+ dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
+ if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
+ err(msg_HPC_non_compaq_or_intel);
+ rc = -ENODEV;
+ goto err_disable_device;
+ }
+
+ ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL);
+ if (!ctrl) {
+ err("%s : out of memory\n", __func__);
+ rc = -ENOMEM;
+ goto err_disable_device;
+ }
+
+ subsystem_deviceid = pdev->subsystem_device;
+
+ info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
+
+ /* Set Vendor ID, so it can be accessed later from other
+ * functions
+ */
+ ctrl->vendor_id = vendor_id;
+
+ switch (subsystem_vid) {
+ case PCI_VENDOR_ID_COMPAQ:
+ if (pdev->revision >= 0x13) { /* CIOBX */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 1;
+ ctrl->pcix_speed_capability = 1;
+ pci_read_config_byte(pdev, 0x41, &bus_cap);
+ if (bus_cap & 0x80) {
+ dbg("bus max supports 133MHz PCI-X\n");
+ bus->max_bus_speed = PCI_SPEED_133MHz_PCIX;
+ break;
+ }
+ if (bus_cap & 0x40) {
+ dbg("bus max supports 100MHz PCI-X\n");
+ bus->max_bus_speed = PCI_SPEED_100MHz_PCIX;
+ break;
+ }
+ if (bus_cap & 0x20) {
+ dbg("bus max supports 66MHz PCI-X\n");
+ bus->max_bus_speed = PCI_SPEED_66MHz_PCIX;
+ break;
+ }
+ if (bus_cap & 0x10) {
+ dbg("bus max supports 66MHz PCI\n");
+ bus->max_bus_speed = PCI_SPEED_66MHz;
+ break;
+ }
+
+ break;
+ }
+
+ switch (subsystem_deviceid) {
+ case PCI_SUB_HPC_ID:
+ /* Original 6500/7000 implementation */
+ ctrl->slot_switch_type = 1;
+ bus->max_bus_speed = PCI_SPEED_33MHz;
+ ctrl->push_button = 0;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID2:
+ /* First Pushbutton implementation */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ bus->max_bus_speed = PCI_SPEED_33MHz;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID_INTC:
+ /* Third party (6500/7000) */
+ ctrl->slot_switch_type = 1;
+ bus->max_bus_speed = PCI_SPEED_33MHz;
+ ctrl->push_button = 0;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID3:
+		/* First 66 MHz implementation */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ bus->max_bus_speed = PCI_SPEED_66MHz;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID4:
+ /* First PCI-X implementation, 100MHz */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ bus->max_bus_speed = PCI_SPEED_100MHz_PCIX;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 1;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ default:
+ err(msg_HPC_not_supported);
+ rc = -ENODEV;
+ goto err_free_ctrl;
+ }
+ break;
+
+ case PCI_VENDOR_ID_INTEL:
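+		/* Intel SSID bit assignments, as decoded below:
+		 *   bit 0: bus speed (0 = 33MHz, 1 = 66MHz)
+		 *   bit 1: push button (0 = present, 1 = absent)
+		 *   bit 2: slot switch (0 = mechanical, 1 = none)
+		 *   bit 3: PHP (0 = de-featured, 1 = normal operation)
+		 *   bit 4: alternate base address register interface
+		 *   bit 5: PCI config space index
+		 *   bit 6: 133MHz PCI-X capable (when bit 7 is set)
+		 *   bit 7: PCI-X supported
+		 */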
+ /* Check for speed capability (0=33, 1=66) */
+ if (subsystem_deviceid & 0x0001)
+ bus->max_bus_speed = PCI_SPEED_66MHz;
+ else
+ bus->max_bus_speed = PCI_SPEED_33MHz;
+
+ /* Check for push button */
+ if (subsystem_deviceid & 0x0002)
+ ctrl->push_button = 0;
+ else
+ ctrl->push_button = 1;
+
+ /* Check for slot switch type (0=mechanical, 1=not mechanical) */
+ if (subsystem_deviceid & 0x0004)
+ ctrl->slot_switch_type = 0;
+ else
+ ctrl->slot_switch_type = 1;
+
+ /* PHP Status (0=De-feature PHP, 1=Normal operation) */
+ if (subsystem_deviceid & 0x0008)
+ ctrl->defeature_PHP = 1; /* PHP supported */
+ else
+ ctrl->defeature_PHP = 0; /* PHP not supported */
+
+ /* Alternate Base Address Register Interface
+ * (0=not supported, 1=supported)
+ */
+ if (subsystem_deviceid & 0x0010)
+ ctrl->alternate_base_address = 1;
+ else
+ ctrl->alternate_base_address = 0;
+
+ /* PCI Config Space Index (0=not supported, 1=supported) */
+ if (subsystem_deviceid & 0x0020)
+ ctrl->pci_config_space = 1;
+ else
+ ctrl->pci_config_space = 0;
+
+ /* PCI-X support */
+ if (subsystem_deviceid & 0x0080) {
+ ctrl->pcix_support = 1;
+ if (subsystem_deviceid & 0x0040)
+				/* 133MHz PCI-X if bit 6 is also set */
+ ctrl->pcix_speed_capability = 1;
+ else
+ /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
+ /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
+ ctrl->pcix_speed_capability = 0;
+ } else {
+ /* Conventional PCI */
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ }
+ break;
+
+ default:
+ err(msg_HPC_not_supported);
+ rc = -ENODEV;
+ goto err_free_ctrl;
+ }
+
+ /* Tell the user that we found one. */
+ info("Initializing the PCI hot plug controller residing on PCI bus %d\n",
+ pdev->bus->number);
+
+ dbg("Hotplug controller capabilities:\n");
+ dbg(" speed_capability %d\n", bus->max_bus_speed);
+ dbg(" slot_switch_type %s\n", ctrl->slot_switch_type ?
+ "switch present" : "no switch");
+ dbg(" defeature_PHP %s\n", ctrl->defeature_PHP ?
+ "PHP supported" : "PHP not supported");
+ dbg(" alternate_base_address %s\n", ctrl->alternate_base_address ?
+ "supported" : "not supported");
+ dbg(" pci_config_space %s\n", ctrl->pci_config_space ?
+ "supported" : "not supported");
+ dbg(" pcix_speed_capability %s\n", ctrl->pcix_speed_capability ?
+ "supported" : "not supported");
+ dbg(" pcix_support %s\n", ctrl->pcix_support ?
+ "supported" : "not supported");
+
+ ctrl->pci_dev = pdev;
+ pci_set_drvdata(pdev, ctrl);
+
+ /* make our own copy of the pci bus structure,
+ * as we like tweaking it a lot */
+ ctrl->pci_bus = kmemdup(pdev->bus, sizeof(*ctrl->pci_bus), GFP_KERNEL);
+ if (!ctrl->pci_bus) {
+ err("out of memory\n");
+ rc = -ENOMEM;
+ goto err_free_ctrl;
+ }
+
+ ctrl->bus = pdev->bus->number;
+ ctrl->rev = pdev->revision;
+ dbg("bus device function rev: %d %d %d %d\n", ctrl->bus,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), ctrl->rev);
+
+ mutex_init(&ctrl->crit_sect);
+ init_waitqueue_head(&ctrl->queue);
+
+ /* initialize our threads if they haven't already been started up */
+ rc = one_time_init();
+ if (rc)
+ goto err_free_bus;
+
+ dbg("pdev = %p\n", pdev);
+ dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0));
+ dbg("pci resource len %llx\n", (unsigned long long)pci_resource_len(pdev, 0));
+
+ if (!request_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0), MY_NAME)) {
+ err("cannot reserve MMIO region\n");
+ rc = -ENOMEM;
+ goto err_free_bus;
+ }
+
+ ctrl->hpc_reg = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!ctrl->hpc_reg) {
+ err("cannot remap MMIO region %llx @ %llx\n",
+ (unsigned long long)pci_resource_len(pdev, 0),
+ (unsigned long long)pci_resource_start(pdev, 0));
+ rc = -ENODEV;
+ goto err_free_mem_region;
+ }
+
+	/* Check for 66MHz operation */
+	bus->cur_bus_speed = get_controller_speed(ctrl);
+
+ /********************************************************
+ *
+ * Save configuration headers for this and
+ * subordinate PCI buses
+ *
+ ********************************************************/
+
+ /* find the physical slot number of the first hot plug slot */
+
+ /* Get slot won't work for devices behind bridges, but
+ * in this case it will always be called for the "base"
+ * bus/dev/func of a slot.
+ * CS: this is leveraging the PCIIRQ routing code from the kernel
+ * (pci-pc.c: get_irq_routing_table) */
+ rc = get_slot_mapping(ctrl->pci_bus, pdev->bus->number,
+ (readb(ctrl->hpc_reg + SLOT_MASK) >> 4),
+ &(ctrl->first_slot));
+ dbg("get_slot_mapping: first_slot = %d, returned = %d\n",
+ ctrl->first_slot, rc);
+ if (rc) {
+ err(msg_initialization_err, rc);
+ goto err_iounmap;
+ }
+
+ /* Store PCI Config Space for all devices on this bus */
+ rc = cpqhp_save_config(ctrl, ctrl->bus, readb(ctrl->hpc_reg + SLOT_MASK));
+ if (rc) {
+ err("%s: unable to save PCI configuration data, error %d\n",
+ __func__, rc);
+ goto err_iounmap;
+ }
+
+ /*
+ * Get IO, memory, and IRQ resources for new devices
+ */
+ /* The next line is required for cpqhp_find_available_resources */
+ ctrl->interrupt = pdev->irq;
+ if (ctrl->interrupt < 0x10) {
+ cpqhp_legacy_mode = 1;
+ dbg("System seems to be configured for Full Table Mapped MPS mode\n");
+ }
+
+ ctrl->cfgspc_irq = 0;
+ pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &ctrl->cfgspc_irq);
+
+ rc = cpqhp_find_available_resources(ctrl, cpqhp_rom_start);
+ ctrl->add_support = !rc;
+ if (rc) {
+ dbg("cpqhp_find_available_resources = 0x%x\n", rc);
+ err("unable to locate PCI configuration resources for hot plug add.\n");
+ goto err_iounmap;
+ }
+
+ /*
+ * Finish setting up the hot plug ctrl device
+ */
+ ctrl->slot_device_offset = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
+	dbg("NumSlots %d\n", ctrl->slot_device_offset);
+
+ ctrl->next_event = 0;
+
+ /* Setup the slot information structures */
+ rc = ctrl_slot_setup(ctrl, smbios_start, smbios_table);
+ if (rc) {
+ err(msg_initialization_err, 6);
+		err("%s: unable to set up slot information, error %d\n",
+			__func__, rc);
+ goto err_iounmap;
+ }
+
+ /* Mask all general input interrupts */
+ writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_MASK);
+
+ /* set up the interrupt */
+	dbg("HPC interrupt = %d\n", ctrl->interrupt);
+ if (request_irq(ctrl->interrupt, cpqhp_ctrl_intr,
+ IRQF_SHARED, MY_NAME, ctrl)) {
+ err("Can't get irq %d for the hotplug pci controller\n",
+ ctrl->interrupt);
+ rc = -ENODEV;
+ goto err_iounmap;
+ }
+
+ /* Enable Shift Out interrupt and clear it, also enable SERR on power
+ * fault
+ */
+ temp_word = readw(ctrl->hpc_reg + MISC);
+ temp_word |= 0x4006;
+ writew(temp_word, ctrl->hpc_reg + MISC);
+
+ /* Changed 05/05/97 to clear all interrupts at start */
+ writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_INPUT_CLEAR);
+
+ ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
+
+ writel(0x0L, ctrl->hpc_reg + INT_MASK);
+
+ if (!cpqhp_ctrl_list) {
+ cpqhp_ctrl_list = ctrl;
+ ctrl->next = NULL;
+ } else {
+ ctrl->next = cpqhp_ctrl_list;
+ cpqhp_ctrl_list = ctrl;
+ }
+
+	/* Turn off empty slots here unless the command line option "ON" was
+	 * set. First wait for exclusive access to the hardware.
+	 */
+ mutex_lock(&ctrl->crit_sect);
+
+ num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
+
+ /* find first device number for the ctrl */
+ device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
+
+ while (num_of_slots) {
+ dbg("num_of_slots: %d\n", num_of_slots);
+ func = cpqhp_slot_find(ctrl->bus, device, 0);
+ if (!func)
+ break;
+
+ hp_slot = func->device - ctrl->slot_device_offset;
+ dbg("hp_slot: %d\n", hp_slot);
+
+ /* We have to save the presence info for these slots */
+ temp_word = ctrl->ctrl_int_comp >> 16;
+ func->presence_save = (temp_word >> hp_slot) & 0x01;
+ func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
+
+ if (ctrl->ctrl_int_comp & (0x1L << hp_slot))
+ func->switch_save = 0;
+ else
+ func->switch_save = 0x10;
+
+ if (!power_mode)
+ if (!func->is_a_board) {
+ green_LED_off(ctrl, hp_slot);
+ slot_disable(ctrl, hp_slot);
+ }
+
+ device++;
+ num_of_slots--;
+ }
+
+ if (!power_mode) {
+ set_SOGO(ctrl);
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq(ctrl);
+ }
+
+ rc = init_SERR(ctrl);
+ if (rc) {
+ err("init_SERR failed\n");
+ mutex_unlock(&ctrl->crit_sect);
+ goto err_free_irq;
+ }
+
+ /* Done with exclusive hardware access */
+ mutex_unlock(&ctrl->crit_sect);
+
+ cpqhp_create_debugfs_files(ctrl);
+
+ return 0;
+
+err_free_irq:
+ free_irq(ctrl->interrupt, ctrl);
+err_iounmap:
+ iounmap(ctrl->hpc_reg);
+err_free_mem_region:
+ release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+err_free_bus:
+ kfree(ctrl->pci_bus);
+err_free_ctrl:
+ kfree(ctrl);
+err_disable_device:
+ pci_disable_device(pdev);
+ return rc;
+}
+
+static void __exit unload_cpqphpd(void)
+{
+ struct pci_func *next;
+ struct pci_func *TempSlot;
+ int loop;
+ u32 rc;
+ struct controller *ctrl;
+ struct controller *tctrl;
+ struct pci_resource *res;
+ struct pci_resource *tres;
+
+ rc = compaq_nvram_store(cpqhp_rom_start);
+
+ ctrl = cpqhp_ctrl_list;
+
+ while (ctrl) {
+ if (ctrl->hpc_reg) {
+ u16 misc;
+ rc = read_slot_enable (ctrl);
+
+ writeb(0, ctrl->hpc_reg + SLOT_SERR);
+ writel(0xFFFFFFC0L | ~rc, ctrl->hpc_reg + INT_MASK);
+
+ misc = readw(ctrl->hpc_reg + MISC);
+ misc &= 0xFFFD;
+ writew(misc, ctrl->hpc_reg + MISC);
+ }
+
+ ctrl_slot_cleanup(ctrl);
+
+ res = ctrl->io_head;
+ while (res) {
+ tres = res;
+ res = res->next;
+ kfree(tres);
+ }
+
+ res = ctrl->mem_head;
+ while (res) {
+ tres = res;
+ res = res->next;
+ kfree(tres);
+ }
+
+ res = ctrl->p_mem_head;
+ while (res) {
+ tres = res;
+ res = res->next;
+ kfree(tres);
+ }
+
+ res = ctrl->bus_head;
+ while (res) {
+ tres = res;
+ res = res->next;
+ kfree(tres);
+ }
+
+		kfree(ctrl->pci_bus);
+
+ tctrl = ctrl;
+ ctrl = ctrl->next;
+ kfree(tctrl);
+ }
+
+ for (loop = 0; loop < 256; loop++) {
+ next = cpqhp_slot_list[loop];
+ while (next != NULL) {
+ res = next->io_head;
+ while (res) {
+ tres = res;
+ res = res->next;
+ kfree(tres);
+ }
+
+ res = next->mem_head;
+ while (res) {
+ tres = res;
+ res = res->next;
+ kfree(tres);
+ }
+
+ res = next->p_mem_head;
+ while (res) {
+ tres = res;
+ res = res->next;
+ kfree(tres);
+ }
+
+ res = next->bus_head;
+ while (res) {
+ tres = res;
+ res = res->next;
+ kfree(tres);
+ }
+
+ TempSlot = next;
+ next = next->next;
+ kfree(TempSlot);
+ }
+ }
+
+ /* Stop the notification mechanism */
+ if (initialized)
+ cpqhp_event_stop_thread();
+
+ /* unmap the rom address */
+ if (cpqhp_rom_start)
+ iounmap(cpqhp_rom_start);
+ if (smbios_start)
+ iounmap(smbios_start);
+}
+
+static struct pci_device_id hpcd_pci_tbl[] = {
+ {
+ /* handle any PCI Hotplug controller */
+ .class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00),
+ .class_mask = ~0,
+
+ /* no matter who makes it */
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+
+ }, { /* end: all zeroes */ }
+};
+
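+/* The broad class match above makes this driver bind to any PCI hotplug
+ * controller; cpqhpc_probe() then rejects anything whose vendor and
+ * subsystem IDs are not Compaq or Intel.
+ */
+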
+MODULE_DEVICE_TABLE(pci, hpcd_pci_tbl);
+
+static struct pci_driver cpqhpc_driver = {
+ .name = "compaq_pci_hotplug",
+ .id_table = hpcd_pci_tbl,
+ .probe = cpqhpc_probe,
+ /* remove: cpqhpc_remove_one, */
+};
+
+static int __init cpqhpc_init(void)
+{
+ int result;
+
+ cpqhp_debug = debug;
+
+	info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+ cpqhp_initialize_debugfs();
+ result = pci_register_driver(&cpqhpc_driver);
+ dbg("pci_register_driver = %d\n", result);
+ return result;
+}
+
+static void __exit cpqhpc_cleanup(void)
+{
+ dbg("unload_cpqphpd()\n");
+ unload_cpqphpd();
+
+ dbg("pci_unregister_driver\n");
+ pci_unregister_driver(&cpqhpc_driver);
+ cpqhp_shutdown_debugfs();
+}
+
+module_init(cpqhpc_init);
+module_exit(cpqhpc_cleanup);
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
new file mode 100644
index 000000000..c5cbefee5
--- /dev/null
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -0,0 +1,2971 @@
+/*
+ * Compaq Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/kthread.h>
+#include "cpqphp.h"
+
+static u32 configure_new_device(struct controller *ctrl, struct pci_func *func,
+ u8 behind_bridge, struct resource_lists *resources);
+static int configure_new_function(struct controller *ctrl, struct pci_func *func,
+ u8 behind_bridge, struct resource_lists *resources);
+static void interrupt_event_handler(struct controller *ctrl);
+
+
+static struct task_struct *cpqhp_event_thread;
+static unsigned long pushbutton_pending; /* = 0 */
+
+/* delay is the number of jiffies to wait */
+static void long_delay(int delay)
+{
+ /*
+ * XXX(hch): if someone is bored please convert all callers
+ * to call msleep_interruptible directly. They really want
+ * to specify timeouts in natural units and spend a lot of
+ * effort converting them to jiffies..
+ */
+ msleep_interruptible(jiffies_to_msecs(delay));
+}
+
+
+/* FIXME: The following line needs to be somewhere else... */
+#define WRONG_BUS_FREQUENCY 0x07
+static u8 handle_switch_change(u8 change, struct controller *ctrl)
+{
+ int hp_slot;
+ u8 rc = 0;
+ u16 temp_word;
+ struct pci_func *func;
+ struct event_info *taskInfo;
+
+ if (!change)
+ return 0;
+
+ /* Switch Change */
+ dbg("cpqsbd: Switch interrupt received.\n");
+
+ for (hp_slot = 0; hp_slot < 6; hp_slot++) {
+ if (change & (0x1L << hp_slot)) {
+ /*
+ * this one changed.
+ */
+ func = cpqhp_slot_find(ctrl->bus,
+ (hp_slot + ctrl->slot_device_offset), 0);
+
+ /* this is the structure that tells the worker thread
+ * what to do
+ */
+ taskInfo = &(ctrl->event_queue[ctrl->next_event]);
+ ctrl->next_event = (ctrl->next_event + 1) % 10;
+ taskInfo->hp_slot = hp_slot;
+
+ rc++;
+
+ temp_word = ctrl->ctrl_int_comp >> 16;
+ func->presence_save = (temp_word >> hp_slot) & 0x01;
+ func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
+
+ if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
+ /*
+ * Switch opened
+ */
+
+ func->switch_save = 0;
+
+ taskInfo->event_type = INT_SWITCH_OPEN;
+ } else {
+ /*
+ * Switch closed
+ */
+
+ func->switch_save = 0x10;
+
+ taskInfo->event_type = INT_SWITCH_CLOSE;
+ }
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * cpqhp_find_slot - find the struct slot of given device
+ * @ctrl: scan the slots of this controller
+ * @device: the device id to find
+ */
+static struct slot *cpqhp_find_slot(struct controller *ctrl, u8 device)
+{
+ struct slot *slot = ctrl->slot;
+
+ while (slot && (slot->device != device))
+ slot = slot->next;
+
+ return slot;
+}
+
+
+static u8 handle_presence_change(u16 change, struct controller *ctrl)
+{
+ int hp_slot;
+ u8 rc = 0;
+ u8 temp_byte;
+ u16 temp_word;
+ struct pci_func *func;
+ struct event_info *taskInfo;
+ struct slot *p_slot;
+
+ if (!change)
+ return 0;
+
+ /*
+ * Presence Change
+ */
+ dbg("cpqsbd: Presence/Notify input change.\n");
+	dbg(" Changed bits are 0x%4.4x\n", change);
+
+ for (hp_slot = 0; hp_slot < 6; hp_slot++) {
+ if (change & (0x0101 << hp_slot)) {
+ /*
+ * this one changed.
+ */
+ func = cpqhp_slot_find(ctrl->bus,
+ (hp_slot + ctrl->slot_device_offset), 0);
+
+ taskInfo = &(ctrl->event_queue[ctrl->next_event]);
+ ctrl->next_event = (ctrl->next_event + 1) % 10;
+ taskInfo->hp_slot = hp_slot;
+
+ rc++;
+
+ p_slot = cpqhp_find_slot(ctrl, hp_slot + (readb(ctrl->hpc_reg + SLOT_MASK) >> 4));
+ if (!p_slot)
+ return 0;
+
+			/* If the switch is closed, this must be a button
+			 * event. If the slot is not in push-button mode,
+			 * ignore it.
+			 */
+ if (func->switch_save && (ctrl->push_button == 1)) {
+ temp_word = ctrl->ctrl_int_comp >> 16;
+ temp_byte = (temp_word >> hp_slot) & 0x01;
+ temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02;
+
+ if (temp_byte != func->presence_save) {
+ /*
+ * button Pressed (doesn't do anything)
+ */
+ dbg("hp_slot %d button pressed\n", hp_slot);
+ taskInfo->event_type = INT_BUTTON_PRESS;
+ } else {
+ /*
+ * button Released - TAKE ACTION!!!!
+ */
+ dbg("hp_slot %d button released\n", hp_slot);
+ taskInfo->event_type = INT_BUTTON_RELEASE;
+
+ /* Cancel if we are still blinking */
+ if ((p_slot->state == BLINKINGON_STATE)
+ || (p_slot->state == BLINKINGOFF_STATE)) {
+ taskInfo->event_type = INT_BUTTON_CANCEL;
+ dbg("hp_slot %d button cancel\n", hp_slot);
+ } else if ((p_slot->state == POWERON_STATE)
+ || (p_slot->state == POWEROFF_STATE)) {
+ /* info(msg_button_ignore, p_slot->number); */
+ taskInfo->event_type = INT_BUTTON_IGNORE;
+ dbg("hp_slot %d button ignore\n", hp_slot);
+ }
+ }
+ } else {
+ /* Switch is open, assume a presence change
+ * Save the presence state
+ */
+ temp_word = ctrl->ctrl_int_comp >> 16;
+ func->presence_save = (temp_word >> hp_slot) & 0x01;
+ func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
+
+ if ((!(ctrl->ctrl_int_comp & (0x010000 << hp_slot))) ||
+ (!(ctrl->ctrl_int_comp & (0x01000000 << hp_slot)))) {
+ /* Present */
+ taskInfo->event_type = INT_PRESENCE_ON;
+ } else {
+ /* Not Present */
+ taskInfo->event_type = INT_PRESENCE_OFF;
+ }
+ }
+ }
+ }
+
+ return rc;
+}
+
+
+static u8 handle_power_fault(u8 change, struct controller *ctrl)
+{
+ int hp_slot;
+ u8 rc = 0;
+ struct pci_func *func;
+ struct event_info *taskInfo;
+
+ if (!change)
+ return 0;
+
+ /*
+ * power fault
+ */
+
+ info("power fault interrupt\n");
+
+ for (hp_slot = 0; hp_slot < 6; hp_slot++) {
+ if (change & (0x01 << hp_slot)) {
+ /*
+ * this one changed.
+ */
+ func = cpqhp_slot_find(ctrl->bus,
+ (hp_slot + ctrl->slot_device_offset), 0);
+
+ taskInfo = &(ctrl->event_queue[ctrl->next_event]);
+ ctrl->next_event = (ctrl->next_event + 1) % 10;
+ taskInfo->hp_slot = hp_slot;
+
+ rc++;
+
+ if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) {
+ /*
+ * power fault Cleared
+ */
+ func->status = 0x00;
+
+ taskInfo->event_type = INT_POWER_FAULT_CLEAR;
+ } else {
+ /*
+ * power fault
+ */
+ taskInfo->event_type = INT_POWER_FAULT;
+
+ if (ctrl->rev < 4) {
+					amber_LED_on(ctrl, hp_slot);
+					green_LED_off(ctrl, hp_slot);
+					set_SOGO(ctrl);
+
+ /* this is a fatal condition, we want
+ * to crash the machine to protect from
+ * data corruption. simulated_NMI
+ * shouldn't ever return */
+ /* FIXME
+ simulated_NMI(hp_slot, ctrl); */
+
+ /* The following code causes a software
+ * crash just in case simulated_NMI did
+ * return */
+ /*FIXME
+ panic(msg_power_fault); */
+ } else {
+ /* set power fault status for this board */
+ func->status = 0xFF;
+ info("power fault bit %x set\n", hp_slot);
+ }
+ }
+ }
+ }
+
+ return rc;
+}
+
+
+/**
+ * sort_by_size - sort nodes on the list by their length, smallest first.
+ * @head: list to sort
+ */
+static int sort_by_size(struct pci_resource **head)
+{
+ struct pci_resource *current_res;
+ struct pci_resource *next_res;
+ int out_of_order = 1;
+
+ if (!(*head))
+ return 1;
+
+ if (!((*head)->next))
+ return 0;
+
+ while (out_of_order) {
+ out_of_order = 0;
+
+ /* Special case for swapping list head */
+ if (((*head)->next) &&
+ ((*head)->length > (*head)->next->length)) {
+ out_of_order++;
+ current_res = *head;
+ *head = (*head)->next;
+ current_res->next = (*head)->next;
+ (*head)->next = current_res;
+ }
+
+ current_res = *head;
+
+ while (current_res->next && current_res->next->next) {
+ if (current_res->next->length > current_res->next->next->length) {
+ out_of_order++;
+ next_res = current_res->next;
+ current_res->next = current_res->next->next;
+ current_res = current_res->next;
+ next_res->next = current_res->next;
+ current_res->next = next_res;
+ } else
+ current_res = current_res->next;
+ }
+ } /* End of out_of_order loop */
+
+ return 0;
+}
+
+
+/**
+ * sort_by_max_size - sort nodes on the list by their length, largest first.
+ * @head: list to sort
+ */
+static int sort_by_max_size(struct pci_resource **head)
+{
+ struct pci_resource *current_res;
+ struct pci_resource *next_res;
+ int out_of_order = 1;
+
+ if (!(*head))
+ return 1;
+
+ if (!((*head)->next))
+ return 0;
+
+ while (out_of_order) {
+ out_of_order = 0;
+
+ /* Special case for swapping list head */
+ if (((*head)->next) &&
+ ((*head)->length < (*head)->next->length)) {
+ out_of_order++;
+ current_res = *head;
+ *head = (*head)->next;
+ current_res->next = (*head)->next;
+ (*head)->next = current_res;
+ }
+
+ current_res = *head;
+
+ while (current_res->next && current_res->next->next) {
+ if (current_res->next->length < current_res->next->next->length) {
+ out_of_order++;
+ next_res = current_res->next;
+ current_res->next = current_res->next->next;
+ current_res = current_res->next;
+ next_res->next = current_res->next;
+ current_res->next = next_res;
+ } else
+ current_res = current_res->next;
+ }
+ } /* End of out_of_order loop */
+
+ return 0;
+}
+
+
+/**
+ * do_pre_bridge_resource_split - find node of resources that are unused
+ * @head: new list head
+ * @orig_head: original list head
+ * @alignment: max node size (?)
+ */
+static struct pci_resource *do_pre_bridge_resource_split(struct pci_resource **head,
+ struct pci_resource **orig_head, u32 alignment)
+{
+ struct pci_resource *prevnode = NULL;
+ struct pci_resource *node;
+ struct pci_resource *split_node;
+ u32 rc;
+	u32 temp_dword;
+
+	dbg("do_pre_bridge_resource_split\n");
+
+ if (!(*head) || !(*orig_head))
+ return NULL;
+
+ rc = cpqhp_resource_sort_and_combine(head);
+
+ if (rc)
+ return NULL;
+
+ if ((*head)->base != (*orig_head)->base)
+ return NULL;
+
+ if ((*head)->length == (*orig_head)->length)
+ return NULL;
+
+	/* If we got here, the bridge requires some of the resource, but
+	 * we may be able to split some off of the front
+	 */
+
+ node = *head;
+
+	if (node->length & (alignment - 1)) {
+ /* this one isn't an aligned length, so we'll make a new entry
+ * and split it up.
+ */
+ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
+
+ if (!split_node)
+ return NULL;
+
+ temp_dword = (node->length | (alignment-1)) + 1 - alignment;
+
+ split_node->base = node->base;
+ split_node->length = temp_dword;
+
+ node->length -= temp_dword;
+ node->base += split_node->length;
+
+ /* Put it in the list */
+ *head = split_node;
+ split_node->next = node;
+ }
+
+ if (node->length < alignment)
+ return NULL;
+
+ /* Now unlink it */
+ if (*head == node) {
+ *head = node->next;
+ } else {
+ prevnode = *head;
+ while (prevnode->next != node)
+ prevnode = prevnode->next;
+
+ prevnode->next = node->next;
+ }
+ node->next = NULL;
+
+ return node;
+}
+
+
+/**
+ * do_bridge_resource_split - find one node of resources that aren't in use
+ * @head: list head
+ * @alignment: max node size (?)
+ */
+static struct pci_resource *do_bridge_resource_split(struct pci_resource **head, u32 alignment)
+{
+ struct pci_resource *prevnode = NULL;
+ struct pci_resource *node;
+ u32 rc;
+ u32 temp_dword;
+
+ rc = cpqhp_resource_sort_and_combine(head);
+
+ if (rc)
+ return NULL;
+
+ node = *head;
+
+ while (node->next) {
+ prevnode = node;
+ node = node->next;
+ kfree(prevnode);
+ }
+
+ if (node->length < alignment)
+ goto error;
+
+ if (node->base & (alignment - 1)) {
+ /* Short circuit if adjusted size is too small */
+ temp_dword = (node->base | (alignment-1)) + 1;
+ if ((node->length - (temp_dword - node->base)) < alignment)
+ goto error;
+
+ node->length -= (temp_dword - node->base);
+ node->base = temp_dword;
+ }
+
+ if (node->length & (alignment - 1))
+ /* There's stuff in use after this node */
+ goto error;
+
+ return node;
+error:
+ kfree(node);
+ return NULL;
+}
+
+
+/**
+ * get_io_resource - find first node of given size not in ISA aliasing window.
+ * @head: list to search
+ * @size: size of node to find, must be a power of two.
+ *
+ * Description: This function sorts the resource list by size and then
+ * returns the first node of "size" length that is not in the ISA aliasing
+ * window. If it finds a node larger than "size" it will split it up.
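+ *
+ * The ISA aliasing test rejects any base address with bit 8 or 9 set
+ * (base & 0x300); e.g. (illustrative values) a candidate at 0x1300 is
+ * skipped while one at 0x1400 is acceptable.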
+ */
+static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size)
+{
+ struct pci_resource *prevnode;
+ struct pci_resource *node;
+ struct pci_resource *split_node;
+ u32 temp_dword;
+
+ if (!(*head))
+ return NULL;
+
+ if (cpqhp_resource_sort_and_combine(head))
+ return NULL;
+
+ if (sort_by_size(head))
+ return NULL;
+
+ for (node = *head; node; node = node->next) {
+ if (node->length < size)
+ continue;
+
+ if (node->base & (size - 1)) {
+ /* this one isn't base aligned properly
+ * so we'll make a new entry and split it up
+ */
+ temp_dword = (node->base | (size-1)) + 1;
+
+ /* Short circuit if adjusted size is too small */
+ if ((node->length - (temp_dword - node->base)) < size)
+ continue;
+
+ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
+
+ if (!split_node)
+ return NULL;
+
+ split_node->base = node->base;
+ split_node->length = temp_dword - node->base;
+ node->base = temp_dword;
+ node->length -= split_node->length;
+
+ /* Put it in the list */
+ split_node->next = node->next;
+ node->next = split_node;
+ } /* End of non-aligned base */
+
+ /* Don't need to check if too small since we already did */
+ if (node->length > size) {
+ /* this one is longer than we need
+ * so we'll make a new entry and split it up
+ */
+ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
+
+ if (!split_node)
+ return NULL;
+
+ split_node->base = node->base + size;
+ split_node->length = node->length - size;
+ node->length = size;
+
+ /* Put it in the list */
+ split_node->next = node->next;
+ node->next = split_node;
+ } /* End of too big on top end */
+
+ /* For IO make sure it's not in the ISA aliasing space */
+ if (node->base & 0x300L)
+ continue;
+
+ /* If we got here, then it is the right size
+ * Now take it out of the list and break
+ */
+ if (*head == node) {
+ *head = node->next;
+ } else {
+ prevnode = *head;
+ while (prevnode->next != node)
+ prevnode = prevnode->next;
+
+ prevnode->next = node->next;
+ }
+ node->next = NULL;
+ break;
+ }
+
+ return node;
+}
+
+
+/**
+ * get_max_resource - get largest node which has at least the given size.
+ * @head: the list to search the node in
+ * @size: the minimum size of the node to find
+ *
+ * Description: Gets the largest node that is at least "size" big from the
+ * list pointed to by head. It aligns the node on top and bottom
+ * to "size" alignment before returning it.
+ */
+static struct pci_resource *get_max_resource(struct pci_resource **head, u32 size)
+{
+ struct pci_resource *max;
+ struct pci_resource *temp;
+ struct pci_resource *split_node;
+ u32 temp_dword;
+
+ if (cpqhp_resource_sort_and_combine(head))
+ return NULL;
+
+ if (sort_by_max_size(head))
+ return NULL;
+
+ for (max = *head; max; max = max->next) {
+ /* If not big enough we could probably just bail,
+ * instead we'll continue to the next.
+ */
+ if (max->length < size)
+ continue;
+
+ if (max->base & (size - 1)) {
+ /* this one isn't base aligned properly
+ * so we'll make a new entry and split it up
+ */
+ temp_dword = (max->base | (size-1)) + 1;
+
+ /* Short circuit if adjusted size is too small */
+ if ((max->length - (temp_dword - max->base)) < size)
+ continue;
+
+ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
+
+ if (!split_node)
+ return NULL;
+
+ split_node->base = max->base;
+ split_node->length = temp_dword - max->base;
+ max->base = temp_dword;
+ max->length -= split_node->length;
+
+ split_node->next = max->next;
+ max->next = split_node;
+ }
+
+ if ((max->base + max->length) & (size - 1)) {
+ /* this one isn't end aligned properly at the top
+ * so we'll make a new entry and split it up
+ */
+ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
+
+ if (!split_node)
+ return NULL;
+ temp_dword = ((max->base + max->length) & ~(size - 1));
+ split_node->base = temp_dword;
+ split_node->length = max->length + max->base
+ - split_node->base;
+ max->length -= split_node->length;
+
+ split_node->next = max->next;
+ max->next = split_node;
+ }
+
+ /* Make sure it didn't shrink too much when we aligned it */
+ if (max->length < size)
+ continue;
+
+ /* Now take it out of the list */
+ temp = *head;
+ if (temp == max) {
+ *head = max->next;
+ } else {
+ while (temp && temp->next != max)
+ temp = temp->next;
+
+ if (temp)
+ temp->next = max->next;
+ }
+
+ max->next = NULL;
+ break;
+ }
+
+ return max;
+}
+
+
+/**
+ * get_resource - find resource of given size and split up larger ones.
+ * @head: the list to search for resources
+ * @size: the size limit to use
+ *
+ * Description: This function sorts the resource list by size and then
+ * returns the first node of "size" length. If it finds a node
+ * larger than "size" it will split it up.
+ *
+ * size must be a power of two.
+ */
+static struct pci_resource *get_resource(struct pci_resource **head, u32 size)
+{
+ struct pci_resource *prevnode;
+ struct pci_resource *node;
+ struct pci_resource *split_node;
+ u32 temp_dword;
+
+ if (cpqhp_resource_sort_and_combine(head))
+ return NULL;
+
+ if (sort_by_size(head))
+ return NULL;
+
+ for (node = *head; node; node = node->next) {
+ dbg("%s: req_size =%x node=%p, base=%x, length=%x\n",
+ __func__, size, node, node->base, node->length);
+ if (node->length < size)
+ continue;
+
+ if (node->base & (size - 1)) {
+ dbg("%s: not aligned\n", __func__);
+ /* this one isn't base aligned properly
+ * so we'll make a new entry and split it up
+ */
+ temp_dword = (node->base | (size-1)) + 1;
+
+ /* Short circuit if adjusted size is too small */
+ if ((node->length - (temp_dword - node->base)) < size)
+ continue;
+
+ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
+
+ if (!split_node)
+ return NULL;
+
+ split_node->base = node->base;
+ split_node->length = temp_dword - node->base;
+ node->base = temp_dword;
+ node->length -= split_node->length;
+
+ split_node->next = node->next;
+ node->next = split_node;
+ } /* End of non-aligned base */
+
+ /* Don't need to check if too small since we already did */
+ if (node->length > size) {
+ dbg("%s: too big\n", __func__);
+ /* this one is longer than we need
+ * so we'll make a new entry and split it up
+ */
+ split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
+
+ if (!split_node)
+ return NULL;
+
+ split_node->base = node->base + size;
+ split_node->length = node->length - size;
+ node->length = size;
+
+ /* Put it in the list */
+ split_node->next = node->next;
+ node->next = split_node;
+ } /* End of too big on top end */
+
+ dbg("%s: got one!!!\n", __func__);
+ /* If we got here, then it is the right size
+ * Now take it out of the list */
+ if (*head == node) {
+ *head = node->next;
+ } else {
+ prevnode = *head;
+ while (prevnode->next != node)
+ prevnode = prevnode->next;
+
+ prevnode->next = node->next;
+ }
+ node->next = NULL;
+ break;
+ }
+ return node;
+}
+
+
+/**
+ * cpqhp_resource_sort_and_combine - sort nodes by base addresses and clean up
+ * @head: the list to sort and clean up
+ *
+ * Description: Sorts all of the nodes in the list in ascending order by
+ * their base addresses. Also does garbage collection by
+ * combining adjacent nodes.
+ *
+ * Returns %0 if success.
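+ *
+ * For example (illustrative values), the list
+ *   {0x2000, 0x100} -> {0x1000, 0x100} -> {0x1100, 0x100}   (base, length)
+ * sorts and combines to
+ *   {0x1000, 0x200} -> {0x2000, 0x100}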
+ */
+int cpqhp_resource_sort_and_combine(struct pci_resource **head)
+{
+ struct pci_resource *node1;
+ struct pci_resource *node2;
+ int out_of_order = 1;
+
+ dbg("%s: head = %p, *head = %p\n", __func__, head, *head);
+
+ if (!(*head))
+ return 1;
+
+	dbg("*head->next = %p\n", (*head)->next);
+
+ if (!(*head)->next)
+ return 0; /* only one item on the list, already sorted! */
+
+	dbg("*head->base = 0x%x\n", (*head)->base);
+	dbg("*head->next->base = 0x%x\n", (*head)->next->base);
+ while (out_of_order) {
+ out_of_order = 0;
+
+ /* Special case for swapping list head */
+ if (((*head)->next) &&
+ ((*head)->base > (*head)->next->base)) {
+			rc = read_slot_enable(ctrl);
+ (*head) = (*head)->next;
+ node1->next = (*head)->next;
+ (*head)->next = node1;
+ out_of_order++;
+ }
+
+ node1 = (*head);
+
+ while (node1->next && node1->next->next) {
+ if (node1->next->base > node1->next->next->base) {
+ out_of_order++;
+ node2 = node1->next;
+ node1->next = node1->next->next;
+ node1 = node1->next;
+ node2->next = node1->next;
+ node1->next = node2;
+ } else
+ node1 = node1->next;
+ }
+ } /* End of out_of_order loop */
+
+ node1 = *head;
+
+ while (node1 && node1->next) {
+ if ((node1->base + node1->length) == node1->next->base) {
+ /* Combine */
+ dbg("8..\n");
+ node1->length += node1->next->length;
+ node2 = node1->next;
+ node1->next = node1->next->next;
+ kfree(node2);
+ } else
+ node1 = node1->next;
+ }
+
+ return 0;
+}
+
+
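+/* The handlers dispatched below decode INT_INPUT_CLEAR as (inferred from
+ * the masks in use; the hardware handles six slots per controller):
+ * bits 0-7 slot switch state, bits 8-15 power fault, and bits 16-31 the
+ * two presence-detect bits per slot (bit 16+n and bit 24+n for hp_slot n).
+ */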
+irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data)
+{
+ struct controller *ctrl = data;
+ u8 schedule_flag = 0;
+ u8 reset;
+ u16 misc;
+ u32 Diff;
+ u32 temp_dword;
+
+ misc = readw(ctrl->hpc_reg + MISC);
+ /*
+ * Check to see if it was our interrupt
+ */
+ if (!(misc & 0x000C))
+ return IRQ_NONE;
+
+ if (misc & 0x0004) {
+ /*
+ * Serial Output interrupt Pending
+ */
+
+ /* Clear the interrupt */
+ misc |= 0x0004;
+ writew(misc, ctrl->hpc_reg + MISC);
+
+ /* Read to clear posted writes */
+ misc = readw(ctrl->hpc_reg + MISC);
+
+		dbg("%s - waking up\n", __func__);
+ wake_up_interruptible(&ctrl->queue);
+ }
+
+ if (misc & 0x0008) {
+ /* General-interrupt-input interrupt Pending */
+ Diff = readl(ctrl->hpc_reg + INT_INPUT_CLEAR) ^ ctrl->ctrl_int_comp;
+
+ ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
+
+ /* Clear the interrupt */
+ writel(Diff, ctrl->hpc_reg + INT_INPUT_CLEAR);
+
+ /* Read it back to clear any posted writes */
+ temp_dword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
+
+ if (!Diff)
+ /* Clear all interrupts */
+ writel(0xFFFFFFFF, ctrl->hpc_reg + INT_INPUT_CLEAR);
+
+ schedule_flag += handle_switch_change((u8)(Diff & 0xFFL), ctrl);
+ schedule_flag += handle_presence_change((u16)((Diff & 0xFFFF0000L) >> 16), ctrl);
+ schedule_flag += handle_power_fault((u8)((Diff & 0xFF00L) >> 8), ctrl);
+ }
+
+ reset = readb(ctrl->hpc_reg + RESET_FREQ_MODE);
+ if (reset & 0x40) {
+ /* Bus reset has completed */
+ reset &= 0xCF;
+ writeb(reset, ctrl->hpc_reg + RESET_FREQ_MODE);
+ reset = readb(ctrl->hpc_reg + RESET_FREQ_MODE);
+ wake_up_interruptible(&ctrl->queue);
+ }
+
+ if (schedule_flag) {
+ wake_up_process(cpqhp_event_thread);
+		dbg("Waking event thread\n");
+ }
+ return IRQ_HANDLED;
+}
+
+
+/**
+ * cpqhp_slot_create - Creates a node and adds it to the proper bus.
+ * @busnumber: bus where new node is to be located
+ *
+ * Returns pointer to the new node or %NULL if unsuccessful.
+ */
+struct pci_func *cpqhp_slot_create(u8 busnumber)
+{
+ struct pci_func *new_slot;
+ struct pci_func *next;
+
+ new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL);
+ if (new_slot == NULL)
+ return new_slot;
+
+ new_slot->next = NULL;
+ new_slot->configured = 1;
+
+ if (cpqhp_slot_list[busnumber] == NULL) {
+ cpqhp_slot_list[busnumber] = new_slot;
+ } else {
+ next = cpqhp_slot_list[busnumber];
+ while (next->next != NULL)
+ next = next->next;
+ next->next = new_slot;
+ }
+ return new_slot;
+}
+
+
+/**
+ * slot_remove - Removes a node from the linked list of slots.
+ * @old_slot: slot to remove
+ *
+ * Returns %0 if successful, !0 otherwise.
+ */
+static int slot_remove(struct pci_func *old_slot)
+{
+ struct pci_func *next;
+
+ if (old_slot == NULL)
+ return 1;
+
+ next = cpqhp_slot_list[old_slot->bus];
+ if (next == NULL)
+ return 1;
+
+ if (next == old_slot) {
+ cpqhp_slot_list[old_slot->bus] = old_slot->next;
+ cpqhp_destroy_board_resources(old_slot);
+ kfree(old_slot);
+ return 0;
+ }
+
+ while ((next->next != old_slot) && (next->next != NULL))
+ next = next->next;
+
+ if (next->next == old_slot) {
+ next->next = old_slot->next;
+ cpqhp_destroy_board_resources(old_slot);
+ kfree(old_slot);
+ return 0;
+ } else
+ return 2;
+}
+
+
+/**
+ * bridge_slot_remove - Removes a node from the linked list of slots.
+ * @bridge: bridge to remove
+ *
+ * Returns %0 if successful, !0 otherwise.
+ */
+static int bridge_slot_remove(struct pci_func *bridge)
+{
+ u8 subordinateBus, secondaryBus;
+ u8 tempBus;
+ struct pci_func *next;
+
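+	/* config_space[] holds dwords, so index 0x06 is config offset 0x18:
+	 * the bus number register of a PCI-to-PCI bridge (secondary bus in
+	 * bits 15:8, subordinate bus in bits 23:16).
+	 */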
+ secondaryBus = (bridge->config_space[0x06] >> 8) & 0xFF;
+ subordinateBus = (bridge->config_space[0x06] >> 16) & 0xFF;
+
+ for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) {
+ next = cpqhp_slot_list[tempBus];
+
+ while (!slot_remove(next))
+ next = cpqhp_slot_list[tempBus];
+ }
+
+ next = cpqhp_slot_list[bridge->bus];
+
+ if (next == NULL)
+ return 1;
+
+ if (next == bridge) {
+ cpqhp_slot_list[bridge->bus] = bridge->next;
+ goto out;
+ }
+
+ while ((next->next != bridge) && (next->next != NULL))
+ next = next->next;
+
+ if (next->next != bridge)
+ return 2;
+ next->next = bridge->next;
+out:
+ kfree(bridge);
+ return 0;
+}
+
+
+/**
+ * cpqhp_slot_find - Looks for a node by bus and device; @index selects among multiple functions
+ * @bus: bus to find
+ * @device: device to find
+ * @index: is %0 for first function found, %1 for the second...
+ *
+ * Returns pointer to the node if successful, %NULL otherwise.
+ */
+struct pci_func *cpqhp_slot_find(u8 bus, u8 device, u8 index)
+{
+ int found = -1;
+ struct pci_func *func;
+
+ func = cpqhp_slot_list[bus];
+
+ if ((func == NULL) || ((func->device == device) && (index == 0)))
+ return func;
+
+ if (func->device == device)
+ found++;
+
+ while (func->next != NULL) {
+ func = func->next;
+
+ if (func->device == device)
+ found++;
+
+ if (found == index)
+ return func;
+ }
+
+ return NULL;
+}
+
+
+/* DJZ: I don't think is_bridge will work as is.
+ * FIXME */
+static int is_bridge(struct pci_func *func)
+{
+ /* Check the header type */
+ if (((func->config_space[0x03] >> 16) & 0xFF) == 0x01)
+ return 1;
+ else
+ return 0;
+}
+
+
+/**
+ * set_controller_speed - set the frequency and/or mode of a specific controller segment.
+ * @ctrl: controller to change frequency/mode for.
+ * @adapter_speed: the speed of the adapter we want to match.
+ * @hp_slot: the slot number where the adapter is installed.
+ *
+ * Returns %0 if we successfully change frequency and/or mode to match the
+ * adapter speed.
+ */
+static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_slot)
+{
+ struct slot *slot;
+ struct pci_bus *bus = ctrl->pci_bus;
+ u8 reg;
+ u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER);
+ u16 reg16;
+ u32 leds = readl(ctrl->hpc_reg + LED_CONTROL);
+
+ if (bus->cur_bus_speed == adapter_speed)
+ return 0;
+
+ /* We don't allow freq/mode changes if we find another adapter running
+ * in another slot on this controller
+ */
+ for (slot = ctrl->slot; slot; slot = slot->next) {
+ if (slot->device == (hp_slot + ctrl->slot_device_offset))
+ continue;
+ if (!slot->hotplug_slot || !slot->hotplug_slot->info)
+ continue;
+ if (slot->hotplug_slot->info->adapter_status == 0)
+ continue;
+ /* If another adapter is running on the same segment but at a
+ * lower speed/mode, we allow the new adapter to function at
+ * this rate if supported
+ */
+ if (bus->cur_bus_speed < adapter_speed)
+ return 0;
+
+ return 1;
+ }
+
+ /* If the controller doesn't support freq/mode changes and the
+ * controller is running at a higher mode, we bail
+ */
+ if ((bus->cur_bus_speed > adapter_speed) && (!ctrl->pcix_speed_capability))
+ return 1;
+
+ /* But we allow the adapter to run at a lower rate if possible */
+ if ((bus->cur_bus_speed < adapter_speed) && (!ctrl->pcix_speed_capability))
+ return 0;
+
+ /* We try to set the max speed supported by both the adapter and
+ * controller
+ */
+ if (bus->max_bus_speed < adapter_speed) {
+ if (bus->cur_bus_speed == bus->max_bus_speed)
+ return 0;
+ adapter_speed = bus->max_bus_speed;
+ }
+
+ writel(0x0L, ctrl->hpc_reg + LED_CONTROL);
+ writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE);
+
+ set_SOGO(ctrl);
+ wait_for_ctrl_irq(ctrl);
+
+ if (adapter_speed != PCI_SPEED_133MHz_PCIX)
+ reg = 0xF5;
+ else
+ reg = 0xF4;
+ pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
+
+ reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ);
+ reg16 &= ~0x000F;
+	switch (adapter_speed) {
+	case PCI_SPEED_133MHz_PCIX:
+		reg = 0x75;
+		reg16 |= 0xB;
+		break;
+	case PCI_SPEED_100MHz_PCIX:
+		reg = 0x74;
+		reg16 |= 0xA;
+		break;
+	case PCI_SPEED_66MHz_PCIX:
+		reg = 0x73;
+		reg16 |= 0x9;
+		break;
+	case PCI_SPEED_66MHz:
+		reg = 0x73;
+		reg16 |= 0x1;
+		break;
+	default: /* 33MHz PCI 2.2 */
+		reg = 0x71;
+		break;
+	}
+ reg16 |= 0xB << 12;
+ writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ);
+
+ mdelay(5);
+
+ /* Reenable interrupts */
+ writel(0, ctrl->hpc_reg + INT_MASK);
+
+ pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
+
+ /* Restart state machine */
+ reg = ~0xF;
+ pci_read_config_byte(ctrl->pci_dev, 0x43, &reg);
+ pci_write_config_byte(ctrl->pci_dev, 0x43, reg);
+
+ /* Only if mode change...*/
+ if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
+ ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
+ set_SOGO(ctrl);
+
+ wait_for_ctrl_irq(ctrl);
+ mdelay(1100);
+
+ /* Restore LED/Slot state */
+ writel(leds, ctrl->hpc_reg + LED_CONTROL);
+ writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE);
+
+ set_SOGO(ctrl);
+ wait_for_ctrl_irq(ctrl);
+
+	bus->cur_bus_speed = adapter_speed;
+	slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
+
+	if (slot)
+		info("Successfully changed frequency/mode for adapter in slot %d\n",
+			slot->number);
+ return 0;
+}
+
+/* the following routines constitute the bulk of the
+ * hotplug controller logic
+ */
+
+
+/**
+ * board_replaced - Called after a board has been replaced in the system.
+ * @func: PCI device/function information
+ * @ctrl: hotplug controller
+ *
+ * This is only used if we don't have resources for hot add.
+ * Turns power on for the board.
+ * Checks to see if board is the same.
+ * If board is same, reconfigures it.
+ * If board isn't same, turns it back off.
+ */
+static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
+{
+ struct pci_bus *bus = ctrl->pci_bus;
+ u8 hp_slot;
+ u8 temp_byte;
+ u8 adapter_speed;
+ u32 rc = 0;
+
+ hp_slot = func->device - ctrl->slot_device_offset;
+
+ /*
+ * The switch is open.
+ */
+ if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot))
+ rc = INTERLOCK_OPEN;
+ /*
+ * The board is already on
+ */
+ else if (is_slot_enabled (ctrl, hp_slot))
+ rc = CARD_FUNCTIONING;
+ else {
+ mutex_lock(&ctrl->crit_sect);
+
+ /* turn on board without attaching to the bus */
+ enable_slot_power (ctrl, hp_slot);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+
+ /* Change bits in slot power register to force another shift out
+ * NOTE: this is to work around the timer bug */
+ temp_byte = readb(ctrl->hpc_reg + SLOT_POWER);
+ writeb(0x00, ctrl->hpc_reg + SLOT_POWER);
+ writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+
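+		/* With the slot powered but still isolated from the bus,
+		 * sample the adapter's supported speed/mode and retune the
+		 * controller if the two differ.
+		 */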
+ adapter_speed = get_adapter_speed(ctrl, hp_slot);
+ if (bus->cur_bus_speed != adapter_speed)
+ if (set_controller_speed(ctrl, adapter_speed, hp_slot))
+ rc = WRONG_BUS_FREQUENCY;
+
+ /* turn off board without attaching to the bus */
+ disable_slot_power (ctrl, hp_slot);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+
+ mutex_unlock(&ctrl->crit_sect);
+
+ if (rc)
+ return rc;
+
+ mutex_lock(&ctrl->crit_sect);
+
+ slot_enable (ctrl, hp_slot);
+ green_LED_blink (ctrl, hp_slot);
+
+ amber_LED_off (ctrl, hp_slot);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+
+ mutex_unlock(&ctrl->crit_sect);
+
+ /* Wait for ~1 second because of hot plug spec */
+ long_delay(1*HZ);
+
+ /* Check for a power fault */
+ if (func->status == 0xFF) {
+ /* power fault occurred, but it was benign */
+ rc = POWER_FAILURE;
+ func->status = 0;
+ } else
+ rc = cpqhp_valid_replace(ctrl, func);
+
+ if (!rc) {
+ /* It must be the same board */
+
+ rc = cpqhp_configure_board(ctrl, func);
+
+ /* If configuration fails, turn it off
+ * Get slot won't work for devices behind
+ * bridges, but in this case it will always be
+ * called for the "base" bus/dev/func of an
+ * adapter.
+ */
+
+ mutex_lock(&ctrl->crit_sect);
+
+ amber_LED_on (ctrl, hp_slot);
+ green_LED_off (ctrl, hp_slot);
+ slot_disable (ctrl, hp_slot);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+
+ mutex_unlock(&ctrl->crit_sect);
+
+ if (rc)
+ return rc;
+ else
+ return 1;
+
+ } else {
+			/* Something is wrong
+			 *
+			 * Get slot won't work for devices behind bridges, but
+			 * in this case it will always be called for the "base"
+			 * bus/dev/func of an adapter.
+			 */
+
+ mutex_lock(&ctrl->crit_sect);
+
+ amber_LED_on (ctrl, hp_slot);
+ green_LED_off (ctrl, hp_slot);
+ slot_disable (ctrl, hp_slot);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+
+ mutex_unlock(&ctrl->crit_sect);
+ }
+
+ }
+ return rc;
+
+}
+
+
+/**
+ * board_added - Called after a board has been added to the system.
+ * @func: PCI device/function info
+ * @ctrl: hotplug controller
+ *
+ * Turns power on for the board.
+ * Configures board.
+ */
+static u32 board_added(struct pci_func *func, struct controller *ctrl)
+{
+ u8 hp_slot;
+ u8 temp_byte;
+ u8 adapter_speed;
+ int index;
+ u32 temp_register = 0xFFFFFFFF;
+ u32 rc = 0;
+ struct pci_func *new_slot = NULL;
+ struct pci_bus *bus = ctrl->pci_bus;
+ struct slot *p_slot;
+ struct resource_lists res_lists;
+
+ hp_slot = func->device - ctrl->slot_device_offset;
+	dbg("%s: func->device, slot_offset, hp_slot = %d, %d, %d\n",
+ __func__, func->device, ctrl->slot_device_offset, hp_slot);
+
+ mutex_lock(&ctrl->crit_sect);
+
+ /* turn on board without attaching to the bus */
+ enable_slot_power(ctrl, hp_slot);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+
+ /* Change bits in slot power register to force another shift out
+ * NOTE: this is to work around the timer bug
+ */
+ temp_byte = readb(ctrl->hpc_reg + SLOT_POWER);
+ writeb(0x00, ctrl->hpc_reg + SLOT_POWER);
+ writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+
+ adapter_speed = get_adapter_speed(ctrl, hp_slot);
+ if (bus->cur_bus_speed != adapter_speed)
+ if (set_controller_speed(ctrl, adapter_speed, hp_slot))
+ rc = WRONG_BUS_FREQUENCY;
+
+ /* turn off board without attaching to the bus */
+ disable_slot_power (ctrl, hp_slot);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq(ctrl);
+
+ mutex_unlock(&ctrl->crit_sect);
+
+ if (rc)
+ return rc;
+
+ p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
+
+ /* turn on board and blink green LED */
+
+ dbg("%s: before down\n", __func__);
+ mutex_lock(&ctrl->crit_sect);
+ dbg("%s: after down\n", __func__);
+
+ dbg("%s: before slot_enable\n", __func__);
+ slot_enable (ctrl, hp_slot);
+
+ dbg("%s: before green_LED_blink\n", __func__);
+ green_LED_blink (ctrl, hp_slot);
+
+	dbg("%s: before amber_LED_off\n", __func__);
+ amber_LED_off (ctrl, hp_slot);
+
+ dbg("%s: before set_SOGO\n", __func__);
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ dbg("%s: before wait_for_ctrl_irq\n", __func__);
+ wait_for_ctrl_irq (ctrl);
+ dbg("%s: after wait_for_ctrl_irq\n", __func__);
+
+ dbg("%s: before up\n", __func__);
+ mutex_unlock(&ctrl->crit_sect);
+ dbg("%s: after up\n", __func__);
+
+ /* Wait for ~1 second because of hot plug spec */
+ dbg("%s: before long_delay\n", __func__);
+ long_delay(1*HZ);
+ dbg("%s: after long_delay\n", __func__);
+
+ dbg("%s: func status = %x\n", __func__, func->status);
+ /* Check for a power fault */
+ if (func->status == 0xFF) {
+ /* power fault occurred, but it was benign */
+ temp_register = 0xFFFFFFFF;
+ dbg("%s: temp register set to %x by power fault\n", __func__, temp_register);
+ rc = POWER_FAILURE;
+ func->status = 0;
+ } else {
+ /* Get vendor/device ID u32 */
+ ctrl->pci_bus->number = func->bus;
+ rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, func->function), PCI_VENDOR_ID, &temp_register);
+ dbg("%s: pci_read_config_dword returns %d\n", __func__, rc);
+ dbg("%s: temp_register is %x\n", __func__, temp_register);
+
+ if (rc != 0) {
+ /* Something's wrong here */
+ temp_register = 0xFFFFFFFF;
+ dbg("%s: temp register set to %x by error\n", __func__, temp_register);
+ }
+ /* Preset return code. It will be changed later if things go okay. */
+ rc = NO_ADAPTER_PRESENT;
+ }
+
+ /* All F's is an empty slot or an invalid board */
+ if (temp_register != 0xFFFFFFFF) {
+ res_lists.io_head = ctrl->io_head;
+ res_lists.mem_head = ctrl->mem_head;
+ res_lists.p_mem_head = ctrl->p_mem_head;
+ res_lists.bus_head = ctrl->bus_head;
+ res_lists.irqs = NULL;
+
+ rc = configure_new_device(ctrl, func, 0, &res_lists);
+
+ dbg("%s: back from configure_new_device\n", __func__);
+ ctrl->io_head = res_lists.io_head;
+ ctrl->mem_head = res_lists.mem_head;
+ ctrl->p_mem_head = res_lists.p_mem_head;
+ ctrl->bus_head = res_lists.bus_head;
+
+ cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
+ cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
+ cpqhp_resource_sort_and_combine(&(ctrl->io_head));
+ cpqhp_resource_sort_and_combine(&(ctrl->bus_head));
+
+ if (rc) {
+ mutex_lock(&ctrl->crit_sect);
+
+ amber_LED_on (ctrl, hp_slot);
+ green_LED_off (ctrl, hp_slot);
+ slot_disable (ctrl, hp_slot);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+
+ mutex_unlock(&ctrl->crit_sect);
+ return rc;
+ } else {
+ cpqhp_save_slot_config(ctrl, func);
+ }
+
+
+ func->status = 0;
+ func->switch_save = 0x10;
+ func->is_a_board = 0x01;
+
+ /* next, we will instantiate the linux pci_dev structures (with
+ * appropriate driver notification, if already present) */
+ dbg("%s: configure linux pci_dev structure\n", __func__);
+ index = 0;
+ do {
+ new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++);
+ if (new_slot && !new_slot->pci_dev)
+ cpqhp_configure_device(ctrl, new_slot);
+ } while (new_slot);
+
+ mutex_lock(&ctrl->crit_sect);
+
+ green_LED_on (ctrl, hp_slot);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+
+ mutex_unlock(&ctrl->crit_sect);
+ } else {
+ mutex_lock(&ctrl->crit_sect);
+
+ amber_LED_on (ctrl, hp_slot);
+ green_LED_off (ctrl, hp_slot);
+ slot_disable (ctrl, hp_slot);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+
+ mutex_unlock(&ctrl->crit_sect);
+
+ return rc;
+ }
+ return 0;
+}
+
+
+/**
+ * remove_board - Turns off slot and LEDs
+ * @func: PCI device/function info
+ * @replace_flag: whether replacing or adding a new device
+ * @ctrl: target controller
+ */
+static u32 remove_board(struct pci_func *func, u32 replace_flag, struct controller *ctrl)
+{
+ int index;
+ u8 skip = 0;
+ u8 device;
+ u8 hp_slot;
+ u8 temp_byte;
+ u32 rc;
+ struct resource_lists res_lists;
+ struct pci_func *temp_func;
+
+ if (cpqhp_unconfigure_device(func))
+ return 1;
+
+ device = func->device;
+
+ hp_slot = func->device - ctrl->slot_device_offset;
+ dbg("In %s, hp_slot = %d\n", __func__, hp_slot);
+
+ /* When we get here, it is safe to change base address registers.
+ * We will attempt to save the base address register lengths */
+ if (replace_flag || !ctrl->add_support)
+ rc = cpqhp_save_base_addr_length(ctrl, func);
+ else if (!func->bus_head && !func->mem_head &&
+ !func->p_mem_head && !func->io_head) {
+ /* Here we check to see if we've saved any of the board's
+ * resources already. If so, we'll skip the attempt to
+ * determine what's being used. */
+ index = 0;
+ temp_func = cpqhp_slot_find(func->bus, func->device, index++);
+ while (temp_func) {
+ if (temp_func->bus_head || temp_func->mem_head
+ || temp_func->p_mem_head || temp_func->io_head) {
+ skip = 1;
+ break;
+ }
+ temp_func = cpqhp_slot_find(temp_func->bus, temp_func->device, index++);
+ }
+
+ if (!skip)
+ rc = cpqhp_save_used_resources(ctrl, func);
+ }
+ /* Change status to shutdown */
+ if (func->is_a_board)
+ func->status = 0x01;
+ func->configured = 0;
+
+ mutex_lock(&ctrl->crit_sect);
+
+ green_LED_off (ctrl, hp_slot);
+ slot_disable (ctrl, hp_slot);
+
+ set_SOGO(ctrl);
+
+ /* turn off SERR for slot */
+ temp_byte = readb(ctrl->hpc_reg + SLOT_SERR);
+ temp_byte &= ~(0x01 << hp_slot);
+ writeb(temp_byte, ctrl->hpc_reg + SLOT_SERR);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+
+ mutex_unlock(&ctrl->crit_sect);
+
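+	/* For a true removal on a controller that supports hot add, hand
+	 * the board's resources back to the controller's free pools and
+	 * replace its pci_func entries with a single empty-slot record.
+	 */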
+ if (!replace_flag && ctrl->add_support) {
+ while (func) {
+ res_lists.io_head = ctrl->io_head;
+ res_lists.mem_head = ctrl->mem_head;
+ res_lists.p_mem_head = ctrl->p_mem_head;
+ res_lists.bus_head = ctrl->bus_head;
+
+ cpqhp_return_board_resources(func, &res_lists);
+
+ ctrl->io_head = res_lists.io_head;
+ ctrl->mem_head = res_lists.mem_head;
+ ctrl->p_mem_head = res_lists.p_mem_head;
+ ctrl->bus_head = res_lists.bus_head;
+
+ cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
+ cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
+ cpqhp_resource_sort_and_combine(&(ctrl->io_head));
+ cpqhp_resource_sort_and_combine(&(ctrl->bus_head));
+
+ if (is_bridge(func)) {
+ bridge_slot_remove(func);
+ } else
+ slot_remove(func);
+
+ func = cpqhp_slot_find(ctrl->bus, device, 0);
+ }
+
+ /* Setup slot structure with entry for empty slot */
+ func = cpqhp_slot_create(ctrl->bus);
+
+ if (func == NULL)
+ return 1;
+
+ func->bus = ctrl->bus;
+ func->device = device;
+ func->function = 0;
+ func->configured = 0;
+ func->switch_save = 0x10;
+ func->is_a_board = 0;
+ func->p_task_event = NULL;
+ }
+
+ return 0;
+}
+
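+/* Timer callback armed on button release: note which slot fired and
+ * wake the event thread to do the actual work.
+ */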
+static void pushbutton_helper_thread(unsigned long data)
+{
+ pushbutton_pending = data;
+ wake_up_process(cpqhp_event_thread);
+}
+
+
+/* this is the main worker thread */
+static int event_thread(void *data)
+{
+ struct controller *ctrl;
+
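+	/* Sleep until the interrupt handler or the pushbutton timer wakes
+	 * us, then either run the pending pushbutton request or drain each
+	 * controller's event queue.
+	 */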
+ while (1) {
+ dbg("!!!!event_thread sleeping\n");
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+
+ if (kthread_should_stop())
+ break;
+ /* Do stuff here */
+ if (pushbutton_pending)
+ cpqhp_pushbutton_thread(pushbutton_pending);
+ else
+ for (ctrl = cpqhp_ctrl_list; ctrl; ctrl=ctrl->next)
+ interrupt_event_handler(ctrl);
+ }
+ dbg("event_thread signals exit\n");
+ return 0;
+}
+
+int cpqhp_event_start_thread(void)
+{
+ cpqhp_event_thread = kthread_run(event_thread, NULL, "phpd_event");
+ if (IS_ERR(cpqhp_event_thread)) {
+ err ("Can't start up our event thread\n");
+ return PTR_ERR(cpqhp_event_thread);
+ }
+
+ return 0;
+}
+
+
+void cpqhp_event_stop_thread(void)
+{
+ kthread_stop(cpqhp_event_thread);
+}
+
+
+static int update_slot_info(struct controller *ctrl, struct slot *slot)
+{
+ struct hotplug_slot_info *info;
+ int result;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->power_status = get_slot_enabled(ctrl, slot);
+ info->attention_status = cpq_get_attention_status(ctrl, slot);
+ info->latch_status = cpq_get_latch_status(ctrl, slot);
+ info->adapter_status = get_presence_status(ctrl, slot);
+ result = pci_hp_change_slot_info(slot->hotplug_slot, info);
+ kfree (info);
+ return result;
+}
+
+static void interrupt_event_handler(struct controller *ctrl)
+{
+ int loop = 0;
+ int change = 1;
+ struct pci_func *func;
+ u8 hp_slot;
+ struct slot *p_slot;
+
+ while (change) {
+ change = 0;
+
+ for (loop = 0; loop < 10; loop++) {
+ /* dbg("loop %d\n", loop); */
+ if (ctrl->event_queue[loop].event_type != 0) {
+ hp_slot = ctrl->event_queue[loop].hp_slot;
+
+ func = cpqhp_slot_find(ctrl->bus, (hp_slot + ctrl->slot_device_offset), 0);
+ if (!func)
+ return;
+
+ p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
+ if (!p_slot)
+ return;
+
+ dbg("hp_slot %d, func %p, p_slot %p\n",
+ hp_slot, func, p_slot);
+
+ if (ctrl->event_queue[loop].event_type == INT_BUTTON_PRESS) {
+ dbg("button pressed\n");
+ } else if (ctrl->event_queue[loop].event_type ==
+ INT_BUTTON_CANCEL) {
+ dbg("button cancel\n");
+ del_timer(&p_slot->task_event);
+
+ mutex_lock(&ctrl->crit_sect);
+
+ if (p_slot->state == BLINKINGOFF_STATE) {
+ /* slot is on */
+ dbg("turn on green LED\n");
+ green_LED_on (ctrl, hp_slot);
+ } else if (p_slot->state == BLINKINGON_STATE) {
+ /* slot is off */
+ dbg("turn off green LED\n");
+ green_LED_off (ctrl, hp_slot);
+ }
+
+ info(msg_button_cancel, p_slot->number);
+
+ p_slot->state = STATIC_STATE;
+
+ amber_LED_off (ctrl, hp_slot);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+
+ mutex_unlock(&ctrl->crit_sect);
+ }
+ /*** button Released (No action on press...) */
+ else if (ctrl->event_queue[loop].event_type == INT_BUTTON_RELEASE) {
+ dbg("button release\n");
+
+ if (is_slot_enabled (ctrl, hp_slot)) {
+ dbg("slot is on\n");
+ p_slot->state = BLINKINGOFF_STATE;
+ info(msg_button_off, p_slot->number);
+ } else {
+ dbg("slot is off\n");
+ p_slot->state = BLINKINGON_STATE;
+ info(msg_button_on, p_slot->number);
+ }
+ mutex_lock(&ctrl->crit_sect);
+
+ dbg("blink green LED and turn off amber\n");
+
+ amber_LED_off (ctrl, hp_slot);
+ green_LED_blink (ctrl, hp_slot);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+
+ mutex_unlock(&ctrl->crit_sect);
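+					/* Arm a 5 second grace timer.  A second button
+					 * press (INT_BUTTON_CANCEL) deletes it; otherwise
+					 * pushbutton_helper_thread() fires and wakes the
+					 * event thread to power the slot on or off.
+					 */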
+ init_timer(&p_slot->task_event);
+ p_slot->hp_slot = hp_slot;
+ p_slot->ctrl = ctrl;
+/* p_slot->physical_slot = physical_slot; */
+ p_slot->task_event.expires = jiffies + 5 * HZ; /* 5 second delay */
+ p_slot->task_event.function = pushbutton_helper_thread;
+					p_slot->task_event.data = (unsigned long) p_slot;
+
+ dbg("add_timer p_slot = %p\n", p_slot);
+ add_timer(&p_slot->task_event);
+ }
+ /***********POWER FAULT */
+ else if (ctrl->event_queue[loop].event_type == INT_POWER_FAULT) {
+ dbg("power fault\n");
+ } else {
+ /* refresh notification */
+ update_slot_info(ctrl, p_slot);
+ }
+
+ ctrl->event_queue[loop].event_type = 0;
+
+ change = 1;
+ }
+ } /* End of FOR loop */
+ }
+
+ return;
+}
+
+
+/**
+ * cpqhp_pushbutton_thread - handle pushbutton events
+ * @slot: target slot (struct)
+ *
+ * Runs in process context to do the blocking work a pushbutton event
+ * requires.  Handles all pending events and exits.
+ */
+void cpqhp_pushbutton_thread(unsigned long slot)
+{
+ u8 hp_slot;
+ u8 device;
+ struct pci_func *func;
+ struct slot *p_slot = (struct slot *) slot;
+ struct controller *ctrl = (struct controller *) p_slot->ctrl;
+
+ pushbutton_pending = 0;
+ hp_slot = p_slot->hp_slot;
+
+ device = p_slot->device;
+
+ if (is_slot_enabled(ctrl, hp_slot)) {
+ p_slot->state = POWEROFF_STATE;
+ /* power Down board */
+ func = cpqhp_slot_find(p_slot->bus, p_slot->device, 0);
+ dbg("In power_down_board, func = %p, ctrl = %p\n", func, ctrl);
+ if (!func) {
+ dbg("Error! func NULL in %s\n", __func__);
+			return;
+ }
+
+ if (cpqhp_process_SS(ctrl, func) != 0) {
+ amber_LED_on(ctrl, hp_slot);
+ green_LED_on(ctrl, hp_slot);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq(ctrl);
+ }
+
+ p_slot->state = STATIC_STATE;
+ } else {
+ p_slot->state = POWERON_STATE;
+ /* slot is off */
+
+ func = cpqhp_slot_find(p_slot->bus, p_slot->device, 0);
+ dbg("In add_board, func = %p, ctrl = %p\n", func, ctrl);
+ if (!func) {
+ dbg("Error! func NULL in %s\n", __func__);
+			return;
+ }
+
+ if (ctrl != NULL) {
+ if (cpqhp_process_SI(ctrl, func) != 0) {
+ amber_LED_on(ctrl, hp_slot);
+ green_LED_off(ctrl, hp_slot);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+ }
+ }
+
+ p_slot->state = STATIC_STATE;
+ }
+
+ return;
+}
+
+
+int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func)
+{
+ u8 device, hp_slot;
+ u16 temp_word;
+ u32 tempdword;
+ int rc;
+ struct slot *p_slot;
+ int physical_slot = 0;
+
+ tempdword = 0;
+
+ device = func->device;
+ hp_slot = device - ctrl->slot_device_offset;
+ p_slot = cpqhp_find_slot(ctrl, device);
+ if (p_slot)
+ physical_slot = p_slot->number;
+
+ /* Check to see if the interlock is closed */
+ tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
+
+ if (tempdword & (0x01 << hp_slot))
+ return 1;
+
+ if (func->is_a_board) {
+ rc = board_replaced(func, ctrl);
+ } else {
+ /* add board */
+ slot_remove(func);
+
+ func = cpqhp_slot_create(ctrl->bus);
+ if (func == NULL)
+ return 1;
+
+ func->bus = ctrl->bus;
+ func->device = device;
+ func->function = 0;
+ func->configured = 0;
+ func->is_a_board = 1;
+
+ /* We have to save the presence info for these slots */
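+		/* The upper word of ctrl_int_comp carries two presence bits
+		 * per slot (bit hp_slot and bit hp_slot + 8); pack them into
+		 * bits 0-1 of presence_save.
+		 */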
+ temp_word = ctrl->ctrl_int_comp >> 16;
+ func->presence_save = (temp_word >> hp_slot) & 0x01;
+ func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
+
+ if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
+ func->switch_save = 0;
+ } else {
+ func->switch_save = 0x10;
+ }
+
+ rc = board_added(func, ctrl);
+ if (rc) {
+ if (is_bridge(func)) {
+ bridge_slot_remove(func);
+ } else
+ slot_remove(func);
+
+ /* Setup slot structure with entry for empty slot */
+ func = cpqhp_slot_create(ctrl->bus);
+
+ if (func == NULL)
+ return 1;
+
+ func->bus = ctrl->bus;
+ func->device = device;
+ func->function = 0;
+ func->configured = 0;
+ func->is_a_board = 0;
+
+ /* We have to save the presence info for these slots */
+ temp_word = ctrl->ctrl_int_comp >> 16;
+ func->presence_save = (temp_word >> hp_slot) & 0x01;
+ func->presence_save |=
+ (temp_word >> (hp_slot + 7)) & 0x02;
+
+ if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
+ func->switch_save = 0;
+ } else {
+ func->switch_save = 0x10;
+ }
+ }
+ }
+
+ if (rc)
+ dbg("%s: rc = %d\n", __func__, rc);
+
+ if (p_slot)
+ update_slot_info(ctrl, p_slot);
+
+ return rc;
+}
+
+
+int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
+{
+ u8 device, class_code, header_type, BCR;
+ u8 index = 0;
+ u8 replace_flag;
+ u32 rc = 0;
+ unsigned int devfn;
+ struct slot *p_slot;
+ struct pci_bus *pci_bus = ctrl->pci_bus;
+ int physical_slot=0;
+
+ device = func->device;
+ func = cpqhp_slot_find(ctrl->bus, device, index++);
+ p_slot = cpqhp_find_slot(ctrl, device);
+ if (p_slot)
+ physical_slot = p_slot->number;
+
+ /* Make sure there are no video controllers here */
+ while (func && !rc) {
+ pci_bus->number = func->bus;
+ devfn = PCI_DEVFN(func->device, func->function);
+
+ /* Check the Class Code */
+ rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code);
+ if (rc)
+ return rc;
+
+ if (class_code == PCI_BASE_CLASS_DISPLAY) {
+ /* Display/Video adapter (not supported) */
+ rc = REMOVE_NOT_SUPPORTED;
+ } else {
+ /* See if it's a bridge */
+ rc = pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
+ if (rc)
+ return rc;
+
+ /* If it's a bridge, check the VGA Enable bit */
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ rc = pci_bus_read_config_byte (pci_bus, devfn, PCI_BRIDGE_CONTROL, &BCR);
+ if (rc)
+ return rc;
+
+ /* If the VGA Enable bit is set, remove isn't
+ * supported */
+ if (BCR & PCI_BRIDGE_CTL_VGA)
+ rc = REMOVE_NOT_SUPPORTED;
+ }
+ }
+
+ func = cpqhp_slot_find(ctrl->bus, device, index++);
+ }
+
+ func = cpqhp_slot_find(ctrl->bus, device, 0);
+ if ((func != NULL) && !rc) {
+ /* FIXME: Replace flag should be passed into process_SS */
+ replace_flag = !(ctrl->add_support);
+ rc = remove_board(func, replace_flag, ctrl);
+ } else if (!rc) {
+ rc = 1;
+ }
+
+ if (p_slot)
+ update_slot_info(ctrl, p_slot);
+
+ return rc;
+}
+
+/**
+ * switch_leds - switch the LEDs from one side to the other.
+ * @ctrl: controller to use
+ * @num_of_slots: number of slots to use
+ * @work_LED: LED control value
+ * @direction: 1 to start from the left side, 0 to start right.
+ */
+static void switch_leds(struct controller *ctrl, const int num_of_slots,
+ u32 *work_LED, const int direction)
+{
+ int loop;
+
+ for (loop = 0; loop < num_of_slots; loop++) {
+ if (direction)
+ *work_LED = *work_LED >> 1;
+ else
+ *work_LED = *work_LED << 1;
+ writel(*work_LED, ctrl->hpc_reg + LED_CONTROL);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOGO interrupt */
+ wait_for_ctrl_irq(ctrl);
+
+ /* Get ready for next iteration */
+ long_delay((2*HZ)/10);
+ }
+}
+
+/**
+ * cpqhp_hardware_test - runs hardware tests
+ * @ctrl: target controller
+ * @test_num: the number written to the "test" file in sysfs.
+ *
+ * For hot plug ctrl folks to play with.
+ */
+int cpqhp_hardware_test(struct controller *ctrl, int test_num)
+{
+ u32 save_LED;
+ u32 work_LED;
+ int loop;
+ int num_of_slots;
+
+ num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f;
+
+ switch (test_num) {
+ case 1:
+ /* Do stuff here! */
+
+ /* Do that funky LED thing */
+ /* so we can restore them later */
+ save_LED = readl(ctrl->hpc_reg + LED_CONTROL);
+ work_LED = 0x01010101;
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+
+ work_LED = 0x01010000;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ work_LED = 0x00000101;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+
+ work_LED = 0x01010000;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ for (loop = 0; loop < num_of_slots; loop++) {
+ set_SOGO(ctrl);
+
+ /* Wait for SOGO interrupt */
+ wait_for_ctrl_irq (ctrl);
+
+ /* Get ready for next iteration */
+ long_delay((3*HZ)/10);
+ work_LED = work_LED >> 16;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOGO interrupt */
+ wait_for_ctrl_irq (ctrl);
+
+ /* Get ready for next iteration */
+ long_delay((3*HZ)/10);
+ work_LED = work_LED << 16;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ work_LED = work_LED << 1;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ }
+
+ /* put it back the way it was */
+ writel(save_LED, ctrl->hpc_reg + LED_CONTROL);
+
+ set_SOGO(ctrl);
+
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+ break;
+ case 2:
+ /* Do other stuff here! */
+ break;
+ case 3:
+ /* and more... */
+ break;
+ }
+ return 0;
+}
+
+
+/**
+ * configure_new_device - Configures the PCI header information of one board.
+ * @ctrl: pointer to controller structure
+ * @func: pointer to function structure
+ * @behind_bridge: 1 if this is a recursive call, 0 if not
+ * @resources: pointer to set of resource lists
+ *
+ * Returns 0 if success.
+ */
+static u32 configure_new_device(struct controller *ctrl, struct pci_func *func,
+ u8 behind_bridge, struct resource_lists *resources)
+{
+ u8 temp_byte, function, max_functions, stop_it;
+ int rc;
+ u32 ID;
+ struct pci_func *new_slot;
+ int index;
+
+ new_slot = func;
+
+ dbg("%s\n", __func__);
+ /* Check for Multi-function device */
+ ctrl->pci_bus->number = func->bus;
+ rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(func->device, func->function), 0x0E, &temp_byte);
+ if (rc) {
+ dbg("%s: rc = %d\n", __func__, rc);
+ return rc;
+ }
+
+ if (temp_byte & 0x80) /* Multi-function device */
+ max_functions = 8;
+ else
+ max_functions = 1;
+
+ function = 0;
+
+ do {
+ rc = configure_new_function(ctrl, new_slot, behind_bridge, resources);
+
+ if (rc) {
+ dbg("configure_new_function failed %d\n",rc);
+ index = 0;
+
+ while (new_slot) {
+ new_slot = cpqhp_slot_find(new_slot->bus, new_slot->device, index++);
+
+ if (new_slot)
+ cpqhp_return_board_resources(new_slot, resources);
+ }
+
+ return rc;
+ }
+
+ function++;
+
+ stop_it = 0;
+
+ /* The following loop skips to the next present function
+ * and creates a board structure */
+
+ while ((function < max_functions) && (!stop_it)) {
+ pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID);
+
+ if (ID == 0xFFFFFFFF) {
+ function++;
+ } else {
+ /* Setup slot structure. */
+ new_slot = cpqhp_slot_create(func->bus);
+
+ if (new_slot == NULL)
+ return 1;
+
+ new_slot->bus = func->bus;
+ new_slot->device = func->device;
+ new_slot->function = function;
+ new_slot->is_a_board = 1;
+ new_slot->status = 0;
+
+ stop_it++;
+ }
+ }
+
+ } while (function < max_functions);
+ dbg("returning from configure_new_device\n");
+
+ return 0;
+}
+
+
+/*
+ * Configuration logic that involves the hotplug data structures and
+ * their bookkeeping
+ */
+
+
+/**
+ * configure_new_function - Configures the PCI header information of one device
+ * @ctrl: pointer to controller structure
+ * @func: pointer to function structure
+ * @behind_bridge: 1 if this is a recursive call, 0 if not
+ * @resources: pointer to set of resource lists
+ *
+ * Calls itself recursively for bridged devices.
+ * Returns 0 if success.
+ */
+static int configure_new_function(struct controller *ctrl, struct pci_func *func,
+ u8 behind_bridge,
+ struct resource_lists *resources)
+{
+ int cloop;
+ u8 IRQ = 0;
+ u8 temp_byte;
+ u8 device;
+ u8 class_code;
+ u16 command;
+ u16 temp_word;
+ u32 temp_dword;
+ u32 rc;
+ u32 temp_register;
+ u32 base;
+ u32 ID;
+ unsigned int devfn;
+ struct pci_resource *mem_node;
+ struct pci_resource *p_mem_node;
+ struct pci_resource *io_node;
+ struct pci_resource *bus_node;
+ struct pci_resource *hold_mem_node;
+ struct pci_resource *hold_p_mem_node;
+ struct pci_resource *hold_IO_node;
+ struct pci_resource *hold_bus_node;
+ struct irq_mapping irqs;
+ struct pci_func *new_slot;
+ struct pci_bus *pci_bus;
+ struct resource_lists temp_resources;
+
+ pci_bus = ctrl->pci_bus;
+ pci_bus->number = func->bus;
+ devfn = PCI_DEVFN(func->device, func->function);
+
+ /* Check for Bridge */
+ rc = pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &temp_byte);
+ if (rc)
+ return rc;
+
+ if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
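+		/* This is a bridge: assign it a bus range, carve I/O,
+		 * memory and prefetch windows out of our resource pools,
+		 * recursively configure everything behind it, then hand
+		 * back whatever is left unused.
+		 */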
+ /* set Primary bus */
+ dbg("set Primary bus = %d\n", func->bus);
+ rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus);
+ if (rc)
+ return rc;
+
+ /* find range of buses to use */
+ dbg("find ranges of buses to use\n");
+ bus_node = get_max_resource(&(resources->bus_head), 1);
+
+ /* If we don't have any buses to allocate, we can't continue */
+ if (!bus_node)
+ return -ENOMEM;
+
+ /* set Secondary bus */
+ temp_byte = bus_node->base;
+ dbg("set Secondary bus = %d\n", bus_node->base);
+ rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, temp_byte);
+ if (rc)
+ return rc;
+
+ /* set subordinate bus */
+ temp_byte = bus_node->base + bus_node->length - 1;
+ dbg("set subordinate bus = %d\n", bus_node->base + bus_node->length - 1);
+ rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SUBORDINATE_BUS, temp_byte);
+ if (rc)
+ return rc;
+
+ /* set subordinate Latency Timer and base Latency Timer */
+ temp_byte = 0x40;
+ rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SEC_LATENCY_TIMER, temp_byte);
+ if (rc)
+ return rc;
+ rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_LATENCY_TIMER, temp_byte);
+ if (rc)
+ return rc;
+
+ /* set Cache Line size */
+ temp_byte = 0x08;
+ rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_CACHE_LINE_SIZE, temp_byte);
+ if (rc)
+ return rc;
+
+ /* Setup the IO, memory, and prefetchable windows */
+ io_node = get_max_resource(&(resources->io_head), 0x1000);
+ if (!io_node)
+ return -ENOMEM;
+ mem_node = get_max_resource(&(resources->mem_head), 0x100000);
+ if (!mem_node)
+ return -ENOMEM;
+ p_mem_node = get_max_resource(&(resources->p_mem_head), 0x100000);
+ if (!p_mem_node)
+ return -ENOMEM;
+ dbg("Setup the IO, memory, and prefetchable windows\n");
+ dbg("io_node\n");
+ dbg("(base, len, next) (%x, %x, %p)\n", io_node->base,
+ io_node->length, io_node->next);
+ dbg("mem_node\n");
+ dbg("(base, len, next) (%x, %x, %p)\n", mem_node->base,
+ mem_node->length, mem_node->next);
+ dbg("p_mem_node\n");
+ dbg("(base, len, next) (%x, %x, %p)\n", p_mem_node->base,
+ p_mem_node->length, p_mem_node->next);
+
+ /* set up the IRQ info */
+ if (!resources->irqs) {
+ irqs.barber_pole = 0;
+ irqs.interrupt[0] = 0;
+ irqs.interrupt[1] = 0;
+ irqs.interrupt[2] = 0;
+ irqs.interrupt[3] = 0;
+ irqs.valid_INT = 0;
+ } else {
+ irqs.barber_pole = resources->irqs->barber_pole;
+ irqs.interrupt[0] = resources->irqs->interrupt[0];
+ irqs.interrupt[1] = resources->irqs->interrupt[1];
+ irqs.interrupt[2] = resources->irqs->interrupt[2];
+ irqs.interrupt[3] = resources->irqs->interrupt[3];
+ irqs.valid_INT = resources->irqs->valid_INT;
+ }
+
+ /* set up resource lists that are now aligned on top and bottom
+ * for anything behind the bridge. */
+ temp_resources.bus_head = bus_node;
+ temp_resources.io_head = io_node;
+ temp_resources.mem_head = mem_node;
+ temp_resources.p_mem_head = p_mem_node;
+ temp_resources.irqs = &irqs;
+
+ /* Make copies of the nodes we are going to pass down so that
+ * if there is a problem,we can just use these to free resources
+ */
+ hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL);
+ hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL);
+ hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL);
+ hold_p_mem_node = kmalloc(sizeof(*hold_p_mem_node), GFP_KERNEL);
+
+ if (!hold_bus_node || !hold_IO_node || !hold_mem_node || !hold_p_mem_node) {
+ kfree(hold_bus_node);
+ kfree(hold_IO_node);
+ kfree(hold_mem_node);
+ kfree(hold_p_mem_node);
+
+ return 1;
+ }
+
+ memcpy(hold_bus_node, bus_node, sizeof(struct pci_resource));
+
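+		/* The bridge itself consumes the first bus number of the
+		 * range (its secondary bus); the rest is handed down for
+		 * anything behind it.
+		 */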
+ bus_node->base += 1;
+ bus_node->length -= 1;
+ bus_node->next = NULL;
+
+ /* If we have IO resources copy them and fill in the bridge's
+ * IO range registers */
+ memcpy(hold_IO_node, io_node, sizeof(struct pci_resource));
+ io_node->next = NULL;
+
+ /* set IO base and Limit registers */
+ temp_byte = io_node->base >> 8;
+ rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_BASE, temp_byte);
+
+ temp_byte = (io_node->base + io_node->length - 1) >> 8;
+ rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_LIMIT, temp_byte);
+
+ /* Copy the memory resources and fill in the bridge's memory
+ * range registers.
+ */
+ memcpy(hold_mem_node, mem_node, sizeof(struct pci_resource));
+ mem_node->next = NULL;
+
+ /* set Mem base and Limit registers */
+ temp_word = mem_node->base >> 16;
+ rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_BASE, temp_word);
+
+ temp_word = (mem_node->base + mem_node->length - 1) >> 16;
+ rc = pci_bus_write_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word);
+
+ memcpy(hold_p_mem_node, p_mem_node, sizeof(struct pci_resource));
+ p_mem_node->next = NULL;
+
+ /* set Pre Mem base and Limit registers */
+ temp_word = p_mem_node->base >> 16;
+ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_BASE, temp_word);
+
+ temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16;
+ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word);
+
+ /* Adjust this to compensate for extra adjustment in first loop
+ */
+ irqs.barber_pole--;
+
+ rc = 0;
+
+ /* Here we actually find the devices and configure them */
+ for (device = 0; (device <= 0x1F) && !rc; device++) {
+ irqs.barber_pole = (irqs.barber_pole + 1) & 0x03;
+
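+			/* Probe for a device by temporarily pointing the
+			 * config accessors at the bridge's secondary bus.
+			 */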
+ ID = 0xFFFFFFFF;
+ pci_bus->number = hold_bus_node->base;
+ pci_bus_read_config_dword (pci_bus, PCI_DEVFN(device, 0), 0x00, &ID);
+ pci_bus->number = func->bus;
+
+ if (ID != 0xFFFFFFFF) { /* device present */
+ /* Setup slot structure. */
+ new_slot = cpqhp_slot_create(hold_bus_node->base);
+
+ if (new_slot == NULL) {
+ rc = -ENOMEM;
+ continue;
+ }
+
+ new_slot->bus = hold_bus_node->base;
+ new_slot->device = device;
+ new_slot->function = 0;
+ new_slot->is_a_board = 1;
+ new_slot->status = 0;
+
+ rc = configure_new_device(ctrl, new_slot, 1, &temp_resources);
+ dbg("configure_new_device rc=0x%x\n",rc);
+ } /* End of IF (device in slot?) */
+ } /* End of FOR loop */
+
+ if (rc)
+ goto free_and_out;
+ /* save the interrupt routing information */
+ if (resources->irqs) {
+ resources->irqs->interrupt[0] = irqs.interrupt[0];
+ resources->irqs->interrupt[1] = irqs.interrupt[1];
+ resources->irqs->interrupt[2] = irqs.interrupt[2];
+ resources->irqs->interrupt[3] = irqs.interrupt[3];
+ resources->irqs->valid_INT = irqs.valid_INT;
+ } else if (!behind_bridge) {
+ /* We need to hook up the interrupts here */
+ for (cloop = 0; cloop < 4; cloop++) {
+ if (irqs.valid_INT & (0x01 << cloop)) {
+ rc = cpqhp_set_irq(func->bus, func->device,
+ cloop + 1, irqs.interrupt[cloop]);
+ if (rc)
+ goto free_and_out;
+ }
+ } /* end of for loop */
+ }
+ /* Return unused bus resources
+ * First use the temporary node to store information for
+ * the board */
+ if (bus_node && temp_resources.bus_head) {
+ hold_bus_node->length = bus_node->base - hold_bus_node->base;
+
+ hold_bus_node->next = func->bus_head;
+ func->bus_head = hold_bus_node;
+
+ temp_byte = temp_resources.bus_head->base - 1;
+
+ /* set subordinate bus */
+ rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_SUBORDINATE_BUS, temp_byte);
+
+ if (temp_resources.bus_head->length == 0) {
+ kfree(temp_resources.bus_head);
+ temp_resources.bus_head = NULL;
+ } else {
+ return_resource(&(resources->bus_head), temp_resources.bus_head);
+ }
+ }
+
+ /* If we have IO space available and there is some left,
+ * return the unused portion */
+ if (hold_IO_node && temp_resources.io_head) {
+ io_node = do_pre_bridge_resource_split(&(temp_resources.io_head),
+ &hold_IO_node, 0x1000);
+
+ /* Check if we were able to split something off */
+ if (io_node) {
+ hold_IO_node->base = io_node->base + io_node->length;
+
+				temp_byte = (hold_IO_node->base) >> 8;
+				rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_IO_BASE, temp_byte);
+
+ return_resource(&(resources->io_head), io_node);
+ }
+
+ io_node = do_bridge_resource_split(&(temp_resources.io_head), 0x1000);
+
+ /* Check if we were able to split something off */
+ if (io_node) {
+ /* First use the temporary node to store
+ * information for the board */
+ hold_IO_node->length = io_node->base - hold_IO_node->base;
+
+ /* If we used any, add it to the board's list */
+ if (hold_IO_node->length) {
+ hold_IO_node->next = func->io_head;
+ func->io_head = hold_IO_node;
+
+ temp_byte = (io_node->base - 1) >> 8;
+ rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_IO_LIMIT, temp_byte);
+
+ return_resource(&(resources->io_head), io_node);
+ } else {
+ /* it doesn't need any IO */
+ temp_word = 0x0000;
+ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_IO_LIMIT, temp_word);
+
+ return_resource(&(resources->io_head), io_node);
+ kfree(hold_IO_node);
+ }
+ } else {
+ /* it used most of the range */
+ hold_IO_node->next = func->io_head;
+ func->io_head = hold_IO_node;
+ }
+ } else if (hold_IO_node) {
+ /* it used the whole range */
+ hold_IO_node->next = func->io_head;
+ func->io_head = hold_IO_node;
+ }
+ /* If we have memory space available and there is some left,
+ * return the unused portion */
+ if (hold_mem_node && temp_resources.mem_head) {
+		mem_node = do_pre_bridge_resource_split(&(temp_resources.mem_head),
+ &hold_mem_node, 0x100000);
+
+ /* Check if we were able to split something off */
+ if (mem_node) {
+ hold_mem_node->base = mem_node->base + mem_node->length;
+
+ temp_word = (hold_mem_node->base) >> 16;
+ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_BASE, temp_word);
+
+ return_resource(&(resources->mem_head), mem_node);
+ }
+
+ mem_node = do_bridge_resource_split(&(temp_resources.mem_head), 0x100000);
+
+ /* Check if we were able to split something off */
+ if (mem_node) {
+ /* First use the temporary node to store
+ * information for the board */
+ hold_mem_node->length = mem_node->base - hold_mem_node->base;
+
+ if (hold_mem_node->length) {
+ hold_mem_node->next = func->mem_head;
+ func->mem_head = hold_mem_node;
+
+ /* configure end address */
+ temp_word = (mem_node->base - 1) >> 16;
+ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word);
+
+ /* Return unused resources to the pool */
+ return_resource(&(resources->mem_head), mem_node);
+ } else {
+ /* it doesn't need any Mem */
+ temp_word = 0x0000;
+ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word);
+
+ return_resource(&(resources->mem_head), mem_node);
+ kfree(hold_mem_node);
+ }
+ } else {
+ /* it used most of the range */
+ hold_mem_node->next = func->mem_head;
+ func->mem_head = hold_mem_node;
+ }
+ } else if (hold_mem_node) {
+ /* it used the whole range */
+ hold_mem_node->next = func->mem_head;
+ func->mem_head = hold_mem_node;
+ }
+ /* If we have prefetchable memory space available and there
+ * is some left at the end, return the unused portion */
+ if (temp_resources.p_mem_head) {
+ p_mem_node = do_pre_bridge_resource_split(&(temp_resources.p_mem_head),
+ &hold_p_mem_node, 0x100000);
+
+ /* Check if we were able to split something off */
+ if (p_mem_node) {
+ hold_p_mem_node->base = p_mem_node->base + p_mem_node->length;
+
+ temp_word = (hold_p_mem_node->base) >> 16;
+ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_BASE, temp_word);
+
+ return_resource(&(resources->p_mem_head), p_mem_node);
+ }
+
+ p_mem_node = do_bridge_resource_split(&(temp_resources.p_mem_head), 0x100000);
+
+ /* Check if we were able to split something off */
+ if (p_mem_node) {
+ /* First use the temporary node to store
+ * information for the board */
+ hold_p_mem_node->length = p_mem_node->base - hold_p_mem_node->base;
+
+ /* If we used any, add it to the board's list */
+ if (hold_p_mem_node->length) {
+ hold_p_mem_node->next = func->p_mem_head;
+ func->p_mem_head = hold_p_mem_node;
+
+ temp_word = (p_mem_node->base - 1) >> 16;
+ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word);
+
+ return_resource(&(resources->p_mem_head), p_mem_node);
+ } else {
+ /* it doesn't need any PMem */
+ temp_word = 0x0000;
+ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word);
+
+ return_resource(&(resources->p_mem_head), p_mem_node);
+ kfree(hold_p_mem_node);
+ }
+ } else {
+ /* it used the most of the range */
+ hold_p_mem_node->next = func->p_mem_head;
+ func->p_mem_head = hold_p_mem_node;
+ }
+ } else if (hold_p_mem_node) {
+ /* it used the whole range */
+ hold_p_mem_node->next = func->p_mem_head;
+ func->p_mem_head = hold_p_mem_node;
+ }
+ /* We should be configuring an IRQ and the bridge's base address
+ * registers if it needs them. Although we have never seen such
+ * a device */
+
+ /* enable card */
+ command = 0x0157; /* = PCI_COMMAND_IO |
+ * PCI_COMMAND_MEMORY |
+ * PCI_COMMAND_MASTER |
+ * PCI_COMMAND_INVALIDATE |
+ * PCI_COMMAND_PARITY |
+ * PCI_COMMAND_SERR */
+ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_COMMAND, command);
+
+ /* set Bridge Control Register */
+ command = 0x07; /* = PCI_BRIDGE_CTL_PARITY |
+ * PCI_BRIDGE_CTL_SERR |
+ * PCI_BRIDGE_CTL_NO_ISA */
+ rc = pci_bus_write_config_word (pci_bus, devfn, PCI_BRIDGE_CONTROL, command);
+ } else if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_NORMAL) {
+ /* Standard device */
+ rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code);
+
+ if (class_code == PCI_BASE_CLASS_DISPLAY) {
+ /* Display (video) adapter (not supported) */
+ return DEVICE_TYPE_NOT_SUPPORTED;
+ }
+ /* Figure out IO and memory needs */
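+		/* Standard BAR sizing: write all 1s to each base address
+		 * register and read it back.  The writable address bits come
+		 * back as 1s, so masking off the type bits and taking the
+		 * two's complement (~base + 1) yields the region size; the
+		 * low bits encode I/O vs. memory vs. prefetchable.
+		 */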
+ for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
+ temp_register = 0xFFFFFFFF;
+
+ dbg("CND: bus=%d, devfn=%d, offset=%d\n", pci_bus->number, devfn, cloop);
+ rc = pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
+
+ rc = pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp_register);
+ dbg("CND: base = 0x%x\n", temp_register);
+
+ if (temp_register) { /* If this register is implemented */
+ if ((temp_register & 0x03L) == 0x01) {
+ /* Map IO */
+
+ /* set base = amount of IO space */
+ base = temp_register & 0xFFFFFFFC;
+ base = ~base + 1;
+
+ dbg("CND: length = 0x%x\n", base);
+					io_node = get_io_resource(&(resources->io_head), base);
+					if (!io_node)
+						return -ENOMEM;
+					dbg("Got io_node start = %8.8x, length = %8.8x next (%p)\n",
+					    io_node->base, io_node->length, io_node->next);
+					dbg("func (%p) io_head (%p)\n", func, func->io_head);
+
+					/* allocate the resource to the board */
+					base = io_node->base;
+
+					io_node->next = func->io_head;
+					func->io_head = io_node;
+ } else if ((temp_register & 0x0BL) == 0x08) {
+ /* Map prefetchable memory */
+ base = temp_register & 0xFFFFFFF0;
+ base = ~base + 1;
+
+ dbg("CND: length = 0x%x\n", base);
+ p_mem_node = get_resource(&(resources->p_mem_head), base);
+
+ /* allocate the resource to the board */
+ if (p_mem_node) {
+ base = p_mem_node->base;
+
+ p_mem_node->next = func->p_mem_head;
+ func->p_mem_head = p_mem_node;
+ } else
+ return -ENOMEM;
+ } else if ((temp_register & 0x0BL) == 0x00) {
+ /* Map memory */
+ base = temp_register & 0xFFFFFFF0;
+ base = ~base + 1;
+
+ dbg("CND: length = 0x%x\n", base);
+ mem_node = get_resource(&(resources->mem_head), base);
+
+ /* allocate the resource to the board */
+ if (mem_node) {
+ base = mem_node->base;
+
+ mem_node->next = func->mem_head;
+ func->mem_head = mem_node;
+ } else
+ return -ENOMEM;
+ } else {
+ /* Reserved bits or requesting space below 1M */
+ return NOT_ENOUGH_RESOURCES;
+ }
+
+ rc = pci_bus_write_config_dword(pci_bus, devfn, cloop, base);
+
+ /* Check for 64-bit base */
+ if ((temp_register & 0x07L) == 0x04) {
+ cloop += 4;
+
+ /* Upper 32 bits of address always zero
+ * on today's systems */
+ /* FIXME this is probably not true on
+ * Alpha and ia64??? */
+ base = 0;
+ rc = pci_bus_write_config_dword(pci_bus, devfn, cloop, base);
+ }
+ }
+ } /* End of base register loop */
+ if (cpqhp_legacy_mode) {
+ /* Figure out which interrupt pin this function uses */
+ rc = pci_bus_read_config_byte (pci_bus, devfn,
+ PCI_INTERRUPT_PIN, &temp_byte);
+
+ /* If this function needs an interrupt and we are behind
+ * a bridge and the pin is tied to something that's
+ * already mapped, set this one the same */
+ if (temp_byte && resources->irqs &&
+ (resources->irqs->valid_INT &
+ (0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) {
+ /* We have to share with something already set up */
+ IRQ = resources->irqs->interrupt[(temp_byte +
+ resources->irqs->barber_pole - 1) & 0x03];
+ } else {
+ /* Program IRQ based on card type */
+ rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code);
+
+ if (class_code == PCI_BASE_CLASS_STORAGE)
+ IRQ = cpqhp_disk_irq;
+ else
+ IRQ = cpqhp_nic_irq;
+ }
+
+ /* IRQ Line */
+ rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_INTERRUPT_LINE, IRQ);
+ }
+
+ if (!behind_bridge) {
+ rc = cpqhp_set_irq(func->bus, func->device, temp_byte, IRQ);
+ if (rc)
+ return 1;
+ } else {
+ /* TBD - this code may also belong in the other clause
+ * of this If statement */
+ resources->irqs->interrupt[(temp_byte + resources->irqs->barber_pole - 1) & 0x03] = IRQ;
+		resources->irqs->valid_INT |= 0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03);
+ }
+
+ /* Latency Timer */
+ temp_byte = 0x40;
+ rc = pci_bus_write_config_byte(pci_bus, devfn,
+ PCI_LATENCY_TIMER, temp_byte);
+
+ /* Cache Line size */
+ temp_byte = 0x08;
+ rc = pci_bus_write_config_byte(pci_bus, devfn,
+ PCI_CACHE_LINE_SIZE, temp_byte);
+
+ /* disable ROM base Address */
+ temp_dword = 0x00L;
+	rc = pci_bus_write_config_dword(pci_bus, devfn,
+					PCI_ROM_ADDRESS, temp_dword);
+
+ /* enable card */
+ temp_word = 0x0157; /* = PCI_COMMAND_IO |
+ * PCI_COMMAND_MEMORY |
+ * PCI_COMMAND_MASTER |
+ * PCI_COMMAND_INVALIDATE |
+ * PCI_COMMAND_PARITY |
+ * PCI_COMMAND_SERR */
+ rc = pci_bus_write_config_word (pci_bus, devfn,
+ PCI_COMMAND, temp_word);
+ } else { /* End of Not-A-Bridge else */
+ /* It's some strange type of PCI adapter (Cardbus?) */
+ return DEVICE_TYPE_NOT_SUPPORTED;
+ }
+
+ func->configured = 1;
+
+ return 0;
+free_and_out:
+ cpqhp_destroy_resource_list (&temp_resources);
+
+	return_resource(&(resources->bus_head), hold_bus_node);
+	return_resource(&(resources->io_head), hold_IO_node);
+	return_resource(&(resources->mem_head), hold_mem_node);
+	return_resource(&(resources->p_mem_head), hold_p_mem_node);
+ return rc;
+}
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
new file mode 100644
index 000000000..1e08ff8c2
--- /dev/null
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -0,0 +1,667 @@
+/*
+ * Compaq Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <asm/uaccess.h>
+#include "cpqphp.h"
+#include "cpqphp_nvram.h"
+
+
+#define ROM_INT15_PHY_ADDR 0x0FF859
+#define READ_EV 0xD8A4
+#define WRITE_EV 0xD8A5
+
+struct register_foo {
+ union {
+ unsigned long lword; /* eax */
+ unsigned short word; /* ax */
+
+ struct {
+ unsigned char low; /* al */
+ unsigned char high; /* ah */
+ } byte;
+ } data;
+
+ unsigned char opcode; /* see below */
+ unsigned long length; /* if the reg. is a pointer, how much data */
+} __attribute__ ((packed));
+
+struct all_reg {
+ struct register_foo eax_reg;
+ struct register_foo ebx_reg;
+ struct register_foo ecx_reg;
+ struct register_foo edx_reg;
+ struct register_foo edi_reg;
+ struct register_foo esi_reg;
+ struct register_foo eflags_reg;
+} __attribute__ ((packed));
+
+
+struct ev_hrt_header {
+ u8 Version;
+ u8 num_of_ctrl;
+ u8 next;
+};
+
+struct ev_hrt_ctrl {
+ u8 bus;
+ u8 device;
+ u8 function;
+ u8 mem_avail;
+ u8 p_mem_avail;
+ u8 io_avail;
+ u8 bus_avail;
+ u8 next;
+};
+
+
+static u8 evbuffer_init;
+static u8 evbuffer_length;
+static u8 evbuffer[1024];
+
+static void __iomem *compaq_int15_entry_point;
+
+/* lock serializing the int15 BIOS calls made by access_EV() */
+static spinlock_t int15_lock;
+
+
+/* The following functions deal with saving and restoring the hotplug
+ * resource table in a BIOS environment variable (EV).
+ */
+
+/*
+ * We really shouldn't be doing this unless there is a _very_ good reason to!!!
+ * greg k-h
+ */
+
+
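+/* Append helpers for building the EV image in place: each advances
+ * *p_buffer and the used count, and fails with 1 if the value would
+ * overflow the available space.
+ */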
+static u32 add_byte(u32 **p_buffer, u8 value, u32 *used, u32 *avail)
+{
+ u8 **tByte;
+
+ if ((*used + 1) > *avail)
+ return(1);
+
+ *((u8*)*p_buffer) = value;
+ tByte = (u8**)p_buffer;
+ (*tByte)++;
+ *used+=1;
+ return(0);
+}
+
+
+static u32 add_dword(u32 **p_buffer, u32 value, u32 *used, u32 *avail)
+{
+ if ((*used + 4) > *avail)
+ return(1);
+
+ **p_buffer = value;
+ (*p_buffer)++;
+ *used+=4;
+ return(0);
+}
+
+
+/*
+ * check_for_compaq_ROM
+ *
+ * this routine verifies that the ROM OEM string is 'COMPAQ'
+ *
+ * returns 0 for non-Compaq ROM, 1 for Compaq ROM
+ */
+static int check_for_compaq_ROM (void __iomem *rom_start)
+{
+ u8 temp1, temp2, temp3, temp4, temp5, temp6;
+ int result = 0;
+
+ temp1 = readb(rom_start + 0xffea + 0);
+ temp2 = readb(rom_start + 0xffea + 1);
+ temp3 = readb(rom_start + 0xffea + 2);
+ temp4 = readb(rom_start + 0xffea + 3);
+ temp5 = readb(rom_start + 0xffea + 4);
+ temp6 = readb(rom_start + 0xffea + 5);
+ if ((temp1 == 'C') &&
+ (temp2 == 'O') &&
+ (temp3 == 'M') &&
+ (temp4 == 'P') &&
+ (temp5 == 'A') &&
+ (temp6 == 'Q')) {
+ result = 1;
+ }
+ dbg ("%s - returned %d\n", __func__, result);
+ return result;
+}
+
+
+static u32 access_EV (u16 operation, u8 *ev_name, u8 *buffer, u32 *buf_size)
+{
+ unsigned long flags;
+ int op = operation;
+ int ret_val;
+
+ if (!compaq_int15_entry_point)
+ return -ENODEV;
+
+ spin_lock_irqsave(&int15_lock, flags);
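+	/* Far-call the Compaq ROM entry with the int15-style register
+	 * convention: AX = operation, ECX = buffer size, ESI = EV name,
+	 * EDI = buffer.  FLAGS and CS are pushed first so that ROM code
+	 * written to be entered via INT 15 can return with an IRET.
+	 */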
+ __asm__ (
+ "xorl %%ebx,%%ebx\n" \
+ "xorl %%edx,%%edx\n" \
+ "pushf\n" \
+ "push %%cs\n" \
+ "cli\n" \
+ "call *%6\n"
+ : "=c" (*buf_size), "=a" (ret_val)
+ : "a" (op), "c" (*buf_size), "S" (ev_name),
+ "D" (buffer), "m" (compaq_int15_entry_point)
+ : "%ebx", "%edx");
+ spin_unlock_irqrestore(&int15_lock, flags);
+
+ return((ret_val & 0xFF00) >> 8);
+}
+
+
+/*
+ * load_HRT
+ *
+ * Read the hot plug Resource Table from NVRAM
+ */
+static int load_HRT (void __iomem *rom_start)
+{
+ u32 available;
+ u32 temp_dword;
+ u8 temp_byte = 0xFF;
+ u32 rc;
+
+ if (!check_for_compaq_ROM(rom_start))
+ return -ENODEV;
+
+ available = 1024;
+
+ /* Now load the EV */
+ temp_dword = available;
+
+ rc = access_EV(READ_EV, "CQTHPS", evbuffer, &temp_dword);
+
+ evbuffer_length = temp_dword;
+
+ /* We're maintaining the resource lists so write FF to invalidate old
+ * info
+ */
+ temp_dword = 1;
+
+ rc = access_EV(WRITE_EV, "CQTHPS", &temp_byte, &temp_dword);
+
+ return rc;
+}
+
+
+/*
+ * store_HRT
+ *
+ * Save the hot plug Resource Table in NVRAM
+ */
+static u32 store_HRT (void __iomem *rom_start)
+{
+ u32 *buffer;
+ u32 *pFill;
+ u32 usedbytes;
+ u32 available;
+ u32 temp_dword;
+ u32 rc;
+ u8 loop;
+ u8 numCtrl = 0;
+ struct controller *ctrl;
+ struct pci_resource *resNode;
+ struct ev_hrt_header *p_EV_header;
+ struct ev_hrt_ctrl *p_ev_ctrl;
+
+ available = 1024;
+
+ if (!check_for_compaq_ROM(rom_start))
+ return(1);
+
+ buffer = (u32*) evbuffer;
+
+ if (!buffer)
+ return(1);
+
+ pFill = buffer;
+ usedbytes = 0;
+
+ p_EV_header = (struct ev_hrt_header *) pFill;
+
+ ctrl = cpqhp_ctrl_list;
+
+ /* The revision of this structure */
+ rc = add_byte(&pFill, 1 + ctrl->push_flag, &usedbytes, &available);
+ if (rc)
+ return(rc);
+
+ /* The number of controllers */
+ rc = add_byte(&pFill, 1, &usedbytes, &available);
+ if (rc)
+ return(rc);
+
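+	/* One variable-length record is appended per controller; the
+	 * per-record count fields (and the header's controller count) are
+	 * back-patched through p_ev_ctrl and p_EV_header once the resource
+	 * lists have been serialized.
+	 */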
+ while (ctrl) {
+ p_ev_ctrl = (struct ev_hrt_ctrl *) pFill;
+
+ numCtrl++;
+
+ /* The bus number */
+ rc = add_byte(&pFill, ctrl->bus, &usedbytes, &available);
+ if (rc)
+ return(rc);
+
+ /* The device Number */
+ rc = add_byte(&pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available);
+ if (rc)
+ return(rc);
+
+ /* The function Number */
+ rc = add_byte(&pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available);
+ if (rc)
+ return(rc);
+
+ /* Skip the number of available entries */
+ rc = add_dword(&pFill, 0, &usedbytes, &available);
+ if (rc)
+ return(rc);
+
+ /* Figure out memory Available */
+
+ resNode = ctrl->mem_head;
+
+ loop = 0;
+
+ while (resNode) {
+ loop ++;
+
+ /* base */
+ rc = add_dword(&pFill, resNode->base, &usedbytes, &available);
+ if (rc)
+ return(rc);
+
+ /* length */
+ rc = add_dword(&pFill, resNode->length, &usedbytes, &available);
+ if (rc)
+ return(rc);
+
+ resNode = resNode->next;
+ }
+
+ /* Fill in the number of entries */
+ p_ev_ctrl->mem_avail = loop;
+
+ /* Figure out prefetchable memory Available */
+
+ resNode = ctrl->p_mem_head;
+
+ loop = 0;
+
+ while (resNode) {
+ loop ++;
+
+ /* base */
+ rc = add_dword(&pFill, resNode->base, &usedbytes, &available);
+ if (rc)
+ return(rc);
+
+ /* length */
+ rc = add_dword(&pFill, resNode->length, &usedbytes, &available);
+ if (rc)
+ return(rc);
+
+ resNode = resNode->next;
+ }
+
+ /* Fill in the number of entries */
+ p_ev_ctrl->p_mem_avail = loop;
+
+ /* Figure out IO Available */
+
+ resNode = ctrl->io_head;
+
+ loop = 0;
+
+ while (resNode) {
+ loop ++;
+
+ /* base */
+ rc = add_dword(&pFill, resNode->base, &usedbytes, &available);
+ if (rc)
+ return(rc);
+
+ /* length */
+ rc = add_dword(&pFill, resNode->length, &usedbytes, &available);
+ if (rc)
+ return(rc);
+
+ resNode = resNode->next;
+ }
+
+ /* Fill in the number of entries */
+ p_ev_ctrl->io_avail = loop;
+
+ /* Figure out bus Available */
+
+ resNode = ctrl->bus_head;
+
+ loop = 0;
+
+ while (resNode) {
+ loop ++;
+
+ /* base */
+ rc = add_dword(&pFill, resNode->base, &usedbytes, &available);
+ if (rc)
+ return(rc);
+
+ /* length */
+ rc = add_dword(&pFill, resNode->length, &usedbytes, &available);
+ if (rc)
+ return(rc);
+
+ resNode = resNode->next;
+ }
+
+ /* Fill in the number of entries */
+ p_ev_ctrl->bus_avail = loop;
+
+ ctrl = ctrl->next;
+ }
+
+ p_EV_header->num_of_ctrl = numCtrl;
+
+ /* Now store the EV */
+
+ temp_dword = usedbytes;
+
+ rc = access_EV(WRITE_EV, "CQTHPS", (u8*) buffer, &temp_dword);
+
+ dbg("usedbytes = 0x%x, length = 0x%x\n", usedbytes, temp_dword);
+
+ evbuffer_length = temp_dword;
+
+ if (rc) {
+ err(msg_unable_to_save);
+ return(1);
+ }
+
+ return(0);
+}
+
+
+void compaq_nvram_init (void __iomem *rom_start)
+{
+ if (rom_start)
+ compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
+
+ dbg("int15 entry = %p\n", compaq_int15_entry_point);
+
+ /* initialize our int15 lock */
+ spin_lock_init(&int15_lock);
+}
+
+
+int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
+{
+ u8 bus, device, function;
+ u8 nummem, numpmem, numio, numbus;
+ u32 rc;
+ u8 *p_byte;
+ struct pci_resource *mem_node;
+ struct pci_resource *p_mem_node;
+ struct pci_resource *io_node;
+ struct pci_resource *bus_node;
+ struct ev_hrt_ctrl *p_ev_ctrl;
+ struct ev_hrt_header *p_EV_header;
+
+ if (!evbuffer_init) {
+ /* Read the resource list information in from NVRAM */
+ if (load_HRT(rom_start))
+ memset (evbuffer, 0, 1024);
+
+ evbuffer_init = 1;
+ }
+
+ /* If we saved information in NVRAM, use it now */
+ p_EV_header = (struct ev_hrt_header *) evbuffer;
+
+ /* The following code is for systems where version 1.0 of this
+ * driver has been loaded, but doesn't support the hardware.
+ * In that case, the driver would incorrectly store something
+ * in NVRAM.
+ */
+ if ((p_EV_header->Version == 2) ||
+ ((p_EV_header->Version == 1) && !ctrl->push_flag)) {
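+		/* The saved table is a packed byte stream: a 3-byte header,
+		 * then per-controller records of bus/dev/function, four
+		 * one-byte resource counts and a (base, length) dword pair
+		 * for each saved range.  Every advance of p_byte below is
+		 * bounds-checked against evbuffer_length.
+		 */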
+ p_byte = &(p_EV_header->next);
+
+ p_ev_ctrl = (struct ev_hrt_ctrl *) &(p_EV_header->next);
+
+ p_byte += 3;
+
+ if (p_byte > ((u8*)p_EV_header + evbuffer_length))
+ return 2;
+
+ bus = p_ev_ctrl->bus;
+ device = p_ev_ctrl->device;
+ function = p_ev_ctrl->function;
+
+ while ((bus != ctrl->bus) ||
+ (device != PCI_SLOT(ctrl->pci_dev->devfn)) ||
+ (function != PCI_FUNC(ctrl->pci_dev->devfn))) {
+ nummem = p_ev_ctrl->mem_avail;
+ numpmem = p_ev_ctrl->p_mem_avail;
+ numio = p_ev_ctrl->io_avail;
+ numbus = p_ev_ctrl->bus_avail;
+
+ p_byte += 4;
+
+ if (p_byte > ((u8*)p_EV_header + evbuffer_length))
+ return 2;
+
+ /* Skip forward to the next entry */
+ p_byte += (nummem + numpmem + numio + numbus) * 8;
+
+ if (p_byte > ((u8*)p_EV_header + evbuffer_length))
+ return 2;
+
+ p_ev_ctrl = (struct ev_hrt_ctrl *) p_byte;
+
+ p_byte += 3;
+
+ if (p_byte > ((u8*)p_EV_header + evbuffer_length))
+ return 2;
+
+ bus = p_ev_ctrl->bus;
+ device = p_ev_ctrl->device;
+ function = p_ev_ctrl->function;
+ }
+
+ nummem = p_ev_ctrl->mem_avail;
+ numpmem = p_ev_ctrl->p_mem_avail;
+ numio = p_ev_ctrl->io_avail;
+ numbus = p_ev_ctrl->bus_avail;
+
+ p_byte += 4;
+
+ if (p_byte > ((u8*)p_EV_header + evbuffer_length))
+ return 2;
+
+ while (nummem--) {
+ mem_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
+
+ if (!mem_node)
+ break;
+
+ mem_node->base = *(u32*)p_byte;
+ dbg("mem base = %8.8x\n",mem_node->base);
+ p_byte += 4;
+
+ if (p_byte > ((u8*)p_EV_header + evbuffer_length)) {
+ kfree(mem_node);
+ return 2;
+ }
+
+ mem_node->length = *(u32*)p_byte;
+ dbg("mem length = %8.8x\n",mem_node->length);
+ p_byte += 4;
+
+ if (p_byte > ((u8*)p_EV_header + evbuffer_length)) {
+ kfree(mem_node);
+ return 2;
+ }
+
+ mem_node->next = ctrl->mem_head;
+ ctrl->mem_head = mem_node;
+ }
+
+ while (numpmem--) {
+ p_mem_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
+
+ if (!p_mem_node)
+ break;
+
+ p_mem_node->base = *(u32*)p_byte;
+ dbg("pre-mem base = %8.8x\n",p_mem_node->base);
+ p_byte += 4;
+
+ if (p_byte > ((u8*)p_EV_header + evbuffer_length)) {
+ kfree(p_mem_node);
+ return 2;
+ }
+
+ p_mem_node->length = *(u32*)p_byte;
+ dbg("pre-mem length = %8.8x\n",p_mem_node->length);
+ p_byte += 4;
+
+ if (p_byte > ((u8*)p_EV_header + evbuffer_length)) {
+ kfree(p_mem_node);
+ return 2;
+ }
+
+ p_mem_node->next = ctrl->p_mem_head;
+ ctrl->p_mem_head = p_mem_node;
+ }
+
+ while (numio--) {
+ io_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
+
+ if (!io_node)
+ break;
+
+ io_node->base = *(u32*)p_byte;
+ dbg("io base = %8.8x\n",io_node->base);
+ p_byte += 4;
+
+ if (p_byte > ((u8*)p_EV_header + evbuffer_length)) {
+ kfree(io_node);
+ return 2;
+ }
+
+ io_node->length = *(u32*)p_byte;
+ dbg("io length = %8.8x\n",io_node->length);
+ p_byte += 4;
+
+ if (p_byte > ((u8*)p_EV_header + evbuffer_length)) {
+ kfree(io_node);
+ return 2;
+ }
+
+ io_node->next = ctrl->io_head;
+ ctrl->io_head = io_node;
+ }
+
+ while (numbus--) {
+ bus_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
+
+ if (!bus_node)
+ break;
+
+ bus_node->base = *(u32*)p_byte;
+ p_byte += 4;
+
+ if (p_byte > ((u8*)p_EV_header + evbuffer_length)) {
+ kfree(bus_node);
+ return 2;
+ }
+
+ bus_node->length = *(u32*)p_byte;
+ p_byte += 4;
+
+ if (p_byte > ((u8*)p_EV_header + evbuffer_length)) {
+ kfree(bus_node);
+ return 2;
+ }
+
+ bus_node->next = ctrl->bus_head;
+ ctrl->bus_head = bus_node;
+ }
+
+ /* If all of the following fail, we don't have any resources for
+ * hot plug add
+ */
+ rc = 1;
+ rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
+ rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
+ rc &= cpqhp_resource_sort_and_combine(&(ctrl->io_head));
+ rc &= cpqhp_resource_sort_and_combine(&(ctrl->bus_head));
+
+ if (rc)
+ return(rc);
+ } else {
+ if ((evbuffer[0] != 0) && (!ctrl->push_flag))
+ return 1;
+ }
+
+ return 0;
+}
+
+
+int compaq_nvram_store (void __iomem *rom_start)
+{
+ int rc = 1;
+
+ if (rom_start == NULL)
+ return -ENODEV;
+
+ if (evbuffer_init) {
+ rc = store_HRT(rom_start);
+ if (rc)
+ err(msg_unable_to_save);
+ }
+ return rc;
+}
+
diff --git a/drivers/pci/hotplug/cpqphp_nvram.h b/drivers/pci/hotplug/cpqphp_nvram.h
new file mode 100644
index 000000000..34e4e54fc
--- /dev/null
+++ b/drivers/pci/hotplug/cpqphp_nvram.h
@@ -0,0 +1,57 @@
+/*
+ * Compaq Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>
+ *
+ */
+
+#ifndef _CPQPHP_NVRAM_H
+#define _CPQPHP_NVRAM_H
+
+#ifndef CONFIG_HOTPLUG_PCI_COMPAQ_NVRAM
+
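+/* without NVRAM support, stub these out so that callers compile away */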
+static inline void compaq_nvram_init(void __iomem *rom_start)
+{
+ return;
+}
+
+static inline int compaq_nvram_load(void __iomem *rom_start, struct controller *ctrl)
+{
+ return 0;
+}
+
+static inline int compaq_nvram_store(void __iomem *rom_start)
+{
+ return 0;
+}
+
+#else
+
+void compaq_nvram_init(void __iomem *rom_start);
+int compaq_nvram_load(void __iomem *rom_start, struct controller *ctrl);
+int compaq_nvram_store(void __iomem *rom_start);
+
+#endif
+
+#endif
+
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
new file mode 100644
index 000000000..1c8c2f130
--- /dev/null
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -0,0 +1,1564 @@
+/*
+ * Compaq Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/proc_fs.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include "../pci.h"
+#include "cpqphp.h"
+#include "cpqphp_nvram.h"
+
+
+u8 cpqhp_nic_irq;
+u8 cpqhp_disk_irq;
+
+static u16 unused_IRQ;
+
+/*
+ * detect_HRT_floating_pointer
+ *
+ * find the Hot Plug Resource Table in the specified region of memory.
+ *
+ */
+static void __iomem *detect_HRT_floating_pointer(void __iomem *begin, void __iomem *end)
+{
+ void __iomem *fp;
+ void __iomem *endp;
+ u8 temp1, temp2, temp3, temp4;
+ int status = 0;
+
+ endp = (end - sizeof(struct hrt) + 1);
+
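+	/* the HRT is aligned on a 16-byte boundary; check each paragraph
+	 * for the "$HRT" signature
+	 */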
+ for (fp = begin; fp <= endp; fp += 16) {
+ temp1 = readb(fp + SIG0);
+ temp2 = readb(fp + SIG1);
+ temp3 = readb(fp + SIG2);
+ temp4 = readb(fp + SIG3);
+ if (temp1 == '$' &&
+ temp2 == 'H' &&
+ temp3 == 'R' &&
+ temp4 == 'T') {
+ status = 1;
+ break;
+ }
+ }
+
+ if (!status)
+ fp = NULL;
+
+ dbg("Discovered Hotplug Resource Table at %p\n", fp);
+ return fp;
+}
+
+
+int cpqhp_configure_device (struct controller *ctrl, struct pci_func *func)
+{
+ struct pci_bus *child;
+ int num;
+
+ pci_lock_rescan_remove();
+
+ if (func->pci_dev == NULL)
+		func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function));
+
+ /* No pci device, we need to create it then */
+ if (func->pci_dev == NULL) {
+ dbg("INFO: pci_dev still null\n");
+
+ num = pci_scan_slot(ctrl->pci_dev->bus, PCI_DEVFN(func->device, func->function));
+ if (num)
+ pci_bus_add_devices(ctrl->pci_dev->bus);
+
+ func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function));
+ if (func->pci_dev == NULL) {
+ dbg("ERROR: pci_dev still null\n");
+ goto out;
+ }
+ }
+
+ if (func->pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ pci_hp_add_bridge(func->pci_dev);
+ child = func->pci_dev->subordinate;
+ if (child)
+ pci_bus_add_devices(child);
+ }
+
+ pci_dev_put(func->pci_dev);
+
+ out:
+ pci_unlock_rescan_remove();
+ return 0;
+}
+
+
+int cpqhp_unconfigure_device(struct pci_func *func)
+{
+ int j;
+
+ dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function);
+
+ pci_lock_rescan_remove();
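+	/* remove all eight possible functions of this device */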
+	for (j = 0; j < 8; j++) {
+ struct pci_dev *temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j));
+ if (temp) {
+ pci_dev_put(temp);
+ pci_stop_and_remove_bus_device(temp);
+ }
+ }
+ pci_unlock_rescan_remove();
+ return 0;
+}
+
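+/* Read a config dword only after verifying that a device is present
+ * at this devfn (the vendor ID is readable and not all ones).
+ */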
+static int PCI_RefinedAccessConfig(struct pci_bus *bus, unsigned int devfn, u8 offset, u32 *value)
+{
+ u32 vendID = 0;
+
+ if (pci_bus_read_config_dword (bus, devfn, PCI_VENDOR_ID, &vendID) == -1)
+ return -1;
+ if (vendID == 0xffffffff)
+ return -1;
+ return pci_bus_read_config_dword (bus, devfn, offset, value);
+}
+
+
+/*
+ * cpqhp_set_irq
+ *
+ * @bus_num: bus number of PCI device
+ * @dev_num: device number of PCI device
+ * @int_pin: interrupt pin (1-4) to route
+ * @irq_num: IRQ number to route the pin to
+ */
+int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num)
+{
+ int rc = 0;
+
+ if (cpqhp_legacy_mode) {
+ struct pci_dev *fakedev;
+ struct pci_bus *fakebus;
+ u16 temp_word;
+
+ fakedev = kmalloc(sizeof(*fakedev), GFP_KERNEL);
+ fakebus = kmalloc(sizeof(*fakebus), GFP_KERNEL);
+ if (!fakedev || !fakebus) {
+ kfree(fakedev);
+ kfree(fakebus);
+ return -ENOMEM;
+ }
+
+ fakedev->devfn = dev_num << 3;
+ fakedev->bus = fakebus;
+ fakebus->number = bus_num;
+ dbg("%s: dev %d, bus %d, pin %d, num %d\n",
+ __func__, dev_num, bus_num, int_pin, irq_num);
+ rc = pcibios_set_irq_routing(fakedev, int_pin - 1, irq_num);
+ kfree(fakedev);
+ kfree(fakebus);
+ dbg("%s: rc %d\n", __func__, rc);
+		/* pcibios_set_irq_routing() returns nonzero on success */
+		if (!rc)
+			return 1;
+
+		/* set the Edge Level Control Register (ELCR) so this IRQ
+		 * is level-triggered, as shared PCI interrupts require
+		 */
+ temp_word = inb(0x4d0);
+ temp_word |= inb(0x4d1) << 8;
+
+ temp_word |= 0x01 << irq_num;
+
+ /* This should only be for x86 as it sets the Edge Level
+ * Control Register
+ */
+		outb((u8) (temp_word & 0xFF), 0x4d0);
+		outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1);
+		rc = 0;
+	}
+
+ return rc;
+}
+
+
+static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_num)
+{
+ u16 tdevice;
+ u32 work;
+ u8 tbus;
+
+ ctrl->pci_bus->number = bus_num;
+
+ for (tdevice = 0; tdevice < 0xFF; tdevice++) {
+ /* Scan for access first */
+ if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
+ continue;
+ dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice);
+ /* Yep we got one. Not a bridge ? */
+ if ((work >> 8) != PCI_TO_PCI_BRIDGE_CLASS) {
+ *dev_num = tdevice;
+ dbg("found it !\n");
+ return 0;
+ }
+ }
+ for (tdevice = 0; tdevice < 0xFF; tdevice++) {
+ /* Scan for access first */
+ if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
+ continue;
+ dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice);
+ /* Yep we got one. bridge ? */
+ if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
+ pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus);
+			/* XXX: this should recurse into the bridge's
+			 * secondary bus, but no recursion is done here
+			 */
+ dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice);
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+
+static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot, u8 nobridge)
+{
+ int loop, len;
+ u32 work;
+ u8 tbus, tdevice, tslot;
+
+ len = cpqhp_routing_table_length();
+ for (loop = 0; loop < len; ++loop) {
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn;
+ tslot = cpqhp_routing_table->slots[loop].slot;
+
+ if (tslot == slot) {
+ *bus_num = tbus;
+ *dev_num = tdevice;
+ ctrl->pci_bus->number = tbus;
+ pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work);
+ if (!nobridge || (work == 0xffffffff))
+ return 0;
+
+ dbg("bus_num %d devfn %d\n", *bus_num, *dev_num);
+ pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_CLASS_REVISION, &work);
+ dbg("work >> 8 (%x) = BRIDGE (%x)\n", work >> 8, PCI_TO_PCI_BRIDGE_CLASS);
+
+ if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
+ pci_bus_read_config_byte (ctrl->pci_bus, *dev_num, PCI_SECONDARY_BUS, &tbus);
+ dbg("Scan bus for Non Bridge: bus %d\n", tbus);
+ if (PCI_ScanBusForNonBridge(ctrl, tbus, dev_num) == 0) {
+ *bus_num = tbus;
+ return 0;
+ }
+ } else
+ return 0;
+ }
+ }
+ return -1;
+}
+
+
+int cpqhp_get_bus_dev (struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot)
+{
+ /* plain (bridges allowed) */
+ return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0);
+}
+
+
+/* More PCI configuration routines; this time centered around hotplug
+ * controller
+ */
+
+
+/*
+ * cpqhp_save_config
+ *
+ * Reads configuration for all slots in a PCI bus and saves info.
+ *
+ * Note: For non-hot plug buses, the slot # saved is the device #
+ *
+ * returns 0 if success
+ */
+int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
+{
+ long rc;
+ u8 class_code;
+ u8 header_type;
+ u32 ID;
+ u8 secondary_bus;
+ struct pci_func *new_slot;
+ int sub_bus;
+ int FirstSupported;
+ int LastSupported;
+ int max_functions;
+ int function;
+ u8 DevError;
+ int device = 0;
+ int cloop = 0;
+ int stop_it;
+ int index;
+
+ /* Decide which slots are supported */
+
+ if (is_hot_plug) {
+		/* is_hot_plug is the slot mask: the high nibble is the
+		 * first slot number, the low nibble the number of slots
+		 */
+ FirstSupported = is_hot_plug >> 4;
+ LastSupported = FirstSupported + (is_hot_plug & 0x0F) - 1;
+ } else {
+ FirstSupported = 0;
+ LastSupported = 0x1F;
+ }
+
+ /* Save PCI configuration space for all devices in supported slots */
+ ctrl->pci_bus->number = busnumber;
+ for (device = FirstSupported; device <= LastSupported; device++) {
+ ID = 0xFFFFFFFF;
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID);
+
+ if (ID == 0xFFFFFFFF) {
+ if (is_hot_plug) {
+ /* Setup slot structure with entry for empty
+ * slot
+ */
+ new_slot = cpqhp_slot_create(busnumber);
+ if (new_slot == NULL)
+ return 1;
+
+ new_slot->bus = (u8) busnumber;
+ new_slot->device = (u8) device;
+ new_slot->function = 0;
+ new_slot->is_a_board = 0;
+ new_slot->presence_save = 0;
+ new_slot->switch_save = 0;
+ }
+ continue;
+ }
+
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code);
+ if (rc)
+ return rc;
+
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type);
+ if (rc)
+ return rc;
+
+ /* If multi-function device, set max_functions to 8 */
+ if (header_type & 0x80)
+ max_functions = 8;
+ else
+ max_functions = 1;
+
+ function = 0;
+
+ do {
+ DevError = 0;
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Recurse the subordinate bus
+ * get the subordinate bus number
+ */
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus);
+ if (rc) {
+ return rc;
+ } else {
+ sub_bus = (int) secondary_bus;
+
+ /* Save secondary bus cfg spc
+ * with this recursive call.
+ */
+ rc = cpqhp_save_config(ctrl, sub_bus, 0);
+ if (rc)
+ return rc;
+ ctrl->pci_bus->number = busnumber;
+ }
+ }
+
+ index = 0;
+ new_slot = cpqhp_slot_find(busnumber, device, index++);
+ while (new_slot &&
+ (new_slot->function != (u8) function))
+ new_slot = cpqhp_slot_find(busnumber, device, index++);
+
+ if (!new_slot) {
+ /* Setup slot structure. */
+ new_slot = cpqhp_slot_create(busnumber);
+ if (new_slot == NULL)
+ return 1;
+ }
+
+ new_slot->bus = (u8) busnumber;
+ new_slot->device = (u8) device;
+ new_slot->function = (u8) function;
+ new_slot->is_a_board = 1;
+ new_slot->switch_save = 0x10;
+ /* In case of unsupported board */
+ new_slot->status = DevError;
+ new_slot->pci_dev = pci_get_bus_and_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
+
+ for (cloop = 0; cloop < 0x20; cloop++) {
+				rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) &(new_slot->config_space[cloop]));
+ if (rc)
+ return rc;
+ }
+
+ pci_dev_put(new_slot->pci_dev);
+
+ function++;
+
+ stop_it = 0;
+
+ /* this loop skips to the next present function
+ * reading in Class Code and Header type.
+ */
+ while ((function < max_functions) && (!stop_it)) {
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
+ if (ID == 0xFFFFFFFF) {
+ function++;
+ continue;
+ }
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
+ if (rc)
+ return rc;
+
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type);
+ if (rc)
+ return rc;
+
+ stop_it++;
+ }
+
+ } while (function < max_functions);
+ } /* End of FOR loop */
+
+ return 0;
+}
+
+
+/*
+ * cpqhp_save_slot_config
+ *
+ * Saves configuration info for all PCI devices in a given slot
+ * including subordinate buses.
+ *
+ * returns 0 if success
+ */
+int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func *new_slot)
+{
+ long rc;
+ u8 class_code;
+ u8 header_type;
+ u32 ID;
+ u8 secondary_bus;
+ int sub_bus;
+ int max_functions;
+ int function = 0;
+ int cloop = 0;
+ int stop_it;
+
+ ID = 0xFFFFFFFF;
+
+ ctrl->pci_bus->number = new_slot->bus;
+ pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_VENDOR_ID, &ID);
+
+ if (ID == 0xFFFFFFFF)
+ return 2;
+
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code);
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
+
+ if (header_type & 0x80) /* Multi-function device */
+ max_functions = 8;
+ else
+ max_functions = 1;
+
+ while (function < max_functions) {
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Recurse the subordinate bus */
+ pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
+
+ sub_bus = (int) secondary_bus;
+
+ /* Save the config headers for the secondary
+ * bus.
+ */
+ rc = cpqhp_save_config(ctrl, sub_bus, 0);
+ if (rc)
+ return(rc);
+ ctrl->pci_bus->number = new_slot->bus;
+
+ }
+
+ new_slot->status = 0;
+
+ for (cloop = 0; cloop < 0x20; cloop++)
+			pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) &(new_slot->config_space[cloop]));
+
+ function++;
+
+ stop_it = 0;
+
+ /* this loop skips to the next present function
+ * reading in the Class Code and the Header type.
+ */
+ while ((function < max_functions) && (!stop_it)) {
+ pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
+
+ if (ID == 0xFFFFFFFF)
+ function++;
+ else {
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type);
+ stop_it++;
+ }
+ }
+
+ }
+
+ return 0;
+}
+
+
+/*
+ * cpqhp_save_base_addr_length
+ *
+ * Saves the length of all base address registers for the
+ * specified slot. this is for hot plug REPLACE
+ *
+ * returns 0 if success
+ */
+int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func *func)
+{
+ u8 cloop;
+ u8 header_type;
+ u8 secondary_bus;
+ u8 type;
+ int sub_bus;
+ u32 temp_register;
+ u32 base;
+ u32 rc;
+ struct pci_func *next;
+ int index = 0;
+ struct pci_bus *pci_bus = ctrl->pci_bus;
+ unsigned int devfn;
+
+ func = cpqhp_slot_find(func->bus, func->device, index++);
+
+ while (func != NULL) {
+ pci_bus->number = func->bus;
+ devfn = PCI_DEVFN(func->device, func->function);
+
+ /* Check for Bridge */
+ pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
+
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
+
+ sub_bus = (int) secondary_bus;
+
+ next = cpqhp_slot_list[sub_bus];
+
+ while (next != NULL) {
+ rc = cpqhp_save_base_addr_length(ctrl, next);
+ if (rc)
+ return rc;
+
+ next = next->next;
+ }
+ pci_bus->number = func->bus;
+
+			/* FIXME: this loop is duplicated in the non-bridge
+			 * case; the two could be rolled together.
+			 * Figure out IO and memory base lengths.
+			 */
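+			/* size each BAR by writing all ones and reading it
+			 * back: don't-care address bits read as zero, so
+			 * inverting and adding one yields the size
+			 */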
+ for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
+ temp_register = 0xFFFFFFFF;
+ pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
+ pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
+ /* If this register is implemented */
+ if (base) {
+ if (base & 0x01L) {
+ /* IO base
+ * set base = amount of IO space
+ * requested
+ */
+ base = base & 0xFFFFFFFE;
+ base = (~base) + 1;
+
+ type = 1;
+ } else {
+ /* memory base */
+ base = base & 0xFFFFFFF0;
+ base = (~base) + 1;
+
+ type = 0;
+ }
+ } else {
+ base = 0x0L;
+ type = 0;
+ }
+
+ /* Save information in slot structure */
+ func->base_length[(cloop - 0x10) >> 2] =
+ base;
+ func->base_type[(cloop - 0x10) >> 2] = type;
+
+ } /* End of base register loop */
+
+ } else if ((header_type & 0x7F) == 0x00) {
+ /* Figure out IO and memory base lengths */
+ for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
+ temp_register = 0xFFFFFFFF;
+ pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
+ pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
+
+ /* If this register is implemented */
+ if (base) {
+ if (base & 0x01L) {
+ /* IO base
+ * base = amount of IO space
+ * requested
+ */
+ base = base & 0xFFFFFFFE;
+ base = (~base) + 1;
+
+ type = 1;
+ } else {
+ /* memory base
+ * base = amount of memory
+ * space requested
+ */
+ base = base & 0xFFFFFFF0;
+ base = (~base) + 1;
+
+ type = 0;
+ }
+ } else {
+ base = 0x0L;
+ type = 0;
+ }
+
+ /* Save information in slot structure */
+ func->base_length[(cloop - 0x10) >> 2] = base;
+ func->base_type[(cloop - 0x10) >> 2] = type;
+
+ } /* End of base register loop */
+
+ } else { /* Some other unknown header type */
+ }
+
+ /* find the next device in this slot */
+ func = cpqhp_slot_find(func->bus, func->device, index++);
+ }
+
+ return(0);
+}
+
+
+/*
+ * cpqhp_save_used_resources
+ *
+ * Stores used resource information for existing boards. this is
+ * for boards that were in the system when this driver was loaded.
+ * this function is for hot plug ADD
+ *
+ * returns 0 if success
+ */
+int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func *func)
+{
+ u8 cloop;
+ u8 header_type;
+ u8 secondary_bus;
+ u8 temp_byte;
+ u8 b_base;
+ u8 b_length;
+ u16 command;
+ u16 save_command;
+ u16 w_base;
+ u16 w_length;
+ u32 temp_register;
+ u32 save_base;
+ u32 base;
+ int index = 0;
+ struct pci_resource *mem_node;
+ struct pci_resource *p_mem_node;
+ struct pci_resource *io_node;
+ struct pci_resource *bus_node;
+ struct pci_bus *pci_bus = ctrl->pci_bus;
+ unsigned int devfn;
+
+ func = cpqhp_slot_find(func->bus, func->device, index++);
+
+ while ((func != NULL) && func->is_a_board) {
+ pci_bus->number = func->bus;
+ devfn = PCI_DEVFN(func->device, func->function);
+
+ /* Save the command register */
+ pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &save_command);
+
+ /* disable card */
+ command = 0x00;
+ pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command);
+
+ /* Check for Bridge */
+ pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
+
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Clear Bridge Control Register */
+ command = 0x00;
+ pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command);
+ pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
+ pci_bus_read_config_byte(pci_bus, devfn, PCI_SUBORDINATE_BUS, &temp_byte);
+
+ bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL);
+ if (!bus_node)
+ return -ENOMEM;
+
+ bus_node->base = secondary_bus;
+ bus_node->length = temp_byte - secondary_bus + 1;
+
+ bus_node->next = func->bus_head;
+ func->bus_head = bus_node;
+
+ /* Save IO base and Limit registers */
+ pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_BASE, &b_base);
+ pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_LIMIT, &b_length);
+
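+			/* bridge I/O windows have 4K granularity: the high
+			 * nibble of base/limit holds address bits [15:12]
+			 */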
+ if ((b_base <= b_length) && (save_command & 0x01)) {
+ io_node = kmalloc(sizeof(*io_node), GFP_KERNEL);
+ if (!io_node)
+ return -ENOMEM;
+
+ io_node->base = (b_base & 0xF0) << 8;
+ io_node->length = (b_length - b_base + 0x10) << 8;
+
+ io_node->next = func->io_head;
+ func->io_head = io_node;
+ }
+
+ /* Save memory base and Limit registers */
+ pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_BASE, &w_base);
+ pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, &w_length);
+
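+			/* bridge memory windows have 1MB granularity:
+			 * base/limit hold address bits [31:20]
+			 */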
+ if ((w_base <= w_length) && (save_command & 0x02)) {
+ mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL);
+ if (!mem_node)
+ return -ENOMEM;
+
+ mem_node->base = w_base << 16;
+ mem_node->length = (w_length - w_base + 0x10) << 16;
+
+ mem_node->next = func->mem_head;
+ func->mem_head = mem_node;
+ }
+
+ /* Save prefetchable memory base and Limit registers */
+ pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, &w_base);
+ pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length);
+
+ if ((w_base <= w_length) && (save_command & 0x02)) {
+ p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL);
+ if (!p_mem_node)
+ return -ENOMEM;
+
+ p_mem_node->base = w_base << 16;
+ p_mem_node->length = (w_length - w_base + 0x10) << 16;
+
+ p_mem_node->next = func->p_mem_head;
+ func->p_mem_head = p_mem_node;
+ }
+ /* Figure out IO and memory base lengths */
+ for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
+ pci_bus_read_config_dword (pci_bus, devfn, cloop, &save_base);
+
+ temp_register = 0xFFFFFFFF;
+ pci_bus_write_config_dword(pci_bus, devfn, cloop, temp_register);
+ pci_bus_read_config_dword(pci_bus, devfn, cloop, &base);
+
+ temp_register = base;
+
+ /* If this register is implemented */
+ if (base) {
+ if (((base & 0x03L) == 0x01)
+ && (save_command & 0x01)) {
+ /* IO base
+ * set temp_register = amount
+ * of IO space requested
+ */
+ temp_register = base & 0xFFFFFFFE;
+ temp_register = (~temp_register) + 1;
+
+ io_node = kmalloc(sizeof(*io_node),
+ GFP_KERNEL);
+ if (!io_node)
+ return -ENOMEM;
+
+ io_node->base =
+ save_base & (~0x03L);
+ io_node->length = temp_register;
+
+ io_node->next = func->io_head;
+ func->io_head = io_node;
+ } else
+ if (((base & 0x0BL) == 0x08)
+ && (save_command & 0x02)) {
+ /* prefetchable memory base */
+ temp_register = base & 0xFFFFFFF0;
+ temp_register = (~temp_register) + 1;
+
+ p_mem_node = kmalloc(sizeof(*p_mem_node),
+ GFP_KERNEL);
+ if (!p_mem_node)
+ return -ENOMEM;
+
+ p_mem_node->base = save_base & (~0x0FL);
+ p_mem_node->length = temp_register;
+
+ p_mem_node->next = func->p_mem_head;
+ func->p_mem_head = p_mem_node;
+ } else
+ if (((base & 0x0BL) == 0x00)
+ && (save_command & 0x02)) {
+					/* non-prefetchable memory base */
+ temp_register = base & 0xFFFFFFF0;
+ temp_register = (~temp_register) + 1;
+
+ mem_node = kmalloc(sizeof(*mem_node),
+ GFP_KERNEL);
+ if (!mem_node)
+ return -ENOMEM;
+
+ mem_node->base = save_base & (~0x0FL);
+ mem_node->length = temp_register;
+
+ mem_node->next = func->mem_head;
+ func->mem_head = mem_node;
+ } else
+ return(1);
+ }
+ } /* End of base register loop */
+ /* Standard header */
+ } else if ((header_type & 0x7F) == 0x00) {
+ /* Figure out IO and memory base lengths */
+ for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
+ pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base);
+
+ temp_register = 0xFFFFFFFF;
+ pci_bus_write_config_dword(pci_bus, devfn, cloop, temp_register);
+ pci_bus_read_config_dword(pci_bus, devfn, cloop, &base);
+
+ temp_register = base;
+
+ /* If this register is implemented */
+ if (base) {
+ if (((base & 0x03L) == 0x01)
+ && (save_command & 0x01)) {
+ /* IO base
+ * set temp_register = amount
+ * of IO space requested
+ */
+ temp_register = base & 0xFFFFFFFE;
+ temp_register = (~temp_register) + 1;
+
+ io_node = kmalloc(sizeof(*io_node),
+ GFP_KERNEL);
+ if (!io_node)
+ return -ENOMEM;
+
+ io_node->base = save_base & (~0x01L);
+ io_node->length = temp_register;
+
+ io_node->next = func->io_head;
+ func->io_head = io_node;
+ } else
+ if (((base & 0x0BL) == 0x08)
+ && (save_command & 0x02)) {
+ /* prefetchable memory base */
+ temp_register = base & 0xFFFFFFF0;
+ temp_register = (~temp_register) + 1;
+
+ p_mem_node = kmalloc(sizeof(*p_mem_node),
+ GFP_KERNEL);
+ if (!p_mem_node)
+ return -ENOMEM;
+
+ p_mem_node->base = save_base & (~0x0FL);
+ p_mem_node->length = temp_register;
+
+ p_mem_node->next = func->p_mem_head;
+ func->p_mem_head = p_mem_node;
+ } else
+ if (((base & 0x0BL) == 0x00)
+ && (save_command & 0x02)) {
+					/* non-prefetchable memory base */
+ temp_register = base & 0xFFFFFFF0;
+ temp_register = (~temp_register) + 1;
+
+ mem_node = kmalloc(sizeof(*mem_node),
+ GFP_KERNEL);
+ if (!mem_node)
+ return -ENOMEM;
+
+ mem_node->base = save_base & (~0x0FL);
+ mem_node->length = temp_register;
+
+ mem_node->next = func->mem_head;
+ func->mem_head = mem_node;
+ } else
+ return(1);
+ }
+ } /* End of base register loop */
+ }
+
+ /* find the next device in this slot */
+ func = cpqhp_slot_find(func->bus, func->device, index++);
+ }
+
+ return 0;
+}
+
+
+/*
+ * cpqhp_configure_board
+ *
+ * Copies saved configuration information to one slot.
+ * this is called recursively for bridge devices.
+ * this is for hot plug REPLACE!
+ *
+ * returns 0 if success
+ */
+int cpqhp_configure_board(struct controller *ctrl, struct pci_func *func)
+{
+ int cloop;
+ u8 header_type;
+ u8 secondary_bus;
+ int sub_bus;
+ struct pci_func *next;
+ u32 temp;
+ u32 rc;
+ int index = 0;
+ struct pci_bus *pci_bus = ctrl->pci_bus;
+ unsigned int devfn;
+
+ func = cpqhp_slot_find(func->bus, func->device, index++);
+
+ while (func != NULL) {
+ pci_bus->number = func->bus;
+ devfn = PCI_DEVFN(func->device, func->function);
+
+ /* Start at the top of config space so that the control
+ * registers are programmed last
+ */
+ for (cloop = 0x3C; cloop > 0; cloop -= 4)
+ pci_bus_write_config_dword (pci_bus, devfn, cloop, func->config_space[cloop >> 2]);
+
+ pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
+
+ /* If this is a bridge device, restore subordinate devices */
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
+
+ sub_bus = (int) secondary_bus;
+
+ next = cpqhp_slot_list[sub_bus];
+
+ while (next != NULL) {
+ rc = cpqhp_configure_board(ctrl, next);
+ if (rc)
+ return rc;
+
+ next = next->next;
+ }
+ } else {
+
+ /* Check all the base Address Registers to make sure
+ * they are the same. If not, the board is different.
+ */
+
+ for (cloop = 16; cloop < 40; cloop += 4) {
+ pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp);
+
+ if (temp != func->config_space[cloop >> 2]) {
+ dbg("Config space compare failure!!! offset = %x\n", cloop);
+ dbg("bus = %x, device = %x, function = %x\n", func->bus, func->device, func->function);
+ dbg("temp = %x, config space = %x\n\n", temp, func->config_space[cloop >> 2]);
+ return 1;
+ }
+ }
+ }
+
+ func->configured = 1;
+
+ func = cpqhp_slot_find(func->bus, func->device, index++);
+ }
+
+ return 0;
+}
+
+
+/*
+ * cpqhp_valid_replace
+ *
+ * this function checks to see if a board is the same as the
+ * one it is replacing. this check will detect if the device's
+ * vendor or device id's are the same
+ *
+ * returns 0 if the board is the same nonzero otherwise
+ */
+int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func)
+{
+ u8 cloop;
+ u8 header_type;
+ u8 secondary_bus;
+ u8 type;
+ u32 temp_register = 0;
+ u32 base;
+ u32 rc;
+ struct pci_func *next;
+ int index = 0;
+ struct pci_bus *pci_bus = ctrl->pci_bus;
+ unsigned int devfn;
+
+ if (!func->is_a_board)
+ return(ADD_NOT_SUPPORTED);
+
+ func = cpqhp_slot_find(func->bus, func->device, index++);
+
+ while (func != NULL) {
+ pci_bus->number = func->bus;
+ devfn = PCI_DEVFN(func->device, func->function);
+
+ pci_bus_read_config_dword (pci_bus, devfn, PCI_VENDOR_ID, &temp_register);
+
+ /* No adapter present */
+ if (temp_register == 0xFFFFFFFF)
+ return(NO_ADAPTER_PRESENT);
+
+ if (temp_register != func->config_space[0])
+ return(ADAPTER_NOT_SAME);
+
+ /* Check for same revision number and class code */
+ pci_bus_read_config_dword (pci_bus, devfn, PCI_CLASS_REVISION, &temp_register);
+
+ /* Adapter not the same */
+ if (temp_register != func->config_space[0x08 >> 2])
+ return(ADAPTER_NOT_SAME);
+
+ /* Check for Bridge */
+ pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
+
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* In order to continue checking, we must program the
+ * bus registers in the bridge to respond to accesses
+ * for its subordinate bus(es)
+ */
+
+ temp_register = func->config_space[0x18 >> 2];
+ pci_bus_write_config_dword (pci_bus, devfn, PCI_PRIMARY_BUS, temp_register);
+
+ secondary_bus = (temp_register >> 8) & 0xFF;
+
+ next = cpqhp_slot_list[secondary_bus];
+
+ while (next != NULL) {
+ rc = cpqhp_valid_replace(ctrl, next);
+ if (rc)
+ return rc;
+
+ next = next->next;
+ }
+
+ }
+ /* Check to see if it is a standard config header */
+ else if ((header_type & 0x7F) == PCI_HEADER_TYPE_NORMAL) {
+ /* Check subsystem vendor and ID */
+ pci_bus_read_config_dword (pci_bus, devfn, PCI_SUBSYSTEM_VENDOR_ID, &temp_register);
+
+ if (temp_register != func->config_space[0x2C >> 2]) {
+ /* If it's a SMART-2 and the register isn't
+ * filled in, ignore the difference because
+ * they just have an old rev of the firmware
+ */
+ if (!((func->config_space[0] == 0xAE100E11)
+ && (temp_register == 0x00L)))
+ return(ADAPTER_NOT_SAME);
+ }
+ /* Figure out IO and memory base lengths */
+ for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
+ temp_register = 0xFFFFFFFF;
+ pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
+ pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
+
+ /* If this register is implemented */
+ if (base) {
+ if (base & 0x01L) {
+ /* IO base
+ * set base = amount of IO
+ * space requested
+ */
+ base = base & 0xFFFFFFFE;
+ base = (~base) + 1;
+
+ type = 1;
+ } else {
+ /* memory base */
+ base = base & 0xFFFFFFF0;
+ base = (~base) + 1;
+
+ type = 0;
+ }
+ } else {
+ base = 0x0L;
+ type = 0;
+ }
+
+ /* Check information in slot structure */
+ if (func->base_length[(cloop - 0x10) >> 2] != base)
+ return(ADAPTER_NOT_SAME);
+
+ if (func->base_type[(cloop - 0x10) >> 2] != type)
+ return(ADAPTER_NOT_SAME);
+
+ } /* End of base register loop */
+
+ } /* End of (type 0 config space) else */
+ else {
+ /* this is not a type 0 or 1 config space header so
+ * we don't know how to do it
+ */
+ return(DEVICE_TYPE_NOT_SUPPORTED);
+ }
+
+ /* Get the next function */
+ func = cpqhp_slot_find(func->bus, func->device, index++);
+ }
+
+
+ return 0;
+}
+
+
+/*
+ * cpqhp_find_available_resources
+ *
+ * Finds available memory, IO, and IRQ resources for programming
+ * devices which may be added to the system
+ * this function is for hot plug ADD!
+ *
+ * returns 0 if success
+ */
+int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_start)
+{
+ u8 temp;
+ u8 populated_slot;
+ u8 bridged_slot;
+ void __iomem *one_slot;
+ void __iomem *rom_resource_table;
+ struct pci_func *func = NULL;
+ int i = 10, index;
+ u32 temp_dword, rc;
+ struct pci_resource *mem_node;
+ struct pci_resource *p_mem_node;
+ struct pci_resource *io_node;
+ struct pci_resource *bus_node;
+
+ rom_resource_table = detect_HRT_floating_pointer(rom_start, rom_start+0xffff);
+ dbg("rom_resource_table = %p\n", rom_resource_table);
+
+ if (rom_resource_table == NULL)
+ return -ENODEV;
+
+ /* Sum all resources and setup resource maps */
+ unused_IRQ = readl(rom_resource_table + UNUSED_IRQ);
+ dbg("unused_IRQ = %x\n", unused_IRQ);
+
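+	/* unused_IRQ is a bitmap of free IRQs: hand the lowest free IRQ
+	 * to disk controllers and the next one up to NICs
+	 */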
+ temp = 0;
+ while (unused_IRQ) {
+ if (unused_IRQ & 1) {
+ cpqhp_disk_irq = temp;
+ break;
+ }
+ unused_IRQ = unused_IRQ >> 1;
+ temp++;
+ }
+
+ dbg("cpqhp_disk_irq= %d\n", cpqhp_disk_irq);
+ unused_IRQ = unused_IRQ >> 1;
+ temp++;
+
+ while (unused_IRQ) {
+ if (unused_IRQ & 1) {
+ cpqhp_nic_irq = temp;
+ break;
+ }
+ unused_IRQ = unused_IRQ >> 1;
+ temp++;
+ }
+
+ dbg("cpqhp_nic_irq= %d\n", cpqhp_nic_irq);
+ unused_IRQ = readl(rom_resource_table + PCIIRQ);
+
+ temp = 0;
+
+ if (!cpqhp_nic_irq)
+ cpqhp_nic_irq = ctrl->cfgspc_irq;
+
+ if (!cpqhp_disk_irq)
+ cpqhp_disk_irq = ctrl->cfgspc_irq;
+
+ dbg("cpqhp_disk_irq, cpqhp_nic_irq= %d, %d\n", cpqhp_disk_irq, cpqhp_nic_irq);
+
+ rc = compaq_nvram_load(rom_start, ctrl);
+ if (rc)
+ return rc;
+
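+	/* the per-slot resource entries follow the HRT header */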
+ one_slot = rom_resource_table + sizeof (struct hrt);
+
+ i = readb(rom_resource_table + NUMBER_OF_ENTRIES);
+ dbg("number_of_entries = %d\n", i);
+
+ if (!readb(one_slot + SECONDARY_BUS))
+ return 1;
+
+ dbg("dev|IO base|length|Mem base|length|Pre base|length|PB SB MB\n");
+
+ while (i && readb(one_slot + SECONDARY_BUS)) {
+ u8 dev_func = readb(one_slot + DEV_FUNC);
+ u8 primary_bus = readb(one_slot + PRIMARY_BUS);
+ u8 secondary_bus = readb(one_slot + SECONDARY_BUS);
+ u8 max_bus = readb(one_slot + MAX_BUS);
+ u16 io_base = readw(one_slot + IO_BASE);
+ u16 io_length = readw(one_slot + IO_LENGTH);
+ u16 mem_base = readw(one_slot + MEM_BASE);
+ u16 mem_length = readw(one_slot + MEM_LENGTH);
+ u16 pre_mem_base = readw(one_slot + PRE_MEM_BASE);
+ u16 pre_mem_length = readw(one_slot + PRE_MEM_LENGTH);
+
+ dbg("%2.2x | %4.4x | %4.4x | %4.4x | %4.4x | %4.4x | %4.4x |%2.2x %2.2x %2.2x\n",
+ dev_func, io_base, io_length, mem_base, mem_length, pre_mem_base, pre_mem_length,
+ primary_bus, secondary_bus, max_bus);
+
+ /* If this entry isn't for our controller's bus, ignore it */
+ if (primary_bus != ctrl->bus) {
+ i--;
+ one_slot += sizeof (struct slot_rt);
+ continue;
+ }
+ /* find out if this entry is for an occupied slot */
+ ctrl->pci_bus->number = primary_bus;
+ pci_bus_read_config_dword (ctrl->pci_bus, dev_func, PCI_VENDOR_ID, &temp_dword);
+		dbg("temp_dword = %x\n", temp_dword);
+
+ if (temp_dword != 0xFFFFFFFF) {
+ index = 0;
+ func = cpqhp_slot_find(primary_bus, dev_func >> 3, 0);
+
+ while (func && (func->function != (dev_func & 0x07))) {
+ dbg("func = %p (bus, dev, fun) = (%d, %d, %d)\n", func, primary_bus, dev_func >> 3, index);
+ func = cpqhp_slot_find(primary_bus, dev_func >> 3, index++);
+ }
+
+ /* If we can't find a match, skip this table entry */
+ if (!func) {
+ i--;
+ one_slot += sizeof (struct slot_rt);
+ continue;
+ }
+ /* this may not work and shouldn't be used */
+ if (secondary_bus != primary_bus)
+ bridged_slot = 1;
+ else
+ bridged_slot = 0;
+
+ populated_slot = 1;
+ } else {
+ populated_slot = 0;
+ bridged_slot = 0;
+ }
+
+
+ /* If we've got a valid IO base, use it */
+
+ temp_dword = io_base + io_length;
+
+ if ((io_base) && (temp_dword < 0x10000)) {
+ io_node = kmalloc(sizeof(*io_node), GFP_KERNEL);
+ if (!io_node)
+ return -ENOMEM;
+
+ io_node->base = io_base;
+ io_node->length = io_length;
+
+ dbg("found io_node(base, length) = %x, %x\n",
+ io_node->base, io_node->length);
+			dbg("populated slot = %d\n", populated_slot);
+ if (!populated_slot) {
+ io_node->next = ctrl->io_head;
+ ctrl->io_head = io_node;
+ } else {
+ io_node->next = func->io_head;
+ func->io_head = io_node;
+ }
+ }
+
+ /* If we've got a valid memory base, use it */
+ temp_dword = mem_base + mem_length;
+ if ((mem_base) && (temp_dword < 0x10000)) {
+ mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL);
+ if (!mem_node)
+ return -ENOMEM;
+
+ mem_node->base = mem_base << 16;
+
+ mem_node->length = mem_length << 16;
+
+ dbg("found mem_node(base, length) = %x, %x\n",
+ mem_node->base, mem_node->length);
+			dbg("populated slot = %d\n", populated_slot);
+ if (!populated_slot) {
+ mem_node->next = ctrl->mem_head;
+ ctrl->mem_head = mem_node;
+ } else {
+ mem_node->next = func->mem_head;
+ func->mem_head = mem_node;
+ }
+ }
+
+ /* If we've got a valid prefetchable memory base, and
+ * the base + length isn't greater than 0xFFFF
+ */
+ temp_dword = pre_mem_base + pre_mem_length;
+ if ((pre_mem_base) && (temp_dword < 0x10000)) {
+ p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL);
+ if (!p_mem_node)
+ return -ENOMEM;
+
+ p_mem_node->base = pre_mem_base << 16;
+
+ p_mem_node->length = pre_mem_length << 16;
+ dbg("found p_mem_node(base, length) = %x, %x\n",
+ p_mem_node->base, p_mem_node->length);
+			dbg("populated slot = %d\n", populated_slot);
+
+ if (!populated_slot) {
+ p_mem_node->next = ctrl->p_mem_head;
+ ctrl->p_mem_head = p_mem_node;
+ } else {
+ p_mem_node->next = func->p_mem_head;
+ func->p_mem_head = p_mem_node;
+ }
+ }
+
+ /* If we've got a valid bus number, use it
+ * The second condition is to ignore bus numbers on
+ * populated slots that don't have PCI-PCI bridges
+ */
+ if (secondary_bus && (secondary_bus != primary_bus)) {
+ bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL);
+ if (!bus_node)
+ return -ENOMEM;
+
+ bus_node->base = secondary_bus;
+ bus_node->length = max_bus - secondary_bus + 1;
+ dbg("found bus_node(base, length) = %x, %x\n",
+ bus_node->base, bus_node->length);
+			dbg("populated slot = %d\n", populated_slot);
+ if (!populated_slot) {
+ bus_node->next = ctrl->bus_head;
+ ctrl->bus_head = bus_node;
+ } else {
+ bus_node->next = func->bus_head;
+ func->bus_head = bus_node;
+ }
+ }
+
+ i--;
+ one_slot += sizeof (struct slot_rt);
+ }
+
+ /* If all of the following fail, we don't have any resources for
+ * hot plug add
+ */
+ rc = 1;
+ rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
+ rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
+ rc &= cpqhp_resource_sort_and_combine(&(ctrl->io_head));
+ rc &= cpqhp_resource_sort_and_combine(&(ctrl->bus_head));
+
+ return rc;
+}
+
+
+/*
+ * cpqhp_return_board_resources
+ *
+ * this routine returns all resources allocated to a board to
+ * the available pool.
+ *
+ * returns 0 if success
+ */
+int cpqhp_return_board_resources(struct pci_func *func, struct resource_lists *resources)
+{
+ int rc = 0;
+ struct pci_resource *node;
+ struct pci_resource *t_node;
+ dbg("%s\n", __func__);
+
+ if (!func)
+ return 1;
+
+ node = func->io_head;
+ func->io_head = NULL;
+ while (node) {
+ t_node = node->next;
+ return_resource(&(resources->io_head), node);
+ node = t_node;
+ }
+
+ node = func->mem_head;
+ func->mem_head = NULL;
+ while (node) {
+ t_node = node->next;
+ return_resource(&(resources->mem_head), node);
+ node = t_node;
+ }
+
+ node = func->p_mem_head;
+ func->p_mem_head = NULL;
+ while (node) {
+ t_node = node->next;
+ return_resource(&(resources->p_mem_head), node);
+ node = t_node;
+ }
+
+ node = func->bus_head;
+ func->bus_head = NULL;
+ while (node) {
+ t_node = node->next;
+ return_resource(&(resources->bus_head), node);
+ node = t_node;
+ }
+
+ rc |= cpqhp_resource_sort_and_combine(&(resources->mem_head));
+ rc |= cpqhp_resource_sort_and_combine(&(resources->p_mem_head));
+ rc |= cpqhp_resource_sort_and_combine(&(resources->io_head));
+ rc |= cpqhp_resource_sort_and_combine(&(resources->bus_head));
+
+ return rc;
+}
+
+
+/*
+ * cpqhp_destroy_resource_list
+ *
+ * Puts node back in the resource list pointed to by head
+ */
+void cpqhp_destroy_resource_list (struct resource_lists *resources)
+{
+ struct pci_resource *res, *tres;
+
+ res = resources->io_head;
+ resources->io_head = NULL;
+
+ while (res) {
+ tres = res;
+ res = res->next;
+ kfree(tres);
+ }
+
+ res = resources->mem_head;
+ resources->mem_head = NULL;
+
+ while (res) {
+ tres = res;
+ res = res->next;
+ kfree(tres);
+ }
+
+ res = resources->p_mem_head;
+ resources->p_mem_head = NULL;
+
+ while (res) {
+ tres = res;
+ res = res->next;
+ kfree(tres);
+ }
+
+ res = resources->bus_head;
+ resources->bus_head = NULL;
+
+ while (res) {
+ tres = res;
+ res = res->next;
+ kfree(tres);
+ }
+}
+
+
+/*
+ * cpqhp_destroy_board_resources
+ *
+ * Puts node back in the resource list pointed to by head
+ */
+void cpqhp_destroy_board_resources (struct pci_func *func)
+{
+ struct pci_resource *res, *tres;
+
+ res = func->io_head;
+ func->io_head = NULL;
+
+ while (res) {
+ tres = res;
+ res = res->next;
+ kfree(tres);
+ }
+
+ res = func->mem_head;
+ func->mem_head = NULL;
+
+ while (res) {
+ tres = res;
+ res = res->next;
+ kfree(tres);
+ }
+
+ res = func->p_mem_head;
+ func->p_mem_head = NULL;
+
+ while (res) {
+ tres = res;
+ res = res->next;
+ kfree(tres);
+ }
+
+ res = func->bus_head;
+ func->bus_head = NULL;
+
+ while (res) {
+ tres = res;
+ res = res->next;
+ kfree(tres);
+ }
+}
diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c
new file mode 100644
index 000000000..d81648f71
--- /dev/null
+++ b/drivers/pci/hotplug/cpqphp_sysfs.c
@@ -0,0 +1,222 @@
+/*
+ * Compaq Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001,2003 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include "cpqphp.h"
+
+static DEFINE_MUTEX(cpqphp_mutex);
+
+static int show_ctrl(struct controller *ctrl, char *buf)
+{
+ char *out = buf;
+ int index;
+ struct pci_resource *res;
+
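+	/* dump at most 11 entries from each free-resource list */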
+	out += sprintf(out, "Free resources: memory\n");
+ index = 11;
+ res = ctrl->mem_head;
+ while (res && index--) {
+ out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length);
+ res = res->next;
+ }
+ out += sprintf(out, "Free resources: prefetchable memory\n");
+ index = 11;
+ res = ctrl->p_mem_head;
+ while (res && index--) {
+ out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length);
+ res = res->next;
+ }
+ out += sprintf(out, "Free resources: IO\n");
+ index = 11;
+ res = ctrl->io_head;
+ while (res && index--) {
+ out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length);
+ res = res->next;
+ }
+ out += sprintf(out, "Free resources: bus numbers\n");
+ index = 11;
+ res = ctrl->bus_head;
+ while (res && index--) {
+ out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length);
+ res = res->next;
+ }
+
+ return out - buf;
+}
+
+static int show_dev (struct controller *ctrl, char *buf)
+{
+ char *out = buf;
+ int index;
+ struct pci_resource *res;
+ struct pci_func *new_slot;
+ struct slot *slot;
+
+ slot = ctrl->slot;
+
+ while (slot) {
+ new_slot = cpqhp_slot_find(slot->bus, slot->device, 0);
+ if (!new_slot)
+ break;
+ out += sprintf(out, "assigned resources: memory\n");
+ index = 11;
+ res = new_slot->mem_head;
+ while (res && index--) {
+ out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length);
+ res = res->next;
+ }
+ out += sprintf(out, "assigned resources: prefetchable memory\n");
+ index = 11;
+ res = new_slot->p_mem_head;
+ while (res && index--) {
+ out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length);
+ res = res->next;
+ }
+ out += sprintf(out, "assigned resources: IO\n");
+ index = 11;
+ res = new_slot->io_head;
+ while (res && index--) {
+ out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length);
+ res = res->next;
+ }
+ out += sprintf(out, "assigned resources: bus numbers\n");
+ index = 11;
+ res = new_slot->bus_head;
+ while (res && index--) {
+ out += sprintf(out, "start = %8.8x, length = %8.8x\n", res->base, res->length);
+ res = res->next;
+ }
+		slot = slot->next;
+ }
+
+ return out - buf;
+}
+
+static int spew_debug_info(struct controller *ctrl, char *data, int size)
+{
+ int used;
+
+	/* concatenate the controller and device dumps, returning the
+	 * number of bytes used
+	 */
+	used = show_ctrl(ctrl, data);
+	used += show_dev(ctrl, &data[used]);
+	return used;
+}
+
+struct ctrl_dbg {
+ int size;
+ char *data;
+ struct controller *ctrl;
+};
+
+#define MAX_OUTPUT (4*PAGE_SIZE)
+
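+/* open() snapshots the controller's debug info into a buffer; read()
+ * and lseek() then operate on that snapshot
+ */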
+static int open(struct inode *inode, struct file *file)
+{
+ struct controller *ctrl = inode->i_private;
+ struct ctrl_dbg *dbg;
+ int retval = -ENOMEM;
+
+ mutex_lock(&cpqphp_mutex);
+ dbg = kmalloc(sizeof(*dbg), GFP_KERNEL);
+ if (!dbg)
+ goto exit;
+ dbg->data = kmalloc(MAX_OUTPUT, GFP_KERNEL);
+ if (!dbg->data) {
+ kfree(dbg);
+ goto exit;
+ }
+ dbg->size = spew_debug_info(ctrl, dbg->data, MAX_OUTPUT);
+ file->private_data = dbg;
+ retval = 0;
+exit:
+ mutex_unlock(&cpqphp_mutex);
+ return retval;
+}
+
+static loff_t lseek(struct file *file, loff_t off, int whence)
+{
+ struct ctrl_dbg *dbg = file->private_data;
+ return fixed_size_llseek(file, off, whence, dbg->size);
+}
+
+static ssize_t read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct ctrl_dbg *dbg = file->private_data;
+ return simple_read_from_buffer(buf, nbytes, ppos, dbg->data, dbg->size);
+}
+
+static int release(struct inode *inode, struct file *file)
+{
+ struct ctrl_dbg *dbg = file->private_data;
+
+ kfree(dbg->data);
+ kfree(dbg);
+ return 0;
+}
+
+static const struct file_operations debug_ops = {
+ .owner = THIS_MODULE,
+ .open = open,
+ .llseek = lseek,
+ .read = read,
+ .release = release,
+};
+
+static struct dentry *root;
+
+void cpqhp_initialize_debugfs(void)
+{
+ if (!root)
+ root = debugfs_create_dir("cpqhp", NULL);
+}
+
+void cpqhp_shutdown_debugfs(void)
+{
+ debugfs_remove(root);
+}
+
+void cpqhp_create_debugfs_files(struct controller *ctrl)
+{
+ ctrl->dentry = debugfs_create_file(dev_name(&ctrl->pci_dev->dev),
+ S_IRUGO, root, ctrl, &debug_ops);
+}
+
+void cpqhp_remove_debugfs_files(struct controller *ctrl)
+{
+ debugfs_remove(ctrl->dentry);
+ ctrl->dentry = NULL;
+}
+
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
new file mode 100644
index 000000000..e3e46a7b3
--- /dev/null
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -0,0 +1,760 @@
+#ifndef __IBMPHP_H
+#define __IBMPHP_H
+
+/*
+ * IBM Hot Plug Controller Driver
+ *
+ * Written By: Jyoti Shah, Tong Yu, Irene Zubarev, IBM Corporation
+ *
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001-2003 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <gregkh@us.ibm.com>
+ *
+ */
+
+#include <linux/pci_hotplug.h>
+
+extern int ibmphp_debug;
+
+#if !defined(MODULE)
+ #define MY_NAME "ibmphpd"
+#else
+ #define MY_NAME THIS_MODULE->name
+#endif
+#define debug(fmt, arg...) do { if (ibmphp_debug == 1) printk(KERN_DEBUG "%s: " fmt , MY_NAME , ## arg); } while (0)
+#define debug_pci(fmt, arg...) do { if (ibmphp_debug) printk(KERN_DEBUG "%s: " fmt , MY_NAME , ## arg); } while (0)
+#define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg)
+#define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg)
+#define warn(format, arg...) printk(KERN_WARNING "%s: " format , MY_NAME , ## arg)
+
+
+/* EBDA stuff */
+
+/***********************************************************
+* SLOT CAPABILITY *
+***********************************************************/
+
+#define EBDA_SLOT_133_MAX 0x20
+#define EBDA_SLOT_100_MAX 0x10
+#define EBDA_SLOT_66_MAX 0x02
+#define EBDA_SLOT_PCIX_CAP 0x08
+
+
+/************************************************************
+* RESOURCE TYPE *
+************************************************************/
+
+#define EBDA_RSRC_TYPE_MASK 0x03
+#define EBDA_IO_RSRC_TYPE 0x00
+#define EBDA_MEM_RSRC_TYPE 0x01
+#define EBDA_PFM_RSRC_TYPE 0x03
+#define EBDA_RES_RSRC_TYPE 0x02
+
+
+/*************************************************************
+* IO RESTRICTION TYPE *
+*************************************************************/
+
+#define EBDA_IO_RESTRI_MASK 0x0c
+#define EBDA_NO_RESTRI 0x00
+#define EBDA_AVO_VGA_ADDR 0x04
+#define EBDA_AVO_VGA_ADDR_AND_ALIA 0x08
+#define EBDA_AVO_ISA_ADDR 0x0c
+
+
+/**************************************************************
+* DEVICE TYPE DEF *
+**************************************************************/
+
+#define EBDA_DEV_TYPE_MASK 0x10
+#define EBDA_PCI_DEV 0x10
+#define EBDA_NON_PCI_DEV 0x00
+
+
+/***************************************************************
+* PRIMARY DEF DEFINITION *
+***************************************************************/
+
+#define EBDA_PRI_DEF_MASK 0x20
+#define EBDA_PRI_PCI_BUS_INFO 0x20
+#define EBDA_NORM_DEV_RSRC_INFO 0x00
+
+
+//--------------------------------------------------------------
+// RIO TABLE DATA STRUCTURE
+//--------------------------------------------------------------
+
+struct rio_table_hdr {
+ u8 ver_num;
+ u8 scal_count;
+ u8 riodev_count;
+ u16 offset;
+};
+
+//-------------------------------------------------------------
+// SCALABILITY DETAIL
+//-------------------------------------------------------------
+
+struct scal_detail {
+ u8 node_id;
+ u32 cbar;
+ u8 port0_node_connect;
+ u8 port0_port_connect;
+ u8 port1_node_connect;
+ u8 port1_port_connect;
+ u8 port2_node_connect;
+ u8 port2_port_connect;
+ u8 chassis_num;
+// struct list_head scal_detail_list;
+};
+
+//--------------------------------------------------------------
+// RIO DETAIL
+//--------------------------------------------------------------
+
+struct rio_detail {
+ u8 rio_node_id;
+ u32 bbar;
+ u8 rio_type;
+ u8 owner_id;
+ u8 port0_node_connect;
+ u8 port0_port_connect;
+ u8 port1_node_connect;
+ u8 port1_port_connect;
+ u8 first_slot_num;
+ u8 status;
+ u8 wpindex;
+ u8 chassis_num;
+ struct list_head rio_detail_list;
+};
+
+struct opt_rio {
+ u8 rio_type;
+ u8 chassis_num;
+ u8 first_slot_num;
+ u8 middle_num;
+ struct list_head opt_rio_list;
+};
+
+struct opt_rio_lo {
+ u8 rio_type;
+ u8 chassis_num;
+ u8 first_slot_num;
+ u8 middle_num;
+ u8 pack_count;
+ struct list_head opt_rio_lo_list;
+};
+
+/****************************************************************
+* HPC DESCRIPTOR NODE *
+****************************************************************/
+
+struct ebda_hpc_list {
+ u8 format;
+ u16 num_ctlrs;
+ short phys_addr;
+// struct list_head ebda_hpc_list;
+};
+/*****************************************************************
+* IN HPC DATA STRUCTURE, THE ASSOCIATED SLOT AND BUS *
+* STRUCTURE *
+*****************************************************************/
+
+struct ebda_hpc_slot {
+ u8 slot_num;
+ u32 slot_bus_num;
+ u8 ctl_index;
+ u8 slot_cap;
+};
+
+struct ebda_hpc_bus {
+ u32 bus_num;
+ u8 slots_at_33_conv;
+ u8 slots_at_66_conv;
+ u8 slots_at_66_pcix;
+ u8 slots_at_100_pcix;
+ u8 slots_at_133_pcix;
+};
+
+
+/********************************************************************
+* THREE TYPE OF HOT PLUG CONTROLLER *
+********************************************************************/
+
+struct isa_ctlr_access {
+ u16 io_start;
+ u16 io_end;
+};
+
+struct pci_ctlr_access {
+ u8 bus;
+ u8 dev_fun;
+};
+
+struct wpeg_i2c_ctlr_access {
+ ulong wpegbbar;
+ u8 i2c_addr;
+};
+
+#define HPC_DEVICE_ID 0x0246
+#define HPC_SUBSYSTEM_ID 0x0247
+#define HPC_PCI_OFFSET 0x40
+/*************************************************************************
+* RSRC DESCRIPTOR NODE                                                   *
+*************************************************************************/
+
+struct ebda_rsrc_list {
+ u8 format;
+ u16 num_entries;
+ u16 phys_addr;
+ struct ebda_rsrc_list *next;
+};
+
+
+/***************************************************************************
+* PCI RSRC NODE *
+***************************************************************************/
+
+struct ebda_pci_rsrc {
+ u8 rsrc_type;
+ u8 bus_num;
+ u8 dev_fun;
+ u32 start_addr;
+ u32 end_addr;
+ u8 marked; /* for NVRAM */
+ struct list_head ebda_pci_rsrc_list;
+};
+
+
+/***********************************************************
+* BUS_INFO DATA STRUCTURE                                  *
+***********************************************************/
+
+struct bus_info {
+ u8 slot_min;
+ u8 slot_max;
+ u8 slot_count;
+ u8 busno;
+ u8 controller_id;
+ u8 current_speed;
+ u8 current_bus_mode;
+ u8 index;
+ u8 slots_at_33_conv;
+ u8 slots_at_66_conv;
+ u8 slots_at_66_pcix;
+ u8 slots_at_100_pcix;
+ u8 slots_at_133_pcix;
+ struct list_head bus_info_list;
+};
+
+
+/***********************************************************
+* GLOBAL VARIABLES *
+***********************************************************/
+extern struct list_head ibmphp_ebda_pci_rsrc_head;
+extern struct list_head ibmphp_slot_head;
+/***********************************************************
+* FUNCTION PROTOTYPES *
+***********************************************************/
+
+void ibmphp_free_ebda_hpc_queue(void);
+int ibmphp_access_ebda(void);
+struct slot *ibmphp_get_slot_from_physical_num(u8);
+int ibmphp_get_total_hp_slots(void);
+void ibmphp_free_ibm_slot(struct slot *);
+void ibmphp_free_bus_info_queue(void);
+void ibmphp_free_ebda_pci_rsrc_queue(void);
+struct bus_info *ibmphp_find_same_bus_num(u32);
+int ibmphp_get_bus_index(u8);
+u16 ibmphp_get_total_controllers(void);
+int ibmphp_register_pci(void);
+
+/* passed parameters */
+#define MEM 0
+#define IO 1
+#define PFMEM 2
+
+/* bit masks */
+#define RESTYPE 0x03
+#define IOMASK 0x00 /* will need to take its complement */
+#define MMASK 0x01
+#define PFMASK 0x03
+#define PCIDEVMASK 0x10 /* we should always have PCI devices */
+#define PRIMARYBUSMASK 0x20
+
+/* pci specific defines */
+#define PCI_VENDOR_ID_NOTVALID 0xFFFF
+#define PCI_HEADER_TYPE_MULTIDEVICE 0x80
+#define PCI_HEADER_TYPE_MULTIBRIDGE 0x81
+
+#define LATENCY 0x64
+#define CACHE 64
+#define DEVICEENABLE 0x015F /* CPQ has 0x0157 */
+
+#define IOBRIDGE 0x1000 /* 4k */
+#define MEMBRIDGE 0x100000 /* 1M */
+
+/* irqs */
+#define SCSI_IRQ 0x09
+#define LAN_IRQ 0x0A
+#define OTHER_IRQ 0x0B
+
+/* Data Structures */
+
+/* type is of the form x x xx xx
+ *                     | |  |  |_ 00 - I/O, 01 - Memory, 11 - PFMemory
+ *                     | |  |____ 00 - No Restrictions, 01 - Avoid VGA,
+ *                     | |        10 - Avoid VGA and their aliases, 11 - Avoid ISA
+ *                     | |______ 1 - PCI device, 0 - non pci device
+ *                     |________ 1 - Primary PCI Bus Information (0 if Normal device)
+ * the IO restrictions [2:3] are only for primary buses
+ */
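+
+/* Illustrative decode of such a type byte using the masks above (a sketch
+ * only, not driver code; 'rsrc' here is a hypothetical pointer to a
+ * struct ebda_pci_rsrc):
+ *
+ *	u8 t = rsrc->rsrc_type;
+ *	if (t & PRIMARYBUSMASK) ...	// primary bus information
+ *	if (t & PCIDEVMASK) ...		// belongs to a PCI device
+ *	switch (t & RESTYPE) {		// bits [0:1]
+ *	case IOMASK:	...		// 00 - I/O
+ *	case MMASK:	...		// 01 - Memory
+ *	case PFMASK:	...		// 11 - PFMemory
+ *	}
+ */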
+
+
+/* we need this struct because there could be several resource blocks
+ * allocated per primary bus in the EBDA
+ */
+struct range_node {
+ int rangeno;
+ u32 start;
+ u32 end;
+ struct range_node *next;
+};
+
+struct bus_node {
+ u8 busno;
+ int noIORanges;
+ struct range_node *rangeIO;
+ int noMemRanges;
+ struct range_node *rangeMem;
+ int noPFMemRanges;
+ struct range_node *rangePFMem;
+ int needIOUpdate;
+ int needMemUpdate;
+ int needPFMemUpdate;
+ struct resource_node *firstIO; /* first IO resource on the Bus */
+ struct resource_node *firstMem; /* first memory resource on the Bus */
+ struct resource_node *firstPFMem; /* first prefetchable memory resource on the Bus */
+ struct resource_node *firstPFMemFromMem; /* when we run out of available PFMem, take from Mem */
+ struct list_head bus_list;
+};
+
+struct resource_node {
+ int rangeno;
+ u8 busno;
+ u8 devfunc;
+ u32 start;
+ u32 end;
+ u32 len;
+ int type; /* MEM, IO, PFMEM */
+ u8 fromMem; /* this is to indicate that the range is from
+ * the Memory bucket rather than from PFMem */
+ struct resource_node *next;
+ struct resource_node *nextRange; /* for the other mem range on bus */
+};
+
+struct res_needed {
+ u32 mem;
+ u32 pfmem;
+ u32 io;
+ u8 not_correct; /* needed for return */
+ int devices[32]; /* for device numbers behind this bridge */
+};
+
+/* functions */
+
+int ibmphp_rsrc_init(void);
+int ibmphp_add_resource(struct resource_node *);
+int ibmphp_remove_resource(struct resource_node *);
+int ibmphp_find_resource(struct bus_node *, u32, struct resource_node **, int);
+int ibmphp_check_resource(struct resource_node *, u8);
+int ibmphp_remove_bus(struct bus_node *, u8);
+void ibmphp_free_resources(void);
+int ibmphp_add_pfmem_from_mem(struct resource_node *);
+struct bus_node *ibmphp_find_res_bus(u8);
+void ibmphp_print_test(void); /* for debugging purposes */
+
+void ibmphp_hpc_initvars(void);
+int ibmphp_hpc_readslot(struct slot *, u8, u8 *);
+int ibmphp_hpc_writeslot(struct slot *, u8);
+void ibmphp_lock_operations(void);
+void ibmphp_unlock_operations(void);
+int ibmphp_hpc_start_poll_thread(void);
+void ibmphp_hpc_stop_poll_thread(void);
+
+//----------------------------------------------------------------------------
+
+
+//----------------------------------------------------------------------------
+// HPC return codes
+//----------------------------------------------------------------------------
+#define HPC_ERROR 0xFF
+
+//-----------------------------------------------------------------------------
+// BUS INFO
+//-----------------------------------------------------------------------------
+#define BUS_SPEED 0x30
+#define BUS_MODE 0x40
+#define BUS_MODE_PCIX 0x01
+#define BUS_MODE_PCI 0x00
+#define BUS_SPEED_2 0x20
+#define BUS_SPEED_1 0x10
+#define BUS_SPEED_33 0x00
+#define BUS_SPEED_66 0x01
+#define BUS_SPEED_100 0x02
+#define BUS_SPEED_133 0x03
+#define BUS_SPEED_66PCIX 0x04
+#define BUS_SPEED_66UNKNOWN 0x05
+#define BUS_STATUS_AVAILABLE 0x01
+#define BUS_CONTROL_AVAILABLE 0x02
+#define SLOT_LATCH_REGS_SUPPORTED 0x10
+
+#define PRGM_MODEL_REV_LEVEL 0xF0
+#define MAX_ADAPTER_NONE 0x09
+
+//----------------------------------------------------------------------------
+// HPC 'write' operations/commands
+//----------------------------------------------------------------------------
+// Command                    Code   State     Write to reg
+//                                   Machine   at index
+//-------------------------   ----   -------   ------------
+#define HPC_CTLR_ENABLEIRQ 0x00 // N 15
+#define HPC_CTLR_DISABLEIRQ 0x01 // N 15
+#define HPC_SLOT_OFF 0x02 // Y 0-14
+#define HPC_SLOT_ON 0x03 // Y 0-14
+#define HPC_SLOT_ATTNOFF 0x04 // N 0-14
+#define HPC_SLOT_ATTNON 0x05 // N 0-14
+#define HPC_CTLR_CLEARIRQ 0x06 // N 15
+#define HPC_CTLR_RESET 0x07 // Y 15
+#define HPC_CTLR_IRQSTEER 0x08 // N 15
+#define HPC_BUS_33CONVMODE 0x09 // Y 31-34
+#define HPC_BUS_66CONVMODE 0x0A // Y 31-34
+#define HPC_BUS_66PCIXMODE 0x0B // Y 31-34
+#define HPC_BUS_100PCIXMODE 0x0C // Y 31-34
+#define HPC_BUS_133PCIXMODE 0x0D // Y 31-34
+#define HPC_ALLSLOT_OFF 0x11 // Y 15
+#define HPC_ALLSLOT_ON 0x12 // Y 15
+#define HPC_SLOT_BLINKLED 0x13 // N 0-14
+
+//----------------------------------------------------------------------------
+// read commands
+//----------------------------------------------------------------------------
+#define READ_SLOTSTATUS 0x01
+#define READ_EXTSLOTSTATUS 0x02
+#define READ_BUSSTATUS 0x03
+#define READ_CTLRSTATUS 0x04
+#define READ_ALLSTAT 0x05
+#define READ_ALLSLOT 0x06
+#define READ_SLOTLATCHLOWREG 0x07
+#define READ_REVLEVEL 0x08
+#define READ_HPCOPTIONS 0x09
+//----------------------------------------------------------------------------
+// slot status
+//----------------------------------------------------------------------------
+#define HPC_SLOT_POWER 0x01
+#define HPC_SLOT_CONNECT 0x02
+#define HPC_SLOT_ATTN 0x04
+#define HPC_SLOT_PRSNT2 0x08
+#define HPC_SLOT_PRSNT1 0x10
+#define HPC_SLOT_PWRGD 0x20
+#define HPC_SLOT_BUS_SPEED 0x40
+#define HPC_SLOT_LATCH 0x80
+
+//----------------------------------------------------------------------------
+// HPC_SLOT_POWER status return codes
+//----------------------------------------------------------------------------
+#define HPC_SLOT_POWER_OFF 0x00
+#define HPC_SLOT_POWER_ON 0x01
+
+//----------------------------------------------------------------------------
+// HPC_SLOT_CONNECT status return codes
+//----------------------------------------------------------------------------
+#define HPC_SLOT_CONNECTED 0x00
+#define HPC_SLOT_DISCONNECTED 0x01
+
+//----------------------------------------------------------------------------
+// HPC_SLOT_ATTN status return codes
+//----------------------------------------------------------------------------
+#define HPC_SLOT_ATTN_OFF 0x00
+#define HPC_SLOT_ATTN_ON 0x01
+#define HPC_SLOT_ATTN_BLINK 0x02
+
+//----------------------------------------------------------------------------
+// HPC_SLOT_PRSNT status return codes
+//----------------------------------------------------------------------------
+#define HPC_SLOT_EMPTY 0x00
+#define HPC_SLOT_PRSNT_7 0x01
+#define HPC_SLOT_PRSNT_15 0x02
+#define HPC_SLOT_PRSNT_25 0x03
+
+//----------------------------------------------------------------------------
+// HPC_SLOT_PWRGD status return codes
+//----------------------------------------------------------------------------
+#define HPC_SLOT_PWRGD_FAULT_NONE 0x00
+#define HPC_SLOT_PWRGD_GOOD 0x01
+
+//----------------------------------------------------------------------------
+// HPC_SLOT_BUS_SPEED status return codes
+//----------------------------------------------------------------------------
+#define HPC_SLOT_BUS_SPEED_OK 0x00
+#define HPC_SLOT_BUS_SPEED_MISM 0x01
+
+//----------------------------------------------------------------------------
+// HPC_SLOT_LATCH status return codes
+//----------------------------------------------------------------------------
+#define HPC_SLOT_LATCH_OPEN 0x01 // NOTE : in PCI spec bit off = open
+#define HPC_SLOT_LATCH_CLOSED 0x00 // NOTE : in PCI spec bit on = closed
+
+
+//----------------------------------------------------------------------------
+// extended slot status
+//----------------------------------------------------------------------------
+#define HPC_SLOT_PCIX 0x01
+#define HPC_SLOT_SPEED1 0x02
+#define HPC_SLOT_SPEED2 0x04
+#define HPC_SLOT_BLINK_ATTN 0x08
+#define HPC_SLOT_RSRVD1 0x10
+#define HPC_SLOT_RSRVD2 0x20
+#define HPC_SLOT_BUS_MODE 0x40
+#define HPC_SLOT_RSRVD3 0x80
+
+//----------------------------------------------------------------------------
+// HPC_XSLOT_PCIX_CAP status return codes
+//----------------------------------------------------------------------------
+#define HPC_SLOT_PCIX_NO 0x00
+#define HPC_SLOT_PCIX_YES 0x01
+
+//----------------------------------------------------------------------------
+// HPC_XSLOT_SPEED status return codes
+//----------------------------------------------------------------------------
+#define HPC_SLOT_SPEED_33 0x00
+#define HPC_SLOT_SPEED_66 0x01
+#define HPC_SLOT_SPEED_133 0x02
+
+//----------------------------------------------------------------------------
+// HPC_XSLOT_ATTN_BLINK status return codes
+//----------------------------------------------------------------------------
+#define HPC_SLOT_ATTN_BLINK_OFF 0x00
+#define HPC_SLOT_ATTN_BLINK_ON 0x01
+
+//----------------------------------------------------------------------------
+// HPC_XSLOT_BUS_MODE status return codes
+//----------------------------------------------------------------------------
+#define HPC_SLOT_BUS_MODE_OK 0x00
+#define HPC_SLOT_BUS_MODE_MISM 0x01
+
+//----------------------------------------------------------------------------
+// Controller status
+//----------------------------------------------------------------------------
+#define HPC_CTLR_WORKING 0x01
+#define HPC_CTLR_FINISHED 0x02
+#define HPC_CTLR_RESULT0 0x04
+#define HPC_CTLR_RESULT1 0x08
+#define HPC_CTLR_RESULT2 0x10
+#define HPC_CTLR_RESULT3 0x20
+#define HPC_CTLR_IRQ_ROUTG 0x40
+#define HPC_CTLR_IRQ_PENDG 0x80
+
+//----------------------------------------------------------------------------
+// HPC_CTLR_WORKING status return codes
+//----------------------------------------------------------------------------
+#define HPC_CTLR_WORKING_NO 0x00
+#define HPC_CTLR_WORKING_YES 0x01
+
+//----------------------------------------------------------------------------
+// HPC_CTLR_FINISHED status return codes
+//----------------------------------------------------------------------------
+#define HPC_CTLR_FINISHED_NO 0x00
+#define HPC_CTLR_FINISHED_YES 0x01
+
+//----------------------------------------------------------------------------
+// HPC_CTLR_RESULT status return codes
+//----------------------------------------------------------------------------
+#define HPC_CTLR_RESULT_SUCCESS 0x00
+#define HPC_CTLR_RESULT_FAILED 0x01
+#define HPC_CTLR_RESULT_RSVD 0x02
+#define HPC_CTLR_RESULT_NORESP 0x03
+
+
+//----------------------------------------------------------------------------
+// macro for slot info
+//----------------------------------------------------------------------------
+#define SLOT_POWER(s) ((u8) ((s & HPC_SLOT_POWER) \
+ ? HPC_SLOT_POWER_ON : HPC_SLOT_POWER_OFF))
+
+#define SLOT_CONNECT(s) ((u8) ((s & HPC_SLOT_CONNECT) \
+ ? HPC_SLOT_DISCONNECTED : HPC_SLOT_CONNECTED))
+
+#define SLOT_ATTN(s,es) ((u8) ((es & HPC_SLOT_BLINK_ATTN) \
+ ? HPC_SLOT_ATTN_BLINK \
+ : ((s & HPC_SLOT_ATTN) ? HPC_SLOT_ATTN_ON : HPC_SLOT_ATTN_OFF)))
+
+#define SLOT_PRESENT(s) ((u8) ((s & HPC_SLOT_PRSNT1) \
+ ? ((s & HPC_SLOT_PRSNT2) ? HPC_SLOT_EMPTY : HPC_SLOT_PRSNT_15) \
+ : ((s & HPC_SLOT_PRSNT2) ? HPC_SLOT_PRSNT_25 : HPC_SLOT_PRSNT_7)))
+
+#define SLOT_PWRGD(s) ((u8) ((s & HPC_SLOT_PWRGD) \
+ ? HPC_SLOT_PWRGD_GOOD : HPC_SLOT_PWRGD_FAULT_NONE))
+
+#define SLOT_BUS_SPEED(s) ((u8) ((s & HPC_SLOT_BUS_SPEED) \
+ ? HPC_SLOT_BUS_SPEED_MISM : HPC_SLOT_BUS_SPEED_OK))
+
+#define SLOT_LATCH(s) ((u8) ((s & HPC_SLOT_LATCH) \
+ ? HPC_SLOT_LATCH_CLOSED : HPC_SLOT_LATCH_OPEN))
+
+#define SLOT_PCIX(es) ((u8) ((es & HPC_SLOT_PCIX) \
+ ? HPC_SLOT_PCIX_YES : HPC_SLOT_PCIX_NO))
+
+#define SLOT_SPEED(es) ((u8) ((es & HPC_SLOT_SPEED2) \
+ ? ((es & HPC_SLOT_SPEED1) ? HPC_SLOT_SPEED_133 \
+ : HPC_SLOT_SPEED_66) \
+ : HPC_SLOT_SPEED_33))
+
+#define SLOT_BUS_MODE(es) ((u8) ((es & HPC_SLOT_BUS_MODE) \
+ ? HPC_SLOT_BUS_MODE_MISM : HPC_SLOT_BUS_MODE_OK))
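+
+// Typical decode of the (status, ext_status) byte pair returned by
+// READ_SLOTSTATUS/READ_EXTSLOTSTATUS -- a sketch of the pattern the
+// get_*_status callbacks in ibmphp_core.c follow:
+//	ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, &s);
+//	ibmphp_hpc_readslot(pslot, READ_EXTSLOTSTATUS, &es);
+//	power = SLOT_PWRGD(s);		// PWRGD_GOOD or PWRGD_FAULT_NONE
+//	attn  = SLOT_ATTN(s, es);	// ATTN_OFF, ATTN_ON, or ATTN_BLINK
+//	speed = SLOT_SPEED(es);		// SPEED_33, SPEED_66, or SPEED_133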
+
+//--------------------------------------------------------------------------
+// macro for bus info
+//---------------------------------------------------------------------------
+#define CURRENT_BUS_SPEED(s) ((u8) (s & BUS_SPEED_2) \
+ ? ((s & BUS_SPEED_1) ? BUS_SPEED_133 : BUS_SPEED_100) \
+ : ((s & BUS_SPEED_1) ? BUS_SPEED_66 : BUS_SPEED_33))
+
+#define CURRENT_BUS_MODE(s) ((u8) (s & BUS_MODE) ? BUS_MODE_PCIX : BUS_MODE_PCI)
+
+#define READ_BUS_STATUS(s) ((u8) (s->options & BUS_STATUS_AVAILABLE))
+
+#define READ_BUS_MODE(s) ((s->revision & PRGM_MODEL_REV_LEVEL) >= 0x20)
+
+#define SET_BUS_STATUS(s) ((u8) (s->options & BUS_CONTROL_AVAILABLE))
+
+#define READ_SLOT_LATCH(s) ((u8) (s->options & SLOT_LATCH_REGS_SUPPORTED))
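+
+// For example, a bus status byte with both BUS_SPEED_2 and BUS_SPEED_1 set
+// decodes via CURRENT_BUS_SPEED() to BUS_SPEED_133; with neither bit set it
+// decodes to BUS_SPEED_33.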
+
+//----------------------------------------------------------------------------
+// macro for controller info
+//----------------------------------------------------------------------------
+#define CTLR_WORKING(c) ((u8) ((c & HPC_CTLR_WORKING) \
+ ? HPC_CTLR_WORKING_YES : HPC_CTLR_WORKING_NO))
+#define CTLR_FINISHED(c) ((u8) ((c & HPC_CTLR_FINISHED) \
+ ? HPC_CTLR_FINISHED_YES : HPC_CTLR_FINISHED_NO))
+#define CTLR_RESULT(c) ((u8) ((c & HPC_CTLR_RESULT1) \
+ ? ((c & HPC_CTLR_RESULT0) ? HPC_CTLR_RESULT_NORESP \
+ : HPC_CTLR_RESULT_RSVD) \
+ : ((c & HPC_CTLR_RESULT0) ? HPC_CTLR_RESULT_FAILED \
+ : HPC_CTLR_RESULT_SUCCESS)))
+
+// command that affect the state machine of HPC
+#define NEEDTOCHECK_CMDSTATUS(c) ((c == HPC_SLOT_OFF) || \
+ (c == HPC_SLOT_ON) || \
+ (c == HPC_CTLR_RESET) || \
+ (c == HPC_BUS_33CONVMODE) || \
+ (c == HPC_BUS_66CONVMODE) || \
+ (c == HPC_BUS_66PCIXMODE) || \
+ (c == HPC_BUS_100PCIXMODE) || \
+ (c == HPC_BUS_133PCIXMODE) || \
+ (c == HPC_ALLSLOT_OFF) || \
+ (c == HPC_ALLSLOT_ON))
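+
+// Usage sketch -- the pattern power_on()/power_off() in ibmphp_core.c follow
+// for these commands: issue the command, then check the result bits latched
+// in the controller status:
+//	rc = ibmphp_hpc_writeslot(slot_cur, HPC_SLOT_ON);
+//	if (!rc && CTLR_RESULT(slot_cur->ctrl->status))
+//		err("command not completed successfully\n");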
+
+
+/* Core part of the driver */
+
+#define ENABLE 1
+#define DISABLE 0
+
+#define CARD_INFO 0x07
+#define PCIX133 0x07
+#define PCIX66 0x05
+#define PCI66 0x04
+
+extern struct pci_bus *ibmphp_pci_bus;
+
+/* Variables */
+
+struct pci_func {
+ struct pci_dev *dev; /* from the OS */
+ u8 busno;
+ u8 device;
+ u8 function;
+ struct resource_node *io[6];
+ struct resource_node *mem[6];
+ struct resource_node *pfmem[6];
+ struct pci_func *next;
+ int devices[32]; /* for bridge config */
+ u8 irq[4]; /* for interrupt config */
+ u8 bus; /* flag for unconfiguring, to say if PPB */
+};
+
+struct slot {
+ u8 bus;
+ u8 device;
+ u8 number;
+ u8 real_physical_slot_num;
+ u32 capabilities;
+ u8 supported_speed;
+ u8 supported_bus_mode;
+ u8 flag; /* this is for disable slot and polling */
+ u8 ctlr_index;
+ struct hotplug_slot *hotplug_slot;
+ struct controller *ctrl;
+ struct pci_func *func;
+ u8 irq[4];
+ int bit_mode; /* 0 = 32, 1 = 64 */
+ struct bus_info *bus_on;
+ struct list_head ibm_slot_list;
+ u8 status;
+ u8 ext_status;
+ u8 busstatus;
+};
+
+struct controller {
+ struct ebda_hpc_slot *slots;
+ struct ebda_hpc_bus *buses;
+ struct pci_dev *ctrl_dev; /* in case where controller is PCI */
+ u8 starting_slot_num; /* starting and ending slot #'s this ctrl controls*/
+ u8 ending_slot_num;
+ u8 revision;
+ u8 options; /* which options HPC supports */
+ u8 status;
+ u8 ctlr_id;
+ u8 slot_count;
+ u8 bus_count;
+ u8 ctlr_relative_id;
+ u32 irq;
+ union {
+ struct isa_ctlr_access isa_ctlr;
+ struct pci_ctlr_access pci_ctlr;
+ struct wpeg_i2c_ctlr_access wpeg_ctlr;
+ } u;
+ u8 ctlr_type;
+ struct list_head ebda_hpc_list;
+};
+
+/* Functions */
+
+int ibmphp_init_devno(struct slot **); /* This function is called from EBDA, so it must not be static */
+int ibmphp_do_disable_slot(struct slot *slot_cur);
+int ibmphp_update_slot_info(struct slot *); /* This function is called from HPC, so it must not be static */
+int ibmphp_configure_card(struct pci_func *, u8);
+int ibmphp_unconfigure_card(struct slot **, int);
+extern struct hotplug_slot_ops ibmphp_hotplug_slot_ops;
+
+#endif //__IBMPHP_H
+
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
new file mode 100644
index 000000000..15302475f
--- /dev/null
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -0,0 +1,1384 @@
+/*
+ * IBM Hot Plug Controller Driver
+ *
+ * Written By: Chuck Cole, Jyoti Shah, Tong Yu, Irene Zubarev, IBM Corporation
+ *
+ * Copyright (C) 2001,2003 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001-2003 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <gregkh@us.ibm.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include "../pci.h"
+#include <asm/pci_x86.h> /* for struct irq_routing_table */
+#include <asm/io_apic.h>
+#include "ibmphp.h"
+
+#define attn_on(sl) ibmphp_hpc_writeslot (sl, HPC_SLOT_ATTNON)
+#define attn_off(sl) ibmphp_hpc_writeslot (sl, HPC_SLOT_ATTNOFF)
+#define attn_LED_blink(sl) ibmphp_hpc_writeslot (sl, HPC_SLOT_BLINKLED)
+#define get_ctrl_revision(sl, rev) ibmphp_hpc_readslot (sl, READ_REVLEVEL, rev)
+#define get_hpc_options(sl, opt) ibmphp_hpc_readslot (sl, READ_HPCOPTIONS, opt)
+
+#define DRIVER_VERSION "0.6"
+#define DRIVER_DESC "IBM Hot Plug PCI Controller Driver"
+
+int ibmphp_debug;
+
+static bool debug;
+module_param(debug, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC (debug, "Debugging mode enabled or not");
+MODULE_LICENSE ("GPL");
+MODULE_DESCRIPTION (DRIVER_DESC);
+
+struct pci_bus *ibmphp_pci_bus;
+static int max_slots;
+
+static int irqs[16]; /* PIC mode IRQs we're using so far (in case MPS
+ * tables don't provide default info for empty slots) */
+
+static int init_flag;
+
+/*
+static int get_max_adapter_speed_1 (struct hotplug_slot *, u8 *, u8);
+
+static inline int get_max_adapter_speed (struct hotplug_slot *hs, u8 *value)
+{
+ return get_max_adapter_speed_1 (hs, value, 1);
+}
+*/
+static inline int get_cur_bus_info(struct slot **sl)
+{
+ int rc = 1;
+ struct slot *slot_cur = *sl;
+
+ debug("options = %x\n", slot_cur->ctrl->options);
+ debug("revision = %x\n", slot_cur->ctrl->revision);
+
+ if (READ_BUS_STATUS(slot_cur->ctrl))
+ rc = ibmphp_hpc_readslot(slot_cur, READ_BUSSTATUS, NULL);
+
+ if (rc)
+ return rc;
+
+ slot_cur->bus_on->current_speed = CURRENT_BUS_SPEED(slot_cur->busstatus);
+ if (READ_BUS_MODE(slot_cur->ctrl))
+ slot_cur->bus_on->current_bus_mode =
+ CURRENT_BUS_MODE(slot_cur->busstatus);
+ else
+ slot_cur->bus_on->current_bus_mode = 0xFF;
+
+ debug("busstatus = %x, bus_speed = %x, bus_mode = %x\n",
+ slot_cur->busstatus,
+ slot_cur->bus_on->current_speed,
+ slot_cur->bus_on->current_bus_mode);
+
+ *sl = slot_cur;
+ return 0;
+}
+
+static inline int slot_update(struct slot **sl)
+{
+ int rc;
+ rc = ibmphp_hpc_readslot(*sl, READ_ALLSTAT, NULL);
+ if (rc)
+ return rc;
+ if (!init_flag)
+ rc = get_cur_bus_info(sl);
+ return rc;
+}
+
+static int __init get_max_slots (void)
+{
+ struct slot *slot_cur;
+ struct list_head *tmp;
+ u8 slot_count = 0;
+
+ list_for_each(tmp, &ibmphp_slot_head) {
+ slot_cur = list_entry(tmp, struct slot, ibm_slot_list);
+ /* sometimes the hot-pluggable slots start at 4 (not always at 1) */
+ slot_count = max(slot_count, slot_cur->number);
+ }
+ return slot_count;
+}
+
+/* This routine fills in the correct slot->device information for each slot.
+ * It is called during initialization of the slot structures. It also assigns
+ * interrupt numbers to each slot.
+ * Parameters: struct slot
+ * Returns 0 or errors
+ */
+int ibmphp_init_devno(struct slot **cur_slot)
+{
+ struct irq_routing_table *rtable;
+ int len;
+ int loop;
+ int i;
+
+ rtable = pcibios_get_irq_routing_table();
+ if (!rtable) {
+ err("no BIOS routing table...\n");
+ return -ENOMEM;
+ }
+
+ len = (rtable->size - sizeof(struct irq_routing_table)) /
+ sizeof(struct irq_info);
+
+ if (!len) {
+ kfree(rtable);
+ return -1;
+ }
+ for (loop = 0; loop < len; loop++) {
+ if ((*cur_slot)->number == rtable->slots[loop].slot &&
+ (*cur_slot)->bus == rtable->slots[loop].bus) {
+ (*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn);
+ for (i = 0; i < 4; i++)
+ (*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus,
+ (int) (*cur_slot)->device, i);
+
+ debug("(*cur_slot)->irq[0] = %x\n",
+ (*cur_slot)->irq[0]);
+ debug("(*cur_slot)->irq[1] = %x\n",
+ (*cur_slot)->irq[1]);
+ debug("(*cur_slot)->irq[2] = %x\n",
+ (*cur_slot)->irq[2]);
+ debug("(*cur_slot)->irq[3] = %x\n",
+ (*cur_slot)->irq[3]);
+
+ debug("rtable->exclusive_irqs = %x\n",
+ rtable->exclusive_irqs);
+ debug("rtable->slots[loop].irq[0].bitmap = %x\n",
+ rtable->slots[loop].irq[0].bitmap);
+ debug("rtable->slots[loop].irq[1].bitmap = %x\n",
+ rtable->slots[loop].irq[1].bitmap);
+ debug("rtable->slots[loop].irq[2].bitmap = %x\n",
+ rtable->slots[loop].irq[2].bitmap);
+ debug("rtable->slots[loop].irq[3].bitmap = %x\n",
+ rtable->slots[loop].irq[3].bitmap);
+
+ debug("rtable->slots[loop].irq[0].link = %x\n",
+ rtable->slots[loop].irq[0].link);
+ debug("rtable->slots[loop].irq[1].link = %x\n",
+ rtable->slots[loop].irq[1].link);
+ debug("rtable->slots[loop].irq[2].link = %x\n",
+ rtable->slots[loop].irq[2].link);
+ debug("rtable->slots[loop].irq[3].link = %x\n",
+ rtable->slots[loop].irq[3].link);
+ debug("end of init_devno\n");
+ kfree(rtable);
+ return 0;
+ }
+ }
+
+ kfree(rtable);
+ return -1;
+}
+
+static inline int power_on(struct slot *slot_cur)
+{
+ u8 cmd = HPC_SLOT_ON;
+ int retval;
+
+ retval = ibmphp_hpc_writeslot(slot_cur, cmd);
+ if (retval) {
+ err("power on failed\n");
+ return retval;
+ }
+ if (CTLR_RESULT(slot_cur->ctrl->status)) {
+ err("command not completed successfully in power_on\n");
+ return -EIO;
+ }
+ msleep(3000); /* For ServeRAID cards, and some 66 PCI */
+ return 0;
+}
+
+static inline int power_off(struct slot *slot_cur)
+{
+ u8 cmd = HPC_SLOT_OFF;
+ int retval;
+
+ retval = ibmphp_hpc_writeslot(slot_cur, cmd);
+ if (retval) {
+ err("power off failed\n");
+ return retval;
+ }
+ if (CTLR_RESULT(slot_cur->ctrl->status)) {
+ err("command not completed successfully in power_off\n");
+ retval = -EIO;
+ }
+ return retval;
+}
+
+static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
+{
+ int rc = 0;
+ struct slot *pslot;
+ u8 cmd = 0x00; /* avoid compiler warning */
+
+ debug("set_attention_status - Entry hotplug_slot[%lx] value[%x]\n",
+ (ulong) hotplug_slot, value);
+ ibmphp_lock_operations();
+
+
+ if (hotplug_slot) {
+ switch (value) {
+ case HPC_SLOT_ATTN_OFF:
+ cmd = HPC_SLOT_ATTNOFF;
+ break;
+ case HPC_SLOT_ATTN_ON:
+ cmd = HPC_SLOT_ATTNON;
+ break;
+ case HPC_SLOT_ATTN_BLINK:
+ cmd = HPC_SLOT_BLINKLED;
+ break;
+ default:
+ rc = -ENODEV;
+ err("set_attention_status - Error : invalid input [%x]\n",
+ value);
+ break;
+ }
+ if (rc == 0) {
+ pslot = hotplug_slot->private;
+ if (pslot)
+ rc = ibmphp_hpc_writeslot(pslot, cmd);
+ else
+ rc = -ENODEV;
+ }
+ } else
+ rc = -ENODEV;
+
+ ibmphp_unlock_operations();
+
+ debug("set_attention_status - Exit rc[%d]\n", rc);
+ return rc;
+}
+
+static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ int rc = -ENODEV;
+ struct slot *pslot;
+ struct slot myslot;
+
+ debug("get_attention_status - Entry hotplug_slot[%lx] pvalue[%lx]\n",
+ (ulong) hotplug_slot, (ulong) value);
+
+ ibmphp_lock_operations();
+ if (hotplug_slot) {
+ pslot = hotplug_slot->private;
+ if (pslot) {
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &(myslot.status));
+ if (!rc)
+ rc = ibmphp_hpc_readslot(pslot,
+ READ_EXTSLOTSTATUS,
+ &(myslot.ext_status));
+ if (!rc)
+ *value = SLOT_ATTN(myslot.status,
+ myslot.ext_status);
+ }
+ }
+
+ ibmphp_unlock_operations();
+ debug("get_attention_status - Exit rc[%d] value[%x]\n", rc, *value);
+ return rc;
+}
+
+static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ int rc = -ENODEV;
+ struct slot *pslot;
+ struct slot myslot;
+
+ debug("get_latch_status - Entry hotplug_slot[%lx] pvalue[%lx]\n",
+ (ulong) hotplug_slot, (ulong) value);
+ ibmphp_lock_operations();
+ if (hotplug_slot) {
+ pslot = hotplug_slot->private;
+ if (pslot) {
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &(myslot.status));
+ if (!rc)
+ *value = SLOT_LATCH(myslot.status);
+ }
+ }
+
+ ibmphp_unlock_operations();
+ debug("get_latch_status - Exit rc[%d] rc[%x] value[%x]\n",
+ rc, rc, *value);
+ return rc;
+}
+
+
+static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ int rc = -ENODEV;
+ struct slot *pslot;
+ struct slot myslot;
+
+ debug("get_power_status - Entry hotplug_slot[%lx] pvalue[%lx]\n",
+ (ulong) hotplug_slot, (ulong) value);
+ ibmphp_lock_operations();
+ if (hotplug_slot) {
+ pslot = hotplug_slot->private;
+ if (pslot) {
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &(myslot.status));
+ if (!rc)
+ *value = SLOT_PWRGD(myslot.status);
+ }
+ }
+
+ ibmphp_unlock_operations();
+ debug("get_power_status - Exit rc[%d] rc[%x] value[%x]\n",
+ rc, rc, *value);
+ return rc;
+}
+
+static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ int rc = -ENODEV;
+ struct slot *pslot;
+ u8 present;
+ struct slot myslot;
+
+ debug("get_adapter_status - Entry hotplug_slot[%lx] pvalue[%lx]\n",
+ (ulong) hotplug_slot, (ulong) value);
+ ibmphp_lock_operations();
+ if (hotplug_slot) {
+ pslot = hotplug_slot->private;
+ if (pslot) {
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &(myslot.status));
+ if (!rc) {
+ present = SLOT_PRESENT(myslot.status);
+ if (present == HPC_SLOT_EMPTY)
+ *value = 0;
+ else
+ *value = 1;
+ }
+ }
+ }
+
+ ibmphp_unlock_operations();
+ debug("get_adapter_present - Exit rc[%d] value[%x]\n", rc, *value);
+ return rc;
+}
+
+static int get_max_bus_speed(struct slot *slot)
+{
+ int rc = 0; /* stays 0 unless the speed below is unrecognized */
+ u8 mode = 0;
+ enum pci_bus_speed speed;
+ struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus;
+
+ debug("%s - Entry slot[%p]\n", __func__, slot);
+
+ ibmphp_lock_operations();
+ mode = slot->supported_bus_mode;
+ speed = slot->supported_speed;
+ ibmphp_unlock_operations();
+
+ switch (speed) {
+ case BUS_SPEED_33:
+ break;
+ case BUS_SPEED_66:
+ if (mode == BUS_MODE_PCIX)
+ speed += 0x01;
+ break;
+ case BUS_SPEED_100:
+ case BUS_SPEED_133:
+ speed += 0x01;
+ break;
+ default:
+ /* Note: this will need to change once 256 and 512 speeds are added */
+ rc = -ENODEV;
+ }
+
+ if (!rc)
+ bus->max_bus_speed = speed;
+
+ debug("%s - Exit rc[%d] speed[%x]\n", __func__, rc, speed);
+ return rc;
+}
+
+/*
+static int get_max_adapter_speed_1(struct hotplug_slot *hotplug_slot, u8 *value, u8 flag)
+{
+ int rc = -ENODEV;
+ struct slot *pslot;
+ struct slot myslot;
+
+ debug("get_max_adapter_speed_1 - Entry hotplug_slot[%lx] pvalue[%lx]\n",
+ (ulong)hotplug_slot, (ulong) value);
+
+ if (flag)
+ ibmphp_lock_operations();
+
+ if (hotplug_slot && value) {
+ pslot = hotplug_slot->private;
+ if (pslot) {
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &(myslot.status));
+
+ if (!(SLOT_LATCH (myslot.status)) &&
+ (SLOT_PRESENT (myslot.status))) {
+ rc = ibmphp_hpc_readslot(pslot,
+ READ_EXTSLOTSTATUS,
+ &(myslot.ext_status));
+ if (!rc)
+ *value = SLOT_SPEED(myslot.ext_status);
+ } else
+ *value = MAX_ADAPTER_NONE;
+ }
+ }
+
+ if (flag)
+ ibmphp_unlock_operations();
+
+ debug("get_max_adapter_speed_1 - Exit rc[%d] value[%x]\n", rc, *value);
+ return rc;
+}
+
+static int get_bus_name(struct hotplug_slot *hotplug_slot, char *value)
+{
+ int rc = -ENODEV;
+ struct slot *pslot = NULL;
+
+ debug("get_bus_name - Entry hotplug_slot[%lx]\n", (ulong)hotplug_slot);
+
+ ibmphp_lock_operations();
+
+ if (hotplug_slot) {
+ pslot = hotplug_slot->private;
+ if (pslot) {
+ rc = 0;
+ snprintf(value, 100, "Bus %x", pslot->bus);
+ }
+ } else
+ rc = -ENODEV;
+
+ ibmphp_unlock_operations();
+ debug("get_bus_name - Exit rc[%d] value[%x]\n", rc, *value);
+ return rc;
+}
+*/
+
+/****************************************************************************
+ * This routine initializes the ops data structure used in the validate
+ * function. It also powers off empty slots that are powered on, since the
+ * BIOS leaves those on, albeit disconnected
+ ****************************************************************************/
+static int __init init_ops(void)
+{
+ struct slot *slot_cur;
+ struct list_head *tmp;
+ int retval;
+ int rc;
+
+ list_for_each(tmp, &ibmphp_slot_head) {
+ slot_cur = list_entry(tmp, struct slot, ibm_slot_list);
+
+ if (!slot_cur)
+ return -ENODEV;
+
+ debug("BEFORE GETTING SLOT STATUS, slot # %x\n",
+ slot_cur->number);
+ if (slot_cur->ctrl->revision == 0xFF)
+ if (get_ctrl_revision(slot_cur,
+ &slot_cur->ctrl->revision))
+ return -1;
+
+ if (slot_cur->bus_on->current_speed == 0xFF)
+ if (get_cur_bus_info(&slot_cur))
+ return -1;
+ get_max_bus_speed(slot_cur);
+
+ if (slot_cur->ctrl->options == 0xFF)
+ if (get_hpc_options(slot_cur, &slot_cur->ctrl->options))
+ return -1;
+
+ retval = slot_update(&slot_cur);
+ if (retval)
+ return retval;
+
+ debug("status = %x\n", slot_cur->status);
+ debug("ext_status = %x\n", slot_cur->ext_status);
+ debug("SLOT_POWER = %x\n", SLOT_POWER(slot_cur->status));
+ debug("SLOT_PRESENT = %x\n", SLOT_PRESENT(slot_cur->status));
+ debug("SLOT_LATCH = %x\n", SLOT_LATCH(slot_cur->status));
+
+ if ((SLOT_PWRGD(slot_cur->status)) &&
+ !(SLOT_PRESENT(slot_cur->status)) &&
+ !(SLOT_LATCH(slot_cur->status))) {
+ debug("BEFORE POWER OFF COMMAND\n");
+ rc = power_off(slot_cur);
+ if (rc)
+ return rc;
+
+ /* retval = slot_update(&slot_cur);
+ * if (retval)
+ * return retval;
+ * ibmphp_update_slot_info(slot_cur);
+ */
+ }
+ }
+ init_flag = 0;
+ return 0;
+}
+
+/* This routine checks whether the slot number is within bounds and whether
+ * the requested operation is valid to perform on that slot
+ * Parameters: slot, operation
+ * Returns: 0 or error codes
+ */
+static int validate(struct slot *slot_cur, int opn)
+{
+ int number;
+ int retval;
+
+ if (!slot_cur)
+ return -ENODEV;
+ number = slot_cur->number;
+ if ((number > max_slots) || (number < 0))
+ return -EBADSLT;
+ debug("slot_number in validate is %d\n", slot_cur->number);
+
+ retval = slot_update(&slot_cur);
+ if (retval)
+ return retval;
+
+ switch (opn) {
+ case ENABLE:
+ if (!(SLOT_PWRGD(slot_cur->status)) &&
+ (SLOT_PRESENT(slot_cur->status)) &&
+ !(SLOT_LATCH(slot_cur->status)))
+ return 0;
+ break;
+ case DISABLE:
+ if ((SLOT_PWRGD(slot_cur->status)) &&
+ (SLOT_PRESENT(slot_cur->status)) &&
+ !(SLOT_LATCH(slot_cur->status)))
+ return 0;
+ break;
+ default:
+ break;
+ }
+ err("validate failed....\n");
+ return -EINVAL;
+}
+
+/****************************************************************************
+ * This routine is for updating the data structures in the hotplug core
+ * Parameters: struct slot
+ * Returns: 0 or error
+ ****************************************************************************/
+int ibmphp_update_slot_info(struct slot *slot_cur)
+{
+ struct hotplug_slot_info *info;
+ struct pci_bus *bus = slot_cur->hotplug_slot->pci_slot->bus;
+ int rc;
+ u8 bus_speed;
+ u8 mode;
+
+ info = kmalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
+ if (!info) {
+ err("out of system memory\n");
+ return -ENOMEM;
+ }
+
+ info->power_status = SLOT_PWRGD(slot_cur->status);
+ info->attention_status = SLOT_ATTN(slot_cur->status,
+ slot_cur->ext_status);
+ info->latch_status = SLOT_LATCH(slot_cur->status);
+ if (!SLOT_PRESENT(slot_cur->status)) {
+ info->adapter_status = 0;
+/* info->max_adapter_speed_status = MAX_ADAPTER_NONE; */
+ } else {
+ info->adapter_status = 1;
+/* get_max_adapter_speed_1(slot_cur->hotplug_slot,
+ &info->max_adapter_speed_status, 0); */
+ }
+
+ bus_speed = slot_cur->bus_on->current_speed;
+ mode = slot_cur->bus_on->current_bus_mode;
+
+ switch (bus_speed) {
+ case BUS_SPEED_33:
+ break;
+ case BUS_SPEED_66:
+ if (mode == BUS_MODE_PCIX)
+ bus_speed += 0x01;
+ else if (mode == BUS_MODE_PCI)
+ ;
+ else
+ bus_speed = PCI_SPEED_UNKNOWN;
+ break;
+ case BUS_SPEED_100:
+ case BUS_SPEED_133:
+ bus_speed += 0x01;
+ break;
+ default:
+ bus_speed = PCI_SPEED_UNKNOWN;
+ }
+
+ bus->cur_bus_speed = bus_speed;
+ // To do: bus_names
+
+ rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info);
+ kfree(info);
+ return rc;
+}
+
+
+/******************************************************************************
+ * This function returns the pci_func matching the given bus, device, and
+ * function numbers, or NULL. It is called from visit routines
+ ******************************************************************************/
+
+static struct pci_func *ibm_slot_find(u8 busno, u8 device, u8 function)
+{
+ struct pci_func *func_cur;
+ struct slot *slot_cur;
+ struct list_head *tmp;
+ list_for_each(tmp, &ibmphp_slot_head) {
+ slot_cur = list_entry(tmp, struct slot, ibm_slot_list);
+ if (slot_cur->func) {
+ func_cur = slot_cur->func;
+ while (func_cur) {
+ if ((func_cur->busno == busno) &&
+ (func_cur->device == device) &&
+ (func_cur->function == function))
+ return func_cur;
+ func_cur = func_cur->next;
+ }
+ }
+ }
+ return NULL;
+}
+
+/*************************************************************
+ * This routine frees up the memory used by struct slot, including
+ * the pointers to pci_func, bus, hotplug_slot, and controller,
+ * and deregisters the slot from the hotplug core
+ *************************************************************/
+static void free_slots(void)
+{
+ struct slot *slot_cur;
+ struct list_head *tmp;
+ struct list_head *next;
+
+ debug("%s -- enter\n", __func__);
+
+ list_for_each_safe(tmp, next, &ibmphp_slot_head) {
+ slot_cur = list_entry(tmp, struct slot, ibm_slot_list);
+ pci_hp_deregister(slot_cur->hotplug_slot);
+ }
+ debug("%s -- exit\n", __func__);
+}
+
+static void ibm_unconfigure_device(struct pci_func *func)
+{
+ struct pci_dev *temp;
+ u8 j;
+
+ debug("inside %s\n", __func__);
+ debug("func->device = %x, func->function = %x\n",
+ func->device, func->function);
+ debug("func->device << 3 | 0x0 = %x\n", func->device << 3 | 0x0);
+
+ pci_lock_rescan_remove();
+
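+ /* devfn encodes the device number in bits 7:3 and the function in bits
+ * 2:0, so (device << 3) | j walks all eight possible functions */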
+ for (j = 0; j < 0x08; j++) {
+ temp = pci_get_bus_and_slot(func->busno, (func->device << 3) | j);
+ if (temp) {
+ pci_stop_and_remove_bus_device(temp);
+ pci_dev_put(temp);
+ }
+ }
+
+ pci_dev_put(func->dev);
+
+ pci_unlock_rescan_remove();
+}
+
+/*
+ * The following function works around a kernel bug regarding
+ * getting bus entries: here we manually add those primary
+ * bus entries to the kernel bus structure wherever applicable
+ */
+static u8 bus_structure_fixup(u8 busno)
+{
+ struct pci_bus *bus, *b;
+ struct pci_dev *dev;
+ u16 l;
+
+ if (pci_find_bus(0, busno) || !(ibmphp_find_same_bus_num(busno)))
+ return 1;
+
+ bus = kmalloc(sizeof(*bus), GFP_KERNEL);
+ if (!bus) {
+ err("%s - out of memory\n", __func__);
+ return 1;
+ }
+ dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ kfree(bus);
+ err("%s - out of memory\n", __func__);
+ return 1;
+ }
+
+ bus->number = busno;
+ bus->ops = ibmphp_pci_bus->ops;
+ dev->bus = bus;
+ for (dev->devfn = 0; dev->devfn < 256; dev->devfn += 8) {
+ if (!pci_read_config_word(dev, PCI_VENDOR_ID, &l) &&
+ (l != 0x0000) && (l != 0xffff)) {
+ debug("%s - Inside bus_structure_fixup()\n",
+ __func__);
+ b = pci_scan_bus(busno, ibmphp_pci_bus->ops, NULL);
+ if (!b)
+ continue;
+
+ pci_bus_add_devices(b);
+ break;
+ }
+ }
+
+ kfree(dev);
+ kfree(bus);
+
+ return 0;
+}
+
+static int ibm_configure_device(struct pci_func *func)
+{
+ struct pci_bus *child;
+ int num;
+ int flag = 0; /* this is to make sure we don't double scan the bus,
+ for bridged devices primarily */
+
+ pci_lock_rescan_remove();
+
+ if (!(bus_structure_fixup(func->busno)))
+ flag = 1;
+ if (func->dev == NULL)
+ func->dev = pci_get_bus_and_slot(func->busno,
+ PCI_DEVFN(func->device, func->function));
+
+ if (func->dev == NULL) {
+ struct pci_bus *bus = pci_find_bus(0, func->busno);
+ if (!bus)
+ goto out;
+
+ num = pci_scan_slot(bus,
+ PCI_DEVFN(func->device, func->function));
+ if (num)
+ pci_bus_add_devices(bus);
+
+ func->dev = pci_get_bus_and_slot(func->busno,
+ PCI_DEVFN(func->device, func->function));
+ if (func->dev == NULL) {
+ err("ERROR... : pci_dev still NULL\n");
+ goto out;
+ }
+ }
+ if (!(flag) && (func->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) {
+ pci_hp_add_bridge(func->dev);
+ child = func->dev->subordinate;
+ if (child)
+ pci_bus_add_devices(child);
+ }
+
+ out:
+ pci_unlock_rescan_remove();
+ return 0;
+}
+
+/*******************************************************
+ * Returns whether the bus is empty or not
+ *******************************************************/
+static int is_bus_empty(struct slot *slot_cur)
+{
+ int rc;
+ struct slot *tmp_slot;
+ u8 i = slot_cur->bus_on->slot_min;
+
+ while (i <= slot_cur->bus_on->slot_max) {
+ if (i == slot_cur->number) {
+ i++;
+ continue;
+ }
+ tmp_slot = ibmphp_get_slot_from_physical_num(i);
+ if (!tmp_slot)
+ return 0;
+ rc = slot_update(&tmp_slot);
+ if (rc)
+ return 0;
+ if (SLOT_PRESENT(tmp_slot->status) &&
+ SLOT_PWRGD(tmp_slot->status))
+ return 0;
+ i++;
+ }
+ return 1;
+}
+
+/***********************************************************
+ * If the HPC permits it and the bus is currently empty, this tries to set
+ * the bus speed and mode to the maximum card and bus capability
+ * Parameters: slot
+ * Returns: bus is set (0) or error code
+ ***********************************************************/
+static int set_bus(struct slot *slot_cur)
+{
+ int rc;
+ u8 speed;
+ u8 cmd = 0x0;
+ int retval;
+ static struct pci_device_id ciobx[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, 0x0101) },
+ { },
+ };
+
+ debug("%s - entry slot # %d\n", __func__, slot_cur->number);
+ if (SET_BUS_STATUS(slot_cur->ctrl) && is_bus_empty(slot_cur)) {
+ rc = slot_update(&slot_cur);
+ if (rc)
+ return rc;
+ speed = SLOT_SPEED(slot_cur->ext_status);
+ debug("ext_status = %x, speed = %x\n", slot_cur->ext_status, speed);
+ switch (speed) {
+ case HPC_SLOT_SPEED_33:
+ cmd = HPC_BUS_33CONVMODE;
+ break;
+ case HPC_SLOT_SPEED_66:
+ if (SLOT_PCIX(slot_cur->ext_status)) {
+ if ((slot_cur->supported_speed >= BUS_SPEED_66) &&
+ (slot_cur->supported_bus_mode == BUS_MODE_PCIX))
+ cmd = HPC_BUS_66PCIXMODE;
+ else if (!SLOT_BUS_MODE(slot_cur->ext_status))
+ /* if max slot/bus capability is 66 pci
+ and there's no bus mode mismatch, then
+ the adapter supports 66 pci */
+ cmd = HPC_BUS_66CONVMODE;
+ else
+ cmd = HPC_BUS_33CONVMODE;
+ } else {
+ if (slot_cur->supported_speed >= BUS_SPEED_66)
+ cmd = HPC_BUS_66CONVMODE;
+ else
+ cmd = HPC_BUS_33CONVMODE;
+ }
+ break;
+ case HPC_SLOT_SPEED_133:
+ switch (slot_cur->supported_speed) {
+ case BUS_SPEED_33:
+ cmd = HPC_BUS_33CONVMODE;
+ break;
+ case BUS_SPEED_66:
+ if (slot_cur->supported_bus_mode == BUS_MODE_PCIX)
+ cmd = HPC_BUS_66PCIXMODE;
+ else
+ cmd = HPC_BUS_66CONVMODE;
+ break;
+ case BUS_SPEED_100:
+ cmd = HPC_BUS_100PCIXMODE;
+ break;
+ case BUS_SPEED_133:
+ /* This is to take care of the bug in CIOBX chip */
+ if (pci_dev_present(ciobx))
+ ibmphp_hpc_writeslot(slot_cur,
+ HPC_BUS_100PCIXMODE);
+ cmd = HPC_BUS_133PCIXMODE;
+ break;
+ default:
+ err("Wrong bus speed\n");
+ return -ENODEV;
+ }
+ break;
+ default:
+ err("wrong slot speed\n");
+ return -ENODEV;
+ }
+ debug("setting bus speed for slot %d, cmd %x\n",
+ slot_cur->number, cmd);
+ retval = ibmphp_hpc_writeslot(slot_cur, cmd);
+ if (retval) {
+ err("setting bus speed failed\n");
+ return retval;
+ }
+ if (CTLR_RESULT(slot_cur->ctrl->status)) {
+ err("command not completed successfully in set_bus\n");
+ return -EIO;
+ }
+ }
+ /* This is for the x440; once Brandon fixes the firmware,
+ this delay will not be needed */
+ msleep(1000);
+ debug("%s -Exit\n", __func__);
+ return 0;
+}
+
+/* This routine checks, from the BIOS, the limitations of the bus the slot is
+ * on. This is used in deciding whether or not to power up the slot.
+ * (electrical/spec limitations; for example, >1 133 MHz card or >2 66 MHz
+ * PCI cards on the same bus)
+ * Parameters: slot
+ * Returns: 0 = no limitations, -EINVAL = exceeded limitations on the bus
+ */
+static int check_limitations(struct slot *slot_cur)
+{
+ u8 i;
+ struct slot *tmp_slot;
+ u8 count = 0;
+ u8 limitation = 0;
+
+ for (i = slot_cur->bus_on->slot_min; i <= slot_cur->bus_on->slot_max; i++) {
+ tmp_slot = ibmphp_get_slot_from_physical_num(i);
+ if (!tmp_slot)
+ return -ENODEV;
+ if ((SLOT_PWRGD(tmp_slot->status)) &&
+ !(SLOT_CONNECT(tmp_slot->status)))
+ count++;
+ }
+ get_cur_bus_info(&slot_cur);
+ switch (slot_cur->bus_on->current_speed) {
+ case BUS_SPEED_33:
+ limitation = slot_cur->bus_on->slots_at_33_conv;
+ break;
+ case BUS_SPEED_66:
+ if (slot_cur->bus_on->current_bus_mode == BUS_MODE_PCIX)
+ limitation = slot_cur->bus_on->slots_at_66_pcix;
+ else
+ limitation = slot_cur->bus_on->slots_at_66_conv;
+ break;
+ case BUS_SPEED_100:
+ limitation = slot_cur->bus_on->slots_at_100_pcix;
+ break;
+ case BUS_SPEED_133:
+ limitation = slot_cur->bus_on->slots_at_133_pcix;
+ break;
+ }
+
+ if ((count + 1) > limitation)
+ return -EINVAL;
+ return 0;
+}
+
+static inline void print_card_capability(struct slot *slot_cur)
+{
+ info("capability of the card is ");
+ if ((slot_cur->ext_status & CARD_INFO) == PCIX133)
+ info(" 133 MHz PCI-X\n");
+ else if ((slot_cur->ext_status & CARD_INFO) == PCIX66)
+ info(" 66 MHz PCI-X\n");
+ else if ((slot_cur->ext_status & CARD_INFO) == PCI66)
+ info(" 66 MHz PCI\n");
+ else
+ info(" 33 MHz PCI\n");
+
+}
+
+/* This routine will power on the slot, configure the device(s) and find the
+ * drivers for them.
+ * Parameters: hotplug_slot
+ * Returns: 0 or failure codes
+ */
+static int enable_slot(struct hotplug_slot *hs)
+{
+ int rc, i, rcpr;
+ struct slot *slot_cur;
+ u8 function;
+ struct pci_func *tmp_func;
+
+ ibmphp_lock_operations();
+
+ debug("ENABLING SLOT........\n");
+ slot_cur = hs->private;
+
+ rc = validate(slot_cur, ENABLE);
+ if (rc) {
+ err("validate function failed\n");
+ goto error_nopower;
+ }
+
+ attn_LED_blink(slot_cur);
+
+ rc = set_bus(slot_cur);
+ if (rc) {
+ err("was not able to set the bus\n");
+ goto error_nopower;
+ }
+
+ /*-----------------debugging------------------------------*/
+ get_cur_bus_info(&slot_cur);
+ debug("the current bus speed right after set_bus = %x\n",
+ slot_cur->bus_on->current_speed);
+ /*----------------------------------------------------------*/
+
+ rc = check_limitations(slot_cur);
+ if (rc) {
+ err("Adding this card exceeds the limitations of this bus.\n");
+ err("(i.e., >1 133MHz cards running on same bus, or >2 66 PCI cards running on same bus.\n");
+ err("Try hot-adding into another bus\n");
+ rc = -EINVAL;
+ goto error_nopower;
+ }
+
+ rc = power_on(slot_cur);
+
+ if (rc) {
+ err("something wrong when powering up... please see below for details\n");
+ /* need to turn off before on, otherwise, blinking overwrites */
+ attn_off(slot_cur);
+ attn_on(slot_cur);
+ if (slot_update(&slot_cur)) {
+ attn_off(slot_cur);
+ attn_on(slot_cur);
+ rc = -ENODEV;
+ goto exit;
+ }
+ /* Check to see the error of why it failed */
+ if ((SLOT_POWER(slot_cur->status)) &&
+ !(SLOT_PWRGD(slot_cur->status)))
+ err("power fault occurred trying to power up\n");
+ else if (SLOT_BUS_SPEED(slot_cur->status)) {
+ err("bus speed mismatch occurred. please check current bus speed and card capability\n");
+ print_card_capability(slot_cur);
+ } else if (SLOT_BUS_MODE(slot_cur->ext_status)) {
+ err("bus mode mismatch occurred. please check current bus mode and card capability\n");
+ print_card_capability(slot_cur);
+ }
+ ibmphp_update_slot_info(slot_cur);
+ goto exit;
+ }
+ debug("after power_on\n");
+ /*-----------------------debugging---------------------------*/
+ get_cur_bus_info(&slot_cur);
+ debug("the current bus speed right after power_on = %x\n",
+ slot_cur->bus_on->current_speed);
+ /*----------------------------------------------------------*/
+
+ rc = slot_update(&slot_cur);
+ if (rc)
+ goto error_power;
+
+ rc = -EINVAL;
+ if (SLOT_POWER(slot_cur->status) && !(SLOT_PWRGD(slot_cur->status))) {
+ err("power fault occurred trying to power up...\n");
+ goto error_power;
+ }
+ if (SLOT_POWER(slot_cur->status) && (SLOT_BUS_SPEED(slot_cur->status))) {
+ err("bus speed mismatch occurred. please check current bus speed and card capability\n");
+ print_card_capability(slot_cur);
+ goto error_power;
+ }
+ /* Don't think this case will happen after the above checks...
+ * but just in case, for paranoia's sake */
+ if (!(SLOT_POWER(slot_cur->status))) {
+ err("power on failed...\n");
+ goto error_power;
+ }
+
+ slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL);
+ if (!slot_cur->func) {
+ /* We cannot do update_slot_info here, since there's no memory
+ * for kmalloc anyway, and update_slot_info allocates some */
+ err("out of system memory\n");
+ rc = -ENOMEM;
+ goto error_power;
+ }
+ slot_cur->func->busno = slot_cur->bus;
+ slot_cur->func->device = slot_cur->device;
+ for (i = 0; i < 4; i++)
+ slot_cur->func->irq[i] = slot_cur->irq[i];
+
+ debug("b4 configure_card, slot_cur->bus = %x, slot_cur->device = %x\n",
+ slot_cur->bus, slot_cur->device);
+
+ if (ibmphp_configure_card(slot_cur->func, slot_cur->number)) {
+ err("configure_card was unsuccessful...\n");
+ /* pass 1 (true) because we don't need to actually deallocate
+ * resources, just remove references */
+ ibmphp_unconfigure_card(&slot_cur, 1);
+ debug("after unconfigure_card\n");
+ slot_cur->func = NULL;
+ rc = -ENOMEM;
+ goto error_power;
+ }
+
+ function = 0x00;
+ do {
+ tmp_func = ibm_slot_find(slot_cur->bus, slot_cur->func->device,
+ function++);
+ if (tmp_func && !(tmp_func->dev))
+ ibm_configure_device(tmp_func);
+ } while (tmp_func);
+
+ attn_off(slot_cur);
+ if (slot_update(&slot_cur)) {
+ rc = -EFAULT;
+ goto exit;
+ }
+ ibmphp_print_test();
+ rc = ibmphp_update_slot_info(slot_cur);
+exit:
+ ibmphp_unlock_operations();
+ return rc;
+
+error_nopower:
+ attn_off(slot_cur); /* need to turn off if it was blinking before */
+ attn_on(slot_cur);
+error_cont:
+ rcpr = slot_update(&slot_cur);
+ if (rcpr) {
+ rc = rcpr;
+ goto exit;
+ }
+ ibmphp_update_slot_info(slot_cur);
+ goto exit;
+
+error_power:
+ attn_off(slot_cur); /* need to turn off if it was blinking before */
+ attn_on(slot_cur);
+ rcpr = power_off(slot_cur);
+ if (rcpr) {
+ rc = rcpr;
+ goto exit;
+ }
+ goto error_cont;
+}
+
+/**************************************************************
+* HOT REMOVING ADAPTER CARD                                   *
+* INPUT: POINTER TO THE HOTPLUG SLOT STRUCTURE                *
+* OUTPUT: SUCCESS 0; FAILURE: UNCONFIGURE, VALIDATE,          *
+*         DISABLE POWER                                       *
+**************************************************************/
+static int ibmphp_disable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+ int rc;
+
+ ibmphp_lock_operations();
+ rc = ibmphp_do_disable_slot(slot);
+ ibmphp_unlock_operations();
+ return rc;
+}
+
+int ibmphp_do_disable_slot(struct slot *slot_cur)
+{
+ int rc;
+ u8 flag;
+
+ debug("DISABLING SLOT...\n");
+
+ if ((slot_cur == NULL) || (slot_cur->ctrl == NULL))
+ return -ENODEV;
+
+ flag = slot_cur->flag;
+ slot_cur->flag = 1;
+
+ if (flag == 1) {
+ rc = validate(slot_cur, DISABLE);
+ /* checking if powered off already & valid slot # */
+ if (rc)
+ goto error;
+ }
+ attn_LED_blink(slot_cur);
+
+ if (slot_cur->func == NULL) {
+ /* We need this for functions that were there on bootup */
+ slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL);
+ if (!slot_cur->func) {
+ err("out of system memory\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+ slot_cur->func->busno = slot_cur->bus;
+ slot_cur->func->device = slot_cur->device;
+ }
+
+ ibm_unconfigure_device(slot_cur->func);
+
+ /*
+ * If we got here because the latch suddenly opened on an operating card,
+ * or because of a power fault, there's no power to the card, so we cannot
+ * read from it to determine what resources it occupied. This operation is
+ * forbidden anyhow. The best we can do is remove it from the kernel
+ * lists at least */
+
+ if (!flag) {
+ attn_off(slot_cur);
+ return 0;
+ }
+
+ rc = ibmphp_unconfigure_card(&slot_cur, 0);
+ slot_cur->func = NULL;
+ debug("in disable_slot. after unconfigure_card\n");
+ if (rc) {
+ err("could not unconfigure card.\n");
+ goto error;
+ }
+
+ rc = ibmphp_hpc_writeslot(slot_cur, HPC_SLOT_OFF);
+ if (rc)
+ goto error;
+
+ attn_off(slot_cur);
+ rc = slot_update(&slot_cur);
+ if (rc)
+ goto exit;
+
+ rc = ibmphp_update_slot_info(slot_cur);
+ ibmphp_print_test();
+exit:
+ return rc;
+
+error:
+ /* Need to turn off if it was blinking before */
+ attn_off(slot_cur);
+ attn_on(slot_cur);
+ if (slot_update(&slot_cur)) {
+ rc = -EFAULT;
+ goto exit;
+ }
+ if (flag)
+ ibmphp_update_slot_info(slot_cur);
+ goto exit;
+}
+
+struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
+ .set_attention_status = set_attention_status,
+ .enable_slot = enable_slot,
+ .disable_slot = ibmphp_disable_slot,
+ .hardware_test = NULL,
+ .get_power_status = get_power_status,
+ .get_attention_status = get_attention_status,
+ .get_latch_status = get_latch_status,
+ .get_adapter_status = get_adapter_present,
+/* .get_max_adapter_speed = get_max_adapter_speed,
+ .get_bus_name_status = get_bus_name,
+*/
+};
+
+static void ibmphp_unload(void)
+{
+ free_slots();
+ debug("after slots\n");
+ ibmphp_free_resources();
+ debug("after resources\n");
+ ibmphp_free_bus_info_queue();
+ debug("after bus info\n");
+ ibmphp_free_ebda_hpc_queue();
+ debug("after ebda hpc\n");
+ ibmphp_free_ebda_pci_rsrc_queue();
+ debug("after ebda pci rsrc\n");
+ kfree(ibmphp_pci_bus);
+}
+
+static int __init ibmphp_init(void)
+{
+ struct pci_bus *bus;
+ int i = 0;
+ int rc = 0;
+
+ init_flag = 1;
+
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+
+ ibmphp_pci_bus = kmalloc(sizeof(*ibmphp_pci_bus), GFP_KERNEL);
+ if (!ibmphp_pci_bus) {
+ err("out of memory\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ bus = pci_find_bus(0, 0);
+ if (!bus) {
+ err("Can't find the root pci bus, can not continue\n");
+ rc = -ENODEV;
+ goto error;
+ }
+ memcpy(ibmphp_pci_bus, bus, sizeof(*ibmphp_pci_bus));
+
+ ibmphp_debug = debug;
+
+ ibmphp_hpc_initvars();
+
+ for (i = 0; i < 16; i++)
+ irqs[i] = 0;
+
+ rc = ibmphp_access_ebda();
+ if (rc)
+ goto error;
+ debug("after ibmphp_access_ebda()\n");
+
+ rc = ibmphp_rsrc_init();
+ if (rc)
+ goto error;
+ debug("AFTER Resource & EBDA INITIALIZATIONS\n");
+
+ max_slots = get_max_slots();
+
+ rc = ibmphp_register_pci();
+ if (rc)
+ goto error;
+
+ if (init_ops()) {
+ rc = -ENODEV;
+ goto error;
+ }
+
+ ibmphp_print_test();
+ rc = ibmphp_hpc_start_poll_thread();
+ if (rc)
+ goto error;
+
+exit:
+ return rc;
+
+error:
+ ibmphp_unload();
+ goto exit;
+}
+
+static void __exit ibmphp_exit(void)
+{
+ ibmphp_hpc_stop_poll_thread();
+ debug("after polling\n");
+ ibmphp_unload();
+ debug("done\n");
+}
+
+module_init(ibmphp_init);
+module_exit(ibmphp_exit);
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
new file mode 100644
index 000000000..d9b197d5c
--- /dev/null
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -0,0 +1,1211 @@
+/*
+ * IBM Hot Plug Controller Driver
+ *
+ * Written By: Tong Yu, IBM Corporation
+ *
+ * Copyright (C) 2001,2003 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001-2003 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <gregkh@us.ibm.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include "ibmphp.h"
+
+/*
+ * POST builds data blocks in the Extended BIOS Data Area which describe
+ * the configuration of the hot-plug controllers and the resources used by
+ * the PCI Hot-Plug devices (in these data block definitions, a char is 1
+ * byte, a short (or word) is 2 bytes, and a long (dword) is 4 bytes).
+ *
+ * This file walks the EBDA, maps the data blocks from their physical
+ * addresses, and reconstructs linked lists of all system resources (MEM,
+ * PFM, IO) already assigned by POST, as well as linked lists of the hot
+ * plug controllers (ctlr#, slot#, bus & slot features...)
+ */
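+
+/*
+ * Orientation note (a sketch, not the driver code itself): the EBDA segment
+ * is located via the real-mode pointer at physical address 0x40E, so the
+ * walk starts from something like the following ('size' is hypothetical):
+ *
+ *	ebda_seg = readw(phys_to_virt(0x40E));
+ *	io_mem   = ioremap(ebda_seg << 4, size);
+ */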
+
+/* Global lists */
+LIST_HEAD (ibmphp_ebda_pci_rsrc_head);
+LIST_HEAD (ibmphp_slot_head);
+
+/* Local variables */
+static struct ebda_hpc_list *hpc_list_ptr;
+static struct ebda_rsrc_list *rsrc_list_ptr;
+static struct rio_table_hdr *rio_table_ptr = NULL;
+static LIST_HEAD (ebda_hpc_head);
+static LIST_HEAD (bus_info_head);
+static LIST_HEAD (rio_vg_head);
+static LIST_HEAD (rio_lo_head);
+static LIST_HEAD (opt_vg_head);
+static LIST_HEAD (opt_lo_head);
+static void __iomem *io_mem;
+
+/* Local functions */
+static int ebda_rsrc_controller (void);
+static int ebda_rsrc_rsrc (void);
+static int ebda_rio_table (void);
+
+static struct ebda_hpc_list * __init alloc_ebda_hpc_list (void)
+{
+ return kzalloc(sizeof(struct ebda_hpc_list), GFP_KERNEL);
+}
+
+static struct controller *alloc_ebda_hpc (u32 slot_count, u32 bus_count)
+{
+ struct controller *controller;
+ struct ebda_hpc_slot *slots;
+ struct ebda_hpc_bus *buses;
+
+ controller = kzalloc(sizeof(struct controller), GFP_KERNEL);
+ if (!controller)
+ goto error;
+
+ slots = kcalloc(slot_count, sizeof(struct ebda_hpc_slot), GFP_KERNEL);
+ if (!slots)
+ goto error_contr;
+ controller->slots = slots;
+
+ buses = kcalloc(bus_count, sizeof(struct ebda_hpc_bus), GFP_KERNEL);
+ if (!buses)
+ goto error_slots;
+ controller->buses = buses;
+
+ return controller;
+error_slots:
+ kfree(controller->slots);
+error_contr:
+ kfree(controller);
+error:
+ return NULL;
+}
+
+static void free_ebda_hpc (struct controller *controller)
+{
+ kfree (controller->slots);
+ kfree (controller->buses);
+ kfree (controller);
+}
+
+static struct ebda_rsrc_list * __init alloc_ebda_rsrc_list (void)
+{
+ return kzalloc(sizeof(struct ebda_rsrc_list), GFP_KERNEL);
+}
+
+static struct ebda_pci_rsrc *alloc_ebda_pci_rsrc (void)
+{
+ return kzalloc(sizeof(struct ebda_pci_rsrc), GFP_KERNEL);
+}
+
+static void __init print_bus_info (void)
+{
+ struct bus_info *ptr;
+
+ list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
+ debug ("%s - slot_min = %x\n", __func__, ptr->slot_min);
+ debug ("%s - slot_max = %x\n", __func__, ptr->slot_max);
+ debug ("%s - slot_count = %x\n", __func__, ptr->slot_count);
+ debug ("%s - bus# = %x\n", __func__, ptr->busno);
+ debug ("%s - current_speed = %x\n", __func__, ptr->current_speed);
+ debug ("%s - controller_id = %x\n", __func__, ptr->controller_id);
+
+ debug ("%s - slots_at_33_conv = %x\n", __func__, ptr->slots_at_33_conv);
+ debug ("%s - slots_at_66_conv = %x\n", __func__, ptr->slots_at_66_conv);
+ debug ("%s - slots_at_66_pcix = %x\n", __func__, ptr->slots_at_66_pcix);
+ debug ("%s - slots_at_100_pcix = %x\n", __func__, ptr->slots_at_100_pcix);
+ debug ("%s - slots_at_133_pcix = %x\n", __func__, ptr->slots_at_133_pcix);
+
+ }
+}
+
+static void print_lo_info (void)
+{
+ struct rio_detail *ptr;
+ debug ("print_lo_info ----\n");
+ list_for_each_entry(ptr, &rio_lo_head, rio_detail_list) {
+ debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id);
+ debug ("%s - rio_type = %x\n", __func__, ptr->rio_type);
+ debug ("%s - owner_id = %x\n", __func__, ptr->owner_id);
+ debug ("%s - first_slot_num = %x\n", __func__, ptr->first_slot_num);
+ debug ("%s - wpindex = %x\n", __func__, ptr->wpindex);
+ debug ("%s - chassis_num = %x\n", __func__, ptr->chassis_num);
+
+ }
+}
+
+static void print_vg_info (void)
+{
+ struct rio_detail *ptr;
+ debug ("%s ---\n", __func__);
+ list_for_each_entry(ptr, &rio_vg_head, rio_detail_list) {
+ debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id);
+ debug ("%s - rio_type = %x\n", __func__, ptr->rio_type);
+ debug ("%s - owner_id = %x\n", __func__, ptr->owner_id);
+ debug ("%s - first_slot_num = %x\n", __func__, ptr->first_slot_num);
+ debug ("%s - wpindex = %x\n", __func__, ptr->wpindex);
+ debug ("%s - chassis_num = %x\n", __func__, ptr->chassis_num);
+
+ }
+}
+
+static void __init print_ebda_pci_rsrc (void)
+{
+ struct ebda_pci_rsrc *ptr;
+
+ list_for_each_entry(ptr, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) {
+ debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
+			__func__, ptr->rsrc_type, ptr->bus_num, ptr->dev_fun, ptr->start_addr, ptr->end_addr);
+ }
+}
+
+static void __init print_ibm_slot (void)
+{
+ struct slot *ptr;
+
+ list_for_each_entry(ptr, &ibmphp_slot_head, ibm_slot_list) {
+ debug ("%s - slot_number: %x\n", __func__, ptr->number);
+ }
+}
+
+static void __init print_opt_vg (void)
+{
+ struct opt_rio *ptr;
+ debug ("%s ---\n", __func__);
+ list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) {
+ debug ("%s - rio_type %x\n", __func__, ptr->rio_type);
+ debug ("%s - chassis_num: %x\n", __func__, ptr->chassis_num);
+ debug ("%s - first_slot_num: %x\n", __func__, ptr->first_slot_num);
+ debug ("%s - middle_num: %x\n", __func__, ptr->middle_num);
+ }
+}
+
+static void __init print_ebda_hpc (void)
+{
+ struct controller *hpc_ptr;
+ u16 index;
+
+ list_for_each_entry(hpc_ptr, &ebda_hpc_head, ebda_hpc_list) {
+ for (index = 0; index < hpc_ptr->slot_count; index++) {
+ debug ("%s - physical slot#: %x\n", __func__, hpc_ptr->slots[index].slot_num);
+ debug ("%s - pci bus# of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_bus_num);
+ debug ("%s - index into ctlr addr: %x\n", __func__, hpc_ptr->slots[index].ctl_index);
+ debug ("%s - cap of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_cap);
+ }
+
+ for (index = 0; index < hpc_ptr->bus_count; index++)
+ debug ("%s - bus# of each bus controlled by this ctlr: %x\n", __func__, hpc_ptr->buses[index].bus_num);
+
+ debug ("%s - type of hpc: %x\n", __func__, hpc_ptr->ctlr_type);
+ switch (hpc_ptr->ctlr_type) {
+ case 1:
+ debug ("%s - bus: %x\n", __func__, hpc_ptr->u.pci_ctlr.bus);
+ debug ("%s - dev_fun: %x\n", __func__, hpc_ptr->u.pci_ctlr.dev_fun);
+ debug ("%s - irq: %x\n", __func__, hpc_ptr->irq);
+ break;
+
+ case 0:
+ debug ("%s - io_start: %x\n", __func__, hpc_ptr->u.isa_ctlr.io_start);
+ debug ("%s - io_end: %x\n", __func__, hpc_ptr->u.isa_ctlr.io_end);
+ debug ("%s - irq: %x\n", __func__, hpc_ptr->irq);
+ break;
+
+ case 2:
+ case 4:
+ debug ("%s - wpegbbar: %lx\n", __func__, hpc_ptr->u.wpeg_ctlr.wpegbbar);
+ debug ("%s - i2c_addr: %x\n", __func__, hpc_ptr->u.wpeg_ctlr.i2c_addr);
+ debug ("%s - irq: %x\n", __func__, hpc_ptr->irq);
+ break;
+ }
+ }
+}
+
+int __init ibmphp_access_ebda (void)
+{
+ u8 format, num_ctlrs, rio_complete, hs_complete, ebda_sz;
+ u16 ebda_seg, num_entries, next_offset, offset, blk_id, sub_addr, re, rc_id, re_id, base;
+ int rc = 0;
+
+ rio_complete = 0;
+ hs_complete = 0;
+
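+	/* the BIOS Data Area word at 0x40:0x0e (physical 0x40e) holds
+	 * the EBDA segment */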
+ io_mem = ioremap ((0x40 << 4) + 0x0e, 2);
+	if (!io_mem)
+ return -ENOMEM;
+ ebda_seg = readw (io_mem);
+ iounmap (io_mem);
+ debug ("returned ebda segment: %x\n", ebda_seg);
+
+ io_mem = ioremap(ebda_seg<<4, 1);
+ if (!io_mem)
+ return -ENOMEM;
+ ebda_sz = readb(io_mem);
+ iounmap(io_mem);
+ debug("ebda size: %d(KiB)\n", ebda_sz);
+ if (ebda_sz == 0)
+ return -ENOMEM;
+
+ io_mem = ioremap(ebda_seg<<4, (ebda_sz * 1024));
+	if (!io_mem)
+ return -ENOMEM;
+ next_offset = 0x180;
+
+ for (;;) {
+ offset = next_offset;
+
+ /* Make sure what we read is still in the mapped section */
+ if (WARN(offset > (ebda_sz * 1024 - 4),
+ "ibmphp_ebda: next read is beyond ebda_sz\n"))
+ break;
+
+ next_offset = readw (io_mem + offset); /* offset of next blk */
+
+ offset += 2;
+		if (next_offset == 0)	/* 0 indicates the last blk */
+ break;
+ blk_id = readw (io_mem + offset); /* this blk id */
+
+ offset += 2;
+ /* check if it is hot swap block or rio block */
+ if (blk_id != 0x4853 && blk_id != 0x4752)
+ continue;
+ /* found hs table */
+ if (blk_id == 0x4853) {
+ debug ("now enter hot swap block---\n");
+ debug ("hot blk id: %x\n", blk_id);
+ format = readb (io_mem + offset);
+
+ offset += 1;
+ if (format != 4)
+ goto error_nodev;
+ debug ("hot blk format: %x\n", format);
+ /* hot swap sub blk */
+ base = offset;
+
+ sub_addr = base;
+ re = readw (io_mem + sub_addr); /* next sub blk */
+
+ sub_addr += 2;
+ rc_id = readw (io_mem + sub_addr); /* sub blk id */
+
+ sub_addr += 2;
+ if (rc_id != 0x5243)
+ goto error_nodev;
+ /* rc sub blk signature */
+ num_ctlrs = readb (io_mem + sub_addr);
+
+ sub_addr += 1;
+ hpc_list_ptr = alloc_ebda_hpc_list ();
+ if (!hpc_list_ptr) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ hpc_list_ptr->format = format;
+ hpc_list_ptr->num_ctlrs = num_ctlrs;
+ hpc_list_ptr->phys_addr = sub_addr; /* offset of RSRC_CONTROLLER blk */
+ debug ("info about hpc descriptor---\n");
+ debug ("hot blk format: %x\n", format);
+ debug ("num of controller: %x\n", num_ctlrs);
+ debug ("offset of hpc data structure entries: %x\n ", sub_addr);
+
+ sub_addr = base + re; /* re sub blk */
+ /* FIXME: rc is never used/checked */
+ rc = readw (io_mem + sub_addr); /* next sub blk */
+
+ sub_addr += 2;
+ re_id = readw (io_mem + sub_addr); /* sub blk id */
+
+ sub_addr += 2;
+ if (re_id != 0x5245)
+ goto error_nodev;
+
+ /* signature of re */
+ num_entries = readw (io_mem + sub_addr);
+
+ sub_addr += 2; /* offset of RSRC_ENTRIES blk */
+ rsrc_list_ptr = alloc_ebda_rsrc_list ();
+			if (!rsrc_list_ptr) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ rsrc_list_ptr->format = format;
+ rsrc_list_ptr->num_entries = num_entries;
+ rsrc_list_ptr->phys_addr = sub_addr;
+
+ debug ("info about rsrc descriptor---\n");
+ debug ("format: %x\n", format);
+ debug ("num of rsrc: %x\n", num_entries);
+ debug ("offset of rsrc data structure entries: %x\n ", sub_addr);
+
+ hs_complete = 1;
+ } else {
+ /* found rio table, blk_id == 0x4752 */
+			debug ("now enter rio table ---\n");
+ debug ("rio blk id: %x\n", blk_id);
+
+ rio_table_ptr = kzalloc(sizeof(struct rio_table_hdr), GFP_KERNEL);
+ if (!rio_table_ptr) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ rio_table_ptr->ver_num = readb (io_mem + offset);
+ rio_table_ptr->scal_count = readb (io_mem + offset + 1);
+ rio_table_ptr->riodev_count = readb (io_mem + offset + 2);
+			rio_table_ptr->offset = offset + 3;
+
+ debug("info about rio table hdr ---\n");
+ debug("ver_num: %x\nscal_count: %x\nriodev_count: %x\noffset of rio table: %x\n ",
+ rio_table_ptr->ver_num, rio_table_ptr->scal_count,
+ rio_table_ptr->riodev_count, rio_table_ptr->offset);
+
+ rio_complete = 1;
+ }
+ }
+
+ if (!hs_complete && !rio_complete)
+ goto error_nodev;
+
+ if (rio_table_ptr) {
+ if (rio_complete && rio_table_ptr->ver_num == 3) {
+ rc = ebda_rio_table ();
+ if (rc)
+ goto out;
+ }
+ }
+ rc = ebda_rsrc_controller ();
+ if (rc)
+ goto out;
+
+ rc = ebda_rsrc_rsrc ();
+ goto out;
+error_nodev:
+ rc = -ENODEV;
+out:
+ iounmap (io_mem);
+ return rc;
+}
+
+/*
+ * map info of scalability details and rio details from physical address
+ */
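+/*
+ * Each rio_detail entry is 15 bytes, matching the reads below:
+ *	byte 0		rio_node_id
+ *	bytes 1-4	bbar
+ *	byte 5		rio_type
+ *	byte 6		owner_id
+ *	bytes 7-10	port0/port1 node & port connect
+ *	byte 11		first_slot_num
+ *	byte 12		status
+ *	byte 13		wpindex
+ *	byte 14		chassis_num
+ */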
+static int __init ebda_rio_table (void)
+{
+ u16 offset;
+ u8 i;
+ struct rio_detail *rio_detail_ptr;
+
+ offset = rio_table_ptr->offset;
+ offset += 12 * rio_table_ptr->scal_count;
+
+	// we only care about the rio details
+ for (i = 0; i < rio_table_ptr->riodev_count; i++) {
+ rio_detail_ptr = kzalloc(sizeof(struct rio_detail), GFP_KERNEL);
+ if (!rio_detail_ptr)
+ return -ENOMEM;
+ rio_detail_ptr->rio_node_id = readb (io_mem + offset);
+ rio_detail_ptr->bbar = readl (io_mem + offset + 1);
+ rio_detail_ptr->rio_type = readb (io_mem + offset + 5);
+ rio_detail_ptr->owner_id = readb (io_mem + offset + 6);
+ rio_detail_ptr->port0_node_connect = readb (io_mem + offset + 7);
+ rio_detail_ptr->port0_port_connect = readb (io_mem + offset + 8);
+ rio_detail_ptr->port1_node_connect = readb (io_mem + offset + 9);
+ rio_detail_ptr->port1_port_connect = readb (io_mem + offset + 10);
+ rio_detail_ptr->first_slot_num = readb (io_mem + offset + 11);
+ rio_detail_ptr->status = readb (io_mem + offset + 12);
+ rio_detail_ptr->wpindex = readb (io_mem + offset + 13);
+ rio_detail_ptr->chassis_num = readb (io_mem + offset + 14);
+// debug ("rio_node_id: %x\nbbar: %x\nrio_type: %x\nowner_id: %x\nport0_node: %x\nport0_port: %x\nport1_node: %x\nport1_port: %x\nfirst_slot_num: %x\nstatus: %x\n", rio_detail_ptr->rio_node_id, rio_detail_ptr->bbar, rio_detail_ptr->rio_type, rio_detail_ptr->owner_id, rio_detail_ptr->port0_node_connect, rio_detail_ptr->port0_port_connect, rio_detail_ptr->port1_node_connect, rio_detail_ptr->port1_port_connect, rio_detail_ptr->first_slot_num, rio_detail_ptr->status);
+ //create linked list of chassis
+ if (rio_detail_ptr->rio_type == 4 || rio_detail_ptr->rio_type == 5)
+ list_add (&rio_detail_ptr->rio_detail_list, &rio_vg_head);
+ //create linked list of expansion box
+ else if (rio_detail_ptr->rio_type == 6 || rio_detail_ptr->rio_type == 7)
+ list_add (&rio_detail_ptr->rio_detail_list, &rio_lo_head);
+ else
+			// not of interest here
+ kfree (rio_detail_ptr);
+ offset += 15;
+ }
+ print_lo_info ();
+ print_vg_info ();
+ return 0;
+}
+
+/*
+ * Reorganize the linked list of chassis: collapse the per-node
+ * rio_detail entries into one opt_rio entry per chassis, keeping the
+ * smallest first-slot number seen in first_slot_num and the largest
+ * in middle_num.
+ */
+static struct opt_rio *search_opt_vg (u8 chassis_num)
+{
+ struct opt_rio *ptr;
+ list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) {
+ if (ptr->chassis_num == chassis_num)
+ return ptr;
+ }
+ return NULL;
+}
+
+static int __init combine_wpg_for_chassis (void)
+{
+ struct opt_rio *opt_rio_ptr = NULL;
+ struct rio_detail *rio_detail_ptr = NULL;
+
+ list_for_each_entry(rio_detail_ptr, &rio_vg_head, rio_detail_list) {
+ opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num);
+ if (!opt_rio_ptr) {
+ opt_rio_ptr = kzalloc(sizeof(struct opt_rio), GFP_KERNEL);
+ if (!opt_rio_ptr)
+ return -ENOMEM;
+ opt_rio_ptr->rio_type = rio_detail_ptr->rio_type;
+ opt_rio_ptr->chassis_num = rio_detail_ptr->chassis_num;
+ opt_rio_ptr->first_slot_num = rio_detail_ptr->first_slot_num;
+ opt_rio_ptr->middle_num = rio_detail_ptr->first_slot_num;
+ list_add (&opt_rio_ptr->opt_rio_list, &opt_vg_head);
+ } else {
+ opt_rio_ptr->first_slot_num = min (opt_rio_ptr->first_slot_num, rio_detail_ptr->first_slot_num);
+ opt_rio_ptr->middle_num = max (opt_rio_ptr->middle_num, rio_detail_ptr->first_slot_num);
+ }
+ }
+ print_opt_vg ();
+ return 0;
+}
+
+/*
+ * Reorganize the linked list of expansion boxes in the same way;
+ * pack_count records whether one or two rio_detail entries were
+ * merged for the box.
+ */
+static struct opt_rio_lo *search_opt_lo (u8 chassis_num)
+{
+ struct opt_rio_lo *ptr;
+ list_for_each_entry(ptr, &opt_lo_head, opt_rio_lo_list) {
+ if (ptr->chassis_num == chassis_num)
+ return ptr;
+ }
+ return NULL;
+}
+
+static int combine_wpg_for_expansion (void)
+{
+ struct opt_rio_lo *opt_rio_lo_ptr = NULL;
+ struct rio_detail *rio_detail_ptr = NULL;
+
+ list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) {
+ opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num);
+ if (!opt_rio_lo_ptr) {
+ opt_rio_lo_ptr = kzalloc(sizeof(struct opt_rio_lo), GFP_KERNEL);
+ if (!opt_rio_lo_ptr)
+ return -ENOMEM;
+ opt_rio_lo_ptr->rio_type = rio_detail_ptr->rio_type;
+ opt_rio_lo_ptr->chassis_num = rio_detail_ptr->chassis_num;
+ opt_rio_lo_ptr->first_slot_num = rio_detail_ptr->first_slot_num;
+ opt_rio_lo_ptr->middle_num = rio_detail_ptr->first_slot_num;
+ opt_rio_lo_ptr->pack_count = 1;
+
+ list_add (&opt_rio_lo_ptr->opt_rio_lo_list, &opt_lo_head);
+ } else {
+ opt_rio_lo_ptr->first_slot_num = min (opt_rio_lo_ptr->first_slot_num, rio_detail_ptr->first_slot_num);
+ opt_rio_lo_ptr->middle_num = max (opt_rio_lo_ptr->middle_num, rio_detail_ptr->first_slot_num);
+ opt_rio_lo_ptr->pack_count = 2;
+ }
+ }
+ return 0;
+}
+
+
+/* Since we don't know the max slot number per chassis, go through the
+ * whole list of chassis/expansion boxes to find out the range.
+ * Arguments: slot_num - the slot being looked up,
+ * first_slot - 1st slot number of the chassis we think we are on,
+ * var (0 = chassis, 1 = expansion box)
+ * Returns 0 if slot_num falls within the chassis/box that starts at
+ * first_slot, -ENODEV if some other chassis/box starts in between.
+ */
+static int first_slot_num (u8 slot_num, u8 first_slot, u8 var)
+{
+ struct opt_rio *opt_vg_ptr = NULL;
+ struct opt_rio_lo *opt_lo_ptr = NULL;
+ int rc = 0;
+
+ if (!var) {
+ list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) {
+ if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) {
+ rc = -ENODEV;
+ break;
+ }
+ }
+ } else {
+ list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) {
+ if ((first_slot < opt_lo_ptr->first_slot_num) && (slot_num >= opt_lo_ptr->first_slot_num)) {
+ rc = -ENODEV;
+ break;
+ }
+ }
+ }
+ return rc;
+}
+
+static struct opt_rio_lo *find_rxe_num (u8 slot_num)
+{
+ struct opt_rio_lo *opt_lo_ptr;
+
+ list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) {
+ //check to see if this slot_num belongs to expansion box
+ if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1)))
+ return opt_lo_ptr;
+ }
+ return NULL;
+}
+
+static struct opt_rio *find_chassis_num (u8 slot_num)
+{
+ struct opt_rio *opt_vg_ptr;
+
+ list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) {
+ //check to see if this slot_num belongs to chassis
+ if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0)))
+ return opt_vg_ptr;
+ }
+ return NULL;
+}
+
+/* This routine finds out how many slots precede the current chassis,
+ * so that the slot numbers for an rxe100 start from 1 rather than
+ * from 6 or 7, etc.
+ */
+static u8 calculate_first_slot (u8 slot_num)
+{
+ u8 first_slot = 1;
+ struct slot *slot_cur;
+
+ list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) {
+ if (slot_cur->ctrl) {
+ if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num))
+ first_slot = slot_cur->ctrl->ending_slot_num;
+ }
+ }
+ return first_slot + 1;
+
+}
+
+#define SLOT_NAME_SIZE 30
+
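+/*
+ * Slot names are built as "<chassis|rxe><number>slot<n>", where <n>
+ * is relative to the first slot of that chassis/expansion box.  As a
+ * purely illustrative example (hypothetical numbers): physical slot 6
+ * in chassis 2 whose first slot is 5 would be named "chassis2slot2".
+ */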
+static char *create_file_name (struct slot *slot_cur)
+{
+ struct opt_rio *opt_vg_ptr = NULL;
+ struct opt_rio_lo *opt_lo_ptr = NULL;
+ static char str[SLOT_NAME_SIZE];
+ int which = 0; /* rxe = 1, chassis = 0 */
+ u8 number = 1; /* either chassis or rxe # */
+ u8 first_slot = 1;
+ u8 slot_num;
+ u8 flag = 0;
+
+ if (!slot_cur) {
+ err ("Structure passed is empty\n");
+ return NULL;
+ }
+
+ slot_num = slot_cur->number;
+
+ memset (str, 0, sizeof(str));
+
+ if (rio_table_ptr) {
+ if (rio_table_ptr->ver_num == 3) {
+ opt_vg_ptr = find_chassis_num (slot_num);
+ opt_lo_ptr = find_rxe_num (slot_num);
+ }
+ }
+ if (opt_vg_ptr) {
+ if (opt_lo_ptr) {
+ if ((slot_num - opt_vg_ptr->first_slot_num) > (slot_num - opt_lo_ptr->first_slot_num)) {
+ number = opt_lo_ptr->chassis_num;
+ first_slot = opt_lo_ptr->first_slot_num;
+ which = 1; /* it is RXE */
+ } else {
+ first_slot = opt_vg_ptr->first_slot_num;
+ number = opt_vg_ptr->chassis_num;
+ which = 0;
+ }
+ } else {
+ first_slot = opt_vg_ptr->first_slot_num;
+ number = opt_vg_ptr->chassis_num;
+ which = 0;
+ }
+ ++flag;
+ } else if (opt_lo_ptr) {
+ number = opt_lo_ptr->chassis_num;
+ first_slot = opt_lo_ptr->first_slot_num;
+ which = 1;
+ ++flag;
+ } else if (rio_table_ptr) {
+ if (rio_table_ptr->ver_num == 3) {
+ /* if both NULL and we DO have correct RIO table in BIOS */
+ return NULL;
+ }
+ }
+ if (!flag) {
+ if (slot_cur->ctrl->ctlr_type == 4) {
+ first_slot = calculate_first_slot (slot_num);
+ which = 1;
+ } else {
+ which = 0;
+ }
+ }
+
+ sprintf(str, "%s%dslot%d",
+ which == 0 ? "chassis" : "rxe",
+ number, slot_num - first_slot + 1);
+ return str;
+}
+
+static int fillslotinfo(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot;
+ int rc = 0;
+
+ if (!hotplug_slot || !hotplug_slot->private)
+ return -EINVAL;
+
+ slot = hotplug_slot->private;
+ rc = ibmphp_hpc_readslot(slot, READ_ALLSTAT, NULL);
+ if (rc)
+ return rc;
+
+ // power - enabled:1 not:0
+ hotplug_slot->info->power_status = SLOT_POWER(slot->status);
+
+ // attention - off:0, on:1, blinking:2
+ hotplug_slot->info->attention_status = SLOT_ATTN(slot->status, slot->ext_status);
+
+ // latch - open:1 closed:0
+ hotplug_slot->info->latch_status = SLOT_LATCH(slot->status);
+
+ // pci board - present:1 not:0
+ if (SLOT_PRESENT (slot->status))
+ hotplug_slot->info->adapter_status = 1;
+ else
+ hotplug_slot->info->adapter_status = 0;
+/*
+ if (slot->bus_on->supported_bus_mode
+ && (slot->bus_on->supported_speed == BUS_SPEED_66))
+ hotplug_slot->info->max_bus_speed_status = BUS_SPEED_66PCIX;
+ else
+ hotplug_slot->info->max_bus_speed_status = slot->bus_on->supported_speed;
+*/
+
+ return rc;
+}
+
+static void release_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot;
+
+ if (!hotplug_slot || !hotplug_slot->private)
+ return;
+
+ slot = hotplug_slot->private;
+ kfree(slot->hotplug_slot->info);
+ kfree(slot->hotplug_slot);
+ slot->ctrl = NULL;
+ slot->bus_on = NULL;
+
+ /* we don't want to actually remove the resources, since free_resources will do just that */
+ ibmphp_unconfigure_card(&slot, -1);
+
+ kfree (slot);
+}
+
+static struct pci_driver ibmphp_driver;
+
+/*
+ * map info (ctlr-id, slot count, slot#.. bus count, bus#, ctlr type...) of
+ * each hpc from physical address to a list of hot plug controllers based on
+ * hpc descriptors.
+ */
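+/*
+ * Layout of one hpc descriptor, matching the reads below:
+ *	u8	ctlr_id
+ *	u8	slot count N
+ *	4*N	slot entries, stored as four parallel N-byte arrays:
+ *		slot_num[N], slot_bus_num[N], ctl_index[N], slot_cap[N]
+ *	u8	bus count M
+ *	9*M	bus entries: bus_num[M], then 8 bytes of speed/mode
+ *		info per bus (5 of which are read here)
+ *	u8	ctlr_type, followed by a type-specific tail (PCI: bus,
+ *		devfun, irq; ISA: io_start, io_end, irq; WPEG:
+ *		wpegbbar, i2c_addr, irq)
+ */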
+static int __init ebda_rsrc_controller (void)
+{
+ u16 addr, addr_slot, addr_bus;
+ u8 ctlr_id, temp, bus_index;
+ u16 ctlr, slot, bus;
+ u16 slot_num, bus_num, index;
+ struct hotplug_slot *hp_slot_ptr;
+ struct controller *hpc_ptr;
+ struct ebda_hpc_bus *bus_ptr;
+ struct ebda_hpc_slot *slot_ptr;
+ struct bus_info *bus_info_ptr1, *bus_info_ptr2;
+ int rc;
+ struct slot *tmp_slot;
+ char name[SLOT_NAME_SIZE];
+
+ addr = hpc_list_ptr->phys_addr;
+ for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) {
+ bus_index = 1;
+ ctlr_id = readb (io_mem + addr);
+ addr += 1;
+ slot_num = readb (io_mem + addr);
+
+ addr += 1;
+ addr_slot = addr; /* offset of slot structure */
+ addr += (slot_num * 4);
+
+ bus_num = readb (io_mem + addr);
+
+ addr += 1;
+ addr_bus = addr; /* offset of bus */
+ addr += (bus_num * 9); /* offset of ctlr_type */
+ temp = readb (io_mem + addr);
+
+ addr += 1;
+ /* init hpc structure */
+ hpc_ptr = alloc_ebda_hpc (slot_num, bus_num);
+		if (!hpc_ptr) {
+ rc = -ENOMEM;
+ goto error_no_hpc;
+ }
+ hpc_ptr->ctlr_id = ctlr_id;
+ hpc_ptr->ctlr_relative_id = ctlr;
+ hpc_ptr->slot_count = slot_num;
+ hpc_ptr->bus_count = bus_num;
+ debug ("now enter ctlr data structure ---\n");
+ debug ("ctlr id: %x\n", ctlr_id);
+ debug ("ctlr_relative_id: %x\n", hpc_ptr->ctlr_relative_id);
+ debug ("count of slots controlled by this ctlr: %x\n", slot_num);
+ debug ("count of buses controlled by this ctlr: %x\n", bus_num);
+
+ /* init slot structure, fetch slot, bus, cap... */
+ slot_ptr = hpc_ptr->slots;
+ for (slot = 0; slot < slot_num; slot++) {
+ slot_ptr->slot_num = readb (io_mem + addr_slot);
+ slot_ptr->slot_bus_num = readb (io_mem + addr_slot + slot_num);
+ slot_ptr->ctl_index = readb (io_mem + addr_slot + 2*slot_num);
+ slot_ptr->slot_cap = readb (io_mem + addr_slot + 3*slot_num);
+
+			// create bus_info linked list --- if only one slot per bus: slot_min = slot_max
+
+ bus_info_ptr2 = ibmphp_find_same_bus_num (slot_ptr->slot_bus_num);
+ if (!bus_info_ptr2) {
+ bus_info_ptr1 = kzalloc(sizeof(struct bus_info), GFP_KERNEL);
+ if (!bus_info_ptr1) {
+ rc = -ENOMEM;
+ goto error_no_hp_slot;
+ }
+ bus_info_ptr1->slot_min = slot_ptr->slot_num;
+ bus_info_ptr1->slot_max = slot_ptr->slot_num;
+ bus_info_ptr1->slot_count += 1;
+ bus_info_ptr1->busno = slot_ptr->slot_bus_num;
+ bus_info_ptr1->index = bus_index++;
+ bus_info_ptr1->current_speed = 0xff;
+ bus_info_ptr1->current_bus_mode = 0xff;
+
+ bus_info_ptr1->controller_id = hpc_ptr->ctlr_id;
+
+ list_add_tail (&bus_info_ptr1->bus_info_list, &bus_info_head);
+
+ } else {
+ bus_info_ptr2->slot_min = min (bus_info_ptr2->slot_min, slot_ptr->slot_num);
+ bus_info_ptr2->slot_max = max (bus_info_ptr2->slot_max, slot_ptr->slot_num);
+ bus_info_ptr2->slot_count += 1;
+
+ }
+
+ // end of creating the bus_info linked list
+
+ slot_ptr++;
+ addr_slot += 1;
+ }
+
+ /* init bus structure */
+ bus_ptr = hpc_ptr->buses;
+ for (bus = 0; bus < bus_num; bus++) {
+ bus_ptr->bus_num = readb (io_mem + addr_bus + bus);
+ bus_ptr->slots_at_33_conv = readb (io_mem + addr_bus + bus_num + 8 * bus);
+ bus_ptr->slots_at_66_conv = readb (io_mem + addr_bus + bus_num + 8 * bus + 1);
+
+ bus_ptr->slots_at_66_pcix = readb (io_mem + addr_bus + bus_num + 8 * bus + 2);
+
+ bus_ptr->slots_at_100_pcix = readb (io_mem + addr_bus + bus_num + 8 * bus + 3);
+
+ bus_ptr->slots_at_133_pcix = readb (io_mem + addr_bus + bus_num + 8 * bus + 4);
+
+ bus_info_ptr2 = ibmphp_find_same_bus_num (bus_ptr->bus_num);
+ if (bus_info_ptr2) {
+ bus_info_ptr2->slots_at_33_conv = bus_ptr->slots_at_33_conv;
+ bus_info_ptr2->slots_at_66_conv = bus_ptr->slots_at_66_conv;
+ bus_info_ptr2->slots_at_66_pcix = bus_ptr->slots_at_66_pcix;
+ bus_info_ptr2->slots_at_100_pcix = bus_ptr->slots_at_100_pcix;
+ bus_info_ptr2->slots_at_133_pcix = bus_ptr->slots_at_133_pcix;
+ }
+ bus_ptr++;
+ }
+
+ hpc_ptr->ctlr_type = temp;
+
+ switch (hpc_ptr->ctlr_type) {
+ case 1:
+ hpc_ptr->u.pci_ctlr.bus = readb (io_mem + addr);
+ hpc_ptr->u.pci_ctlr.dev_fun = readb (io_mem + addr + 1);
+ hpc_ptr->irq = readb (io_mem + addr + 2);
+ addr += 3;
+ debug ("ctrl bus = %x, ctlr devfun = %x, irq = %x\n",
+ hpc_ptr->u.pci_ctlr.bus,
+ hpc_ptr->u.pci_ctlr.dev_fun, hpc_ptr->irq);
+ break;
+
+ case 0:
+ hpc_ptr->u.isa_ctlr.io_start = readw (io_mem + addr);
+ hpc_ptr->u.isa_ctlr.io_end = readw (io_mem + addr + 2);
+ if (!request_region (hpc_ptr->u.isa_ctlr.io_start,
+ (hpc_ptr->u.isa_ctlr.io_end - hpc_ptr->u.isa_ctlr.io_start + 1),
+ "ibmphp")) {
+ rc = -ENODEV;
+ goto error_no_hp_slot;
+ }
+ hpc_ptr->irq = readb (io_mem + addr + 4);
+ addr += 5;
+ break;
+
+ case 2:
+ case 4:
+ hpc_ptr->u.wpeg_ctlr.wpegbbar = readl (io_mem + addr);
+ hpc_ptr->u.wpeg_ctlr.i2c_addr = readb (io_mem + addr + 4);
+ hpc_ptr->irq = readb (io_mem + addr + 5);
+ addr += 6;
+ break;
+ default:
+ rc = -ENODEV;
+ goto error_no_hp_slot;
+ }
+
+ //reorganize chassis' linked list
+ combine_wpg_for_chassis ();
+ combine_wpg_for_expansion ();
+ hpc_ptr->revision = 0xff;
+ hpc_ptr->options = 0xff;
+ hpc_ptr->starting_slot_num = hpc_ptr->slots[0].slot_num;
+ hpc_ptr->ending_slot_num = hpc_ptr->slots[slot_num-1].slot_num;
+
+ // register slots with hpc core as well as create linked list of ibm slot
+ for (index = 0; index < hpc_ptr->slot_count; index++) {
+
+ hp_slot_ptr = kzalloc(sizeof(*hp_slot_ptr), GFP_KERNEL);
+ if (!hp_slot_ptr) {
+ rc = -ENOMEM;
+ goto error_no_hp_slot;
+ }
+
+ hp_slot_ptr->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
+ if (!hp_slot_ptr->info) {
+ rc = -ENOMEM;
+ goto error_no_hp_info;
+ }
+
+ tmp_slot = kzalloc(sizeof(*tmp_slot), GFP_KERNEL);
+ if (!tmp_slot) {
+ rc = -ENOMEM;
+ goto error_no_slot;
+ }
+
+ tmp_slot->flag = 1;
+
+ tmp_slot->capabilities = hpc_ptr->slots[index].slot_cap;
+ if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_133_MAX) == EBDA_SLOT_133_MAX)
+ tmp_slot->supported_speed = 3;
+ else if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_100_MAX) == EBDA_SLOT_100_MAX)
+ tmp_slot->supported_speed = 2;
+ else if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_66_MAX) == EBDA_SLOT_66_MAX)
+ tmp_slot->supported_speed = 1;
+
+ if ((hpc_ptr->slots[index].slot_cap & EBDA_SLOT_PCIX_CAP) == EBDA_SLOT_PCIX_CAP)
+ tmp_slot->supported_bus_mode = 1;
+ else
+ tmp_slot->supported_bus_mode = 0;
+
+ tmp_slot->bus = hpc_ptr->slots[index].slot_bus_num;
+
+ bus_info_ptr1 = ibmphp_find_same_bus_num (hpc_ptr->slots[index].slot_bus_num);
+ if (!bus_info_ptr1) {
+ kfree(tmp_slot);
+ rc = -ENODEV;
+ goto error;
+ }
+ tmp_slot->bus_on = bus_info_ptr1;
+ bus_info_ptr1 = NULL;
+ tmp_slot->ctrl = hpc_ptr;
+
+ tmp_slot->ctlr_index = hpc_ptr->slots[index].ctl_index;
+ tmp_slot->number = hpc_ptr->slots[index].slot_num;
+ tmp_slot->hotplug_slot = hp_slot_ptr;
+
+ hp_slot_ptr->private = tmp_slot;
+ hp_slot_ptr->release = release_slot;
+
+ rc = fillslotinfo(hp_slot_ptr);
+ if (rc)
+ goto error;
+
+ rc = ibmphp_init_devno ((struct slot **) &hp_slot_ptr->private);
+ if (rc)
+ goto error;
+ hp_slot_ptr->ops = &ibmphp_hotplug_slot_ops;
+
+ // end of registering ibm slot with hotplug core
+
+ list_add (& ((struct slot *)(hp_slot_ptr->private))->ibm_slot_list, &ibmphp_slot_head);
+ }
+
+ print_bus_info ();
+		list_add (&hpc_ptr->ebda_hpc_list, &ebda_hpc_head);
+
+ } /* each hpc */
+
+ list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) {
+ snprintf(name, SLOT_NAME_SIZE, "%s", create_file_name(tmp_slot));
+ pci_hp_register(tmp_slot->hotplug_slot,
+ pci_find_bus(0, tmp_slot->bus), tmp_slot->device, name);
+ }
+
+ print_ebda_hpc ();
+ print_ibm_slot ();
+ return 0;
+
+error:
+ kfree (hp_slot_ptr->private);
+error_no_slot:
+ kfree (hp_slot_ptr->info);
+error_no_hp_info:
+ kfree (hp_slot_ptr);
+error_no_hp_slot:
+ free_ebda_hpc (hpc_ptr);
+error_no_hpc:
+ iounmap (io_mem);
+ return rc;
+}
+
+/*
+ * map info (bus, devfun, start addr, end addr..) of i/o, memory,
+ * pfm from the physical addr to a list of resource.
+ */
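+/*
+ * Each resource entry starts with a type byte; its EBDA_RSRC_TYPE_MASK
+ * bits select the layout of the rest:
+ *	io:		bus (1), dev_fun (1), start (2), end (2)
+ *	mem/pfm:	bus (1), dev_fun (1), start (4), end (4)
+ */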
+static int __init ebda_rsrc_rsrc (void)
+{
+ u16 addr;
+ short rsrc;
+ u8 type, rsrc_type;
+ struct ebda_pci_rsrc *rsrc_ptr;
+
+ addr = rsrc_list_ptr->phys_addr;
+ debug ("now entering rsrc land\n");
+ debug ("offset of rsrc: %x\n", rsrc_list_ptr->phys_addr);
+
+ for (rsrc = 0; rsrc < rsrc_list_ptr->num_entries; rsrc++) {
+ type = readb (io_mem + addr);
+
+ addr += 1;
+ rsrc_type = type & EBDA_RSRC_TYPE_MASK;
+
+ if (rsrc_type == EBDA_IO_RSRC_TYPE) {
+ rsrc_ptr = alloc_ebda_pci_rsrc ();
+ if (!rsrc_ptr) {
+ iounmap (io_mem);
+ return -ENOMEM;
+ }
+ rsrc_ptr->rsrc_type = type;
+
+ rsrc_ptr->bus_num = readb (io_mem + addr);
+ rsrc_ptr->dev_fun = readb (io_mem + addr + 1);
+ rsrc_ptr->start_addr = readw (io_mem + addr + 2);
+ rsrc_ptr->end_addr = readw (io_mem + addr + 4);
+ addr += 6;
+
+ debug ("rsrc from io type ----\n");
+ debug ("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
+ rsrc_ptr->rsrc_type, rsrc_ptr->bus_num, rsrc_ptr->dev_fun, rsrc_ptr->start_addr, rsrc_ptr->end_addr);
+
+ list_add (&rsrc_ptr->ebda_pci_rsrc_list, &ibmphp_ebda_pci_rsrc_head);
+ }
+
+ if (rsrc_type == EBDA_MEM_RSRC_TYPE || rsrc_type == EBDA_PFM_RSRC_TYPE) {
+ rsrc_ptr = alloc_ebda_pci_rsrc ();
+			if (!rsrc_ptr) {
+ iounmap (io_mem);
+ return -ENOMEM;
+ }
+ rsrc_ptr->rsrc_type = type;
+
+ rsrc_ptr->bus_num = readb (io_mem + addr);
+ rsrc_ptr->dev_fun = readb (io_mem + addr + 1);
+ rsrc_ptr->start_addr = readl (io_mem + addr + 2);
+ rsrc_ptr->end_addr = readl (io_mem + addr + 6);
+ addr += 10;
+
+ debug ("rsrc from mem or pfm ---\n");
+ debug ("rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n",
+ rsrc_ptr->rsrc_type, rsrc_ptr->bus_num, rsrc_ptr->dev_fun, rsrc_ptr->start_addr, rsrc_ptr->end_addr);
+
+ list_add (&rsrc_ptr->ebda_pci_rsrc_list, &ibmphp_ebda_pci_rsrc_head);
+ }
+ }
+ kfree (rsrc_list_ptr);
+ rsrc_list_ptr = NULL;
+ print_ebda_pci_rsrc ();
+ return 0;
+}
+
+u16 ibmphp_get_total_controllers (void)
+{
+ return hpc_list_ptr->num_ctlrs;
+}
+
+struct slot *ibmphp_get_slot_from_physical_num (u8 physical_num)
+{
+ struct slot *slot;
+
+ list_for_each_entry(slot, &ibmphp_slot_head, ibm_slot_list) {
+ if (slot->number == physical_num)
+ return slot;
+ }
+ return NULL;
+}
+
+/* Find the bus_info entry for a given bus number.  Each entry tracks:
+ * - the smallest slot number on that bus
+ * - the largest slot number on that bus
+ * - the total number of slots on that bus
+ * (if only one slot per bus, slot_min = slot_max)
+ */
+struct bus_info *ibmphp_find_same_bus_num (u32 num)
+{
+ struct bus_info *ptr;
+
+ list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
+ if (ptr->busno == num)
+ return ptr;
+ }
+ return NULL;
+}
+
+/* Find the relative bus number (index), in order to map the
+ * corresponding bus register
+ */
+int ibmphp_get_bus_index (u8 num)
+{
+ struct bus_info *ptr;
+
+ list_for_each_entry(ptr, &bus_info_head, bus_info_list) {
+ if (ptr->busno == num)
+ return ptr->index;
+ }
+ return -ENODEV;
+}
+
+void ibmphp_free_bus_info_queue (void)
+{
+ struct bus_info *bus_info;
+ struct list_head *list;
+ struct list_head *next;
+
+	list_for_each_safe (list, next, &bus_info_head) {
+ bus_info = list_entry (list, struct bus_info, bus_info_list);
+ kfree (bus_info);
+ }
+}
+
+void ibmphp_free_ebda_hpc_queue (void)
+{
+ struct controller *controller = NULL;
+ struct list_head *list;
+ struct list_head *next;
+ int pci_flag = 0;
+
+ list_for_each_safe (list, next, &ebda_hpc_head) {
+ controller = list_entry (list, struct controller, ebda_hpc_list);
+ if (controller->ctlr_type == 0)
+ release_region (controller->u.isa_ctlr.io_start, (controller->u.isa_ctlr.io_end - controller->u.isa_ctlr.io_start + 1));
+ else if ((controller->ctlr_type == 1) && (!pci_flag)) {
+ ++pci_flag;
+ pci_unregister_driver (&ibmphp_driver);
+ }
+ free_ebda_hpc (controller);
+ }
+}
+
+void ibmphp_free_ebda_pci_rsrc_queue (void)
+{
+ struct ebda_pci_rsrc *resource;
+ struct list_head *list;
+ struct list_head *next;
+
+ list_for_each_safe (list, next, &ibmphp_ebda_pci_rsrc_head) {
+ resource = list_entry (list, struct ebda_pci_rsrc, ebda_pci_rsrc_list);
+ kfree (resource);
+ resource = NULL;
+ }
+}
+
+static struct pci_device_id id_table[] = {
+ {
+ .vendor = PCI_VENDOR_ID_IBM,
+ .device = HPC_DEVICE_ID,
+ .subvendor = PCI_VENDOR_ID_IBM,
+ .subdevice = HPC_SUBSYSTEM_ID,
+ .class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00),
+ }, {}
+};
+
+MODULE_DEVICE_TABLE(pci, id_table);
+
+static int ibmphp_probe (struct pci_dev *, const struct pci_device_id *);
+static struct pci_driver ibmphp_driver = {
+ .name = "ibmphp",
+ .id_table = id_table,
+ .probe = ibmphp_probe,
+};
+
+int ibmphp_register_pci (void)
+{
+ struct controller *ctrl;
+ int rc = 0;
+
+ list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) {
+ if (ctrl->ctlr_type == 1) {
+ rc = pci_register_driver(&ibmphp_driver);
+ break;
+ }
+ }
+ return rc;
+}
+
+static int ibmphp_probe (struct pci_dev *dev, const struct pci_device_id *ids)
+{
+ struct controller *ctrl;
+
+ debug ("inside ibmphp_probe\n");
+
+ list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) {
+ if (ctrl->ctlr_type == 1) {
+ if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) {
+ ctrl->ctrl_dev = dev;
+ debug ("found device!!!\n");
+ debug ("dev->device = %x, dev->subsystem_device = %x\n", dev->device, dev->subsystem_device);
+ return 0;
+ }
+ }
+ }
+ return -ENODEV;
+}
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
new file mode 100644
index 000000000..220876715
--- /dev/null
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -0,0 +1,1131 @@
+/*
+ * IBM Hot Plug Controller Driver
+ *
+ * Written By: Jyoti Shah, IBM Corporation
+ *
+ * Copyright (C) 2001-2003 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <gregkh@us.ibm.com>
+ * <jshah@us.ibm.com>
+ *
+ */
+
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/kthread.h>
+#include "ibmphp.h"
+
+static int to_debug = 0;
+#define debug_polling(fmt, arg...) do { if (to_debug) debug (fmt, arg); } while (0)
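+// to_debug gates the (very chatty) polling-path debug output; it is
+// switched on only between ibmphp_lock_operations() and
+// ibmphp_unlock_operations()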
+
+//----------------------------------------------------------------------------
+// timeout values
+//----------------------------------------------------------------------------
+#define CMD_COMPLETE_TOUT_SEC 60 // give HPC 60 sec to finish cmd
+#define HPC_CTLR_WORKING_TOUT 60 // give HPC 60 sec to finish cmd
+#define HPC_GETACCESS_TIMEOUT 60 // seconds
+#define POLL_INTERVAL_SEC 2 // poll HPC every 2 seconds
+#define POLL_LATCH_CNT 5 // poll latch 5 times, then poll slots
+
+//----------------------------------------------------------------------------
+// Winnipeg Architected Register Offsets
+//----------------------------------------------------------------------------
+#define WPG_I2CMBUFL_OFFSET 0x08 // I2C Message Buffer Low
+#define WPG_I2CMOSUP_OFFSET 0x10 // I2C Master Operation Setup Reg
+#define WPG_I2CMCNTL_OFFSET 0x20 // I2C Master Control Register
+#define WPG_I2CPARM_OFFSET 0x40 // I2C Parameter Register
+#define WPG_I2CSTAT_OFFSET 0x70 // I2C Status Register
+
+//----------------------------------------------------------------------------
+// Winnipeg Store Type commands (Add these commands to the register offset)
+//----------------------------------------------------------------------------
+#define WPG_I2C_AND 0x1000 // I2C AND operation
+#define WPG_I2C_OR 0x2000 // I2C OR operation
+
+//----------------------------------------------------------------------------
+// Command set for I2C Master Operation Setup Register
+//----------------------------------------------------------------------------
+#define WPG_READATADDR_MASK 0x00010000 // read,bytes,I2C shifted,index
+#define WPG_WRITEATADDR_MASK 0x40010000 // write,bytes,I2C shifted,index
+#define WPG_READDIRECT_MASK 0x10010000
+#define WPG_WRITEDIRECT_MASK 0x60010000
+
+
+//----------------------------------------------------------------------------
+// bit masks for I2C Master Control Register
+//----------------------------------------------------------------------------
+#define WPG_I2CMCNTL_STARTOP_MASK 0x00000002 // Start the Operation
+
+//----------------------------------------------------------------------------
+//
+//----------------------------------------------------------------------------
+#define WPG_I2C_IOREMAP_SIZE 0x2044 // size of linear address interval
+
+//----------------------------------------------------------------------------
+// command index
+//----------------------------------------------------------------------------
+#define WPG_1ST_SLOT_INDEX 0x01 // index - 1st slot for ctlr
+#define WPG_CTLR_INDEX 0x0F // index - ctlr
+#define WPG_1ST_EXTSLOT_INDEX 0x10 // index - 1st ext slot for ctlr
+#define WPG_1ST_BUS_INDEX 0x1F // index - 1st bus for ctlr
+
+//----------------------------------------------------------------------------
+// macro utilities
+//----------------------------------------------------------------------------
+// if bits 20,22,25,26,27,29,30 are OFF return 1
+#define HPC_I2CSTATUS_CHECK(s) ((u8)((s & 0x00000A76) ? 0 : 1))
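+// (the bit numbers above follow the IBM big-endian convention where
+// bit 0 is the MSB of the 32-bit word, i.e. bit n = 1 << (31 - n);
+// bits 20,22,25,26,27,29,30 together form the 0x00000A76 mask)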
+
+//----------------------------------------------------------------------------
+// global variables
+//----------------------------------------------------------------------------
+static struct mutex sem_hpcaccess; // lock access to HPC
+static struct semaphore semOperations; // lock all operations and
+ // access to data structures
+static struct semaphore sem_exit; // make sure polling thread goes away
+static struct task_struct *ibmphp_poll_thread;
+//----------------------------------------------------------------------------
+// local function prototypes
+//----------------------------------------------------------------------------
+static u8 i2c_ctrl_read (struct controller *, void __iomem *, u8);
+static u8 i2c_ctrl_write (struct controller *, void __iomem *, u8, u8);
+static u8 hpc_writecmdtoindex (u8, u8);
+static u8 hpc_readcmdtoindex (u8, u8);
+static void get_hpc_access (void);
+static void free_hpc_access (void);
+static int poll_hpc(void *data);
+static int process_changeinstatus (struct slot *, struct slot *);
+static int process_changeinlatch (u8, u8, struct controller *);
+static int hpc_wait_ctlr_notworking (int, struct controller *, void __iomem *, u8 *);
+//----------------------------------------------------------------------------
+
+
+/*----------------------------------------------------------------------
+* Name: ibmphp_hpc_initvars
+*
+* Action: initialize semaphores and variables
+*---------------------------------------------------------------------*/
+void __init ibmphp_hpc_initvars (void)
+{
+ debug ("%s - Entry\n", __func__);
+
+ mutex_init(&sem_hpcaccess);
+ sema_init(&semOperations, 1);
+ sema_init(&sem_exit, 0);
+ to_debug = 0;
+
+ debug ("%s - Exit\n", __func__);
+}
+
+/*----------------------------------------------------------------------
+* Name: i2c_ctrl_read
+*
+* Action: read from HPC over I2C
+*
+*---------------------------------------------------------------------*/
+static u8 i2c_ctrl_read (struct controller *ctlr_ptr, void __iomem *WPGBbar, u8 index)
+{
+ u8 status;
+ int i;
+ void __iomem *wpg_addr; // base addr + offset
+ unsigned long wpg_data; // data to/from WPG LOHI format
+ unsigned long ultemp;
+ unsigned long data; // actual data HILO format
+
+ debug_polling ("%s - Entry WPGBbar[%p] index[%x] \n", __func__, WPGBbar, index);
+
+ //--------------------------------------------------------------------
+ // READ - step 1
+ // read at address, byte length, I2C address (shifted), index
+ // or read direct, byte length, index
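+	// e.g. (hypothetical values): i2c_addr 0xa0 and index 0x01 give
+	// 0x00010000 | ((0xa0 >> 1) << 8) | 0x01 = 0x00015001, which is
+	// byte-swapped before being written at WPG_I2CMOSUP_OFFSET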
+ if (ctlr_ptr->ctlr_type == 0x02) {
+ data = WPG_READATADDR_MASK;
+ // fill in I2C address
+ ultemp = (unsigned long)ctlr_ptr->u.wpeg_ctlr.i2c_addr;
+ ultemp = ultemp >> 1;
+ data |= (ultemp << 8);
+
+ // fill in index
+ data |= (unsigned long)index;
+ } else if (ctlr_ptr->ctlr_type == 0x04) {
+ data = WPG_READDIRECT_MASK;
+
+ // fill in index
+ ultemp = (unsigned long)index;
+ ultemp = ultemp << 8;
+ data |= ultemp;
+ } else {
+		err ("this controller type is not supported\n");
+ return HPC_ERROR;
+ }
+
+ wpg_data = swab32 (data); // swap data before writing
+ wpg_addr = WPGBbar + WPG_I2CMOSUP_OFFSET;
+ writel (wpg_data, wpg_addr);
+
+ //--------------------------------------------------------------------
+ // READ - step 2 : clear the message buffer
+ data = 0x00000000;
+ wpg_data = swab32 (data);
+ wpg_addr = WPGBbar + WPG_I2CMBUFL_OFFSET;
+ writel (wpg_data, wpg_addr);
+
+ //--------------------------------------------------------------------
+ // READ - step 3 : issue start operation, I2C master control bit 30:ON
+ // 2020 : [20] OR operation at [20] offset 0x20
+ data = WPG_I2CMCNTL_STARTOP_MASK;
+ wpg_data = swab32 (data);
+ wpg_addr = WPGBbar + WPG_I2CMCNTL_OFFSET + WPG_I2C_OR;
+ writel (wpg_data, wpg_addr);
+
+ //--------------------------------------------------------------------
+ // READ - step 4 : wait until start operation bit clears
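+	// note: the loop counts CMD_COMPLETE_TOUT_SEC iterations of a
+	// 10 ms sleep, so the effective timeout here is on the order of
+	// 600 ms rather than 60 seconds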
+ i = CMD_COMPLETE_TOUT_SEC;
+ while (i) {
+ msleep(10);
+ wpg_addr = WPGBbar + WPG_I2CMCNTL_OFFSET;
+ wpg_data = readl (wpg_addr);
+ data = swab32 (wpg_data);
+ if (!(data & WPG_I2CMCNTL_STARTOP_MASK))
+ break;
+ i--;
+ }
+ if (i == 0) {
+ debug ("%s - Error : WPG timeout\n", __func__);
+ return HPC_ERROR;
+ }
+ //--------------------------------------------------------------------
+ // READ - step 5 : read I2C status register
+ i = CMD_COMPLETE_TOUT_SEC;
+ while (i) {
+ msleep(10);
+ wpg_addr = WPGBbar + WPG_I2CSTAT_OFFSET;
+ wpg_data = readl (wpg_addr);
+ data = swab32 (wpg_data);
+ if (HPC_I2CSTATUS_CHECK (data))
+ break;
+ i--;
+ }
+ if (i == 0) {
+ debug ("ctrl_read - Exit Error:I2C timeout\n");
+ return HPC_ERROR;
+ }
+
+ //--------------------------------------------------------------------
+ // READ - step 6 : get DATA
+ wpg_addr = WPGBbar + WPG_I2CMBUFL_OFFSET;
+ wpg_data = readl (wpg_addr);
+ data = swab32 (wpg_data);
+
+ status = (u8) data;
+
+ debug_polling ("%s - Exit index[%x] status[%x]\n", __func__, index, status);
+
+ return (status);
+}
+
+/*----------------------------------------------------------------------
+* Name: i2c_ctrl_write
+*
+* Action: write to HPC over I2C
+*
+* Return 0 or error codes
+*---------------------------------------------------------------------*/
+static u8 i2c_ctrl_write (struct controller *ctlr_ptr, void __iomem *WPGBbar, u8 index, u8 cmd)
+{
+ u8 rc;
+ void __iomem *wpg_addr; // base addr + offset
+ unsigned long wpg_data; // data to/from WPG LOHI format
+ unsigned long ultemp;
+ unsigned long data; // actual data HILO format
+ int i;
+
+ debug_polling ("%s - Entry WPGBbar[%p] index[%x] cmd[%x]\n", __func__, WPGBbar, index, cmd);
+
+ rc = 0;
+ //--------------------------------------------------------------------
+ // WRITE - step 1
+ // write at address, byte length, I2C address (shifted), index
+ // or write direct, byte length, index
+ data = 0x00000000;
+
+ if (ctlr_ptr->ctlr_type == 0x02) {
+ data = WPG_WRITEATADDR_MASK;
+ // fill in I2C address
+ ultemp = (unsigned long)ctlr_ptr->u.wpeg_ctlr.i2c_addr;
+ ultemp = ultemp >> 1;
+ data |= (ultemp << 8);
+
+ // fill in index
+ data |= (unsigned long)index;
+ } else if (ctlr_ptr->ctlr_type == 0x04) {
+ data = WPG_WRITEDIRECT_MASK;
+
+ // fill in index
+ ultemp = (unsigned long)index;
+ ultemp = ultemp << 8;
+ data |= ultemp;
+ } else {
+		err ("this controller type is not supported\n");
+ return HPC_ERROR;
+ }
+
+ wpg_data = swab32 (data); // swap data before writing
+ wpg_addr = WPGBbar + WPG_I2CMOSUP_OFFSET;
+ writel (wpg_data, wpg_addr);
+
+ //--------------------------------------------------------------------
+ // WRITE - step 2 : clear the message buffer
+ data = 0x00000000 | (unsigned long)cmd;
+ wpg_data = swab32 (data);
+ wpg_addr = WPGBbar + WPG_I2CMBUFL_OFFSET;
+ writel (wpg_data, wpg_addr);
+
+ //--------------------------------------------------------------------
+ // WRITE - step 3 : issue start operation,I2C master control bit 30:ON
+ // 2020 : [20] OR operation at [20] offset 0x20
+ data = WPG_I2CMCNTL_STARTOP_MASK;
+ wpg_data = swab32 (data);
+ wpg_addr = WPGBbar + WPG_I2CMCNTL_OFFSET + WPG_I2C_OR;
+ writel (wpg_data, wpg_addr);
+
+ //--------------------------------------------------------------------
+ // WRITE - step 4 : wait until start operation bit clears
+ i = CMD_COMPLETE_TOUT_SEC;
+ while (i) {
+ msleep(10);
+ wpg_addr = WPGBbar + WPG_I2CMCNTL_OFFSET;
+ wpg_data = readl (wpg_addr);
+ data = swab32 (wpg_data);
+ if (!(data & WPG_I2CMCNTL_STARTOP_MASK))
+ break;
+ i--;
+ }
+ if (i == 0) {
+ debug ("%s - Exit Error:WPG timeout\n", __func__);
+ rc = HPC_ERROR;
+ }
+
+ //--------------------------------------------------------------------
+ // WRITE - step 5 : read I2C status register
+ i = CMD_COMPLETE_TOUT_SEC;
+ while (i) {
+ msleep(10);
+ wpg_addr = WPGBbar + WPG_I2CSTAT_OFFSET;
+ wpg_data = readl (wpg_addr);
+ data = swab32 (wpg_data);
+ if (HPC_I2CSTATUS_CHECK (data))
+ break;
+ i--;
+ }
+ if (i == 0) {
+		debug ("ctrl_write - Error : I2C timeout\n");
+ rc = HPC_ERROR;
+ }
+
+ debug_polling ("%s Exit rc[%x]\n", __func__, rc);
+ return (rc);
+}
+
+//------------------------------------------------------------
+// Read from ISA type HPC
+//------------------------------------------------------------
+static u8 isa_ctrl_read (struct controller *ctlr_ptr, u8 offset)
+{
+	u16 start_address;
+	u8 data;
+
+	start_address = ctlr_ptr->u.isa_ctlr.io_start;
+	data = inb (start_address + offset);
+	return data;
+}
+
+//--------------------------------------------------------------
+// Write to ISA type HPC
+//--------------------------------------------------------------
+static void isa_ctrl_write (struct controller *ctlr_ptr, u8 offset, u8 data)
+{
+ u16 start_address;
+ u16 port_address;
+
+ start_address = ctlr_ptr->u.isa_ctlr.io_start;
+ port_address = start_address + (u16) offset;
+ outb (data, port_address);
+}
+
+static u8 pci_ctrl_read (struct controller *ctrl, u8 offset)
+{
+ u8 data = 0x00;
+ debug ("inside pci_ctrl_read\n");
+ if (ctrl->ctrl_dev)
+ pci_read_config_byte (ctrl->ctrl_dev, HPC_PCI_OFFSET + offset, &data);
+ return data;
+}
+
+static u8 pci_ctrl_write (struct controller *ctrl, u8 offset, u8 data)
+{
+ u8 rc = -ENODEV;
+ debug ("inside pci_ctrl_write\n");
+ if (ctrl->ctrl_dev) {
+ pci_write_config_byte (ctrl->ctrl_dev, HPC_PCI_OFFSET + offset, data);
+ rc = 0;
+ }
+ return rc;
+}
+
+static u8 ctrl_read (struct controller *ctlr, void __iomem *base, u8 offset)
+{
+ u8 rc;
+ switch (ctlr->ctlr_type) {
+ case 0:
+ rc = isa_ctrl_read (ctlr, offset);
+ break;
+ case 1:
+ rc = pci_ctrl_read (ctlr, offset);
+ break;
+ case 2:
+ case 4:
+ rc = i2c_ctrl_read (ctlr, base, offset);
+ break;
+ default:
+ return -ENODEV;
+ }
+ return rc;
+}
+
+static u8 ctrl_write (struct controller *ctlr, void __iomem *base, u8 offset, u8 data)
+{
+ u8 rc = 0;
+ switch (ctlr->ctlr_type) {
+ case 0:
+ isa_ctrl_write(ctlr, offset, data);
+ break;
+ case 1:
+ rc = pci_ctrl_write (ctlr, offset, data);
+ break;
+ case 2:
+ case 4:
+ rc = i2c_ctrl_write(ctlr, base, offset, data);
+ break;
+ default:
+ return -ENODEV;
+ }
+ return rc;
+}
+/*----------------------------------------------------------------------
+* Name: hpc_writecmdtoindex()
+*
+* Action: convert a write command to proper index within a controller
+*
+* Return index, HPC_ERROR
+*---------------------------------------------------------------------*/
+static u8 hpc_writecmdtoindex (u8 cmd, u8 index)
+{
+ u8 rc;
+
+ switch (cmd) {
+ case HPC_CTLR_ENABLEIRQ: // 0x00.N.15
+ case HPC_CTLR_CLEARIRQ: // 0x06.N.15
+ case HPC_CTLR_RESET: // 0x07.N.15
+ case HPC_CTLR_IRQSTEER: // 0x08.N.15
+ case HPC_CTLR_DISABLEIRQ: // 0x01.N.15
+ case HPC_ALLSLOT_ON: // 0x11.N.15
+ case HPC_ALLSLOT_OFF: // 0x12.N.15
+ rc = 0x0F;
+ break;
+
+ case HPC_SLOT_OFF: // 0x02.Y.0-14
+ case HPC_SLOT_ON: // 0x03.Y.0-14
+ case HPC_SLOT_ATTNOFF: // 0x04.N.0-14
+ case HPC_SLOT_ATTNON: // 0x05.N.0-14
+ case HPC_SLOT_BLINKLED: // 0x13.N.0-14
+ rc = index;
+ break;
+
+ case HPC_BUS_33CONVMODE:
+ case HPC_BUS_66CONVMODE:
+ case HPC_BUS_66PCIXMODE:
+ case HPC_BUS_100PCIXMODE:
+ case HPC_BUS_133PCIXMODE:
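+		// relative bus index 1 maps to WPG_1ST_BUS_INDEX (0x1f),
+		// 2 to 0x20, and so on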
+ rc = index + WPG_1ST_BUS_INDEX - 1;
+ break;
+
+ default:
+ err ("hpc_writecmdtoindex - Error invalid cmd[%x]\n", cmd);
+ rc = HPC_ERROR;
+ }
+
+ return rc;
+}
+
+/*----------------------------------------------------------------------
+* Name: hpc_readcmdtoindex()
+*
+* Action: convert a read command to proper index within a controller
+*
+* Return index, HPC_ERROR
+*---------------------------------------------------------------------*/
+static u8 hpc_readcmdtoindex (u8 cmd, u8 index)
+{
+ u8 rc;
+
+ switch (cmd) {
+ case READ_CTLRSTATUS:
+ rc = 0x0F;
+ break;
+ case READ_SLOTSTATUS:
+ case READ_ALLSTAT:
+ rc = index;
+ break;
+ case READ_EXTSLOTSTATUS:
+ rc = index + WPG_1ST_EXTSLOT_INDEX;
+ break;
+ case READ_BUSSTATUS:
+ rc = index + WPG_1ST_BUS_INDEX - 1;
+ break;
+ case READ_SLOTLATCHLOWREG:
+ rc = 0x28;
+ break;
+ case READ_REVLEVEL:
+ rc = 0x25;
+ break;
+ case READ_HPCOPTIONS:
+ rc = 0x27;
+ break;
+ default:
+ rc = HPC_ERROR;
+ }
+ return rc;
+}
+
+/*----------------------------------------------------------------------
+* Name: HPCreadslot()
+*
+* Action: issue a READ command to HPC
+*
+* Input: pslot - cannot be NULL for READ_ALLSTAT
+* pstatus - can be NULL for READ_ALLSTAT
+*
+* Return 0 or error codes
+*---------------------------------------------------------------------*/
+int ibmphp_hpc_readslot (struct slot *pslot, u8 cmd, u8 *pstatus)
+{
+ void __iomem *wpg_bbar = NULL;
+ struct controller *ctlr_ptr;
+ struct list_head *pslotlist;
+ u8 index, status;
+ int rc = 0;
+ int busindex;
+
+ debug_polling ("%s - Entry pslot[%p] cmd[%x] pstatus[%p]\n", __func__, pslot, cmd, pstatus);
+
+ if ((pslot == NULL)
+ || ((pstatus == NULL) && (cmd != READ_ALLSTAT) && (cmd != READ_BUSSTATUS))) {
+ rc = -EINVAL;
+ err ("%s - Error invalid pointer, rc[%d]\n", __func__, rc);
+ return rc;
+ }
+
+ if (cmd == READ_BUSSTATUS) {
+ busindex = ibmphp_get_bus_index (pslot->bus);
+ if (busindex < 0) {
+ rc = -EINVAL;
+ err ("%s - Exit Error:invalid bus, rc[%d]\n", __func__, rc);
+ return rc;
+ } else
+ index = (u8) busindex;
+ } else
+ index = pslot->ctlr_index;
+
+ index = hpc_readcmdtoindex (cmd, index);
+
+ if (index == HPC_ERROR) {
+ rc = -EINVAL;
+ err ("%s - Exit Error:invalid index, rc[%d]\n", __func__, rc);
+ return rc;
+ }
+
+ ctlr_ptr = pslot->ctrl;
+
+ get_hpc_access ();
+
+ //--------------------------------------------------------------------
+ // map physical address to logical address
+ //--------------------------------------------------------------------
+ if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4))
+ wpg_bbar = ioremap (ctlr_ptr->u.wpeg_ctlr.wpegbbar, WPG_I2C_IOREMAP_SIZE);
+
+ //--------------------------------------------------------------------
+ // check controller status before reading
+ //--------------------------------------------------------------------
+ rc = hpc_wait_ctlr_notworking (HPC_CTLR_WORKING_TOUT, ctlr_ptr, wpg_bbar, &status);
+ if (!rc) {
+ switch (cmd) {
+ case READ_ALLSTAT:
+ // update the slot structure
+ pslot->ctrl->status = status;
+ pslot->status = ctrl_read (ctlr_ptr, wpg_bbar, index);
+ rc = hpc_wait_ctlr_notworking (HPC_CTLR_WORKING_TOUT, ctlr_ptr, wpg_bbar,
+ &status);
+ if (!rc)
+ pslot->ext_status = ctrl_read (ctlr_ptr, wpg_bbar, index + WPG_1ST_EXTSLOT_INDEX);
+
+ break;
+
+ case READ_SLOTSTATUS:
+ // DO NOT update the slot structure
+ *pstatus = ctrl_read (ctlr_ptr, wpg_bbar, index);
+ break;
+
+ case READ_EXTSLOTSTATUS:
+ // DO NOT update the slot structure
+ *pstatus = ctrl_read (ctlr_ptr, wpg_bbar, index);
+ break;
+
+ case READ_CTLRSTATUS:
+ // DO NOT update the slot structure
+ *pstatus = status;
+ break;
+
+ case READ_BUSSTATUS:
+ pslot->busstatus = ctrl_read (ctlr_ptr, wpg_bbar, index);
+ break;
+ case READ_REVLEVEL:
+ *pstatus = ctrl_read (ctlr_ptr, wpg_bbar, index);
+ break;
+ case READ_HPCOPTIONS:
+ *pstatus = ctrl_read (ctlr_ptr, wpg_bbar, index);
+ break;
+ case READ_SLOTLATCHLOWREG:
+ // DO NOT update the slot structure
+ *pstatus = ctrl_read (ctlr_ptr, wpg_bbar, index);
+ break;
+
+ // Not used
+ case READ_ALLSLOT:
+ list_for_each (pslotlist, &ibmphp_slot_head) {
+ pslot = list_entry (pslotlist, struct slot, ibm_slot_list);
+ index = pslot->ctlr_index;
+ rc = hpc_wait_ctlr_notworking (HPC_CTLR_WORKING_TOUT, ctlr_ptr,
+ wpg_bbar, &status);
+ if (!rc) {
+ pslot->status = ctrl_read (ctlr_ptr, wpg_bbar, index);
+ rc = hpc_wait_ctlr_notworking (HPC_CTLR_WORKING_TOUT,
+ ctlr_ptr, wpg_bbar, &status);
+ if (!rc)
+ pslot->ext_status =
+ ctrl_read (ctlr_ptr, wpg_bbar,
+ index + WPG_1ST_EXTSLOT_INDEX);
+ } else {
+ err ("%s - Error ctrl_read failed\n", __func__);
+ rc = -EINVAL;
+ break;
+ }
+ }
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ }
+ //--------------------------------------------------------------------
+ // cleanup
+ //--------------------------------------------------------------------
+
+ // remove physical to logical address mapping
+ if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4))
+ iounmap (wpg_bbar);
+
+ free_hpc_access ();
+
+ debug_polling ("%s - Exit rc[%d]\n", __func__, rc);
+ return rc;
+}
+
+/*----------------------------------------------------------------------
+* Name: ibmphp_hpc_writeslot()
+*
+* Action: issue a WRITE command to HPC
+*---------------------------------------------------------------------*/
+int ibmphp_hpc_writeslot (struct slot *pslot, u8 cmd)
+{
+ void __iomem *wpg_bbar = NULL;
+ struct controller *ctlr_ptr;
+ u8 index, status;
+ int busindex;
+ u8 done;
+ int rc = 0;
+ int timeout;
+
+ debug_polling ("%s - Entry pslot[%p] cmd[%x]\n", __func__, pslot, cmd);
+ if (pslot == NULL) {
+ rc = -EINVAL;
+ err ("%s - Error Exit rc[%d]\n", __func__, rc);
+ return rc;
+ }
+
+ if ((cmd == HPC_BUS_33CONVMODE) || (cmd == HPC_BUS_66CONVMODE) ||
+ (cmd == HPC_BUS_66PCIXMODE) || (cmd == HPC_BUS_100PCIXMODE) ||
+ (cmd == HPC_BUS_133PCIXMODE)) {
+ busindex = ibmphp_get_bus_index (pslot->bus);
+ if (busindex < 0) {
+ rc = -EINVAL;
+ err ("%s - Exit Error:invalid bus, rc[%d]\n", __func__, rc);
+ return rc;
+ } else
+ index = (u8) busindex;
+ } else
+ index = pslot->ctlr_index;
+
+ index = hpc_writecmdtoindex (cmd, index);
+
+ if (index == HPC_ERROR) {
+ rc = -EINVAL;
+ err ("%s - Error Exit rc[%d]\n", __func__, rc);
+ return rc;
+ }
+
+ ctlr_ptr = pslot->ctrl;
+
+ get_hpc_access ();
+
+ //--------------------------------------------------------------------
+ // map physical address to logical address
+ //--------------------------------------------------------------------
+ if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4)) {
+ wpg_bbar = ioremap (ctlr_ptr->u.wpeg_ctlr.wpegbbar, WPG_I2C_IOREMAP_SIZE);
+
+ debug ("%s - ctlr id[%x] physical[%lx] logical[%lx] i2c[%x]\n", __func__,
+ ctlr_ptr->ctlr_id, (ulong) (ctlr_ptr->u.wpeg_ctlr.wpegbbar), (ulong) wpg_bbar,
+ ctlr_ptr->u.wpeg_ctlr.i2c_addr);
+ }
+ //--------------------------------------------------------------------
+ // check controller status before writing
+ //--------------------------------------------------------------------
+ rc = hpc_wait_ctlr_notworking (HPC_CTLR_WORKING_TOUT, ctlr_ptr, wpg_bbar, &status);
+ if (!rc) {
+
+ ctrl_write (ctlr_ptr, wpg_bbar, index, cmd);
+
+ //--------------------------------------------------------------------
+ // check controller is still not working on the command
+ //--------------------------------------------------------------------
+ timeout = CMD_COMPLETE_TOUT_SEC;
+ done = 0;
+ while (!done) {
+ rc = hpc_wait_ctlr_notworking (HPC_CTLR_WORKING_TOUT, ctlr_ptr, wpg_bbar,
+ &status);
+ if (!rc) {
+ if (NEEDTOCHECK_CMDSTATUS (cmd)) {
+ if (CTLR_FINISHED (status) == HPC_CTLR_FINISHED_YES)
+ done = 1;
+ } else
+ done = 1;
+ }
+ if (!done) {
+ msleep(1000);
+ if (timeout < 1) {
+ done = 1;
+ err ("%s - Error command complete timeout\n", __func__);
+ rc = -EFAULT;
+ } else
+ timeout--;
+ }
+ }
+ ctlr_ptr->status = status;
+ }
+ // cleanup
+
+ // remove physical to logical address mapping
+ if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4))
+ iounmap (wpg_bbar);
+ free_hpc_access ();
+
+ debug_polling ("%s - Exit rc[%d]\n", __func__, rc);
+ return rc;
+}
+
+/*----------------------------------------------------------------------
+* Name: get_hpc_access()
+*
+* Action: make sure only one process can access HPC at a time
+*---------------------------------------------------------------------*/
+static void get_hpc_access (void)
+{
+ mutex_lock(&sem_hpcaccess);
+}
+
+/*----------------------------------------------------------------------
+* Name: free_hpc_access()
+*---------------------------------------------------------------------*/
+void free_hpc_access (void)
+{
+ mutex_unlock(&sem_hpcaccess);
+}
+
+/*----------------------------------------------------------------------
+* Name: ibmphp_lock_operations()
+*
+* Action: make sure only one process can change the data structure
+*---------------------------------------------------------------------*/
+void ibmphp_lock_operations (void)
+{
+ down (&semOperations);
+ to_debug = 1;
+}
+
+/*----------------------------------------------------------------------
+* Name: ibmphp_unlock_operations()
+*---------------------------------------------------------------------*/
+void ibmphp_unlock_operations (void)
+{
+ debug ("%s - Entry\n", __func__);
+ up (&semOperations);
+ to_debug = 0;
+ debug ("%s - Exit\n", __func__);
+}
+
+/*----------------------------------------------------------------------
+* Name: poll_hpc()
+*---------------------------------------------------------------------*/
+#define POLL_LATCH_REGISTER 0
+#define POLL_SLOTS 1
+#define POLL_SLEEP 2
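+/*----------------------------------------------------------------------
+* Added note: poll_hpc() cycles POLL_LATCH_REGISTER -> POLL_SLEEP
+* (POLL_INTERVAL_SEC seconds) and, after POLL_LATCH_CNT latch passes,
+* runs one POLL_SLOTS pass to re-read the full status of every slot.
+*---------------------------------------------------------------------*/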
+static int poll_hpc(void *data)
+{
+ struct slot myslot;
+ struct slot *pslot = NULL;
+ struct list_head *pslotlist;
+ int rc;
+ int poll_state = POLL_LATCH_REGISTER;
+ u8 oldlatchlow = 0x00;
+ u8 curlatchlow = 0x00;
+ int poll_count = 0;
+ u8 ctrl_count = 0x00;
+
+ debug ("%s - Entry\n", __func__);
+
+ while (!kthread_should_stop()) {
+ /* try to get the lock to do some kind of hardware access */
+ down (&semOperations);
+
+ switch (poll_state) {
+ case POLL_LATCH_REGISTER:
+ oldlatchlow = curlatchlow;
+ ctrl_count = 0x00;
+ list_for_each (pslotlist, &ibmphp_slot_head) {
+ if (ctrl_count >= ibmphp_get_total_controllers())
+ break;
+ pslot = list_entry (pslotlist, struct slot, ibm_slot_list);
+ if (pslot->ctrl->ctlr_relative_id == ctrl_count) {
+ ctrl_count++;
+ if (READ_SLOT_LATCH (pslot->ctrl)) {
+ rc = ibmphp_hpc_readslot (pslot,
+ READ_SLOTLATCHLOWREG,
+ &curlatchlow);
+ if (oldlatchlow != curlatchlow)
+ process_changeinlatch (oldlatchlow,
+ curlatchlow,
+ pslot->ctrl);
+ }
+ }
+ }
+ ++poll_count;
+ poll_state = POLL_SLEEP;
+ break;
+ case POLL_SLOTS:
+ list_for_each (pslotlist, &ibmphp_slot_head) {
+ pslot = list_entry (pslotlist, struct slot, ibm_slot_list);
+ // make a copy of the old status
+ memcpy ((void *) &myslot, (void *) pslot,
+ sizeof (struct slot));
+ rc = ibmphp_hpc_readslot (pslot, READ_ALLSTAT, NULL);
+ if ((myslot.status != pslot->status)
+ || (myslot.ext_status != pslot->ext_status))
+ process_changeinstatus (pslot, &myslot);
+ }
+ ctrl_count = 0x00;
+ list_for_each (pslotlist, &ibmphp_slot_head) {
+ if (ctrl_count >= ibmphp_get_total_controllers())
+ break;
+ pslot = list_entry (pslotlist, struct slot, ibm_slot_list);
+ if (pslot->ctrl->ctlr_relative_id == ctrl_count) {
+ ctrl_count++;
+ if (READ_SLOT_LATCH (pslot->ctrl))
+ rc = ibmphp_hpc_readslot (pslot,
+ READ_SLOTLATCHLOWREG,
+ &curlatchlow);
+ }
+ }
+ ++poll_count;
+ poll_state = POLL_SLEEP;
+ break;
+ case POLL_SLEEP:
+ /* don't sleep with a lock on the hardware */
+ up (&semOperations);
+ msleep(POLL_INTERVAL_SEC * 1000);
+
+ if (kthread_should_stop())
+ goto out_sleep;
+
+ down (&semOperations);
+
+ if (poll_count >= POLL_LATCH_CNT) {
+ poll_count = 0;
+ poll_state = POLL_SLOTS;
+ } else
+ poll_state = POLL_LATCH_REGISTER;
+ break;
+ }
+ /* give up the hardware semaphore */
+ up (&semOperations);
+ /* sleep for a short time just for good measure */
+out_sleep:
+ msleep(100);
+ }
+ up (&sem_exit);
+ debug ("%s - Exit\n", __func__);
+ return 0;
+}
+
+
+/*----------------------------------------------------------------------
+* Name: process_changeinstatus
+*
+* Action: compare old and new slot status, process the change in status
+*
+* Input: pointer to slot struct, old slot struct
+*
+* Return 0 or error codes
+* Value:
+*
+* Side
+* Effects: None.
+*
+* Notes:
+*---------------------------------------------------------------------*/
+static int process_changeinstatus (struct slot *pslot, struct slot *poldslot)
+{
+ u8 status;
+ int rc = 0;
+ u8 disable = 0;
+ u8 update = 0;
+
+ debug ("process_changeinstatus - Entry pslot[%p], poldslot[%p]\n", pslot, poldslot);
+
+ // bit 0 - HPC_SLOT_POWER
+ if ((pslot->status & 0x01) != (poldslot->status & 0x01))
+ update = 1;
+
+ // bit 1 - HPC_SLOT_CONNECT
+ // ignore
+
+ // bit 2 - HPC_SLOT_ATTN
+ if ((pslot->status & 0x04) != (poldslot->status & 0x04))
+ update = 1;
+
+ // bit 3 - HPC_SLOT_PRSNT2
+ // bit 4 - HPC_SLOT_PRSNT1
+ if (((pslot->status & 0x08) != (poldslot->status & 0x08))
+ || ((pslot->status & 0x10) != (poldslot->status & 0x10)))
+ update = 1;
+
+ // bit 5 - HPC_SLOT_PWRGD
+ if ((pslot->status & 0x20) != (poldslot->status & 0x20))
+ // OFF -> ON: ignore, ON -> OFF: disable slot
+ if ((poldslot->status & 0x20) && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status)))
+ disable = 1;
+
+ // bit 6 - HPC_SLOT_BUS_SPEED
+ // ignore
+
+ // bit 7 - HPC_SLOT_LATCH
+ if ((pslot->status & 0x80) != (poldslot->status & 0x80)) {
+ update = 1;
+ // OPEN -> CLOSE
+ if (pslot->status & 0x80) {
+ if (SLOT_PWRGD (pslot->status)) {
+ // power goes on and off after closing latch
+ // check again to make sure power is still ON
+ msleep(1000);
+ rc = ibmphp_hpc_readslot (pslot, READ_SLOTSTATUS, &status);
+ if (SLOT_PWRGD (status))
+ update = 1;
+ else // overwrite power in pslot to OFF
+ pslot->status &= ~HPC_SLOT_POWER;
+ }
+ }
+ // CLOSE -> OPEN
+ else if ((SLOT_PWRGD (poldslot->status) == HPC_SLOT_PWRGD_GOOD)
+ && (SLOT_CONNECT (poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT (poldslot->status))) {
+ disable = 1;
+ }
+ // else - ignore
+ }
+ // bit 4 - HPC_SLOT_BLINK_ATTN
+ if ((pslot->ext_status & 0x08) != (poldslot->ext_status & 0x08))
+ update = 1;
+
+ if (disable) {
+ debug ("process_changeinstatus - disable slot\n");
+ pslot->flag = 0;
+ rc = ibmphp_do_disable_slot (pslot);
+ }
+
+ if (update || disable)
+ ibmphp_update_slot_info (pslot);
+
+ debug ("%s - Exit rc[%d] disable[%x] update[%x]\n", __func__, rc, disable, update);
+
+ return rc;
+}
+
+/*----------------------------------------------------------------------
+* Name: process_changeinlatch
+*
+* Action: compare old and new latch reg status, process the change
+*
+* Input: old and current latch register status
+*
+* Return 0 or error codes
+* Value:
+*---------------------------------------------------------------------*/
+static int process_changeinlatch (u8 old, u8 new, struct controller *ctrl)
+{
+ struct slot myslot, *pslot;
+ u8 i;
+ u8 mask;
+ int rc = 0;
+
+ debug ("%s - Entry old[%x], new[%x]\n", __func__, old, new);
+ // bit 0 is reserved (bit 0 = LSB); check bits 1-6 for the 6 slots
+
+ for (i = ctrl->starting_slot_num; i <= ctrl->ending_slot_num; i++) {
+ mask = 0x01 << i;
+ if ((mask & old) != (mask & new)) {
+ pslot = ibmphp_get_slot_from_physical_num (i);
+ if (pslot) {
+ memcpy ((void *) &myslot, (void *) pslot, sizeof (struct slot));
+ rc = ibmphp_hpc_readslot (pslot, READ_ALLSTAT, NULL);
+ debug ("%s - call process_changeinstatus for slot[%d]\n", __func__, i);
+ process_changeinstatus (pslot, &myslot);
+ } else {
+ rc = -EINVAL;
+ err ("%s - Error bad pointer for slot[%d]\n", __func__, i);
+ }
+ }
+ }
+ debug ("%s - Exit rc[%d]\n", __func__, rc);
+ return rc;
+}
+
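+/*----------------------------------------------------------------------
+* Worked example (added for clarity): with old = 0x02 and new = 0x06 the
+* only differing bit is mask 0x04, i.e. physical slot 2, so only that
+* slot's status is re-read and handed to process_changeinstatus().
+*---------------------------------------------------------------------*/
+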
+/*----------------------------------------------------------------------
+* Name: ibmphp_hpc_start_poll_thread
+*
+* Action: start polling thread
+*---------------------------------------------------------------------*/
+int __init ibmphp_hpc_start_poll_thread (void)
+{
+ debug ("%s - Entry\n", __func__);
+
+ ibmphp_poll_thread = kthread_run(poll_hpc, NULL, "hpc_poll");
+ if (IS_ERR(ibmphp_poll_thread)) {
+ err ("%s - Error, thread not started\n", __func__);
+ return PTR_ERR(ibmphp_poll_thread);
+ }
+ return 0;
+}
+
+/*----------------------------------------------------------------------
+* Name: ibmphp_hpc_stop_poll_thread
+*
+* Action: stop polling thread and cleanup
+*---------------------------------------------------------------------*/
+void __exit ibmphp_hpc_stop_poll_thread (void)
+{
+ debug ("%s - Entry\n", __func__);
+
+ kthread_stop(ibmphp_poll_thread);
+ debug ("before locking operations \n");
+ ibmphp_lock_operations ();
+ debug ("after locking operations \n");
+
+ // wait for poll thread to exit
+ debug ("before sem_exit down \n");
+ down (&sem_exit);
+ debug ("after sem_exit down \n");
+
+ // cleanup
+ debug ("before free_hpc_access \n");
+ free_hpc_access ();
+ debug ("after free_hpc_access \n");
+ ibmphp_unlock_operations ();
+ debug ("after unlock operations \n");
+ up (&sem_exit);
+ debug ("after sem exit up\n");
+
+ debug ("%s - Exit\n", __func__);
+}
+
+/*----------------------------------------------------------------------
+* Name: hpc_wait_ctlr_notworking
+*
+* Action: wait until the controller is in a not working state
+*
+* Return 0, HPC_ERROR
+* Value:
+*---------------------------------------------------------------------*/
+static int hpc_wait_ctlr_notworking (int timeout, struct controller *ctlr_ptr, void __iomem *wpg_bbar,
+ u8 *pstatus)
+{
+ int rc = 0;
+ u8 done = 0;
+
+ debug_polling ("hpc_wait_ctlr_notworking - Entry timeout[%d]\n", timeout);
+
+ while (!done) {
+ *pstatus = ctrl_read (ctlr_ptr, wpg_bbar, WPG_CTLR_INDEX);
+ if (*pstatus == HPC_ERROR) {
+ rc = HPC_ERROR;
+ done = 1;
+ }
+ if (CTLR_WORKING (*pstatus) == HPC_CTLR_WORKING_NO)
+ done = 1;
+ if (!done) {
+ msleep(1000);
+ if (timeout < 1) {
+ done = 1;
+ err ("HPCreadslot - Error ctlr timeout\n");
+ rc = HPC_ERROR;
+ } else
+ timeout--;
+ }
+ }
+ debug_polling ("hpc_wait_ctlr_notworking - Exit rc[%x] status[%x]\n", rc, *pstatus);
+ return rc;
+}
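+
+/*----------------------------------------------------------------------
+* Added note: since the loop above sleeps 1000 ms per iteration and
+* decrements timeout once per sleep, a caller passing a timeout of N
+* waits roughly N seconds in total before giving up with HPC_ERROR.
+*---------------------------------------------------------------------*/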
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
new file mode 100644
index 000000000..814cea22a
--- /dev/null
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -0,0 +1,1722 @@
+/*
+ * IBM Hot Plug Controller Driver
+ *
+ * Written By: Irene Zubarev, IBM Corporation
+ *
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001,2002 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <gregkh@us.ibm.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include "ibmphp.h"
+
+
+static int configure_device(struct pci_func *);
+static int configure_bridge(struct pci_func **, u8);
+static struct res_needed *scan_behind_bridge(struct pci_func *, u8);
+static int add_new_bus (struct bus_node *, struct resource_node *, struct resource_node *, struct resource_node *, u8);
+static u8 find_sec_number (u8 primary_busno, u8 slotno);
+
+/*
+ * NOTE..... If the BIOS doesn't provide default routing, we assign:
+ * 9 for SCSI, 10 for LAN adapters, and 11 for everything else.
+ * If the adapter is bridged, we assign 11 to it and to the devices behind it.
+ * We also assign the same irq numbers to multi-function devices.
+ * These are PIC mode IRQs, so it shouldn't matter anyway (hopefully).
+ */
+static void assign_alt_irq (struct pci_func *cur_func, u8 class_code)
+{
+ int j;
+ for (j = 0; j < 4; j++) {
+ if (cur_func->irq[j] == 0xff) {
+ switch (class_code) {
+ case PCI_BASE_CLASS_STORAGE:
+ cur_func->irq[j] = SCSI_IRQ;
+ break;
+ case PCI_BASE_CLASS_NETWORK:
+ cur_func->irq[j] = LAN_IRQ;
+ break;
+ default:
+ cur_func->irq[j] = OTHER_IRQ;
+ break;
+ }
+ }
+ }
+}
+
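+/*
+ * Illustrative use of the defaults above (sketch only, not code the
+ * driver runs): a storage function whose routing entries are all unset
+ * (0xff) ends up on SCSI_IRQ for every pin:
+ *
+ *	struct pci_func f = { .irq = { 0xff, 0xff, 0xff, 0xff } };
+ *	assign_alt_irq (&f, PCI_BASE_CLASS_STORAGE);
+ *	// now f.irq[0..3] == SCSI_IRQ
+ */
+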
+/*
+ * Configures the device to be added (allocating the needed resources if
+ * it can).  The device can be a bridge or a regular PCI device, and may
+ * also be multi-functional.
+ *
+ * Input: function to be added
+ *
+ * TO DO: In the error case with a multi-function device or a multi-function
+ * bridge, we will need to go through all previously configured functions and
+ * unconfigure them....or add some code into unconfigure_card....
+ */
+int ibmphp_configure_card (struct pci_func *func, u8 slotno)
+{
+ u16 vendor_id;
+ u32 class;
+ u8 class_code;
+ u8 hdr_type, device, sec_number;
+ u8 function;
+ struct pci_func *newfunc; /* for multi devices */
+ struct pci_func *cur_func, *prev_func;
+ int rc, i, j;
+ int cleanup_count;
+ u8 flag;
+ u8 valid_device = 0x00; /* to see if we are able to read from card any device info at all */
+
+ debug ("inside configure_card, func->busno = %x\n", func->busno);
+
+ device = func->device;
+ cur_func = func;
+
+ /* We only get the bus and device from the IRQ routing table, so at this
+ * point func->busno is correct, and func->device contains only the device
+ * (in the 5 highest bits)
+ */
+
+ /* For every function on the card */
+ for (function = 0x00; function < 0x08; function++) {
+ unsigned int devfn = PCI_DEVFN(device, function);
+ ibmphp_pci_bus->number = cur_func->busno;
+
+ cur_func->function = function;
+
+ debug ("inside the loop, cur_func->busno = %x, cur_func->device = %x, cur_func->function = %x\n",
+ cur_func->busno, cur_func->device, cur_func->function);
+
+ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_VENDOR_ID, &vendor_id);
+
+ debug ("vendor_id is %x\n", vendor_id);
+ if (vendor_id != PCI_VENDOR_ID_NOTVALID) {
+ /* found correct device!!! */
+ debug ("found valid device, vendor_id = %x\n", vendor_id);
+
+ ++valid_device;
+
+ /* header: x x x x x x x x
+ * | |___________|=> 1=PPB bridge, 0=normal device, 2=CardBus Bridge
+ * |_=> 0 = single function device, 1 = multi-function device
+ */
+
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_HEADER_TYPE, &hdr_type);
+ pci_bus_read_config_dword (ibmphp_pci_bus, devfn, PCI_CLASS_REVISION, &class);
+
+ class_code = class >> 24;
+ debug ("hrd_type = %x, class = %x, class_code %x\n", hdr_type, class, class_code);
+ class >>= 8; /* to take revision out, class = class.subclass.prog i/f */
+ if (class == PCI_CLASS_NOT_DEFINED_VGA) {
+ err ("The device %x is VGA compatible and as is not supported for hot plugging. "
+ "Please choose another device.\n", cur_func->device);
+ return -ENODEV;
+ } else if (class == PCI_CLASS_DISPLAY_VGA) {
+ err ("The device %x is not supported for hot plugging. Please choose another device.\n",
+ cur_func->device);
+ return -ENODEV;
+ }
+ switch (hdr_type) {
+ case PCI_HEADER_TYPE_NORMAL:
+ debug ("single device case.... vendor id = %x, hdr_type = %x, class = %x\n", vendor_id, hdr_type, class);
+ assign_alt_irq (cur_func, class_code);
+ rc = configure_device(cur_func);
+ if (rc < 0) {
+ /* We need to do this in case some other BARs were properly inserted */
+ err ("was not able to configure devfunc %x on bus %x.\n",
+ cur_func->device, cur_func->busno);
+ cleanup_count = 6;
+ goto error;
+ }
+ cur_func->next = NULL;
+ function = 0x8;
+ break;
+ case PCI_HEADER_TYPE_MULTIDEVICE:
+ assign_alt_irq (cur_func, class_code);
+ rc = configure_device(cur_func);
+ if (rc < 0) {
+ /* We need to do this in case some other BARs were properly inserted */
+ err ("was not able to configure devfunc %x on bus %x...bailing out\n",
+ cur_func->device, cur_func->busno);
+ cleanup_count = 6;
+ goto error;
+ }
+ newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL);
+ if (!newfunc) {
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ newfunc->busno = cur_func->busno;
+ newfunc->device = device;
+ cur_func->next = newfunc;
+ /* copy the IRQs from the old function before advancing cur_func */
+ for (j = 0; j < 4; j++)
+ newfunc->irq[j] = cur_func->irq[j];
+ cur_func = newfunc;
+ break;
+ case PCI_HEADER_TYPE_MULTIBRIDGE:
+ class >>= 8;
+ if (class != PCI_CLASS_BRIDGE_PCI) {
+ err ("This %x is not PCI-to-PCI bridge, and as is not supported for hot-plugging. Please insert another card.\n",
+ cur_func->device);
+ return -ENODEV;
+ }
+ assign_alt_irq (cur_func, class_code);
+ rc = configure_bridge (&cur_func, slotno);
+ if (rc == -ENODEV) {
+ err ("You chose to insert Single Bridge, or nested bridges, this is not supported...\n");
+ err ("Bus %x, devfunc %x\n", cur_func->busno, cur_func->device);
+ return rc;
+ }
+ if (rc) {
+ /* We need to do this in case some other BARs were properly inserted */
+ err ("was not able to hot-add PPB properly.\n");
+ func->bus = 1; /* To indicate to the unconfigure function that this is a PPB */
+ cleanup_count = 2;
+ goto error;
+ }
+
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number);
+ flag = 0;
+ for (i = 0; i < 32; i++) {
+ if (func->devices[i]) {
+ newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL);
+ if (!newfunc) {
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ newfunc->busno = sec_number;
+ newfunc->device = (u8) i;
+ for (j = 0; j < 4; j++)
+ newfunc->irq[j] = cur_func->irq[j];
+
+ if (flag) {
+ for (prev_func = cur_func; prev_func->next; prev_func = prev_func->next) ;
+ prev_func->next = newfunc;
+ } else
+ cur_func->next = newfunc;
+
+ rc = ibmphp_configure_card (newfunc, slotno);
+ /* This could only happen if kmalloc failed */
+ if (rc) {
+ /* We need to do this in case bridge itself got configured properly, but devices behind it failed */
+ func->bus = 1; /* To indicate to the unconfigure function that this is a PPB */
+ cleanup_count = 2;
+ goto error;
+ }
+ flag = 1;
+ }
+ }
+
+ newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL);
+ if (!newfunc) {
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ newfunc->busno = cur_func->busno;
+ newfunc->device = device;
+ for (j = 0; j < 4; j++)
+ newfunc->irq[j] = cur_func->irq[j];
+ for (prev_func = cur_func; prev_func->next; prev_func = prev_func->next) ;
+ prev_func->next = newfunc;
+ cur_func = newfunc;
+ break;
+ case PCI_HEADER_TYPE_BRIDGE:
+ class >>= 8;
+ debug ("class now is %x\n", class);
+ if (class != PCI_CLASS_BRIDGE_PCI) {
+ err ("This %x is not PCI-to-PCI bridge, and as is not supported for hot-plugging. Please insert another card.\n",
+ cur_func->device);
+ return -ENODEV;
+ }
+
+ assign_alt_irq (cur_func, class_code);
+
+ debug ("cur_func->busno b4 configure_bridge is %x\n", cur_func->busno);
+ rc = configure_bridge (&cur_func, slotno);
+ if (rc == -ENODEV) {
+ err ("You chose to insert Single Bridge, or nested bridges, this is not supported...\n");
+ err ("Bus %x, devfunc %x\n", cur_func->busno, cur_func->device);
+ return rc;
+ }
+ if (rc) {
+ /* We need to do this in case some other BARs were properly inserted */
+ func->bus = 1; /* To indicate to the unconfigure function that this is a PPB */
+ err ("was not able to hot-add PPB properly.\n");
+ cleanup_count = 2;
+ goto error;
+ }
+ debug ("cur_func->busno = %x, device = %x, function = %x\n",
+ cur_func->busno, device, function);
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number);
+ debug ("after configuring bridge..., sec_number = %x\n", sec_number);
+ flag = 0;
+ for (i = 0; i < 32; i++) {
+ if (func->devices[i]) {
+ debug ("inside for loop, device is %x\n", i);
+ newfunc = kzalloc(sizeof(*newfunc), GFP_KERNEL);
+ if (!newfunc) {
+ err (" out of system memory\n");
+ return -ENOMEM;
+ }
+ newfunc->busno = sec_number;
+ newfunc->device = (u8) i;
+ for (j = 0; j < 4; j++)
+ newfunc->irq[j] = cur_func->irq[j];
+
+ if (flag) {
+ for (prev_func = cur_func; prev_func->next; prev_func = prev_func->next) ;
+ prev_func->next = newfunc;
+ } else
+ cur_func->next = newfunc;
+
+ rc = ibmphp_configure_card (newfunc, slotno);
+
+ /* Again, this case should not happen... For complete paranoia, will need to call remove_bus */
+ if (rc) {
+ /* We need to do this in case some other BARs were properly inserted */
+ func->bus = 1; /* To indicate to the unconfigure function that this is a PPB */
+ cleanup_count = 2;
+ goto error;
+ }
+ flag = 1;
+ }
+ }
+
+ function = 0x8;
+ break;
+ default:
+ err ("MAJOR PROBLEM!!!!, header type not supported? %x\n", hdr_type);
+ return -ENXIO;
+ break;
+ } /* end of switch */
+ } /* end of valid device */
+ } /* end of for */
+
+ if (!valid_device) {
+ err ("Cannot find any valid devices on the card. Or unable to read from card.\n");
+ return -ENODEV;
+ }
+
+ return 0;
+
+error:
+ for (i = 0; i < cleanup_count; i++) {
+ if (cur_func->io[i]) {
+ ibmphp_remove_resource (cur_func->io[i]);
+ cur_func->io[i] = NULL;
+ } else if (cur_func->pfmem[i]) {
+ ibmphp_remove_resource (cur_func->pfmem[i]);
+ cur_func->pfmem[i] = NULL;
+ } else if (cur_func->mem[i]) {
+ ibmphp_remove_resource (cur_func->mem[i]);
+ cur_func->mem[i] = NULL;
+ }
+ }
+ return rc;
+}
+
+/*
+ * This function configures the pci BARs of a single device.
+ * Input: pointer to the pci_func
+ * Output: 0 on success (PCI configured), or an error code
+ */
+static int configure_device (struct pci_func *func)
+{
+ u32 bar[6];
+ u32 address[] = {
+ PCI_BASE_ADDRESS_0,
+ PCI_BASE_ADDRESS_1,
+ PCI_BASE_ADDRESS_2,
+ PCI_BASE_ADDRESS_3,
+ PCI_BASE_ADDRESS_4,
+ PCI_BASE_ADDRESS_5,
+ 0
+ };
+ u8 irq;
+ int count;
+ int len[6];
+ struct resource_node *io[6];
+ struct resource_node *mem[6];
+ struct resource_node *mem_tmp;
+ struct resource_node *pfmem[6];
+ unsigned int devfn;
+
+ debug ("%s - inside\n", __func__);
+
+ devfn = PCI_DEVFN(func->device, func->function);
+ ibmphp_pci_bus->number = func->busno;
+
+ for (count = 0; address[count]; count++) { /* for 6 BARs */
+
+ /* Not sure if I need this. Per Scott, we may need something like this
+ if devices don't adhere 100% to the spec, so we don't want to write
+ to the reserved bits:
+
+ pcibios_read_config_byte(cur_func->busno, cur_func->device,
+ PCI_BASE_ADDRESS_0 + 4 * count, &tmp);
+ if (tmp & 0x01) // IO
+ pcibios_write_config_dword(cur_func->busno, cur_func->device,
+ PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFD);
+ else // Memory
+ pcibios_write_config_dword(cur_func->busno, cur_func->device,
+ PCI_BASE_ADDRESS_0 + 4 * count, 0xFFFFFFFF);
+ */
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF);
+ pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]);
+
+ if (!bar[count]) /* This BAR is not implemented */
+ continue;
+
+ debug ("Device %x BAR %d wants %x\n", func->device, count, bar[count]);
+
+ if (bar[count] & PCI_BASE_ADDRESS_SPACE_IO) {
+ /* This is IO */
+ debug ("inside IO SPACE\n");
+
+ len[count] = bar[count] & 0xFFFFFFFC;
+ len[count] = ~len[count] + 1;
+
+ debug ("len[count] in IO %x, count %d\n", len[count], count);
+
+ io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
+
+ if (!io[count]) {
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ io[count]->type = IO;
+ io[count]->busno = func->busno;
+ io[count]->devfunc = PCI_DEVFN(func->device, func->function);
+ io[count]->len = len[count];
+ if (ibmphp_check_resource(io[count], 0) == 0) {
+ ibmphp_add_resource (io[count]);
+ func->io[count] = io[count];
+ } else {
+ err ("cannot allocate requested io for bus %x device %x function %x len %x\n",
+ func->busno, func->device, func->function, len[count]);
+ kfree (io[count]);
+ return -EIO;
+ }
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->io[count]->start);
+
+ /* _______________This is for debugging purposes only_____________________ */
+ debug ("b4 writing, the IO address is %x\n", func->io[count]->start);
+ pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]);
+ debug ("after writing.... the start address is %x\n", bar[count]);
+ /* _________________________________________________________________________*/
+
+ } else {
+ /* This is Memory */
+ if (bar[count] & PCI_BASE_ADDRESS_MEM_PREFETCH) {
+ /* pfmem */
+ debug ("PFMEM SPACE\n");
+
+ len[count] = bar[count] & 0xFFFFFFF0;
+ len[count] = ~len[count] + 1;
+
+ debug ("len[count] in PFMEM %x, count %d\n", len[count], count);
+
+ pfmem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
+ if (!pfmem[count]) {
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ pfmem[count]->type = PFMEM;
+ pfmem[count]->busno = func->busno;
+ pfmem[count]->devfunc = PCI_DEVFN(func->device,
+ func->function);
+ pfmem[count]->len = len[count];
+ pfmem[count]->fromMem = 0;
+ if (ibmphp_check_resource (pfmem[count], 0) == 0) {
+ ibmphp_add_resource (pfmem[count]);
+ func->pfmem[count] = pfmem[count];
+ } else {
+ mem_tmp = kzalloc(sizeof(*mem_tmp), GFP_KERNEL);
+ if (!mem_tmp) {
+ err ("out of system memory\n");
+ kfree (pfmem[count]);
+ return -ENOMEM;
+ }
+ mem_tmp->type = MEM;
+ mem_tmp->busno = pfmem[count]->busno;
+ mem_tmp->devfunc = pfmem[count]->devfunc;
+ mem_tmp->len = pfmem[count]->len;
+ debug ("there's no pfmem... going into mem.\n");
+ if (ibmphp_check_resource (mem_tmp, 0) == 0) {
+ ibmphp_add_resource (mem_tmp);
+ pfmem[count]->fromMem = 1;
+ pfmem[count]->rangeno = mem_tmp->rangeno;
+ pfmem[count]->start = mem_tmp->start;
+ pfmem[count]->end = mem_tmp->end;
+ ibmphp_add_pfmem_from_mem (pfmem[count]);
+ func->pfmem[count] = pfmem[count];
+ } else {
+ err ("cannot allocate requested pfmem for bus %x, device %x, len %x\n",
+ func->busno, func->device, len[count]);
+ kfree (mem_tmp);
+ kfree (pfmem[count]);
+ return -EIO;
+ }
+ }
+
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->pfmem[count]->start);
+
+ /*_______________This is for debugging purposes only______________________________*/
+ debug ("b4 writing, start address is %x\n", func->pfmem[count]->start);
+ pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]);
+ debug ("after writing, start address is %x\n", bar[count]);
+ /*_________________________________________________________________________________*/
+
+ if (bar[count] & PCI_BASE_ADDRESS_MEM_TYPE_64) { /* takes up another dword */
+ debug ("inside the mem 64 case, count %d\n", count);
+ count += 1;
+ /* on the 2nd dword, write all 0s, since we can't handle 64-bit addresses anyway */
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0x00000000);
+ }
+ } else {
+ /* regular memory */
+ debug ("REGULAR MEM SPACE\n");
+
+ len[count] = bar[count] & 0xFFFFFFF0;
+ len[count] = ~len[count] + 1;
+
+ debug ("len[count] in Mem %x, count %d\n", len[count], count);
+
+ mem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
+ if (!mem[count]) {
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ mem[count]->type = MEM;
+ mem[count]->busno = func->busno;
+ mem[count]->devfunc = PCI_DEVFN(func->device,
+ func->function);
+ mem[count]->len = len[count];
+ if (ibmphp_check_resource (mem[count], 0) == 0) {
+ ibmphp_add_resource (mem[count]);
+ func->mem[count] = mem[count];
+ } else {
+ err ("cannot allocate requested mem for bus %x, device %x, len %x\n",
+ func->busno, func->device, len[count]);
+ kfree (mem[count]);
+ return -EIO;
+ }
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->mem[count]->start);
+ /* _______________________This is for debugging purposes only _______________________*/
+ debug ("b4 writing, start address is %x\n", func->mem[count]->start);
+ pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]);
+ debug ("after writing, the address is %x\n", bar[count]);
+ /* __________________________________________________________________________________*/
+
+ if (bar[count] & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ /* takes up another dword */
+ debug ("inside mem 64 case, reg. mem, count %d\n", count);
+ count += 1;
+ /* on the 2nd dword, write all 0s, since we can't handle 64-bit addresses anyway */
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0x00000000);
+ }
+ }
+ } /* end of mem */
+ } /* end of for */
+
+ func->bus = 0; /* To indicate that this is not a PPB */
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_PIN, &irq);
+ if ((irq > 0x00) && (irq < 0x05))
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_LINE, func->irq[irq - 1]);
+
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_CACHE_LINE_SIZE, CACHE);
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_LATENCY_TIMER, LATENCY);
+
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, PCI_ROM_ADDRESS, 0x00L);
+ pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_COMMAND, DEVICEENABLE);
+
+ return 0;
+}
+
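+/*
+ * Worked example of the BAR sizing idiom used above (added for clarity):
+ * writing 0xFFFFFFFF to a BAR and reading it back yields a size mask.
+ * If a 32-bit memory BAR reads back 0xFFFFF000:
+ *
+ *	len = bar & 0xFFFFFFF0;		// 0xFFFFF000
+ *	len = ~len + 1;			// 0x00001000, i.e. a 4 KB region
+ *
+ * For I/O BARs the mask is 0xFFFFFFFC instead, as in the code above.
+ */
+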
+/******************************************************************************
+ * This routine configures a PCI-2-PCI bridge and the functions behind it
+ * Parameters: pci_func
+ * Returns:
+ ******************************************************************************/
+static int configure_bridge (struct pci_func **func_passed, u8 slotno)
+{
+ int count;
+ int i;
+ int rc;
+ u8 sec_number;
+ u8 io_base;
+ u16 pfmem_base;
+ u32 bar[2];
+ u32 len[2];
+ u8 flag_io = 0;
+ u8 flag_mem = 0;
+ u8 flag_pfmem = 0;
+ u8 need_io_upper = 0;
+ u8 need_pfmem_upper = 0;
+ struct res_needed *amount_needed = NULL;
+ struct resource_node *io = NULL;
+ struct resource_node *bus_io[2] = {NULL, NULL};
+ struct resource_node *mem = NULL;
+ struct resource_node *bus_mem[2] = {NULL, NULL};
+ struct resource_node *mem_tmp = NULL;
+ struct resource_node *pfmem = NULL;
+ struct resource_node *bus_pfmem[2] = {NULL, NULL};
+ struct bus_node *bus;
+ u32 address[] = {
+ PCI_BASE_ADDRESS_0,
+ PCI_BASE_ADDRESS_1,
+ 0
+ };
+ struct pci_func *func = *func_passed;
+ unsigned int devfn;
+ u8 irq;
+ int retval;
+
+ debug ("%s - enter\n", __func__);
+
+ devfn = PCI_DEVFN(func->device, func->function);
+ ibmphp_pci_bus->number = func->busno;
+
+ /* Configuring necessary info for the bridge so that we could see the devices
+ * behind it
+ */
+
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_PRIMARY_BUS, func->busno);
+
+ /* _____________________For debugging purposes only __________________________
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_PRIMARY_BUS, &pri_number);
+ debug ("primary # written into the bridge is %x\n", pri_number);
+ ___________________________________________________________________________*/
+
+ /* in EBDA, only get allocated 1 additional bus # per slot */
+ sec_number = find_sec_number (func->busno, slotno);
+ if (sec_number == 0xff) {
+ err ("cannot allocate secondary bus number for the bridged device\n");
+ return -EINVAL;
+ }
+
+ debug ("after find_sec_number, the number we got is %x\n", sec_number);
+ debug ("AFTER FIND_SEC_NUMBER, func->busno IS %x\n", func->busno);
+
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, sec_number);
+
+ /* __________________For debugging purposes only __________________________________
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number);
+ debug ("sec_number after write/read is %x\n", sec_number);
+ ________________________________________________________________________________*/
+
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_SUBORDINATE_BUS, sec_number);
+
+ /* __________________For debugging purposes only ____________________________________
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SUBORDINATE_BUS, &sec_number);
+ debug ("subordinate number after write/read is %x\n", sec_number);
+ __________________________________________________________________________________*/
+
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_CACHE_LINE_SIZE, CACHE);
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_LATENCY_TIMER, LATENCY);
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_SEC_LATENCY_TIMER, LATENCY);
+
+ debug ("func->busno is %x\n", func->busno);
+ debug ("sec_number after writing is %x\n", sec_number);
+
+
+ /* !!! TODO: NEED TO ADD FAST BACK-TO-BACK ENABLE !!! */
+
+
+ /* First we need to allocate mem/io for the bridge itself in case it needs it */
+ for (count = 0; address[count]; count++) { /* for 2 BARs */
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF);
+ pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]);
+
+ if (!bar[count]) {
+ /* This BAR is not implemented */
+ debug ("so we come here then, eh?, count = %d\n", count);
+ continue;
+ }
+ // tmp_bar = bar[count];
+
+ debug ("Bar %d wants %x\n", count, bar[count]);
+
+ if (bar[count] & PCI_BASE_ADDRESS_SPACE_IO) {
+ /* This is IO */
+ len[count] = bar[count] & 0xFFFFFFFC;
+ len[count] = ~len[count] + 1;
+
+ debug ("len[count] in IO = %x\n", len[count]);
+
+ bus_io[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
+
+ if (!bus_io[count]) {
+ err ("out of system memory\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+ bus_io[count]->type = IO;
+ bus_io[count]->busno = func->busno;
+ bus_io[count]->devfunc = PCI_DEVFN(func->device,
+ func->function);
+ bus_io[count]->len = len[count];
+ if (ibmphp_check_resource (bus_io[count], 0) == 0) {
+ ibmphp_add_resource (bus_io[count]);
+ func->io[count] = bus_io[count];
+ } else {
+ err ("cannot allocate requested io for bus %x, device %x, len %x\n",
+ func->busno, func->device, len[count]);
+ kfree (bus_io[count]);
+ return -EIO;
+ }
+
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->io[count]->start);
+
+ } else {
+ /* This is Memory */
+ if (bar[count] & PCI_BASE_ADDRESS_MEM_PREFETCH) {
+ /* pfmem */
+ len[count] = bar[count] & 0xFFFFFFF0;
+ len[count] = ~len[count] + 1;
+
+ debug ("len[count] in PFMEM = %x\n", len[count]);
+
+ bus_pfmem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
+ if (!bus_pfmem[count]) {
+ err ("out of system memory\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+ bus_pfmem[count]->type = PFMEM;
+ bus_pfmem[count]->busno = func->busno;
+ bus_pfmem[count]->devfunc = PCI_DEVFN(func->device,
+ func->function);
+ bus_pfmem[count]->len = len[count];
+ bus_pfmem[count]->fromMem = 0;
+ if (ibmphp_check_resource (bus_pfmem[count], 0) == 0) {
+ ibmphp_add_resource (bus_pfmem[count]);
+ func->pfmem[count] = bus_pfmem[count];
+ } else {
+ mem_tmp = kzalloc(sizeof(*mem_tmp), GFP_KERNEL);
+ if (!mem_tmp) {
+ err ("out of system memory\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+ mem_tmp->type = MEM;
+ mem_tmp->busno = bus_pfmem[count]->busno;
+ mem_tmp->devfunc = bus_pfmem[count]->devfunc;
+ mem_tmp->len = bus_pfmem[count]->len;
+ if (ibmphp_check_resource (mem_tmp, 0) == 0) {
+ ibmphp_add_resource (mem_tmp);
+ bus_pfmem[count]->fromMem = 1;
+ bus_pfmem[count]->rangeno = mem_tmp->rangeno;
+ ibmphp_add_pfmem_from_mem (bus_pfmem[count]);
+ func->pfmem[count] = bus_pfmem[count];
+ } else {
+ err ("cannot allocate requested pfmem for bus %x, device %x, len %x\n",
+ func->busno, func->device, len[count]);
+ kfree (mem_tmp);
+ kfree (bus_pfmem[count]);
+ return -EIO;
+ }
+ }
+
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->pfmem[count]->start);
+
+ if (bar[count] & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ /* takes up another dword */
+ count += 1;
+ /* on the 2nd dword, write all 0s, since we can't handle 64-bit addresses anyway */
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0x00000000);
+
+ }
+ } else {
+ /* regular memory */
+ len[count] = bar[count] & 0xFFFFFFF0;
+ len[count] = ~len[count] + 1;
+
+ debug ("len[count] in Memory is %x\n", len[count]);
+
+ bus_mem[count] = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
+ if (!bus_mem[count]) {
+ err ("out of system memory\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+ bus_mem[count]->type = MEM;
+ bus_mem[count]->busno = func->busno;
+ bus_mem[count]->devfunc = PCI_DEVFN(func->device,
+ func->function);
+ bus_mem[count]->len = len[count];
+ if (ibmphp_check_resource (bus_mem[count], 0) == 0) {
+ ibmphp_add_resource (bus_mem[count]);
+ func->mem[count] = bus_mem[count];
+ } else {
+ err ("cannot allocate requested mem for bus %x, device %x, len %x\n",
+ func->busno, func->device, len[count]);
+ kfree (bus_mem[count]);
+ return -EIO;
+ }
+
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], func->mem[count]->start);
+
+ if (bar[count] & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ /* takes up another dword */
+ count += 1;
+ /* on the 2nd dword, write all 0s, since we can't handle 64-bit addresses anyway */
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0x00000000);
+
+ }
+ }
+ } /* end of mem */
+ } /* end of for */
+
+ /* Now need to see how much space the devices behind the bridge needed */
+ amount_needed = scan_behind_bridge (func, sec_number);
+ if (amount_needed == NULL)
+ return -ENOMEM;
+
+ ibmphp_pci_bus->number = func->busno;
+ debug ("after coming back from scan_behind_bridge\n");
+ debug ("amount_needed->not_correct = %x\n", amount_needed->not_correct);
+ debug ("amount_needed->io = %x\n", amount_needed->io);
+ debug ("amount_needed->mem = %x\n", amount_needed->mem);
+ debug ("amount_needed->pfmem = %x\n", amount_needed->pfmem);
+
+ if (amount_needed->not_correct) {
+ debug ("amount_needed is not correct\n");
+ for (count = 0; address[count]; count++) {
+ /* for 2 BARs */
+ if (bus_io[count]) {
+ ibmphp_remove_resource (bus_io[count]);
+ func->io[count] = NULL;
+ } else if (bus_pfmem[count]) {
+ ibmphp_remove_resource (bus_pfmem[count]);
+ func->pfmem[count] = NULL;
+ } else if (bus_mem[count]) {
+ ibmphp_remove_resource (bus_mem[count]);
+ func->mem[count] = NULL;
+ }
+ }
+ kfree (amount_needed);
+ return -ENODEV;
+ }
+
+ if (!amount_needed->io) {
+ debug ("it doesn't want IO?\n");
+ flag_io = 1;
+ } else {
+ debug ("it wants %x IO behind the bridge\n", amount_needed->io);
+ io = kzalloc(sizeof(*io), GFP_KERNEL);
+
+ if (!io) {
+ err ("out of system memory\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+ io->type = IO;
+ io->busno = func->busno;
+ io->devfunc = PCI_DEVFN(func->device, func->function);
+ io->len = amount_needed->io;
+ if (ibmphp_check_resource (io, 1) == 0) {
+ debug ("were we able to add io\n");
+ ibmphp_add_resource (io);
+ flag_io = 1;
+ }
+ }
+
+ if (!amount_needed->mem) {
+ debug ("it doesn't want n.e.memory?\n");
+ flag_mem = 1;
+ } else {
+ debug ("it wants %x memory behind the bridge\n", amount_needed->mem);
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem) {
+ err ("out of system memory\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+ mem->type = MEM;
+ mem->busno = func->busno;
+ mem->devfunc = PCI_DEVFN(func->device, func->function);
+ mem->len = amount_needed->mem;
+ if (ibmphp_check_resource (mem, 1) == 0) {
+ ibmphp_add_resource (mem);
+ flag_mem = 1;
+ debug ("were we able to add mem\n");
+ }
+ }
+
+ if (!amount_needed->pfmem) {
+ debug ("it doesn't want n.e.pfmem mem?\n");
+ flag_pfmem = 1;
+ } else {
+ debug ("it wants %x pfmemory behind the bridge\n", amount_needed->pfmem);
+ pfmem = kzalloc(sizeof(*pfmem), GFP_KERNEL);
+ if (!pfmem) {
+ err ("out of system memory\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+ pfmem->type = PFMEM;
+ pfmem->busno = func->busno;
+ pfmem->devfunc = PCI_DEVFN(func->device, func->function);
+ pfmem->len = amount_needed->pfmem;
+ pfmem->fromMem = 0;
+ if (ibmphp_check_resource (pfmem, 1) == 0) {
+ ibmphp_add_resource (pfmem);
+ flag_pfmem = 1;
+ } else {
+ mem_tmp = kzalloc(sizeof(*mem_tmp), GFP_KERNEL);
+ if (!mem_tmp) {
+ err ("out of system memory\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+ mem_tmp->type = MEM;
+ mem_tmp->busno = pfmem->busno;
+ mem_tmp->devfunc = pfmem->devfunc;
+ mem_tmp->len = pfmem->len;
+ if (ibmphp_check_resource (mem_tmp, 1) == 0) {
+ ibmphp_add_resource (mem_tmp);
+ pfmem->fromMem = 1;
+ pfmem->rangeno = mem_tmp->rangeno;
+ ibmphp_add_pfmem_from_mem (pfmem);
+ flag_pfmem = 1;
+ }
+ }
+ }
+
+ debug ("b4 if (flag_io && flag_mem && flag_pfmem)\n");
+ debug ("flag_io = %x, flag_mem = %x, flag_pfmem = %x\n", flag_io, flag_mem, flag_pfmem);
+
+ if (flag_io && flag_mem && flag_pfmem) {
+ /* If on bootup, there was a bridged card in this slot,
+ * then card was removed and ibmphp got unloaded and loaded
+ * back again, there's no way for us to remove the bus
+ * struct, so no need to kmalloc, can use existing node
+ */
+ bus = ibmphp_find_res_bus (sec_number);
+ if (!bus) {
+ bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+ if (!bus) {
+ err ("out of system memory\n");
+ retval = -ENOMEM;
+ goto error;
+ }
+ bus->busno = sec_number;
+ debug ("b4 adding new bus\n");
+ rc = add_new_bus (bus, io, mem, pfmem, func->busno);
+ } else if (!(bus->rangeIO) && !(bus->rangeMem) && !(bus->rangePFMem))
+ rc = add_new_bus (bus, io, mem, pfmem, 0xFF);
+ else {
+ err ("expected bus structure not empty?\n");
+ retval = -EIO;
+ goto error;
+ }
+ if (rc) {
+ if (rc == -ENOMEM) {
+ ibmphp_remove_bus (bus, func->busno);
+ kfree (amount_needed);
+ return rc;
+ }
+ retval = rc;
+ goto error;
+ }
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, &io_base);
+ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_BASE, &pfmem_base);
+
+ if ((io_base & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
+ debug ("io 32\n");
+ need_io_upper = 1;
+ }
+ if ((pfmem_base & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
+ debug ("pfmem 64\n");
+ need_pfmem_upper = 1;
+ }
+
+ if (bus->noIORanges) {
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, 0x00 | bus->rangeIO->start >> 8);
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, 0x00 | bus->rangeIO->end >> 8);
+
+ /* _______________This is for debugging purposes only ____________________
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, &temp);
+ debug ("io_base = %x\n", (temp & PCI_IO_RANGE_TYPE_MASK) << 8);
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, &temp);
+ debug ("io_limit = %x\n", (temp & PCI_IO_RANGE_TYPE_MASK) << 8);
+ ________________________________________________________________________*/
+
+ if (need_io_upper) { /* since we can't support it anyway */
+ pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_IO_BASE_UPPER16, 0x0000);
+ pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_IO_LIMIT_UPPER16, 0x0000);
+ }
+ } else {
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, 0x00);
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, 0x00);
+ }
+
+ if (bus->noMemRanges) {
+ pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, 0x0000 | bus->rangeMem->start >> 16);
+ pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, 0x0000 | bus->rangeMem->end >> 16);
+
+ /* ____________________This is for debugging purposes only ________________________
+ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &temp);
+ debug ("mem_base = %x\n", (temp & PCI_MEMORY_RANGE_TYPE_MASK) << 16);
+ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, &temp);
+ debug ("mem_limit = %x\n", (temp & PCI_MEMORY_RANGE_TYPE_MASK) << 16);
+ __________________________________________________________________________________*/
+
+ } else {
+ pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, 0xffff);
+ pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, 0x0000);
+ }
+ if (bus->noPFMemRanges) {
+ pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_BASE, 0x0000 | bus->rangePFMem->start >> 16);
+ pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, 0x0000 | bus->rangePFMem->end >> 16);
+
+ /* __________________________This is for debugging purposes only _______________________
+ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_BASE, &temp);
+ debug ("pfmem_base = %x", (temp & PCI_MEMORY_RANGE_TYPE_MASK) << 16);
+ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &temp);
+ debug ("pfmem_limit = %x\n", (temp & PCI_MEMORY_RANGE_TYPE_MASK) << 16);
+ ______________________________________________________________________________________*/
+
+ if (need_pfmem_upper) { /* since we can't support it anyway */
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, PCI_PREF_BASE_UPPER32, 0x00000000);
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, PCI_PREF_LIMIT_UPPER32, 0x00000000);
+ }
+ } else {
+ pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_BASE, 0xffff);
+ pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, 0x0000);
+ }
+
+ debug ("b4 writing control information\n");
+
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_PIN, &irq);
+ if ((irq > 0x00) && (irq < 0x05))
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_INTERRUPT_LINE, func->irq[irq - 1]);
+ /*
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, ctrl);
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_PARITY);
+ pci_bus_write_config_byte (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, PCI_BRIDGE_CTL_SERR);
+ */
+
+ pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_COMMAND, DEVICEENABLE);
+ pci_bus_write_config_word (ibmphp_pci_bus, devfn, PCI_BRIDGE_CONTROL, 0x07);
+ for (i = 0; i < 32; i++) {
+ if (amount_needed->devices[i]) {
+ debug ("device where devices[i] is 1 = %x\n", i);
+ func->devices[i] = 1;
+ }
+ }
+ func->bus = 1; /* For unconfiguring, to indicate it's PPB */
+ *func_passed = func;
+ debug ("func->busno b4 returning is %x\n", func->busno);
+ debug ("func->busno b4 returning in the other structure is %x\n", (*func_passed)->busno);
+ kfree (amount_needed);
+ return 0;
+ } else {
+ err ("Configuring bridge was unsuccessful...\n");
+ mem_tmp = NULL;
+ retval = -EIO;
+ goto error;
+ }
+
+error:
+ kfree(amount_needed);
+ if (pfmem)
+ ibmphp_remove_resource (pfmem);
+ if (io)
+ ibmphp_remove_resource (io);
+ if (mem)
+ ibmphp_remove_resource (mem);
+ for (i = 0; i < 2; i++) { /* for 2 BARs */
+ if (bus_io[i]) {
+ ibmphp_remove_resource (bus_io[i]);
+ func->io[i] = NULL;
+ } else if (bus_pfmem[i]) {
+ ibmphp_remove_resource (bus_pfmem[i]);
+ func->pfmem[i] = NULL;
+ } else if (bus_mem[i]) {
+ ibmphp_remove_resource (bus_mem[i]);
+ func->mem[i] = NULL;
+ }
+ }
+ return retval;
+}
+
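+/*
+ * Worked example of the window programming above (added for clarity):
+ * the I/O base/limit registers hold bits 15:8 of the range, while the
+ * mem/pfmem base/limit registers hold bits 31:16.  So an I/O range of
+ * 0x1000-0x1FFF is programmed as
+ *
+ *	PCI_IO_BASE  = 0x1000 >> 8;	// 0x10
+ *	PCI_IO_LIMIT = 0x1FFF >> 8;	// 0x1F
+ *
+ * and a memory range 0xF0000000-0xF00FFFFF as base 0xF000, limit 0xF00F.
+ */
+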
+/*****************************************************************************
+ * This function adds up the amount of resources needed behind the PPB bridge
+ * and passes it to the configure_bridge function
+ * Input: bridge function
+ * Output: amount of resources needed
+ *****************************************************************************/
+static struct res_needed *scan_behind_bridge (struct pci_func *func, u8 busno)
+{
+ int count, len[6];
+ u16 vendor_id;
+ u8 hdr_type;
+ u8 device, function;
+ unsigned int devfn;
+ int howmany = 0; /* this is to see if there are any devices behind the bridge */
+
+ u32 bar[6], class;
+ u32 address[] = {
+ PCI_BASE_ADDRESS_0,
+ PCI_BASE_ADDRESS_1,
+ PCI_BASE_ADDRESS_2,
+ PCI_BASE_ADDRESS_3,
+ PCI_BASE_ADDRESS_4,
+ PCI_BASE_ADDRESS_5,
+ 0
+ };
+ struct res_needed *amount;
+
+ amount = kzalloc(sizeof(*amount), GFP_KERNEL);
+ if (amount == NULL)
+ return NULL;
+
+ ibmphp_pci_bus->number = busno;
+
+ debug ("the bus_no behind the bridge is %x\n", busno);
+ debug ("scanning devices behind the bridge...\n");
+ for (device = 0; device < 32; device++) {
+ amount->devices[device] = 0;
+ for (function = 0; function < 8; function++) {
+ devfn = PCI_DEVFN(device, function);
+
+ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_VENDOR_ID, &vendor_id);
+
+ if (vendor_id != PCI_VENDOR_ID_NOTVALID) {
+ /* found correct device!!! */
+ howmany++;
+
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_HEADER_TYPE, &hdr_type);
+ pci_bus_read_config_dword (ibmphp_pci_bus, devfn, PCI_CLASS_REVISION, &class);
+
+ debug ("hdr_type behind the bridge is %x\n", hdr_type);
+ if (hdr_type & PCI_HEADER_TYPE_BRIDGE) {
+ err ("embedded bridges not supported for hot-plugging.\n");
+ amount->not_correct = 1;
+ return amount;
+ }
+
+ class >>= 8; /* to take revision out, class = class.subclass.prog i/f */
+ if (class == PCI_CLASS_NOT_DEFINED_VGA) {
+ err ("The device %x is VGA compatible and as is not supported for hot plugging. Please choose another device.\n", device);
+ amount->not_correct = 1;
+ return amount;
+ } else if (class == PCI_CLASS_DISPLAY_VGA) {
+ err ("The device %x is not supported for hot plugging. Please choose another device.\n", device);
+ amount->not_correct = 1;
+ return amount;
+ }
+
+ amount->devices[device] = 1;
+
+ for (count = 0; address[count]; count++) {
+ /* for 6 BARs */
+ /*
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, address[count], &tmp);
+ if (tmp & 0x01) // IO
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFD);
+ else // MEMORY
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF);
+ */
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF);
+ pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &bar[count]);
+
+ debug ("what is bar[count]? %x, count = %d\n", bar[count], count);
+
+ if (!bar[count]) /* This BAR is not implemented */
+ continue;
+
+ //tmp_bar = bar[count];
+
+ debug ("count %d device %x function %x wants %x resources\n", count, device, function, bar[count]);
+
+ if (bar[count] & PCI_BASE_ADDRESS_SPACE_IO) {
+ /* This is IO */
+ len[count] = bar[count] & 0xFFFFFFFC;
+ len[count] = ~len[count] + 1;
+ amount->io += len[count];
+ } else {
+ /* This is Memory */
+ if (bar[count] & PCI_BASE_ADDRESS_MEM_PREFETCH) {
+ /* pfmem */
+ len[count] = bar[count] & 0xFFFFFFF0;
+ len[count] = ~len[count] + 1;
+ amount->pfmem += len[count];
+ if (bar[count] & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ /* takes up another dword */
+ count += 1;
+
+ } else {
+ /* regular memory */
+ len[count] = bar[count] & 0xFFFFFFF0;
+ len[count] = ~len[count] + 1;
+ amount->mem += len[count];
+ if (bar[count] & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ /* takes up another dword */
+ count += 1;
+ }
+ }
+ }
+ } /* end for */
+ } /* end if (valid) */
+ } /* end for */
+ } /* end for */
+
+ if (!howmany)
+ amount->not_correct = 1;
+ else
+ amount->not_correct = 0;
+ if ((amount->io) && (amount->io < IOBRIDGE))
+ amount->io = IOBRIDGE;
+ if ((amount->mem) && (amount->mem < MEMBRIDGE))
+ amount->mem = MEMBRIDGE;
+ if ((amount->pfmem) && (amount->pfmem < MEMBRIDGE))
+ amount->pfmem = MEMBRIDGE;
+ return amount;
+}
+
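+/*
+ * Example (added for clarity): if the devices behind the bridge together
+ * request only 0x20 bytes of I/O, the total above is rounded up to
+ * IOBRIDGE, since a bridge window presumably cannot usefully be smaller
+ * than that minimum; mem and pfmem are rounded up to MEMBRIDGE the same
+ * way.
+ */
+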
+/* The following 3 unconfigure_boot_ routines deal with the case where the card
+ * was present at bootup.  Since we don't allocate a func structure in that case,
+ * we need to read the start addresses from PCI config space and then find the
+ * corresponding entries in our resource lists.  The functions return 0, -ENODEV,
+ * or -1 (general failure).
+ * Change: we also call these functions even if we configured the card ourselves
+ * (i.e., not the bootup case), since it should work the same way.
+ */
+static int unconfigure_boot_device (u8 busno, u8 device, u8 function)
+{
+ u32 start_address;
+ u32 address[] = {
+ PCI_BASE_ADDRESS_0,
+ PCI_BASE_ADDRESS_1,
+ PCI_BASE_ADDRESS_2,
+ PCI_BASE_ADDRESS_3,
+ PCI_BASE_ADDRESS_4,
+ PCI_BASE_ADDRESS_5,
+ 0
+ };
+ int count;
+ struct resource_node *io;
+ struct resource_node *mem;
+ struct resource_node *pfmem;
+ struct bus_node *bus;
+ u32 end_address;
+ u32 temp_end;
+ u32 size;
+ u32 tmp_address;
+ unsigned int devfn;
+
+ debug ("%s - enter\n", __func__);
+
+ bus = ibmphp_find_res_bus (busno);
+ if (!bus) {
+ debug ("cannot find corresponding bus.\n");
+ return -EINVAL;
+ }
+
+ devfn = PCI_DEVFN(device, function);
+ ibmphp_pci_bus->number = busno;
+ for (count = 0; address[count]; count++) { /* for 6 BARs */
+ pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &start_address);
+
+ /* We can do this here, b/c by that time the device driver of the card has been stopped */
+
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], 0xFFFFFFFF);
+ pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &size);
+ pci_bus_write_config_dword (ibmphp_pci_bus, devfn, address[count], start_address);
+
+ debug ("start_address is %x\n", start_address);
+ debug ("busno, device, function %x %x %x\n", busno, device, function);
+ if (!size) {
+ /* This BAR is not implemented */
+ debug ("is this bar no implemented?, count = %d\n", count);
+ continue;
+ }
+ tmp_address = start_address;
+ if (start_address & PCI_BASE_ADDRESS_SPACE_IO) {
+ /* This is IO */
+ start_address &= PCI_BASE_ADDRESS_IO_MASK;
+ size = size & 0xFFFFFFFC;
+ size = ~size + 1;
+ end_address = start_address + size - 1;
+ if (ibmphp_find_resource (bus, start_address, &io, IO) < 0) {
+ err ("cannot find corresponding IO resource to remove\n");
+ return -EIO;
+ }
+ debug ("io->start = %x\n", io->start);
+ temp_end = io->end;
+ start_address = io->end + 1;
+ ibmphp_remove_resource (io);
+ /* This is needed b/c of the old I/O restrictions in the BIOS */
+ while (temp_end < end_address) {
+ if (ibmphp_find_resource (bus, start_address, &io, IO) < 0) {
+ err ("cannot find corresponding IO resource to remove\n");
+ return -EIO;
+ }
+ debug ("io->start = %x\n", io->start);
+ temp_end = io->end;
+ start_address = io->end + 1;
+ ibmphp_remove_resource (io);
+ }
+
+ /* TODO: do we need to write anything back into PCI config space? */
+ } else {
+ /* This is Memory */
+ if (start_address & PCI_BASE_ADDRESS_MEM_PREFETCH) {
+ /* pfmem */
+ debug ("start address of pfmem is %x\n", start_address);
+ start_address &= PCI_BASE_ADDRESS_MEM_MASK;
+
+ if (ibmphp_find_resource (bus, start_address, &pfmem, PFMEM) < 0) {
+ err ("cannot find corresponding PFMEM resource to remove\n");
+ return -EIO;
+ }
+ if (pfmem) {
+ debug ("pfmem->start = %x\n", pfmem->start);
+
+ ibmphp_remove_resource(pfmem);
+ }
+ } else {
+ /* regular memory */
+ debug ("start address of mem is %x\n", start_address);
+ start_address &= PCI_BASE_ADDRESS_MEM_MASK;
+
+ if (ibmphp_find_resource (bus, start_address, &mem, MEM) < 0) {
+ err ("cannot find corresponding MEM resource to remove\n");
+ return -EIO;
+ }
+ if (mem) {
+ debug ("mem->start = %x\n", mem->start);
+
+ ibmphp_remove_resource(mem);
+ }
+ }
+ if (tmp_address & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ /* takes up another dword */
+ count += 1;
+ }
+ } /* end of mem */
+ } /* end of for */
+
+ return 0;
+}
+
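+/*
+ * Added note on the sizing sequence above: the original BAR value is
+ * saved, 0xFFFFFFFF is written to learn the size, and the saved start
+ * address is written back, so the (already stopped) device keeps its
+ * decode ranges while we look up the matching resource nodes to remove.
+ */
+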
+static int unconfigure_boot_bridge (u8 busno, u8 device, u8 function)
+{
+ int count;
+ int bus_no, pri_no, sub_no, sec_no = 0;
+ u32 start_address, tmp_address;
+ u8 sec_number, sub_number, pri_number;
+ struct resource_node *io = NULL;
+ struct resource_node *mem = NULL;
+ struct resource_node *pfmem = NULL;
+ struct bus_node *bus;
+ u32 address[] = {
+ PCI_BASE_ADDRESS_0,
+ PCI_BASE_ADDRESS_1,
+ 0
+ };
+ unsigned int devfn;
+
+ devfn = PCI_DEVFN(device, function);
+ ibmphp_pci_bus->number = busno;
+ bus_no = (int) busno;
+ debug ("busno is %x\n", busno);
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_PRIMARY_BUS, &pri_number);
+ debug ("%s - busno = %x, primary_number = %x\n", __func__, busno, pri_number);
+
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_number);
+ debug ("sec_number is %x\n", sec_number);
+ sec_no = (int) sec_number;
+ pri_no = (int) pri_number;
+ if (pri_no != bus_no) {
+ err ("primary numbers in our structures and pci config space don't match.\n");
+ return -EINVAL;
+ }
+
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SUBORDINATE_BUS, &sub_number);
+ sub_no = (int) sub_number;
+ debug ("sub_no is %d, sec_no is %d\n", sub_no, sec_no);
+ if (sec_no != sub_no) {
+ err ("there are more buses behind this bridge. Hot removal is not supported. Please choose another card\n");
+ return -ENODEV;
+ }
+
+ bus = ibmphp_find_res_bus (sec_number);
+ if (!bus) {
+ err ("cannot find Bus structure for the bridged device\n");
+ return -EINVAL;
+ }
+ debug("bus->busno is %x\n", bus->busno);
+ debug("sec_number is %x\n", sec_number);
+
+ ibmphp_remove_bus (bus, busno);
+
+ for (count = 0; address[count]; count++) {
+ /* for 2 BARs */
+ pci_bus_read_config_dword (ibmphp_pci_bus, devfn, address[count], &start_address);
+
+ if (!start_address) {
+ /* This BAR is not implemented */
+ continue;
+ }
+
+ tmp_address = start_address;
+
+ if (start_address & PCI_BASE_ADDRESS_SPACE_IO) {
+ /* This is IO */
+ start_address &= PCI_BASE_ADDRESS_IO_MASK;
+ if (ibmphp_find_resource (bus, start_address, &io, IO) < 0) {
+ err ("cannot find corresponding IO resource to remove\n");
+ return -EIO;
+ }
+ if (io)
+ debug ("io->start = %x\n", io->start);
+
+ ibmphp_remove_resource (io);
+
+ /* TODO: do we need to write anything back into PCI config space? */
+ } else {
+ /* This is Memory */
+ if (start_address & PCI_BASE_ADDRESS_MEM_PREFETCH) {
+ /* pfmem */
+ start_address &= PCI_BASE_ADDRESS_MEM_MASK;
+ if (ibmphp_find_resource (bus, start_address, &pfmem, PFMEM) < 0) {
+ err ("cannot find corresponding PFMEM resource to remove\n");
+ return -EINVAL;
+ }
+ if (pfmem) {
+ debug ("pfmem->start = %x\n", pfmem->start);
+
+ ibmphp_remove_resource(pfmem);
+ }
+ } else {
+ /* regular memory */
+ start_address &= PCI_BASE_ADDRESS_MEM_MASK;
+ if (ibmphp_find_resource (bus, start_address, &mem, MEM) < 0) {
+ err ("cannot find corresponding MEM resource to remove\n");
+ return -EINVAL;
+ }
+ if (mem) {
+ debug ("mem->start = %x\n", mem->start);
+
+ ibmphp_remove_resource(mem);
+ }
+ }
+ if (tmp_address & PCI_BASE_ADDRESS_MEM_TYPE_64) {
+ /* takes up another dword */
+ count += 1;
+ }
+ } /* end of mem */
+ } /* end of for */
+ debug ("%s - exiting, returning success\n", __func__);
+ return 0;
+}
+
+static int unconfigure_boot_card (struct slot *slot_cur)
+{
+ u16 vendor_id;
+ u32 class;
+ u8 hdr_type;
+ u8 device;
+ u8 busno;
+ u8 function;
+ int rc;
+ unsigned int devfn;
+ u8 valid_device = 0x00; /* To see if we are ever able to find valid device and read it */
+
+ debug ("%s - enter\n", __func__);
+
+ device = slot_cur->device;
+ busno = slot_cur->bus;
+
+	debug ("before the loop, device is %x\n", device);
+ /* For every function on the card */
+ for (function = 0x0; function < 0x08; function++) {
+ devfn = PCI_DEVFN(device, function);
+ ibmphp_pci_bus->number = busno;
+
+ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_VENDOR_ID, &vendor_id);
+
+ if (vendor_id != PCI_VENDOR_ID_NOTVALID) {
+ /* found correct device!!! */
+ ++valid_device;
+
+ debug ("%s - found correct device\n", __func__);
+
+			/* header type: x x x x x x x x
+			 * | |___________|=> 1 = PCI-to-PCI bridge, 0 = normal device, 2 = CardBus bridge
+			 * |_=> 0 = single-function device, 1 = multi-function device
+			 */
+
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_HEADER_TYPE, &hdr_type);
+ pci_bus_read_config_dword (ibmphp_pci_bus, devfn, PCI_CLASS_REVISION, &class);
+
+ debug ("hdr_type %x, class %x\n", hdr_type, class);
+ class >>= 8; /* to take revision out, class = class.subclass.prog i/f */
+ if (class == PCI_CLASS_NOT_DEFINED_VGA) {
+				err ("The device %x function %x is VGA compatible and is not supported for hot removal. Please choose another device.\n", device, function);
+ return -ENODEV;
+ } else if (class == PCI_CLASS_DISPLAY_VGA) {
+				err ("The device %x function %x is not supported for hot removal. Please choose another device.\n", device, function);
+ return -ENODEV;
+ }
+
+ switch (hdr_type) {
+ case PCI_HEADER_TYPE_NORMAL:
+ rc = unconfigure_boot_device (busno, device, function);
+ if (rc) {
+ err ("was not able to unconfigure device %x func %x on bus %x. bailing out...\n",
+ device, function, busno);
+ return rc;
+ }
+					function = 0x8;	/* single-function device: skip the remaining functions */
+ break;
+ case PCI_HEADER_TYPE_MULTIDEVICE:
+ rc = unconfigure_boot_device (busno, device, function);
+ if (rc) {
+ err ("was not able to unconfigure device %x func %x on bus %x. bailing out...\n",
+ device, function, busno);
+ return rc;
+ }
+ break;
+ case PCI_HEADER_TYPE_BRIDGE:
+ class >>= 8;
+ if (class != PCI_CLASS_BRIDGE_PCI) {
+						err ("This device %x function %x is not a PCI-to-PCI bridge and is not supported for hot removal. Please try another card.\n", device, function);
+ return -ENODEV;
+ }
+ rc = unconfigure_boot_bridge (busno, device, function);
+ if (rc != 0) {
+ err ("was not able to hot-remove PPB properly.\n");
+ return rc;
+ }
+
+					function = 0x8;	/* single-function bridge: skip the remaining functions */
+ break;
+ case PCI_HEADER_TYPE_MULTIBRIDGE:
+ class >>= 8;
+ if (class != PCI_CLASS_BRIDGE_PCI) {
+						err ("This device %x function %x is not a PCI-to-PCI bridge and is not supported for hot removal. Please try another card.\n", device, function);
+ return -ENODEV;
+ }
+ rc = unconfigure_boot_bridge (busno, device, function);
+ if (rc != 0) {
+ err ("was not able to hot-remove PPB properly.\n");
+ return rc;
+ }
+ break;
+ default:
+					err ("cannot read the device's header type\n");
+					return -1;
+ } /* end of switch */
+ } /* end of valid device */
+ } /* end of for */
+
+ if (!valid_device) {
+		err ("could not find a device to unconfigure, or could not read the card\n");
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * free the resources of the card (multi-function, single-function, or bridged)
+ * Parameters: slot, flag saying whether this is for removing the entire module
+ * or just unconfiguring the device
+ * TO DO: will probably need to add code to remove any resources (in
+ * particular bus structures) left behind when configure_card hits an error
+ * Returns: 0, -1, -ENODEV
+ */
+int ibmphp_unconfigure_card (struct slot **slot_cur, int the_end)
+{
+ int i;
+ int count;
+ int rc;
+ struct slot *sl = *slot_cur;
+ struct pci_func *cur_func = NULL;
+ struct pci_func *temp_func;
+
+ debug ("%s - enter\n", __func__);
+
+ if (!the_end) {
+ /* Need to unconfigure the card */
+ rc = unconfigure_boot_card (sl);
+ if ((rc == -ENODEV) || (rc == -EIO) || (rc == -EINVAL)) {
+ /* In all other cases, will still need to get rid of func structure if it exists */
+ return rc;
+ }
+ }
+
+ if (sl->func) {
+ cur_func = sl->func;
+ while (cur_func) {
+			/* TO DO: will most likely need to remove the bus structure from the resource lists as well */
+ if (cur_func->bus) {
+ /* in other words, it's a PPB */
+ count = 2;
+ } else {
+ count = 6;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (cur_func->io[i]) {
+ debug ("io[%d] exists\n", i);
+ if (the_end > 0)
+ ibmphp_remove_resource (cur_func->io[i]);
+ cur_func->io[i] = NULL;
+ }
+ if (cur_func->mem[i]) {
+ debug ("mem[%d] exists\n", i);
+ if (the_end > 0)
+ ibmphp_remove_resource (cur_func->mem[i]);
+ cur_func->mem[i] = NULL;
+ }
+ if (cur_func->pfmem[i]) {
+ debug ("pfmem[%d] exists\n", i);
+ if (the_end > 0)
+ ibmphp_remove_resource (cur_func->pfmem[i]);
+ cur_func->pfmem[i] = NULL;
+ }
+ }
+
+ temp_func = cur_func->next;
+ kfree (cur_func);
+ cur_func = temp_func;
+ }
+ }
+
+ sl->func = NULL;
+ *slot_cur = sl;
+ debug ("%s - exit\n", __func__);
+ return 0;
+}
+
+/*
+ * add a new bus resulting from hot-plugging a PPB (PCI-to-PCI bridge) with
+ * devices behind it
+ *
+ * Input: bus and the amount of resources needed (we know we can assign those,
+ * since they've been checked already)
+ * Output: bus added in the correct spot
+ * Returns: 0, -1, or an error code
+ */
+static int add_new_bus (struct bus_node *bus, struct resource_node *io, struct resource_node *mem, struct resource_node *pfmem, u8 parent_busno)
+{
+ struct range_node *io_range = NULL;
+ struct range_node *mem_range = NULL;
+ struct range_node *pfmem_range = NULL;
+ struct bus_node *cur_bus = NULL;
+
+ /* Trying to find the parent bus number */
+ if (parent_busno != 0xFF) {
+ cur_bus = ibmphp_find_res_bus (parent_busno);
+ if (!cur_bus) {
+			err ("strange, cannot find a bus that is supposed to be in the system... something is terribly wrong...\n");
+ return -ENODEV;
+ }
+
+ list_add (&bus->bus_list, &cur_bus->bus_list);
+ }
+ if (io) {
+ io_range = kzalloc(sizeof(*io_range), GFP_KERNEL);
+ if (!io_range) {
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ io_range->start = io->start;
+ io_range->end = io->end;
+ io_range->rangeno = 1;
+ bus->noIORanges = 1;
+ bus->rangeIO = io_range;
+ }
+ if (mem) {
+ mem_range = kzalloc(sizeof(*mem_range), GFP_KERNEL);
+ if (!mem_range) {
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ mem_range->start = mem->start;
+ mem_range->end = mem->end;
+ mem_range->rangeno = 1;
+ bus->noMemRanges = 1;
+ bus->rangeMem = mem_range;
+ }
+ if (pfmem) {
+ pfmem_range = kzalloc(sizeof(*pfmem_range), GFP_KERNEL);
+ if (!pfmem_range) {
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ pfmem_range->start = pfmem->start;
+ pfmem_range->end = pfmem->end;
+ pfmem_range->rangeno = 1;
+ bus->noPFMemRanges = 1;
+ bus->rangePFMem = pfmem_range;
+ }
+ return 0;
+}
+
+/*
+ * find the first available bus number for the PPB to use as its secondary bus
+ * Parameters: bus_number of the primary bus
+ * Returns: bus_number of the secondary bus or 0xff in case of failure
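+ *
+ * Example with made-up numbers: for primary_busno = 2, a slot range of
+ * [min, max] = [1, 4] and slotno = 3, the candidate is
+ * (3 - 1) + 2 + 1 = 5, which is returned only if bus 5 has no ranges yet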
+ */
+static u8 find_sec_number (u8 primary_busno, u8 slotno)
+{
+ int min, max;
+ u8 busno;
+ struct bus_info *bus;
+ struct bus_node *bus_cur;
+
+ bus = ibmphp_find_same_bus_num (primary_busno);
+ if (!bus) {
+ err ("cannot get slot range of the bus from the BIOS\n");
+ return 0xff;
+ }
+ max = bus->slot_max;
+ min = bus->slot_min;
+ if ((slotno > max) || (slotno < min)) {
+		err ("slot number is outside the bus's slot range\n");
+ return 0xff;
+ }
+ busno = (u8) (slotno - (u8) min);
+ busno += primary_busno + 0x01;
+ bus_cur = ibmphp_find_res_bus (busno);
+ /* either there is no such bus number, or there are no ranges, which
+ * can only happen if we removed the bridged device in previous load
+ * of the driver, and now only have the skeleton bus struct
+ */
+ if ((!bus_cur) || (!(bus_cur->rangeIO) && !(bus_cur->rangeMem) && !(bus_cur->rangePFMem)))
+ return busno;
+ return 0xff;
+}
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
new file mode 100644
index 000000000..f279060cf
--- /dev/null
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -0,0 +1,2158 @@
+/*
+ * IBM Hot Plug Controller Driver
+ *
+ * Written By: Irene Zubarev, IBM Corporation
+ *
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001,2002 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <gregkh@us.ibm.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include "ibmphp.h"
+
+static int flags = 0;		/* set once ibmphp_free_resources() has run */
+
+static void update_resources (struct bus_node *bus_cur, int type, int rangeno);
+static int once_over (void);
+static int remove_ranges (struct bus_node *, struct bus_node *);
+static int update_bridge_ranges (struct bus_node **);
+static int add_bus_range (int type, struct range_node *, struct bus_node *);
+static void fix_resources (struct bus_node *);
+static struct bus_node *find_bus_wprev (u8, struct bus_node **, u8);
+
+static LIST_HEAD(gbuses);
+
+static struct bus_node * __init alloc_error_bus (struct ebda_pci_rsrc *curr, u8 busno, int flag)
+{
+ struct bus_node *newbus;
+
+	if (!curr && !flag) {
+ err ("NULL pointer passed\n");
+ return NULL;
+ }
+
+ newbus = kzalloc(sizeof(struct bus_node), GFP_KERNEL);
+ if (!newbus) {
+ err ("out of system memory\n");
+ return NULL;
+ }
+
+ if (flag)
+ newbus->busno = busno;
+ else
+ newbus->busno = curr->bus_num;
+ list_add_tail (&newbus->bus_list, &gbuses);
+ return newbus;
+}
+
+static struct resource_node * __init alloc_resources (struct ebda_pci_rsrc *curr)
+{
+ struct resource_node *rs;
+
+ if (!curr) {
+ err ("NULL passed to allocate\n");
+ return NULL;
+ }
+
+ rs = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
+ if (!rs) {
+ err ("out of system memory\n");
+ return NULL;
+ }
+ rs->busno = curr->bus_num;
+ rs->devfunc = curr->dev_fun;
+ rs->start = curr->start_addr;
+ rs->end = curr->end_addr;
+ rs->len = curr->end_addr - curr->start_addr + 1;
+ return rs;
+}
+
+static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node **new_range, struct ebda_pci_rsrc *curr, int flag, u8 first_bus)
+{
+ struct bus_node *newbus;
+ struct range_node *newrange;
+ u8 num_ranges = 0;
+
+ if (first_bus) {
+ newbus = kzalloc(sizeof(struct bus_node), GFP_KERNEL);
+ if (!newbus) {
+ err ("out of system memory.\n");
+ return -ENOMEM;
+ }
+ newbus->busno = curr->bus_num;
+ } else {
+ newbus = *new_bus;
+ switch (flag) {
+ case MEM:
+ num_ranges = newbus->noMemRanges;
+ break;
+ case PFMEM:
+ num_ranges = newbus->noPFMemRanges;
+ break;
+ case IO:
+ num_ranges = newbus->noIORanges;
+ break;
+ }
+ }
+
+ newrange = kzalloc(sizeof(struct range_node), GFP_KERNEL);
+ if (!newrange) {
+ if (first_bus)
+ kfree (newbus);
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ newrange->start = curr->start_addr;
+ newrange->end = curr->end_addr;
+
+ if (first_bus || (!num_ranges))
+ newrange->rangeno = 1;
+ else {
+ /* need to insert our range */
+ add_bus_range (flag, newrange, newbus);
+		debug ("range for resource type %d inserted on bus %x [%x - %x]\n", flag, newbus->busno, newrange->start, newrange->end);
+ }
+
+ switch (flag) {
+ case MEM:
+ newbus->rangeMem = newrange;
+ if (first_bus)
+ newbus->noMemRanges = 1;
+ else {
+ debug ("First Memory Primary on bus %x, [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
+ ++newbus->noMemRanges;
+ fix_resources (newbus);
+ }
+ break;
+ case IO:
+ newbus->rangeIO = newrange;
+ if (first_bus)
+ newbus->noIORanges = 1;
+ else {
+ debug ("First IO Primary on bus %x, [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
+ ++newbus->noIORanges;
+ fix_resources (newbus);
+ }
+ break;
+ case PFMEM:
+ newbus->rangePFMem = newrange;
+ if (first_bus)
+ newbus->noPFMemRanges = 1;
+ else {
+ debug ("1st PFMemory Primary on Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
+ ++newbus->noPFMemRanges;
+ fix_resources (newbus);
+ }
+
+ break;
+ }
+
+ *new_bus = newbus;
+ *new_range = newrange;
+ return 0;
+}
+
+
+/* Notes:
+ * 1. The ranges are ordered; the buses are not (first come, first served).
+ *
+ * 2. If we cannot allocate out of a PFMem range, we allocate from the Mem
+ * ranges. The PFMemFromMem list is not sorted (no need, since the mem node
+ * does the real work). To avoid changing the entire code, we also add a mem
+ * node whenever this case happens, so that ibmphp_check_mem_resource etc
+ * need not change (and since it really is taking a Mem resource)
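+ *
+ * 3. Resources on a bus hang off a single chain: 'next' links resources
+ * within the same range, while 'nextRange' points at the first resource of
+ * the following range; that is why the traversal loops below try 'next'
+ * first and fall back to 'nextRange'.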
+ */
+
+/*****************************************************************************
+ * This is the Resource Management initialization function. It will go through
+ * the Resource list taken from EBDA and fill in this module's data structures
+ *
+ * Note: this does not take into consideration IO restrictions of primary
+ * buses, since for now we assume we don't have those on our buses.
+ *
+ * Input: ptr to the head of the resource list from EBDA
+ * Output: 0, -1 or error codes
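+ *
+ * For each EBDA entry: a primary bus resource becomes a range_node hanging
+ * off a bus_node (created on first sight of that bus), while a device
+ * resource becomes a resource_node added via ibmphp_add_resource(); devices
+ * seen before their bus keep rangeno = -1 until the range appears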
+ ***************************************************************************/
+int __init ibmphp_rsrc_init (void)
+{
+ struct ebda_pci_rsrc *curr;
+ struct range_node *newrange = NULL;
+ struct bus_node *newbus = NULL;
+ struct bus_node *bus_cur;
+ struct bus_node *bus_prev;
+ struct list_head *tmp;
+ struct resource_node *new_io = NULL;
+ struct resource_node *new_mem = NULL;
+ struct resource_node *new_pfmem = NULL;
+ int rc;
+ struct list_head *tmp_ebda;
+
+ list_for_each (tmp_ebda, &ibmphp_ebda_pci_rsrc_head) {
+ curr = list_entry (tmp_ebda, struct ebda_pci_rsrc, ebda_pci_rsrc_list);
+ if (!(curr->rsrc_type & PCIDEVMASK)) {
+			/* EBDA still lists non-PCI devices, so ignore... */
+			debug ("this is not a PCI DEVICE in rsrc_init, please take care\n");
+			/* continue; */
+ }
+
+ /* this is a primary bus resource */
+ if (curr->rsrc_type & PRIMARYBUSMASK) {
+ /* memory */
+ if ((curr->rsrc_type & RESTYPE) == MMASK) {
+ /* no bus structure exists in place yet */
+ if (list_empty (&gbuses)) {
+ rc = alloc_bus_range(&newbus, &newrange, curr, MEM, 1);
+ if (rc)
+ return rc;
+ list_add_tail (&newbus->bus_list, &gbuses);
+					debug ("gbuses is empty, Memory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
+ } else {
+ bus_cur = find_bus_wprev (curr->bus_num, &bus_prev, 1);
+ /* found our bus */
+ if (bus_cur) {
+ rc = alloc_bus_range (&bus_cur, &newrange, curr, MEM, 0);
+ if (rc)
+ return rc;
+ } else {
+ /* went through all the buses and didn't find ours, need to create a new bus node */
+ rc = alloc_bus_range(&newbus, &newrange, curr, MEM, 1);
+ if (rc)
+ return rc;
+
+ list_add_tail (&newbus->bus_list, &gbuses);
+ debug ("New Bus, Memory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
+ }
+ }
+ } else if ((curr->rsrc_type & RESTYPE) == PFMASK) {
+ /* prefetchable memory */
+ if (list_empty (&gbuses)) {
+ /* no bus structure exists in place yet */
+ rc = alloc_bus_range(&newbus, &newrange, curr, PFMEM, 1);
+ if (rc)
+ return rc;
+ list_add_tail (&newbus->bus_list, &gbuses);
+					debug ("gbuses is empty, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
+ } else {
+ bus_cur = find_bus_wprev (curr->bus_num, &bus_prev, 1);
+ if (bus_cur) {
+ /* found our bus */
+ rc = alloc_bus_range (&bus_cur, &newrange, curr, PFMEM, 0);
+ if (rc)
+ return rc;
+ } else {
+ /* went through all the buses and didn't find ours, need to create a new bus node */
+ rc = alloc_bus_range(&newbus, &newrange, curr, PFMEM, 1);
+ if (rc)
+ return rc;
+ list_add_tail (&newbus->bus_list, &gbuses);
+ debug ("1st Bus, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
+ }
+ }
+ } else if ((curr->rsrc_type & RESTYPE) == IOMASK) {
+ /* IO */
+ if (list_empty (&gbuses)) {
+ /* no bus structure exists in place yet */
+ rc = alloc_bus_range(&newbus, &newrange, curr, IO, 1);
+ if (rc)
+ return rc;
+ list_add_tail (&newbus->bus_list, &gbuses);
+					debug ("gbuses is empty, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
+ } else {
+ bus_cur = find_bus_wprev (curr->bus_num, &bus_prev, 1);
+ if (bus_cur) {
+ rc = alloc_bus_range (&bus_cur, &newrange, curr, IO, 0);
+ if (rc)
+ return rc;
+ } else {
+ /* went through all the buses and didn't find ours, need to create a new bus node */
+ rc = alloc_bus_range(&newbus, &newrange, curr, IO, 1);
+ if (rc)
+ return rc;
+ list_add_tail (&newbus->bus_list, &gbuses);
+ debug ("1st Bus, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
+ }
+ }
+
+ } else {
+				;	/* type is reserved; TODO: decide whether anything
+					   needs to be done in this case */
+ }
+ } else {
+ /* regular pci device resource */
+ if ((curr->rsrc_type & RESTYPE) == MMASK) {
+ /* Memory resource */
+ new_mem = alloc_resources (curr);
+ if (!new_mem)
+ return -ENOMEM;
+ new_mem->type = MEM;
+				/*
+				 * if the bus wasn't found, the PCI device came
+				 * before the Primary Bus info, so we need to
+				 * create a bus; rangeno becomes a problem, so
+				 * assign -1 and update it once the range
+				 * actually appears...
+				 */
+ if (ibmphp_add_resource (new_mem) < 0) {
+ newbus = alloc_error_bus (curr, 0, 0);
+ if (!newbus)
+ return -ENOMEM;
+ newbus->firstMem = new_mem;
+ ++newbus->needMemUpdate;
+ new_mem->rangeno = -1;
+ }
+ debug ("Memory resource for device %x, bus %x, [%x - %x]\n", new_mem->devfunc, new_mem->busno, new_mem->start, new_mem->end);
+
+ } else if ((curr->rsrc_type & RESTYPE) == PFMASK) {
+ /* PFMemory resource */
+ new_pfmem = alloc_resources (curr);
+ if (!new_pfmem)
+ return -ENOMEM;
+ new_pfmem->type = PFMEM;
+ new_pfmem->fromMem = 0;
+ if (ibmphp_add_resource (new_pfmem) < 0) {
+ newbus = alloc_error_bus (curr, 0, 0);
+ if (!newbus)
+ return -ENOMEM;
+ newbus->firstPFMem = new_pfmem;
+ ++newbus->needPFMemUpdate;
+ new_pfmem->rangeno = -1;
+ }
+
+ debug ("PFMemory resource for device %x, bus %x, [%x - %x]\n", new_pfmem->devfunc, new_pfmem->busno, new_pfmem->start, new_pfmem->end);
+ } else if ((curr->rsrc_type & RESTYPE) == IOMASK) {
+ /* IO resource */
+ new_io = alloc_resources (curr);
+ if (!new_io)
+ return -ENOMEM;
+ new_io->type = IO;
+
+				/*
+				 * if the bus wasn't found, the PCI device came
+				 * before the Primary Bus info, so we need to
+				 * create a bus; rangeno becomes a problem, so we
+				 * can assign -1 and update it once the range
+				 * actually appears...
+				 */
+ if (ibmphp_add_resource (new_io) < 0) {
+ newbus = alloc_error_bus (curr, 0, 0);
+ if (!newbus)
+ return -ENOMEM;
+ newbus->firstIO = new_io;
+ ++newbus->needIOUpdate;
+ new_io->rangeno = -1;
+ }
+ debug ("IO resource for device %x, bus %x, [%x - %x]\n", new_io->devfunc, new_io->busno, new_io->start, new_io->end);
+ }
+ }
+ }
+
+ list_for_each (tmp, &gbuses) {
+ bus_cur = list_entry (tmp, struct bus_node, bus_list);
+ /* This is to get info about PPB resources, since EBDA doesn't put this info into the primary bus info */
+ rc = update_bridge_ranges (&bus_cur);
+ if (rc)
+ return rc;
+ }
+ return once_over (); /* This is to align ranges (so no -1) */
+}
+
+/********************************************************************************
+ * This function adds a range into a sorted list of ranges per bus for a particular
+ * range type; it then calls update_resources() to update the range numbers on
+ * the pci devices' resources of that type
+ *
+ * Input: type of the resource, range to add, current bus
+ * Output: 0 or -1, bus and range ptrs
+ ********************************************************************************/
+static int add_bus_range (int type, struct range_node *range, struct bus_node *bus_cur)
+{
+ struct range_node *range_cur = NULL;
+ struct range_node *range_prev;
+ int count = 0, i_init;
+ int noRanges = 0;
+
+ switch (type) {
+ case MEM:
+ range_cur = bus_cur->rangeMem;
+ noRanges = bus_cur->noMemRanges;
+ break;
+ case PFMEM:
+ range_cur = bus_cur->rangePFMem;
+ noRanges = bus_cur->noPFMemRanges;
+ break;
+ case IO:
+ range_cur = bus_cur->rangeIO;
+ noRanges = bus_cur->noIORanges;
+ break;
+ }
+
+ range_prev = NULL;
+ while (range_cur) {
+ if (range->start < range_cur->start)
+ break;
+ range_prev = range_cur;
+ range_cur = range_cur->next;
+ count = count + 1;
+ }
+ if (!count) {
+ /* our range will go at the beginning of the list */
+ switch (type) {
+ case MEM:
+ bus_cur->rangeMem = range;
+ break;
+ case PFMEM:
+ bus_cur->rangePFMem = range;
+ break;
+ case IO:
+ bus_cur->rangeIO = range;
+ break;
+ }
+ range->next = range_cur;
+ range->rangeno = 1;
+ i_init = 0;
+ } else if (!range_cur) {
+ /* our range will go at the end of the list */
+ range->next = NULL;
+ range_prev->next = range;
+ range->rangeno = range_prev->rangeno + 1;
+ return 0;
+ } else {
+ /* the range is in the middle */
+ range_prev->next = range;
+ range->next = range_cur;
+ range->rangeno = range_cur->rangeno;
+ i_init = range_prev->rangeno;
+ }
+
+ for (count = i_init; count < noRanges; ++count) {
+ ++range_cur->rangeno;
+ range_cur = range_cur->next;
+ }
+
+ update_resources (bus_cur, type, i_init + 1);
+ return 0;
+}
+
+/*******************************************************************************
+ * This routine goes through the list of resources of type 'type' and updates
+ * the range numbers that they correspond to. It is called from add_bus_range()
+ *
+ * Input: bus, type of the resource, the rangeno starting from which to update
+ ******************************************************************************/
+static void update_resources (struct bus_node *bus_cur, int type, int rangeno)
+{
+ struct resource_node *res = NULL;
+ u8 eol = 0; /* end of list indicator */
+
+ switch (type) {
+ case MEM:
+ if (bus_cur->firstMem)
+ res = bus_cur->firstMem;
+ break;
+ case PFMEM:
+ if (bus_cur->firstPFMem)
+ res = bus_cur->firstPFMem;
+ break;
+ case IO:
+ if (bus_cur->firstIO)
+ res = bus_cur->firstIO;
+ break;
+ }
+
+ if (res) {
+ while (res) {
+ if (res->rangeno == rangeno)
+ break;
+ if (res->next)
+ res = res->next;
+ else if (res->nextRange)
+ res = res->nextRange;
+ else {
+ eol = 1;
+ break;
+ }
+ }
+
+ if (!eol) {
+ /* found the range */
+ while (res) {
+ ++res->rangeno;
+ res = res->next;
+ }
+ }
+ }
+}
+
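+/*
+ * helper for fix_resources(): walk the bus's resource chain and, for each
+ * resource still marked rangeno == -1, assign the number of the first range
+ * that fully contains it, decrementing the bus's pending-update counter
+ */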
+static void fix_me (struct resource_node *res, struct bus_node *bus_cur, struct range_node *range)
+{
+ char * str = "";
+ switch (res->type) {
+ case IO:
+ str = "io";
+ break;
+ case MEM:
+ str = "mem";
+ break;
+ case PFMEM:
+ str = "pfmem";
+ break;
+ }
+
+ while (res) {
+ if (res->rangeno == -1) {
+ while (range) {
+ if ((res->start >= range->start) && (res->end <= range->end)) {
+ res->rangeno = range->rangeno;
+ debug ("%s->rangeno in fix_resources is %d\n", str, res->rangeno);
+ switch (res->type) {
+ case IO:
+ --bus_cur->needIOUpdate;
+ break;
+ case MEM:
+ --bus_cur->needMemUpdate;
+ break;
+ case PFMEM:
+ --bus_cur->needPFMemUpdate;
+ break;
+ }
+ break;
+ }
+ range = range->next;
+ }
+ }
+ if (res->next)
+ res = res->next;
+ else
+ res = res->nextRange;
+ }
+}
+
+/*****************************************************************************
+ * This routine reassigns range numbers to the resources that had a -1.
+ * This case can happen only if, upon initialization, resources taken by a pci
+ * device appear in the EBDA before the resources allocated for that bus; since
+ * we don't know the range yet, we assign -1, and this routine is called after
+ * a new range is added to see whether the resources with unknown range belong
+ * to the added range
+ *
+ * Input: current bus
+ * Output: none; the list of resources for that bus is fixed where possible
+ *******************************************************************************/
+static void fix_resources (struct bus_node *bus_cur)
+{
+ struct range_node *range;
+ struct resource_node *res;
+
+ debug ("%s - bus_cur->busno = %d\n", __func__, bus_cur->busno);
+
+ if (bus_cur->needIOUpdate) {
+ res = bus_cur->firstIO;
+ range = bus_cur->rangeIO;
+ fix_me (res, bus_cur, range);
+ }
+ if (bus_cur->needMemUpdate) {
+ res = bus_cur->firstMem;
+ range = bus_cur->rangeMem;
+ fix_me (res, bus_cur, range);
+ }
+ if (bus_cur->needPFMemUpdate) {
+ res = bus_cur->firstPFMem;
+ range = bus_cur->rangePFMem;
+ fix_me (res, bus_cur, range);
+ }
+}
+
+/*******************************************************************************
+ * This routine adds a resource to the appropriate bus's list of resources,
+ * based on the resource type and sorted by starting address. It assigns
+ * the next and nextRange ptrs as needed.
+ *
+ * Input: resource ptr
+ * Output: ptrs assigned (to the node)
+ * 0 or -1
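+ *
+ * Cases handled below: first resource of its type on the bus, insertion at
+ * the head, middle, or tail of an existing range, and the first occurrence
+ * of a new range (linked in through nextRange)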
+ *******************************************************************************/
+int ibmphp_add_resource (struct resource_node *res)
+{
+ struct resource_node *res_cur;
+ struct resource_node *res_prev;
+ struct bus_node *bus_cur;
+ struct range_node *range_cur = NULL;
+ struct resource_node *res_start = NULL;
+
+ debug ("%s - enter\n", __func__);
+
+ if (!res) {
+ err ("NULL passed to add\n");
+ return -ENODEV;
+ }
+
+ bus_cur = find_bus_wprev (res->busno, NULL, 0);
+
+ if (!bus_cur) {
+ /* didn't find a bus, something's wrong!!! */
+		debug ("no bus in the system; either the pci_dev is wrong or allocation failed\n");
+ return -ENODEV;
+ }
+
+ /* Normal case */
+ switch (res->type) {
+ case IO:
+ range_cur = bus_cur->rangeIO;
+ res_start = bus_cur->firstIO;
+ break;
+ case MEM:
+ range_cur = bus_cur->rangeMem;
+ res_start = bus_cur->firstMem;
+ break;
+ case PFMEM:
+ range_cur = bus_cur->rangePFMem;
+ res_start = bus_cur->firstPFMem;
+ break;
+ default:
+ err ("cannot read the type of the resource to add... problem\n");
+ return -EINVAL;
+ }
+ while (range_cur) {
+ if ((res->start >= range_cur->start) && (res->end <= range_cur->end)) {
+ res->rangeno = range_cur->rangeno;
+ break;
+ }
+ range_cur = range_cur->next;
+ }
+
+	/* this is again the case of rangeno = -1 */
+
+ if (!range_cur) {
+ switch (res->type) {
+ case IO:
+ ++bus_cur->needIOUpdate;
+ break;
+ case MEM:
+ ++bus_cur->needMemUpdate;
+ break;
+ case PFMEM:
+ ++bus_cur->needPFMemUpdate;
+ break;
+ }
+ res->rangeno = -1;
+ }
+
+ debug ("The range is %d\n", res->rangeno);
+ if (!res_start) {
+ /* no first{IO,Mem,Pfmem} on the bus, 1st IO/Mem/Pfmem resource ever */
+ switch (res->type) {
+ case IO:
+ bus_cur->firstIO = res;
+ break;
+ case MEM:
+ bus_cur->firstMem = res;
+ break;
+ case PFMEM:
+ bus_cur->firstPFMem = res;
+ break;
+ }
+ res->next = NULL;
+ res->nextRange = NULL;
+ } else {
+ res_cur = res_start;
+ res_prev = NULL;
+
+ debug ("res_cur->rangeno is %d\n", res_cur->rangeno);
+
+ while (res_cur) {
+ if (res_cur->rangeno >= res->rangeno)
+ break;
+ res_prev = res_cur;
+ if (res_cur->next)
+ res_cur = res_cur->next;
+ else
+ res_cur = res_cur->nextRange;
+ }
+
+ if (!res_cur) {
+ /* at the end of the resource list */
+ debug ("i should be here, [%x - %x]\n", res->start, res->end);
+ res_prev->nextRange = res;
+ res->next = NULL;
+ res->nextRange = NULL;
+ } else if (res_cur->rangeno == res->rangeno) {
+ /* in the same range */
+ while (res_cur) {
+ if (res->start < res_cur->start)
+ break;
+ res_prev = res_cur;
+ res_cur = res_cur->next;
+ }
+ if (!res_cur) {
+ /* the last resource in this range */
+ res_prev->next = res;
+ res->next = NULL;
+ res->nextRange = res_prev->nextRange;
+ res_prev->nextRange = NULL;
+ } else if (res->start < res_cur->start) {
+ /* at the beginning or middle of the range */
+ if (!res_prev) {
+ switch (res->type) {
+ case IO:
+ bus_cur->firstIO = res;
+ break;
+ case MEM:
+ bus_cur->firstMem = res;
+ break;
+ case PFMEM:
+ bus_cur->firstPFMem = res;
+ break;
+ }
+ } else if (res_prev->rangeno == res_cur->rangeno)
+ res_prev->next = res;
+ else
+ res_prev->nextRange = res;
+
+ res->next = res_cur;
+ res->nextRange = NULL;
+ }
+ } else {
+			/* this is the 1st occurrence of this range */
+ if (!res_prev) {
+ /* at the beginning of the resource list */
+ res->next = NULL;
+ switch (res->type) {
+ case IO:
+ res->nextRange = bus_cur->firstIO;
+ bus_cur->firstIO = res;
+ break;
+ case MEM:
+ res->nextRange = bus_cur->firstMem;
+ bus_cur->firstMem = res;
+ break;
+ case PFMEM:
+ res->nextRange = bus_cur->firstPFMem;
+ bus_cur->firstPFMem = res;
+ break;
+ }
+ } else if (res_cur->rangeno > res->rangeno) {
+ /* in the middle of the resource list */
+ res_prev->nextRange = res;
+ res->next = NULL;
+ res->nextRange = res_cur;
+ }
+ }
+ }
+
+ debug ("%s - exit\n", __func__);
+ return 0;
+}
+
+/****************************************************************************
+ * This routine will remove the resource from the list of resources
+ *
+ * Input: io, mem, and/or pfmem resource to be deleted
+ * Output: modified resource list
+ * 0 or error code
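+ * Note: a PFMEM resource that was carved out of a MEM range lives on the
+ * PFMemFromMem list; removing it also removes its shadow MEM node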
+ ****************************************************************************/
+int ibmphp_remove_resource (struct resource_node *res)
+{
+ struct bus_node *bus_cur;
+ struct resource_node *res_cur = NULL;
+ struct resource_node *res_prev;
+ struct resource_node *mem_cur;
+ char * type = "";
+
+ if (!res) {
+ err ("resource to remove is NULL\n");
+ return -ENODEV;
+ }
+
+ bus_cur = find_bus_wprev (res->busno, NULL, 0);
+
+ if (!bus_cur) {
+		err ("cannot find the corresponding bus of the resource to remove, bailing out...\n");
+ return -ENODEV;
+ }
+
+ switch (res->type) {
+ case IO:
+ res_cur = bus_cur->firstIO;
+ type = "io";
+ break;
+ case MEM:
+ res_cur = bus_cur->firstMem;
+ type = "mem";
+ break;
+ case PFMEM:
+ res_cur = bus_cur->firstPFMem;
+ type = "pfmem";
+ break;
+ default:
+ err ("unknown type for resource to remove\n");
+ return -EINVAL;
+ }
+ res_prev = NULL;
+
+ while (res_cur) {
+ if ((res_cur->start == res->start) && (res_cur->end == res->end))
+ break;
+ res_prev = res_cur;
+ if (res_cur->next)
+ res_cur = res_cur->next;
+ else
+ res_cur = res_cur->nextRange;
+ }
+
+ if (!res_cur) {
+ if (res->type == PFMEM) {
+ /*
+ * case where pfmem might be in the PFMemFromMem list
+ * so will also need to remove the corresponding mem
+ * entry
+ */
+ res_cur = bus_cur->firstPFMemFromMem;
+ res_prev = NULL;
+
+ while (res_cur) {
+ if ((res_cur->start == res->start) && (res_cur->end == res->end)) {
+ mem_cur = bus_cur->firstMem;
+ while (mem_cur) {
+ if ((mem_cur->start == res_cur->start)
+ && (mem_cur->end == res_cur->end))
+ break;
+ if (mem_cur->next)
+ mem_cur = mem_cur->next;
+ else
+ mem_cur = mem_cur->nextRange;
+ }
+ if (!mem_cur) {
+ err ("cannot find corresponding mem node for pfmem...\n");
+ return -EINVAL;
+ }
+
+ ibmphp_remove_resource (mem_cur);
+ if (!res_prev)
+ bus_cur->firstPFMemFromMem = res_cur->next;
+ else
+ res_prev->next = res_cur->next;
+ kfree (res_cur);
+ return 0;
+ }
+ res_prev = res_cur;
+ if (res_cur->next)
+ res_cur = res_cur->next;
+ else
+ res_cur = res_cur->nextRange;
+ }
+ if (!res_cur) {
+ err ("cannot find pfmem to delete...\n");
+ return -EINVAL;
+ }
+ } else {
+ err ("the %s resource is not in the list to be deleted...\n", type);
+ return -EINVAL;
+ }
+ }
+ if (!res_prev) {
+ /* first device to be deleted */
+ if (res_cur->next) {
+ switch (res->type) {
+ case IO:
+ bus_cur->firstIO = res_cur->next;
+ break;
+ case MEM:
+ bus_cur->firstMem = res_cur->next;
+ break;
+ case PFMEM:
+ bus_cur->firstPFMem = res_cur->next;
+ break;
+ }
+ } else if (res_cur->nextRange) {
+ switch (res->type) {
+ case IO:
+ bus_cur->firstIO = res_cur->nextRange;
+ break;
+ case MEM:
+ bus_cur->firstMem = res_cur->nextRange;
+ break;
+ case PFMEM:
+ bus_cur->firstPFMem = res_cur->nextRange;
+ break;
+ }
+ } else {
+ switch (res->type) {
+ case IO:
+ bus_cur->firstIO = NULL;
+ break;
+ case MEM:
+ bus_cur->firstMem = NULL;
+ break;
+ case PFMEM:
+ bus_cur->firstPFMem = NULL;
+ break;
+ }
+ }
+ kfree (res_cur);
+ return 0;
+ } else {
+ if (res_cur->next) {
+ if (res_prev->rangeno == res_cur->rangeno)
+ res_prev->next = res_cur->next;
+ else
+ res_prev->nextRange = res_cur->next;
+ } else if (res_cur->nextRange) {
+ res_prev->next = NULL;
+ res_prev->nextRange = res_cur->nextRange;
+ } else {
+ res_prev->next = NULL;
+ res_prev->nextRange = NULL;
+ }
+ kfree (res_cur);
+ return 0;
+ }
+
+ return 0;
+}
+
+static struct range_node *find_range (struct bus_node *bus_cur, struct resource_node *res)
+{
+ struct range_node *range = NULL;
+
+ switch (res->type) {
+ case IO:
+ range = bus_cur->rangeIO;
+ break;
+ case MEM:
+ range = bus_cur->rangeMem;
+ break;
+ case PFMEM:
+ range = bus_cur->rangePFMem;
+ break;
+ default:
+ err ("cannot read resource type in find_range\n");
+ }
+
+ while (range) {
+ if (res->rangeno == range->rangeno)
+ break;
+ range = range->next;
+ }
+ return range;
+}
+
+/*****************************************************************************
+ * This routine checks that the io/mem/pfmem->len the device asked for can fit
+ * within our list of available IO/MEM/PFMEM resources. If it cannot, it returns
+ * -EINVAL, otherwise it returns 0
+ *
+ * Input: resource
+ * Output: the correct start and end address are inputted into the resource node,
+ * 0 or -EINVAL
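+ *
+ * Strategy: scan the gaps before, between, and after the already-assigned
+ * resources of the matching type, remembering the smallest gap (len_cur)
+ * that still fits res->len and whose start can be aligned to tmp_divide
+ * (4K for bridge IO, 1M for bridge (pf)mem, otherwise the requested length)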
+ *****************************************************************************/
+int ibmphp_check_resource (struct resource_node *res, u8 bridge)
+{
+ struct bus_node *bus_cur;
+ struct range_node *range = NULL;
+ struct resource_node *res_prev;
+ struct resource_node *res_cur = NULL;
+ u32 len_cur = 0, start_cur = 0, len_tmp = 0;
+ int noranges = 0;
+ u32 tmp_start; /* this is to make sure start address is divisible by the length needed */
+ u32 tmp_divide;
+ u8 flag = 0;
+
+ if (!res)
+ return -EINVAL;
+
+ if (bridge) {
+		/* the rules for bridges are different: IO must be 4K-divisible, (pf)mem 1M-divisible */
+ if (res->type == IO)
+ tmp_divide = IOBRIDGE;
+ else
+ tmp_divide = MEMBRIDGE;
+ } else
+ tmp_divide = res->len;
+
+ bus_cur = find_bus_wprev (res->busno, NULL, 0);
+
+ if (!bus_cur) {
+ /* didn't find a bus, something's wrong!!! */
+		debug ("no bus in the system; either the pci_dev is wrong or allocation failed\n");
+ return -EINVAL;
+ }
+
+ debug ("%s - enter\n", __func__);
+ debug ("bus_cur->busno is %d\n", bus_cur->busno);
+
+	/* quick fix so as not to disturb the rest of the code much: e.g. for
+	 * 2000-2fff, len = 1000, but for the comparisons below we need it to be fff */
+ res->len -= 1;
+
+ switch (res->type) {
+ case IO:
+ res_cur = bus_cur->firstIO;
+ noranges = bus_cur->noIORanges;
+ break;
+ case MEM:
+ res_cur = bus_cur->firstMem;
+ noranges = bus_cur->noMemRanges;
+ break;
+ case PFMEM:
+ res_cur = bus_cur->firstPFMem;
+ noranges = bus_cur->noPFMemRanges;
+ break;
+ default:
+ err ("wrong type of resource to check\n");
+ return -EINVAL;
+ }
+ res_prev = NULL;
+
+ while (res_cur) {
+ range = find_range (bus_cur, res_cur);
+ debug ("%s - rangeno = %d\n", __func__, res_cur->rangeno);
+
+ if (!range) {
+ err ("no range for the device exists... bailing out...\n");
+ return -EINVAL;
+ }
+
+ /* found our range */
+ if (!res_prev) {
+ /* first time in the loop */
+ len_tmp = res_cur->start - 1 - range->start;
+
+ if ((res_cur->start != range->start) && (len_tmp >= res->len)) {
+ debug ("len_tmp = %x\n", len_tmp);
+
+ if ((len_tmp < len_cur) || (len_cur == 0)) {
+
+ if ((range->start % tmp_divide) == 0) {
+ /* just perfect, starting address is divisible by length */
+ flag = 1;
+ len_cur = len_tmp;
+ start_cur = range->start;
+ } else {
+ /* Needs adjusting */
+ tmp_start = range->start;
+ flag = 0;
+
+ while ((len_tmp = res_cur->start - 1 - tmp_start) >= res->len) {
+ if ((tmp_start % tmp_divide) == 0) {
+ flag = 1;
+ len_cur = len_tmp;
+ start_cur = tmp_start;
+ break;
+ }
+ tmp_start += tmp_divide - tmp_start % tmp_divide;
+ if (tmp_start >= res_cur->start - 1)
+ break;
+ }
+ }
+
+ if (flag && len_cur == res->len) {
+ debug ("but we are not here, right?\n");
+ res->start = start_cur;
+ res->len += 1; /* To restore the balance */
+ res->end = res->start + res->len - 1;
+ return 0;
+ }
+ }
+ }
+ }
+ if (!res_cur->next) {
+ /* last device on the range */
+ len_tmp = range->end - (res_cur->end + 1);
+
+ if ((range->end != res_cur->end) && (len_tmp >= res->len)) {
+ debug ("len_tmp = %x\n", len_tmp);
+ if ((len_tmp < len_cur) || (len_cur == 0)) {
+
+ if (((res_cur->end + 1) % tmp_divide) == 0) {
+ /* just perfect, starting address is divisible by length */
+ flag = 1;
+ len_cur = len_tmp;
+ start_cur = res_cur->end + 1;
+ } else {
+ /* Needs adjusting */
+ tmp_start = res_cur->end + 1;
+ flag = 0;
+
+ while ((len_tmp = range->end - tmp_start) >= res->len) {
+ if ((tmp_start % tmp_divide) == 0) {
+ flag = 1;
+ len_cur = len_tmp;
+ start_cur = tmp_start;
+ break;
+ }
+ tmp_start += tmp_divide - tmp_start % tmp_divide;
+ if (tmp_start >= range->end)
+ break;
+ }
+ }
+ if (flag && len_cur == res->len) {
+ res->start = start_cur;
+ res->len += 1; /* To restore the balance */
+ res->end = res->start + res->len - 1;
+ return 0;
+ }
+ }
+ }
+ }
+
+ if (res_prev) {
+ if (res_prev->rangeno != res_cur->rangeno) {
+ /* 1st device on this range */
+ len_tmp = res_cur->start - 1 - range->start;
+
+ if ((res_cur->start != range->start) && (len_tmp >= res->len)) {
+ if ((len_tmp < len_cur) || (len_cur == 0)) {
+ if ((range->start % tmp_divide) == 0) {
+ /* just perfect, starting address is divisible by length */
+ flag = 1;
+ len_cur = len_tmp;
+ start_cur = range->start;
+ } else {
+ /* Needs adjusting */
+ tmp_start = range->start;
+ flag = 0;
+
+ while ((len_tmp = res_cur->start - 1 - tmp_start) >= res->len) {
+ if ((tmp_start % tmp_divide) == 0) {
+ flag = 1;
+ len_cur = len_tmp;
+ start_cur = tmp_start;
+ break;
+ }
+ tmp_start += tmp_divide - tmp_start % tmp_divide;
+ if (tmp_start >= res_cur->start - 1)
+ break;
+ }
+ }
+
+ if (flag && len_cur == res->len) {
+ res->start = start_cur;
+ res->len += 1; /* To restore the balance */
+ res->end = res->start + res->len - 1;
+ return 0;
+ }
+ }
+ }
+ } else {
+ /* in the same range */
+ len_tmp = res_cur->start - 1 - res_prev->end - 1;
+
+ if (len_tmp >= res->len) {
+ if ((len_tmp < len_cur) || (len_cur == 0)) {
+ if (((res_prev->end + 1) % tmp_divide) == 0) {
+ /* just perfect, starting address's divisible by length */
+ flag = 1;
+ len_cur = len_tmp;
+ start_cur = res_prev->end + 1;
+ } else {
+ /* Needs adjusting */
+ tmp_start = res_prev->end + 1;
+ flag = 0;
+
+ while ((len_tmp = res_cur->start - 1 - tmp_start) >= res->len) {
+ if ((tmp_start % tmp_divide) == 0) {
+ flag = 1;
+ len_cur = len_tmp;
+ start_cur = tmp_start;
+ break;
+ }
+ tmp_start += tmp_divide - tmp_start % tmp_divide;
+ if (tmp_start >= res_cur->start - 1)
+ break;
+ }
+ }
+
+ if (flag && len_cur == res->len) {
+ res->start = start_cur;
+ res->len += 1; /* To restore the balance */
+ res->end = res->start + res->len - 1;
+ return 0;
+ }
+ }
+ }
+ }
+ }
+ /* end if (res_prev) */
+ res_prev = res_cur;
+ if (res_cur->next)
+ res_cur = res_cur->next;
+ else
+ res_cur = res_cur->nextRange;
+ } /* end of while */
+
+
+ if (!res_prev) {
+ /* 1st device ever */
+ /* need to find appropriate range */
+ switch (res->type) {
+ case IO:
+ range = bus_cur->rangeIO;
+ break;
+ case MEM:
+ range = bus_cur->rangeMem;
+ break;
+ case PFMEM:
+ range = bus_cur->rangePFMem;
+ break;
+ }
+ while (range) {
+ len_tmp = range->end - range->start;
+
+ if (len_tmp >= res->len) {
+ if ((len_tmp < len_cur) || (len_cur == 0)) {
+ if ((range->start % tmp_divide) == 0) {
+ /* just perfect, starting address's divisible by length */
+ flag = 1;
+ len_cur = len_tmp;
+ start_cur = range->start;
+ } else {
+ /* Needs adjusting */
+ tmp_start = range->start;
+ flag = 0;
+
+ while ((len_tmp = range->end - tmp_start) >= res->len) {
+ if ((tmp_start % tmp_divide) == 0) {
+ flag = 1;
+ len_cur = len_tmp;
+ start_cur = tmp_start;
+ break;
+ }
+ tmp_start += tmp_divide - tmp_start % tmp_divide;
+ if (tmp_start >= range->end)
+ break;
+ }
+ }
+
+ if (flag && len_cur == res->len) {
+ res->start = start_cur;
+ res->len += 1; /* To restore the balance */
+ res->end = res->start + res->len - 1;
+ return 0;
+ }
+ }
+ }
+ range = range->next;
+ } /* end of while */
+
+ if ((!range) && (len_cur == 0)) {
+			/* have gone through the list of devices and ranges and haven't found anything */
+			err ("no appropriate range... bailing out...\n");
+ return -EINVAL;
+ } else if (len_cur) {
+ res->start = start_cur;
+ res->len += 1; /* To restore the balance */
+ res->end = res->start + res->len - 1;
+ return 0;
+ }
+ }
+
+ if (!res_cur) {
+ debug ("prev->rangeno = %d, noranges = %d\n", res_prev->rangeno, noranges);
+ if (res_prev->rangeno < noranges) {
+			/* there are more ranges out there to check */
+ switch (res->type) {
+ case IO:
+ range = bus_cur->rangeIO;
+ break;
+ case MEM:
+ range = bus_cur->rangeMem;
+ break;
+ case PFMEM:
+ range = bus_cur->rangePFMem;
+ break;
+ }
+ while (range) {
+ len_tmp = range->end - range->start;
+
+ if (len_tmp >= res->len) {
+ if ((len_tmp < len_cur) || (len_cur == 0)) {
+ if ((range->start % tmp_divide) == 0) {
+ /* just perfect, starting address's divisible by length */
+ flag = 1;
+ len_cur = len_tmp;
+ start_cur = range->start;
+ } else {
+ /* Needs adjusting */
+ tmp_start = range->start;
+ flag = 0;
+
+ while ((len_tmp = range->end - tmp_start) >= res->len) {
+ if ((tmp_start % tmp_divide) == 0) {
+ flag = 1;
+ len_cur = len_tmp;
+ start_cur = tmp_start;
+ break;
+ }
+ tmp_start += tmp_divide - tmp_start % tmp_divide;
+ if (tmp_start >= range->end)
+ break;
+ }
+ }
+
+ if (flag && len_cur == res->len) {
+ res->start = start_cur;
+ res->len += 1; /* To restore the balance */
+ res->end = res->start + res->len - 1;
+ return 0;
+ }
+ }
+ }
+ range = range->next;
+ } /* end of while */
+
+ if ((!range) && (len_cur == 0)) {
+				/* have gone through the list of devices and ranges and haven't found anything */
+				err ("no appropriate range... bailing out...\n");
+ return -EINVAL;
+ } else if (len_cur) {
+ res->start = start_cur;
+ res->len += 1; /* To restore the balance */
+ res->end = res->start + res->len - 1;
+ return 0;
+ }
+ } else {
+ /* no more ranges to check on */
+ if (len_cur) {
+ res->start = start_cur;
+ res->len += 1; /* To restore the balance */
+ res->end = res->start + res->len - 1;
+ return 0;
+ } else {
+				/* have gone through the list of devices and haven't found anything */
+				err ("no appropriate range... bailing out...\n");
+ return -EINVAL;
+ }
+ }
+ } /* end if (!res_cur) */
+ return -EINVAL;
+}
+
+/********************************************************************************
+ * This routine is called from remove_card if the card contained a PPB.
+ * It will remove all the resources on the bus as well as the bus itself
+ * Input: Bus
+ * Output: 0, -ENODEV
+ ********************************************************************************/
+int ibmphp_remove_bus (struct bus_node *bus, u8 parent_busno)
+{
+ struct resource_node *res_cur;
+ struct resource_node *res_tmp;
+ struct bus_node *prev_bus;
+ int rc;
+
+ prev_bus = find_bus_wprev (parent_busno, NULL, 0);
+
+ if (!prev_bus) {
+		debug ("something is terribly wrong: cannot find the parent bus of the one to remove\n");
+ return -ENODEV;
+ }
+
+ debug ("In ibmphp_remove_bus... prev_bus->busno is %x\n", prev_bus->busno);
+
+ rc = remove_ranges (bus, prev_bus);
+ if (rc)
+ return rc;
+
+ if (bus->firstIO) {
+ res_cur = bus->firstIO;
+ while (res_cur) {
+ res_tmp = res_cur;
+ if (res_cur->next)
+ res_cur = res_cur->next;
+ else
+ res_cur = res_cur->nextRange;
+ kfree (res_tmp);
+ res_tmp = NULL;
+ }
+ bus->firstIO = NULL;
+ }
+ if (bus->firstMem) {
+ res_cur = bus->firstMem;
+ while (res_cur) {
+ res_tmp = res_cur;
+ if (res_cur->next)
+ res_cur = res_cur->next;
+ else
+ res_cur = res_cur->nextRange;
+ kfree (res_tmp);
+ res_tmp = NULL;
+ }
+ bus->firstMem = NULL;
+ }
+ if (bus->firstPFMem) {
+ res_cur = bus->firstPFMem;
+ while (res_cur) {
+ res_tmp = res_cur;
+ if (res_cur->next)
+ res_cur = res_cur->next;
+ else
+ res_cur = res_cur->nextRange;
+ kfree (res_tmp);
+ res_tmp = NULL;
+ }
+ bus->firstPFMem = NULL;
+ }
+
+ if (bus->firstPFMemFromMem) {
+ res_cur = bus->firstPFMemFromMem;
+ while (res_cur) {
+ res_tmp = res_cur;
+ res_cur = res_cur->next;
+
+ kfree (res_tmp);
+ res_tmp = NULL;
+ }
+ bus->firstPFMemFromMem = NULL;
+ }
+
+ list_del (&bus->bus_list);
+ kfree (bus);
+ return 0;
+}
+
+/******************************************************************************
+ * This routine deletes the ranges from a given bus, and the corresponding
+ * entries from the parent bus's resource lists
+ * Input: current bus, previous (parent) bus
+ * Output: 0, -EINVAL
+ ******************************************************************************/
+static int remove_ranges (struct bus_node *bus_cur, struct bus_node *bus_prev)
+{
+ struct range_node *range_cur;
+ struct range_node *range_tmp;
+ int i;
+ struct resource_node *res = NULL;
+
+ if (bus_cur->noIORanges) {
+ range_cur = bus_cur->rangeIO;
+ for (i = 0; i < bus_cur->noIORanges; i++) {
+ if (ibmphp_find_resource (bus_prev, range_cur->start, &res, IO) < 0)
+ return -EINVAL;
+ ibmphp_remove_resource (res);
+
+ range_tmp = range_cur;
+ range_cur = range_cur->next;
+ kfree (range_tmp);
+ range_tmp = NULL;
+ }
+ bus_cur->rangeIO = NULL;
+ }
+ if (bus_cur->noMemRanges) {
+ range_cur = bus_cur->rangeMem;
+ for (i = 0; i < bus_cur->noMemRanges; i++) {
+ if (ibmphp_find_resource (bus_prev, range_cur->start, &res, MEM) < 0)
+ return -EINVAL;
+
+ ibmphp_remove_resource (res);
+ range_tmp = range_cur;
+ range_cur = range_cur->next;
+ kfree (range_tmp);
+ range_tmp = NULL;
+ }
+ bus_cur->rangeMem = NULL;
+ }
+ if (bus_cur->noPFMemRanges) {
+ range_cur = bus_cur->rangePFMem;
+ for (i = 0; i < bus_cur->noPFMemRanges; i++) {
+ if (ibmphp_find_resource (bus_prev, range_cur->start, &res, PFMEM) < 0)
+ return -EINVAL;
+
+ ibmphp_remove_resource (res);
+ range_tmp = range_cur;
+ range_cur = range_cur->next;
+ kfree (range_tmp);
+ range_tmp = NULL;
+ }
+ bus_cur->rangePFMem = NULL;
+ }
+ return 0;
+}
+
+/*
+ * find the resource node on the bus
+ * Input: bus to search, start address of the resource, out-pointer for the
+ * result, type of resource
+ */
+int ibmphp_find_resource (struct bus_node *bus, u32 start_address, struct resource_node **res, int flag)
+{
+ struct resource_node *res_cur = NULL;
+ char * type = "";
+
+ if (!bus) {
+		err ("NULL bus passed in to find resource\n");
+ return -ENODEV;
+ }
+
+ switch (flag) {
+ case IO:
+ res_cur = bus->firstIO;
+ type = "io";
+ break;
+ case MEM:
+ res_cur = bus->firstMem;
+ type = "mem";
+ break;
+ case PFMEM:
+ res_cur = bus->firstPFMem;
+ type = "pfmem";
+ break;
+ default:
+ err ("wrong type of flag\n");
+ return -EINVAL;
+ }
+
+ while (res_cur) {
+ if (res_cur->start == start_address) {
+ *res = res_cur;
+ break;
+ }
+ if (res_cur->next)
+ res_cur = res_cur->next;
+ else
+ res_cur = res_cur->nextRange;
+ }
+
+ if (!res_cur) {
+ if (flag == PFMEM) {
+ res_cur = bus->firstPFMemFromMem;
+ while (res_cur) {
+ if (res_cur->start == start_address) {
+ *res = res_cur;
+ break;
+ }
+ res_cur = res_cur->next;
+ }
+ if (!res_cur) {
+				debug ("SOS... cannot find %s resource in the bus.\n", type);
+ return -EINVAL;
+ }
+ } else {
+ debug ("SOS... cannot find %s resource in the bus.\n", type);
+ return -EINVAL;
+ }
+ }
+
+ if (*res)
+ debug ("*res->start = %x\n", (*res)->start);
+
+ return 0;
+}
+
+/***********************************************************************
+ * This routine will free the resource structures used by the
+ * system. It is called from the module's cleanup routine
+ * Parameters: none
+ * Returns: none
+ ***********************************************************************/
+void ibmphp_free_resources (void)
+{
+ struct bus_node *bus_cur = NULL;
+ struct bus_node *bus_tmp;
+ struct range_node *range_cur;
+ struct range_node *range_tmp;
+ struct resource_node *res_cur;
+ struct resource_node *res_tmp;
+ struct list_head *tmp;
+ struct list_head *next;
+	int i = 0;
+
+	flags = 1;
+
+ list_for_each_safe (tmp, next, &gbuses) {
+ bus_cur = list_entry (tmp, struct bus_node, bus_list);
+ if (bus_cur->noIORanges) {
+ range_cur = bus_cur->rangeIO;
+ for (i = 0; i < bus_cur->noIORanges; i++) {
+ if (!range_cur)
+ break;
+ range_tmp = range_cur;
+ range_cur = range_cur->next;
+ kfree (range_tmp);
+ range_tmp = NULL;
+ }
+ }
+ if (bus_cur->noMemRanges) {
+ range_cur = bus_cur->rangeMem;
+ for (i = 0; i < bus_cur->noMemRanges; i++) {
+ if (!range_cur)
+ break;
+ range_tmp = range_cur;
+ range_cur = range_cur->next;
+ kfree (range_tmp);
+ range_tmp = NULL;
+ }
+ }
+ if (bus_cur->noPFMemRanges) {
+ range_cur = bus_cur->rangePFMem;
+ for (i = 0; i < bus_cur->noPFMemRanges; i++) {
+ if (!range_cur)
+ break;
+ range_tmp = range_cur;
+ range_cur = range_cur->next;
+ kfree (range_tmp);
+ range_tmp = NULL;
+ }
+ }
+
+ if (bus_cur->firstIO) {
+ res_cur = bus_cur->firstIO;
+ while (res_cur) {
+ res_tmp = res_cur;
+ if (res_cur->next)
+ res_cur = res_cur->next;
+ else
+ res_cur = res_cur->nextRange;
+ kfree (res_tmp);
+ res_tmp = NULL;
+ }
+ bus_cur->firstIO = NULL;
+ }
+ if (bus_cur->firstMem) {
+ res_cur = bus_cur->firstMem;
+ while (res_cur) {
+ res_tmp = res_cur;
+ if (res_cur->next)
+ res_cur = res_cur->next;
+ else
+ res_cur = res_cur->nextRange;
+ kfree (res_tmp);
+ res_tmp = NULL;
+ }
+ bus_cur->firstMem = NULL;
+ }
+ if (bus_cur->firstPFMem) {
+ res_cur = bus_cur->firstPFMem;
+ while (res_cur) {
+ res_tmp = res_cur;
+ if (res_cur->next)
+ res_cur = res_cur->next;
+ else
+ res_cur = res_cur->nextRange;
+ kfree (res_tmp);
+ res_tmp = NULL;
+ }
+ bus_cur->firstPFMem = NULL;
+ }
+
+ if (bus_cur->firstPFMemFromMem) {
+ res_cur = bus_cur->firstPFMemFromMem;
+ while (res_cur) {
+ res_tmp = res_cur;
+ res_cur = res_cur->next;
+
+ kfree (res_tmp);
+ res_tmp = NULL;
+ }
+ bus_cur->firstPFMemFromMem = NULL;
+ }
+
+ bus_tmp = bus_cur;
+ list_del (&bus_cur->bus_list);
+ kfree (bus_tmp);
+ bus_tmp = NULL;
+ }
+}
+
+/*********************************************************************************
+ * This function goes over the PFMem resources to check whether the EBDA allocated
+ * pfmem out of the bus's memory buckets. If so, it changes the range numbers and
+ * sets a flag to indicate that this resource was taken from memory. It also moves
+ * the pfmem off the pfmem resource list onto the PFMemFromMem list, and creates
+ * a new Mem node
+ * This routine is called right after initialization
+ *******************************************************************************/
+static int __init once_over (void)
+{
+ struct resource_node *pfmem_cur;
+ struct resource_node *pfmem_prev;
+ struct resource_node *mem;
+ struct bus_node *bus_cur;
+ struct list_head *tmp;
+
+ list_for_each (tmp, &gbuses) {
+ bus_cur = list_entry (tmp, struct bus_node, bus_list);
+ if ((!bus_cur->rangePFMem) && (bus_cur->firstPFMem)) {
+ for (pfmem_cur = bus_cur->firstPFMem, pfmem_prev = NULL; pfmem_cur; pfmem_prev = pfmem_cur, pfmem_cur = pfmem_cur->next) {
+ pfmem_cur->fromMem = 1;
+ if (pfmem_prev)
+ pfmem_prev->next = pfmem_cur->next;
+ else
+ bus_cur->firstPFMem = pfmem_cur->next;
+
+ if (!bus_cur->firstPFMemFromMem)
+ pfmem_cur->next = NULL;
+ else
+					/* we don't need to sort PFMemFromMem since we're using the
+					   mem node for all the real work anyway, so just insert at
+					   the beginning of the list
+					 */
+ pfmem_cur->next = bus_cur->firstPFMemFromMem;
+
+ bus_cur->firstPFMemFromMem = pfmem_cur;
+
+ mem = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
+ if (!mem) {
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ mem->type = MEM;
+ mem->busno = pfmem_cur->busno;
+ mem->devfunc = pfmem_cur->devfunc;
+ mem->start = pfmem_cur->start;
+ mem->end = pfmem_cur->end;
+ mem->len = pfmem_cur->len;
+ if (ibmphp_add_resource (mem) < 0)
+					err ("trouble... EBDA allocated pfmem from mem, but the system doesn't show it has this space... unless it is not a PCI device...\n");
+ pfmem_cur->rangeno = mem->rangeno;
+ } /* end for pfmem */
+ } /* end if */
+ } /* end list_for_each bus */
+ return 0;
+}
+
+int ibmphp_add_pfmem_from_mem (struct resource_node *pfmem)
+{
+ struct bus_node *bus_cur = find_bus_wprev (pfmem->busno, NULL, 0);
+
+ if (!bus_cur) {
+ err ("cannot find bus of pfmem to add...\n");
+ return -ENODEV;
+ }
+
+ if (bus_cur->firstPFMemFromMem)
+ pfmem->next = bus_cur->firstPFMemFromMem;
+ else
+ pfmem->next = NULL;
+
+ bus_cur->firstPFMemFromMem = pfmem;
+
+ return 0;
+}
+
+/* This routine just goes through the buses to see if the bus already exists.
+ * It is called from find_sec_number(), to find a secondary bus number for
+ * bridged cards
+ * Parameters: bus_number
+ * Returns: Bus pointer or NULL
+ */
+struct bus_node *ibmphp_find_res_bus (u8 bus_number)
+{
+ return find_bus_wprev (bus_number, NULL, 0);
+}
+
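+/*
+ * walk the global bus list looking for bus_number; if flag is set, also
+ * hand back the preceding list entry through *prev
+ */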
+static struct bus_node *find_bus_wprev (u8 bus_number, struct bus_node **prev, u8 flag)
+{
+ struct bus_node *bus_cur;
+ struct list_head *tmp;
+ struct list_head *tmp_prev;
+
+ list_for_each (tmp, &gbuses) {
+ tmp_prev = tmp->prev;
+ bus_cur = list_entry (tmp, struct bus_node, bus_list);
+ if (flag)
+ *prev = list_entry (tmp_prev, struct bus_node, bus_list);
+ if (bus_cur->busno == bus_number)
+ return bus_cur;
+ }
+
+ return NULL;
+}
+
+void ibmphp_print_test (void)
+{
+ int i = 0;
+ struct bus_node *bus_cur = NULL;
+ struct range_node *range;
+ struct resource_node *res;
+ struct list_head *tmp;
+
+ debug_pci ("*****************START**********************\n");
+
+ if ((!list_empty(&gbuses)) && flags) {
+		err ("the gbuses list is not empty after cleanup?!\n");
+ return;
+ }
+
+ list_for_each (tmp, &gbuses) {
+ bus_cur = list_entry (tmp, struct bus_node, bus_list);
+ debug_pci ("This is bus # %d. There are\n", bus_cur->busno);
+ debug_pci ("IORanges = %d\t", bus_cur->noIORanges);
+ debug_pci ("MemRanges = %d\t", bus_cur->noMemRanges);
+ debug_pci ("PFMemRanges = %d\n", bus_cur->noPFMemRanges);
+ debug_pci ("The IO Ranges are as follows:\n");
+ if (bus_cur->rangeIO) {
+ range = bus_cur->rangeIO;
+ for (i = 0; i < bus_cur->noIORanges; i++) {
+ debug_pci ("rangeno is %d\n", range->rangeno);
+ debug_pci ("[%x - %x]\n", range->start, range->end);
+ range = range->next;
+ }
+ }
+
+ debug_pci ("The Mem Ranges are as follows:\n");
+ if (bus_cur->rangeMem) {
+ range = bus_cur->rangeMem;
+ for (i = 0; i < bus_cur->noMemRanges; i++) {
+ debug_pci ("rangeno is %d\n", range->rangeno);
+ debug_pci ("[%x - %x]\n", range->start, range->end);
+ range = range->next;
+ }
+ }
+
+ debug_pci ("The PFMem Ranges are as follows:\n");
+
+ if (bus_cur->rangePFMem) {
+ range = bus_cur->rangePFMem;
+ for (i = 0; i < bus_cur->noPFMemRanges; i++) {
+ debug_pci ("rangeno is %d\n", range->rangeno);
+ debug_pci ("[%x - %x]\n", range->start, range->end);
+ range = range->next;
+ }
+ }
+
+ debug_pci ("The resources on this bus are as follows\n");
+
+ debug_pci ("IO...\n");
+ if (bus_cur->firstIO) {
+ res = bus_cur->firstIO;
+ while (res) {
+ debug_pci ("The range # is %d\n", res->rangeno);
+ debug_pci ("The bus, devfnc is %d, %x\n", res->busno, res->devfunc);
+ debug_pci ("[%x - %x], len=%x\n", res->start, res->end, res->len);
+ if (res->next)
+ res = res->next;
+ else if (res->nextRange)
+ res = res->nextRange;
+ else
+ break;
+ }
+ }
+ debug_pci ("Mem...\n");
+ if (bus_cur->firstMem) {
+ res = bus_cur->firstMem;
+ while (res) {
+ debug_pci ("The range # is %d\n", res->rangeno);
+ debug_pci ("The bus, devfnc is %d, %x\n", res->busno, res->devfunc);
+ debug_pci ("[%x - %x], len=%x\n", res->start, res->end, res->len);
+ if (res->next)
+ res = res->next;
+ else if (res->nextRange)
+ res = res->nextRange;
+ else
+ break;
+ }
+ }
+ debug_pci ("PFMem...\n");
+ if (bus_cur->firstPFMem) {
+ res = bus_cur->firstPFMem;
+ while (res) {
+ debug_pci ("The range # is %d\n", res->rangeno);
+ debug_pci ("The bus, devfnc is %d, %x\n", res->busno, res->devfunc);
+ debug_pci ("[%x - %x], len=%x\n", res->start, res->end, res->len);
+ if (res->next)
+ res = res->next;
+ else if (res->nextRange)
+ res = res->nextRange;
+ else
+ break;
+ }
+ }
+
+ debug_pci ("PFMemFromMem...\n");
+ if (bus_cur->firstPFMemFromMem) {
+ res = bus_cur->firstPFMemFromMem;
+ while (res) {
+ debug_pci ("The range # is %d\n", res->rangeno);
+ debug_pci ("The bus, devfnc is %d, %x\n", res->busno, res->devfunc);
+ debug_pci ("[%x - %x], len=%x\n", res->start, res->end, res->len);
+ res = res->next;
+ }
+ }
+ }
+ debug_pci ("***********************END***********************\n");
+}
+
+static int range_exists_already (struct range_node *range, struct bus_node *bus_cur, u8 type)
+{
+	struct range_node *range_cur = NULL;
+ switch (type) {
+ case IO:
+ range_cur = bus_cur->rangeIO;
+ break;
+ case MEM:
+ range_cur = bus_cur->rangeMem;
+ break;
+ case PFMEM:
+ range_cur = bus_cur->rangePFMem;
+ break;
+ default:
+ err ("wrong type passed to find out if range already exists\n");
+ return -ENODEV;
+ }
+
+ while (range_cur) {
+ if ((range_cur->start == range->start) && (range_cur->end == range->end))
+ return 1;
+ range_cur = range_cur->next;
+ }
+
+ return 0;
+}
+
+/* This routine reads the windows of any PPB we have and updates the
+ * range info for the secondary bus, and also enters this info into the
+ * primary bus, since the BIOS doesn't. This covers PPBs that are in the
+ * system at bootup. For bridged cards that were added during a previous
+ * load of the driver, only the ranges and the bus structure are added;
+ * the devices are added from NVRAM.
+ * Input: primary busno
+ * Returns: 0 on success, negative errno on failure
+ * Note: this function doesn't take IO restrictions etc. into account, so
+ * it will only work for bridges with no video/ISA devices behind them. It
+ * also will not work for onboard PPBs that can have more than 1 bus
+ * behind them. All of these are TO DO.
+ * Also needs more error checking (of return values from called
+ * functions, etc.)
+ */
+static int __init update_bridge_ranges (struct bus_node **bus)
+{
+ u8 sec_busno, device, function, hdr_type, start_io_address, end_io_address;
+ u16 vendor_id, upper_io_start, upper_io_end, start_mem_address, end_mem_address;
+ u32 start_address, end_address, upper_start, upper_end;
+ struct bus_node *bus_sec;
+ struct bus_node *bus_cur;
+ struct resource_node *io;
+ struct resource_node *mem;
+ struct resource_node *pfmem;
+ struct range_node *range;
+ unsigned int devfn;
+
+ bus_cur = *bus;
+ if (!bus_cur)
+ return -ENODEV;
+ ibmphp_pci_bus->number = bus_cur->busno;
+
+ debug ("inside %s\n", __func__);
+ debug ("bus_cur->busno = %x\n", bus_cur->busno);
+
+ for (device = 0; device < 32; device++) {
+ for (function = 0x00; function < 0x08; function++) {
+ devfn = PCI_DEVFN(device, function);
+ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_VENDOR_ID, &vendor_id);
+
+ if (vendor_id != PCI_VENDOR_ID_NOTVALID) {
+ /* found correct device!!! */
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_HEADER_TYPE, &hdr_type);
+
+ switch (hdr_type) {
+ case PCI_HEADER_TYPE_NORMAL:
+ function = 0x8;
+ break;
+ case PCI_HEADER_TYPE_MULTIDEVICE:
+ break;
+ case PCI_HEADER_TYPE_BRIDGE:
+ function = 0x8;
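+ /* fall through */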
+ case PCI_HEADER_TYPE_MULTIBRIDGE:
+ /* We assume here that there is only 1 bus behind the bridge.
+ TO DO: add functionality for several:
+ temp = secondary;
+ while (temp < subordinate) {
+ ...
+ temp++;
+ }
+ */
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_SECONDARY_BUS, &sec_busno);
+ bus_sec = find_bus_wprev (sec_busno, NULL, 0);
+ /* this bus structure doesn't exist yet, PPB was configured during previous loading of ibmphp */
+ if (!bus_sec) {
+ bus_sec = alloc_error_bus (NULL, sec_busno, 1);
+ /* the rest will be populated during NVRAM call */
+ return 0;
+ }
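+ /* Decode the bridge windows per the PCI-to-PCI bridge spec:
+ * the I/O base/limit registers hold address bits [15:12]
+ * (bits [31:16] come from the upper16 registers), the memory
+ * base/limit registers hold address bits [31:20], and the
+ * prefetchable window adds the bits above 32 from the upper32
+ * registers on 64-bit builds. Limit registers imply all-ones
+ * in the low bits, hence the "+ 0xfff" / "+ 0xfffff" below.
+ */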
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_IO_BASE, &start_io_address);
+ pci_bus_read_config_byte (ibmphp_pci_bus, devfn, PCI_IO_LIMIT, &end_io_address);
+ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_IO_BASE_UPPER16, &upper_io_start);
+ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_IO_LIMIT_UPPER16, &upper_io_end);
+ start_address = (start_io_address & PCI_IO_RANGE_MASK) << 8;
+ start_address |= (upper_io_start << 16);
+ end_address = (end_io_address & PCI_IO_RANGE_MASK) << 8;
+ end_address |= (upper_io_end << 16);
+
+ if ((start_address) && (start_address <= end_address)) {
+ range = kzalloc(sizeof(struct range_node), GFP_KERNEL);
+ if (!range) {
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ range->start = start_address;
+ range->end = end_address + 0xfff;
+
+ if (bus_sec->noIORanges > 0) {
+ if (!range_exists_already (range, bus_sec, IO)) {
+ add_bus_range (IO, range, bus_sec);
+ ++bus_sec->noIORanges;
+ } else {
+ kfree (range);
+ range = NULL;
+ }
+ } else {
+ /* 1st IO Range on the bus */
+ range->rangeno = 1;
+ bus_sec->rangeIO = range;
+ ++bus_sec->noIORanges;
+ }
+ fix_resources (bus_sec);
+
+ if (ibmphp_find_resource (bus_cur, start_address, &io, IO)) {
+ io = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
+ if (!io) {
+ kfree (range);
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ io->type = IO;
+ io->busno = bus_cur->busno;
+ io->devfunc = ((device << 3) | (function & 0x7));
+ io->start = start_address;
+ io->end = end_address + 0xfff;
+ io->len = io->end - io->start + 1;
+ ibmphp_add_resource (io);
+ }
+ }
+
+ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_BASE, &start_mem_address);
+ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_MEMORY_LIMIT, &end_mem_address);
+
+ start_address = 0x00000000 | (start_mem_address & PCI_MEMORY_RANGE_MASK) << 16;
+ end_address = 0x00000000 | (end_mem_address & PCI_MEMORY_RANGE_MASK) << 16;
+
+ if ((start_address) && (start_address <= end_address)) {
+
+ range = kzalloc(sizeof(struct range_node), GFP_KERNEL);
+ if (!range) {
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ range->start = start_address;
+ range->end = end_address + 0xfffff;
+
+ if (bus_sec->noMemRanges > 0) {
+ if (!range_exists_already (range, bus_sec, MEM)) {
+ add_bus_range (MEM, range, bus_sec);
+ ++bus_sec->noMemRanges;
+ } else {
+ kfree (range);
+ range = NULL;
+ }
+ } else {
+ /* 1st Mem Range on the bus */
+ range->rangeno = 1;
+ bus_sec->rangeMem = range;
+ ++bus_sec->noMemRanges;
+ }
+
+ fix_resources (bus_sec);
+
+ if (ibmphp_find_resource (bus_cur, start_address, &mem, MEM)) {
+ mem = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
+ if (!mem) {
+ kfree (range);
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ mem->type = MEM;
+ mem->busno = bus_cur->busno;
+ mem->devfunc = ((device << 3) | (function & 0x7));
+ mem->start = start_address;
+ mem->end = end_address + 0xfffff;
+ mem->len = mem->end - mem->start + 1;
+ ibmphp_add_resource (mem);
+ }
+ }
+ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_BASE, &start_mem_address);
+ pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &end_mem_address);
+ pci_bus_read_config_dword (ibmphp_pci_bus, devfn, PCI_PREF_BASE_UPPER32, &upper_start);
+ pci_bus_read_config_dword (ibmphp_pci_bus, devfn, PCI_PREF_LIMIT_UPPER32, &upper_end);
+ start_address = 0x00000000 | (start_mem_address & PCI_MEMORY_RANGE_MASK) << 16;
+ end_address = 0x00000000 | (end_mem_address & PCI_MEMORY_RANGE_MASK) << 16;
+#if BITS_PER_LONG == 64
+ start_address |= ((long) upper_start) << 32;
+ end_address |= ((long) upper_end) << 32;
+#endif
+
+ if ((start_address) && (start_address <= end_address)) {
+
+ range = kzalloc(sizeof(struct range_node), GFP_KERNEL);
+ if (!range) {
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ range->start = start_address;
+ range->end = end_address + 0xfffff;
+
+ if (bus_sec->noPFMemRanges > 0) {
+ if (!range_exists_already (range, bus_sec, PFMEM)) {
+ add_bus_range (PFMEM, range, bus_sec);
+ ++bus_sec->noPFMemRanges;
+ } else {
+ kfree (range);
+ range = NULL;
+ }
+ } else {
+ /* 1st PFMem Range on the bus */
+ range->rangeno = 1;
+ bus_sec->rangePFMem = range;
+ ++bus_sec->noPFMemRanges;
+ }
+
+ fix_resources (bus_sec);
+ if (ibmphp_find_resource (bus_cur, start_address, &pfmem, PFMEM)) {
+ pfmem = kzalloc(sizeof(struct resource_node), GFP_KERNEL);
+ if (!pfmem) {
+ kfree (range);
+ err ("out of system memory\n");
+ return -ENOMEM;
+ }
+ pfmem->type = PFMEM;
+ pfmem->busno = bus_cur->busno;
+ pfmem->devfunc = ((device << 3) | (function & 0x7));
+ pfmem->start = start_address;
+ pfmem->end = end_address + 0xfffff;
+ pfmem->len = pfmem->end - pfmem->start + 1;
+ pfmem->fromMem = 0;
+
+ ibmphp_add_resource (pfmem);
+ }
+ }
+ break;
+ } /* end of switch */
+ } /* end if vendor */
+ } /* end for function */
+ } /* end for device */
+
+ bus = &bus_cur;
+ return 0;
+}
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
new file mode 100644
index 000000000..56d8486dc
--- /dev/null
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -0,0 +1,555 @@
+/*
+ * PCI HotPlug Controller Core
+ *
+ * Copyright (C) 2001-2002 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001-2002 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <kristen.c.accardi@intel.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <asm/uaccess.h>
+#include "../pci.h"
+#include "cpci_hotplug.h"
+
+#define MY_NAME "pci_hotplug"
+
+#define dbg(fmt, arg...) do { if (debug) printk(KERN_DEBUG "%s: %s: " fmt , MY_NAME , __func__ , ## arg); } while (0)
+#define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg)
+#define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg)
+#define warn(format, arg...) printk(KERN_WARNING "%s: " format , MY_NAME , ## arg)
+
+
+/* local variables */
+static bool debug;
+
+#define DRIVER_VERSION "0.5"
+#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Scott Murray <scottm@somanetworks.com>"
+#define DRIVER_DESC "PCI Hot Plug PCI Core"
+
+
+static LIST_HEAD(pci_hotplug_slot_list);
+static DEFINE_MUTEX(pci_hp_mutex);
+
+/* Weee, fun with macros... */
+#define GET_STATUS(name, type) \
+static int get_##name(struct hotplug_slot *slot, type *value) \
+{ \
+ struct hotplug_slot_ops *ops = slot->ops; \
+ int retval = 0; \
+ if (!try_module_get(ops->owner)) \
+ return -ENODEV; \
+ if (ops->get_##name) \
+ retval = ops->get_##name(slot, value); \
+ else \
+ *value = slot->info->name; \
+ module_put(ops->owner); \
+ return retval; \
+}
+
+GET_STATUS(power_status, u8)
+GET_STATUS(attention_status, u8)
+GET_STATUS(latch_status, u8)
+GET_STATUS(adapter_status, u8)
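+
+/*
+ * For illustration: GET_STATUS(power_status, u8) above expands to roughly
+ * the following (the other three wrappers are analogous):
+ *
+ *	static int get_power_status(struct hotplug_slot *slot, u8 *value)
+ *	{
+ *		struct hotplug_slot_ops *ops = slot->ops;
+ *		int retval = 0;
+ *		if (!try_module_get(ops->owner))
+ *			return -ENODEV;
+ *		if (ops->get_power_status)
+ *			retval = ops->get_power_status(slot, value);
+ *		else
+ *			*value = slot->info->power_status;
+ *		module_put(ops->owner);
+ *		return retval;
+ *	}
+ */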
+
+static ssize_t power_read_file(struct pci_slot *slot, char *buf)
+{
+ int retval;
+ u8 value;
+
+ retval = get_power_status(slot->hotplug, &value);
+ if (retval)
+ return retval;
+
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
+ size_t count)
+{
+ struct hotplug_slot *slot = pci_slot->hotplug;
+ unsigned long lpower;
+ u8 power;
+ int retval = 0;
+
+ lpower = simple_strtoul(buf, NULL, 10);
+ power = (u8)(lpower & 0xff);
+ dbg("power = %d\n", power);
+
+ if (!try_module_get(slot->ops->owner)) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ switch (power) {
+ case 0:
+ if (slot->ops->disable_slot)
+ retval = slot->ops->disable_slot(slot);
+ break;
+
+ case 1:
+ if (slot->ops->enable_slot)
+ retval = slot->ops->enable_slot(slot);
+ break;
+
+ default:
+ err("Illegal value specified for power\n");
+ retval = -EINVAL;
+ }
+ module_put(slot->ops->owner);
+
+exit:
+ if (retval)
+ return retval;
+ return count;
+}
+
+static struct pci_slot_attribute hotplug_slot_attr_power = {
+ .attr = {.name = "power", .mode = S_IFREG | S_IRUGO | S_IWUSR},
+ .show = power_read_file,
+ .store = power_write_file
+};
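+
+/*
+ * These attribute files appear under /sys/bus/pci/slots/<slot name>/.
+ * For example, a hypothetical slot named "2" could be powered off from
+ * userspace with:
+ *
+ *	echo 0 > /sys/bus/pci/slots/2/power
+ */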
+
+static ssize_t attention_read_file(struct pci_slot *slot, char *buf)
+{
+ int retval;
+ u8 value;
+
+ retval = get_attention_status(slot->hotplug, &value);
+ if (retval)
+ return retval;
+
+ return sprintf(buf, "%d\n", value);
+}
+
+static ssize_t attention_write_file(struct pci_slot *slot, const char *buf,
+ size_t count)
+{
+ struct hotplug_slot_ops *ops = slot->hotplug->ops;
+ unsigned long lattention;
+ u8 attention;
+ int retval = 0;
+
+ lattention = simple_strtoul(buf, NULL, 10);
+ attention = (u8)(lattention & 0xff);
+ dbg(" - attention = %d\n", attention);
+
+ if (!try_module_get(ops->owner)) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (ops->set_attention_status)
+ retval = ops->set_attention_status(slot->hotplug, attention);
+ module_put(ops->owner);
+
+exit:
+ if (retval)
+ return retval;
+ return count;
+}
+
+static struct pci_slot_attribute hotplug_slot_attr_attention = {
+ .attr = {.name = "attention", .mode = S_IFREG | S_IRUGO | S_IWUSR},
+ .show = attention_read_file,
+ .store = attention_write_file
+};
+
+static ssize_t latch_read_file(struct pci_slot *slot, char *buf)
+{
+ int retval;
+ u8 value;
+
+ retval = get_latch_status(slot->hotplug, &value);
+ if (retval)
+ return retval;
+
+ return sprintf(buf, "%d\n", value);
+}
+
+static struct pci_slot_attribute hotplug_slot_attr_latch = {
+ .attr = {.name = "latch", .mode = S_IFREG | S_IRUGO},
+ .show = latch_read_file,
+};
+
+static ssize_t presence_read_file(struct pci_slot *slot, char *buf)
+{
+ int retval;
+ u8 value;
+
+ retval = get_adapter_status(slot->hotplug, &value);
+ if (retval)
+ return retval;
+
+ return sprintf(buf, "%d\n", value);
+}
+
+static struct pci_slot_attribute hotplug_slot_attr_presence = {
+ .attr = {.name = "adapter", .mode = S_IFREG | S_IRUGO},
+ .show = presence_read_file,
+};
+
+static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
+ size_t count)
+{
+ struct hotplug_slot *slot = pci_slot->hotplug;
+ unsigned long ltest;
+ u32 test;
+ int retval = 0;
+
+ ltest = simple_strtoul (buf, NULL, 10);
+ test = (u32)(ltest & 0xffffffff);
+ dbg("test = %d\n", test);
+
+ if (!try_module_get(slot->ops->owner)) {
+ retval = -ENODEV;
+ goto exit;
+ }
+ if (slot->ops->hardware_test)
+ retval = slot->ops->hardware_test(slot, test);
+ module_put(slot->ops->owner);
+
+exit:
+ if (retval)
+ return retval;
+ return count;
+}
+
+static struct pci_slot_attribute hotplug_slot_attr_test = {
+ .attr = {.name = "test", .mode = S_IFREG | S_IRUGO | S_IWUSR},
+ .store = test_write_file
+};
+
+static bool has_power_file(struct pci_slot *pci_slot)
+{
+ struct hotplug_slot *slot = pci_slot->hotplug;
+
+ if ((!slot) || (!slot->ops))
+ return false;
+ if ((slot->ops->enable_slot) ||
+ (slot->ops->disable_slot) ||
+ (slot->ops->get_power_status))
+ return true;
+ return false;
+}
+
+static bool has_attention_file(struct pci_slot *pci_slot)
+{
+ struct hotplug_slot *slot = pci_slot->hotplug;
+
+ if ((!slot) || (!slot->ops))
+ return false;
+ if ((slot->ops->set_attention_status) ||
+ (slot->ops->get_attention_status))
+ return true;
+ return false;
+}
+
+static bool has_latch_file(struct pci_slot *pci_slot)
+{
+ struct hotplug_slot *slot = pci_slot->hotplug;
+
+ if ((!slot) || (!slot->ops))
+ return false;
+ if (slot->ops->get_latch_status)
+ return true;
+ return false;
+}
+
+static bool has_adapter_file(struct pci_slot *pci_slot)
+{
+ struct hotplug_slot *slot = pci_slot->hotplug;
+
+ if ((!slot) || (!slot->ops))
+ return false;
+ if (slot->ops->get_adapter_status)
+ return true;
+ return false;
+}
+
+static bool has_test_file(struct pci_slot *pci_slot)
+{
+ struct hotplug_slot *slot = pci_slot->hotplug;
+
+ if ((!slot) || (!slot->ops))
+ return false;
+ if (slot->ops->hardware_test)
+ return true;
+ return false;
+}
+
+static int fs_add_slot(struct pci_slot *slot)
+{
+ int retval = 0;
+
+ /* Create symbolic link to the hotplug driver module */
+ pci_hp_create_module_link(slot);
+
+ if (has_power_file(slot)) {
+ retval = sysfs_create_file(&slot->kobj,
+ &hotplug_slot_attr_power.attr);
+ if (retval)
+ goto exit_power;
+ }
+
+ if (has_attention_file(slot)) {
+ retval = sysfs_create_file(&slot->kobj,
+ &hotplug_slot_attr_attention.attr);
+ if (retval)
+ goto exit_attention;
+ }
+
+ if (has_latch_file(slot)) {
+ retval = sysfs_create_file(&slot->kobj,
+ &hotplug_slot_attr_latch.attr);
+ if (retval)
+ goto exit_latch;
+ }
+
+ if (has_adapter_file(slot)) {
+ retval = sysfs_create_file(&slot->kobj,
+ &hotplug_slot_attr_presence.attr);
+ if (retval)
+ goto exit_adapter;
+ }
+
+ if (has_test_file(slot)) {
+ retval = sysfs_create_file(&slot->kobj,
+ &hotplug_slot_attr_test.attr);
+ if (retval)
+ goto exit_test;
+ }
+
+ goto exit;
+
+exit_test:
+ if (has_adapter_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_presence.attr);
+exit_adapter:
+ if (has_latch_file(slot))
+ sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
+exit_latch:
+ if (has_attention_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_attention.attr);
+exit_attention:
+ if (has_power_file(slot))
+ sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
+exit_power:
+ pci_hp_remove_module_link(slot);
+exit:
+ return retval;
+}
+
+static void fs_remove_slot(struct pci_slot *slot)
+{
+ if (has_power_file(slot))
+ sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
+
+ if (has_attention_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_attention.attr);
+
+ if (has_latch_file(slot))
+ sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
+
+ if (has_adapter_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_presence.attr);
+
+ if (has_test_file(slot))
+ sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr);
+
+ pci_hp_remove_module_link(slot);
+}
+
+static struct hotplug_slot *get_slot_from_name(const char *name)
+{
+ struct hotplug_slot *slot;
+ struct list_head *tmp;
+
+ list_for_each(tmp, &pci_hotplug_slot_list) {
+ slot = list_entry(tmp, struct hotplug_slot, slot_list);
+ if (strcmp(hotplug_slot_name(slot), name) == 0)
+ return slot;
+ }
+ return NULL;
+}
+
+/**
+ * __pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
+ * @slot: pointer to the &struct hotplug_slot to register
+ * @bus: bus this slot is on
+ * @devnr: device number
+ * @name: name registered with kobject core
+ * @owner: caller module owner
+ * @mod_name: caller module name
+ *
+ * Registers a hotplug slot with the pci hotplug subsystem, which will allow
+ * userspace interaction with the slot.
+ *
+ * Returns 0 if successful, anything else for an error.
+ */
+int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
+ int devnr, const char *name,
+ struct module *owner, const char *mod_name)
+{
+ int result;
+ struct pci_slot *pci_slot;
+
+ if (slot == NULL)
+ return -ENODEV;
+ if ((slot->info == NULL) || (slot->ops == NULL))
+ return -EINVAL;
+ if (slot->release == NULL) {
+ dbg("Why are you trying to register a hotplug slot without a proper release function?\n");
+ return -EINVAL;
+ }
+
+ slot->ops->owner = owner;
+ slot->ops->mod_name = mod_name;
+
+ mutex_lock(&pci_hp_mutex);
+ /*
+ * No problems if we call this interface from both ACPI_PCI_SLOT
+ * driver and call it here again. If we've already created the
+ * pci_slot, the interface will simply bump the refcount.
+ */
+ pci_slot = pci_create_slot(bus, devnr, name, slot);
+ if (IS_ERR(pci_slot)) {
+ result = PTR_ERR(pci_slot);
+ goto out;
+ }
+
+ slot->pci_slot = pci_slot;
+ pci_slot->hotplug = slot;
+
+ list_add(&slot->slot_list, &pci_hotplug_slot_list);
+
+ result = fs_add_slot(pci_slot);
+ kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
+ dbg("Added slot %s to the list\n", name);
+out:
+ mutex_unlock(&pci_hp_mutex);
+ return result;
+}
+EXPORT_SYMBOL_GPL(__pci_hp_register);
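+
+/*
+ * Minimal usage sketch (hypothetical caller; the foo_* names are
+ * assumptions, not code from this file). Drivers normally go through the
+ * pci_hp_register() wrapper, which fills in THIS_MODULE and
+ * KBUILD_MODNAME:
+ *
+ *	slot->info = &foo_slot_info;
+ *	slot->ops = &foo_slot_ops;
+ *	slot->release = &foo_release_slot;
+ *	retval = pci_hp_register(slot, pdev->subordinate, 0, "foo1");
+ *	if (retval)
+ *		err("pci_hp_register failed with error %d\n", retval);
+ */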
+
+/**
+ * pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem
+ * @hotplug: pointer to the &struct hotplug_slot to deregister
+ *
+ * The @hotplug slot must have been registered with the pci hotplug subsystem
+ * previously with a call to pci_hp_register().
+ *
+ * Returns 0 if successful, anything else for an error.
+ */
+int pci_hp_deregister(struct hotplug_slot *hotplug)
+{
+ struct hotplug_slot *temp;
+ struct pci_slot *slot;
+
+ if (!hotplug)
+ return -ENODEV;
+
+ mutex_lock(&pci_hp_mutex);
+ temp = get_slot_from_name(hotplug_slot_name(hotplug));
+ if (temp != hotplug) {
+ mutex_unlock(&pci_hp_mutex);
+ return -ENODEV;
+ }
+
+ list_del(&hotplug->slot_list);
+
+ slot = hotplug->pci_slot;
+ fs_remove_slot(slot);
+ dbg("Removed slot %s from the list\n", hotplug_slot_name(hotplug));
+
+ hotplug->release(hotplug);
+ slot->hotplug = NULL;
+ pci_destroy_slot(slot);
+ mutex_unlock(&pci_hp_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_hp_deregister);
+
+/**
+ * pci_hp_change_slot_info - changes the slot's information structure in the core
+ * @hotplug: pointer to the slot whose info has changed
+ * @info: pointer to the info to copy into the slot's info structure
+ *
+ * @hotplug must have been registered with the pci
+ * hotplug subsystem previously with a call to pci_hp_register().
+ *
+ * Returns 0 if successful, anything else for an error.
+ */
+int pci_hp_change_slot_info(struct hotplug_slot *hotplug,
+ struct hotplug_slot_info *info)
+{
+ if (!hotplug || !info)
+ return -ENODEV;
+
+ memcpy(hotplug->info, info, sizeof(struct hotplug_slot_info));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
+
+static int __init pci_hotplug_init(void)
+{
+ int result;
+
+ result = cpci_hotplug_init(debug);
+ if (result) {
+ err("cpci_hotplug_init with error %d\n", result);
+ return result;
+ }
+
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+ return result;
+}
+
+static void __exit pci_hotplug_exit(void)
+{
+ cpci_hotplug_exit();
+}
+
+module_init(pci_hotplug_init);
+module_exit(pci_hotplug_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+module_param(debug, bool, 0644);
+MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
new file mode 100644
index 000000000..b11521953
--- /dev/null
+++ b/drivers/pci/hotplug/pciehp.h
@@ -0,0 +1,187 @@
+/*
+ * PCI Express Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ * Copyright (C) 2003-2004 Intel Corporation
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
+ *
+ */
+#ifndef _PCIEHP_H
+#define _PCIEHP_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/delay.h>
+#include <linux/sched.h> /* signal_pending() */
+#include <linux/pcieport_if.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+#define MY_NAME "pciehp"
+
+extern bool pciehp_poll_mode;
+extern int pciehp_poll_time;
+extern bool pciehp_debug;
+
+#define dbg(format, arg...) \
+do { \
+ if (pciehp_debug) \
+ printk(KERN_DEBUG "%s: " format, MY_NAME , ## arg); \
+} while (0)
+#define err(format, arg...) \
+ printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
+#define info(format, arg...) \
+ printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
+#define warn(format, arg...) \
+ printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
+
+#define ctrl_dbg(ctrl, format, arg...) \
+ do { \
+ if (pciehp_debug) \
+ dev_printk(KERN_DEBUG, &ctrl->pcie->device, \
+ format, ## arg); \
+ } while (0)
+#define ctrl_err(ctrl, format, arg...) \
+ dev_err(&ctrl->pcie->device, format, ## arg)
+#define ctrl_info(ctrl, format, arg...) \
+ dev_info(&ctrl->pcie->device, format, ## arg)
+#define ctrl_warn(ctrl, format, arg...) \
+ dev_warn(&ctrl->pcie->device, format, ## arg)
+
+#define SLOT_NAME_SIZE 10
+struct slot {
+ u8 state;
+ struct controller *ctrl;
+ struct hotplug_slot *hotplug_slot;
+ struct delayed_work work; /* work for button event */
+ struct mutex lock;
+ struct mutex hotplug_lock;
+ struct workqueue_struct *wq;
+};
+
+struct event_info {
+ u32 event_type;
+ struct slot *p_slot;
+ struct work_struct work;
+};
+
+struct controller {
+ struct mutex ctrl_lock; /* controller lock */
+ struct pcie_device *pcie; /* PCI Express port service */
+ struct slot *slot;
+ wait_queue_head_t queue; /* sleep & wake process */
+ u32 slot_cap;
+ u16 slot_ctrl;
+ struct timer_list poll_timer;
+ unsigned long cmd_started; /* jiffies */
+ unsigned int cmd_busy:1;
+ unsigned int link_active_reporting:1;
+ unsigned int notification_enabled:1;
+ unsigned int power_fault_detected;
+};
+
+#define INT_BUTTON_IGNORE 0
+#define INT_PRESENCE_ON 1
+#define INT_PRESENCE_OFF 2
+#define INT_SWITCH_CLOSE 3
+#define INT_SWITCH_OPEN 4
+#define INT_POWER_FAULT 5
+#define INT_POWER_FAULT_CLEAR 6
+#define INT_BUTTON_PRESS 7
+#define INT_BUTTON_RELEASE 8
+#define INT_BUTTON_CANCEL 9
+#define INT_LINK_UP 10
+#define INT_LINK_DOWN 11
+
+#define STATIC_STATE 0
+#define BLINKINGON_STATE 1
+#define BLINKINGOFF_STATE 2
+#define POWERON_STATE 3
+#define POWEROFF_STATE 4
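+
+/*
+ * Slot state machine, as driven by pciehp_ctrl.c: an attention button
+ * press in STATIC_STATE enters BLINKINGON/BLINKINGOFF_STATE for a 5
+ * second grace period; a second press cancels the operation, otherwise
+ * the slot moves to POWERON/POWEROFF_STATE while the power work thread
+ * (un)configures it, and finally returns to STATIC_STATE.
+ */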
+
+#define ATTN_BUTTN(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_ABP)
+#define POWER_CTRL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_PCP)
+#define MRL_SENS(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_MRLSP)
+#define ATTN_LED(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_AIP)
+#define PWR_LED(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_PIP)
+#define HP_SUPR_RM(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_HPS)
+#define EMI(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_EIP)
+#define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS)
+#define PSN(ctrl) (((ctrl)->slot_cap & PCI_EXP_SLTCAP_PSN) >> 19)
+
+int pciehp_sysfs_enable_slot(struct slot *slot);
+int pciehp_sysfs_disable_slot(struct slot *slot);
+u8 pciehp_handle_attention_button(struct slot *p_slot);
+u8 pciehp_handle_switch_change(struct slot *p_slot);
+u8 pciehp_handle_presence_change(struct slot *p_slot);
+u8 pciehp_handle_power_fault(struct slot *p_slot);
+void pciehp_handle_linkstate_change(struct slot *p_slot);
+int pciehp_configure_device(struct slot *p_slot);
+int pciehp_unconfigure_device(struct slot *p_slot);
+void pciehp_queue_pushbutton_work(struct work_struct *work);
+struct controller *pcie_init(struct pcie_device *dev);
+int pcie_init_notification(struct controller *ctrl);
+int pciehp_enable_slot(struct slot *p_slot);
+int pciehp_disable_slot(struct slot *p_slot);
+void pcie_enable_notification(struct controller *ctrl);
+int pciehp_power_on_slot(struct slot *slot);
+void pciehp_power_off_slot(struct slot *slot);
+void pciehp_get_power_status(struct slot *slot, u8 *status);
+void pciehp_get_attention_status(struct slot *slot, u8 *status);
+
+void pciehp_set_attention_status(struct slot *slot, u8 status);
+void pciehp_get_latch_status(struct slot *slot, u8 *status);
+void pciehp_get_adapter_status(struct slot *slot, u8 *status);
+int pciehp_query_power_fault(struct slot *slot);
+void pciehp_green_led_on(struct slot *slot);
+void pciehp_green_led_off(struct slot *slot);
+void pciehp_green_led_blink(struct slot *slot);
+int pciehp_check_link_status(struct controller *ctrl);
+bool pciehp_check_link_active(struct controller *ctrl);
+void pciehp_release_ctrl(struct controller *ctrl);
+int pciehp_reset_slot(struct slot *slot, int probe);
+
+static inline const char *slot_name(struct slot *slot)
+{
+ return hotplug_slot_name(slot->hotplug_slot);
+}
+
+#ifdef CONFIG_ACPI
+#include <linux/pci-acpi.h>
+
+void __init pciehp_acpi_slot_detection_init(void);
+int pciehp_acpi_slot_detection_check(struct pci_dev *dev);
+
+static inline void pciehp_firmware_init(void)
+{
+ pciehp_acpi_slot_detection_init();
+}
+#else
+#define pciehp_firmware_init() do {} while (0)
+static inline int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
+{
+ return 0;
+}
+#endif /* CONFIG_ACPI */
+#endif /* _PCIEHP_H */
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
new file mode 100644
index 000000000..93cc9266e
--- /dev/null
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -0,0 +1,137 @@
+/*
+ * ACPI related functions for PCI Express Hot Plug driver.
+ *
+ * Copyright (C) 2008 Kenji Kaneshige
+ * Copyright (C) 2008 Fujitsu Limited.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "pciehp.h"
+
+#define PCIEHP_DETECT_PCIE (0)
+#define PCIEHP_DETECT_ACPI (1)
+#define PCIEHP_DETECT_AUTO (2)
+#define PCIEHP_DETECT_DEFAULT PCIEHP_DETECT_AUTO
+
+struct dummy_slot {
+ u32 number;
+ struct list_head list;
+};
+
+static int slot_detection_mode;
+static char *pciehp_detect_mode;
+module_param(pciehp_detect_mode, charp, 0444);
+MODULE_PARM_DESC(pciehp_detect_mode,
+ "Slot detection mode: pcie, acpi, auto\n"
+ " pcie - Use PCIe based slot detection\n"
+ " acpi - Use ACPI for slot detection\n"
+ " auto(default) - Auto select mode. Use acpi option if duplicate\n"
+ " slot ids are found. Otherwise, use pcie option\n");
+
+int pciehp_acpi_slot_detection_check(struct pci_dev *dev)
+{
+ if (slot_detection_mode != PCIEHP_DETECT_ACPI)
+ return 0;
+ if (acpi_pci_detect_ejectable(ACPI_HANDLE(&dev->dev)))
+ return 0;
+ return -ENODEV;
+}
+
+static int __init parse_detect_mode(void)
+{
+ if (!pciehp_detect_mode)
+ return PCIEHP_DETECT_DEFAULT;
+ if (!strcmp(pciehp_detect_mode, "pcie"))
+ return PCIEHP_DETECT_PCIE;
+ if (!strcmp(pciehp_detect_mode, "acpi"))
+ return PCIEHP_DETECT_ACPI;
+ if (!strcmp(pciehp_detect_mode, "auto"))
+ return PCIEHP_DETECT_AUTO;
+ warn("bad specifier '%s' for pciehp_detect_mode. Use default\n",
+ pciehp_detect_mode);
+ return PCIEHP_DETECT_DEFAULT;
+}
+
+static int __initdata dup_slot_id;
+static int __initdata acpi_slot_detected;
+static struct list_head __initdata dummy_slots = LIST_HEAD_INIT(dummy_slots);
+
+/* Dummy driver for duplicate name detection */
+static int __init dummy_probe(struct pcie_device *dev)
+{
+ u32 slot_cap;
+ acpi_handle handle;
+ struct dummy_slot *slot, *tmp;
+ struct pci_dev *pdev = dev->port;
+
+ pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ return -ENOMEM;
+ slot->number = (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19;
+ list_for_each_entry(tmp, &dummy_slots, list) {
+ if (tmp->number == slot->number)
+ dup_slot_id++;
+ }
+ list_add_tail(&slot->list, &dummy_slots);
+ handle = ACPI_HANDLE(&pdev->dev);
+ if (!acpi_slot_detected && acpi_pci_detect_ejectable(handle))
+ acpi_slot_detected = 1;
+ return -ENODEV; /* dummy driver always returns error */
+}
+
+static struct pcie_port_service_driver __initdata dummy_driver = {
+ .name = "pciehp_dummy",
+ .port_type = PCIE_ANY_PORT,
+ .service = PCIE_PORT_SERVICE_HP,
+ .probe = dummy_probe,
+};
+
+static int __init select_detection_mode(void)
+{
+ struct dummy_slot *slot, *tmp;
+
+ if (pcie_port_service_register(&dummy_driver))
+ return PCIEHP_DETECT_ACPI;
+ pcie_port_service_unregister(&dummy_driver);
+ list_for_each_entry_safe(slot, tmp, &dummy_slots, list) {
+ list_del(&slot->list);
+ kfree(slot);
+ }
+ if (acpi_slot_detected && dup_slot_id)
+ return PCIEHP_DETECT_ACPI;
+ return PCIEHP_DETECT_PCIE;
+}
+
+void __init pciehp_acpi_slot_detection_init(void)
+{
+ slot_detection_mode = parse_detect_mode();
+ if (slot_detection_mode != PCIEHP_DETECT_AUTO)
+ goto out;
+ slot_detection_mode = select_detection_mode();
+out:
+ if (slot_detection_mode == PCIEHP_DETECT_ACPI)
+ info("Using ACPI for slot detection.\n");
+}
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
new file mode 100644
index 000000000..07aa722bb
--- /dev/null
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -0,0 +1,387 @@
+/*
+ * PCI Express Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ * Copyright (C) 2003-2004 Intel Corporation
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "pciehp.h"
+#include <linux/interrupt.h>
+#include <linux/time.h>
+
+/* Global variables */
+bool pciehp_debug;
+bool pciehp_poll_mode;
+int pciehp_poll_time;
+static bool pciehp_force;
+
+#define DRIVER_VERSION "0.4"
+#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
+#define DRIVER_DESC "PCI Express Hot Plug Controller Driver"
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+module_param(pciehp_debug, bool, 0644);
+module_param(pciehp_poll_mode, bool, 0644);
+module_param(pciehp_poll_time, int, 0644);
+module_param(pciehp_force, bool, 0644);
+MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
+MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
+MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
+MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if OSHP is missing");
+
+#define PCIE_MODULE_NAME "pciehp"
+
+static int set_attention_status (struct hotplug_slot *slot, u8 value);
+static int enable_slot (struct hotplug_slot *slot);
+static int disable_slot (struct hotplug_slot *slot);
+static int get_power_status (struct hotplug_slot *slot, u8 *value);
+static int get_attention_status (struct hotplug_slot *slot, u8 *value);
+static int get_latch_status (struct hotplug_slot *slot, u8 *value);
+static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
+static int reset_slot (struct hotplug_slot *slot, int probe);
+
+/**
+ * release_slot - free up the memory used by a slot
+ * @hotplug_slot: slot to free
+ */
+static void release_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, hotplug_slot_name(hotplug_slot));
+
+ kfree(hotplug_slot->ops);
+ kfree(hotplug_slot->info);
+ kfree(hotplug_slot);
+}
+
+static int init_slot(struct controller *ctrl)
+{
+ struct slot *slot = ctrl->slot;
+ struct hotplug_slot *hotplug = NULL;
+ struct hotplug_slot_info *info = NULL;
+ struct hotplug_slot_ops *ops = NULL;
+ char name[SLOT_NAME_SIZE];
+ int retval = -ENOMEM;
+
+ hotplug = kzalloc(sizeof(*hotplug), GFP_KERNEL);
+ if (!hotplug)
+ goto out;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ goto out;
+
+ /* Setup hotplug slot ops */
+ ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+ if (!ops)
+ goto out;
+
+ ops->enable_slot = enable_slot;
+ ops->disable_slot = disable_slot;
+ ops->get_power_status = get_power_status;
+ ops->get_adapter_status = get_adapter_status;
+ ops->reset_slot = reset_slot;
+ if (MRL_SENS(ctrl))
+ ops->get_latch_status = get_latch_status;
+ if (ATTN_LED(ctrl)) {
+ ops->get_attention_status = get_attention_status;
+ ops->set_attention_status = set_attention_status;
+ }
+
+ /* register this slot with the hotplug pci core */
+ hotplug->info = info;
+ hotplug->private = slot;
+ hotplug->release = &release_slot;
+ hotplug->ops = ops;
+ slot->hotplug_slot = hotplug;
+ snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl));
+
+ ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:00 sun=%x\n",
+ pci_domain_nr(ctrl->pcie->port->subordinate),
+ ctrl->pcie->port->subordinate->number, PSN(ctrl));
+ retval = pci_hp_register(hotplug,
+ ctrl->pcie->port->subordinate, 0, name);
+ if (retval)
+ ctrl_err(ctrl,
+ "pci_hp_register failed with error %d\n", retval);
+out:
+ if (retval) {
+ kfree(ops);
+ kfree(info);
+ kfree(hotplug);
+ }
+ return retval;
+}
+
+static void cleanup_slot(struct controller *ctrl)
+{
+ pci_hp_deregister(ctrl->slot->hotplug_slot);
+}
+
+/*
+ * set_attention_status - Turns the Amber LED for a slot on, off or blink
+ */
+static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ pciehp_set_attention_status(slot, status);
+ return 0;
+}
+
+
+static int enable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ return pciehp_sysfs_enable_slot(slot);
+}
+
+
+static int disable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ return pciehp_sysfs_disable_slot(slot);
+}
+
+static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ pciehp_get_power_status(slot, value);
+ return 0;
+}
+
+static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ pciehp_get_attention_status(slot, value);
+ return 0;
+}
+
+static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ pciehp_get_latch_status(slot, value);
+ return 0;
+}
+
+static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ pciehp_get_adapter_status(slot, value);
+ return 0;
+}
+
+static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ return pciehp_reset_slot(slot, probe);
+}
+
+static int pciehp_probe(struct pcie_device *dev)
+{
+ int rc;
+ struct controller *ctrl;
+ struct slot *slot;
+ u8 occupied, poweron;
+
+ if (pciehp_force)
+ dev_info(&dev->device,
+ "Bypassing BIOS check for pciehp use on %s\n",
+ pci_name(dev->port));
+ else if (pciehp_acpi_slot_detection_check(dev->port))
+ goto err_out_none;
+
+ if (!dev->port->subordinate) {
+ /* Can happen if we run out of bus numbers during probe */
+ dev_err(&dev->device,
+ "Hotplug bridge without secondary bus, ignoring\n");
+ goto err_out_none;
+ }
+
+ ctrl = pcie_init(dev);
+ if (!ctrl) {
+ dev_err(&dev->device, "Controller initialization failed\n");
+ goto err_out_none;
+ }
+ set_service_data(dev, ctrl);
+
+ /* Setup the slot information structures */
+ rc = init_slot(ctrl);
+ if (rc) {
+ if (rc == -EBUSY)
+ ctrl_warn(ctrl, "Slot already registered by another hotplug driver\n");
+ else
+ ctrl_err(ctrl, "Slot initialization failed\n");
+ goto err_out_release_ctlr;
+ }
+
+ /* Enable events after we have setup the data structures */
+ rc = pcie_init_notification(ctrl);
+ if (rc) {
+ ctrl_err(ctrl, "Notification initialization failed\n");
+ goto err_out_free_ctrl_slot;
+ }
+
+ /* Check if slot is occupied */
+ slot = ctrl->slot;
+ pciehp_get_adapter_status(slot, &occupied);
+ pciehp_get_power_status(slot, &poweron);
+ if (occupied && pciehp_force) {
+ mutex_lock(&slot->hotplug_lock);
+ pciehp_enable_slot(slot);
+ mutex_unlock(&slot->hotplug_lock);
+ }
+ /* If the slot is empty but its power status is on, turn power off */
+ if (!occupied && poweron && POWER_CTRL(ctrl))
+ pciehp_power_off_slot(slot);
+
+ return 0;
+
+err_out_free_ctrl_slot:
+ cleanup_slot(ctrl);
+err_out_release_ctlr:
+ pciehp_release_ctrl(ctrl);
+err_out_none:
+ return -ENODEV;
+}
+
+static void pciehp_remove(struct pcie_device *dev)
+{
+ struct controller *ctrl = get_service_data(dev);
+
+ cleanup_slot(ctrl);
+ pciehp_release_ctrl(ctrl);
+}
+
+#ifdef CONFIG_PM
+static int pciehp_suspend(struct pcie_device *dev)
+{
+ return 0;
+}
+
+static int pciehp_resume(struct pcie_device *dev)
+{
+ struct controller *ctrl;
+ struct slot *slot;
+ u8 status;
+
+ ctrl = get_service_data(dev);
+
+ /* reinitialize the chipset's event detection logic */
+ pcie_enable_notification(ctrl);
+
+ slot = ctrl->slot;
+
+ /* Check if slot is occupied */
+ pciehp_get_adapter_status(slot, &status);
+ mutex_lock(&slot->hotplug_lock);
+ if (status)
+ pciehp_enable_slot(slot);
+ else
+ pciehp_disable_slot(slot);
+ mutex_unlock(&slot->hotplug_lock);
+ return 0;
+}
+#endif /* PM */
+
+static struct pcie_port_service_driver hpdriver_portdrv = {
+ .name = PCIE_MODULE_NAME,
+ .port_type = PCIE_ANY_PORT,
+ .service = PCIE_PORT_SERVICE_HP,
+
+ .probe = pciehp_probe,
+ .remove = pciehp_remove,
+
+#ifdef CONFIG_PM
+ .suspend = pciehp_suspend,
+ .resume = pciehp_resume,
+#endif /* PM */
+};
+
+static int __init pcied_init(void)
+{
+ int retval = 0;
+
+ pciehp_firmware_init();
+ retval = pcie_port_service_register(&hpdriver_portdrv);
+ dbg("pcie_port_service_register = %d\n", retval);
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+ if (retval)
+ dbg("Failure to register service\n");
+
+ return retval;
+}
+
+static void __exit pcied_cleanup(void)
+{
+ dbg("unload_pciehpd()\n");
+ pcie_port_service_unregister(&hpdriver_portdrv);
+ info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
+}
+
+module_init(pcied_init);
+module_exit(pcied_cleanup);
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
new file mode 100644
index 000000000..f052e951b
--- /dev/null
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -0,0 +1,692 @@
+/*
+ * PCI Express Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ * Copyright (C) 2003-2004 Intel Corporation
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include "../pci.h"
+#include "pciehp.h"
+
+static void interrupt_event_handler(struct work_struct *work);
+
+static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
+{
+ struct event_info *info;
+
+ info = kmalloc(sizeof(*info), GFP_ATOMIC);
+ if (!info)
+ return -ENOMEM;
+
+ info->event_type = event_type;
+ info->p_slot = p_slot;
+ INIT_WORK(&info->work, interrupt_event_handler);
+
+ queue_work(p_slot->wq, &info->work);
+
+ return 0;
+}
+
+u8 pciehp_handle_attention_button(struct slot *p_slot)
+{
+ u32 event_type;
+ struct controller *ctrl = p_slot->ctrl;
+
+ /* Attention Button Change */
+ ctrl_dbg(ctrl, "Attention button interrupt received\n");
+
+ /*
+ * Button pressed: queue an event so the work thread can take action
+ */
+ ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot));
+ event_type = INT_BUTTON_PRESS;
+
+ queue_interrupt_event(p_slot, event_type);
+
+ return 0;
+}
+
+u8 pciehp_handle_switch_change(struct slot *p_slot)
+{
+ u8 getstatus;
+ u32 event_type;
+ struct controller *ctrl = p_slot->ctrl;
+
+ /* Switch Change */
+ ctrl_dbg(ctrl, "Switch interrupt received\n");
+
+ pciehp_get_latch_status(p_slot, &getstatus);
+ if (getstatus) {
+ /*
+ * Switch opened
+ */
+ ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot));
+ event_type = INT_SWITCH_OPEN;
+ } else {
+ /*
+ * Switch closed
+ */
+ ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot));
+ event_type = INT_SWITCH_CLOSE;
+ }
+
+ queue_interrupt_event(p_slot, event_type);
+
+ return 1;
+}
+
+u8 pciehp_handle_presence_change(struct slot *p_slot)
+{
+ u32 event_type;
+ u8 presence_save;
+ struct controller *ctrl = p_slot->ctrl;
+
+ /* Presence Change */
+ ctrl_dbg(ctrl, "Presence/Notify input change\n");
+
+ /* Switch is open; assume a presence change and
+ * save the presence state
+ */
+ pciehp_get_adapter_status(p_slot, &presence_save);
+ if (presence_save) {
+ /*
+ * Card Present
+ */
+ ctrl_info(ctrl, "Card present on Slot(%s)\n", slot_name(p_slot));
+ event_type = INT_PRESENCE_ON;
+ } else {
+ /*
+ * Not Present
+ */
+ ctrl_info(ctrl, "Card not present on Slot(%s)\n",
+ slot_name(p_slot));
+ event_type = INT_PRESENCE_OFF;
+ }
+
+ queue_interrupt_event(p_slot, event_type);
+
+ return 1;
+}
+
+u8 pciehp_handle_power_fault(struct slot *p_slot)
+{
+ u32 event_type;
+ struct controller *ctrl = p_slot->ctrl;
+
+ /* power fault */
+ ctrl_dbg(ctrl, "Power fault interrupt received\n");
+ ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
+ event_type = INT_POWER_FAULT;
+ ctrl_info(ctrl, "Power fault bit %x set\n", 0);
+ queue_interrupt_event(p_slot, event_type);
+
+ return 1;
+}
+
+void pciehp_handle_linkstate_change(struct slot *p_slot)
+{
+ u32 event_type;
+ struct controller *ctrl = p_slot->ctrl;
+
+ /* Link Status Change */
+ ctrl_dbg(ctrl, "Data Link Layer State change\n");
+
+ if (pciehp_check_link_active(ctrl)) {
+ ctrl_info(ctrl, "slot(%s): Link Up event\n",
+ slot_name(p_slot));
+ event_type = INT_LINK_UP;
+ } else {
+ ctrl_info(ctrl, "slot(%s): Link Down event\n",
+ slot_name(p_slot));
+ event_type = INT_LINK_DOWN;
+ }
+
+ queue_interrupt_event(p_slot, event_type);
+}
+
+/*
+ * The following routines constitute the bulk of the
+ * hotplug controller logic
+ */
+
+static void set_slot_off(struct controller *ctrl, struct slot *pslot)
+{
+ /* turn off slot, turn on Amber LED, turn off Green LED if supported*/
+ if (POWER_CTRL(ctrl)) {
+ pciehp_power_off_slot(pslot);
+
+ /*
+ * After turning power off, we must wait for at least 1 second
+ * before taking any action that relies on power having been
+ * removed from the slot/adapter.
+ */
+ msleep(1000);
+ }
+
+ pciehp_green_led_off(pslot);
+ pciehp_set_attention_status(pslot, 1);
+}
+
+/**
+ * board_added - Called after a board has been added to the system.
+ * @p_slot: &slot where board is added
+ *
+ * Turns power on for the board.
+ * Configures board.
+ */
+static int board_added(struct slot *p_slot)
+{
+ int retval = 0;
+ struct controller *ctrl = p_slot->ctrl;
+ struct pci_bus *parent = ctrl->pcie->port->subordinate;
+
+ if (POWER_CTRL(ctrl)) {
+ /* Power on slot */
+ retval = pciehp_power_on_slot(p_slot);
+ if (retval)
+ return retval;
+ }
+
+ pciehp_green_led_blink(p_slot);
+
+ /* Check link training status */
+ retval = pciehp_check_link_status(ctrl);
+ if (retval) {
+ ctrl_err(ctrl, "Failed to check link status\n");
+ goto err_exit;
+ }
+
+ /* Check for a power fault */
+ if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
+ ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
+ retval = -EIO;
+ goto err_exit;
+ }
+
+ retval = pciehp_configure_device(p_slot);
+ if (retval) {
+ ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
+ pci_domain_nr(parent), parent->number);
+ if (retval != -EEXIST)
+ goto err_exit;
+ }
+
+ pciehp_green_led_on(p_slot);
+ return 0;
+
+err_exit:
+ set_slot_off(ctrl, p_slot);
+ return retval;
+}
+
+/**
+ * remove_board - Turns off slot and LEDs
+ * @p_slot: slot where board is being removed
+ */
+static int remove_board(struct slot *p_slot)
+{
+ int retval;
+ struct controller *ctrl = p_slot->ctrl;
+
+ retval = pciehp_unconfigure_device(p_slot);
+ if (retval)
+ return retval;
+
+ if (POWER_CTRL(ctrl)) {
+ pciehp_power_off_slot(p_slot);
+
+ /*
+ * After turning power off, we must wait for at least 1 second
+ * before taking any action that relies on power having been
+ * removed from the slot/adapter.
+ */
+ msleep(1000);
+ }
+
+ /* turn off Green LED */
+ pciehp_green_led_off(p_slot);
+ return 0;
+}
+
+struct power_work_info {
+ struct slot *p_slot;
+ struct work_struct work;
+ unsigned int req;
+#define DISABLE_REQ 0
+#define ENABLE_REQ 1
+};
+
+/**
+ * pciehp_power_thread - handle pushbutton events
+ * @work: &struct work_struct describing work to be done
+ *
+ * Scheduled procedure to handle blocking stuff for the pushbuttons.
+ * Handles all pending events and exits.
+ */
+static void pciehp_power_thread(struct work_struct *work)
+{
+ struct power_work_info *info =
+ container_of(work, struct power_work_info, work);
+ struct slot *p_slot = info->p_slot;
+ int ret;
+
+ switch (info->req) {
+ case DISABLE_REQ:
+ ctrl_dbg(p_slot->ctrl,
+ "Disabling domain:bus:device=%04x:%02x:00\n",
+ pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
+ p_slot->ctrl->pcie->port->subordinate->number);
+ mutex_lock(&p_slot->hotplug_lock);
+ pciehp_disable_slot(p_slot);
+ mutex_unlock(&p_slot->hotplug_lock);
+ mutex_lock(&p_slot->lock);
+ p_slot->state = STATIC_STATE;
+ mutex_unlock(&p_slot->lock);
+ break;
+ case ENABLE_REQ:
+ ctrl_dbg(p_slot->ctrl,
+ "Enabling domain:bus:device=%04x:%02x:00\n",
+ pci_domain_nr(p_slot->ctrl->pcie->port->subordinate),
+ p_slot->ctrl->pcie->port->subordinate->number);
+ mutex_lock(&p_slot->hotplug_lock);
+ ret = pciehp_enable_slot(p_slot);
+ mutex_unlock(&p_slot->hotplug_lock);
+ if (ret)
+ pciehp_green_led_off(p_slot);
+ mutex_lock(&p_slot->lock);
+ p_slot->state = STATIC_STATE;
+ mutex_unlock(&p_slot->lock);
+ break;
+ default:
+ break;
+ }
+
+ kfree(info);
+}
+
+void pciehp_queue_pushbutton_work(struct work_struct *work)
+{
+ struct slot *p_slot = container_of(work, struct slot, work.work);
+ struct power_work_info *info;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
+ __func__);
+ return;
+ }
+ info->p_slot = p_slot;
+ INIT_WORK(&info->work, pciehp_power_thread);
+
+ mutex_lock(&p_slot->lock);
+ switch (p_slot->state) {
+ case BLINKINGOFF_STATE:
+ p_slot->state = POWEROFF_STATE;
+ info->req = DISABLE_REQ;
+ break;
+ case BLINKINGON_STATE:
+ p_slot->state = POWERON_STATE;
+ info->req = ENABLE_REQ;
+ break;
+ default:
+ kfree(info);
+ goto out;
+ }
+ queue_work(p_slot->wq, &info->work);
+ out:
+ mutex_unlock(&p_slot->lock);
+}
+
+/*
+ * Note: This function must be called with slot->lock held
+ */
+static void handle_button_press_event(struct slot *p_slot)
+{
+ struct controller *ctrl = p_slot->ctrl;
+ u8 getstatus;
+
+ switch (p_slot->state) {
+ case STATIC_STATE:
+ pciehp_get_power_status(p_slot, &getstatus);
+ if (getstatus) {
+ p_slot->state = BLINKINGOFF_STATE;
+ ctrl_info(ctrl, "PCI slot #%s - powering off due to button press\n",
+ slot_name(p_slot));
+ } else {
+ p_slot->state = BLINKINGON_STATE;
+ ctrl_info(ctrl, "PCI slot #%s - powering on due to button press\n",
+ slot_name(p_slot));
+ }
+ /* blink green LED and turn off amber */
+ pciehp_green_led_blink(p_slot);
+ pciehp_set_attention_status(p_slot, 0);
+ queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
+ break;
+ case BLINKINGOFF_STATE:
+ case BLINKINGON_STATE:
+ /*
+ * Cancel if we are still blinking; this means the attention
+ * button was pressed again before the 5 second limit expired,
+ * cancelling the pending hot-add or hot-remove
+ */
+ ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot));
+ cancel_delayed_work(&p_slot->work);
+ if (p_slot->state == BLINKINGOFF_STATE)
+ pciehp_green_led_on(p_slot);
+ else
+ pciehp_green_led_off(p_slot);
+ pciehp_set_attention_status(p_slot, 0);
+ ctrl_info(ctrl, "PCI slot #%s - action canceled due to button press\n",
+ slot_name(p_slot));
+ p_slot->state = STATIC_STATE;
+ break;
+ case POWEROFF_STATE:
+ case POWERON_STATE:
+ /*
+ * Ignore if the slot is in the power-on or power-off state;
+ * this means the previous attention button action to
+ * hot-add or hot-remove is still in progress
+ */
+ ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot));
+ break;
+ default:
+ ctrl_warn(ctrl, "Not a valid state\n");
+ break;
+ }
+}
+
+/*
+ * Note: This function must be called with slot->lock held
+ */
+static void handle_surprise_event(struct slot *p_slot)
+{
+ u8 getstatus;
+ struct power_work_info *info;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
+ __func__);
+ return;
+ }
+ info->p_slot = p_slot;
+ INIT_WORK(&info->work, pciehp_power_thread);
+
+ pciehp_get_adapter_status(p_slot, &getstatus);
+ if (!getstatus) {
+ p_slot->state = POWEROFF_STATE;
+ info->req = DISABLE_REQ;
+ } else {
+ p_slot->state = POWERON_STATE;
+ info->req = ENABLE_REQ;
+ }
+
+ queue_work(p_slot->wq, &info->work);
+}
+
+/*
+ * Note: This function must be called with slot->lock held
+ */
+static void handle_link_event(struct slot *p_slot, u32 event)
+{
+ struct controller *ctrl = p_slot->ctrl;
+ struct power_work_info *info;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
+ __func__);
+ return;
+ }
+ info->p_slot = p_slot;
+ info->req = event == INT_LINK_UP ? ENABLE_REQ : DISABLE_REQ;
+ INIT_WORK(&info->work, pciehp_power_thread);
+
+ switch (p_slot->state) {
+ case BLINKINGON_STATE:
+ case BLINKINGOFF_STATE:
+ cancel_delayed_work(&p_slot->work);
+ /* Fall through */
+ case STATIC_STATE:
+ p_slot->state = event == INT_LINK_UP ?
+ POWERON_STATE : POWEROFF_STATE;
+ queue_work(p_slot->wq, &info->work);
+ break;
+ case POWERON_STATE:
+ if (event == INT_LINK_UP) {
+ ctrl_info(ctrl,
+ "Link Up event ignored on slot(%s): already powering on\n",
+ slot_name(p_slot));
+ kfree(info);
+ } else {
+ ctrl_info(ctrl,
+ "Link Down event queued on slot(%s): currently getting powered on\n",
+ slot_name(p_slot));
+ p_slot->state = POWEROFF_STATE;
+ queue_work(p_slot->wq, &info->work);
+ }
+ break;
+ case POWEROFF_STATE:
+ if (event == INT_LINK_UP) {
+ ctrl_info(ctrl,
+ "Link Up event queued on slot(%s): currently getting powered off\n",
+ slot_name(p_slot));
+ p_slot->state = POWERON_STATE;
+ queue_work(p_slot->wq, &info->work);
+ } else {
+ ctrl_info(ctrl,
+ "Link Down event ignored on slot(%s): already powering off\n",
+ slot_name(p_slot));
+ kfree(info);
+ }
+ break;
+ default:
+ ctrl_err(ctrl, "Not a valid state on slot(%s)\n",
+ slot_name(p_slot));
+ kfree(info);
+ break;
+ }
+}
+
+static void interrupt_event_handler(struct work_struct *work)
+{
+ struct event_info *info = container_of(work, struct event_info, work);
+ struct slot *p_slot = info->p_slot;
+ struct controller *ctrl = p_slot->ctrl;
+
+ mutex_lock(&p_slot->lock);
+ switch (info->event_type) {
+ case INT_BUTTON_PRESS:
+ handle_button_press_event(p_slot);
+ break;
+ case INT_POWER_FAULT:
+ if (!POWER_CTRL(ctrl))
+ break;
+ pciehp_set_attention_status(p_slot, 1);
+ pciehp_green_led_off(p_slot);
+ break;
+ case INT_PRESENCE_ON:
+ ctrl_dbg(ctrl, "Surprise Insertion\n");
+ handle_surprise_event(p_slot);
+ break;
+ case INT_PRESENCE_OFF:
+ /*
+ * Regardless of surprise capability, we must remove a
+ * card that has been pulled out!
+ */
+ ctrl_dbg(ctrl, "Surprise Removal\n");
+ handle_surprise_event(p_slot);
+ break;
+ case INT_LINK_UP:
+ case INT_LINK_DOWN:
+ handle_link_event(p_slot, info->event_type);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&p_slot->lock);
+
+ kfree(info);
+}
+
+/*
+ * Note: This function must be called with slot->hotplug_lock held
+ */
+int pciehp_enable_slot(struct slot *p_slot)
+{
+ u8 getstatus = 0;
+ int rc;
+ struct controller *ctrl = p_slot->ctrl;
+
+ pciehp_get_adapter_status(p_slot, &getstatus);
+ if (!getstatus) {
+ ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
+ return -ENODEV;
+ }
+ if (MRL_SENS(p_slot->ctrl)) {
+ pciehp_get_latch_status(p_slot, &getstatus);
+ if (getstatus) {
+ ctrl_info(ctrl, "Latch open on slot(%s)\n",
+ slot_name(p_slot));
+ return -ENODEV;
+ }
+ }
+
+ if (POWER_CTRL(p_slot->ctrl)) {
+ pciehp_get_power_status(p_slot, &getstatus);
+ if (getstatus) {
+ ctrl_info(ctrl, "Already enabled on slot(%s)\n",
+ slot_name(p_slot));
+ return -EINVAL;
+ }
+ }
+
+ pciehp_get_latch_status(p_slot, &getstatus);
+
+ rc = board_added(p_slot);
+ if (rc)
+ pciehp_get_latch_status(p_slot, &getstatus);
+
+ return rc;
+}
+
+/*
+ * Note: This function must be called with slot->hotplug_lock held
+ */
+int pciehp_disable_slot(struct slot *p_slot)
+{
+ u8 getstatus = 0;
+ struct controller *ctrl = p_slot->ctrl;
+
+ if (!p_slot->ctrl)
+ return 1;
+
+ if (POWER_CTRL(p_slot->ctrl)) {
+ pciehp_get_power_status(p_slot, &getstatus);
+ if (!getstatus) {
+ ctrl_info(ctrl, "Already disabled on slot(%s)\n",
+ slot_name(p_slot));
+ return -EINVAL;
+ }
+ }
+
+ return remove_board(p_slot);
+}
+
+int pciehp_sysfs_enable_slot(struct slot *p_slot)
+{
+ int retval = -ENODEV;
+ struct controller *ctrl = p_slot->ctrl;
+
+ mutex_lock(&p_slot->lock);
+ switch (p_slot->state) {
+ case BLINKINGON_STATE:
+ cancel_delayed_work(&p_slot->work);
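+ /* Fall through */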
+ case STATIC_STATE:
+ p_slot->state = POWERON_STATE;
+ mutex_unlock(&p_slot->lock);
+ mutex_lock(&p_slot->hotplug_lock);
+ retval = pciehp_enable_slot(p_slot);
+ mutex_unlock(&p_slot->hotplug_lock);
+ mutex_lock(&p_slot->lock);
+ p_slot->state = STATIC_STATE;
+ break;
+ case POWERON_STATE:
+ ctrl_info(ctrl, "Slot %s is already in powering on state\n",
+ slot_name(p_slot));
+ break;
+ case BLINKINGOFF_STATE:
+ case POWEROFF_STATE:
+ ctrl_info(ctrl, "Already enabled on slot %s\n",
+ slot_name(p_slot));
+ break;
+ default:
+ ctrl_err(ctrl, "Not a valid state on slot %s\n",
+ slot_name(p_slot));
+ break;
+ }
+ mutex_unlock(&p_slot->lock);
+
+ return retval;
+}
+
+int pciehp_sysfs_disable_slot(struct slot *p_slot)
+{
+ int retval = -ENODEV;
+ struct controller *ctrl = p_slot->ctrl;
+
+ mutex_lock(&p_slot->lock);
+ switch (p_slot->state) {
+ case BLINKINGOFF_STATE:
+ cancel_delayed_work(&p_slot->work);
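+ /* Fall through */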
+ case STATIC_STATE:
+ p_slot->state = POWEROFF_STATE;
+ mutex_unlock(&p_slot->lock);
+ retval = pciehp_disable_slot(p_slot);
+ mutex_lock(&p_slot->lock);
+ p_slot->state = STATIC_STATE;
+ break;
+ case POWEROFF_STATE:
+ ctrl_info(ctrl, "Slot %s is already in powering off state\n",
+ slot_name(p_slot));
+ break;
+ case BLINKINGON_STATE:
+ case POWERON_STATE:
+ ctrl_info(ctrl, "Already disabled on slot %s\n",
+ slot_name(p_slot));
+ break;
+ default:
+ ctrl_err(ctrl, "Not a valid state on slot %s\n",
+ slot_name(p_slot));
+ break;
+ }
+ mutex_unlock(&p_slot->lock);
+
+ return retval;
+}
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
new file mode 100644
index 000000000..6d6868811
--- /dev/null
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -0,0 +1,859 @@
+/*
+ * PCI Express PCI Hot Plug Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ * Copyright (C) 2003-2004 Intel Corporation
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/signal.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+
+#include "../pci.h"
+#include "pciehp.h"
+
+static inline struct pci_dev *ctrl_dev(struct controller *ctrl)
+{
+ return ctrl->pcie->port;
+}
+
+static irqreturn_t pcie_isr(int irq, void *dev_id);
+static void start_int_poll_timer(struct controller *ctrl, int sec);
+
+/* This is the interrupt polling timeout function. */
+static void int_poll_timeout(unsigned long data)
+{
+ struct controller *ctrl = (struct controller *)data;
+
+ /* Poll for interrupt events; calling the ISR with irq == 0 indicates polling */
+ pcie_isr(0, ctrl);
+
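+ /* Re-arm the timer for the next polling interval */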
+ init_timer(&ctrl->poll_timer);
+ if (!pciehp_poll_time)
+ pciehp_poll_time = 2; /* default polling interval is 2 sec */
+
+ start_int_poll_timer(ctrl, pciehp_poll_time);
+}
+
+/* This function starts the interrupt polling timer. */
+static void start_int_poll_timer(struct controller *ctrl, int sec)
+{
+ /* Clamp to sane value */
+ if ((sec <= 0) || (sec > 60))
+ sec = 2;
+
+ ctrl->poll_timer.function = &int_poll_timeout;
+ ctrl->poll_timer.data = (unsigned long)ctrl;
+ ctrl->poll_timer.expires = jiffies + sec * HZ;
+ add_timer(&ctrl->poll_timer);
+}
+
+static inline int pciehp_request_irq(struct controller *ctrl)
+{
+ int retval, irq = ctrl->pcie->irq;
+
+ /* Install interrupt polling timer. Start with 10 sec delay */
+ if (pciehp_poll_mode) {
+ init_timer(&ctrl->poll_timer);
+ start_int_poll_timer(ctrl, 10);
+ return 0;
+ }
+
+ /* Installs the interrupt handler */
+ retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl);
+ if (retval)
+ ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n",
+ irq);
+ return retval;
+}
+
+static inline void pciehp_free_irq(struct controller *ctrl)
+{
+ if (pciehp_poll_mode)
+ del_timer_sync(&ctrl->poll_timer);
+ else
+ free_irq(ctrl->pcie->irq, ctrl);
+}
+
+static int pcie_poll_cmd(struct controller *ctrl, int timeout)
+{
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ u16 slot_status;
+
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (slot_status & PCI_EXP_SLTSTA_CC) {
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_CC);
+ return 1;
+ }
+ while (timeout > 0) {
+ msleep(10);
+ timeout -= 10;
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (slot_status & PCI_EXP_SLTSTA_CC) {
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_CC);
+ return 1;
+ }
+ }
+ return 0; /* timeout */
+}
+
+static void pcie_wait_cmd(struct controller *ctrl)
+{
+ unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
+ unsigned long duration = msecs_to_jiffies(msecs);
+ unsigned long cmd_timeout = ctrl->cmd_started + duration;
+ unsigned long now, timeout;
+ int rc;
+
+ /*
+ * If the controller does not generate notifications for command
+ * completions, we never need to wait between writes.
+ */
+ if (NO_CMD_CMPL(ctrl))
+ return;
+
+ if (!ctrl->cmd_busy)
+ return;
+
+ /*
+ * Even if the command has already timed out, we want to call
+ * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
+ */
+ now = jiffies;
+ if (time_before_eq(cmd_timeout, now))
+ timeout = 1;
+ else
+ timeout = cmd_timeout - now;
+
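+ /*
+ * If both hot-plug and command-completed interrupts are
+ * enabled, pcie_isr() wakes us up on completion; otherwise
+ * fall back to polling the Slot Status register.
+ */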
+ if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
+ ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
+ rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
+ else
+ rc = pcie_poll_cmd(ctrl, jiffies_to_msecs(timeout));
+
+ /*
+ * Controllers with errata like Intel CF118 don't generate
+ * completion notifications unless the power/indicator/interlock
+ * control bits are changed. On such controllers, we'll emit this
+ * timeout message when we wait for completion of commands that
+ * don't change those bits, e.g., commands that merely enable
+ * interrupts.
+ */
+ if (!rc)
+ ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
+ ctrl->slot_ctrl,
+ jiffies_to_msecs(jiffies - ctrl->cmd_started));
+}
+
+static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
+ u16 mask, bool wait)
+{
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ u16 slot_ctrl;
+
+ mutex_lock(&ctrl->ctrl_lock);
+
+ /*
+ * Always wait for any previous command that might still be in progress
+ */
+ pcie_wait_cmd(ctrl);
+
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
+ slot_ctrl &= ~mask;
+ slot_ctrl |= (cmd & mask);
+ ctrl->cmd_busy = 1;
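+ /*
+ * Make the cmd_busy store visible before the Slot Control write
+ * below; pcie_isr() clears cmd_busy on Command Completed and
+ * wakes up the waiter in pcie_wait_cmd().
+ */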
+ smp_mb();
+ pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
+ ctrl->cmd_started = jiffies;
+ ctrl->slot_ctrl = slot_ctrl;
+
+ /*
+ * Optionally wait for the hardware to be ready for a new command,
+ * indicating completion of the above issued command.
+ */
+ if (wait)
+ pcie_wait_cmd(ctrl);
+
+ mutex_unlock(&ctrl->ctrl_lock);
+}
+
+/**
+ * pcie_write_cmd - Issue controller command
+ * @ctrl: controller to which the command is issued
+ * @cmd: command value written to slot control register
+ * @mask: bitmask of slot control register to be modified
+ */
+static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
+{
+ pcie_do_write_cmd(ctrl, cmd, mask, true);
+}
+
+/* Same as above without waiting for the hardware to latch */
+static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
+{
+ pcie_do_write_cmd(ctrl, cmd, mask, false);
+}
+
+bool pciehp_check_link_active(struct controller *ctrl)
+{
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ u16 lnk_status;
+ bool ret;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+ ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+
+ if (ret)
+ ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
+
+ return ret;
+}
+
+static void __pcie_wait_link_active(struct controller *ctrl, bool active)
+{
+ int timeout = 1000;
+
+ if (pciehp_check_link_active(ctrl) == active)
+ return;
+ while (timeout > 0) {
+ msleep(10);
+ timeout -= 10;
+ if (pciehp_check_link_active(ctrl) == active)
+ return;
+ }
+ ctrl_dbg(ctrl, "Data Link Layer Link Active not %s in 1000 msec\n",
+ active ? "set" : "cleared");
+}
+
+static void pcie_wait_link_active(struct controller *ctrl)
+{
+ __pcie_wait_link_active(ctrl, true);
+}
+
+static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
+{
+ u32 l;
+ int count = 0;
+ int delay = 1000, step = 20;
+ bool found = false;
+
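+ /* Poll the vendor ID until the device responds or ~1 second elapses */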
+ do {
+ found = pci_bus_read_dev_vendor_id(bus, devfn, &l, 0);
+ count++;
+
+ if (found)
+ break;
+
+ msleep(step);
+ delay -= step;
+ } while (delay > 0);
+
+ if (count > 1 && pciehp_debug)
+ printk(KERN_DEBUG "pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n",
+ pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), count, step, l);
+
+ return found;
+}
+
+int pciehp_check_link_status(struct controller *ctrl)
+{
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ bool found;
+ u16 lnk_status;
+
+ /*
+ * A hot-plug capable downstream port must support Data Link
+ * Layer Link Active Reporting, but an old controller might not
+ * implement it. In that case, wait for 1000 ms.
+ */
+ if (ctrl->link_active_reporting)
+ pcie_wait_link_active(ctrl);
+ else
+ msleep(1000);
+
+ /* Wait 100 ms before reading PCI config space, then retry for up to 1 s */
+ msleep(100);
+ found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
+ PCI_DEVFN(0, 0));
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+ ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
+ if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
+ !(lnk_status & PCI_EXP_LNKSTA_NLW)) {
+ ctrl_err(ctrl, "Link Training Error occurs\n");
+ return -1;
+ }
+
+ pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
+
+ if (!found)
+ return -1;
+
+ return 0;
+}
+
+static int __pciehp_link_set(struct controller *ctrl, bool enable)
+{
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ u16 lnk_ctrl;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnk_ctrl);
+
+ if (enable)
+ lnk_ctrl &= ~PCI_EXP_LNKCTL_LD;
+ else
+ lnk_ctrl |= PCI_EXP_LNKCTL_LD;
+
+ pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnk_ctrl);
+ ctrl_dbg(ctrl, "%s: lnk_ctrl = %x\n", __func__, lnk_ctrl);
+ return 0;
+}
+
+static int pciehp_link_enable(struct controller *ctrl)
+{
+ return __pciehp_link_set(ctrl, true);
+}
+
+void pciehp_get_attention_status(struct slot *slot, u8 *status)
+{
+ struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ u16 slot_ctrl;
+
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
+
+ switch (slot_ctrl & PCI_EXP_SLTCTL_AIC) {
+ case PCI_EXP_SLTCTL_ATTN_IND_ON:
+ *status = 1; /* On */
+ break;
+ case PCI_EXP_SLTCTL_ATTN_IND_BLINK:
+ *status = 2; /* Blink */
+ break;
+ case PCI_EXP_SLTCTL_ATTN_IND_OFF:
+ *status = 0; /* Off */
+ break;
+ default:
+ *status = 0xFF;
+ break;
+ }
+}
+
+void pciehp_get_power_status(struct slot *slot, u8 *status)
+{
+ struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ u16 slot_ctrl;
+
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
+
+ switch (slot_ctrl & PCI_EXP_SLTCTL_PCC) {
+ case PCI_EXP_SLTCTL_PWR_ON:
+ *status = 1; /* On */
+ break;
+ case PCI_EXP_SLTCTL_PWR_OFF:
+ *status = 0; /* Off */
+ break;
+ default:
+ *status = 0xFF;
+ break;
+ }
+}
+
+void pciehp_get_latch_status(struct slot *slot, u8 *status)
+{
+ struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ u16 slot_status;
+
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
+}
+
+void pciehp_get_adapter_status(struct slot *slot, u8 *status)
+{
+ struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ u16 slot_status;
+
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ *status = !!(slot_status & PCI_EXP_SLTSTA_PDS);
+}
+
+int pciehp_query_power_fault(struct slot *slot)
+{
+ struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ u16 slot_status;
+
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ return !!(slot_status & PCI_EXP_SLTSTA_PFD);
+}
+
+void pciehp_set_attention_status(struct slot *slot, u8 value)
+{
+ struct controller *ctrl = slot->ctrl;
+ u16 slot_cmd;
+
+ if (!ATTN_LED(ctrl))
+ return;
+
+ switch (value) {
+ case 0: /* turn off */
+ slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_OFF;
+ break;
+ case 1: /* turn on */
+ slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_ON;
+ break;
+ case 2: /* turn blink */
+ slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_BLINK;
+ break;
+ default:
+ return;
+ }
+ pcie_write_cmd_nowait(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
+}
+
+void pciehp_green_led_on(struct slot *slot)
+{
+ struct controller *ctrl = slot->ctrl;
+
+ if (!PWR_LED(ctrl))
+ return;
+
+ pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON,
+ PCI_EXP_SLTCTL_PIC);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_PWR_IND_ON);
+}
+
+void pciehp_green_led_off(struct slot *slot)
+{
+ struct controller *ctrl = slot->ctrl;
+
+ if (!PWR_LED(ctrl))
+ return;
+
+ pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
+ PCI_EXP_SLTCTL_PIC);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_PWR_IND_OFF);
+}
+
+void pciehp_green_led_blink(struct slot *slot)
+{
+ struct controller *ctrl = slot->ctrl;
+
+ if (!PWR_LED(ctrl))
+ return;
+
+ pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK,
+ PCI_EXP_SLTCTL_PIC);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_PWR_IND_BLINK);
+}
+
+int pciehp_power_on_slot(struct slot *slot)
+{
+ struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ u16 slot_status;
+ int retval;
+
+ /* Clear sticky power-fault bit from previous power failures */
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+ if (slot_status & PCI_EXP_SLTSTA_PFD)
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_PFD);
+ ctrl->power_fault_detected = 0;
+
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_ON, PCI_EXP_SLTCTL_PCC);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_PWR_ON);
+
+ retval = pciehp_link_enable(ctrl);
+ if (retval)
+ ctrl_err(ctrl, "%s: Can not enable the link!\n", __func__);
+
+ return retval;
+}
+
+void pciehp_power_off_slot(struct slot *slot)
+{
+ struct controller *ctrl = slot->ctrl;
+
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_PWR_OFF);
+}
+
+static irqreturn_t pcie_isr(int irq, void *dev_id)
+{
+ struct controller *ctrl = (struct controller *)dev_id;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ struct pci_bus *subordinate = pdev->subordinate;
+ struct pci_dev *dev;
+ struct slot *slot = ctrl->slot;
+ u16 detected, intr_loc;
+
+ /*
+ * In order to guarantee that all interrupt events are serviced,
+ * we need to re-inspect the Slot Status register after clearing
+ * what is presumed to be the last pending interrupt.
+ */
+ intr_loc = 0;
+ do {
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &detected);
+
+ detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
+ PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
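+ /*
+ * Drop events already recorded in a previous iteration;
+ * intr_loc accumulates everything left to service below.
+ */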
+ detected &= ~intr_loc;
+ intr_loc |= detected;
+ if (!intr_loc)
+ return IRQ_NONE;
+ if (detected)
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ intr_loc);
+ } while (detected);
+
+ ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc);
+
+ /* Check Command Complete Interrupt Pending */
+ if (intr_loc & PCI_EXP_SLTSTA_CC) {
+ ctrl->cmd_busy = 0;
+ smp_mb();
+ wake_up(&ctrl->queue);
+ }
+
+ if (subordinate) {
+ list_for_each_entry(dev, &subordinate->devices, bus_list) {
+ if (dev->ignore_hotplug) {
+ ctrl_dbg(ctrl, "ignoring hotplug event %#06x (%s requested no hotplug)\n",
+ intr_loc, pci_name(dev));
+ return IRQ_HANDLED;
+ }
+ }
+ }
+
+ if (!(intr_loc & ~PCI_EXP_SLTSTA_CC))
+ return IRQ_HANDLED;
+
+ /* Check MRL Sensor Changed */
+ if (intr_loc & PCI_EXP_SLTSTA_MRLSC)
+ pciehp_handle_switch_change(slot);
+
+ /* Check Attention Button Pressed */
+ if (intr_loc & PCI_EXP_SLTSTA_ABP)
+ pciehp_handle_attention_button(slot);
+
+ /* Check Presence Detect Changed */
+ if (intr_loc & PCI_EXP_SLTSTA_PDC)
+ pciehp_handle_presence_change(slot);
+
+ /* Check Power Fault Detected */
+ if ((intr_loc & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
+ ctrl->power_fault_detected = 1;
+ pciehp_handle_power_fault(slot);
+ }
+
+ if (intr_loc & PCI_EXP_SLTSTA_DLLSC)
+ pciehp_handle_linkstate_change(slot);
+
+ return IRQ_HANDLED;
+}
+
+void pcie_enable_notification(struct controller *ctrl)
+{
+ u16 cmd, mask;
+
+ /*
+ * TBD: Power fault detected software notification support.
+ *
+ * Power fault detected software notification is not enabled
+ * now because it caused a power fault detected interrupt storm
+ * on some machines. On those machines, the power fault detected
+ * bit in the Slot Status register was set again immediately
+ * after being cleared in the interrupt service routine, so the
+ * next power fault detected interrupt was delivered right away.
+ */
+
+ /*
+ * Always enable link events, so that link-up and link-down are
+ * always treated as hot-plug and unplug respectively. Enable
+ * presence detect only if an Attention Button is not present.
+ */
+ cmd = PCI_EXP_SLTCTL_DLLSCE;
+ if (ATTN_BUTTN(ctrl))
+ cmd |= PCI_EXP_SLTCTL_ABPE;
+ else
+ cmd |= PCI_EXP_SLTCTL_PDCE;
+ if (MRL_SENS(ctrl))
+ cmd |= PCI_EXP_SLTCTL_MRLSCE;
+ if (!pciehp_poll_mode)
+ cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;
+
+ mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
+ PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
+ PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
+ PCI_EXP_SLTCTL_DLLSCE);
+
+ pcie_write_cmd_nowait(ctrl, cmd, mask);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
+}
+
+static void pcie_disable_notification(struct controller *ctrl)
+{
+ u16 mask;
+
+ mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
+ PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
+ PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
+ PCI_EXP_SLTCTL_DLLSCE);
+ pcie_write_cmd(ctrl, 0, mask);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
+}
+
+/*
+ * pciehp has a 1:1 bus:slot relationship, so we ultimately want a secondary
+ * bus reset of the bridge, but at the same time we want to ensure that it is
+ * not seen as a hot-unplug followed by a hot-plug of the device. Thus,
+ * momentarily disable link state notification and presence detection change
+ * notification if we see that they could interfere. Also, clear any spurious
+ * events afterwards.
+ */
+int pciehp_reset_slot(struct slot *slot, int probe)
+{
+ struct controller *ctrl = slot->ctrl;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ u16 stat_mask = 0, ctrl_mask = 0;
+
+ if (probe)
+ return 0;
+
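+ /*
+ * Presence detect drives hot-plug when there is no attention
+ * button (see pcie_enable_notification()), so mask it across
+ * the reset as well.
+ */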
+ if (!ATTN_BUTTN(ctrl)) {
+ ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
+ stat_mask |= PCI_EXP_SLTSTA_PDC;
+ }
+ ctrl_mask |= PCI_EXP_SLTCTL_DLLSCE;
+ stat_mask |= PCI_EXP_SLTSTA_DLLSC;
+
+ pcie_write_cmd(ctrl, 0, ctrl_mask);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
+ if (pciehp_poll_mode)
+ del_timer_sync(&ctrl->poll_timer);
+
+ pci_reset_bridge_secondary_bus(ctrl->pcie->port);
+
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
+ pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
+ ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
+ if (pciehp_poll_mode)
+ int_poll_timeout(ctrl->poll_timer.data);
+
+ return 0;
+}
+
+int pcie_init_notification(struct controller *ctrl)
+{
+ if (pciehp_request_irq(ctrl))
+ return -1;
+ pcie_enable_notification(ctrl);
+ ctrl->notification_enabled = 1;
+ return 0;
+}
+
+static void pcie_shutdown_notification(struct controller *ctrl)
+{
+ if (ctrl->notification_enabled) {
+ pcie_disable_notification(ctrl);
+ pciehp_free_irq(ctrl);
+ ctrl->notification_enabled = 0;
+ }
+}
+
+static int pcie_init_slot(struct controller *ctrl)
+{
+ struct slot *slot;
+
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ return -ENOMEM;
+
+ slot->wq = alloc_workqueue("pciehp-%u", 0, 0, PSN(ctrl));
+ if (!slot->wq)
+ goto abort;
+
+ slot->ctrl = ctrl;
+ mutex_init(&slot->lock);
+ mutex_init(&slot->hotplug_lock);
+ INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
+ ctrl->slot = slot;
+ return 0;
+abort:
+ kfree(slot);
+ return -ENOMEM;
+}
+
+static void pcie_cleanup_slot(struct controller *ctrl)
+{
+ struct slot *slot = ctrl->slot;
+
+ cancel_delayed_work(&slot->work);
+ destroy_workqueue(slot->wq);
+ kfree(slot);
+}
+
+static inline void dbg_ctrl(struct controller *ctrl)
+{
+ int i;
+ u16 reg16;
+ struct pci_dev *pdev = ctrl->pcie->port;
+
+ if (!pciehp_debug)
+ return;
+
+ ctrl_info(ctrl, "Hotplug Controller:\n");
+ ctrl_info(ctrl, " Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n",
+ pci_name(pdev), pdev->irq);
+ ctrl_info(ctrl, " Vendor ID : 0x%04x\n", pdev->vendor);
+ ctrl_info(ctrl, " Device ID : 0x%04x\n", pdev->device);
+ ctrl_info(ctrl, " Subsystem ID : 0x%04x\n",
+ pdev->subsystem_device);
+ ctrl_info(ctrl, " Subsystem Vendor ID : 0x%04x\n",
+ pdev->subsystem_vendor);
+ ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n",
+ pci_pcie_cap(pdev));
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (!pci_resource_len(pdev, i))
+ continue;
+ ctrl_info(ctrl, " PCI resource [%d] : %pR\n",
+ i, &pdev->resource[i]);
+ }
+ ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
+ ctrl_info(ctrl, " Physical Slot Number : %d\n", PSN(ctrl));
+ ctrl_info(ctrl, " Attention Button : %3s\n",
+ ATTN_BUTTN(ctrl) ? "yes" : "no");
+ ctrl_info(ctrl, " Power Controller : %3s\n",
+ POWER_CTRL(ctrl) ? "yes" : "no");
+ ctrl_info(ctrl, " MRL Sensor : %3s\n",
+ MRL_SENS(ctrl) ? "yes" : "no");
+ ctrl_info(ctrl, " Attention Indicator : %3s\n",
+ ATTN_LED(ctrl) ? "yes" : "no");
+ ctrl_info(ctrl, " Power Indicator : %3s\n",
+ PWR_LED(ctrl) ? "yes" : "no");
+ ctrl_info(ctrl, " Hot-Plug Surprise : %3s\n",
+ HP_SUPR_RM(ctrl) ? "yes" : "no");
+ ctrl_info(ctrl, " EMI Present : %3s\n",
+ EMI(ctrl) ? "yes" : "no");
+ ctrl_info(ctrl, " Command Completed : %3s\n",
+ NO_CMD_CMPL(ctrl) ? "no" : "yes");
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &reg16);
+ ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16);
+ pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &reg16);
+ ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16);
+}
+
+#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
+
+struct controller *pcie_init(struct pcie_device *dev)
+{
+ struct controller *ctrl;
+ u32 slot_cap, link_cap;
+ struct pci_dev *pdev = dev->port;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl) {
+ dev_err(&dev->device, "%s: Out of memory\n", __func__);
+ goto abort;
+ }
+ ctrl->pcie = dev;
+ pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
+ ctrl->slot_cap = slot_cap;
+ mutex_init(&ctrl->ctrl_lock);
+ init_waitqueue_head(&ctrl->queue);
+ dbg_ctrl(ctrl);
+
+ /* Check if Data Link Layer Link Active Reporting is implemented */
+ pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
+ if (link_cap & PCI_EXP_LNKCAP_DLLLARC) {
+ ctrl_dbg(ctrl, "Link Active Reporting supported\n");
+ ctrl->link_active_reporting = 1;
+ }
+
+ /* Clear all remaining event bits in Slot Status register */
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
+ PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
+
+ ctrl_info(ctrl, "Slot #%d AttnBtn%c AttnInd%c PwrInd%c PwrCtrl%c MRL%c Interlock%c NoCompl%c LLActRep%c\n",
+ (slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
+ FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_AIP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_PIP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_PCP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_MRLSP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_EIP),
+ FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS),
+ FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC));
+
+ if (pcie_init_slot(ctrl))
+ goto abort_ctrl;
+
+ return ctrl;
+
+abort_ctrl:
+ kfree(ctrl);
+abort:
+ return NULL;
+}
+
+void pciehp_release_ctrl(struct controller *ctrl)
+{
+ pcie_shutdown_notification(ctrl);
+ pcie_cleanup_slot(ctrl);
+ kfree(ctrl);
+}
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
new file mode 100644
index 000000000..9e69403be
--- /dev/null
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -0,0 +1,128 @@
+/*
+ * PCI Express Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ * Copyright (C) 2003-2004 Intel Corporation
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "../pci.h"
+#include "pciehp.h"
+
+int pciehp_configure_device(struct slot *p_slot)
+{
+ struct pci_dev *dev;
+ struct pci_dev *bridge = p_slot->ctrl->pcie->port;
+ struct pci_bus *parent = bridge->subordinate;
+ int num, ret = 0;
+ struct controller *ctrl = p_slot->ctrl;
+
+ pci_lock_rescan_remove();
+
+ dev = pci_get_slot(parent, PCI_DEVFN(0, 0));
+ if (dev) {
+ ctrl_err(ctrl, "Device %s already exists at %04x:%02x:00, cannot hot-add\n",
+ pci_name(dev), pci_domain_nr(parent), parent->number);
+ pci_dev_put(dev);
+ ret = -EEXIST;
+ goto out;
+ }
+
+ num = pci_scan_slot(parent, PCI_DEVFN(0, 0));
+ if (num == 0) {
+ ctrl_err(ctrl, "No new device found\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ list_for_each_entry(dev, &parent->devices, bus_list)
+ if (pci_is_bridge(dev))
+ pci_hp_add_bridge(dev);
+
+ pci_assign_unassigned_bridge_resources(bridge);
+ pcie_bus_configure_settings(parent);
+ pci_bus_add_devices(parent);
+
+ out:
+ pci_unlock_rescan_remove();
+ return ret;
+}
+
+int pciehp_unconfigure_device(struct slot *p_slot)
+{
+ int rc = 0;
+ u8 bctl = 0;
+ u8 presence = 0;
+ struct pci_dev *dev, *temp;
+ struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
+ u16 command;
+ struct controller *ctrl = p_slot->ctrl;
+
+ ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n",
+ __func__, pci_domain_nr(parent), parent->number);
+ pciehp_get_adapter_status(p_slot, &presence);
+
+ pci_lock_rescan_remove();
+
+ /*
+ * Stopping an SR-IOV PF device removes all the associated VFs,
+ * which will update the bus->devices list and confuse the
+ * iterator. Therefore, iterate in reverse so we remove the VFs
+ * first, then the PF. We do the same in pci_stop_bus_device().
+ */
+ list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
+ bus_list) {
+ pci_dev_get(dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
+ pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
+ if (bctl & PCI_BRIDGE_CTL_VGA) {
+ ctrl_err(ctrl,
+ "Cannot remove display device %s\n",
+ pci_name(dev));
+ pci_dev_put(dev);
+ rc = -EINVAL;
+ break;
+ }
+ }
+ pci_stop_and_remove_bus_device(dev);
+ /*
+ * Ensure that no new Requests will be generated from
+ * the device.
+ */
+ if (presence) {
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+ command &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
+ command |= PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(dev, PCI_COMMAND, command);
+ }
+ pci_dev_put(dev);
+ }
+
+ pci_unlock_rescan_remove();
+ return rc;
+}
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
new file mode 100644
index 000000000..d062c008f
--- /dev/null
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -0,0 +1,365 @@
+/*
+ * PCI Hot Plug Controller Skeleton Driver - 0.3
+ *
+ * Copyright (C) 2001,2003 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001,2003 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * This driver is to be used as a skeleton driver to show how to interface
+ * with the pci hotplug core easily.
+ *
+ * Send feedback to <greg@kroah.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/init.h>
+
+#define SLOT_NAME_SIZE 10
+struct slot {
+ u8 number;
+ struct hotplug_slot *hotplug_slot;
+ struct list_head slot_list;
+ char name[SLOT_NAME_SIZE];
+};
+
+static LIST_HEAD(slot_list);
+
+#define MY_NAME "pcihp_skeleton"
+
+#define dbg(format, arg...) \
+ do { \
+ if (debug) \
+ printk(KERN_DEBUG "%s: " format "\n", \
+ MY_NAME , ## arg); \
+ } while (0)
+#define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
+#define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
+#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
+
+/* local variables */
+static bool debug;
+static int num_slots;
+
+#define DRIVER_VERSION "0.3"
+#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>"
+#define DRIVER_DESC "Hot Plug PCI Controller Skeleton Driver"
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+module_param(debug, bool, 0644);
+MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
+
+static int enable_slot (struct hotplug_slot *slot);
+static int disable_slot (struct hotplug_slot *slot);
+static int set_attention_status (struct hotplug_slot *slot, u8 value);
+static int hardware_test (struct hotplug_slot *slot, u32 value);
+static int get_power_status (struct hotplug_slot *slot, u8 *value);
+static int get_attention_status (struct hotplug_slot *slot, u8 *value);
+static int get_latch_status (struct hotplug_slot *slot, u8 *value);
+static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
+
+static struct hotplug_slot_ops skel_hotplug_slot_ops = {
+ .enable_slot = enable_slot,
+ .disable_slot = disable_slot,
+ .set_attention_status = set_attention_status,
+ .hardware_test = hardware_test,
+ .get_power_status = get_power_status,
+ .get_attention_status = get_attention_status,
+ .get_latch_status = get_latch_status,
+ .get_adapter_status = get_adapter_status,
+};
+
+static int enable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+ int retval = 0;
+
+ dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+
+ /*
+ * Fill in code here to enable the specified slot
+ */
+
+ return retval;
+}
+
+static int disable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+ int retval = 0;
+
+ dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+
+ /*
+ * Fill in code here to disable the specified slot
+ */
+
+ return retval;
+}
+
+static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
+{
+ struct slot *slot = hotplug_slot->private;
+ int retval = 0;
+
+ dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+
+ switch (status) {
+ case 0:
+ /*
+ * Fill in code here to turn light off
+ */
+ break;
+
+ case 1:
+ default:
+ /*
+ * Fill in code here to turn light on
+ */
+ break;
+ }
+
+ return retval;
+}
+
+static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
+{
+ struct slot *slot = hotplug_slot->private;
+ int retval = 0;
+
+ dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+
+ switch (value) {
+ case 0:
+ /* Specify a test here */
+ break;
+ case 1:
+ /* Specify another test here */
+ break;
+ }
+
+ return retval;
+}
+
+static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+ int retval = 0;
+
+ dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+
+ /*
+ * Fill in logic to get the current power status of the specific
+ * slot and store it in the *value location.
+ */
+
+ return retval;
+}
+
+static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+ int retval = 0;
+
+ dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+
+ /*
+ * Fill in logic to get the current attention status of the specific
+ * slot and store it in the *value location.
+ */
+
+ return retval;
+}
+
+static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+ int retval = 0;
+
+ dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+
+ /*
+ * Fill in logic to get the current latch status of the specific
+ * slot and store it in the *value location.
+ */
+
+ return retval;
+}
+
+static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+ int retval = 0;
+
+ dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+
+ /*
+ * Fill in logic to get the current adapter status of the specific
+ * slot and store it in the *value location.
+ */
+
+ return retval;
+}
+
+static void release_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
+ kfree(slot->hotplug_slot->info);
+ kfree(slot->hotplug_slot);
+ kfree(slot);
+}
+
+static void make_slot_name(struct slot *slot)
+{
+ /*
+ * Stupid way to make a filename out of the slot name.
+ * Replace this if your hardware provides a better way to name slots.
+ */
+ snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d", slot->number);
+}
+
+/**
+ * init_slots - initialize 'struct slot' structures for each slot
+ *
+ */
+static int __init init_slots(void)
+{
+ struct slot *slot;
+ struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot_info *info;
+ int retval;
+ int i;
+
+ /*
+ * Create a structure for each slot, and register that slot
+ * with the pci_hotplug subsystem.
+ */
+ for (i = 0; i < num_slots; ++i) {
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
+ if (!hotplug_slot) {
+ retval = -ENOMEM;
+ goto error_slot;
+ }
+ slot->hotplug_slot = hotplug_slot;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ retval = -ENOMEM;
+ goto error_hpslot;
+ }
+ hotplug_slot->info = info;
+
+ slot->number = i;
+
+ hotplug_slot->name = slot->name;
+ hotplug_slot->private = slot;
+ hotplug_slot->release = &release_slot;
+ make_slot_name(slot);
+ hotplug_slot->ops = &skel_hotplug_slot_ops;
+
+ /*
+ * Initialize the slot info structure with some known
+ * good values.
+ */
+ get_power_status(hotplug_slot, &info->power_status);
+ get_attention_status(hotplug_slot, &info->attention_status);
+ get_latch_status(hotplug_slot, &info->latch_status);
+ get_adapter_status(hotplug_slot, &info->adapter_status);
+
+ dbg("registering slot %d\n", i);
+ retval = pci_hp_register(slot->hotplug_slot);
+ if (retval) {
+ err("pci_hp_register failed with error %d\n", retval);
+ goto error_info;
+ }
+
+ /* add slot to our internal list */
+ list_add(&slot->slot_list, &slot_list);
+ }
+
+ return 0;
+error_info:
+ kfree(info);
+error_hpslot:
+ kfree(hotplug_slot);
+error_slot:
+ kfree(slot);
+error:
+ return retval;
+}
+
+static void __exit cleanup_slots(void)
+{
+ struct list_head *tmp;
+ struct list_head *next;
+ struct slot *slot;
+
+ /*
+ * Unregister all of our slots with the pci_hotplug subsystem.
+ * Memory will be freed in the release_slot() callback after the
+ * slot's lifespan is finished.
+ */
+ list_for_each_safe(tmp, next, &slot_list) {
+ slot = list_entry(tmp, struct slot, slot_list);
+ list_del(&slot->slot_list);
+ pci_hp_deregister(slot->hotplug_slot);
+ }
+}
+
+static int __init pcihp_skel_init(void)
+{
+ int retval;
+
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+ /*
+ * Do specific initialization stuff for your driver here
+ * like initializing your controller hardware (if any) and
+ * determining the number of slots you have in the system
+ * right now.
+ */
+ num_slots = 5;
+
+ return init_slots();
+}
+
+static void __exit pcihp_skel_exit(void)
+{
+ /*
+ * Clean everything up.
+ */
+ cleanup_slots();
+}
+
+module_init(pcihp_skel_init);
+module_exit(pcihp_skel_exit);
diff --git a/drivers/pci/hotplug/rpadlpar.h b/drivers/pci/hotplug/rpadlpar.h
new file mode 100644
index 000000000..81df93931
--- /dev/null
+++ b/drivers/pci/hotplug/rpadlpar.h
@@ -0,0 +1,24 @@
+/*
+ * Interface for Dynamic Logical Partitioning of I/O Slots on
+ * RPA-compliant PPC64 platform.
+ *
+ * John Rose <johnrose@austin.ibm.com>
+ * October 2003
+ *
+ * Copyright (C) 2003 IBM.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _RPADLPAR_IO_H_
+#define _RPADLPAR_IO_H_
+
+int dlpar_sysfs_init(void);
+void dlpar_sysfs_exit(void);
+
+int dlpar_add_slot(char *drc_name);
+int dlpar_remove_slot(char *drc_name);
+
+#endif
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
new file mode 100644
index 000000000..e12bafdc4
--- /dev/null
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -0,0 +1,479 @@
+/*
+ * Interface for Dynamic Logical Partitioning of I/O Slots on
+ * RPA-compliant PPC64 platform.
+ *
+ * John Rose <johnrose@austin.ibm.com>
+ * Linda Xie <lxie@us.ibm.com>
+ *
+ * October 2003
+ *
+ * Copyright (C) 2003 IBM.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#undef DEBUG
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+#include <asm/pci-bridge.h>
+#include <linux/mutex.h>
+#include <asm/rtas.h>
+#include <asm/vio.h>
+
+#include "../pci.h"
+#include "rpaphp.h"
+#include "rpadlpar.h"
+
+static DEFINE_MUTEX(rpadlpar_mutex);
+
+#define DLPAR_MODULE_NAME "rpadlpar_io"
+
+#define NODE_TYPE_VIO 1
+#define NODE_TYPE_SLOT 2
+#define NODE_TYPE_PHB 3
+
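+/* Find the vdevice child node whose drc-name matches drc_name */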
+static struct device_node *find_vio_slot_node(char *drc_name)
+{
+ struct device_node *parent = of_find_node_by_name(NULL, "vdevice");
+ struct device_node *dn = NULL;
+ char *name;
+ int rc;
+
+ if (!parent)
+ return NULL;
+
+ while ((dn = of_get_next_child(parent, dn))) {
+ rc = rpaphp_get_drc_props(dn, NULL, &name, NULL, NULL);
+ if ((rc == 0) && (!strcmp(drc_name, name)))
+ break;
+ }
+
+ return dn;
+}
+
+/* Find the dlpar-capable PCI node that matches the specified name and type */
+static struct device_node *find_php_slot_pci_node(char *drc_name,
+ char *drc_type)
+{
+ struct device_node *np = NULL;
+ char *name;
+ char *type;
+ int rc;
+
+ while ((np = of_find_node_by_name(np, "pci"))) {
+ rc = rpaphp_get_drc_props(np, NULL, &name, &type, NULL);
+ if (rc == 0)
+ if (!strcmp(drc_name, name) && !strcmp(drc_type, type))
+ break;
+ }
+
+ return np;
+}
+
+static struct device_node *find_dlpar_node(char *drc_name, int *node_type)
+{
+ struct device_node *dn;
+
+ dn = find_php_slot_pci_node(drc_name, "SLOT");
+ if (dn) {
+ *node_type = NODE_TYPE_SLOT;
+ return dn;
+ }
+
+ dn = find_php_slot_pci_node(drc_name, "PHB");
+ if (dn) {
+ *node_type = NODE_TYPE_PHB;
+ return dn;
+ }
+
+ dn = find_vio_slot_node(drc_name);
+ if (dn) {
+ *node_type = NODE_TYPE_VIO;
+ return dn;
+ }
+
+ return NULL;
+}
+
+/**
+ * find_php_slot - return hotplug slot structure for device node
+ * @dn: target &device_node
+ *
+ * This routine will return the hotplug slot structure
+ * for a given device node. Note that built-in PCI slots
+ * may be dlpar-able, but not hot-pluggable, so this routine
+ * will return NULL for built-in PCI slots.
+ */
+static struct slot *find_php_slot(struct device_node *dn)
+{
+ struct list_head *tmp, *n;
+ struct slot *slot;
+
+ list_for_each_safe(tmp, n, &rpaphp_slot_head) {
+ slot = list_entry(tmp, struct slot, rpaphp_slot_list);
+ if (slot->dn == dn)
+ return slot;
+ }
+
+ return NULL;
+}
+
+static struct pci_dev *dlpar_find_new_dev(struct pci_bus *parent,
+ struct device_node *dev_dn)
+{
+ struct pci_dev *tmp = NULL;
+ struct device_node *child_dn;
+
+ list_for_each_entry(tmp, &parent->devices, bus_list) {
+ child_dn = pci_device_to_OF_node(tmp);
+ if (child_dn == dev_dn)
+ return tmp;
+ }
+ return NULL;
+}
+
+static void dlpar_pci_add_bus(struct device_node *dn)
+{
+ struct pci_dn *pdn = PCI_DN(dn);
+ struct pci_controller *phb = pdn->phb;
+ struct pci_dev *dev = NULL;
+
+ eeh_add_device_tree_early(pdn);
+
+ /* Add EADS device to PHB bus, adding new entry to bus->devices */
+ dev = of_create_pci_dev(dn, phb->bus, pdn->devfn);
+ if (!dev) {
+ printk(KERN_ERR "%s: failed to create pci dev for %s\n",
+ __func__, dn->full_name);
+ return;
+ }
+
+ /* Scan below the new bridge */
+ if (pci_is_bridge(dev))
+ of_scan_pci_bridge(dev);
+
+ /* Map IO space for child bus, which may or may not succeed */
+ pcibios_map_io_space(dev->subordinate);
+
+ /* Finish adding it: resource allocation, adding devices, etc.
+ * Note that we need to perform the finish pass on the -parent-
+ * bus of the EADS bridge so the bridge device itself gets
+ * properly added.
+ */
+ pcibios_finish_adding_to_bus(phb->bus);
+}
+
+static int dlpar_add_pci_slot(char *drc_name, struct device_node *dn)
+{
+ struct pci_dev *dev;
+ struct pci_controller *phb;
+
+ if (pcibios_find_pci_bus(dn))
+ return -EINVAL;
+
+ /* Add pci bus */
+ dlpar_pci_add_bus(dn);
+
+ /* Confirm new bridge dev was created */
+ phb = PCI_DN(dn)->phb;
+ dev = dlpar_find_new_dev(phb->bus, dn);
+
+ if (!dev) {
+ printk(KERN_ERR "%s: unable to add bus %s\n", __func__,
+ drc_name);
+ return -EIO;
+ }
+
+ if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
+ printk(KERN_ERR "%s: unexpected header type %d, unable to add bus %s\n",
+ __func__, dev->hdr_type, drc_name);
+ return -EIO;
+ }
+
+ /* Add hotplug slot */
+ if (rpaphp_add_slot(dn)) {
+ printk(KERN_ERR "%s: unable to add hotplug slot %s\n",
+ __func__, drc_name);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int dlpar_remove_phb(char *drc_name, struct device_node *dn)
+{
+ struct slot *slot;
+ struct pci_dn *pdn;
+ int rc = 0;
+
+ if (!pcibios_find_pci_bus(dn))
+ return -EINVAL;
+
+ /* If pci slot is hotpluggable, use hotplug to remove it */
+ slot = find_php_slot(dn);
+ if (slot && rpaphp_deregister_slot(slot)) {
+ printk(KERN_ERR "%s: unable to remove hotplug slot %s\n",
+ __func__, drc_name);
+ return -EIO;
+ }
+
+ pdn = dn->data;
+ BUG_ON(!pdn || !pdn->phb);
+ rc = remove_phb_dynamic(pdn->phb);
+ if (rc < 0)
+ return rc;
+
+ pdn->phb = NULL;
+
+ return 0;
+}
+
+static int dlpar_add_phb(char *drc_name, struct device_node *dn)
+{
+ struct pci_controller *phb;
+
+ if (PCI_DN(dn) && PCI_DN(dn)->phb) {
+ /* PHB already exists */
+ return -EINVAL;
+ }
+
+ phb = init_phb_dynamic(dn);
+ if (!phb)
+ return -EIO;
+
+ if (rpaphp_add_slot(dn)) {
+ printk(KERN_ERR "%s: unable to add hotplug slot %s\n",
+ __func__, drc_name);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn)
+{
+ if (vio_find_node(dn))
+ return -EINVAL;
+
+ if (!vio_register_device_node(dn)) {
+ printk(KERN_ERR
+ "%s: failed to register vio node %s\n",
+ __func__, drc_name);
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * dlpar_add_slot - DLPAR add an I/O Slot
+ * @drc_name: drc-name of newly added slot
+ *
+ * Make the hotplug module and the kernel aware of a newly added I/O Slot.
+ * Return Codes:
+ * 0 Success
+ * -ENODEV Not a valid drc_name
+ * -EINVAL Slot already added
+ * -ERESTARTSYS Signalled before obtaining lock
+ * -EIO Internal PCI Error
+ */
+int dlpar_add_slot(char *drc_name)
+{
+ struct device_node *dn = NULL;
+ int node_type;
+ int rc = -EIO;
+
+ if (mutex_lock_interruptible(&rpadlpar_mutex))
+ return -ERESTARTSYS;
+
+ /* Find newly added node */
+ dn = find_dlpar_node(drc_name, &node_type);
+ if (!dn) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ switch (node_type) {
+ case NODE_TYPE_VIO:
+ rc = dlpar_add_vio_slot(drc_name, dn);
+ break;
+ case NODE_TYPE_SLOT:
+ rc = dlpar_add_pci_slot(drc_name, dn);
+ break;
+ case NODE_TYPE_PHB:
+ rc = dlpar_add_phb(drc_name, dn);
+ break;
+ }
+
+ printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name);
+exit:
+ mutex_unlock(&rpadlpar_mutex);
+ return rc;
+}
+
+/**
+ * dlpar_remove_vio_slot - DLPAR remove a virtual I/O Slot
+ * @drc_name: drc-name of the slot being removed
+ * @dn: &device_node
+ *
+ * Remove the kernel and hotplug representations of an I/O Slot.
+ * Return Codes:
+ * 0 Success
+ * -EINVAL Vio dev doesn't exist
+ */
+static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn)
+{
+ struct vio_dev *vio_dev;
+
+ vio_dev = vio_find_node(dn);
+ if (!vio_dev)
+ return -EINVAL;
+
+ vio_unregister_device(vio_dev);
+ return 0;
+}
+
+/**
+ * dlpar_remove_pci_slot - DLPAR remove a PCI I/O Slot
+ * @drc_name: drc-name of the slot being removed
+ * @dn: &device_node
+ *
+ * Remove the kernel and hotplug representations of a PCI I/O Slot.
+ * Return Codes:
+ * 0 Success
+ * -ENODEV Not a valid drc_name
+ * -EIO Internal PCI Error
+ */
+int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
+{
+ struct pci_bus *bus;
+ struct slot *slot;
+ int ret = 0;
+
+ pci_lock_rescan_remove();
+
+ bus = pcibios_find_pci_bus(dn);
+ if (!bus) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ pr_debug("PCI: Removing PCI slot below EADS bridge %s\n",
+ bus->self ? pci_name(bus->self) : "<!PHB!>");
+
+ slot = find_php_slot(dn);
+ if (slot) {
+ pr_debug("PCI: Removing hotplug slot for %04x:%02x...\n",
+ pci_domain_nr(bus), bus->number);
+
+ if (rpaphp_deregister_slot(slot)) {
+ printk(KERN_ERR
+ "%s: unable to remove hotplug slot %s\n",
+ __func__, drc_name);
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+ /* Remove all devices below slot */
+ pcibios_remove_pci_devices(bus);
+
+ /* Unmap PCI IO space */
+ if (pcibios_unmap_io_space(bus)) {
+ printk(KERN_ERR "%s: failed to unmap bus range\n",
+ __func__);
+ ret = -ERANGE;
+ goto out;
+ }
+
+ /* Remove the EADS bridge device itself */
+ BUG_ON(!bus->self);
+ pr_debug("PCI: Now removing bridge device %s\n", pci_name(bus->self));
+ pci_stop_and_remove_bus_device(bus->self);
+
+ out:
+ pci_unlock_rescan_remove();
+ return ret;
+}
+
+/**
+ * dlpar_remove_slot - DLPAR remove an I/O Slot
+ * @drc_name: drc-name of the slot being removed
+ *
+ * Remove the kernel and hotplug representations of an I/O Slot.
+ * Return Codes:
+ * 0 Success
+ * -ENODEV Not a valid drc_name
+ * -EINVAL Slot already removed
+ * -ERESTARTSYS Signalled before obtaining lock
+ * -EIO Internal Error
+ */
+int dlpar_remove_slot(char *drc_name)
+{
+ struct device_node *dn;
+ int node_type;
+ int rc = 0;
+
+ if (mutex_lock_interruptible(&rpadlpar_mutex))
+ return -ERESTARTSYS;
+
+ dn = find_dlpar_node(drc_name, &node_type);
+ if (!dn) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ switch (node_type) {
+ case NODE_TYPE_VIO:
+ rc = dlpar_remove_vio_slot(drc_name, dn);
+ break;
+ case NODE_TYPE_PHB:
+ rc = dlpar_remove_phb(drc_name, dn);
+ break;
+ case NODE_TYPE_SLOT:
+ rc = dlpar_remove_pci_slot(drc_name, dn);
+ break;
+ }
+ vm_unmap_aliases();
+
+ printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name);
+exit:
+ mutex_unlock(&rpadlpar_mutex);
+ return rc;
+}
+
+static inline int is_dlpar_capable(void)
+{
+ int rc = rtas_token("ibm,configure-connector");
+
+ return (int) (rc != RTAS_UNKNOWN_SERVICE);
+}
+
+int __init rpadlpar_io_init(void)
+{
+ int rc = 0;
+
+ if (!is_dlpar_capable()) {
+ printk(KERN_WARNING "%s: partition not DLPAR capable\n",
+ __func__);
+ return -EPERM;
+ }
+
+ rc = dlpar_sysfs_init();
+ return rc;
+}
+
+void rpadlpar_io_exit(void)
+{
+ dlpar_sysfs_exit();
+}
+
+module_init(rpadlpar_io_init);
+module_exit(rpadlpar_io_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/hotplug/rpadlpar_sysfs.c b/drivers/pci/hotplug/rpadlpar_sysfs.c
new file mode 100644
index 000000000..a796301ea
--- /dev/null
+++ b/drivers/pci/hotplug/rpadlpar_sysfs.c
@@ -0,0 +1,130 @@
+/*
+ * Interface for Dynamic Logical Partitioning of I/O Slots on
+ * RPA-compliant PPC64 platform.
+ *
+ * John Rose <johnrose@austin.ibm.com>
+ * October 2003
+ *
+ * Copyright (C) 2003 IBM.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include "rpadlpar.h"
+#include "../pci.h"
+
+#define DLPAR_KOBJ_NAME "control"
+
+/* Those two have no quotes because they are passed to __ATTR(), which
+ * stringifies the argument (yuck!)
+ */
+#define ADD_SLOT_ATTR_NAME add_slot
+#define REMOVE_SLOT_ATTR_NAME remove_slot
+
+#define MAX_DRC_NAME_LEN 64
+
+static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t nbytes)
+{
+ char drc_name[MAX_DRC_NAME_LEN];
+ char *end;
+ int rc;
+
+ if (nbytes >= MAX_DRC_NAME_LEN)
+ return 0;
+
+ memcpy(drc_name, buf, nbytes);
+
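+ /* Terminate the name at the trailing newline, if any */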
+ end = strchr(drc_name, '\n');
+ if (!end)
+ end = &drc_name[nbytes];
+ *end = '\0';
+
+ rc = dlpar_add_slot(drc_name);
+ if (rc)
+ return rc;
+
+ return nbytes;
+}
+
+static ssize_t add_slot_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t remove_slot_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t nbytes)
+{
+ char drc_name[MAX_DRC_NAME_LEN];
+ int rc;
+ char *end;
+
+ if (nbytes >= MAX_DRC_NAME_LEN)
+ return 0;
+
+ memcpy(drc_name, buf, nbytes);
+
+ end = strchr(drc_name, '\n');
+ if (!end)
+ end = &drc_name[nbytes];
+ *end = '\0';
+
+ rc = dlpar_remove_slot(drc_name);
+ if (rc)
+ return rc;
+
+ return nbytes;
+}
+
+static ssize_t remove_slot_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0\n");
+}
+
+static struct kobj_attribute add_slot_attr =
+ __ATTR(ADD_SLOT_ATTR_NAME, 0644, add_slot_show, add_slot_store);
+
+static struct kobj_attribute remove_slot_attr =
+ __ATTR(REMOVE_SLOT_ATTR_NAME, 0644, remove_slot_show, remove_slot_store);
+
+static struct attribute *default_attrs[] = {
+ &add_slot_attr.attr,
+ &remove_slot_attr.attr,
+ NULL,
+};
+
+static struct attribute_group dlpar_attr_group = {
+ .attrs = default_attrs,
+};
+
+static struct kobject *dlpar_kobj;
+
+int dlpar_sysfs_init(void)
+{
+ int error;
+
+ dlpar_kobj = kobject_create_and_add(DLPAR_KOBJ_NAME,
+ &pci_slots_kset->kobj);
+ if (!dlpar_kobj)
+ return -EINVAL;
+
+ error = sysfs_create_group(dlpar_kobj, &dlpar_attr_group);
+ if (error)
+ kobject_put(dlpar_kobj);
+ return error;
+}
+
+void dlpar_sysfs_exit(void)
+{
+ sysfs_remove_group(dlpar_kobj, &dlpar_attr_group);
+ kobject_put(dlpar_kobj);
+}
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h
new file mode 100644
index 000000000..b2593e876
--- /dev/null
+++ b/drivers/pci/hotplug/rpaphp.h
@@ -0,0 +1,103 @@
+/*
+ * PCI Hot Plug Controller Driver for RPA-compliant PPC64 platform.
+ *
+ * Copyright (C) 2003 Linda Xie <lxie@us.ibm.com>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <lxie@us.ibm.com>,
+ *
+ */
+
+#ifndef _PPC64PHP_H
+#define _PPC64PHP_H
+
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+
+#define DR_INDICATOR 9002
+#define DR_ENTITY_SENSE 9003
+
+#define POWER_ON 100
+#define POWER_OFF 0
+
+#define LED_OFF 0
+#define LED_ON 1 /* continuous on */
+#define LED_ID 2 /* slow blinking */
+#define LED_ACTION 3 /* fast blinking */
+
+/* Sensor values from rtas_get_sensor() */
+#define EMPTY 0 /* No card in slot */
+#define PRESENT 1 /* Card in slot */
+
+#define MY_NAME "rpaphp"
+extern bool rpaphp_debug;
+#define dbg(format, arg...) \
+ do { \
+ if (rpaphp_debug) \
+ printk(KERN_DEBUG "%s: " format, \
+ MY_NAME , ## arg); \
+ } while (0)
+#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
+#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
+#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
+
+/* slot states */
+
+#define NOT_VALID 3
+#define NOT_CONFIGURED 2
+#define CONFIGURED 1
+#define EMPTY 0 /* same value as the EMPTY sensor state above */
+
+/*
+ * struct slot - slot information for each *physical* slot
+ */
+struct slot {
+ struct list_head rpaphp_slot_list;
+ int state;
+ u32 index;
+ u32 type;
+ u32 power_domain;
+ char *name;
+ struct device_node *dn;
+ struct pci_bus *bus;
+ struct list_head *pci_devs;
+ struct hotplug_slot *hotplug_slot;
+};
+
+extern struct hotplug_slot_ops rpaphp_hotplug_slot_ops;
+extern struct list_head rpaphp_slot_head;
+
+/* function prototypes */
+
+/* rpaphp_pci.c */
+int rpaphp_enable_slot(struct slot *slot);
+int rpaphp_get_sensor_state(struct slot *slot, int *state);
+
+/* rpaphp_core.c */
+int rpaphp_add_slot(struct device_node *dn);
+int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
+ char **drc_name, char **drc_type, int *drc_power_domain);
+
+/* rpaphp_slot.c */
+void dealloc_slot_struct(struct slot *slot);
+struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain);
+int rpaphp_register_slot(struct slot *slot);
+int rpaphp_deregister_slot(struct slot *slot);
+
+#endif /* _PPC64PHP_H */
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
new file mode 100644
index 000000000..f2945fa73
--- /dev/null
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -0,0 +1,448 @@
+/*
+ * PCI Hot Plug Controller Driver for RPA-compliant PPC64 platform.
+ * Copyright (C) 2003 Linda Xie <lxie@us.ibm.com>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <lxie@us.ibm.com>
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <asm/eeh.h> /* for eeh_add_device() */
+#include <asm/rtas.h> /* rtas_call */
+#include <asm/pci-bridge.h> /* for pci_controller */
+#include "../pci.h" /* for pci_add_new_bus */
+ /* and pci_do_scan_bus */
+#include "rpaphp.h"
+
+bool rpaphp_debug;
+LIST_HEAD(rpaphp_slot_head);
+EXPORT_SYMBOL_GPL(rpaphp_slot_head);
+
+#define DRIVER_VERSION "0.1"
+#define DRIVER_AUTHOR "Linda Xie <lxie@us.ibm.com>"
+#define DRIVER_DESC "RPA Hot Plug PCI Controller Driver"
+
+#define MAX_LOC_CODE 128
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+module_param_named(debug, rpaphp_debug, bool, 0644);
+
+/**
+ * set_attention_status - set attention LED
+ * @hotplug_slot: target &hotplug_slot
+ * @value: LED control value
+ *
+ * echo 0 > attention -- set LED OFF
+ * echo 1 > attention -- set LED ON
+ * echo 2 > attention -- set LED ID(identify, light is blinking)
+ */
+static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
+{
+ int rc;
+ struct slot *slot = (struct slot *)hotplug_slot->private;
+
+ switch (value) {
+ case 0:
+ case 1:
+ case 2:
+ break;
+ default:
+ value = 1;
+ break;
+ }
+
+ rc = rtas_set_indicator(DR_INDICATOR, slot->index, value);
+ if (!rc)
+ hotplug_slot->info->attention_status = value;
+
+ return rc;
+}
+
+/**
+ * get_power_status - get power status of a slot
+ * @hotplug_slot: slot to get status
+ * @value: pointer to store status
+ */
+static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ int retval, level;
+ struct slot *slot = (struct slot *)hotplug_slot->private;
+
+ retval = rtas_get_power_level(slot->power_domain, &level);
+ if (!retval)
+ *value = level;
+ return retval;
+}
+
+/**
+ * get_attention_status - get attention LED status
+ * @hotplug_slot: slot to get status
+ * @value: pointer to store status
+ */
+static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = (struct slot *)hotplug_slot->private;
+ *value = slot->hotplug_slot->info->attention_status;
+ return 0;
+}
+
+static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = (struct slot *)hotplug_slot->private;
+ int rc, state;
+
+ rc = rpaphp_get_sensor_state(slot, &state);
+
+ *value = NOT_VALID;
+ if (rc)
+ return rc;
+
+ if (state == EMPTY)
+ *value = EMPTY;
+ else if (state == PRESENT)
+ *value = slot->state;
+
+ return 0;
+}
+
+static enum pci_bus_speed get_max_bus_speed(struct slot *slot)
+{
+ enum pci_bus_speed speed;
+ switch (slot->type) {
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ speed = PCI_SPEED_33MHz; /* speed for case 1-6 */
+ break;
+ case 7:
+ case 8:
+ speed = PCI_SPEED_66MHz;
+ break;
+ case 11:
+ case 14:
+ speed = PCI_SPEED_66MHz_PCIX;
+ break;
+ case 12:
+ case 15:
+ speed = PCI_SPEED_100MHz_PCIX;
+ break;
+ case 13:
+ case 16:
+ speed = PCI_SPEED_133MHz_PCIX;
+ break;
+ default:
+ speed = PCI_SPEED_UNKNOWN;
+ break;
+ }
+
+ return speed;
+}
+
+static int get_children_props(struct device_node *dn, const int **drc_indexes,
+ const int **drc_names, const int **drc_types,
+ const int **drc_power_domains)
+{
+ const int *indexes, *names, *types, *domains;
+
+ indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
+ names = of_get_property(dn, "ibm,drc-names", NULL);
+ types = of_get_property(dn, "ibm,drc-types", NULL);
+ domains = of_get_property(dn, "ibm,drc-power-domains", NULL);
+
+ if (!indexes || !names || !types || !domains) {
+ /* Slot does not have dynamically-removable children */
+ return -EINVAL;
+ }
+ if (drc_indexes)
+ *drc_indexes = indexes;
+ if (drc_names)
+ /* &drc_names[1] holds the packed, NUL-terminated slot names */
+ *drc_names = names;
+ if (drc_types)
+ /* &drc_types[1] holds the packed, NUL-terminated slot types */
+ *drc_types = types;
+ if (drc_power_domains)
+ *drc_power_domains = domains;
+
+ return 0;
+}
+
+/* To get the DRC props describing the current node, first obtain its
+ * my-drc-index property. Next obtain the DRC list from its parent. Use
+ * the my-drc-index for correlation, and obtain the requested properties.
+ */
+int rpaphp_get_drc_props(struct device_node *dn, int *drc_index,
+ char **drc_name, char **drc_type, int *drc_power_domain)
+{
+ const int *indexes, *names;
+ const int *types, *domains;
+ const unsigned int *my_index;
+ char *name_tmp, *type_tmp;
+ int i, rc;
+
+ my_index = of_get_property(dn, "ibm,my-drc-index", NULL);
+ if (!my_index) {
+ /* Node isn't DLPAR/hotplug capable */
+ return -EINVAL;
+ }
+
+ rc = get_children_props(dn->parent, &indexes, &names, &types, &domains);
+ if (rc < 0) {
+ return -EINVAL;
+ }
+
+ name_tmp = (char *) &names[1];
+ type_tmp = (char *) &types[1];
+
+ /* Iterate through parent properties, looking for my-drc-index */
+ for (i = 0; i < be32_to_cpu(indexes[0]); i++) {
+ if ((unsigned int) indexes[i + 1] == *my_index) {
+ if (drc_name)
+ *drc_name = name_tmp;
+ if (drc_type)
+ *drc_type = type_tmp;
+ if (drc_index)
+ *drc_index = be32_to_cpu(*my_index);
+ if (drc_power_domain)
+ *drc_power_domain = be32_to_cpu(domains[i+1]);
+ return 0;
+ }
+ name_tmp += (strlen(name_tmp) + 1);
+ type_tmp += (strlen(type_tmp) + 1);
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(rpaphp_get_drc_props);
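The ibm,drc-* properties parsed above are parallel arrays: a leading count cell followed by that many entries, with the names and types packed as NUL-terminated strings. The freestanding sketch below walks the same layout; the shapes and values are invented for illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* "ibm,drc-indexes": count, then count entries (be32 in the kernel;
         * already converted here for simplicity) */
        unsigned int indexes[] = { 2, 0x1000, 0x1001 };
        /* "ibm,drc-names": a 4-byte count cell, then packed strings -
         * (char *)&names[1] in the code above skips that first cell */
        char names[] = "\0\0\0\2" "SLOT 1\0" "SLOT 2";
        const char *name = &names[4];
        unsigned int i;

        for (i = 0; i < indexes[0]; i++) {
                printf("drc-index 0x%x -> drc-name %s\n", indexes[i + 1], name);
                name += strlen(name) + 1; /* step over the NUL terminator */
        }
        return 0;
}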
+
+static int is_php_type(char *drc_type)
+{
+ unsigned long value;
+ char *endptr;
+
+ /* PCI Hotplug nodes have an integer for drc_type */
+ value = simple_strtoul(drc_type, &endptr, 10);
+ if (endptr == drc_type)
+ return 0;
+
+ return 1;
+}
+
+/**
+ * is_php_dn() - return 1 if this is a hotpluggable PCI slot, else 0
+ * @dn: target &device_node
+ * @indexes: passed to get_children_props()
+ * @names: passed to get_children_props()
+ * @types: returned from get_children_props()
+ * @power_domains: passed to get_children_props()
+ *
+ * This routine returns true only if the device node is a hotpluggable
+ * slot. It returns false for built-in PCI slots (even when those
+ * built-in slots are DLPAR-capable).
+ */
+static int is_php_dn(struct device_node *dn, const int **indexes,
+ const int **names, const int **types, const int **power_domains)
+{
+ const int *drc_types;
+ int rc;
+
+ rc = get_children_props(dn, indexes, names, &drc_types, power_domains);
+ if (rc < 0)
+ return 0;
+
+ if (!is_php_type((char *) &drc_types[1]))
+ return 0;
+
+ *types = drc_types;
+ return 1;
+}
+
+/**
+ * rpaphp_add_slot -- declare a hotplug slot to the hotplug subsystem.
+ * @dn: device node of slot
+ *
+ * This subroutine will register a hotpluggable slot with the
+ * PCI hotplug infrastructure. This routine is typically called
+ * during boot time, if the hotplug slots are present at boot time,
+ * or is called later, by the dlpar add code, if the slot is
+ * being dynamically added during runtime.
+ *
+ * If the device node points at an embedded (built-in) slot, this
+ * routine will just return without doing anything, since embedded
+ * slots cannot be hotplugged.
+ *
+ * To remove a slot, it suffices to call rpaphp_deregister_slot().
+ */
+int rpaphp_add_slot(struct device_node *dn)
+{
+ struct slot *slot;
+ int retval = 0;
+ int i;
+ const int *indexes, *names, *types, *power_domains;
+ char *name, *type;
+
+ if (!dn->name || strcmp(dn->name, "pci"))
+ return 0;
+
+ /* If this is not a hotplug slot, return without doing anything. */
+ if (!is_php_dn(dn, &indexes, &names, &types, &power_domains))
+ return 0;
+
+ dbg("Entry %s: dn->full_name=%s\n", __func__, dn->full_name);
+
+ /* register PCI devices */
+ name = (char *) &names[1];
+ type = (char *) &types[1];
+ for (i = 0; i < be32_to_cpu(indexes[0]); i++) {
+ int index;
+
+ index = be32_to_cpu(indexes[i + 1]);
+ slot = alloc_slot_struct(dn, index, name,
+ be32_to_cpu(power_domains[i + 1]));
+ if (!slot)
+ return -ENOMEM;
+
+ slot->type = simple_strtoul(type, NULL, 10);
+
+ dbg("Found drc-index:0x%x drc-name:%s drc-type:%s\n",
+ index, name, type);
+
+ retval = rpaphp_enable_slot(slot);
+ if (!retval)
+ retval = rpaphp_register_slot(slot);
+
+ if (retval)
+ dealloc_slot_struct(slot);
+
+ name += strlen(name) + 1;
+ type += strlen(type) + 1;
+ }
+ dbg("%s - Exit: rc[%d]\n", __func__, retval);
+
+ /* XXX FIXME: a failure is reported only if the last entry in the loop failed */
+ return retval;
+}
+EXPORT_SYMBOL_GPL(rpaphp_add_slot);
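On the FIXME above: retval is overwritten on each pass, so an early failure is lost if a later slot succeeds. One conventional remedy is to latch the first error and continue, sketched standalone here with process_one() standing in for the enable/register pair:

#include <stdio.h>

/* stand-in for the rpaphp_enable_slot()/rpaphp_register_slot() pair */
static int process_one(int i)
{
        return (i == 1) ? -5 : 0; /* pretend the second entry fails with -EIO */
}

int main(void)
{
        int i, rc, first_err = 0;

        for (i = 0; i < 3; i++) {
                rc = process_one(i);
                /* latch the first failure, but keep going so the
                 * remaining slots are still brought up */
                if (rc && !first_err)
                        first_err = rc;
        }
        printf("first_err = %d\n", first_err); /* prints -5 */
        return first_err ? 1 : 0;
}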
+
+static void __exit cleanup_slots(void)
+{
+ struct list_head *tmp, *n;
+ struct slot *slot;
+
+ /*
+ * Unregister all of our slots with the pci_hotplug subsystem,
+ * and free up all memory that we had allocated.
+ * memory will be freed in release_slot callback.
+ */
+
+ list_for_each_safe(tmp, n, &rpaphp_slot_head) {
+ slot = list_entry(tmp, struct slot, rpaphp_slot_list);
+ list_del(&slot->rpaphp_slot_list);
+ pci_hp_deregister(slot->hotplug_slot);
+ }
+}
+
+static int __init rpaphp_init(void)
+{
+ struct device_node *dn;
+
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+
+ for_each_node_by_name(dn, "pci")
+ rpaphp_add_slot(dn);
+
+ return 0;
+}
+
+static void __exit rpaphp_exit(void)
+{
+ cleanup_slots();
+}
+
+static int enable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = (struct slot *)hotplug_slot->private;
+ int state;
+ int retval;
+
+ if (slot->state == CONFIGURED)
+ return 0;
+
+ retval = rpaphp_get_sensor_state(slot, &state);
+ if (retval)
+ return retval;
+
+ if (state == PRESENT) {
+ pci_lock_rescan_remove();
+ pcibios_add_pci_devices(slot->bus);
+ pci_unlock_rescan_remove();
+ slot->state = CONFIGURED;
+ } else if (state == EMPTY) {
+ slot->state = EMPTY;
+ } else {
+ err("%s: slot[%s] is in invalid state\n", __func__, slot->name);
+ slot->state = NOT_VALID;
+ return -EINVAL;
+ }
+
+ slot->bus->max_bus_speed = get_max_bus_speed(slot);
+ return 0;
+}
+
+static int disable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = (struct slot *)hotplug_slot->private;
+ if (slot->state == NOT_CONFIGURED)
+ return -EINVAL;
+
+ pci_lock_rescan_remove();
+ pcibios_remove_pci_devices(slot->bus);
+ pci_unlock_rescan_remove();
+ vm_unmap_aliases();
+
+ slot->state = NOT_CONFIGURED;
+ return 0;
+}
+
+struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
+ .enable_slot = enable_slot,
+ .disable_slot = disable_slot,
+ .set_attention_status = set_attention_status,
+ .get_power_status = get_power_status,
+ .get_attention_status = get_attention_status,
+ .get_adapter_status = get_adapter_status,
+};
+
+module_init(rpaphp_init);
+module_exit(rpaphp_exit);
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
new file mode 100644
index 000000000..9243f3e7a
--- /dev/null
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -0,0 +1,135 @@
+/*
+ * PCI Hot Plug Controller Driver for RPA-compliant PPC64 platform.
+ * Copyright (C) 2003 Linda Xie <lxie@us.ibm.com>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <lxie@us.ibm.com>
+ *
+ */
+#include <linux/pci.h>
+#include <linux/string.h>
+
+#include <asm/pci-bridge.h>
+#include <asm/rtas.h>
+#include <asm/machdep.h>
+
+#include "../pci.h" /* for pci_add_new_bus */
+#include "rpaphp.h"
+
+int rpaphp_get_sensor_state(struct slot *slot, int *state)
+{
+ int rc;
+ int setlevel;
+
+ rc = rtas_get_sensor(DR_ENTITY_SENSE, slot->index, state);
+
+ if (rc < 0) {
+ if (rc == -EFAULT || rc == -EEXIST) {
+ dbg("%s: slot must be power up to get sensor-state\n",
+ __func__);
+
+ /* some slots have to be powered up
+ * before get-sensor will succeed.
+ */
+ rc = rtas_set_power_level(slot->power_domain, POWER_ON,
+ &setlevel);
+ if (rc < 0) {
+ dbg("%s: power on slot[%s] failed rc=%d.\n",
+ __func__, slot->name, rc);
+ } else {
+ rc = rtas_get_sensor(DR_ENTITY_SENSE,
+ slot->index, state);
+ }
+ } else if (rc == -ENODEV)
+ info("%s: slot is unusable\n", __func__);
+ else
+ err("%s failed to get sensor state\n", __func__);
+ }
+ return rc;
+}
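The retry logic above (probe the sensor, and on the two power-related errors power the slot on and probe once more) is shown in isolation below; get_sensor() and set_power() are stand-ins for the RTAS calls:

#include <stdio.h>

#define EFAULT 14
#define EEXIST 17

static int powered;

/* stand-in for rtas_get_sensor(): fails until the slot is powered */
static int get_sensor(int *state)
{
        if (!powered)
                return -EFAULT;
        *state = 1; /* PRESENT */
        return 0;
}

/* stand-in for rtas_set_power_level(..., POWER_ON, ...) */
static int set_power(void)
{
        powered = 1;
        return 0;
}

int main(void)
{
        int state = 0;
        int rc = get_sensor(&state);

        /* on the two "needs power" errors, power the slot on and retry once */
        if ((rc == -EFAULT || rc == -EEXIST) && !set_power())
                rc = get_sensor(&state);
        printf("rc=%d state=%d\n", rc, state); /* rc=0 state=1 */
        return 0;
}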
+
+/**
+ * rpaphp_enable_slot - record slot state, config pci device
+ * @slot: target &slot
+ *
+ * Initialize values in the slot, and the hotplug_slot info
+ * structures to indicate if there is a pci card plugged into
+ * the slot. If the slot is not empty, run the pcibios routine
+ * to get pcibios stuff correctly set up.
+ */
+int rpaphp_enable_slot(struct slot *slot)
+{
+ int rc, level, state;
+ struct pci_bus *bus;
+ struct hotplug_slot_info *info = slot->hotplug_slot->info;
+
+ info->adapter_status = NOT_VALID;
+ slot->state = EMPTY;
+
+ /* Find out if the power is turned on for the slot */
+ rc = rtas_get_power_level(slot->power_domain, &level);
+ if (rc)
+ return rc;
+ info->power_status = level;
+
+ /* Figure out if there is an adapter in the slot */
+ rc = rpaphp_get_sensor_state(slot, &state);
+ if (rc)
+ return rc;
+
+ bus = pcibios_find_pci_bus(slot->dn);
+ if (!bus) {
+ err("%s: no pci_bus for dn %s\n", __func__, slot->dn->full_name);
+ return -EINVAL;
+ }
+
+ info->adapter_status = EMPTY;
+ slot->bus = bus;
+ slot->pci_devs = &bus->devices;
+
+ /* if there's an adapter in the slot, go add the pci devices */
+ if (state == PRESENT) {
+ info->adapter_status = NOT_CONFIGURED;
+ slot->state = NOT_CONFIGURED;
+
+ /* a non-empty slot must have a child device node */
+ if (!slot->dn->child) {
+ err("%s: slot[%s]'s device_node doesn't have child for adapter\n",
+ __func__, slot->name);
+ return -EINVAL;
+ }
+
+ if (list_empty(&bus->devices))
+ pcibios_add_pci_devices(bus);
+
+ if (!list_empty(&bus->devices)) {
+ info->adapter_status = CONFIGURED;
+ slot->state = CONFIGURED;
+ }
+
+ if (rpaphp_debug) {
+ struct pci_dev *dev;
+ dbg("%s: pci_devs of slot[%s]\n", __func__, slot->dn->full_name);
+ list_for_each_entry (dev, &bus->devices, bus_list)
+ dbg("\t%s\n", pci_name(dev));
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
new file mode 100644
index 000000000..a6082cc26
--- /dev/null
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -0,0 +1,147 @@
+/*
+ * RPA Virtual I/O device functions
+ * Copyright (C) 2004 Linda Xie <lxie@us.ibm.com>
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <lxie@us.ibm.com>
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sysfs.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+#include <asm/rtas.h>
+#include "rpaphp.h"
+
+/* free up the memory used by a slot */
+static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = (struct slot *) hotplug_slot->private;
+ dealloc_slot_struct(slot);
+}
+
+void dealloc_slot_struct(struct slot *slot)
+{
+ kfree(slot->hotplug_slot->info);
+ kfree(slot->name);
+ kfree(slot->hotplug_slot);
+ kfree(slot);
+}
+
+struct slot *alloc_slot_struct(struct device_node *dn,
+ int drc_index, char *drc_name, int power_domain)
+{
+ struct slot *slot;
+
+ slot = kzalloc(sizeof(struct slot), GFP_KERNEL);
+ if (!slot)
+ goto error_nomem;
+ slot->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
+ if (!slot->hotplug_slot)
+ goto error_slot;
+ slot->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
+ GFP_KERNEL);
+ if (!slot->hotplug_slot->info)
+ goto error_hpslot;
+ slot->name = kstrdup(drc_name, GFP_KERNEL);
+ if (!slot->name)
+ goto error_info;
+ slot->dn = dn;
+ slot->index = drc_index;
+ slot->power_domain = power_domain;
+ slot->hotplug_slot->private = slot;
+ slot->hotplug_slot->ops = &rpaphp_hotplug_slot_ops;
+ slot->hotplug_slot->release = &rpaphp_release_slot;
+
+ return (slot);
+
+error_info:
+ kfree(slot->hotplug_slot->info);
+error_hpslot:
+ kfree(slot->hotplug_slot);
+error_slot:
+ kfree(slot);
+error_nomem:
+ return NULL;
+}
+
+static int is_registered(struct slot *slot)
+{
+ struct slot *tmp_slot;
+
+ list_for_each_entry(tmp_slot, &rpaphp_slot_head, rpaphp_slot_list) {
+ if (!strcmp(tmp_slot->name, slot->name))
+ return 1;
+ }
+ return 0;
+}
+
+int rpaphp_deregister_slot(struct slot *slot)
+{
+ int retval = 0;
+ struct hotplug_slot *php_slot = slot->hotplug_slot;
+
+ dbg("%s - Entry: deregistering slot=%s\n",
+ __func__, slot->name);
+
+ list_del(&slot->rpaphp_slot_list);
+
+ retval = pci_hp_deregister(php_slot);
+ if (retval)
+ err("Problem unregistering a slot %s\n", slot->name);
+
+ dbg("%s - Exit: rc[%d]\n", __func__, retval);
+ return retval;
+}
+EXPORT_SYMBOL_GPL(rpaphp_deregister_slot);
+
+int rpaphp_register_slot(struct slot *slot)
+{
+ struct hotplug_slot *php_slot = slot->hotplug_slot;
+ int retval;
+ int slotno;
+
+ dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n",
+ __func__, slot->dn->full_name, slot->index, slot->name,
+ slot->power_domain, slot->type);
+
+ /* should not try to register the same slot twice */
+ if (is_registered(slot)) {
+ err("rpaphp_register_slot: slot[%s] is already registered\n", slot->name);
+ return -EAGAIN;
+ }
+
+ if (slot->dn->child)
+ slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn);
+ else
+ slotno = -1;
+ retval = pci_hp_register(php_slot, slot->bus, slotno, slot->name);
+ if (retval) {
+ err("pci_hp_register failed with error %d\n", retval);
+ return retval;
+ }
+
+ /* add slot to our internal list */
+ list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head);
+ info("Slot [%s] registered\n", slot->name);
+ return 0;
+}
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
new file mode 100644
index 000000000..d77e46bca
--- /dev/null
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -0,0 +1,214 @@
+/*
+ * PCI Hot Plug Controller Driver for System z
+ *
+ * Copyright 2012 IBM Corp.
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <asm/pci_debug.h>
+#include <asm/sclp.h>
+
+#define SLOT_NAME_SIZE 10
+static LIST_HEAD(s390_hotplug_slot_list);
+
+MODULE_AUTHOR("Jan Glauber <jang@linux.vnet.ibm.com");
+MODULE_DESCRIPTION("Hot Plug PCI Controller for System z");
+MODULE_LICENSE("GPL");
+
+static int zpci_fn_configured(enum zpci_state state)
+{
+ return state == ZPCI_FN_STATE_CONFIGURED ||
+ state == ZPCI_FN_STATE_ONLINE;
+}
+
+/*
+ * struct slot - slot information for each *physical* slot
+ */
+struct slot {
+ struct list_head slot_list;
+ struct hotplug_slot *hotplug_slot;
+ struct zpci_dev *zdev;
+};
+
+static inline int slot_configure(struct slot *slot)
+{
+ int ret = sclp_pci_configure(slot->zdev->fid);
+
+ zpci_dbg(3, "conf fid:%x, rc:%d\n", slot->zdev->fid, ret);
+ if (!ret)
+ slot->zdev->state = ZPCI_FN_STATE_CONFIGURED;
+
+ return ret;
+}
+
+static inline int slot_deconfigure(struct slot *slot)
+{
+ int ret = sclp_pci_deconfigure(slot->zdev->fid);
+
+ zpci_dbg(3, "deconf fid:%x, rc:%d\n", slot->zdev->fid, ret);
+ if (!ret)
+ slot->zdev->state = ZPCI_FN_STATE_STANDBY;
+
+ return ret;
+}
+
+static int enable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+ int rc;
+
+ if (slot->zdev->state != ZPCI_FN_STATE_STANDBY)
+ return -EIO;
+
+ rc = slot_configure(slot);
+ if (rc)
+ return rc;
+
+ rc = zpci_enable_device(slot->zdev);
+ if (rc)
+ goto out_deconfigure;
+
+ pci_scan_slot(slot->zdev->bus, ZPCI_DEVFN);
+ pci_lock_rescan_remove();
+ pci_bus_add_devices(slot->zdev->bus);
+ pci_unlock_rescan_remove();
+
+ return rc;
+
+out_deconfigure:
+ slot_deconfigure(slot);
+ return rc;
+}
+
+static int disable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+ int rc;
+
+ if (!zpci_fn_configured(slot->zdev->state))
+ return -EIO;
+
+ if (slot->zdev->pdev)
+ pci_stop_and_remove_bus_device_locked(slot->zdev->pdev);
+
+ rc = zpci_disable_device(slot->zdev);
+ if (rc)
+ return rc;
+
+ return slot_deconfigure(slot);
+}
+
+static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ switch (slot->zdev->state) {
+ case ZPCI_FN_STATE_STANDBY:
+ *value = 0;
+ break;
+ default:
+ *value = 1;
+ break;
+ }
+ return 0;
+}
+
+static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ /* if the slot exists it always contains a function */
+ *value = 1;
+ return 0;
+}
+
+static void release_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ kfree(slot->hotplug_slot->info);
+ kfree(slot->hotplug_slot);
+ kfree(slot);
+}
+
+static struct hotplug_slot_ops s390_hotplug_slot_ops = {
+ .enable_slot = enable_slot,
+ .disable_slot = disable_slot,
+ .get_power_status = get_power_status,
+ .get_adapter_status = get_adapter_status,
+};
+
+int zpci_init_slot(struct zpci_dev *zdev)
+{
+ struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot_info *info;
+ char name[SLOT_NAME_SIZE];
+ struct slot *slot;
+ int rc;
+
+ if (!zdev)
+ return 0;
+
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ goto error;
+
+ hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
+ if (!hotplug_slot)
+ goto error_hp;
+ hotplug_slot->private = slot;
+
+ slot->hotplug_slot = hotplug_slot;
+ slot->zdev = zdev;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ goto error_info;
+ hotplug_slot->info = info;
+
+ hotplug_slot->ops = &s390_hotplug_slot_ops;
+ hotplug_slot->release = &release_slot;
+
+ get_power_status(hotplug_slot, &info->power_status);
+ get_adapter_status(hotplug_slot, &info->adapter_status);
+
+ snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
+ rc = pci_hp_register(slot->hotplug_slot, zdev->bus,
+ ZPCI_DEVFN, name);
+ if (rc)
+ goto error_reg;
+
+ list_add(&slot->slot_list, &s390_hotplug_slot_list);
+ return 0;
+
+error_reg:
+ kfree(info);
+error_info:
+ kfree(hotplug_slot);
+error_hp:
+ kfree(slot);
+error:
+ return -ENOMEM;
+}
+
+void zpci_exit_slot(struct zpci_dev *zdev)
+{
+ struct list_head *tmp, *n;
+ struct slot *slot;
+
+ list_for_each_safe(tmp, n, &s390_hotplug_slot_list) {
+ slot = list_entry(tmp, struct slot, slot_list);
+ if (slot->zdev != zdev)
+ continue;
+ list_del(&slot->slot_list);
+ pci_hp_deregister(slot->hotplug_slot);
+ }
+}
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
new file mode 100644
index 000000000..c32fb786d
--- /dev/null
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -0,0 +1,717 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2006 Silicon Graphics, Inc. All rights reserved.
+ *
+ * This work was based on the 2.4/2.6 kernel development by Dick Reigner.
+ * Work to add BIOS PROM support was completed by Mike Habeck.
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+
+#include <asm/sn/addrs.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/l1.h>
+#include <asm/sn/module.h>
+#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/sn_feature_sets.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/types.h>
+#include <asm/sn/acpi.h>
+
+#include "../pci.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("SGI (prarit@sgi.com, dickie@sgi.com, habeck@sgi.com)");
+MODULE_DESCRIPTION("SGI Altix Hot Plug PCI Controller Driver");
+
+
+/* SAL call error codes. Keep in sync with prom header io/include/pcibr.h */
+#define PCI_SLOT_ALREADY_UP 2 /* slot already up */
+#define PCI_SLOT_ALREADY_DOWN 3 /* slot already down */
+#define PCI_L1_ERR 7 /* L1 console command error */
+#define PCI_EMPTY_33MHZ 15 /* empty 33 MHz bus */
+
+
+#define PCIIO_ASIC_TYPE_TIOCA 4
+#define PCI_L1_QSIZE 128 /* our L1 message buffer size */
+#define SN_MAX_HP_SLOTS 32 /* max hotplug slots */
+#define SN_SLOT_NAME_SIZE 33 /* size of name string */
+
+/* internal list head */
+static struct list_head sn_hp_list;
+
+/* hotplug_slot struct's private pointer */
+struct slot {
+ int device_num;
+ struct pci_bus *pci_bus;
+ /* this struct for glue internal only */
+ struct hotplug_slot *hotplug_slot;
+ struct list_head hp_list;
+ char physical_path[SN_SLOT_NAME_SIZE];
+};
+
+struct pcibr_slot_enable_resp {
+ int resp_sub_errno;
+ char resp_l1_msg[PCI_L1_QSIZE + 1];
+};
+
+struct pcibr_slot_disable_resp {
+ int resp_sub_errno;
+ char resp_l1_msg[PCI_L1_QSIZE + 1];
+};
+
+enum sn_pci_req_e {
+ PCI_REQ_SLOT_ELIGIBLE,
+ PCI_REQ_SLOT_DISABLE
+};
+
+static int enable_slot(struct hotplug_slot *slot);
+static int disable_slot(struct hotplug_slot *slot);
+static inline int get_power_status(struct hotplug_slot *slot, u8 *value);
+
+static struct hotplug_slot_ops sn_hotplug_slot_ops = {
+ .enable_slot = enable_slot,
+ .disable_slot = disable_slot,
+ .get_power_status = get_power_status,
+};
+
+static DEFINE_MUTEX(sn_hotplug_mutex);
+
+static ssize_t path_show(struct pci_slot *pci_slot, char *buf)
+{
+ int retval = -ENOENT;
+ struct slot *slot = pci_slot->hotplug->private;
+
+ if (!slot)
+ return retval;
+
+ retval = sprintf(buf, "%s\n", slot->physical_path);
+ return retval;
+}
+
+static struct pci_slot_attribute sn_slot_path_attr = __ATTR_RO(path);
+
+static int sn_pci_slot_valid(struct pci_bus *pci_bus, int device)
+{
+ struct pcibus_info *pcibus_info;
+ u16 busnum, segment, ioboard_type;
+
+ pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus);
+
+ /* Check to see if this is a valid slot on 'pci_bus' */
+ if (!(pcibus_info->pbi_valid_devices & (1 << device)))
+ return -EPERM;
+
+ ioboard_type = sn_ioboard_to_pci_bus(pci_bus);
+ busnum = pcibus_info->pbi_buscommon.bs_persist_busnum;
+ segment = pci_domain_nr(pci_bus) & 0xf;
+
+ /* Do not allow hotplug operations on base I/O cards */
+ if ((ioboard_type == L1_BRICKTYPE_IX ||
+ ioboard_type == L1_BRICKTYPE_IA) &&
+ (segment == 1 && busnum == 0 && device != 1))
+ return -EPERM;
+
+ return 1;
+}
+
+static int sn_pci_bus_valid(struct pci_bus *pci_bus)
+{
+ struct pcibus_info *pcibus_info;
+ u32 asic_type;
+ u16 ioboard_type;
+
+ /* Don't register slots hanging off the TIOCA bus */
+ pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus);
+ asic_type = pcibus_info->pbi_buscommon.bs_asic_type;
+ if (asic_type == PCIIO_ASIC_TYPE_TIOCA)
+ return -EPERM;
+
+ /* Only register slots in I/O Bricks that support hotplug */
+ ioboard_type = sn_ioboard_to_pci_bus(pci_bus);
+ switch (ioboard_type) {
+ case L1_BRICKTYPE_IX:
+ case L1_BRICKTYPE_PX:
+ case L1_BRICKTYPE_IA:
+ case L1_BRICKTYPE_PA:
+ case L1_BOARDTYPE_PCIX3SLOT:
+ return 1;
+ default:
+ return -EPERM;
+ }
+}
+
+static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
+ struct pci_bus *pci_bus, int device,
+ char *name)
+{
+ struct pcibus_info *pcibus_info;
+ struct slot *slot;
+
+ pcibus_info = SN_PCIBUS_BUSSOFT_INFO(pci_bus);
+
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ return -ENOMEM;
+ bss_hotplug_slot->private = slot;
+
+ slot->device_num = device;
+ slot->pci_bus = pci_bus;
+ sprintf(name, "%04x:%02x:%02x",
+ pci_domain_nr(pci_bus),
+ ((u16)pcibus_info->pbi_buscommon.bs_persist_busnum),
+ device + 1);
+
+ sn_generate_path(pci_bus, slot->physical_path);
+
+ slot->hotplug_slot = bss_hotplug_slot;
+ list_add(&slot->hp_list, &sn_hp_list);
+
+ return 0;
+}
+
+static struct hotplug_slot *sn_hp_destroy(void)
+{
+ struct slot *slot;
+ struct pci_slot *pci_slot;
+ struct hotplug_slot *bss_hotplug_slot = NULL;
+
+ list_for_each_entry(slot, &sn_hp_list, hp_list) {
+ bss_hotplug_slot = slot->hotplug_slot;
+ pci_slot = bss_hotplug_slot->pci_slot;
+ list_del(&((struct slot *)bss_hotplug_slot->private)->
+ hp_list);
+ sysfs_remove_file(&pci_slot->kobj,
+ &sn_slot_path_attr.attr);
+ break;
+ }
+ return bss_hotplug_slot;
+}
+
+static void sn_bus_free_data(struct pci_dev *dev)
+{
+ struct pci_bus *subordinate_bus;
+ struct pci_dev *child;
+
+ /* Recursively clean up sn_irq_info structs */
+ if (dev->subordinate) {
+ subordinate_bus = dev->subordinate;
+ list_for_each_entry(child, &subordinate_bus->devices, bus_list)
+ sn_bus_free_data(child);
+ }
+ /*
+ * Some drivers may use dma accesses during the
+ * driver remove function. We release the sysdata
+ * areas after the driver remove functions have
+ * been called.
+ */
+ sn_bus_store_sysdata(dev);
+ sn_pci_unfixup_slot(dev);
+}
+
+static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
+ int device_num, char **ssdt)
+{
+ struct slot *slot = bss_hotplug_slot->private;
+ struct pcibus_info *pcibus_info;
+ struct pcibr_slot_enable_resp resp;
+ int rc;
+
+ pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
+
+ /*
+ * Power-on and initialize the slot in the SN
+ * PCI infrastructure.
+ */
+ rc = sal_pcibr_slot_enable(pcibus_info, device_num, &resp, ssdt);
+
+
+ if (rc == PCI_SLOT_ALREADY_UP) {
+ dev_dbg(&slot->pci_bus->self->dev, "is already active\n");
+ return 1; /* return 1 to user */
+ }
+
+ if (rc == PCI_L1_ERR) {
+ dev_dbg(&slot->pci_bus->self->dev, "L1 failure %d with message: %s",
+ resp.resp_sub_errno, resp.resp_l1_msg);
+ return -EPERM;
+ }
+
+ if (rc) {
+ dev_dbg(&slot->pci_bus->self->dev, "insert failed with error %d sub-error %d\n",
+ rc, resp.resp_sub_errno);
+ return -EIO;
+ }
+
+ pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
+ pcibus_info->pbi_enabled_devices |= (1 << device_num);
+
+ return 0;
+}
+
+static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
+ int device_num, int action)
+{
+ struct slot *slot = bss_hotplug_slot->private;
+ struct pcibus_info *pcibus_info;
+ struct pcibr_slot_disable_resp resp;
+ int rc;
+
+ pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
+
+ rc = sal_pcibr_slot_disable(pcibus_info, device_num, action, &resp);
+
+ if ((action == PCI_REQ_SLOT_ELIGIBLE) &&
+ (rc == PCI_SLOT_ALREADY_DOWN)) {
+ dev_dbg(&slot->pci_bus->self->dev, "Slot %s already inactive\n", slot->physical_path);
+ return 1; /* return 1 to user */
+ }
+
+ if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_EMPTY_33MHZ)) {
+ dev_dbg(&slot->pci_bus->self->dev, "Cannot remove last 33MHz card\n");
+ return -EPERM;
+ }
+
+ if ((action == PCI_REQ_SLOT_ELIGIBLE) && (rc == PCI_L1_ERR)) {
+ dev_dbg(&slot->pci_bus->self->dev, "L1 failure %d with message \n%s\n",
+ resp.resp_sub_errno, resp.resp_l1_msg);
+ return -EPERM;
+ }
+
+ if ((action == PCI_REQ_SLOT_ELIGIBLE) && rc) {
+ dev_dbg(&slot->pci_bus->self->dev, "remove failed with error %d sub-error %d\n",
+ rc, resp.resp_sub_errno);
+ return -EIO;
+ }
+
+ if ((action == PCI_REQ_SLOT_ELIGIBLE) && !rc)
+ return 0;
+
+ if ((action == PCI_REQ_SLOT_DISABLE) && !rc) {
+ pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
+ pcibus_info->pbi_enabled_devices &= ~(1 << device_num);
+ dev_dbg(&slot->pci_bus->self->dev, "remove successful\n");
+ return 0;
+ }
+
+ if ((action == PCI_REQ_SLOT_DISABLE) && rc)
+ dev_dbg(&slot->pci_bus->self->dev, "remove failed rc = %d\n", rc);
+
+ return rc;
+}
+
+/*
+ * Power up and configure the slot via a SAL call to PROM.
+ * Scan slot (and any children), do any platform specific fixup,
+ * and find device driver.
+ */
+static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
+{
+ struct slot *slot = bss_hotplug_slot->private;
+ struct pci_bus *new_bus = NULL;
+ struct pci_dev *dev;
+ int num_funcs;
+ int new_ppb = 0;
+ int rc;
+ char *ssdt = NULL;
+ void pcibios_fixup_device_resources(struct pci_dev *);
+
+ /* Serialize the Linux PCI infrastructure */
+ mutex_lock(&sn_hotplug_mutex);
+
+ /*
+ * Power-on and initialize the slot in the SN
+ * PCI infrastructure. Also, retrieve the ACPI SSDT
+ * table for the slot (if ACPI capable PROM).
+ */
+ rc = sn_slot_enable(bss_hotplug_slot, slot->device_num, &ssdt);
+ if (rc) {
+ mutex_unlock(&sn_hotplug_mutex);
+ return rc;
+ }
+
+ if (ssdt)
+ ssdt = __va(ssdt);
+ /* Add the new SSDT for the slot to the ACPI namespace */
+ if (SN_ACPI_BASE_SUPPORT() && ssdt) {
+ acpi_status ret;
+
+ ret = acpi_load_table((struct acpi_table_header *)ssdt);
+ if (ACPI_FAILURE(ret)) {
+ printk(KERN_ERR "%s: acpi_load_table failed (0x%x)\n",
+ __func__, ret);
+ /* try to continue on */
+ }
+ }
+
+ num_funcs = pci_scan_slot(slot->pci_bus,
+ PCI_DEVFN(slot->device_num + 1, 0));
+ if (!num_funcs) {
+ dev_dbg(&slot->pci_bus->self->dev, "no device in slot\n");
+ mutex_unlock(&sn_hotplug_mutex);
+ return -ENODEV;
+ }
+
+ /*
+ * Map SN resources for all functions on the card
+ * to the Linux PCI interface and tell the drivers
+ * about them.
+ */
+ list_for_each_entry(dev, &slot->pci_bus->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != slot->device_num + 1)
+ continue;
+
+ /* Need to do slot fixup on PPB before fixup of children
+ * (PPB's pcidev_info needs to be in pcidev_info list
+ * before child's SN_PCIDEV_INFO() call to setup
+ * pdi_host_pcidev_info).
+ */
+ pcibios_fixup_device_resources(dev);
+ if (SN_ACPI_BASE_SUPPORT())
+ sn_acpi_slot_fixup(dev);
+ else
+ sn_io_slot_fixup(dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ pci_hp_add_bridge(dev);
+ if (dev->subordinate) {
+ new_bus = dev->subordinate;
+ new_ppb = 1;
+ }
+ }
+ }
+
+ /* Add the slot's devices to the ACPI infrastructure */
+ if (SN_ACPI_BASE_SUPPORT() && ssdt) {
+ unsigned long long adr;
+ struct acpi_device *pdevice;
+ acpi_handle phandle;
+ acpi_handle chandle = NULL;
+ acpi_handle rethandle;
+ acpi_status ret;
+
+ phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion);
+
+ if (acpi_bus_get_device(phandle, &pdevice)) {
+ dev_dbg(&slot->pci_bus->self->dev, "no parent device, assuming NULL\n");
+ pdevice = NULL;
+ }
+
+ acpi_scan_lock_acquire();
+ /*
+ * Walk the rootbus node's immediate children looking for
+ * the slot's device node(s). There can be more than
+ * one for multifunction devices.
+ */
+ for (;;) {
+ rethandle = NULL;
+ ret = acpi_get_next_object(ACPI_TYPE_DEVICE,
+ phandle, chandle,
+ &rethandle);
+
+ if (ret == AE_NOT_FOUND || rethandle == NULL)
+ break;
+
+ chandle = rethandle;
+
+ ret = acpi_evaluate_integer(chandle, METHOD_NAME__ADR,
+ NULL, &adr);
+
+ if (ACPI_SUCCESS(ret) &&
+ (adr>>16) == (slot->device_num + 1)) {
+
+ ret = acpi_bus_scan(chandle);
+ if (ACPI_FAILURE(ret)) {
+ printk(KERN_ERR "%s: acpi_bus_scan failed (0x%x) for slot %d func %d\n",
+ __func__, ret, (int)(adr>>16),
+ (int)(adr&0xffff));
+ /* try to continue on */
+ }
+ }
+ }
+ acpi_scan_lock_release();
+ }
+
+ pci_lock_rescan_remove();
+
+ /* Call the driver for the new device */
+ pci_bus_add_devices(slot->pci_bus);
+ /* Call the drivers for the new devices subordinate to PPB */
+ if (new_ppb)
+ pci_bus_add_devices(new_bus);
+
+ pci_unlock_rescan_remove();
+ mutex_unlock(&sn_hotplug_mutex);
+
+ if (rc == 0)
+ dev_dbg(&slot->pci_bus->self->dev, "insert operation successful\n");
+ else
+ dev_dbg(&slot->pci_bus->self->dev, "insert operation failed rc = %d\n", rc);
+
+ return rc;
+}
+
+static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
+{
+ struct slot *slot = bss_hotplug_slot->private;
+ struct pci_dev *dev, *temp;
+ int rc;
+ acpi_handle ssdt_hdl = NULL;
+
+ /* Acquire update access to the bus */
+ mutex_lock(&sn_hotplug_mutex);
+
+ /* is it okay to bring this slot down? */
+ rc = sn_slot_disable(bss_hotplug_slot, slot->device_num,
+ PCI_REQ_SLOT_ELIGIBLE);
+ if (rc)
+ goto leaving;
+
+ /* free the ACPI resources for the slot */
+ if (SN_ACPI_BASE_SUPPORT() &&
+ PCI_CONTROLLER(slot->pci_bus)->companion) {
+ unsigned long long adr;
+ struct acpi_device *device;
+ acpi_handle phandle;
+ acpi_handle chandle = NULL;
+ acpi_handle rethandle;
+ acpi_status ret;
+
+ /* Get the rootbus node pointer */
+ phandle = acpi_device_handle(PCI_CONTROLLER(slot->pci_bus)->companion);
+
+ acpi_scan_lock_acquire();
+ /*
+ * Walk the rootbus node's immediate children looking for
+ * the slot's device node(s). There can be more than
+ * one for multifunction devices.
+ */
+ for (;;) {
+ rethandle = NULL;
+ ret = acpi_get_next_object(ACPI_TYPE_DEVICE,
+ phandle, chandle,
+ &rethandle);
+
+ if (ret == AE_NOT_FOUND || rethandle == NULL)
+ break;
+
+ chandle = rethandle;
+
+ ret = acpi_evaluate_integer(chandle,
+ METHOD_NAME__ADR,
+ NULL, &adr);
+ if (ACPI_SUCCESS(ret) &&
+ (adr>>16) == (slot->device_num + 1)) {
+ /* retain the owner id */
+ ssdt_hdl = chandle;
+
+ ret = acpi_bus_get_device(chandle,
+ &device);
+ if (ACPI_SUCCESS(ret))
+ acpi_bus_trim(device);
+ }
+ }
+ acpi_scan_lock_release();
+ }
+
+ pci_lock_rescan_remove();
+ /* Free the SN resources assigned to the Linux device.*/
+ list_for_each_entry_safe(dev, temp, &slot->pci_bus->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != slot->device_num + 1)
+ continue;
+
+ pci_dev_get(dev);
+ sn_bus_free_data(dev);
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
+ }
+ pci_unlock_rescan_remove();
+
+ /* Remove the SSDT for the slot from the ACPI namespace */
+ if (SN_ACPI_BASE_SUPPORT() && ssdt_hdl) {
+ acpi_status ret;
+ ret = acpi_unload_parent_table(ssdt_hdl);
+ if (ACPI_FAILURE(ret)) {
+ acpi_handle_err(ssdt_hdl,
+ "%s: acpi_unload_parent_table failed (0x%x)\n",
+ __func__, ret);
+ /* try to continue on */
+ }
+ }
+
+ /* free the collected sysdata pointers */
+ sn_bus_free_sysdata();
+
+ /* Deactivate slot */
+ rc = sn_slot_disable(bss_hotplug_slot, slot->device_num,
+ PCI_REQ_SLOT_DISABLE);
+ leaving:
+ /* Release the bus lock */
+ mutex_unlock(&sn_hotplug_mutex);
+
+ return rc;
+}
+
+static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
+ u8 *value)
+{
+ struct slot *slot = bss_hotplug_slot->private;
+ struct pcibus_info *pcibus_info;
+ u32 power;
+
+ pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus);
+ mutex_lock(&sn_hotplug_mutex);
+ power = pcibus_info->pbi_enabled_devices & (1 << slot->device_num);
+ *value = power ? 1 : 0;
+ mutex_unlock(&sn_hotplug_mutex);
+ return 0;
+}
+
+static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot)
+{
+ kfree(bss_hotplug_slot->info);
+ kfree(bss_hotplug_slot->private);
+ kfree(bss_hotplug_slot);
+}
+
+static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
+{
+ int device;
+ struct pci_slot *pci_slot;
+ struct hotplug_slot *bss_hotplug_slot;
+ char name[SN_SLOT_NAME_SIZE];
+ int rc = 0;
+
+ /*
+ * Currently only four devices are supported;
+ * in the future there may be more -- up to 32.
+ */
+
+ for (device = 0; device < SN_MAX_HP_SLOTS ; device++) {
+ if (sn_pci_slot_valid(pci_bus, device) != 1)
+ continue;
+
+ bss_hotplug_slot = kzalloc(sizeof(*bss_hotplug_slot),
+ GFP_KERNEL);
+ if (!bss_hotplug_slot) {
+ rc = -ENOMEM;
+ goto alloc_err;
+ }
+
+ bss_hotplug_slot->info =
+ kzalloc(sizeof(struct hotplug_slot_info),
+ GFP_KERNEL);
+ if (!bss_hotplug_slot->info) {
+ rc = -ENOMEM;
+ goto alloc_err;
+ }
+
+ if (sn_hp_slot_private_alloc(bss_hotplug_slot,
+ pci_bus, device, name)) {
+ rc = -ENOMEM;
+ goto alloc_err;
+ }
+ bss_hotplug_slot->ops = &sn_hotplug_slot_ops;
+ bss_hotplug_slot->release = &sn_release_slot;
+
+ rc = pci_hp_register(bss_hotplug_slot, pci_bus, device, name);
+ if (rc)
+ goto register_err;
+
+ pci_slot = bss_hotplug_slot->pci_slot;
+ rc = sysfs_create_file(&pci_slot->kobj,
+ &sn_slot_path_attr.attr);
+ if (rc)
+ goto register_err;
+ }
+ dev_dbg(&pci_bus->self->dev, "Registered bus with hotplug\n");
+ return rc;
+
+register_err:
+ dev_dbg(&pci_bus->self->dev, "bus failed to register with err = %d\n",
+ rc);
+
+alloc_err:
+ if (rc == -ENOMEM)
+ dev_dbg(&pci_bus->self->dev, "Memory allocation error\n");
+
+ /* destroy THIS element */
+ if (bss_hotplug_slot)
+ sn_release_slot(bss_hotplug_slot);
+
+ /* destroy anything else on the list */
+ while ((bss_hotplug_slot = sn_hp_destroy()))
+ pci_hp_deregister(bss_hotplug_slot);
+
+ return rc;
+}
+
+static int __init sn_pci_hotplug_init(void)
+{
+ struct pci_bus *pci_bus = NULL;
+ int rc;
+ int registered = 0;
+
+ if (!sn_prom_feature_available(PRF_HOTPLUG_SUPPORT)) {
+ printk(KERN_ERR "%s: PROM version does not support hotplug.\n",
+ __func__);
+ return -EPERM;
+ }
+
+ INIT_LIST_HEAD(&sn_hp_list);
+
+ while ((pci_bus = pci_find_next_bus(pci_bus))) {
+ if (!pci_bus->sysdata)
+ continue;
+
+ rc = sn_pci_bus_valid(pci_bus);
+ if (rc != 1) {
+ dev_dbg(&pci_bus->self->dev, "not a valid hotplug bus\n");
+ continue;
+ }
+ dev_dbg(&pci_bus->self->dev, "valid hotplug bus\n");
+
+ rc = sn_hotplug_slot_register(pci_bus);
+ if (!rc) {
+ registered = 1;
+ } else {
+ registered = 0;
+ break;
+ }
+ }
+
+ return registered == 1 ? 0 : -ENODEV;
+}
+
+static void __exit sn_pci_hotplug_exit(void)
+{
+ struct hotplug_slot *bss_hotplug_slot;
+
+ while ((bss_hotplug_slot = sn_hp_destroy()))
+ pci_hp_deregister(bss_hotplug_slot);
+
+ if (!list_empty(&sn_hp_list))
+ printk(KERN_ERR "%s: internal list is not empty\n", __FILE__);
+}
+
+module_init(sn_pci_hotplug_init);
+module_exit(sn_pci_hotplug_exit);
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
new file mode 100644
index 000000000..5897d5164
--- /dev/null
+++ b/drivers/pci/hotplug/shpchp.h
@@ -0,0 +1,348 @@
+/*
+ * Standard Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM
+ * Copyright (C) 2003-2004 Intel Corporation
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
+ *
+ */
+#ifndef _SHPCHP_H
+#define _SHPCHP_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/delay.h>
+#include <linux/sched.h> /* signal_pending(), struct timer_list */
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+#if !defined(MODULE)
+ #define MY_NAME "shpchp"
+#else
+ #define MY_NAME THIS_MODULE->name
+#endif
+
+extern bool shpchp_poll_mode;
+extern int shpchp_poll_time;
+extern bool shpchp_debug;
+
+#define dbg(format, arg...) \
+do { \
+ if (shpchp_debug) \
+ printk(KERN_DEBUG "%s: " format, MY_NAME , ## arg); \
+} while (0)
+#define err(format, arg...) \
+ printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
+#define info(format, arg...) \
+ printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
+#define warn(format, arg...) \
+ printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
+
+#define ctrl_dbg(ctrl, format, arg...) \
+ do { \
+ if (shpchp_debug) \
+ dev_printk(KERN_DEBUG, &ctrl->pci_dev->dev, \
+ format, ## arg); \
+ } while (0)
+#define ctrl_err(ctrl, format, arg...) \
+ dev_err(&ctrl->pci_dev->dev, format, ## arg)
+#define ctrl_info(ctrl, format, arg...) \
+ dev_info(&ctrl->pci_dev->dev, format, ## arg)
+#define ctrl_warn(ctrl, format, arg...) \
+ dev_warn(&ctrl->pci_dev->dev, format, ## arg)
+
+
+#define SLOT_NAME_SIZE 10
+struct slot {
+ u8 bus;
+ u8 device;
+ u16 status;
+ u32 number;
+ u8 is_a_board;
+ u8 state;
+ u8 presence_save;
+ u8 pwr_save;
+ struct controller *ctrl;
+ struct hpc_ops *hpc_ops;
+ struct hotplug_slot *hotplug_slot;
+ struct list_head slot_list;
+ struct delayed_work work; /* work for button event */
+ struct mutex lock;
+ struct workqueue_struct *wq;
+ u8 hp_slot;
+};
+
+struct event_info {
+ u32 event_type;
+ struct slot *p_slot;
+ struct work_struct work;
+};
+
+struct controller {
+ struct mutex crit_sect; /* critical section mutex */
+ struct mutex cmd_lock; /* command lock */
+ int num_slots; /* Number of slots on ctlr */
+ int slot_num_inc; /* 1 or -1 */
+ struct pci_dev *pci_dev;
+ struct list_head slot_list;
+ struct hpc_ops *hpc_ops;
+ wait_queue_head_t queue; /* sleep & wake process */
+ u8 slot_device_offset;
+ u32 pcix_misc2_reg; /* for amd pogo errata */
+ u32 first_slot; /* First physical slot number */
+ u32 cap_offset;
+ unsigned long mmio_base;
+ unsigned long mmio_size;
+ void __iomem *creg;
+ struct timer_list poll_timer;
+};
+
+/* Define AMD SHPC ID */
+#define PCI_DEVICE_ID_AMD_GOLAM_7450 0x7450
+#define PCI_DEVICE_ID_AMD_POGO_7458 0x7458
+
+/* AMD PCI-X bridge registers */
+#define PCIX_MEM_BASE_LIMIT_OFFSET 0x1C
+#define PCIX_MISCII_OFFSET 0x48
+#define PCIX_MISC_BRIDGE_ERRORS_OFFSET 0x80
+
+/* AMD PCIX_MISCII masks and offsets */
+#define PERRNONFATALENABLE_MASK 0x00040000
+#define PERRFATALENABLE_MASK 0x00080000
+#define PERRFLOODENABLE_MASK 0x00100000
+#define SERRNONFATALENABLE_MASK 0x00200000
+#define SERRFATALENABLE_MASK 0x00400000
+
+/* AMD PCIX_MISC_BRIDGE_ERRORS masks and offsets */
+#define PERR_OBSERVED_MASK 0x00000001
+
+/* AMD PCIX_MEM_BASE_LIMIT masks */
+#define RSE_MASK 0x40000000
+
+#define INT_BUTTON_IGNORE 0
+#define INT_PRESENCE_ON 1
+#define INT_PRESENCE_OFF 2
+#define INT_SWITCH_CLOSE 3
+#define INT_SWITCH_OPEN 4
+#define INT_POWER_FAULT 5
+#define INT_POWER_FAULT_CLEAR 6
+#define INT_BUTTON_PRESS 7
+#define INT_BUTTON_RELEASE 8
+#define INT_BUTTON_CANCEL 9
+
+#define STATIC_STATE 0
+#define BLINKINGON_STATE 1
+#define BLINKINGOFF_STATE 2
+#define POWERON_STATE 3
+#define POWEROFF_STATE 4
+
+/* Error messages */
+#define INTERLOCK_OPEN 0x00000002
+#define ADD_NOT_SUPPORTED 0x00000003
+#define CARD_FUNCTIONING 0x00000005
+#define ADAPTER_NOT_SAME 0x00000006
+#define NO_ADAPTER_PRESENT 0x00000009
+#define NOT_ENOUGH_RESOURCES 0x0000000B
+#define DEVICE_TYPE_NOT_SUPPORTED 0x0000000C
+#define WRONG_BUS_FREQUENCY 0x0000000D
+#define POWER_FAILURE 0x0000000E
+
+int __must_check shpchp_create_ctrl_files(struct controller *ctrl);
+void shpchp_remove_ctrl_files(struct controller *ctrl);
+int shpchp_sysfs_enable_slot(struct slot *slot);
+int shpchp_sysfs_disable_slot(struct slot *slot);
+u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl);
+u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl);
+u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl);
+u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl);
+int shpchp_configure_device(struct slot *p_slot);
+int shpchp_unconfigure_device(struct slot *p_slot);
+void cleanup_slots(struct controller *ctrl);
+void shpchp_queue_pushbutton_work(struct work_struct *work);
+int shpc_init(struct controller *ctrl, struct pci_dev *pdev);
+
+static inline const char *slot_name(struct slot *slot)
+{
+ return hotplug_slot_name(slot->hotplug_slot);
+}
+
+#ifdef CONFIG_ACPI
+#include <linux/pci-acpi.h>
+static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
+{
+ u32 flags = OSC_PCI_SHPC_NATIVE_HP_CONTROL;
+ return acpi_get_hp_hw_control_from_firmware(dev, flags);
+}
+#else
+#define get_hp_hw_control_from_firmware(dev) (0)
+#endif
+
+struct ctrl_reg {
+ volatile u32 base_offset;
+ volatile u32 slot_avail1;
+ volatile u32 slot_avail2;
+ volatile u32 slot_config;
+ volatile u16 sec_bus_config;
+ volatile u8 msi_ctrl;
+ volatile u8 prog_interface;
+ volatile u16 cmd;
+ volatile u16 cmd_status;
+ volatile u32 intr_loc;
+ volatile u32 serr_loc;
+ volatile u32 serr_intr_enable;
+ volatile u32 slot1;
+} __attribute__ ((packed));
+
+/* offsets to the controller registers based on the above structure layout */
+enum ctrl_offsets {
+ BASE_OFFSET = offsetof(struct ctrl_reg, base_offset),
+ SLOT_AVAIL1 = offsetof(struct ctrl_reg, slot_avail1),
+ SLOT_AVAIL2 = offsetof(struct ctrl_reg, slot_avail2),
+ SLOT_CONFIG = offsetof(struct ctrl_reg, slot_config),
+ SEC_BUS_CONFIG = offsetof(struct ctrl_reg, sec_bus_config),
+ MSI_CTRL = offsetof(struct ctrl_reg, msi_ctrl),
+ PROG_INTERFACE = offsetof(struct ctrl_reg, prog_interface),
+ CMD = offsetof(struct ctrl_reg, cmd),
+ CMD_STATUS = offsetof(struct ctrl_reg, cmd_status),
+ INTR_LOC = offsetof(struct ctrl_reg, intr_loc),
+ SERR_LOC = offsetof(struct ctrl_reg, serr_loc),
+ SERR_INTR_ENABLE = offsetof(struct ctrl_reg, serr_intr_enable),
+ SLOT1 = offsetof(struct ctrl_reg, slot1),
+};
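The ctrl_reg struct and the ctrl_offsets enum above are kept in lockstep by deriving every offset with offsetof(), so the register map cannot drift from the struct layout. The same technique in a freestanding sketch, with a toy register block and invented fields:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* toy register block; field names and widths are invented */
struct toy_regs {
        uint32_t id;      /* offset 0x00 */
        uint32_t status;  /* offset 0x04 */
        uint16_t cmd;     /* offset 0x08 */
        uint16_t cmd_sts; /* offset 0x0a */
} __attribute__((packed));

enum toy_offsets {
        TOY_ID      = offsetof(struct toy_regs, id),
        TOY_STATUS  = offsetof(struct toy_regs, status),
        TOY_CMD     = offsetof(struct toy_regs, cmd),
        TOY_CMD_STS = offsetof(struct toy_regs, cmd_sts),
};

int main(void)
{
        /* a driver would do readw(ctrl->creg + TOY_CMD); here we just print */
        printf("cmd register at offset 0x%02zx\n", (size_t)TOY_CMD);
        return 0;
}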
+
+static inline struct slot *get_slot(struct hotplug_slot *hotplug_slot)
+{
+ return hotplug_slot->private;
+}
+
+static inline struct slot *shpchp_find_slot(struct controller *ctrl, u8 device)
+{
+ struct slot *slot;
+
+ list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
+ if (slot->device == device)
+ return slot;
+ }
+
+ ctrl_err(ctrl, "Slot (device=0x%02x) not found\n", device);
+ return NULL;
+}
+
+static inline void amd_pogo_errata_save_misc_reg(struct slot *p_slot)
+{
+ u32 pcix_misc2_temp;
+
+ /* save MiscII register */
+ pci_read_config_dword(p_slot->ctrl->pci_dev, PCIX_MISCII_OFFSET, &pcix_misc2_temp);
+
+ p_slot->ctrl->pcix_misc2_reg = pcix_misc2_temp;
+
+ /* clear SERR/PERR enable bits */
+ pcix_misc2_temp &= ~SERRFATALENABLE_MASK;
+ pcix_misc2_temp &= ~SERRNONFATALENABLE_MASK;
+ pcix_misc2_temp &= ~PERRFLOODENABLE_MASK;
+ pcix_misc2_temp &= ~PERRFATALENABLE_MASK;
+ pcix_misc2_temp &= ~PERRNONFATALENABLE_MASK;
+ pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MISCII_OFFSET, pcix_misc2_temp);
+}
+
+static inline void amd_pogo_errata_restore_misc_reg(struct slot *p_slot)
+{
+ u32 pcix_misc2_temp;
+ u32 pcix_bridge_errors_reg;
+ u32 pcix_mem_base_reg;
+ u8 perr_set;
+ u8 rse_set;
+
+ /* write-one-to-clear Bridge_Errors[ PERR_OBSERVED ] */
+ pci_read_config_dword(p_slot->ctrl->pci_dev, PCIX_MISC_BRIDGE_ERRORS_OFFSET, &pcix_bridge_errors_reg);
+ perr_set = pcix_bridge_errors_reg & PERR_OBSERVED_MASK;
+ if (perr_set) {
+ ctrl_dbg(p_slot->ctrl,
+ "Bridge_Errors[ PERR_OBSERVED = %08X] (W1C)\n",
+ perr_set);
+
+ pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MISC_BRIDGE_ERRORS_OFFSET, perr_set);
+ }
+
+ /* write-one-to-clear Memory_Base_Limit[ RSE ] */
+ pci_read_config_dword(p_slot->ctrl->pci_dev, PCIX_MEM_BASE_LIMIT_OFFSET, &pcix_mem_base_reg);
+ rse_set = pcix_mem_base_reg & RSE_MASK;
+ if (rse_set) {
+ ctrl_dbg(p_slot->ctrl, "Memory_Base_Limit[ RSE ] (W1C)\n");
+
+ pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MEM_BASE_LIMIT_OFFSET, rse_set);
+ }
+ /* restore MiscII register */
+ pci_read_config_dword(p_slot->ctrl->pci_dev, PCIX_MISCII_OFFSET, &pcix_misc2_temp );
+
+ if (p_slot->ctrl->pcix_misc2_reg & SERRFATALENABLE_MASK)
+ pcix_misc2_temp |= SERRFATALENABLE_MASK;
+ else
+ pcix_misc2_temp &= ~SERRFATALENABLE_MASK;
+
+ if (p_slot->ctrl->pcix_misc2_reg & SERRNONFATALENABLE_MASK)
+ pcix_misc2_temp |= SERRNONFATALENABLE_MASK;
+ else
+ pcix_misc2_temp &= ~SERRNONFATALENABLE_MASK;
+
+ if (p_slot->ctrl->pcix_misc2_reg & PERRFLOODENABLE_MASK)
+ pcix_misc2_temp |= PERRFLOODENABLE_MASK;
+ else
+ pcix_misc2_temp &= ~PERRFLOODENABLE_MASK;
+
+ if (p_slot->ctrl->pcix_misc2_reg & PERRFATALENABLE_MASK)
+ pcix_misc2_temp |= PERRFATALENABLE_MASK;
+ else
+ pcix_misc2_temp &= ~PERRFATALENABLE_MASK;
+
+ if (p_slot->ctrl->pcix_misc2_reg & PERRNONFATALENABLE_MASK)
+ pcix_misc2_temp |= PERRNONFATALENABLE_MASK;
+ else
+ pcix_misc2_temp &= ~PERRNONFATALENABLE_MASK;
+ pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MISCII_OFFSET, pcix_misc2_temp);
+}
+
+struct hpc_ops {
+ int (*power_on_slot)(struct slot *slot);
+ int (*slot_enable)(struct slot *slot);
+ int (*slot_disable)(struct slot *slot);
+ int (*set_bus_speed_mode)(struct slot *slot, enum pci_bus_speed speed);
+ int (*get_power_status)(struct slot *slot, u8 *status);
+ int (*get_attention_status)(struct slot *slot, u8 *status);
+ int (*set_attention_status)(struct slot *slot, u8 status);
+ int (*get_latch_status)(struct slot *slot, u8 *status);
+ int (*get_adapter_status)(struct slot *slot, u8 *status);
+ int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed);
+ int (*get_mode1_ECC_cap)(struct slot *slot, u8 *mode);
+ int (*get_prog_int)(struct slot *slot, u8 *prog_int);
+ int (*query_power_fault)(struct slot *slot);
+ void (*green_led_on)(struct slot *slot);
+ void (*green_led_off)(struct slot *slot);
+ void (*green_led_blink)(struct slot *slot);
+ void (*release_ctlr)(struct controller *ctrl);
+ int (*check_cmd_status)(struct controller *ctrl);
+};
+
+#endif /* _SHPCHP_H */
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
new file mode 100644
index 000000000..294ef4b10
--- /dev/null
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -0,0 +1,389 @@
+/*
+ * Standard Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ * Copyright (C) 2003-2004 Intel Corporation
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include "shpchp.h"
+
+/* Global variables */
+bool shpchp_debug;
+bool shpchp_poll_mode;
+int shpchp_poll_time;
+
+#define DRIVER_VERSION "0.4"
+#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
+#define DRIVER_DESC "Standard Hot Plug PCI Controller Driver"
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+module_param(shpchp_debug, bool, 0644);
+module_param(shpchp_poll_mode, bool, 0644);
+module_param(shpchp_poll_time, int, 0644);
+MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not");
+MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not");
+MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds");
+
+#define SHPC_MODULE_NAME "shpchp"
+
+static int set_attention_status(struct hotplug_slot *slot, u8 value);
+static int enable_slot(struct hotplug_slot *slot);
+static int disable_slot(struct hotplug_slot *slot);
+static int get_power_status(struct hotplug_slot *slot, u8 *value);
+static int get_attention_status(struct hotplug_slot *slot, u8 *value);
+static int get_latch_status(struct hotplug_slot *slot, u8 *value);
+static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
+
+static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
+ .set_attention_status = set_attention_status,
+ .enable_slot = enable_slot,
+ .disable_slot = disable_slot,
+ .get_power_status = get_power_status,
+ .get_attention_status = get_attention_status,
+ .get_latch_status = get_latch_status,
+ .get_adapter_status = get_adapter_status,
+};
+
+/**
+ * release_slot - free up the memory used by a slot
+ * @hotplug_slot: slot to free
+ */
+static void release_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ kfree(slot->hotplug_slot->info);
+ kfree(slot->hotplug_slot);
+ kfree(slot);
+}
+
+static int init_slots(struct controller *ctrl)
+{
+ struct slot *slot;
+ struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot_info *info;
+ char name[SLOT_NAME_SIZE];
+ int retval;
+ int i;
+
+ for (i = 0; i < ctrl->num_slots; i++) {
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
+ if (!hotplug_slot) {
+ retval = -ENOMEM;
+ goto error_slot;
+ }
+ slot->hotplug_slot = hotplug_slot;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ retval = -ENOMEM;
+ goto error_hpslot;
+ }
+ hotplug_slot->info = info;
+
+ slot->hp_slot = i;
+ slot->ctrl = ctrl;
+ slot->bus = ctrl->pci_dev->subordinate->number;
+ slot->device = ctrl->slot_device_offset + i;
+ slot->hpc_ops = ctrl->hpc_ops;
+ slot->number = ctrl->first_slot + (ctrl->slot_num_inc * i);
+
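+		/*
+		 * Each slot gets its own workqueue so its hotplug events
+		 * can be handled in process context, one at a time.
+		 */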
+ slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number);
+ if (!slot->wq) {
+ retval = -ENOMEM;
+ goto error_info;
+ }
+
+ mutex_init(&slot->lock);
+ INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work);
+
+ /* register this slot with the hotplug pci core */
+ hotplug_slot->private = slot;
+ hotplug_slot->release = &release_slot;
+ snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
+ hotplug_slot->ops = &shpchp_hotplug_slot_ops;
+
+ ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x hp_slot=%x sun=%x slot_device_offset=%x\n",
+ pci_domain_nr(ctrl->pci_dev->subordinate),
+ slot->bus, slot->device, slot->hp_slot, slot->number,
+ ctrl->slot_device_offset);
+ retval = pci_hp_register(slot->hotplug_slot,
+ ctrl->pci_dev->subordinate, slot->device, name);
+ if (retval) {
+ ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
+ retval);
+ goto error_slotwq;
+ }
+
+ get_power_status(hotplug_slot, &info->power_status);
+ get_attention_status(hotplug_slot, &info->attention_status);
+ get_latch_status(hotplug_slot, &info->latch_status);
+ get_adapter_status(hotplug_slot, &info->adapter_status);
+
+ list_add(&slot->slot_list, &ctrl->slot_list);
+ }
+
+ return 0;
+error_slotwq:
+ destroy_workqueue(slot->wq);
+error_info:
+ kfree(info);
+error_hpslot:
+ kfree(hotplug_slot);
+error_slot:
+ kfree(slot);
+error:
+ return retval;
+}
+
+void cleanup_slots(struct controller *ctrl)
+{
+ struct list_head *tmp;
+ struct list_head *next;
+ struct slot *slot;
+
+ list_for_each_safe(tmp, next, &ctrl->slot_list) {
+ slot = list_entry(tmp, struct slot, slot_list);
+ list_del(&slot->slot_list);
+ cancel_delayed_work(&slot->work);
+ destroy_workqueue(slot->wq);
+ pci_hp_deregister(slot->hotplug_slot);
+ }
+}
+
+/*
+ * set_attention_status - Turn the Amber LED for a slot on or off,
+ * or make it blink
+ */
+static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
+{
+ struct slot *slot = get_slot(hotplug_slot);
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ hotplug_slot->info->attention_status = status;
+ slot->hpc_ops->set_attention_status(slot, status);
+
+ return 0;
+}
+
+static int enable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = get_slot(hotplug_slot);
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ return shpchp_sysfs_enable_slot(slot);
+}
+
+static int disable_slot(struct hotplug_slot *hotplug_slot)
+{
+ struct slot *slot = get_slot(hotplug_slot);
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ return shpchp_sysfs_disable_slot(slot);
+}
+
+static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = get_slot(hotplug_slot);
+ int retval;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ retval = slot->hpc_ops->get_power_status(slot, value);
+ if (retval < 0)
+ *value = hotplug_slot->info->power_status;
+
+ return 0;
+}
+
+static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = get_slot(hotplug_slot);
+ int retval;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ retval = slot->hpc_ops->get_attention_status(slot, value);
+ if (retval < 0)
+ *value = hotplug_slot->info->attention_status;
+
+ return 0;
+}
+
+static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = get_slot(hotplug_slot);
+ int retval;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ retval = slot->hpc_ops->get_latch_status(slot, value);
+ if (retval < 0)
+ *value = hotplug_slot->info->latch_status;
+
+ return 0;
+}
+
+static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
+{
+ struct slot *slot = get_slot(hotplug_slot);
+ int retval;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ retval = slot->hpc_ops->get_adapter_status(slot, value);
+ if (retval < 0)
+ *value = hotplug_slot->info->adapter_status;
+
+ return 0;
+}
+
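+/*
+ * A bridge is driveable if it is the AMD GOLAM 7450 (which has no SHPC
+ * capability), or if it has an SHPC capability and firmware has handed
+ * control of it to the OS.
+ */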
+static int is_shpc_capable(struct pci_dev *dev)
+{
+ if (dev->vendor == PCI_VENDOR_ID_AMD &&
+ dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
+ return 1;
+ if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
+ return 0;
+ if (get_hp_hw_control_from_firmware(dev))
+ return 0;
+ return 1;
+}
+
+static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int rc;
+ struct controller *ctrl;
+
+ if (!is_shpc_capable(pdev))
+ return -ENODEV;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl) {
+ dev_err(&pdev->dev, "%s: Out of memory\n", __func__);
+ goto err_out_none;
+ }
+ INIT_LIST_HEAD(&ctrl->slot_list);
+
+ rc = shpc_init(ctrl, pdev);
+ if (rc) {
+ ctrl_dbg(ctrl, "Controller initialization failed\n");
+ goto err_out_free_ctrl;
+ }
+
+ pci_set_drvdata(pdev, ctrl);
+
+ /* Setup the slot information structures */
+ rc = init_slots(ctrl);
+ if (rc) {
+ ctrl_err(ctrl, "Slot initialization failed\n");
+ goto err_out_release_ctlr;
+ }
+
+ rc = shpchp_create_ctrl_files(ctrl);
+ if (rc)
+ goto err_cleanup_slots;
+
+ return 0;
+
+err_cleanup_slots:
+ cleanup_slots(ctrl);
+err_out_release_ctlr:
+ ctrl->hpc_ops->release_ctlr(ctrl);
+err_out_free_ctrl:
+ kfree(ctrl);
+err_out_none:
+ return -ENODEV;
+}
+
+static void shpc_remove(struct pci_dev *dev)
+{
+ struct controller *ctrl = pci_get_drvdata(dev);
+
+ shpchp_remove_ctrl_files(ctrl);
+ ctrl->hpc_ops->release_ctlr(ctrl);
+ kfree(ctrl);
+}
+
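+/*
+ * Match every PCI-to-PCI bridge; shpc_probe() weeds out bridges that
+ * are not SHPC-capable.
+ */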
+static struct pci_device_id shpcd_pci_tbl[] = {
+ {PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0)},
+ { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, shpcd_pci_tbl);
+
+static struct pci_driver shpc_driver = {
+ .name = SHPC_MODULE_NAME,
+ .id_table = shpcd_pci_tbl,
+ .probe = shpc_probe,
+ .remove = shpc_remove,
+};
+
+static int __init shpcd_init(void)
+{
+ int retval;
+
+ retval = pci_register_driver(&shpc_driver);
+ dbg("%s: pci_register_driver = %d\n", __func__, retval);
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+
+ return retval;
+}
+
+static void __exit shpcd_cleanup(void)
+{
+ dbg("unload_shpchpd()\n");
+ pci_unregister_driver(&shpc_driver);
+ info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
+}
+
+module_init(shpcd_init);
+module_exit(shpcd_cleanup);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
new file mode 100644
index 000000000..10c792759
--- /dev/null
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -0,0 +1,730 @@
+/*
+ * Standard Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ * Copyright (C) 2003-2004 Intel Corporation
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include "../pci.h"
+#include "shpchp.h"
+
+static void interrupt_event_handler(struct work_struct *work);
+static int shpchp_enable_slot(struct slot *p_slot);
+static int shpchp_disable_slot(struct slot *p_slot);
+
+static int queue_interrupt_event(struct slot *p_slot, u32 event_type)
+{
+ struct event_info *info;
+
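+	/* may be called from the interrupt handler, hence GFP_ATOMIC */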
+ info = kmalloc(sizeof(*info), GFP_ATOMIC);
+ if (!info)
+ return -ENOMEM;
+
+ info->event_type = event_type;
+ info->p_slot = p_slot;
+ INIT_WORK(&info->work, interrupt_event_handler);
+
+ queue_work(p_slot->wq, &info->work);
+
+ return 0;
+}
+
+u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl)
+{
+ struct slot *p_slot;
+ u32 event_type;
+
+ /* Attention Button Change */
+ ctrl_dbg(ctrl, "Attention button interrupt received\n");
+
+ p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
+ p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
+
+	/*
+	 * Button pressed: queue the event so the worker thread can decide
+	 * what action, if any, to take.
+	 */
+ ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot));
+ event_type = INT_BUTTON_PRESS;
+
+ queue_interrupt_event(p_slot, event_type);
+
+ return 0;
+}
+
+u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl)
+{
+ struct slot *p_slot;
+ u8 getstatus;
+ u32 event_type;
+
+ /* Switch Change */
+ ctrl_dbg(ctrl, "Switch interrupt received\n");
+
+ p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
+ p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
+ p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ ctrl_dbg(ctrl, "Card present %x Power status %x\n",
+ p_slot->presence_save, p_slot->pwr_save);
+
+ if (getstatus) {
+ /*
+ * Switch opened
+ */
+ ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot));
+ event_type = INT_SWITCH_OPEN;
+ if (p_slot->pwr_save && p_slot->presence_save) {
+ event_type = INT_POWER_FAULT;
+			ctrl_err(ctrl, "Surprise removal of card\n");
+ }
+ } else {
+ /*
+ * Switch closed
+ */
+ ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot));
+ event_type = INT_SWITCH_CLOSE;
+ }
+
+ queue_interrupt_event(p_slot, event_type);
+
+ return 1;
+}
+
+u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl)
+{
+ struct slot *p_slot;
+ u32 event_type;
+
+ /* Presence Change */
+ ctrl_dbg(ctrl, "Presence/Notify input change\n");
+
+ p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
+
+ /*
+ * Save the presence state
+ */
+ p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
+ if (p_slot->presence_save) {
+ /*
+ * Card Present
+ */
+ ctrl_info(ctrl, "Card present on Slot(%s)\n",
+ slot_name(p_slot));
+ event_type = INT_PRESENCE_ON;
+ } else {
+ /*
+ * Not Present
+ */
+ ctrl_info(ctrl, "Card not present on Slot(%s)\n",
+ slot_name(p_slot));
+ event_type = INT_PRESENCE_OFF;
+ }
+
+ queue_interrupt_event(p_slot, event_type);
+
+ return 1;
+}
+
+u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
+{
+ struct slot *p_slot;
+ u32 event_type;
+
+ /* Power fault */
+ ctrl_dbg(ctrl, "Power fault interrupt received\n");
+
+ p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
+
+ if (!(p_slot->hpc_ops->query_power_fault(p_slot))) {
+ /*
+ * Power fault Cleared
+ */
+ ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n",
+ slot_name(p_slot));
+ p_slot->status = 0x00;
+ event_type = INT_POWER_FAULT_CLEAR;
+ } else {
+ /*
+ * Power fault
+ */
+ ctrl_info(ctrl, "Power fault on Slot(%s)\n", slot_name(p_slot));
+ event_type = INT_POWER_FAULT;
+ /* set power fault status for this board */
+ p_slot->status = 0xFF;
+ ctrl_info(ctrl, "Power fault bit %x set\n", hp_slot);
+ }
+
+ queue_interrupt_event(p_slot, event_type);
+
+ return 1;
+}
+
+/*
+ * The following routines constitute the bulk of the
+ * hotplug controller logic.
+ */
+static int change_bus_speed(struct controller *ctrl, struct slot *p_slot,
+ enum pci_bus_speed speed)
+{
+ int rc = 0;
+
+ ctrl_dbg(ctrl, "Change speed to %d\n", speed);
+ rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed);
+ if (rc) {
+ ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
+ __func__);
+ return WRONG_BUS_FREQUENCY;
+ }
+ return rc;
+}
+
+static int fix_bus_speed(struct controller *ctrl, struct slot *pslot,
+ u8 flag, enum pci_bus_speed asp, enum pci_bus_speed bsp,
+ enum pci_bus_speed msp)
+{
+ int rc = 0;
+
+ /*
+ * If other slots on the same bus are occupied, we cannot
+ * change the bus speed.
+ */
+ if (flag) {
+ if (asp < bsp) {
+ ctrl_err(ctrl, "Speed of bus %x and adapter %x mismatch\n",
+ bsp, asp);
+ rc = WRONG_BUS_FREQUENCY;
+ }
+ return rc;
+ }
+
+ if (asp < msp) {
+ if (bsp != asp)
+ rc = change_bus_speed(ctrl, pslot, asp);
+ } else {
+ if (bsp != msp)
+ rc = change_bus_speed(ctrl, pslot, msp);
+ }
+ return rc;
+}
+
+/**
+ * board_added - Called after a board has been added to the system.
+ * @p_slot: target &slot
+ *
+ * Turns power on for the board.
+ * Configures board.
+ */
+static int board_added(struct slot *p_slot)
+{
+ u8 hp_slot;
+ u8 slots_not_empty = 0;
+ int rc = 0;
+ enum pci_bus_speed asp, bsp, msp;
+ struct controller *ctrl = p_slot->ctrl;
+ struct pci_bus *parent = ctrl->pci_dev->subordinate;
+
+ hp_slot = p_slot->device - ctrl->slot_device_offset;
+
+	ctrl_dbg(ctrl, "%s: p_slot->device, slot_offset, hp_slot = %d, %d, %d\n",
+ __func__, p_slot->device, ctrl->slot_device_offset, hp_slot);
+
+ /* Power on slot without connecting to bus */
+ rc = p_slot->hpc_ops->power_on_slot(p_slot);
+ if (rc) {
+ ctrl_err(ctrl, "Failed to power on slot\n");
+ return -1;
+ }
+
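+	/*
+	 * On this bridge (8086:0332) the slot is first brought up at
+	 * 33 MHz; the speed is fixed up below once the adapter speed
+	 * is known.
+	 */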
+ if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) {
+ rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz);
+ if (rc) {
+ ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
+ __func__);
+ return WRONG_BUS_FREQUENCY;
+ }
+
+ /* turn on board, blink green LED, turn off Amber LED */
+ rc = p_slot->hpc_ops->slot_enable(p_slot);
+ if (rc) {
+ ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
+ return rc;
+ }
+ }
+
+ rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp);
+ if (rc) {
+ ctrl_err(ctrl, "Can't get adapter speed or bus mode mismatch\n");
+ return WRONG_BUS_FREQUENCY;
+ }
+
+ bsp = ctrl->pci_dev->subordinate->cur_bus_speed;
+ msp = ctrl->pci_dev->subordinate->max_bus_speed;
+
+ /* Check if there are other slots or devices on the same bus */
+ if (!list_empty(&ctrl->pci_dev->subordinate->devices))
+ slots_not_empty = 1;
+
+ ctrl_dbg(ctrl, "%s: slots_not_empty %d, adapter_speed %d, bus_speed %d, max_bus_speed %d\n",
+ __func__, slots_not_empty, asp,
+ bsp, msp);
+
+ rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, asp, bsp, msp);
+ if (rc)
+ return rc;
+
+ /* turn on board, blink green LED, turn off Amber LED */
+ rc = p_slot->hpc_ops->slot_enable(p_slot);
+ if (rc) {
+ ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
+ return rc;
+ }
+
+	/* Wait ~1 second so a power fault, if any, has time to be reported */
+ msleep(1000);
+
+ ctrl_dbg(ctrl, "%s: slot status = %x\n", __func__, p_slot->status);
+ /* Check for a power fault */
+ if (p_slot->status == 0xFF) {
+ /* power fault occurred, but it was benign */
+ ctrl_dbg(ctrl, "%s: Power fault\n", __func__);
+ rc = POWER_FAILURE;
+ p_slot->status = 0;
+ goto err_exit;
+ }
+
+ if (shpchp_configure_device(p_slot)) {
+ ctrl_err(ctrl, "Cannot add device at %04x:%02x:%02x\n",
+ pci_domain_nr(parent), p_slot->bus, p_slot->device);
+ goto err_exit;
+ }
+
+ p_slot->status = 0;
+ p_slot->is_a_board = 0x01;
+ p_slot->pwr_save = 1;
+
+ p_slot->hpc_ops->green_led_on(p_slot);
+
+ return 0;
+
+err_exit:
+ /* turn off slot, turn on Amber LED, turn off Green LED */
+ rc = p_slot->hpc_ops->slot_disable(p_slot);
+ if (rc) {
+ ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n",
+ __func__);
+ return rc;
+ }
+
+	return rc;
+}
+
+
+/**
+ * remove_board - Turns off slot and LEDs
+ * @p_slot: target &slot
+ */
+static int remove_board(struct slot *p_slot)
+{
+ struct controller *ctrl = p_slot->ctrl;
+ u8 hp_slot;
+ int rc;
+
+ if (shpchp_unconfigure_device(p_slot))
+		return 1;
+
+ hp_slot = p_slot->device - ctrl->slot_device_offset;
+ p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
+
+ ctrl_dbg(ctrl, "%s: hp_slot = %d\n", __func__, hp_slot);
+
+ /* Change status to shutdown */
+ if (p_slot->is_a_board)
+ p_slot->status = 0x01;
+
+ /* turn off slot, turn on Amber LED, turn off Green LED */
+ rc = p_slot->hpc_ops->slot_disable(p_slot);
+ if (rc) {
+ ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n",
+ __func__);
+ return rc;
+ }
+
+ rc = p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ if (rc) {
+ ctrl_err(ctrl, "Issue of Set Attention command failed\n");
+ return rc;
+ }
+
+ p_slot->pwr_save = 0;
+ p_slot->is_a_board = 0;
+
+ return 0;
+}
+
+
+struct pushbutton_work_info {
+ struct slot *p_slot;
+ struct work_struct work;
+};
+
+/**
+ * shpchp_pushbutton_thread - handle pushbutton events
+ * @work: &struct work_struct to be handled
+ *
+ * Scheduled procedure to handle blocking stuff for the pushbuttons.
+ * Handles all pending events and exits.
+ */
+static void shpchp_pushbutton_thread(struct work_struct *work)
+{
+ struct pushbutton_work_info *info =
+ container_of(work, struct pushbutton_work_info, work);
+ struct slot *p_slot = info->p_slot;
+
+ mutex_lock(&p_slot->lock);
+ switch (p_slot->state) {
+ case POWEROFF_STATE:
+ mutex_unlock(&p_slot->lock);
+ shpchp_disable_slot(p_slot);
+ mutex_lock(&p_slot->lock);
+ p_slot->state = STATIC_STATE;
+ break;
+ case POWERON_STATE:
+ mutex_unlock(&p_slot->lock);
+ if (shpchp_enable_slot(p_slot))
+ p_slot->hpc_ops->green_led_off(p_slot);
+ mutex_lock(&p_slot->lock);
+ p_slot->state = STATIC_STATE;
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&p_slot->lock);
+
+ kfree(info);
+}
+
+void shpchp_queue_pushbutton_work(struct work_struct *work)
+{
+ struct slot *p_slot = container_of(work, struct slot, work.work);
+ struct pushbutton_work_info *info;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n",
+ __func__);
+ return;
+ }
+ info->p_slot = p_slot;
+ INIT_WORK(&info->work, shpchp_pushbutton_thread);
+
+ mutex_lock(&p_slot->lock);
+ switch (p_slot->state) {
+ case BLINKINGOFF_STATE:
+ p_slot->state = POWEROFF_STATE;
+ break;
+ case BLINKINGON_STATE:
+ p_slot->state = POWERON_STATE;
+ break;
+ default:
+ kfree(info);
+ goto out;
+ }
+ queue_work(p_slot->wq, &info->work);
+ out:
+ mutex_unlock(&p_slot->lock);
+}
+
+static int update_slot_info(struct slot *slot)
+{
+ struct hotplug_slot_info *info;
+ int result;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ slot->hpc_ops->get_power_status(slot, &(info->power_status));
+ slot->hpc_ops->get_attention_status(slot, &(info->attention_status));
+ slot->hpc_ops->get_latch_status(slot, &(info->latch_status));
+ slot->hpc_ops->get_adapter_status(slot, &(info->adapter_status));
+
+ result = pci_hp_change_slot_info(slot->hotplug_slot, info);
+	kfree(info);
+ return result;
+}
+
+/*
+ * Note: This function must be called with slot->lock held
+ */
+static void handle_button_press_event(struct slot *p_slot)
+{
+ u8 getstatus;
+ struct controller *ctrl = p_slot->ctrl;
+
+ switch (p_slot->state) {
+ case STATIC_STATE:
+ p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ if (getstatus) {
+ p_slot->state = BLINKINGOFF_STATE;
+ ctrl_info(ctrl, "PCI slot #%s - powering off due to button press\n",
+ slot_name(p_slot));
+ } else {
+ p_slot->state = BLINKINGON_STATE;
+ ctrl_info(ctrl, "PCI slot #%s - powering on due to button press\n",
+ slot_name(p_slot));
+ }
+ /* blink green LED and turn off amber */
+ p_slot->hpc_ops->green_led_blink(p_slot);
+ p_slot->hpc_ops->set_attention_status(p_slot, 0);
+
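+		/* give the user 5 seconds to press the button again and cancel */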
+ queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ);
+ break;
+ case BLINKINGOFF_STATE:
+ case BLINKINGON_STATE:
+		/*
+		 * Cancel if we are still blinking; the user pressed the
+		 * attention button again before the 5-second limit expired,
+		 * which cancels the pending hot-add or hot-remove.
+		 */
+ ctrl_info(ctrl, "Button cancel on Slot(%s)\n",
+ slot_name(p_slot));
+ cancel_delayed_work(&p_slot->work);
+ if (p_slot->state == BLINKINGOFF_STATE)
+ p_slot->hpc_ops->green_led_on(p_slot);
+ else
+ p_slot->hpc_ops->green_led_off(p_slot);
+ p_slot->hpc_ops->set_attention_status(p_slot, 0);
+ ctrl_info(ctrl, "PCI slot #%s - action canceled due to button press\n",
+ slot_name(p_slot));
+ p_slot->state = STATIC_STATE;
+ break;
+ case POWEROFF_STATE:
+ case POWERON_STATE:
+		/*
+		 * Ignore if the slot is in the power-on or power-off state;
+		 * the previous attention-button action (hot-add or
+		 * hot-remove) is still in progress.
+		 */
+ ctrl_info(ctrl, "Button ignore on Slot(%s)\n",
+ slot_name(p_slot));
+ update_slot_info(p_slot);
+ break;
+ default:
+ ctrl_warn(ctrl, "Not a valid state\n");
+ break;
+ }
+}
+
+static void interrupt_event_handler(struct work_struct *work)
+{
+ struct event_info *info = container_of(work, struct event_info, work);
+ struct slot *p_slot = info->p_slot;
+
+ mutex_lock(&p_slot->lock);
+ switch (info->event_type) {
+ case INT_BUTTON_PRESS:
+ handle_button_press_event(p_slot);
+ break;
+ case INT_POWER_FAULT:
+ ctrl_dbg(p_slot->ctrl, "%s: Power fault\n", __func__);
+ p_slot->hpc_ops->set_attention_status(p_slot, 1);
+ p_slot->hpc_ops->green_led_off(p_slot);
+ break;
+ default:
+ update_slot_info(p_slot);
+ break;
+ }
+ mutex_unlock(&p_slot->lock);
+
+ kfree(info);
+}
+
+
+static int shpchp_enable_slot(struct slot *p_slot)
+{
+ u8 getstatus = 0;
+ int rc, retval = -ENODEV;
+ struct controller *ctrl = p_slot->ctrl;
+
+ /* Check to see if (latch closed, card present, power off) */
+ mutex_lock(&p_slot->ctrl->crit_sect);
+ rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
+ if (rc || !getstatus) {
+ ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
+ goto out;
+ }
+ rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ if (rc || getstatus) {
+ ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot));
+ goto out;
+ }
+ rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ if (rc || getstatus) {
+ ctrl_info(ctrl, "Already enabled on slot(%s)\n",
+ slot_name(p_slot));
+ goto out;
+ }
+
+ p_slot->is_a_board = 1;
+
+ /* We have to save the presence info for these slots */
+ p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save));
+ p_slot->hpc_ops->get_power_status(p_slot, &(p_slot->pwr_save));
+ ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
+ p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+
+	if ((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD &&
+	     p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458)
+	     && p_slot->ctrl->num_slots == 1) {
+		/* handle AMD POGO errata; this must be done before enable */
+		amd_pogo_errata_save_misc_reg(p_slot);
+		retval = board_added(p_slot);
+		/* handle AMD POGO errata; this must be done after enable */
+		amd_pogo_errata_restore_misc_reg(p_slot);
+	} else {
+		retval = board_added(p_slot);
+	}
+
+ if (retval) {
+ p_slot->hpc_ops->get_adapter_status(p_slot,
+ &(p_slot->presence_save));
+ p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ }
+
+ update_slot_info(p_slot);
+ out:
+ mutex_unlock(&p_slot->ctrl->crit_sect);
+ return retval;
+}
+
+
+static int shpchp_disable_slot(struct slot *p_slot)
+{
+ u8 getstatus = 0;
+ int rc, retval = -ENODEV;
+ struct controller *ctrl = p_slot->ctrl;
+
+ if (!p_slot->ctrl)
+ return -ENODEV;
+
+ /* Check to see if (latch closed, card present, power on) */
+ mutex_lock(&p_slot->ctrl->crit_sect);
+
+ rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
+ if (rc || !getstatus) {
+ ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot));
+ goto out;
+ }
+ rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
+ if (rc || getstatus) {
+ ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot));
+ goto out;
+ }
+ rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
+ if (rc || !getstatus) {
+ ctrl_info(ctrl, "Already disabled on slot(%s)\n",
+ slot_name(p_slot));
+ goto out;
+ }
+
+ retval = remove_board(p_slot);
+ update_slot_info(p_slot);
+ out:
+ mutex_unlock(&p_slot->ctrl->crit_sect);
+ return retval;
+}
+
+int shpchp_sysfs_enable_slot(struct slot *p_slot)
+{
+ int retval = -ENODEV;
+ struct controller *ctrl = p_slot->ctrl;
+
+ mutex_lock(&p_slot->lock);
+ switch (p_slot->state) {
+ case BLINKINGON_STATE:
+ cancel_delayed_work(&p_slot->work);
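+		/* fall through */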
+ case STATIC_STATE:
+ p_slot->state = POWERON_STATE;
+ mutex_unlock(&p_slot->lock);
+ retval = shpchp_enable_slot(p_slot);
+ mutex_lock(&p_slot->lock);
+ p_slot->state = STATIC_STATE;
+ break;
+ case POWERON_STATE:
+		ctrl_info(ctrl, "Slot %s is already in the powering-on state\n",
+			  slot_name(p_slot));
+ break;
+ case BLINKINGOFF_STATE:
+ case POWEROFF_STATE:
+ ctrl_info(ctrl, "Already enabled on slot %s\n",
+ slot_name(p_slot));
+ break;
+ default:
+ ctrl_err(ctrl, "Not a valid state on slot %s\n",
+ slot_name(p_slot));
+ break;
+ }
+ mutex_unlock(&p_slot->lock);
+
+ return retval;
+}
+
+int shpchp_sysfs_disable_slot(struct slot *p_slot)
+{
+ int retval = -ENODEV;
+ struct controller *ctrl = p_slot->ctrl;
+
+ mutex_lock(&p_slot->lock);
+ switch (p_slot->state) {
+ case BLINKINGOFF_STATE:
+ cancel_delayed_work(&p_slot->work);
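+		/* fall through */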
+ case STATIC_STATE:
+ p_slot->state = POWEROFF_STATE;
+ mutex_unlock(&p_slot->lock);
+ retval = shpchp_disable_slot(p_slot);
+ mutex_lock(&p_slot->lock);
+ p_slot->state = STATIC_STATE;
+ break;
+ case POWEROFF_STATE:
+		ctrl_info(ctrl, "Slot %s is already in the powering-off state\n",
+			  slot_name(p_slot));
+ break;
+ case BLINKINGON_STATE:
+ case POWERON_STATE:
+ ctrl_info(ctrl, "Already disabled on slot %s\n",
+ slot_name(p_slot));
+ break;
+ default:
+ ctrl_err(ctrl, "Not a valid state on slot %s\n",
+ slot_name(p_slot));
+ break;
+ }
+ mutex_unlock(&p_slot->lock);
+
+ return retval;
+}
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
new file mode 100644
index 000000000..7d223e908
--- /dev/null
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -0,0 +1,1111 @@
+/*
+ * Standard PCI Hot Plug Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ * Copyright (C) 2003-2004 Intel Corporation
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+
+#include "shpchp.h"
+
+/* Slot Available Register I field definition */
+#define SLOT_33MHZ 0x0000001f
+#define SLOT_66MHZ_PCIX 0x00001f00
+#define SLOT_100MHZ_PCIX 0x001f0000
+#define SLOT_133MHZ_PCIX 0x1f000000
+
+/* Slot Available Register II field definition */
+#define SLOT_66MHZ 0x0000001f
+#define SLOT_66MHZ_PCIX_266 0x00000f00
+#define SLOT_100MHZ_PCIX_266 0x0000f000
+#define SLOT_133MHZ_PCIX_266 0x000f0000
+#define SLOT_66MHZ_PCIX_533 0x00f00000
+#define SLOT_100MHZ_PCIX_533 0x0f000000
+#define SLOT_133MHZ_PCIX_533 0xf0000000
+
+/* Slot Configuration */
+#define SLOT_NUM 0x0000001F
+#define FIRST_DEV_NUM 0x00001F00
+#define PSN 0x07FF0000
+#define UPDOWN 0x20000000
+#define MRLSENSOR 0x40000000
+#define ATTN_BUTTON 0x80000000
+
+/*
+ * Interrupt Locator Register definitions
+ */
+#define CMD_INTR_PENDING (1 << 0)
+#define SLOT_INTR_PENDING(i)	(1 << ((i) + 1))
+
+/*
+ * Controller SERR-INT Register
+ */
+#define GLOBAL_INTR_MASK (1 << 0)
+#define GLOBAL_SERR_MASK (1 << 1)
+#define COMMAND_INTR_MASK (1 << 2)
+#define ARBITER_SERR_MASK (1 << 3)
+#define COMMAND_DETECTED (1 << 16)
+#define ARBITER_DETECTED (1 << 17)
+#define SERR_INTR_RSVDZ_MASK 0xfffc0000
+
+/*
+ * Logical Slot Register definitions
+ */
+#define SLOT_REG(i)		(SLOT1 + (4 * (i)))
+
+#define SLOT_STATE_SHIFT (0)
+#define SLOT_STATE_MASK (3 << 0)
+#define SLOT_STATE_PWRONLY (1)
+#define SLOT_STATE_ENABLED (2)
+#define SLOT_STATE_DISABLED (3)
+#define PWR_LED_STATE_SHIFT (2)
+#define PWR_LED_STATE_MASK (3 << 2)
+#define ATN_LED_STATE_SHIFT (4)
+#define ATN_LED_STATE_MASK (3 << 4)
+#define ATN_LED_STATE_ON (1)
+#define ATN_LED_STATE_BLINK (2)
+#define ATN_LED_STATE_OFF (3)
+#define POWER_FAULT (1 << 6)
+#define ATN_BUTTON (1 << 7)
+#define MRL_SENSOR (1 << 8)
+#define MHZ66_CAP (1 << 9)
+#define PRSNT_SHIFT (10)
+#define PRSNT_MASK (3 << 10)
+#define PCIX_CAP_SHIFT (12)
+#define PCIX_CAP_MASK_PI1 (3 << 12)
+#define PCIX_CAP_MASK_PI2 (7 << 12)
+#define PRSNT_CHANGE_DETECTED (1 << 16)
+#define ISO_PFAULT_DETECTED (1 << 17)
+#define BUTTON_PRESS_DETECTED (1 << 18)
+#define MRL_CHANGE_DETECTED (1 << 19)
+#define CON_PFAULT_DETECTED (1 << 20)
+#define PRSNT_CHANGE_INTR_MASK (1 << 24)
+#define ISO_PFAULT_INTR_MASK (1 << 25)
+#define BUTTON_PRESS_INTR_MASK (1 << 26)
+#define MRL_CHANGE_INTR_MASK (1 << 27)
+#define CON_PFAULT_INTR_MASK (1 << 28)
+#define MRL_CHANGE_SERR_MASK (1 << 29)
+#define CON_PFAULT_SERR_MASK (1 << 30)
+#define SLOT_REG_RSVDZ_MASK ((1 << 15) | (7 << 21))
+
+/*
+ * SHPC Command Code definitions
+ *
+ * Slot Operation 00h - 3Fh
+ * Set Bus Segment Speed/Mode A 40h - 47h
+ * Power-Only All Slots 48h
+ * Enable All Slots 49h
+ * Set Bus Segment Speed/Mode B (PI=2) 50h - 5Fh
+ * Reserved Command Codes 60h - BFh
+ * Vendor Specific Commands C0h - FFh
+ */
+#define SET_SLOT_PWR 0x01 /* Slot Operation */
+#define SET_SLOT_ENABLE 0x02
+#define SET_SLOT_DISABLE 0x03
+#define SET_PWR_ON 0x04
+#define SET_PWR_BLINK 0x08
+#define SET_PWR_OFF 0x0c
+#define SET_ATTN_ON 0x10
+#define SET_ATTN_BLINK 0x20
+#define SET_ATTN_OFF 0x30
+#define SETA_PCI_33MHZ 0x40 /* Set Bus Segment Speed/Mode A */
+#define SETA_PCI_66MHZ 0x41
+#define SETA_PCIX_66MHZ 0x42
+#define SETA_PCIX_100MHZ 0x43
+#define SETA_PCIX_133MHZ 0x44
+#define SETA_RESERVED1 0x45
+#define SETA_RESERVED2 0x46
+#define SETA_RESERVED3 0x47
+#define SET_PWR_ONLY_ALL 0x48 /* Power-Only All Slots */
+#define SET_ENABLE_ALL 0x49 /* Enable All Slots */
+#define SETB_PCI_33MHZ 0x50 /* Set Bus Segment Speed/Mode B */
+#define SETB_PCI_66MHZ 0x51
+#define SETB_PCIX_66MHZ_PM 0x52
+#define SETB_PCIX_100MHZ_PM 0x53
+#define SETB_PCIX_133MHZ_PM 0x54
+#define SETB_PCIX_66MHZ_EM 0x55
+#define SETB_PCIX_100MHZ_EM 0x56
+#define SETB_PCIX_133MHZ_EM 0x57
+#define SETB_PCIX_66MHZ_266 0x58
+#define SETB_PCIX_100MHZ_266 0x59
+#define SETB_PCIX_133MHZ_266 0x5a
+#define SETB_PCIX_66MHZ_533 0x5b
+#define SETB_PCIX_100MHZ_533 0x5c
+#define SETB_PCIX_133MHZ_533 0x5d
+#define SETB_RESERVED1 0x5e
+#define SETB_RESERVED2 0x5f
+
+/*
+ * SHPC controller command error code
+ */
+#define SWITCH_OPEN 0x1
+#define INVALID_CMD 0x2
+#define INVALID_SPEED_MODE 0x4
+
+/*
+ * For accessing SHPC Working Register Set via PCI Configuration Space
+ */
+#define DWORD_SELECT 0x2
+#define DWORD_DATA 0x4
+
+/* Field Offset in Logical Slot Register - byte boundary */
+#define SLOT_EVENT_LATCH 0x2
+#define SLOT_SERR_INT_MASK 0x3
+
+static irqreturn_t shpc_isr(int irq, void *dev_id);
+static void start_int_poll_timer(struct controller *ctrl, int sec);
+static int hpc_check_cmd_status(struct controller *ctrl);
+
+static inline u8 shpc_readb(struct controller *ctrl, int reg)
+{
+ return readb(ctrl->creg + reg);
+}
+
+static inline void shpc_writeb(struct controller *ctrl, int reg, u8 val)
+{
+ writeb(val, ctrl->creg + reg);
+}
+
+static inline u16 shpc_readw(struct controller *ctrl, int reg)
+{
+ return readw(ctrl->creg + reg);
+}
+
+static inline void shpc_writew(struct controller *ctrl, int reg, u16 val)
+{
+ writew(val, ctrl->creg + reg);
+}
+
+static inline u32 shpc_readl(struct controller *ctrl, int reg)
+{
+ return readl(ctrl->creg + reg);
+}
+
+static inline void shpc_writel(struct controller *ctrl, int reg, u32 val)
+{
+ writel(val, ctrl->creg + reg);
+}
+
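+/*
+ * Read one dword of the SHPC working register set through the config
+ * space window: write the dword index to the SELECT register, then
+ * read the value back through the DATA register.
+ */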
+static inline int shpc_indirect_read(struct controller *ctrl, int index,
+ u32 *value)
+{
+ int rc;
+ u32 cap_offset = ctrl->cap_offset;
+ struct pci_dev *pdev = ctrl->pci_dev;
+
+ rc = pci_write_config_byte(pdev, cap_offset + DWORD_SELECT, index);
+ if (rc)
+ return rc;
+ return pci_read_config_dword(pdev, cap_offset + DWORD_DATA, value);
+}
+
+/*
+ * This is the interrupt polling timeout function.
+ */
+static void int_poll_timeout(unsigned long data)
+{
+ struct controller *ctrl = (struct controller *)data;
+
+	/* Poll for interrupt events; irq == 0 means we are polling */
+ shpc_isr(0, ctrl);
+
+ init_timer(&ctrl->poll_timer);
+ if (!shpchp_poll_time)
+ shpchp_poll_time = 2; /* default polling interval is 2 sec */
+
+ start_int_poll_timer(ctrl, shpchp_poll_time);
+}
+
+/*
+ * This function starts the interrupt polling timer.
+ */
+static void start_int_poll_timer(struct controller *ctrl, int sec)
+{
+ /* Clamp to sane value */
+ if ((sec <= 0) || (sec > 60))
+ sec = 2;
+
+ ctrl->poll_timer.function = &int_poll_timeout;
+ ctrl->poll_timer.data = (unsigned long)ctrl;
+ ctrl->poll_timer.expires = jiffies + sec * HZ;
+ add_timer(&ctrl->poll_timer);
+}
+
+static inline int is_ctrl_busy(struct controller *ctrl)
+{
+ u16 cmd_status = shpc_readw(ctrl, CMD_STATUS);
+ return cmd_status & 0x1;
+}
+
+/*
+ * Returns 1 if SHPC finishes executing a command within 1 sec,
+ * otherwise returns 0.
+ */
+static inline int shpc_poll_ctrl_busy(struct controller *ctrl)
+{
+ int i;
+
+ if (!is_ctrl_busy(ctrl))
+ return 1;
+
+ /* Check every 0.1 sec for a total of 1 sec */
+ for (i = 0; i < 10; i++) {
+ msleep(100);
+ if (!is_ctrl_busy(ctrl))
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int shpc_wait_cmd(struct controller *ctrl)
+{
+ int retval = 0;
+ unsigned long timeout = msecs_to_jiffies(1000);
+ int rc;
+
+ if (shpchp_poll_mode)
+ rc = shpc_poll_ctrl_busy(ctrl);
+ else
+ rc = wait_event_interruptible_timeout(ctrl->queue,
+ !is_ctrl_busy(ctrl), timeout);
+ if (!rc && is_ctrl_busy(ctrl)) {
+ retval = -EIO;
+ ctrl_err(ctrl, "Command not completed in 1000 msec\n");
+ } else if (rc < 0) {
+ retval = -EINTR;
+ ctrl_info(ctrl, "Command was interrupted by a signal\n");
+ }
+
+ return retval;
+}
+
+static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
+{
+ struct controller *ctrl = slot->ctrl;
+ u16 cmd_status;
+ int retval = 0;
+ u16 temp_word;
+
+ mutex_lock(&slot->ctrl->cmd_lock);
+
+ if (!shpc_poll_ctrl_busy(ctrl)) {
+		/* After 1 sec the controller is still busy */
+ ctrl_err(ctrl, "Controller is still busy after 1 sec\n");
+ retval = -EBUSY;
+ goto out;
+ }
+
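+	/*
+	 * The command register takes the 1-based target slot number in
+	 * its upper byte and the command code in its lower byte.
+	 */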
+ ++t_slot;
+ temp_word = (t_slot << 8) | (cmd & 0xFF);
+ ctrl_dbg(ctrl, "%s: t_slot %x cmd %x\n", __func__, t_slot, cmd);
+
+	/*
+	 * The busy poll above ensured the Controller Busy bit is 0 before
+	 * we send out the command.
+	 */
+ shpc_writew(ctrl, CMD, temp_word);
+
+ /*
+ * Wait for command completion.
+ */
+ retval = shpc_wait_cmd(slot->ctrl);
+ if (retval)
+ goto out;
+
+ cmd_status = hpc_check_cmd_status(slot->ctrl);
+ if (cmd_status) {
+		ctrl_err(ctrl, "Failed to issue command 0x%x (error code = %d)\n",
+ cmd, cmd_status);
+ retval = -EIO;
+ }
+ out:
+ mutex_unlock(&slot->ctrl->cmd_lock);
+ return retval;
+}
+
+static int hpc_check_cmd_status(struct controller *ctrl)
+{
+ int retval = 0;
+ u16 cmd_status = shpc_readw(ctrl, CMD_STATUS) & 0x000F;
+
+ switch (cmd_status >> 1) {
+ case 0:
+ retval = 0;
+ break;
+ case 1:
+ retval = SWITCH_OPEN;
+ ctrl_err(ctrl, "Switch opened!\n");
+ break;
+ case 2:
+ retval = INVALID_CMD;
+ ctrl_err(ctrl, "Invalid HPC command!\n");
+ break;
+ case 4:
+ retval = INVALID_SPEED_MODE;
+ ctrl_err(ctrl, "Invalid bus speed/mode!\n");
+ break;
+ default:
+ retval = cmd_status;
+ }
+
+ return retval;
+}
+
+
+static int hpc_get_attention_status(struct slot *slot, u8 *status)
+{
+ struct controller *ctrl = slot->ctrl;
+ u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
+ u8 state = (slot_reg & ATN_LED_STATE_MASK) >> ATN_LED_STATE_SHIFT;
+
+ switch (state) {
+ case ATN_LED_STATE_ON:
+ *status = 1; /* On */
+ break;
+ case ATN_LED_STATE_BLINK:
+ *status = 2; /* Blink */
+ break;
+ case ATN_LED_STATE_OFF:
+ *status = 0; /* Off */
+ break;
+ default:
+ *status = 0xFF; /* Reserved */
+ break;
+ }
+
+ return 0;
+}
+
+static int hpc_get_power_status(struct slot *slot, u8 *status)
+{
+ struct controller *ctrl = slot->ctrl;
+ u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
+ u8 state = (slot_reg & SLOT_STATE_MASK) >> SLOT_STATE_SHIFT;
+
+ switch (state) {
+ case SLOT_STATE_PWRONLY:
+ *status = 2; /* Powered only */
+ break;
+ case SLOT_STATE_ENABLED:
+ *status = 1; /* Enabled */
+ break;
+ case SLOT_STATE_DISABLED:
+ *status = 0; /* Disabled */
+ break;
+ default:
+ *status = 0xFF; /* Reserved */
+ break;
+ }
+
+ return 0;
+}
+
+
+static int hpc_get_latch_status(struct slot *slot, u8 *status)
+{
+ struct controller *ctrl = slot->ctrl;
+ u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
+
+ *status = !!(slot_reg & MRL_SENSOR); /* 0 -> close; 1 -> open */
+
+ return 0;
+}
+
+static int hpc_get_adapter_status(struct slot *slot, u8 *status)
+{
+ struct controller *ctrl = slot->ctrl;
+ u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
+ u8 state = (slot_reg & PRSNT_MASK) >> PRSNT_SHIFT;
+
+ *status = (state != 0x3) ? 1 : 0;
+
+ return 0;
+}
+
+static int hpc_get_prog_int(struct slot *slot, u8 *prog_int)
+{
+ struct controller *ctrl = slot->ctrl;
+
+ *prog_int = shpc_readb(ctrl, PROG_INTERFACE);
+
+ return 0;
+}
+
+static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
+{
+ int retval = 0;
+ struct controller *ctrl = slot->ctrl;
+ u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
+ u8 m66_cap = !!(slot_reg & MHZ66_CAP);
+ u8 pi, pcix_cap;
+
+ retval = hpc_get_prog_int(slot, &pi);
+ if (retval)
+ return retval;
+
+ switch (pi) {
+ case 1:
+ pcix_cap = (slot_reg & PCIX_CAP_MASK_PI1) >> PCIX_CAP_SHIFT;
+ break;
+ case 2:
+ pcix_cap = (slot_reg & PCIX_CAP_MASK_PI2) >> PCIX_CAP_SHIFT;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ctrl_dbg(ctrl, "%s: slot_reg = %x, pcix_cap = %x, m66_cap = %x\n",
+ __func__, slot_reg, pcix_cap, m66_cap);
+
+ switch (pcix_cap) {
+ case 0x0:
+ *value = m66_cap ? PCI_SPEED_66MHz : PCI_SPEED_33MHz;
+ break;
+ case 0x1:
+ *value = PCI_SPEED_66MHz_PCIX;
+ break;
+ case 0x3:
+ *value = PCI_SPEED_133MHz_PCIX;
+ break;
+ case 0x4:
+ *value = PCI_SPEED_133MHz_PCIX_266;
+ break;
+ case 0x5:
+ *value = PCI_SPEED_133MHz_PCIX_533;
+ break;
+ case 0x2:
+ default:
+ *value = PCI_SPEED_UNKNOWN;
+ retval = -ENODEV;
+ break;
+ }
+
+ ctrl_dbg(ctrl, "Adapter speed = %d\n", *value);
+ return retval;
+}
+
+static int hpc_get_mode1_ECC_cap(struct slot *slot, u8 *mode)
+{
+ int retval = 0;
+ struct controller *ctrl = slot->ctrl;
+ u16 sec_bus_status = shpc_readw(ctrl, SEC_BUS_CONFIG);
+ u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
+
+	if (pi == 2) {
+		*mode = (sec_bus_status & 0x0100) >> 8;
+	} else {
+		/* Mode 1 ECC reporting is only defined for PI 2 */
+		*mode = 0;
+		retval = -1;
+	}
+
+ ctrl_dbg(ctrl, "Mode 1 ECC cap = %d\n", *mode);
+ return retval;
+}
+
+static int hpc_query_power_fault(struct slot *slot)
+{
+ struct controller *ctrl = slot->ctrl;
+ u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
+
+ /* Note: Logic 0 => fault */
+ return !(slot_reg & POWER_FAULT);
+}
+
+static int hpc_set_attention_status(struct slot *slot, u8 value)
+{
+ u8 slot_cmd = 0;
+
+ switch (value) {
+	case 0:
+ slot_cmd = SET_ATTN_OFF; /* OFF */
+ break;
+ case 1:
+ slot_cmd = SET_ATTN_ON; /* ON */
+ break;
+ case 2:
+ slot_cmd = SET_ATTN_BLINK; /* BLINK */
+ break;
+ default:
+ return -1;
+ }
+
+ return shpc_write_cmd(slot, slot->hp_slot, slot_cmd);
+}
+
+
+static void hpc_set_green_led_on(struct slot *slot)
+{
+ shpc_write_cmd(slot, slot->hp_slot, SET_PWR_ON);
+}
+
+static void hpc_set_green_led_off(struct slot *slot)
+{
+ shpc_write_cmd(slot, slot->hp_slot, SET_PWR_OFF);
+}
+
+static void hpc_set_green_led_blink(struct slot *slot)
+{
+ shpc_write_cmd(slot, slot->hp_slot, SET_PWR_BLINK);
+}
+
+static void hpc_release_ctlr(struct controller *ctrl)
+{
+ int i;
+ u32 slot_reg, serr_int;
+
+ /*
+ * Mask event interrupts and SERRs of all slots
+ */
+ for (i = 0; i < ctrl->num_slots; i++) {
+ slot_reg = shpc_readl(ctrl, SLOT_REG(i));
+ slot_reg |= (PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK |
+ BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK |
+ CON_PFAULT_INTR_MASK | MRL_CHANGE_SERR_MASK |
+ CON_PFAULT_SERR_MASK);
+ slot_reg &= ~SLOT_REG_RSVDZ_MASK;
+ shpc_writel(ctrl, SLOT_REG(i), slot_reg);
+ }
+
+ cleanup_slots(ctrl);
+
+ /*
+ * Mask SERR and System Interrupt generation
+ */
+ serr_int = shpc_readl(ctrl, SERR_INTR_ENABLE);
+ serr_int |= (GLOBAL_INTR_MASK | GLOBAL_SERR_MASK |
+ COMMAND_INTR_MASK | ARBITER_SERR_MASK);
+ serr_int &= ~SERR_INTR_RSVDZ_MASK;
+ shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
+
+ if (shpchp_poll_mode)
+ del_timer(&ctrl->poll_timer);
+ else {
+ free_irq(ctrl->pci_dev->irq, ctrl);
+ pci_disable_msi(ctrl->pci_dev);
+ }
+
+ iounmap(ctrl->creg);
+ release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
+}
+
+static int hpc_power_on_slot(struct slot *slot)
+{
+ int retval;
+
+ retval = shpc_write_cmd(slot, slot->hp_slot, SET_SLOT_PWR);
+ if (retval)
+ ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__);
+
+ return retval;
+}
+
+static int hpc_slot_enable(struct slot *slot)
+{
+ int retval;
+
+ /* Slot - Enable, Power Indicator - Blink, Attention Indicator - Off */
+ retval = shpc_write_cmd(slot, slot->hp_slot,
+ SET_SLOT_ENABLE | SET_PWR_BLINK | SET_ATTN_OFF);
+ if (retval)
+ ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__);
+
+ return retval;
+}
+
+static int hpc_slot_disable(struct slot *slot)
+{
+ int retval;
+
+ /* Slot - Disable, Power Indicator - Off, Attention Indicator - On */
+ retval = shpc_write_cmd(slot, slot->hp_slot,
+ SET_SLOT_DISABLE | SET_PWR_OFF | SET_ATTN_ON);
+ if (retval)
+ ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__);
+
+ return retval;
+}
+
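+/*
+ * The Secondary Bus Configuration register encodes the current speed/mode
+ * in its low 3 bits for PI 1 controllers and in its low 4 bits for PI 2.
+ */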
+static int shpc_get_cur_bus_speed(struct controller *ctrl)
+{
+ int retval = 0;
+ struct pci_bus *bus = ctrl->pci_dev->subordinate;
+ enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN;
+ u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG);
+ u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
+ u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7);
+
+ if ((pi == 1) && (speed_mode > 4)) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ switch (speed_mode) {
+ case 0x0:
+ bus_speed = PCI_SPEED_33MHz;
+ break;
+ case 0x1:
+ bus_speed = PCI_SPEED_66MHz;
+ break;
+ case 0x2:
+ bus_speed = PCI_SPEED_66MHz_PCIX;
+ break;
+ case 0x3:
+ bus_speed = PCI_SPEED_100MHz_PCIX;
+ break;
+ case 0x4:
+ bus_speed = PCI_SPEED_133MHz_PCIX;
+ break;
+ case 0x5:
+ bus_speed = PCI_SPEED_66MHz_PCIX_ECC;
+ break;
+ case 0x6:
+ bus_speed = PCI_SPEED_100MHz_PCIX_ECC;
+ break;
+ case 0x7:
+ bus_speed = PCI_SPEED_133MHz_PCIX_ECC;
+ break;
+ case 0x8:
+ bus_speed = PCI_SPEED_66MHz_PCIX_266;
+ break;
+ case 0x9:
+ bus_speed = PCI_SPEED_100MHz_PCIX_266;
+ break;
+ case 0xa:
+ bus_speed = PCI_SPEED_133MHz_PCIX_266;
+ break;
+ case 0xb:
+ bus_speed = PCI_SPEED_66MHz_PCIX_533;
+ break;
+ case 0xc:
+ bus_speed = PCI_SPEED_100MHz_PCIX_533;
+ break;
+ case 0xd:
+ bus_speed = PCI_SPEED_133MHz_PCIX_533;
+ break;
+ default:
+ retval = -ENODEV;
+ break;
+ }
+
+ out:
+ bus->cur_bus_speed = bus_speed;
+	ctrl_dbg(ctrl, "Current bus speed = %d\n", bus_speed);
+ return retval;
+}
+
+
+static int hpc_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value)
+{
+ int retval;
+ struct controller *ctrl = slot->ctrl;
+ u8 pi, cmd;
+
+ pi = shpc_readb(ctrl, PROG_INTERFACE);
+ if ((pi == 1) && (value > PCI_SPEED_133MHz_PCIX))
+ return -EINVAL;
+
+ switch (value) {
+ case PCI_SPEED_33MHz:
+ cmd = SETA_PCI_33MHZ;
+ break;
+ case PCI_SPEED_66MHz:
+ cmd = SETA_PCI_66MHZ;
+ break;
+ case PCI_SPEED_66MHz_PCIX:
+ cmd = SETA_PCIX_66MHZ;
+ break;
+ case PCI_SPEED_100MHz_PCIX:
+ cmd = SETA_PCIX_100MHZ;
+ break;
+ case PCI_SPEED_133MHz_PCIX:
+ cmd = SETA_PCIX_133MHZ;
+ break;
+ case PCI_SPEED_66MHz_PCIX_ECC:
+ cmd = SETB_PCIX_66MHZ_EM;
+ break;
+ case PCI_SPEED_100MHz_PCIX_ECC:
+ cmd = SETB_PCIX_100MHZ_EM;
+ break;
+ case PCI_SPEED_133MHz_PCIX_ECC:
+ cmd = SETB_PCIX_133MHZ_EM;
+ break;
+ case PCI_SPEED_66MHz_PCIX_266:
+ cmd = SETB_PCIX_66MHZ_266;
+ break;
+ case PCI_SPEED_100MHz_PCIX_266:
+ cmd = SETB_PCIX_100MHZ_266;
+ break;
+ case PCI_SPEED_133MHz_PCIX_266:
+ cmd = SETB_PCIX_133MHZ_266;
+ break;
+ case PCI_SPEED_66MHz_PCIX_533:
+ cmd = SETB_PCIX_66MHZ_533;
+ break;
+ case PCI_SPEED_100MHz_PCIX_533:
+ cmd = SETB_PCIX_100MHZ_533;
+ break;
+ case PCI_SPEED_133MHz_PCIX_533:
+ cmd = SETB_PCIX_133MHZ_533;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ retval = shpc_write_cmd(slot, 0, cmd);
+ if (retval)
+ ctrl_err(ctrl, "%s: Write command failed!\n", __func__);
+ else
+ shpc_get_cur_bus_speed(ctrl);
+
+ return retval;
+}
+
+static irqreturn_t shpc_isr(int irq, void *dev_id)
+{
+ struct controller *ctrl = (struct controller *)dev_id;
+ u32 serr_int, slot_reg, intr_loc, intr_loc2;
+ int hp_slot;
+
+ /* Check to see if it was our interrupt */
+ intr_loc = shpc_readl(ctrl, INTR_LOC);
+ if (!intr_loc)
+ return IRQ_NONE;
+
+ ctrl_dbg(ctrl, "%s: intr_loc = %x\n", __func__, intr_loc);
+
+ if (!shpchp_poll_mode) {
+ /*
+ * Mask Global Interrupt Mask - see implementation
+ * note on p. 139 of SHPC spec rev 1.0
+ */
+ serr_int = shpc_readl(ctrl, SERR_INTR_ENABLE);
+ serr_int |= GLOBAL_INTR_MASK;
+ serr_int &= ~SERR_INTR_RSVDZ_MASK;
+ shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
+
+ intr_loc2 = shpc_readl(ctrl, INTR_LOC);
+ ctrl_dbg(ctrl, "%s: intr_loc2 = %x\n", __func__, intr_loc2);
+ }
+
+ if (intr_loc & CMD_INTR_PENDING) {
+		/*
+		 * Command Complete Interrupt Pending. The INTR_LOC bit is
+		 * read-only; it is cleared by writing 1 to the Command
+		 * Completion Detected bit in the Controller SERR-INT
+		 * register, which the read-modify-write below does.
+		 */
+ serr_int = shpc_readl(ctrl, SERR_INTR_ENABLE);
+ serr_int &= ~SERR_INTR_RSVDZ_MASK;
+ shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
+
+ wake_up_interruptible(&ctrl->queue);
+ }
+
+ if (!(intr_loc & ~CMD_INTR_PENDING))
+ goto out;
+
+ for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) {
+ /* To find out which slot has interrupt pending */
+ if (!(intr_loc & SLOT_INTR_PENDING(hp_slot)))
+ continue;
+
+ slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot));
+ ctrl_dbg(ctrl, "Slot %x with intr, slot register = %x\n",
+ hp_slot, slot_reg);
+
+ if (slot_reg & MRL_CHANGE_DETECTED)
+ shpchp_handle_switch_change(hp_slot, ctrl);
+
+ if (slot_reg & BUTTON_PRESS_DETECTED)
+ shpchp_handle_attention_button(hp_slot, ctrl);
+
+ if (slot_reg & PRSNT_CHANGE_DETECTED)
+ shpchp_handle_presence_change(hp_slot, ctrl);
+
+ if (slot_reg & (ISO_PFAULT_DETECTED | CON_PFAULT_DETECTED))
+ shpchp_handle_power_fault(hp_slot, ctrl);
+
+ /* Clear all slot events */
+ slot_reg &= ~SLOT_REG_RSVDZ_MASK;
+ shpc_writel(ctrl, SLOT_REG(hp_slot), slot_reg);
+ }
+ out:
+ if (!shpchp_poll_mode) {
+ /* Unmask Global Interrupt Mask */
+ serr_int = shpc_readl(ctrl, SERR_INTR_ENABLE);
+ serr_int &= ~(GLOBAL_INTR_MASK | SERR_INTR_RSVDZ_MASK);
+ shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int shpc_get_max_bus_speed(struct controller *ctrl)
+{
+ int retval = 0;
+ struct pci_bus *bus = ctrl->pci_dev->subordinate;
+ enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN;
+ u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
+ u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1);
+ u32 slot_avail2 = shpc_readl(ctrl, SLOT_AVAIL2);
+
+ if (pi == 2) {
+ if (slot_avail2 & SLOT_133MHZ_PCIX_533)
+ bus_speed = PCI_SPEED_133MHz_PCIX_533;
+ else if (slot_avail2 & SLOT_100MHZ_PCIX_533)
+ bus_speed = PCI_SPEED_100MHz_PCIX_533;
+ else if (slot_avail2 & SLOT_66MHZ_PCIX_533)
+ bus_speed = PCI_SPEED_66MHz_PCIX_533;
+ else if (slot_avail2 & SLOT_133MHZ_PCIX_266)
+ bus_speed = PCI_SPEED_133MHz_PCIX_266;
+ else if (slot_avail2 & SLOT_100MHZ_PCIX_266)
+ bus_speed = PCI_SPEED_100MHz_PCIX_266;
+ else if (slot_avail2 & SLOT_66MHZ_PCIX_266)
+ bus_speed = PCI_SPEED_66MHz_PCIX_266;
+ }
+
+ if (bus_speed == PCI_SPEED_UNKNOWN) {
+ if (slot_avail1 & SLOT_133MHZ_PCIX)
+ bus_speed = PCI_SPEED_133MHz_PCIX;
+ else if (slot_avail1 & SLOT_100MHZ_PCIX)
+ bus_speed = PCI_SPEED_100MHz_PCIX;
+ else if (slot_avail1 & SLOT_66MHZ_PCIX)
+ bus_speed = PCI_SPEED_66MHz_PCIX;
+ else if (slot_avail2 & SLOT_66MHZ)
+ bus_speed = PCI_SPEED_66MHz;
+ else if (slot_avail1 & SLOT_33MHZ)
+ bus_speed = PCI_SPEED_33MHz;
+ else
+ retval = -ENODEV;
+ }
+
+ bus->max_bus_speed = bus_speed;
+ ctrl_dbg(ctrl, "Max bus speed = %d\n", bus_speed);
+
+ return retval;
+}
+
+static struct hpc_ops shpchp_hpc_ops = {
+ .power_on_slot = hpc_power_on_slot,
+ .slot_enable = hpc_slot_enable,
+ .slot_disable = hpc_slot_disable,
+ .set_bus_speed_mode = hpc_set_bus_speed_mode,
+ .set_attention_status = hpc_set_attention_status,
+ .get_power_status = hpc_get_power_status,
+ .get_attention_status = hpc_get_attention_status,
+ .get_latch_status = hpc_get_latch_status,
+ .get_adapter_status = hpc_get_adapter_status,
+
+ .get_adapter_speed = hpc_get_adapter_speed,
+ .get_mode1_ECC_cap = hpc_get_mode1_ECC_cap,
+ .get_prog_int = hpc_get_prog_int,
+
+ .query_power_fault = hpc_query_power_fault,
+ .green_led_on = hpc_set_green_led_on,
+ .green_led_off = hpc_set_green_led_off,
+ .green_led_blink = hpc_set_green_led_blink,
+
+ .release_ctlr = hpc_release_ctlr,
+};
+
+int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
+{
+ int rc = -1, num_slots = 0;
+ u8 hp_slot;
+ u32 shpc_base_offset;
+ u32 tempdword, slot_reg, slot_config;
+ u8 i;
+
+ ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */
+ ctrl_dbg(ctrl, "Hotplug Controller:\n");
+
+ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+ pdev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) {
+ /* amd shpc driver doesn't use Base Offset; assume 0 */
+ ctrl->mmio_base = pci_resource_start(pdev, 0);
+ ctrl->mmio_size = pci_resource_len(pdev, 0);
+ } else {
+ ctrl->cap_offset = pci_find_capability(pdev, PCI_CAP_ID_SHPC);
+ if (!ctrl->cap_offset) {
+ ctrl_err(ctrl, "Cannot find PCI capability\n");
+ goto abort;
+ }
+ ctrl_dbg(ctrl, " cap_offset = %x\n", ctrl->cap_offset);
+
+ rc = shpc_indirect_read(ctrl, 0, &shpc_base_offset);
+ if (rc) {
+ ctrl_err(ctrl, "Cannot read base_offset\n");
+ goto abort;
+ }
+
+ rc = shpc_indirect_read(ctrl, 3, &tempdword);
+ if (rc) {
+ ctrl_err(ctrl, "Cannot read slot config\n");
+ goto abort;
+ }
+ num_slots = tempdword & SLOT_NUM;
+ ctrl_dbg(ctrl, " num_slots (indirect) %x\n", num_slots);
+
+ for (i = 0; i < 9 + num_slots; i++) {
+ rc = shpc_indirect_read(ctrl, i, &tempdword);
+ if (rc) {
+ ctrl_err(ctrl, "Cannot read creg (index = %d)\n",
+ i);
+ goto abort;
+ }
+ ctrl_dbg(ctrl, " offset %d: value %x\n", i, tempdword);
+ }
+
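+	/*
+	 * Working register set: 0x24 bytes of fixed registers plus one
+	 * 4-byte Logical Slot register per slot.
+	 */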
+ ctrl->mmio_base =
+ pci_resource_start(pdev, 0) + shpc_base_offset;
+ ctrl->mmio_size = 0x24 + 0x4 * num_slots;
+ }
+
+ ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ ctrl_err(ctrl, "pci_enable_device failed\n");
+ goto abort;
+ }
+
+ if (!request_mem_region(ctrl->mmio_base, ctrl->mmio_size, MY_NAME)) {
+ ctrl_err(ctrl, "Cannot reserve MMIO region\n");
+ rc = -1;
+ goto abort;
+ }
+
+ ctrl->creg = ioremap(ctrl->mmio_base, ctrl->mmio_size);
+ if (!ctrl->creg) {
+ ctrl_err(ctrl, "Cannot remap MMIO region %lx @ %lx\n",
+ ctrl->mmio_size, ctrl->mmio_base);
+ release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
+ rc = -1;
+ goto abort;
+ }
+ ctrl_dbg(ctrl, "ctrl->creg %p\n", ctrl->creg);
+
+ mutex_init(&ctrl->crit_sect);
+ mutex_init(&ctrl->cmd_lock);
+
+ /* Setup wait queue */
+ init_waitqueue_head(&ctrl->queue);
+
+ ctrl->hpc_ops = &shpchp_hpc_ops;
+
+ /* Return PCI Controller Info */
+ slot_config = shpc_readl(ctrl, SLOT_CONFIG);
+ ctrl->slot_device_offset = (slot_config & FIRST_DEV_NUM) >> 8;
+ ctrl->num_slots = slot_config & SLOT_NUM;
+ ctrl->first_slot = (slot_config & PSN) >> 16;
+ ctrl->slot_num_inc = ((slot_config & UPDOWN) >> 29) ? 1 : -1;
+
+ /* Mask Global Interrupt Mask & Command Complete Interrupt Mask */
+ tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE);
+ ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword);
+ tempdword |= (GLOBAL_INTR_MASK | GLOBAL_SERR_MASK |
+ COMMAND_INTR_MASK | ARBITER_SERR_MASK);
+ tempdword &= ~SERR_INTR_RSVDZ_MASK;
+ shpc_writel(ctrl, SERR_INTR_ENABLE, tempdword);
+ tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE);
+ ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword);
+
+	/* Mask the MRL sensor SERR enable bit of each slot in the Slot
+	 * SERR-INT Mask register & clear any existing events
+	 */
+ for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) {
+ slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot));
+ ctrl_dbg(ctrl, "Default Logical Slot Register %d value %x\n",
+ hp_slot, slot_reg);
+ slot_reg |= (PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK |
+ BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK |
+ CON_PFAULT_INTR_MASK | MRL_CHANGE_SERR_MASK |
+ CON_PFAULT_SERR_MASK);
+ slot_reg &= ~SLOT_REG_RSVDZ_MASK;
+ shpc_writel(ctrl, SLOT_REG(hp_slot), slot_reg);
+ }
+
+ if (shpchp_poll_mode) {
+ /* Install interrupt polling timer. Start with 10 sec delay */
+ init_timer(&ctrl->poll_timer);
+ start_int_poll_timer(ctrl, 10);
+ } else {
+ /* Installs the interrupt handler */
+ rc = pci_enable_msi(pdev);
+ if (rc) {
+ ctrl_info(ctrl, "Can't get msi for the hotplug controller\n");
+ ctrl_info(ctrl, "Use INTx for the hotplug controller\n");
+ }
+
+ rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
+ MY_NAME, (void *)ctrl);
+ ctrl_dbg(ctrl, "request_irq %d (returns %d)\n",
+ ctrl->pci_dev->irq, rc);
+ if (rc) {
+ ctrl_err(ctrl, "Can't get irq %d for the hotplug controller\n",
+ ctrl->pci_dev->irq);
+ goto abort_iounmap;
+ }
+ }
+ ctrl_dbg(ctrl, "HPC at %s irq=%x\n", pci_name(pdev), pdev->irq);
+
+ shpc_get_max_bus_speed(ctrl);
+ shpc_get_cur_bus_speed(ctrl);
+
+ /*
+ * Unmask all event interrupts of all slots
+ */
+ for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) {
+ slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot));
+ ctrl_dbg(ctrl, "Default Logical Slot Register %d value %x\n",
+ hp_slot, slot_reg);
+ slot_reg &= ~(PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK |
+ BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK |
+ CON_PFAULT_INTR_MASK | SLOT_REG_RSVDZ_MASK);
+ shpc_writel(ctrl, SLOT_REG(hp_slot), slot_reg);
+ }
+ if (!shpchp_poll_mode) {
+ /* Unmask all general input interrupts and SERR */
+ tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE);
+ tempdword &= ~(GLOBAL_INTR_MASK | COMMAND_INTR_MASK |
+ SERR_INTR_RSVDZ_MASK);
+ shpc_writel(ctrl, SERR_INTR_ENABLE, tempdword);
+ tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE);
+ ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword);
+ }
+
+ return 0;
+
+ /* We end up here for the many possible ways to fail this API. */
+abort_iounmap:
+ iounmap(ctrl->creg);
+abort:
+ return rc;
+}
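+
+/*
+ * Usage sketch (an assumption, not part of this file): shpc_init() is the
+ * bring-up entry point a probe routine would call; allocation and teardown
+ * of "ctrl" are elided here for brevity.
+ *
+ *	static int shpc_probe_sketch(struct pci_dev *pdev)
+ *	{
+ *		struct controller *ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ *
+ *		if (!ctrl)
+ *			return -ENOMEM;
+ *		if (shpc_init(ctrl, pdev)) {	// maps MMIO, hooks the IRQ
+ *			kfree(ctrl);
+ *			return -ENODEV;
+ *		}
+ *		return 0;
+ *	}
+ */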
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
new file mode 100644
index 000000000..f8cd3a27e
--- /dev/null
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -0,0 +1,116 @@
+/*
+ * Standard Hot Plug Controller Driver
+ *
+ * Copyright (C) 1995,2001 Compaq Computer Corporation
+ * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2001 IBM Corp.
+ * Copyright (C) 2003-2004 Intel Corporation
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "../pci.h"
+#include "shpchp.h"
+
+int shpchp_configure_device(struct slot *p_slot)
+{
+ struct pci_dev *dev;
+ struct controller *ctrl = p_slot->ctrl;
+ struct pci_dev *bridge = ctrl->pci_dev;
+ struct pci_bus *parent = bridge->subordinate;
+ int num, ret = 0;
+
+ pci_lock_rescan_remove();
+
+ dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
+ if (dev) {
+ ctrl_err(ctrl, "Device %s already exists at %04x:%02x:%02x, cannot hot-add\n",
+ pci_name(dev), pci_domain_nr(parent),
+ p_slot->bus, p_slot->device);
+ pci_dev_put(dev);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0));
+ if (num == 0) {
+ ctrl_err(ctrl, "No new device found\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ list_for_each_entry(dev, &parent->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != p_slot->device)
+ continue;
+ if (pci_is_bridge(dev))
+ pci_hp_add_bridge(dev);
+ }
+
+ pci_assign_unassigned_bridge_resources(bridge);
+ pcie_bus_configure_settings(parent);
+ pci_bus_add_devices(parent);
+
+ out:
+ pci_unlock_rescan_remove();
+ return ret;
+}
+
+int shpchp_unconfigure_device(struct slot *p_slot)
+{
+ int rc = 0;
+ u8 bctl = 0;
+ struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
+ struct pci_dev *dev, *temp;
+ struct controller *ctrl = p_slot->ctrl;
+
+ ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n",
+ __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device);
+
+ pci_lock_rescan_remove();
+
+ list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) {
+ if (PCI_SLOT(dev->devfn) != p_slot->device)
+ continue;
+
+ pci_dev_get(dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
+ if (bctl & PCI_BRIDGE_CTL_VGA) {
+ ctrl_err(ctrl,
+ "Cannot remove display device %s\n",
+ pci_name(dev));
+ pci_dev_put(dev);
+ rc = -EINVAL;
+ break;
+ }
+ }
+ pci_stop_and_remove_bus_device(dev);
+ pci_dev_put(dev);
+ }
+
+ pci_unlock_rescan_remove();
+ return rc;
+}
+
diff --git a/drivers/pci/hotplug/shpchp_sysfs.c b/drivers/pci/hotplug/shpchp_sysfs.c
new file mode 100644
index 000000000..52875b360
--- /dev/null
+++ b/drivers/pci/hotplug/shpchp_sysfs.c
@@ -0,0 +1,96 @@
+/*
+ * Compaq Hot Plug Controller Driver
+ *
+ * Copyright (c) 1995,2001 Compaq Computer Corporation
+ * Copyright (c) 2001,2003 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (c) 2001 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <greg@kroah.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include "shpchp.h"
+
+
+/* A few routines that create sysfs entries for the hot plug controller */
+
+static ssize_t show_ctrl(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev;
+ char *out = buf;
+ int index, busnr;
+ struct resource *res;
+ struct pci_bus *bus;
+
+	pdev = container_of(dev, struct pci_dev, dev);
+ bus = pdev->subordinate;
+
+	out += sprintf(out, "Free resources: memory\n");
+ pci_bus_for_each_resource(bus, res, index) {
+ if (res && (res->flags & IORESOURCE_MEM) &&
+ !(res->flags & IORESOURCE_PREFETCH)) {
+ out += sprintf(out, "start = %8.8llx, length = %8.8llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)resource_size(res));
+ }
+ }
+ out += sprintf(out, "Free resources: prefetchable memory\n");
+ pci_bus_for_each_resource(bus, res, index) {
+ if (res && (res->flags & IORESOURCE_MEM) &&
+ (res->flags & IORESOURCE_PREFETCH)) {
+ out += sprintf(out, "start = %8.8llx, length = %8.8llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)resource_size(res));
+ }
+ }
+ out += sprintf(out, "Free resources: IO\n");
+ pci_bus_for_each_resource(bus, res, index) {
+ if (res && (res->flags & IORESOURCE_IO)) {
+ out += sprintf(out, "start = %8.8llx, length = %8.8llx\n",
+ (unsigned long long)res->start,
+ (unsigned long long)resource_size(res));
+ }
+ }
+ out += sprintf(out, "Free resources: bus numbers\n");
+ for (busnr = bus->busn_res.start; busnr <= bus->busn_res.end; busnr++) {
+ if (!pci_find_bus(pci_domain_nr(bus), busnr))
+ break;
+ }
+ if (busnr < bus->busn_res.end)
+ out += sprintf(out, "start = %8.8x, length = %8.8x\n",
+ busnr, (int)(bus->busn_res.end - busnr));
+
+ return out - buf;
+}
+static DEVICE_ATTR(ctrl, S_IRUGO, show_ctrl, NULL);
+
+int shpchp_create_ctrl_files(struct controller *ctrl)
+{
+	return device_create_file(&ctrl->pci_dev->dev, &dev_attr_ctrl);
+}
+
+void shpchp_remove_ctrl_files(struct controller *ctrl)
+{
+ device_remove_file(&ctrl->pci_dev->dev, &dev_attr_ctrl);
+}
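+
+/*
+ * Usage sketch (an assumption, not part of this file): once
+ * shpchp_create_ctrl_files() has run for a hotplug bridge, the report can
+ * be read from userspace; the device address and values below are only
+ * examples:
+ *
+ *	$ cat /sys/bus/pci/devices/0000:00:1e.0/ctrl
+ *	Free resources: memory
+ *	start = d0000000, length = 00100000
+ *	...
+ */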
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
new file mode 100644
index 000000000..a94dd2c41
--- /dev/null
+++ b/drivers/pci/htirq.c
@@ -0,0 +1,170 @@
+/*
+ * File: htirq.c
+ * Purpose: Hypertransport Interrupt Capability
+ *
+ * Copyright (C) 2006 Linux Networx
+ * Copyright (C) Eric Biederman <ebiederman@lnxi.com>
+ */
+
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/htirq.h>
+
+/* Global ht irq lock.
+ *
+ * This is needed to serialize access to the data port in hypertransport
+ * irq capability.
+ *
+ * With multiple simultaneous hypertransport irq devices it might pay
+ * to make this more fine-grained. But start with simple, stupid, and correct.
+ */
+static DEFINE_SPINLOCK(ht_irq_lock);
+
+struct ht_irq_cfg {
+ struct pci_dev *dev;
+ /* Update callback used to cope with buggy hardware */
+ ht_irq_update_t *update;
+ unsigned pos;
+ unsigned idx;
+ struct ht_irq_msg msg;
+};
+
+
+void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
+{
+ struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
+ unsigned long flags;
+ spin_lock_irqsave(&ht_irq_lock, flags);
+ if (cfg->msg.address_lo != msg->address_lo) {
+ pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
+ pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_lo);
+ }
+ if (cfg->msg.address_hi != msg->address_hi) {
+ pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx + 1);
+ pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_hi);
+ }
+ if (cfg->update)
+ cfg->update(cfg->dev, irq, msg);
+ spin_unlock_irqrestore(&ht_irq_lock, flags);
+ cfg->msg = *msg;
+}
+
+void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
+{
+ struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
+ *msg = cfg->msg;
+}
+
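+/*
+ * In the HT interrupt definition register pair, bit 0 of the low address
+ * dword is the per-interrupt mask bit: the two helpers below set it to
+ * inhibit delivery and clear it to allow delivery again.
+ */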
+void mask_ht_irq(struct irq_data *data)
+{
+ struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
+ struct ht_irq_msg msg = cfg->msg;
+
+ msg.address_lo |= 1;
+ write_ht_irq_msg(data->irq, &msg);
+}
+
+void unmask_ht_irq(struct irq_data *data)
+{
+ struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
+ struct ht_irq_msg msg = cfg->msg;
+
+ msg.address_lo &= ~1;
+ write_ht_irq_msg(data->irq, &msg);
+}
+
+/**
+ * __ht_create_irq - create an irq and attach it to a device.
+ * @dev: The hypertransport device to find the irq capability on.
+ * @idx: Which of the possible irqs to attach to.
+ * @update: Function to be called when changing the htirq message
+ *
+ * The irq number of the new irq or a negative error value is returned.
+ */
+int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
+{
+ struct ht_irq_cfg *cfg;
+ int max_irq, pos, irq;
+ unsigned long flags;
+ u32 data;
+
+ pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
+ if (!pos)
+ return -EINVAL;
+
+ /* Verify the idx I want to use is in range */
+ spin_lock_irqsave(&ht_irq_lock, flags);
+ pci_write_config_byte(dev, pos + 2, 1);
+ pci_read_config_dword(dev, pos + 4, &data);
+ spin_unlock_irqrestore(&ht_irq_lock, flags);
+
+ max_irq = (data >> 16) & 0xff;
+ if (idx > max_irq)
+ return -EINVAL;
+
+ cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ cfg->dev = dev;
+ cfg->update = update;
+ cfg->pos = pos;
+ cfg->idx = 0x10 + (idx * 2);
+ /* Initialize msg to a value that will never match the first write. */
+ cfg->msg.address_lo = 0xffffffff;
+ cfg->msg.address_hi = 0xffffffff;
+
+ irq = irq_alloc_hwirq(dev_to_node(&dev->dev));
+ if (!irq) {
+ kfree(cfg);
+ return -EBUSY;
+ }
+ irq_set_handler_data(irq, cfg);
+
+ if (arch_setup_ht_irq(irq, dev) < 0) {
+ ht_destroy_irq(irq);
+ return -EBUSY;
+ }
+
+ return irq;
+}
+EXPORT_SYMBOL(__ht_create_irq);
+
+/**
+ * ht_create_irq - create an irq and attach it to a device.
+ * @dev: The hypertransport device to find the irq capability on.
+ * @idx: Which of the possible irqs to attach to.
+ *
+ * ht_create_irq needs to be called for all hypertransport devices
+ * that generate irqs.
+ *
+ * The irq number of the new irq or a negative error value is returned.
+ */
+int ht_create_irq(struct pci_dev *dev, int idx)
+{
+ return __ht_create_irq(dev, idx, NULL);
+}
+EXPORT_SYMBOL(ht_create_irq);
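+
+/*
+ * Usage sketch (an assumption, not part of this file): a driver would
+ * typically allocate the irq in its probe path and wire up its own
+ * handler; my_isr and my_dev below are hypothetical driver objects.
+ *
+ *	int irq = ht_create_irq(pdev, 0);
+ *
+ *	if (irq < 0)
+ *		return irq;
+ *	if (request_irq(irq, my_isr, 0, "my_ht_dev", my_dev)) {
+ *		ht_destroy_irq(irq);
+ *		return -EBUSY;
+ *	}
+ */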
+
+/**
+ * ht_destroy_irq - destroy an irq created with ht_create_irq
+ * @irq: irq to be destroyed
+ *
+ * This reverses ht_create_irq removing the specified irq from
+ * existence. The irq should be free before this happens.
+ */
+void ht_destroy_irq(unsigned int irq)
+{
+ struct ht_irq_cfg *cfg;
+
+ cfg = irq_get_handler_data(irq);
+ irq_set_chip(irq, NULL);
+ irq_set_handler_data(irq, NULL);
+ irq_free_hwirq(irq);
+
+ kfree(cfg);
+}
+EXPORT_SYMBOL(ht_destroy_irq);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
new file mode 100644
index 000000000..ee0ebff10
--- /dev/null
+++ b/drivers/pci/iov.c
@@ -0,0 +1,762 @@
+/*
+ * drivers/pci/iov.c
+ *
+ * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
+ *
+ * PCI Express I/O Virtualization (IOV) support.
+ * Single Root IOV 1.0
+ * Address Translation Service 1.0
+ */
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/pci-ats.h>
+#include "pci.h"
+
+#define VIRTFN_ID_LEN 16
+
+int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id)
+{
+ if (!dev->is_physfn)
+ return -EINVAL;
+ return dev->bus->number + ((dev->devfn + dev->sriov->offset +
+ dev->sriov->stride * vf_id) >> 8);
+}
+
+int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id)
+{
+ if (!dev->is_physfn)
+ return -EINVAL;
+ return (dev->devfn + dev->sriov->offset +
+ dev->sriov->stride * vf_id) & 0xff;
+}
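+
+/*
+ * Worked example (assumed values): for a PF at 01:00.0 with First VF
+ * Offset 0x80 and VF Stride 2, VF 63 sits at 0x00 + 0x80 + 2 * 63 = 0xfe,
+ * i.e. devfn 0xfe still on bus 0x01, while VF 64 reaches 0x100 and so
+ * spills onto bus 0x02, devfn 0x00; the carry out of the 8-bit devfn is
+ * what pci_iov_virtfn_bus() adds to the PF's bus number.
+ */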
+
+/*
+ * Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may
+ * change when NumVFs changes.
+ *
+ * Update iov->offset and iov->stride when NumVFs is written.
+ */
+static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn)
+{
+ struct pci_sriov *iov = dev->sriov;
+
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+}
+
+/*
+ * The PF consumes one bus number. NumVFs, First VF Offset, and VF Stride
+ * determine how many additional bus numbers will be consumed by VFs.
+ *
+ * Iterate over all valid NumVFs and calculate the maximum number of bus
+ * numbers that could ever be required.
+ */
+static inline u8 virtfn_max_buses(struct pci_dev *dev)
+{
+ struct pci_sriov *iov = dev->sriov;
+ int nr_virtfn;
+ u8 max = 0;
+ int busnr;
+
+ for (nr_virtfn = 1; nr_virtfn <= iov->total_VFs; nr_virtfn++) {
+ pci_iov_set_numvfs(dev, nr_virtfn);
+ busnr = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
+ if (busnr > max)
+ max = busnr;
+ }
+
+ return max;
+}
+
+static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
+{
+ struct pci_bus *child;
+
+ if (bus->number == busnr)
+ return bus;
+
+ child = pci_find_bus(pci_domain_nr(bus), busnr);
+ if (child)
+ return child;
+
+ child = pci_add_new_bus(bus, NULL, busnr);
+ if (!child)
+ return NULL;
+
+ pci_bus_insert_busn_res(child, busnr, busnr);
+
+ return child;
+}
+
+static void virtfn_remove_bus(struct pci_bus *physbus, struct pci_bus *virtbus)
+{
+ if (physbus != virtbus && list_empty(&virtbus->devices))
+ pci_remove_bus(virtbus);
+}
+
+resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
+{
+ if (!dev->is_physfn)
+ return 0;
+
+ return dev->sriov->barsz[resno - PCI_IOV_RESOURCES];
+}
+
+static int virtfn_add(struct pci_dev *dev, int id, int reset)
+{
+ int i;
+ int rc = -ENOMEM;
+ u64 size;
+ char buf[VIRTFN_ID_LEN];
+ struct pci_dev *virtfn;
+ struct resource *res;
+ struct pci_sriov *iov = dev->sriov;
+ struct pci_bus *bus;
+
+ mutex_lock(&iov->dev->sriov->lock);
+ bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id));
+ if (!bus)
+ goto failed;
+
+ virtfn = pci_alloc_dev(bus);
+ if (!virtfn)
+ goto failed0;
+
+ virtfn->devfn = pci_iov_virtfn_devfn(dev, id);
+ virtfn->vendor = dev->vendor;
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
+ pci_setup_device(virtfn);
+ virtfn->dev.parent = dev->dev.parent;
+ virtfn->physfn = pci_dev_get(dev);
+ virtfn->is_virtfn = 1;
+ virtfn->multifunction = 0;
+
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->parent)
+ continue;
+ virtfn->resource[i].name = pci_name(virtfn);
+ virtfn->resource[i].flags = res->flags;
+ size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
+ virtfn->resource[i].start = res->start + size * id;
+ virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
+ rc = request_resource(res, &virtfn->resource[i]);
+ BUG_ON(rc);
+ }
+
+ if (reset)
+ __pci_reset_function(virtfn);
+
+ pci_device_add(virtfn, virtfn->bus);
+ mutex_unlock(&iov->dev->sriov->lock);
+
+ pci_bus_add_device(virtfn);
+ sprintf(buf, "virtfn%u", id);
+ rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
+ if (rc)
+ goto failed1;
+ rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
+ if (rc)
+ goto failed2;
+
+ kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
+
+ return 0;
+
+failed2:
+ sysfs_remove_link(&dev->dev.kobj, buf);
+failed1:
+ pci_dev_put(dev);
+ mutex_lock(&iov->dev->sriov->lock);
+ pci_stop_and_remove_bus_device(virtfn);
+failed0:
+ virtfn_remove_bus(dev->bus, bus);
+failed:
+ mutex_unlock(&iov->dev->sriov->lock);
+
+ return rc;
+}
+
+static void virtfn_remove(struct pci_dev *dev, int id, int reset)
+{
+ char buf[VIRTFN_ID_LEN];
+ struct pci_dev *virtfn;
+ struct pci_sriov *iov = dev->sriov;
+
+ virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus),
+ pci_iov_virtfn_bus(dev, id),
+ pci_iov_virtfn_devfn(dev, id));
+ if (!virtfn)
+ return;
+
+ if (reset) {
+ device_release_driver(&virtfn->dev);
+ __pci_reset_function(virtfn);
+ }
+
+ sprintf(buf, "virtfn%u", id);
+ sysfs_remove_link(&dev->dev.kobj, buf);
+ /*
+ * pci_stop_dev() could have been called for this virtfn already,
+ * so the directory for the virtfn may have been removed before.
+ * Double check to avoid spurious sysfs warnings.
+ */
+ if (virtfn->dev.kobj.sd)
+ sysfs_remove_link(&virtfn->dev.kobj, "physfn");
+
+ mutex_lock(&iov->dev->sriov->lock);
+ pci_stop_and_remove_bus_device(virtfn);
+ virtfn_remove_bus(dev->bus, virtfn->bus);
+ mutex_unlock(&iov->dev->sriov->lock);
+
+ /* balance pci_get_domain_bus_and_slot() */
+ pci_dev_put(virtfn);
+ pci_dev_put(dev);
+}
+
+int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
+{
+ return 0;
+}
+
+static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
+{
+ int rc;
+ int i, j;
+ int nres;
+ u16 offset, stride, initial;
+ struct resource *res;
+ struct pci_dev *pdev;
+ struct pci_sriov *iov = dev->sriov;
+ int bars = 0;
+ int bus;
+ int retval;
+
+ if (!nr_virtfn)
+ return 0;
+
+ if (iov->num_VFs)
+ return -EINVAL;
+
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
+ if (initial > iov->total_VFs ||
+ (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total_VFs)))
+ return -EIO;
+
+ if (nr_virtfn < 0 || nr_virtfn > iov->total_VFs ||
+ (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
+ return -EINVAL;
+
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
+ if (!offset || (nr_virtfn > 1 && !stride))
+ return -EIO;
+
+ nres = 0;
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ bars |= (1 << (i + PCI_IOV_RESOURCES));
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
+ if (res->parent)
+ nres++;
+ }
+ if (nres != iov->nres) {
+ dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
+ return -ENOMEM;
+ }
+
+ iov->offset = offset;
+ iov->stride = stride;
+
+ bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1);
+ if (bus > dev->bus->busn_res.end) {
+ dev_err(&dev->dev, "can't enable %d VFs (bus %02x out of range of %pR)\n",
+ nr_virtfn, bus, &dev->bus->busn_res);
+ return -ENOMEM;
+ }
+
+ if (pci_enable_resources(dev, bars)) {
+ dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
+ return -ENOMEM;
+ }
+
+ if (iov->link != dev->devfn) {
+ pdev = pci_get_slot(dev->bus, iov->link);
+ if (!pdev)
+ return -ENODEV;
+
+ if (!pdev->is_physfn) {
+ pci_dev_put(pdev);
+ return -ENOSYS;
+ }
+
+ rc = sysfs_create_link(&dev->dev.kobj,
+ &pdev->dev.kobj, "dep_link");
+ pci_dev_put(pdev);
+ if (rc)
+ return rc;
+ }
+
+ pci_iov_set_numvfs(dev, nr_virtfn);
+ iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
+ pci_cfg_access_lock(dev);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+ msleep(100);
+ pci_cfg_access_unlock(dev);
+
+ iov->initial_VFs = initial;
+ if (nr_virtfn < initial)
+ initial = nr_virtfn;
+
+	retval = pcibios_sriov_enable(dev, initial);
+	if (retval) {
+		dev_err(&dev->dev, "failure %d from pcibios_sriov_enable()\n", retval);
+		return retval;
+	}
+
+ for (i = 0; i < initial; i++) {
+ rc = virtfn_add(dev, i, 0);
+ if (rc)
+ goto failed;
+ }
+
+ kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
+ iov->num_VFs = nr_virtfn;
+
+ return 0;
+
+failed:
+ for (j = 0; j < i; j++)
+ virtfn_remove(dev, j, 0);
+
+ iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
+ pci_cfg_access_lock(dev);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+ pci_iov_set_numvfs(dev, 0);
+ ssleep(1);
+ pci_cfg_access_unlock(dev);
+
+ if (iov->link != dev->devfn)
+ sysfs_remove_link(&dev->dev.kobj, "dep_link");
+
+ return rc;
+}
+
+int __weak pcibios_sriov_disable(struct pci_dev *pdev)
+{
+ return 0;
+}
+
+static void sriov_disable(struct pci_dev *dev)
+{
+ int i;
+ struct pci_sriov *iov = dev->sriov;
+
+ if (!iov->num_VFs)
+ return;
+
+ for (i = 0; i < iov->num_VFs; i++)
+ virtfn_remove(dev, i, 0);
+
+ pcibios_sriov_disable(dev);
+
+ iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
+ pci_cfg_access_lock(dev);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+ ssleep(1);
+ pci_cfg_access_unlock(dev);
+
+ if (iov->link != dev->devfn)
+ sysfs_remove_link(&dev->dev.kobj, "dep_link");
+
+ iov->num_VFs = 0;
+ pci_iov_set_numvfs(dev, 0);
+}
+
+static int sriov_init(struct pci_dev *dev, int pos)
+{
+ int i, bar64;
+ int rc;
+ int nres;
+ u32 pgsz;
+ u16 ctrl, total, offset, stride;
+ struct pci_sriov *iov;
+ struct resource *res;
+ struct pci_dev *pdev;
+
+ if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END &&
+ pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT)
+ return -ENODEV;
+
+ pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
+ if (ctrl & PCI_SRIOV_CTRL_VFE) {
+ pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
+ ssleep(1);
+ }
+
+ pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
+ if (!total)
+ return 0;
+
+ ctrl = 0;
+ list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+ if (pdev->is_physfn)
+ goto found;
+
+ pdev = NULL;
+ if (pci_ari_enabled(dev->bus))
+ ctrl |= PCI_SRIOV_CTRL_ARI;
+
+found:
+ pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
+ pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, 0);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
+ if (!offset || (total > 1 && !stride))
+ return -EIO;
+
+ pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
+ i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
+ pgsz &= ~((1 << i) - 1);
+ if (!pgsz)
+ return -EIO;
+
+ pgsz &= ~(pgsz - 1);
+ pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);
+
+ iov = kzalloc(sizeof(*iov), GFP_KERNEL);
+ if (!iov)
+ return -ENOMEM;
+
+ nres = 0;
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
+ bar64 = __pci_read_base(dev, pci_bar_unknown, res,
+ pos + PCI_SRIOV_BAR + i * 4);
+ if (!res->flags)
+ continue;
+ if (resource_size(res) & (PAGE_SIZE - 1)) {
+ rc = -EIO;
+ goto failed;
+ }
+ iov->barsz[i] = resource_size(res);
+ res->end = res->start + resource_size(res) * total - 1;
+ dev_info(&dev->dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n",
+ i, res, i, total);
+ i += bar64;
+ nres++;
+ }
+
+ iov->pos = pos;
+ iov->nres = nres;
+ iov->ctrl = ctrl;
+ iov->total_VFs = total;
+ iov->offset = offset;
+ iov->stride = stride;
+ iov->pgsz = pgsz;
+ iov->self = dev;
+ pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
+ pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
+ iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
+
+ if (pdev)
+ iov->dev = pci_dev_get(pdev);
+ else
+ iov->dev = dev;
+
+ mutex_init(&iov->lock);
+
+ dev->sriov = iov;
+ dev->is_physfn = 1;
+ iov->max_VF_buses = virtfn_max_buses(dev);
+
+ return 0;
+
+failed:
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
+ res->flags = 0;
+ }
+
+ kfree(iov);
+ return rc;
+}
+
+static void sriov_release(struct pci_dev *dev)
+{
+ BUG_ON(dev->sriov->num_VFs);
+
+ if (dev != dev->sriov->dev)
+ pci_dev_put(dev->sriov->dev);
+
+ mutex_destroy(&dev->sriov->lock);
+
+ kfree(dev->sriov);
+ dev->sriov = NULL;
+}
+
+static void sriov_restore_state(struct pci_dev *dev)
+{
+ int i;
+ u16 ctrl;
+ struct pci_sriov *iov = dev->sriov;
+
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
+ if (ctrl & PCI_SRIOV_CTRL_VFE)
+ return;
+
+ for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
+ pci_update_resource(dev, i);
+
+ pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
+ pci_iov_set_numvfs(dev, iov->num_VFs);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+ if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
+ msleep(100);
+}
+
+/**
+ * pci_iov_init - initialize the IOV capability
+ * @dev: the PCI device
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int pci_iov_init(struct pci_dev *dev)
+{
+ int pos;
+
+ if (!pci_is_pcie(dev))
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos)
+ return sriov_init(dev, pos);
+
+ return -ENODEV;
+}
+
+/**
+ * pci_iov_release - release resources used by the IOV capability
+ * @dev: the PCI device
+ */
+void pci_iov_release(struct pci_dev *dev)
+{
+ if (dev->is_physfn)
+ sriov_release(dev);
+}
+
+/**
+ * pci_iov_resource_bar - get position of the SR-IOV BAR
+ * @dev: the PCI device
+ * @resno: the resource number
+ *
+ * Returns position of the BAR encapsulated in the SR-IOV capability.
+ */
+int pci_iov_resource_bar(struct pci_dev *dev, int resno)
+{
+ if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
+ return 0;
+
+ BUG_ON(!dev->is_physfn);
+
+ return dev->sriov->pos + PCI_SRIOV_BAR +
+ 4 * (resno - PCI_IOV_RESOURCES);
+}
+
+resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
+ int resno)
+{
+ return pci_iov_resource_size(dev, resno);
+}
+
+/**
+ * pci_sriov_resource_alignment - get resource alignment for VF BAR
+ * @dev: the PCI device
+ * @resno: the resource number
+ *
+ * Returns the alignment of the VF BAR found in the SR-IOV capability.
+ * This is not the same as the resource size which is defined as
+ * the VF BAR size multiplied by the number of VFs. The alignment
+ * is just the VF BAR size.
+ */
+resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
+{
+ return pcibios_iov_resource_alignment(dev, resno);
+}
+
+/**
+ * pci_restore_iov_state - restore the state of the IOV capability
+ * @dev: the PCI device
+ */
+void pci_restore_iov_state(struct pci_dev *dev)
+{
+ if (dev->is_physfn)
+ sriov_restore_state(dev);
+}
+
+/**
+ * pci_iov_bus_range - find bus range used by Virtual Function
+ * @bus: the PCI bus
+ *
+ * Returns max number of buses (excluding the current one) used by Virtual
+ * Functions.
+ */
+int pci_iov_bus_range(struct pci_bus *bus)
+{
+ int max = 0;
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (!dev->is_physfn)
+ continue;
+ if (dev->sriov->max_VF_buses > max)
+ max = dev->sriov->max_VF_buses;
+ }
+
+ return max ? max - bus->number : 0;
+}
+
+/**
+ * pci_enable_sriov - enable the SR-IOV capability
+ * @dev: the PCI device
+ * @nr_virtfn: number of virtual functions to enable
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
+{
+ might_sleep();
+
+ if (!dev->is_physfn)
+ return -ENOSYS;
+
+ return sriov_enable(dev, nr_virtfn);
+}
+EXPORT_SYMBOL_GPL(pci_enable_sriov);
+
+/**
+ * pci_disable_sriov - disable the SR-IOV capability
+ * @dev: the PCI device
+ */
+void pci_disable_sriov(struct pci_dev *dev)
+{
+ might_sleep();
+
+ if (!dev->is_physfn)
+ return;
+
+ sriov_disable(dev);
+}
+EXPORT_SYMBOL_GPL(pci_disable_sriov);
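+
+/*
+ * Usage sketch (an assumption, not part of this file): a PF driver
+ * normally drives both calls from its sriov_configure() callback, which
+ * the PCI core invokes when userspace writes the sriov_numvfs attribute:
+ *
+ *	static int my_sriov_configure(struct pci_dev *dev, int num_vfs)
+ *	{
+ *		int ret;
+ *
+ *		if (num_vfs == 0) {
+ *			pci_disable_sriov(dev);
+ *			return 0;
+ *		}
+ *		ret = pci_enable_sriov(dev, num_vfs);
+ *		return ret ? ret : num_vfs;
+ *	}
+ */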
+
+/**
+ * pci_num_vf - return number of VFs associated with a PF
+ * @dev: the PCI device
+ *
+ * Returns number of VFs, or 0 if SR-IOV is not enabled.
+ */
+int pci_num_vf(struct pci_dev *dev)
+{
+ if (!dev->is_physfn)
+ return 0;
+
+ return dev->sriov->num_VFs;
+}
+EXPORT_SYMBOL_GPL(pci_num_vf);
+
+/**
+ * pci_vfs_assigned - returns number of VFs that are assigned to a guest
+ * @dev: the PCI device
+ *
+ * Returns number of VFs belonging to this device that are assigned to a guest.
+ * If the device is not a physical function, returns 0.
+ */
+int pci_vfs_assigned(struct pci_dev *dev)
+{
+ struct pci_dev *vfdev;
+ unsigned int vfs_assigned = 0;
+ unsigned short dev_id;
+
+ /* only search if we are a PF */
+ if (!dev->is_physfn)
+ return 0;
+
+ /*
+	 * determine the device ID for the VFs; the vendor ID will be the
+	 * same as the PF's, so there is no need to check for that one
+ */
+ pci_read_config_word(dev, dev->sriov->pos + PCI_SRIOV_VF_DID, &dev_id);
+
+ /* loop through all the VFs to see if we own any that are assigned */
+ vfdev = pci_get_device(dev->vendor, dev_id, NULL);
+ while (vfdev) {
+ /*
+ * It is considered assigned if it is a virtual function with
+ * our dev as the physical function and the assigned bit is set
+ */
+ if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
+ pci_is_dev_assigned(vfdev))
+ vfs_assigned++;
+
+ vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
+ }
+
+ return vfs_assigned;
+}
+EXPORT_SYMBOL_GPL(pci_vfs_assigned);
+
+/**
+ * pci_sriov_set_totalvfs - reduce the TotalVFs available
+ * @dev: the PCI PF device
+ * @numvfs: number that should be used for TotalVFs supported
+ *
+ * Should be called from PF driver's probe routine with
+ * device's mutex held.
+ *
+ * Returns 0 if PF is an SR-IOV-capable device and the value of numvfs
+ * is valid. If dev is not a PF, return -ENOSYS; if numvfs is invalid,
+ * return -EINVAL; if VFs are already enabled, return -EBUSY.
+ */
+int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
+{
+ if (!dev->is_physfn)
+ return -ENOSYS;
+ if (numvfs > dev->sriov->total_VFs)
+ return -EINVAL;
+
+	/* Shouldn't change if VFs already enabled */
+	if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE)
+		return -EBUSY;
+
+	dev->sriov->driver_max_VFs = numvfs;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);
+
+/**
+ * pci_sriov_get_totalvfs - get total VFs supported on this device
+ * @dev: the PCI PF device
+ *
+ * For a PCIe device with SRIOV support, return the PCIe
+ * SRIOV capability value of TotalVFs or the value of driver_max_VFs
+ * if the driver reduced it. Otherwise 0.
+ */
+int pci_sriov_get_totalvfs(struct pci_dev *dev)
+{
+ if (!dev->is_physfn)
+ return 0;
+
+ if (dev->sriov->driver_max_VFs)
+ return dev->sriov->driver_max_VFs;
+
+ return dev->sriov->total_VFs;
+}
+EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs);
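+
+/*
+ * Usage sketch (an assumption, not part of this file): a PF driver that
+ * supports fewer VFs than the hardware TotalVFs would clamp the limit in
+ * its probe routine; MY_DRIVER_MAX_VFS is a hypothetical per-driver cap.
+ *
+ *	int total = pci_sriov_get_totalvfs(pdev);
+ *
+ *	if (total > MY_DRIVER_MAX_VFS)
+ *		pci_sriov_set_totalvfs(pdev, MY_DRIVER_MAX_VFS);
+ */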
diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c
new file mode 100644
index 000000000..6684f153a
--- /dev/null
+++ b/drivers/pci/irq.c
@@ -0,0 +1,61 @@
+/*
+ * PCI IRQ failure handling code
+ *
+ * Copyright (c) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+
+static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason)
+{
+ struct pci_dev *parent = to_pci_dev(pdev->dev.parent);
+
+ dev_err(&pdev->dev,
+ "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n",
+ dev_name(&parent->dev), parent->vendor, parent->device);
+ dev_err(&pdev->dev, "%s\n", reason);
+ dev_err(&pdev->dev, "Please report to linux-kernel@vger.kernel.org\n");
+ WARN_ON(1);
+}
+
+/**
+ * pci_lost_interrupt - reports a lost PCI interrupt
+ * @pdev: device whose interrupt is lost
+ *
+ * The primary function of this routine is to report a lost interrupt
+ * in a standard way which users can recognise (instead of blaming the
+ * driver).
+ *
+ * Returns:
+ * a suggestion for fixing it (although the driver is not required to
+ * act on this).
+ */
+enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *pdev)
+{
+ if (pdev->msi_enabled || pdev->msix_enabled) {
+ enum pci_lost_interrupt_reason ret;
+
+ if (pdev->msix_enabled) {
+ pci_note_irq_problem(pdev, "MSIX routing failure");
+ ret = PCI_LOST_IRQ_DISABLE_MSIX;
+ } else {
+ pci_note_irq_problem(pdev, "MSI routing failure");
+ ret = PCI_LOST_IRQ_DISABLE_MSI;
+ }
+ return ret;
+ }
+#ifdef CONFIG_ACPI
+ if (!(acpi_disabled || acpi_noirq)) {
+ pci_note_irq_problem(pdev, "Potential ACPI misrouting please reboot with acpi=noirq");
+ /* currently no way to fix acpi on the fly */
+ return PCI_LOST_IRQ_DISABLE_ACPI;
+ }
+#endif
+ pci_note_irq_problem(pdev, "unknown cause (not MSI or ACPI)");
+ return PCI_LOST_IRQ_NO_INFORMATION;
+}
+EXPORT_SYMBOL(pci_lost_interrupt);
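+
+/*
+ * Usage sketch (an assumption, not part of this file): a driver that has
+ * timed out waiting for an interrupt can report it and act on the hint:
+ *
+ *	switch (pci_lost_interrupt(pdev)) {
+ *	case PCI_LOST_IRQ_DISABLE_MSI:
+ *		pci_disable_msi(pdev);	// fall back to INTx
+ *		break;
+ *	default:
+ *		break;			// nothing to fix automatically
+ *	}
+ */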
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
new file mode 100644
index 000000000..c3e7dfcf9
--- /dev/null
+++ b/drivers/pci/msi.c
@@ -0,0 +1,1365 @@
+/*
+ * File: msi.c
+ * Purpose: PCI Message Signaled Interrupt (MSI)
+ *
+ * Copyright (C) 2003-2004 Intel
+ * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
+ */
+
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/export.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/msi.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/irqdomain.h>
+
+#include "pci.h"
+
+static int pci_msi_enable = 1;
+int pci_msi_ignore_mask;
+
+#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
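+
+/*
+ * Table Size in the MSI-X Message Control register is encoded as N - 1,
+ * hence the "+ 1" above: a QSIZE field of 0x003, for example, means the
+ * table has 4 entries.
+ */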
+
+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+static struct irq_domain *pci_msi_default_domain;
+static DEFINE_MUTEX(pci_msi_domain_lock);
+
+struct irq_domain * __weak arch_get_pci_msi_domain(struct pci_dev *dev)
+{
+ return pci_msi_default_domain;
+}
+
+static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev)
+{
+ struct irq_domain *domain = NULL;
+
+ if (dev->bus->msi)
+ domain = dev->bus->msi->domain;
+ if (!domain)
+ domain = arch_get_pci_msi_domain(dev);
+
+ return domain;
+}
+
+static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ struct irq_domain *domain;
+
+ domain = pci_msi_get_domain(dev);
+ if (domain)
+ return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);
+
+ return arch_setup_msi_irqs(dev, nvec, type);
+}
+
+static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
+{
+ struct irq_domain *domain;
+
+ domain = pci_msi_get_domain(dev);
+ if (domain)
+ pci_msi_domain_free_irqs(domain, dev);
+ else
+ arch_teardown_msi_irqs(dev);
+}
+#else
+#define pci_msi_setup_msi_irqs arch_setup_msi_irqs
+#define pci_msi_teardown_msi_irqs arch_teardown_msi_irqs
+#endif
+
+/* Arch hooks */
+
+struct msi_controller * __weak pcibios_msi_controller(struct pci_dev *dev)
+{
+ return NULL;
+}
+
+static struct msi_controller *pci_msi_controller(struct pci_dev *dev)
+{
+ struct msi_controller *msi_ctrl = dev->bus->msi;
+
+ if (msi_ctrl)
+ return msi_ctrl;
+
+ return pcibios_msi_controller(dev);
+}
+
+int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
+{
+ struct msi_controller *chip = pci_msi_controller(dev);
+ int err;
+
+ if (!chip || !chip->setup_irq)
+ return -EINVAL;
+
+ err = chip->setup_irq(chip, dev, desc);
+ if (err < 0)
+ return err;
+
+ irq_set_chip_data(desc->irq, chip);
+
+ return 0;
+}
+
+void __weak arch_teardown_msi_irq(unsigned int irq)
+{
+ struct msi_controller *chip = irq_get_chip_data(irq);
+
+ if (!chip || !chip->teardown_irq)
+ return;
+
+ chip->teardown_irq(chip, irq);
+}
+
+int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ struct msi_desc *entry;
+ int ret;
+
+ /*
+ * If an architecture wants to support multiple MSI, it needs to
+ * override arch_setup_msi_irqs()
+ */
+ if (type == PCI_CAP_ID_MSI && nvec > 1)
+ return 1;
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ ret = arch_setup_msi_irq(dev, entry);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+/*
+ * We have a default implementation available as a separate non-weak
+ * function, as it is used by the Xen x86 PCI code
+ */
+void default_teardown_msi_irqs(struct pci_dev *dev)
+{
+ int i;
+ struct msi_desc *entry;
+
+ list_for_each_entry(entry, &dev->msi_list, list)
+ if (entry->irq)
+ for (i = 0; i < entry->nvec_used; i++)
+ arch_teardown_msi_irq(entry->irq + i);
+}
+
+void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
+{
+ return default_teardown_msi_irqs(dev);
+}
+
+static void default_restore_msi_irq(struct pci_dev *dev, int irq)
+{
+ struct msi_desc *entry;
+
+ entry = NULL;
+ if (dev->msix_enabled) {
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ if (irq == entry->irq)
+ break;
+ }
+ } else if (dev->msi_enabled) {
+ entry = irq_get_msi_desc(irq);
+ }
+
+ if (entry)
+ __pci_write_msi_msg(entry, &entry->msg);
+}
+
+void __weak arch_restore_msi_irqs(struct pci_dev *dev)
+{
+ return default_restore_msi_irqs(dev);
+}
+
+static void msi_set_enable(struct pci_dev *dev, int enable)
+{
+ u16 control;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+ if (enable)
+ control |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
+}
+
+static void msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
+{
+ u16 ctrl;
+
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ ctrl &= ~clear;
+ ctrl |= set;
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+}
+
+static inline __attribute_const__ u32 msi_mask(unsigned x)
+{
+ /* Don't shift by >= width of type */
+ if (x >= 5)
+ return 0xffffffff;
+ return (1 << (1 << x)) - 1;
+}
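+
+/*
+ * Worked examples for msi_mask() above: x is the log2-encoded vector count
+ * from Multiple Message Capable, so msi_mask(0) = 0x1 (1 vector),
+ * msi_mask(3) = 0xff (8 vectors), and x >= 5 saturates to all 32 mask bits.
+ */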
+
+/*
+ * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
+ * mask all MSI interrupts by clearing the MSI enable bit does not work
+ * reliably as devices without an INTx disable bit will then generate a
+ * level IRQ which will never be cleared.
+ */
+u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+{
+ u32 mask_bits = desc->masked;
+
+ if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
+ return 0;
+
+ mask_bits &= ~mask;
+ mask_bits |= flag;
+ pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
+
+ return mask_bits;
+}
+
+static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
+{
+ desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
+}
+
+/*
+ * This internal function does not flush PCI writes to the device.
+ * All users must ensure that they read from the device before either
+ * assuming that the device state is up to date, or returning out of this
+ * file. This saves a few milliseconds when initialising devices with lots
+ * of MSI-X interrupts.
+ */
+u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
+{
+ u32 mask_bits = desc->masked;
+ unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+ if (pci_msi_ignore_mask)
+ return 0;
+
+ mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ if (flag)
+ mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ writel(mask_bits, desc->mask_base + offset);
+
+ return mask_bits;
+}
+
+static void msix_mask_irq(struct msi_desc *desc, u32 flag)
+{
+ desc->masked = __pci_msix_desc_mask_irq(desc, flag);
+}
+
+static void msi_set_mask_bit(struct irq_data *data, u32 flag)
+{
+ struct msi_desc *desc = irq_data_get_msi(data);
+
+ if (desc->msi_attrib.is_msix) {
+ msix_mask_irq(desc, flag);
+ readl(desc->mask_base); /* Flush write to device */
+ } else {
+ unsigned offset = data->irq - desc->irq;
+ msi_mask_irq(desc, 1 << offset, flag << offset);
+ }
+}
+
+/**
+ * pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts
+ * @data: pointer to irqdata associated to that interrupt
+ */
+void pci_msi_mask_irq(struct irq_data *data)
+{
+ msi_set_mask_bit(data, 1);
+}
+
+/**
+ * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts
+ * @data: pointer to irqdata associated to that interrupt
+ */
+void pci_msi_unmask_irq(struct irq_data *data)
+{
+ msi_set_mask_bit(data, 0);
+}
+
+void default_restore_msi_irqs(struct pci_dev *dev)
+{
+ struct msi_desc *entry;
+
+ list_for_each_entry(entry, &dev->msi_list, list)
+ default_restore_msi_irq(dev, entry->irq);
+}
+
+void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
+{
+ BUG_ON(entry->dev->current_state != PCI_D0);
+
+ if (entry->msi_attrib.is_msix) {
+ void __iomem *base = entry->mask_base +
+ entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+
+ msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
+ msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
+ msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
+ } else {
+ struct pci_dev *dev = entry->dev;
+ int pos = dev->msi_cap;
+ u16 data;
+
+ pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
+ &msg->address_lo);
+ if (entry->msi_attrib.is_64) {
+ pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
+ &msg->address_hi);
+ pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data);
+ } else {
+ msg->address_hi = 0;
+ pci_read_config_word(dev, pos + PCI_MSI_DATA_32, &data);
+ }
+ msg->data = data;
+ }
+}
+
+void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
+{
+ if (entry->dev->current_state != PCI_D0) {
+ /* Don't touch the hardware now */
+ } else if (entry->msi_attrib.is_msix) {
+ void __iomem *base;
+ base = entry->mask_base +
+ entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+
+ writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
+ writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
+ writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
+ } else {
+ struct pci_dev *dev = entry->dev;
+ int pos = dev->msi_cap;
+ u16 msgctl;
+
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
+ msgctl &= ~PCI_MSI_FLAGS_QSIZE;
+ msgctl |= entry->msi_attrib.multiple << 4;
+ pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);
+
+ pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
+ msg->address_lo);
+ if (entry->msi_attrib.is_64) {
+ pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
+ msg->address_hi);
+ pci_write_config_word(dev, pos + PCI_MSI_DATA_64,
+ msg->data);
+ } else {
+ pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
+ msg->data);
+ }
+ }
+ entry->msg = *msg;
+}
+
+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
+{
+ struct msi_desc *entry = irq_get_msi_desc(irq);
+
+ __pci_write_msi_msg(entry, msg);
+}
+EXPORT_SYMBOL_GPL(pci_write_msi_msg);
+
+static void free_msi_irqs(struct pci_dev *dev)
+{
+ struct msi_desc *entry, *tmp;
+ struct attribute **msi_attrs;
+ struct device_attribute *dev_attr;
+ int i, count = 0;
+
+ list_for_each_entry(entry, &dev->msi_list, list)
+ if (entry->irq)
+ for (i = 0; i < entry->nvec_used; i++)
+ BUG_ON(irq_has_action(entry->irq + i));
+
+ pci_msi_teardown_msi_irqs(dev);
+
+ list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
+ if (entry->msi_attrib.is_msix) {
+ if (list_is_last(&entry->list, &dev->msi_list))
+ iounmap(entry->mask_base);
+ }
+
+ list_del(&entry->list);
+ kfree(entry);
+ }
+
+ if (dev->msi_irq_groups) {
+ sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups);
+ msi_attrs = dev->msi_irq_groups[0]->attrs;
+ while (msi_attrs[count]) {
+ dev_attr = container_of(msi_attrs[count],
+ struct device_attribute, attr);
+ kfree(dev_attr->attr.name);
+ kfree(dev_attr);
+ ++count;
+ }
+ kfree(msi_attrs);
+ kfree(dev->msi_irq_groups[0]);
+ kfree(dev->msi_irq_groups);
+ dev->msi_irq_groups = NULL;
+ }
+}
+
+static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
+{
+ struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return NULL;
+
+ INIT_LIST_HEAD(&desc->list);
+ desc->dev = dev;
+
+ return desc;
+}
+
+static void pci_intx_for_msi(struct pci_dev *dev, int enable)
+{
+ if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
+ pci_intx(dev, enable);
+}
+
+static void __pci_restore_msi_state(struct pci_dev *dev)
+{
+ u16 control;
+ struct msi_desc *entry;
+
+ if (!dev->msi_enabled)
+ return;
+
+ entry = irq_get_msi_desc(dev->irq);
+
+ pci_intx_for_msi(dev, 0);
+ msi_set_enable(dev, 0);
+ arch_restore_msi_irqs(dev);
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+ msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap),
+ entry->masked);
+ control &= ~PCI_MSI_FLAGS_QSIZE;
+ control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
+}
+
+static void __pci_restore_msix_state(struct pci_dev *dev)
+{
+ struct msi_desc *entry;
+
+ if (!dev->msix_enabled)
+ return;
+ BUG_ON(list_empty(&dev->msi_list));
+
+ /* route the table */
+ pci_intx_for_msi(dev, 0);
+ msix_clear_and_set_ctrl(dev, 0,
+ PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
+
+ arch_restore_msi_irqs(dev);
+ list_for_each_entry(entry, &dev->msi_list, list)
+ msix_mask_irq(entry, entry->masked);
+
+ msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
+}
+
+void pci_restore_msi_state(struct pci_dev *dev)
+{
+ __pci_restore_msi_state(dev);
+ __pci_restore_msix_state(dev);
+}
+EXPORT_SYMBOL_GPL(pci_restore_msi_state);
+
+static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct msi_desc *entry;
+ unsigned long irq;
+ int retval;
+
+ retval = kstrtoul(attr->attr.name, 10, &irq);
+ if (retval)
+ return retval;
+
+ entry = irq_get_msi_desc(irq);
+ if (entry)
+ return sprintf(buf, "%s\n",
+ entry->msi_attrib.is_msix ? "msix" : "msi");
+
+ return -ENODEV;
+}
+
+static int populate_msi_sysfs(struct pci_dev *pdev)
+{
+ struct attribute **msi_attrs;
+ struct attribute *msi_attr;
+ struct device_attribute *msi_dev_attr;
+ struct attribute_group *msi_irq_group;
+ const struct attribute_group **msi_irq_groups;
+ struct msi_desc *entry;
+ int ret = -ENOMEM;
+ int num_msi = 0;
+ int count = 0;
+
+ /* Determine how many msi entries we have */
+ list_for_each_entry(entry, &pdev->msi_list, list)
+ ++num_msi;
+ if (!num_msi)
+ return 0;
+
+ /* Dynamically create the MSI attributes for the PCI device */
+ msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL);
+ if (!msi_attrs)
+ return -ENOMEM;
+ list_for_each_entry(entry, &pdev->msi_list, list) {
+ msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
+ if (!msi_dev_attr)
+ goto error_attrs;
+ msi_attrs[count] = &msi_dev_attr->attr;
+
+ sysfs_attr_init(&msi_dev_attr->attr);
+ msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
+ entry->irq);
+ if (!msi_dev_attr->attr.name)
+ goto error_attrs;
+ msi_dev_attr->attr.mode = S_IRUGO;
+ msi_dev_attr->show = msi_mode_show;
+ ++count;
+ }
+
+ msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
+ if (!msi_irq_group)
+ goto error_attrs;
+ msi_irq_group->name = "msi_irqs";
+ msi_irq_group->attrs = msi_attrs;
+
+ msi_irq_groups = kzalloc(sizeof(void *) * 2, GFP_KERNEL);
+ if (!msi_irq_groups)
+ goto error_irq_group;
+ msi_irq_groups[0] = msi_irq_group;
+
+ ret = sysfs_create_groups(&pdev->dev.kobj, msi_irq_groups);
+ if (ret)
+ goto error_irq_groups;
+ pdev->msi_irq_groups = msi_irq_groups;
+
+ return 0;
+
+error_irq_groups:
+ kfree(msi_irq_groups);
+error_irq_group:
+ kfree(msi_irq_group);
+error_attrs:
+ count = 0;
+ msi_attr = msi_attrs[count];
+ while (msi_attr) {
+ msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
+ kfree(msi_attr->name);
+ kfree(msi_dev_attr);
+ ++count;
+ msi_attr = msi_attrs[count];
+ }
+ kfree(msi_attrs);
+ return ret;
+}
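+
+/*
+ * Usage sketch (an assumption, not part of this file): each vector set up
+ * here appears under msi_irqs/ named by its Linux irq number; the device
+ * address and irq below are only examples:
+ *
+ *	$ cat /sys/bus/pci/devices/0000:00:19.0/msi_irqs/27
+ *	msi
+ */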
+
+static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
+{
+ u16 control;
+ struct msi_desc *entry;
+
+ /* MSI Entry Initialization */
+ entry = alloc_msi_entry(dev);
+ if (!entry)
+ return NULL;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+
+ entry->msi_attrib.is_msix = 0;
+ entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
+ entry->msi_attrib.entry_nr = 0;
+ entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT);
+ entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
+ entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
+ entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
+ entry->nvec_used = nvec;
+
+ if (control & PCI_MSI_FLAGS_64BIT)
+ entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
+ else
+ entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
+
+ /* Save the initial mask status */
+ if (entry->msi_attrib.maskbit)
+ pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
+
+ return entry;
+}
+
+static int msi_verify_entries(struct pci_dev *dev)
+{
+ struct msi_desc *entry;
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ if (!dev->no_64bit_msi || !entry->msg.address_hi)
+ continue;
+		dev_err(&dev->dev,
+			"Device has broken 64-bit MSI but arch tried to assign one above 4G\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+/**
+ * msi_capability_init - configure device's MSI capability structure
+ * @dev: pointer to the pci_dev data structure of MSI device function
+ * @nvec: number of interrupts to allocate
+ *
+ * Setup the MSI capability structure of the device with the requested
+ * number of interrupts. A return value of zero indicates the successful
+ * setup of an entry with the new MSI irq. A negative return value indicates
+ * an error, and a positive return value indicates the number of interrupts
+ * which could have been allocated.
+ */
+static int msi_capability_init(struct pci_dev *dev, int nvec)
+{
+ struct msi_desc *entry;
+ int ret;
+ unsigned mask;
+
+ msi_set_enable(dev, 0); /* Disable MSI during set up */
+
+ entry = msi_setup_entry(dev, nvec);
+ if (!entry)
+ return -ENOMEM;
+
+	/* All MSIs are unmasked by default; mask them all */
+ mask = msi_mask(entry->msi_attrib.multi_cap);
+ msi_mask_irq(entry, mask, mask);
+
+ list_add_tail(&entry->list, &dev->msi_list);
+
+ /* Configure MSI capability structure */
+ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
+ if (ret) {
+ msi_mask_irq(entry, mask, ~mask);
+ free_msi_irqs(dev);
+ return ret;
+ }
+
+ ret = msi_verify_entries(dev);
+ if (ret) {
+ msi_mask_irq(entry, mask, ~mask);
+ free_msi_irqs(dev);
+ return ret;
+ }
+
+ ret = populate_msi_sysfs(dev);
+ if (ret) {
+ msi_mask_irq(entry, mask, ~mask);
+ free_msi_irqs(dev);
+ return ret;
+ }
+
+ /* Set MSI enabled bits */
+ pci_intx_for_msi(dev, 0);
+ msi_set_enable(dev, 1);
+ dev->msi_enabled = 1;
+
+ dev->irq = entry->irq;
+ return 0;
+}
+
+static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
+{
+ resource_size_t phys_addr;
+ u32 table_offset;
+ unsigned long flags;
+ u8 bir;
+
+ pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
+ &table_offset);
+ bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
+ flags = pci_resource_flags(dev, bir);
+ if (!flags || (flags & IORESOURCE_UNSET))
+ return NULL;
+
+ table_offset &= PCI_MSIX_TABLE_OFFSET;
+ phys_addr = pci_resource_start(dev, bir) + table_offset;
+
+ return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
+}
+
+static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
+ struct msix_entry *entries, int nvec)
+{
+ struct msi_desc *entry;
+ int i;
+
+ for (i = 0; i < nvec; i++) {
+ entry = alloc_msi_entry(dev);
+ if (!entry) {
+ if (!i)
+ iounmap(base);
+ else
+ free_msi_irqs(dev);
+			/* Not enough memory. Don't try again */
+ return -ENOMEM;
+ }
+
+ entry->msi_attrib.is_msix = 1;
+ entry->msi_attrib.is_64 = 1;
+ entry->msi_attrib.entry_nr = entries[i].entry;
+ entry->msi_attrib.default_irq = dev->irq;
+ entry->mask_base = base;
+ entry->nvec_used = 1;
+
+ list_add_tail(&entry->list, &dev->msi_list);
+ }
+
+ return 0;
+}
+
+static void msix_program_entries(struct pci_dev *dev,
+ struct msix_entry *entries)
+{
+ struct msi_desc *entry;
+ int i = 0;
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+ entries[i].vector = entry->irq;
+ entry->masked = readl(entry->mask_base + offset);
+ msix_mask_irq(entry, 1);
+ i++;
+ }
+}
+
+/**
+ * msix_capability_init - configure device's MSI-X capability
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ * @entries: pointer to an array of struct msix_entry entries
+ * @nvec: number of @entries
+ *
+ * Setup the MSI-X capability structure of the device function with the
+ * requested number of MSI-X irqs. A return of zero indicates successful
+ * setup of the requested MSI-X entries with allocated irqs; non-zero otherwise.
+ **/
+static int msix_capability_init(struct pci_dev *dev,
+ struct msix_entry *entries, int nvec)
+{
+ int ret;
+ u16 control;
+ void __iomem *base;
+
+ /* Ensure MSI-X is disabled while it is set up */
+ msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
+ /* Request & Map MSI-X table region */
+ base = msix_map_region(dev, msix_table_size(control));
+ if (!base)
+ return -ENOMEM;
+
+ ret = msix_setup_entries(dev, base, entries, nvec);
+ if (ret)
+ return ret;
+
+ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
+ if (ret)
+ goto out_avail;
+
+ /* Check if all MSI entries honor device restrictions */
+ ret = msi_verify_entries(dev);
+ if (ret)
+ goto out_free;
+
+ /*
+ * Some devices require MSI-X to be enabled before we can touch the
+ * MSI-X registers. We need to mask all the vectors to prevent
+ * interrupts coming in before they're fully set up.
+ */
+ msix_clear_and_set_ctrl(dev, 0,
+ PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
+
+ msix_program_entries(dev, entries);
+
+ ret = populate_msi_sysfs(dev);
+ if (ret)
+ goto out_free;
+
+ /* Set MSI-X enabled bits and unmask the function */
+ pci_intx_for_msi(dev, 0);
+ dev->msix_enabled = 1;
+
+ msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
+
+ return 0;
+
+out_avail:
+ if (ret < 0) {
+ /*
+ * If we had some success, report the number of irqs
+ * we succeeded in setting up.
+ */
+ struct msi_desc *entry;
+ int avail = 0;
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ if (entry->irq != 0)
+ avail++;
+ }
+ if (avail != 0)
+ ret = avail;
+ }
+
+out_free:
+ free_msi_irqs(dev);
+
+ return ret;
+}
+
+/**
+ * pci_msi_supported - check whether MSI may be enabled on a device
+ * @dev: pointer to the pci_dev data structure of MSI device function
+ * @nvec: how many MSIs have been requested?
+ *
+ * Look at global flags, the device itself, and its parent buses
+ * to determine whether MSI/MSI-X is supported for the device. If MSI/MSI-X
+ * is supported, return 1, else return 0.
+ **/
+static int pci_msi_supported(struct pci_dev *dev, int nvec)
+{
+ struct pci_bus *bus;
+
+ /* MSI must be globally enabled and supported by the device */
+ if (!pci_msi_enable)
+ return 0;
+
+ if (!dev || dev->no_msi || dev->current_state != PCI_D0)
+ return 0;
+
+ /*
+ * You can't ask to have zero or fewer MSIs configured:
+ * a) it makes no sense;
+ * b) the list manipulation code assumes nvec >= 1.
+ */
+ if (nvec < 1)
+ return 0;
+
+ /*
+ * Any bridge which does NOT route MSI transactions from its
+ * secondary bus to its primary bus must set NO_MSI flag on
+ * the secondary pci_bus.
+ * We expect only arch-specific PCI host bus controller driver
+ * or quirks for specific PCI bridges to be setting NO_MSI.
+ */
+ for (bus = dev->bus; bus; bus = bus->parent)
+ if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
+ return 0;
+
+ return 1;
+}
+
+/**
+ * pci_msi_vec_count - Return the number of MSI vectors a device can send
+ * @dev: device to report about
+ *
+ * This function returns the number of MSI vectors a device requested via
+ * its Multiple Message Capable register. It returns a negative errno if the
+ * device is not capable of sending MSI interrupts. Otherwise, the call
+ * succeeds and returns a power of two, up to a maximum of 2^5 (32), per the
+ * MSI specification.
+ **/
+int pci_msi_vec_count(struct pci_dev *dev)
+{
+ int ret;
+ u16 msgctl;
+
+ if (!dev->msi_cap)
+ return -EINVAL;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
+ ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
+
+ return ret;
+}
+EXPORT_SYMBOL(pci_msi_vec_count);
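+
+/*
+ * Illustrative sketch (not part of this file): a hypothetical driver
+ * routine could use pci_msi_vec_count() to size its request before
+ * enabling MSI, where FOO_MAX_VECTORS is a made-up driver limit:
+ *
+ *	int nvec = pci_msi_vec_count(pdev);
+ *
+ *	if (nvec < 0)
+ *		return nvec;
+ *	nvec = min(nvec, FOO_MAX_VECTORS);
+ */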
+
+void pci_msi_shutdown(struct pci_dev *dev)
+{
+ struct msi_desc *desc;
+ u32 mask;
+
+ if (!pci_msi_enable || !dev || !dev->msi_enabled)
+ return;
+
+ BUG_ON(list_empty(&dev->msi_list));
+ desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
+
+ msi_set_enable(dev, 0);
+ pci_intx_for_msi(dev, 1);
+ dev->msi_enabled = 0;
+
+ /* Return the device with MSI unmasked, restoring its initial state */
+ mask = msi_mask(desc->msi_attrib.multi_cap);
+ /* Keep cached state to be restored */
+ __pci_msi_desc_mask_irq(desc, mask, ~mask);
+
+ /* Restore dev->irq to its default pin-assertion irq */
+ dev->irq = desc->msi_attrib.default_irq;
+}
+
+void pci_disable_msi(struct pci_dev *dev)
+{
+ if (!pci_msi_enable || !dev || !dev->msi_enabled)
+ return;
+
+ pci_msi_shutdown(dev);
+ free_msi_irqs(dev);
+}
+EXPORT_SYMBOL(pci_disable_msi);
+
+/**
+ * pci_msix_vec_count - return the number of the device's MSI-X table entries
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ *
+ * This function returns the number of the device's MSI-X table entries and
+ * therefore the number of MSI-X vectors the device is capable of sending.
+ * It returns a negative errno if the device is not capable of sending MSI-X
+ * interrupts.
+ **/
+int pci_msix_vec_count(struct pci_dev *dev)
+{
+ u16 control;
+
+ if (!dev->msix_cap)
+ return -EINVAL;
+
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
+ return msix_table_size(control);
+}
+EXPORT_SYMBOL(pci_msix_vec_count);
+
+/**
+ * pci_enable_msix - configure device's MSI-X capability structure
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ * @entries: pointer to an array of MSI-X entries
+ * @nvec: number of MSI-X irqs requested for allocation by device driver
+ *
+ * Set up the MSI-X capability structure of the device function with the
+ * number of irqs requested by its driver when enabling MSI-X mode. A
+ * return of zero indicates the successful configuration of the MSI-X
+ * capability structure with newly allocated MSI-X irqs. A return of < 0
+ * indicates a failure. A return of > 0 indicates that the request exceeds
+ * the number of irqs or MSI-X vectors available; the driver should retry
+ * with the returned value.
+ **/
+int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
+{
+ int nr_entries;
+ int i, j;
+
+ if (!pci_msi_supported(dev, nvec))
+ return -EINVAL;
+
+ if (!entries)
+ return -EINVAL;
+
+ nr_entries = pci_msix_vec_count(dev);
+ if (nr_entries < 0)
+ return nr_entries;
+ if (nvec > nr_entries)
+ return nr_entries;
+
+ /* Check for any invalid entries */
+ for (i = 0; i < nvec; i++) {
+ if (entries[i].entry >= nr_entries)
+ return -EINVAL; /* invalid entry */
+ for (j = i + 1; j < nvec; j++) {
+ if (entries[i].entry == entries[j].entry)
+ return -EINVAL; /* duplicate entry */
+ }
+ }
+ WARN_ON(!!dev->msix_enabled);
+
+ /* Check whether the driver already requested MSI irqs */
+ if (dev->msi_enabled) {
+ dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
+ return -EINVAL;
+ }
+ return msix_capability_init(dev, entries, nvec);
+}
+EXPORT_SYMBOL(pci_enable_msix);
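+
+/*
+ * Illustrative sketch (not part of this file): a hypothetical caller
+ * fills an msix_entry array with the table slots it wants; on success
+ * entries[i].vector holds the allocated Linux irq numbers:
+ *
+ *	struct msix_entry entries[4];
+ *	int i, rc;
+ *
+ *	for (i = 0; i < 4; i++)
+ *		entries[i].entry = i;
+ *	rc = pci_enable_msix(pdev, entries, 4);
+ *	if (rc > 0)
+ *		rc = pci_enable_msix(pdev, entries, rc);
+ *	if (rc)
+ *		return rc;
+ */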
+
+void pci_msix_shutdown(struct pci_dev *dev)
+{
+ struct msi_desc *entry;
+
+ if (!pci_msi_enable || !dev || !dev->msix_enabled)
+ return;
+
+ /* Return the device with MSI-X masked, restoring its initial state */
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ /* Keep cached states to be restored */
+ __pci_msix_desc_mask_irq(entry, 1);
+ }
+
+ msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+ pci_intx_for_msi(dev, 1);
+ dev->msix_enabled = 0;
+}
+
+void pci_disable_msix(struct pci_dev *dev)
+{
+ if (!pci_msi_enable || !dev || !dev->msix_enabled)
+ return;
+
+ pci_msix_shutdown(dev);
+ free_msi_irqs(dev);
+}
+EXPORT_SYMBOL(pci_disable_msix);
+
+void pci_no_msi(void)
+{
+ pci_msi_enable = 0;
+}
+
+/**
+ * pci_msi_enabled - is MSI enabled?
+ *
+ * Returns true if MSI has not been disabled by the command-line option
+ * pci=nomsi.
+ **/
+int pci_msi_enabled(void)
+{
+ return pci_msi_enable;
+}
+EXPORT_SYMBOL(pci_msi_enabled);
+
+void pci_msi_init_pci_dev(struct pci_dev *dev)
+{
+ INIT_LIST_HEAD(&dev->msi_list);
+
+ /*
+ * Disable the MSI hardware to avoid screaming interrupts
+ * during boot. This is the power-on reset default, so
+ * usually this should be a no-op.
+ */
+ dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (dev->msi_cap)
+ msi_set_enable(dev, 0);
+
+ dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (dev->msix_cap)
+ msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+}
+
+/**
+ * pci_enable_msi_range - configure device's MSI capability structure
+ * @dev: device to configure
+ * @minvec: minimal number of interrupts to configure
+ * @maxvec: maximum number of interrupts to configure
+ *
+ * This function tries to allocate the maximum possible number of interrupts in a
+ * range between @minvec and @maxvec. It returns a negative errno if an error
+ * occurs. If it succeeds, it returns the actual number of interrupts allocated
+ * and updates the @dev's irq member to the lowest new interrupt number;
+ * the other interrupt numbers allocated to this device are consecutive.
+ **/
+int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
+{
+ int nvec;
+ int rc;
+
+ if (!pci_msi_supported(dev, minvec))
+ return -EINVAL;
+
+ WARN_ON(!!dev->msi_enabled);
+
+ /* Check whether driver already requested MSI-X irqs */
+ if (dev->msix_enabled) {
+ dev_info(&dev->dev,
+ "can't enable MSI (MSI-X already enabled)\n");
+ return -EINVAL;
+ }
+
+ if (maxvec < minvec)
+ return -ERANGE;
+
+ nvec = pci_msi_vec_count(dev);
+ if (nvec < 0)
+ return nvec;
+ else if (nvec < minvec)
+ return -EINVAL;
+ else if (nvec > maxvec)
+ nvec = maxvec;
+
+ do {
+ rc = msi_capability_init(dev, nvec);
+ if (rc < 0) {
+ return rc;
+ } else if (rc > 0) {
+ if (rc < minvec)
+ return -ENOSPC;
+ nvec = rc;
+ }
+ } while (rc);
+
+ return nvec;
+}
+EXPORT_SYMBOL(pci_enable_msi_range);
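+
+/*
+ * Illustrative sketch (not part of this file): a hypothetical driver
+ * that can work with anything from 2 to 8 MSI vectors might call:
+ *
+ *	nvec = pci_enable_msi_range(pdev, 2, 8);
+ *	if (nvec < 0)
+ *		return nvec;
+ *
+ * On success the allocated vectors are pdev->irq .. pdev->irq + nvec - 1.
+ */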
+
+/**
+ * pci_enable_msix_range - configure device's MSI-X capability structure
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ * @entries: pointer to an array of MSI-X entries
+ * @minvec: minimum number of MSI-X irqs requested
+ * @maxvec: maximum number of MSI-X irqs requested
+ *
+ * Set up the MSI-X capability structure of the device function with the
+ * maximum possible number of interrupts in the range between @minvec and
+ * @maxvec when its driver enables MSI-X mode. It returns a negative
+ * errno if an error occurs. If it succeeds, it returns the actual number
+ * of interrupts allocated, indicating the successful configuration of the
+ * MSI-X capability structure with the newly allocated MSI-X interrupts.
+ **/
+int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+ int minvec, int maxvec)
+{
+ int nvec = maxvec;
+ int rc;
+
+ if (maxvec < minvec)
+ return -ERANGE;
+
+ do {
+ rc = pci_enable_msix(dev, entries, nvec);
+ if (rc < 0) {
+ return rc;
+ } else if (rc > 0) {
+ if (rc < minvec)
+ return -ENOSPC;
+ nvec = rc;
+ }
+ } while (rc);
+
+ return nvec;
+}
+EXPORT_SYMBOL(pci_enable_msix_range);
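+
+/*
+ * Illustrative sketch (not part of this file): the range variant hides
+ * the retry loop, so a hypothetical caller only checks the final count:
+ *
+ *	nvec = pci_enable_msix_range(pdev, entries, 2, 8);
+ *	if (nvec < 0)
+ *		return nvec;
+ *
+ * Here nvec ends up between 2 and 8, and entries[i].vector holds the
+ * allocated irq numbers.
+ */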
+
+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+/**
+ * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
+ * @irq_data: Pointer to interrupt data of the MSI interrupt
+ * @msg: Pointer to the message
+ */
+void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
+{
+ struct msi_desc *desc = irq_data->msi_desc;
+
+ /*
+ * For MSI-X desc->irq is always equal to irq_data->irq. For
+ * MSI only the first interrupt of MULTI MSI passes the test.
+ */
+ if (desc->irq == irq_data->irq)
+ __pci_write_msi_msg(desc, msg);
+}
+
+/**
+ * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
+ * @dev: Pointer to the PCI device
+ * @desc: Pointer to the msi descriptor
+ *
+ * The ID number is only used within the irqdomain.
+ */
+irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
+ struct msi_desc *desc)
+{
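+ /*
+ * Compose a unique hwirq: bits 0-10 carry the entry number, bits
+ * 11-26 the bus/devfn from PCI_DEVID(), and bits 27 and up the PCI
+ * domain number, so interrupts from different devices never collide.
+ */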
+ return (irq_hw_number_t)desc->msi_attrib.entry_nr |
+ PCI_DEVID(dev->bus->number, dev->devfn) << 11 |
+ (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
+}
+
+static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
+{
+ return !desc->msi_attrib.is_msix && desc->nvec_used > 1;
+}
+
+/**
+ * pci_msi_domain_check_cap - Verify that @domain supports the capabilities for @dev
+ * @domain: The interrupt domain to check
+ * @info: The domain info for verification
+ * @dev: The device to check
+ *
+ * Returns:
+ * 0 if the functionality is supported
+ * 1 if Multi MSI is requested, but the domain does not support it
+ * -ENOTSUPP otherwise
+ */
+int pci_msi_domain_check_cap(struct irq_domain *domain,
+ struct msi_domain_info *info, struct device *dev)
+{
+ struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));
+
+ /* Special handling to support pci_enable_msi_range() */
+ if (pci_msi_desc_is_multi_msi(desc) &&
+ !(info->flags & MSI_FLAG_MULTI_PCI_MSI))
+ return 1;
+ else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX))
+ return -ENOTSUPP;
+
+ return 0;
+}
+
+static int pci_msi_domain_handle_error(struct irq_domain *domain,
+ struct msi_desc *desc, int error)
+{
+ /* Special handling to support pci_enable_msi_range() */
+ if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC)
+ return 1;
+
+ return error;
+}
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
+ struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc),
+ desc);
+}
+#else
+#define pci_msi_domain_set_desc NULL
+#endif
+
+static struct msi_domain_ops pci_msi_domain_ops_default = {
+ .set_desc = pci_msi_domain_set_desc,
+ .msi_check = pci_msi_domain_check_cap,
+ .handle_error = pci_msi_domain_handle_error,
+};
+
+static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
+{
+ struct msi_domain_ops *ops = info->ops;
+
+ if (ops == NULL) {
+ info->ops = &pci_msi_domain_ops_default;
+ } else {
+ if (ops->set_desc == NULL)
+ ops->set_desc = pci_msi_domain_set_desc;
+ if (ops->msi_check == NULL)
+ ops->msi_check = pci_msi_domain_check_cap;
+ if (ops->handle_error == NULL)
+ ops->handle_error = pci_msi_domain_handle_error;
+ }
+}
+
+static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ BUG_ON(!chip);
+ if (!chip->irq_write_msi_msg)
+ chip->irq_write_msi_msg = pci_msi_domain_write_msg;
+}
+
+/**
+ * pci_msi_create_irq_domain - Create an MSI interrupt domain
+ * @node: Optional device-tree node of the interrupt controller
+ * @info: MSI domain info
+ * @parent: Parent irq domain
+ *
+ * Updates the domain and chip ops and creates an MSI interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
+ struct msi_domain_info *info,
+ struct irq_domain *parent)
+{
+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+ pci_msi_domain_update_dom_ops(info);
+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+ pci_msi_domain_update_chip_ops(info);
+
+ return msi_create_irq_domain(node, info, parent);
+}
+
+/**
+ * pci_msi_domain_alloc_irqs - Allocate interrupts for @dev in @domain
+ * @domain: The interrupt domain to allocate from
+ * @dev: The device for which to allocate
+ * @nvec: The number of interrupts to allocate
+ * @type: Unused to allow simpler migration from the arch_XXX interfaces
+ *
+ * Returns:
+ * A virtual interrupt number or an error code in case of failure
+ */
+int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
+ int nvec, int type)
+{
+ return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
+}
+
+/**
+ * pci_msi_domain_free_irqs - Free interrupts for @dev in @domain
+ * @domain: The interrupt domain
+ * @dev: The device for which to free interrupts
+ */
+void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev)
+{
+ msi_domain_free_irqs(domain, &dev->dev);
+}
+
+/**
+ * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain
+ * @node: Optional device-tree node of the interrupt controller
+ * @info: MSI domain info
+ * @parent: Parent irq domain
+ *
+ * Returns: A domain pointer or NULL in case of failure. If successful
+ * the default PCI/MSI irqdomain pointer is updated.
+ */
+struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
+ struct msi_domain_info *info, struct irq_domain *parent)
+{
+ struct irq_domain *domain;
+
+ mutex_lock(&pci_msi_domain_lock);
+ if (pci_msi_default_domain) {
+ pr_err("PCI: default irq domain for PCI MSI has already been created.\n");
+ domain = NULL;
+ } else {
+ domain = pci_msi_create_irq_domain(node, info, parent);
+ pci_msi_default_domain = domain;
+ }
+ mutex_unlock(&pci_msi_domain_lock);
+
+ return domain;
+}
+#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
new file mode 100644
index 000000000..f0929934b
--- /dev/null
+++ b/drivers/pci/of.c
@@ -0,0 +1,61 @@
+/*
+ * PCI <-> OF mapping helpers
+ *
+ * Copyright 2011 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include "pci.h"
+
+void pci_set_of_node(struct pci_dev *dev)
+{
+ if (!dev->bus->dev.of_node)
+ return;
+ dev->dev.of_node = of_pci_find_child_device(dev->bus->dev.of_node,
+ dev->devfn);
+}
+
+void pci_release_of_node(struct pci_dev *dev)
+{
+ of_node_put(dev->dev.of_node);
+ dev->dev.of_node = NULL;
+}
+
+void pci_set_bus_of_node(struct pci_bus *bus)
+{
+ if (bus->self == NULL)
+ bus->dev.of_node = pcibios_get_phb_of_node(bus);
+ else
+ bus->dev.of_node = of_node_get(bus->self->dev.of_node);
+}
+
+void pci_release_bus_of_node(struct pci_bus *bus)
+{
+ of_node_put(bus->dev.of_node);
+ bus->dev.of_node = NULL;
+}
+
+struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+ /* This should only be called for PHBs */
+ if (WARN_ON(bus->self || bus->parent))
+ return NULL;
+
+ /*
+ * Look for a node pointer in either the intermediary device we
+ * create above the root bus or its own parent. Normally only
+ * the latter is populated.
+ */
+ if (bus->bridge->of_node)
+ return of_node_get(bus->bridge->of_node);
+ if (bus->bridge->parent && bus->bridge->parent->of_node)
+ return of_node_get(bus->bridge->parent->of_node);
+ return NULL;
+}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
new file mode 100644
index 000000000..6f6f175f5
--- /dev/null
+++ b/drivers/pci/pci-acpi.c
@@ -0,0 +1,716 @@
+/*
+ * File: pci-acpi.c
+ * Purpose: Provide PCI support in ACPI
+ *
+ * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
+ * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
+ * Copyright (C) 2004 Intel Corp.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/module.h>
+#include <linux/pci-aspm.h>
+#include <linux/pci-acpi.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include "pci.h"
+
+/*
+ * The UUID is defined in the PCI Firmware Specification available here:
+ * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
+ */
+const u8 pci_acpi_dsm_uuid[] = {
+ 0xd0, 0x37, 0xc9, 0xe5, 0x53, 0x35, 0x7a, 0x4d,
+ 0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d
+};
+
+phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
+{
+ acpi_status status = AE_NOT_EXIST;
+ unsigned long long mcfg_addr;
+
+ if (handle)
+ status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
+ NULL, &mcfg_addr);
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ return (phys_addr_t)mcfg_addr;
+}
+
+static acpi_status decode_type0_hpx_record(union acpi_object *record,
+ struct hotplug_params *hpx)
+{
+ int i;
+ union acpi_object *fields = record->package.elements;
+ u32 revision = fields[1].integer.value;
+
+ switch (revision) {
+ case 1:
+ if (record->package.count != 6)
+ return AE_ERROR;
+ for (i = 2; i < 6; i++)
+ if (fields[i].type != ACPI_TYPE_INTEGER)
+ return AE_ERROR;
+ hpx->t0 = &hpx->type0_data;
+ hpx->t0->revision = revision;
+ hpx->t0->cache_line_size = fields[2].integer.value;
+ hpx->t0->latency_timer = fields[3].integer.value;
+ hpx->t0->enable_serr = fields[4].integer.value;
+ hpx->t0->enable_perr = fields[5].integer.value;
+ break;
+ default:
+ printk(KERN_WARNING
+ "%s: Type 0 Revision %d record not supported\n",
+ __func__, revision);
+ return AE_ERROR;
+ }
+ return AE_OK;
+}
+
+static acpi_status decode_type1_hpx_record(union acpi_object *record,
+ struct hotplug_params *hpx)
+{
+ int i;
+ union acpi_object *fields = record->package.elements;
+ u32 revision = fields[1].integer.value;
+
+ switch (revision) {
+ case 1:
+ if (record->package.count != 5)
+ return AE_ERROR;
+ for (i = 2; i < 5; i++)
+ if (fields[i].type != ACPI_TYPE_INTEGER)
+ return AE_ERROR;
+ hpx->t1 = &hpx->type1_data;
+ hpx->t1->revision = revision;
+ hpx->t1->max_mem_read = fields[2].integer.value;
+ hpx->t1->avg_max_split = fields[3].integer.value;
+ hpx->t1->tot_max_split = fields[4].integer.value;
+ break;
+ default:
+ printk(KERN_WARNING
+ "%s: Type 1 Revision %d record not supported\n",
+ __func__, revision);
+ return AE_ERROR;
+ }
+ return AE_OK;
+}
+
+static acpi_status decode_type2_hpx_record(union acpi_object *record,
+ struct hotplug_params *hpx)
+{
+ int i;
+ union acpi_object *fields = record->package.elements;
+ u32 revision = fields[1].integer.value;
+
+ switch (revision) {
+ case 1:
+ if (record->package.count != 18)
+ return AE_ERROR;
+ for (i = 2; i < 18; i++)
+ if (fields[i].type != ACPI_TYPE_INTEGER)
+ return AE_ERROR;
+ hpx->t2 = &hpx->type2_data;
+ hpx->t2->revision = revision;
+ hpx->t2->unc_err_mask_and = fields[2].integer.value;
+ hpx->t2->unc_err_mask_or = fields[3].integer.value;
+ hpx->t2->unc_err_sever_and = fields[4].integer.value;
+ hpx->t2->unc_err_sever_or = fields[5].integer.value;
+ hpx->t2->cor_err_mask_and = fields[6].integer.value;
+ hpx->t2->cor_err_mask_or = fields[7].integer.value;
+ hpx->t2->adv_err_cap_and = fields[8].integer.value;
+ hpx->t2->adv_err_cap_or = fields[9].integer.value;
+ hpx->t2->pci_exp_devctl_and = fields[10].integer.value;
+ hpx->t2->pci_exp_devctl_or = fields[11].integer.value;
+ hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value;
+ hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value;
+ hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
+ hpx->t2->sec_unc_err_sever_or = fields[15].integer.value;
+ hpx->t2->sec_unc_err_mask_and = fields[16].integer.value;
+ hpx->t2->sec_unc_err_mask_or = fields[17].integer.value;
+ break;
+ default:
+ printk(KERN_WARNING
+ "%s: Type 2 Revision %d record not supported\n",
+ __func__, revision);
+ return AE_ERROR;
+ }
+ return AE_OK;
+}
+
+static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *package, *record, *fields;
+ u32 type;
+ int i;
+
+ /* Clear the return buffer with zeros */
+ memset(hpx, 0, sizeof(struct hotplug_params));
+
+ status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ package = (union acpi_object *)buffer.pointer;
+ if (package->type != ACPI_TYPE_PACKAGE) {
+ status = AE_ERROR;
+ goto exit;
+ }
+
+ for (i = 0; i < package->package.count; i++) {
+ record = &package->package.elements[i];
+ if (record->type != ACPI_TYPE_PACKAGE) {
+ status = AE_ERROR;
+ goto exit;
+ }
+
+ fields = record->package.elements;
+ if (fields[0].type != ACPI_TYPE_INTEGER ||
+ fields[1].type != ACPI_TYPE_INTEGER) {
+ status = AE_ERROR;
+ goto exit;
+ }
+
+ type = fields[0].integer.value;
+ switch (type) {
+ case 0:
+ status = decode_type0_hpx_record(record, hpx);
+ if (ACPI_FAILURE(status))
+ goto exit;
+ break;
+ case 1:
+ status = decode_type1_hpx_record(record, hpx);
+ if (ACPI_FAILURE(status))
+ goto exit;
+ break;
+ case 2:
+ status = decode_type2_hpx_record(record, hpx);
+ if (ACPI_FAILURE(status))
+ goto exit;
+ break;
+ default:
+ printk(KERN_ERR "%s: Type %d record not supported\n",
+ __func__, type);
+ status = AE_ERROR;
+ goto exit;
+ }
+ }
+ exit:
+ kfree(buffer.pointer);
+ return status;
+}
+
+static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *package, *fields;
+ int i;
+
+ memset(hpp, 0, sizeof(struct hotplug_params));
+
+ status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ package = (union acpi_object *) buffer.pointer;
+ if (package->type != ACPI_TYPE_PACKAGE ||
+ package->package.count != 4) {
+ status = AE_ERROR;
+ goto exit;
+ }
+
+ fields = package->package.elements;
+ for (i = 0; i < 4; i++) {
+ if (fields[i].type != ACPI_TYPE_INTEGER) {
+ status = AE_ERROR;
+ goto exit;
+ }
+ }
+
+ hpp->t0 = &hpp->type0_data;
+ hpp->t0->revision = 1;
+ hpp->t0->cache_line_size = fields[0].integer.value;
+ hpp->t0->latency_timer = fields[1].integer.value;
+ hpp->t0->enable_serr = fields[2].integer.value;
+ hpp->t0->enable_perr = fields[3].integer.value;
+
+exit:
+ kfree(buffer.pointer);
+ return status;
+}
+
+/**
+ * pci_get_hp_params - get hotplug parameters for a PCI device
+ * @dev: the pci_dev for which we want parameters
+ * @hpp: allocated by the caller
+ */
+int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
+{
+ acpi_status status;
+ acpi_handle handle, phandle;
+ struct pci_bus *pbus;
+
+ if (acpi_pci_disabled)
+ return -ENODEV;
+
+ handle = NULL;
+ for (pbus = dev->bus; pbus; pbus = pbus->parent) {
+ handle = acpi_pci_get_bridge_handle(pbus);
+ if (handle)
+ break;
+ }
+
+ /*
+ * _HPP settings apply to all child buses, until another _HPP is
+ * encountered. If we don't find an _HPP for the input pci dev,
+ * look for it in the parent device scope since that would apply to
+ * this pci dev.
+ */
+ while (handle) {
+ status = acpi_run_hpx(handle, hpp);
+ if (ACPI_SUCCESS(status))
+ return 0;
+ status = acpi_run_hpp(handle, hpp);
+ if (ACPI_SUCCESS(status))
+ return 0;
+ if (acpi_is_root_bridge(handle))
+ break;
+ status = acpi_get_parent(handle, &phandle);
+ if (ACPI_FAILURE(status))
+ break;
+ handle = phandle;
+ }
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(pci_get_hp_params);
+
+/**
+ * pci_acpi_wake_bus - Root bus wakeup notification work function.
+ * @work: Work item to handle.
+ */
+static void pci_acpi_wake_bus(struct work_struct *work)
+{
+ struct acpi_device *adev;
+ struct acpi_pci_root *root;
+
+ adev = container_of(work, struct acpi_device, wakeup.context.work);
+ root = acpi_driver_data(adev);
+ pci_pme_wakeup_bus(root->bus);
+}
+
+/**
+ * pci_acpi_wake_dev - PCI device wakeup notification work function.
+ * @work: Work item to handle.
+ */
+static void pci_acpi_wake_dev(struct work_struct *work)
+{
+ struct acpi_device_wakeup_context *context;
+ struct pci_dev *pci_dev;
+
+ context = container_of(work, struct acpi_device_wakeup_context, work);
+ pci_dev = to_pci_dev(context->dev);
+
+ if (pci_dev->pme_poll)
+ pci_dev->pme_poll = false;
+
+ if (pci_dev->current_state == PCI_D3cold) {
+ pci_wakeup_event(pci_dev);
+ pm_runtime_resume(&pci_dev->dev);
+ return;
+ }
+
+ /* Clear PME Status if set. */
+ if (pci_dev->pme_support)
+ pci_check_pme_status(pci_dev);
+
+ pci_wakeup_event(pci_dev);
+ pm_runtime_resume(&pci_dev->dev);
+
+ pci_pme_wakeup_bus(pci_dev->subordinate);
+}
+
+/**
+ * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
+ * @dev: PCI root bridge ACPI device.
+ */
+acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
+{
+ return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
+}
+
+/**
+ * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
+ * @dev: ACPI device to add the notifier for.
+ * @pci_dev: PCI device to check for the PME status if an event is signaled.
+ */
+acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
+ struct pci_dev *pci_dev)
+{
+ return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
+}
+
+/*
+ * _SxD returns the D-state with the highest power
+ * (lowest D-state number) supported in the S-state "x".
+ *
+ * If the device does not have a _PRW
+ * (Power Resources for Wake) supporting system wakeup from "x"
+ * then the OS is free to choose a lower power (higher number
+ * D-state) than the return value from _SxD.
+ *
+ * But if _PRW is enabled at S-state "x", the OS
+ * must not choose a power lower than _SxD --
+ * unless the device has an _SxW method specifying
+ * the lowest power (highest D-state number) the device
+ * may enter while still able to wake the system.
+ *
+ * i.e. depending on global OS policy:
+ *
+ * if (_PRW at S-state x)
+ * choose from highest power _SxD to lowest power _SxW
+ * else // no _PRW at S-state x
+ * choose highest power _SxD or any lower power
+ */
+
+static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
+{
+ int acpi_state, d_max;
+
+ if (pdev->no_d3cold)
+ d_max = ACPI_STATE_D3_HOT;
+ else
+ d_max = ACPI_STATE_D3_COLD;
+ acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
+ if (acpi_state < 0)
+ return PCI_POWER_ERROR;
+
+ switch (acpi_state) {
+ case ACPI_STATE_D0:
+ return PCI_D0;
+ case ACPI_STATE_D1:
+ return PCI_D1;
+ case ACPI_STATE_D2:
+ return PCI_D2;
+ case ACPI_STATE_D3_HOT:
+ return PCI_D3hot;
+ case ACPI_STATE_D3_COLD:
+ return PCI_D3cold;
+ }
+ return PCI_POWER_ERROR;
+}
+
+static bool acpi_pci_power_manageable(struct pci_dev *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+ return adev ? acpi_device_power_manageable(adev) : false;
+}
+
+static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+ static const u8 state_conv[] = {
+ [PCI_D0] = ACPI_STATE_D0,
+ [PCI_D1] = ACPI_STATE_D1,
+ [PCI_D2] = ACPI_STATE_D2,
+ [PCI_D3hot] = ACPI_STATE_D3_COLD,
+ [PCI_D3cold] = ACPI_STATE_D3_COLD,
+ };
+ int error = -EINVAL;
+
+ /* If the ACPI device has _EJ0, ignore the device */
+ if (!adev || acpi_has_method(adev->handle, "_EJ0"))
+ return -ENODEV;
+
+ switch (state) {
+ case PCI_D3cold:
+ if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
+ PM_QOS_FLAGS_ALL) {
+ error = -EBUSY;
+ break;
+ }
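+ /* fall through */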
+ case PCI_D0:
+ case PCI_D1:
+ case PCI_D2:
+ case PCI_D3hot:
+ error = acpi_device_set_power(adev, state_conv[state]);
+ }
+
+ if (!error)
+ dev_dbg(&dev->dev, "power state changed by ACPI to %s\n",
+ acpi_power_state_string(state_conv[state]));
+
+ return error;
+}
+
+static bool acpi_pci_can_wakeup(struct pci_dev *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+ return adev ? acpi_device_can_wakeup(adev) : false;
+}
+
+static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
+{
+ while (bus->parent) {
+ if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable))
+ return;
+ bus = bus->parent;
+ }
+
+ /* We have reached the root bus. */
+ if (bus->bridge)
+ acpi_pm_device_sleep_wake(bus->bridge, enable);
+}
+
+static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
+{
+ if (acpi_pci_can_wakeup(dev))
+ return acpi_pm_device_sleep_wake(&dev->dev, enable);
+
+ acpi_pci_propagate_wakeup_enable(dev->bus, enable);
+ return 0;
+}
+
+static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
+{
+ while (bus->parent) {
+ struct pci_dev *bridge = bus->self;
+
+ if (bridge->pme_interrupt)
+ return;
+ if (!acpi_pm_device_run_wake(&bridge->dev, enable))
+ return;
+ bus = bus->parent;
+ }
+
+ /* We have reached the root bus. */
+ if (bus->bridge)
+ acpi_pm_device_run_wake(bus->bridge, enable);
+}
+
+static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
+{
+ /*
+ * Per PCI Express Base Specification Revision 2.0 section
+ * 5.3.3.2 Link Wakeup, platform support is needed for D3cold
+ * waking up to power on the main link even if there is PME
+ * support for D3cold.
+ */
+ if (dev->pme_interrupt && !dev->runtime_d3cold)
+ return 0;
+
+ if (!acpi_pm_device_run_wake(&dev->dev, enable))
+ return 0;
+
+ acpi_pci_propagate_run_wake(dev->bus, enable);
+ return 0;
+}
+
+static bool acpi_pci_need_resume(struct pci_dev *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+
+ if (!adev || !acpi_device_power_manageable(adev))
+ return false;
+
+ if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
+ return true;
+
+ if (acpi_target_system_state() == ACPI_STATE_S0)
+ return false;
+
+ return !!adev->power.flags.dsw_present;
+}
+
+static struct pci_platform_pm_ops acpi_pci_platform_pm = {
+ .is_manageable = acpi_pci_power_manageable,
+ .set_state = acpi_pci_set_power_state,
+ .choose_state = acpi_pci_choose_state,
+ .sleep_wake = acpi_pci_sleep_wake,
+ .run_wake = acpi_pci_run_wake,
+ .need_resume = acpi_pci_need_resume,
+};
+
+void acpi_pci_add_bus(struct pci_bus *bus)
+{
+ union acpi_object *obj;
+ struct pci_host_bridge *bridge;
+
+ if (acpi_pci_disabled || !bus->bridge)
+ return;
+
+ acpi_pci_slot_enumerate(bus);
+ acpiphp_enumerate_slots(bus);
+
+ /*
+ * For a host bridge, check its _DSM for function 8 and if
+ * that is available, mark it in pci_host_bridge.
+ */
+ if (!pci_is_root_bus(bus))
+ return;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), pci_acpi_dsm_uuid, 3,
+ RESET_DELAY_DSM, NULL);
+ if (!obj)
+ return;
+
+ if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
+ bridge = pci_find_host_bridge(bus);
+ bridge->ignore_reset_delay = 1;
+ }
+ ACPI_FREE(obj);
+}
+
+void acpi_pci_remove_bus(struct pci_bus *bus)
+{
+ if (acpi_pci_disabled || !bus->bridge)
+ return;
+
+ acpiphp_remove_slots(bus);
+ acpi_pci_slot_remove(bus);
+}
+
+/* ACPI bus type */
+static struct acpi_device *acpi_pci_find_companion(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ bool check_children;
+ u64 addr;
+
+ check_children = pci_is_bridge(pci_dev);
+ /* Please refer to the ACPI spec for the syntax of _ADR */
+ addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
+ return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
+ check_children);
+}
+
+/**
+ * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
+ * @pdev: the PCI device whose delay is to be updated
+ * @adev: the companion ACPI device of this PCI device
+ *
+ * Update the d3_delay and d3cold_delay of a PCI device from the ACPI _DSM
+ * control method of either the device itself or the PCI host bridge.
+ *
+ * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
+ * host bridge. If it returns one, the OS may assume that all devices in
+ * the hierarchy have already completed power-on reset delays.
+ *
+ * Function 9, "Device Readiness Durations," applies only to the object
+ * where it is located. It returns delay durations required after various
+ * events if the device requires less time than the spec requires. Delays
+ * from this function take precedence over the Reset Delay function.
+ *
+ * These _DSM functions are defined by the draft ECN of January 28, 2014,
+ * titled "ACPI additions for FW latency optimizations."
+ */
+static void pci_acpi_optimize_delay(struct pci_dev *pdev,
+ acpi_handle handle)
+{
+ struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
+ int value;
+ union acpi_object *obj, *elements;
+
+ if (bridge->ignore_reset_delay)
+ pdev->d3cold_delay = 0;
+
+ obj = acpi_evaluate_dsm(handle, pci_acpi_dsm_uuid, 3,
+ FUNCTION_DELAY_DSM, NULL);
+ if (!obj)
+ return;
+
+ if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
+ elements = obj->package.elements;
+ if (elements[0].type == ACPI_TYPE_INTEGER) {
+ value = (int)elements[0].integer.value / 1000;
+ if (value < PCI_PM_D3COLD_WAIT)
+ pdev->d3cold_delay = value;
+ }
+ if (elements[3].type == ACPI_TYPE_INTEGER) {
+ value = (int)elements[3].integer.value / 1000;
+ if (value < PCI_PM_D3_WAIT)
+ pdev->d3_delay = value;
+ }
+ }
+ ACPI_FREE(obj);
+}
+
+static void pci_acpi_setup(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (!adev)
+ return;
+
+ pci_acpi_optimize_delay(pci_dev, adev->handle);
+
+ pci_acpi_add_pm_notifier(adev, pci_dev);
+ if (!adev->wakeup.flags.valid)
+ return;
+
+ device_set_wakeup_capable(dev, true);
+ acpi_pci_sleep_wake(pci_dev, false);
+ if (adev->wakeup.flags.run_wake)
+ device_set_run_wake(dev, true);
+}
+
+static void pci_acpi_cleanup(struct device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (!adev)
+ return;
+
+ pci_acpi_remove_pm_notifier(adev);
+ if (adev->wakeup.flags.valid) {
+ device_set_wakeup_capable(dev, false);
+ device_set_run_wake(dev, false);
+ }
+}
+
+static bool pci_acpi_bus_match(struct device *dev)
+{
+ return dev_is_pci(dev);
+}
+
+static struct acpi_bus_type acpi_pci_bus = {
+ .name = "PCI",
+ .match = pci_acpi_bus_match,
+ .find_companion = acpi_pci_find_companion,
+ .setup = pci_acpi_setup,
+ .cleanup = pci_acpi_cleanup,
+};
+
+static int __init acpi_pci_init(void)
+{
+ int ret;
+
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
+ pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
+ pci_no_msi();
+ }
+
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
+ pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
+ pcie_no_aspm();
+ }
+
+ ret = register_acpi_bus_type(&acpi_pci_bus);
+ if (ret)
+ return 0;
+
+ pci_set_platform_pm(&acpi_pci_platform_pm);
+ acpi_pci_slot_init();
+ acpiphp_init();
+
+ return 0;
+}
+arch_initcall(acpi_pci_init);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
new file mode 100644
index 000000000..3cb2210de
--- /dev/null
+++ b/drivers/pci/pci-driver.c
@@ -0,0 +1,1415 @@
+/*
+ * drivers/pci/pci-driver.c
+ *
+ * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <greg@kroah.com>
+ * (C) Copyright 2007 Novell Inc.
+ *
+ * Released under the GPL v2 only.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/mempolicy.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/cpu.h>
+#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
+#include <linux/kexec.h>
+#include "pci.h"
+
+struct pci_dynid {
+ struct list_head node;
+ struct pci_device_id id;
+};
+
+/**
+ * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices
+ * @drv: target pci driver
+ * @vendor: PCI vendor ID
+ * @device: PCI device ID
+ * @subvendor: PCI subvendor ID
+ * @subdevice: PCI subdevice ID
+ * @class: PCI class
+ * @class_mask: PCI class mask
+ * @driver_data: private driver data
+ *
+ * Adds a new dynamic pci device ID to this driver and causes the
+ * driver to probe for all devices again. @drv must have been
+ * registered prior to calling this function.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int pci_add_dynid(struct pci_driver *drv,
+ unsigned int vendor, unsigned int device,
+ unsigned int subvendor, unsigned int subdevice,
+ unsigned int class, unsigned int class_mask,
+ unsigned long driver_data)
+{
+ struct pci_dynid *dynid;
+
+ dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
+ if (!dynid)
+ return -ENOMEM;
+
+ dynid->id.vendor = vendor;
+ dynid->id.device = device;
+ dynid->id.subvendor = subvendor;
+ dynid->id.subdevice = subdevice;
+ dynid->id.class = class;
+ dynid->id.class_mask = class_mask;
+ dynid->id.driver_data = driver_data;
+
+ spin_lock(&drv->dynids.lock);
+ list_add_tail(&dynid->node, &drv->dynids.list);
+ spin_unlock(&drv->dynids.lock);
+
+ return driver_attach(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(pci_add_dynid);
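+
+/*
+ * Illustrative sketch (not part of this file): a hypothetical driver
+ * "foo" could bind to an additional device ID discovered at runtime;
+ * the vendor/device values below are made up:
+ *
+ *	rc = pci_add_dynid(&foo_driver, 0x8086, 0x1234,
+ *			   PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0);
+ *
+ * The call re-probes unbound devices against the extended ID list.
+ */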
+
+static void pci_free_dynids(struct pci_driver *drv)
+{
+ struct pci_dynid *dynid, *n;
+
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
+ list_del(&dynid->node);
+ kfree(dynid);
+ }
+ spin_unlock(&drv->dynids.lock);
+}
+
+/**
+ * store_new_id - sysfs frontend to pci_add_dynid()
+ * @driver: target device driver
+ * @buf: buffer for scanning device ID data
+ * @count: input size
+ *
+ * Allow PCI IDs to be added to an existing driver via sysfs.
+ */
+static ssize_t store_new_id(struct device_driver *driver, const char *buf,
+ size_t count)
+{
+ struct pci_driver *pdrv = to_pci_driver(driver);
+ const struct pci_device_id *ids = pdrv->id_table;
+ __u32 vendor, device, subvendor = PCI_ANY_ID,
+ subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
+ unsigned long driver_data = 0;
+ int fields = 0;
+ int retval = 0;
+
+ fields = sscanf(buf, "%x %x %x %x %x %x %lx",
+ &vendor, &device, &subvendor, &subdevice,
+ &class, &class_mask, &driver_data);
+ if (fields < 2)
+ return -EINVAL;
+
+ if (fields != 7) {
+ struct pci_dev *pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
+ if (!pdev)
+ return -ENOMEM;
+
+ pdev->vendor = vendor;
+ pdev->device = device;
+ pdev->subsystem_vendor = subvendor;
+ pdev->subsystem_device = subdevice;
+ pdev->class = class;
+
+ if (pci_match_id(pdrv->id_table, pdev))
+ retval = -EEXIST;
+
+ kfree(pdev);
+
+ if (retval)
+ return retval;
+ }
+
+ /*
+ * Only accept driver_data values that match an existing id_table
+ * entry.
+ */
+ if (ids) {
+ retval = -EINVAL;
+ while (ids->vendor || ids->subvendor || ids->class_mask) {
+ if (driver_data == ids->driver_data) {
+ retval = 0;
+ break;
+ }
+ ids++;
+ }
+ if (retval) /* No match */
+ return retval;
+ }
+
+ retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice,
+ class, class_mask, driver_data);
+ if (retval)
+ return retval;
+ return count;
+}
+static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
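+
+/*
+ * Illustrative sketch (not part of this file): from user space the same
+ * path is reached through sysfs, e.g. for a hypothetical driver "foo":
+ *
+ *	echo "8086 1234" > /sys/bus/pci/drivers/foo/new_id
+ *
+ * Vendor and device ID are required; subvendor, subdevice, class,
+ * class_mask and driver_data are optional and default as above.
+ */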
+
+/**
+ * store_remove_id - remove a PCI device ID from this driver
+ * @driver: target device driver
+ * @buf: buffer for scanning device ID data
+ * @count: input size
+ *
+ * Removes a dynamic pci device ID from this driver.
+ */
+static ssize_t store_remove_id(struct device_driver *driver, const char *buf,
+ size_t count)
+{
+ struct pci_dynid *dynid, *n;
+ struct pci_driver *pdrv = to_pci_driver(driver);
+ __u32 vendor, device, subvendor = PCI_ANY_ID,
+ subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
+ int fields = 0;
+ int retval = -ENODEV;
+
+ fields = sscanf(buf, "%x %x %x %x %x %x",
+ &vendor, &device, &subvendor, &subdevice,
+ &class, &class_mask);
+ if (fields < 2)
+ return -EINVAL;
+
+ spin_lock(&pdrv->dynids.lock);
+ list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
+ struct pci_device_id *id = &dynid->id;
+ if ((id->vendor == vendor) &&
+ (id->device == device) &&
+ (subvendor == PCI_ANY_ID || id->subvendor == subvendor) &&
+ (subdevice == PCI_ANY_ID || id->subdevice == subdevice) &&
+ !((id->class ^ class) & class_mask)) {
+ list_del(&dynid->node);
+ kfree(dynid);
+ retval = 0;
+ break;
+ }
+ }
+ spin_unlock(&pdrv->dynids.lock);
+
+ if (retval)
+ return retval;
+ return count;
+}
+static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
+
+static struct attribute *pci_drv_attrs[] = {
+ &driver_attr_new_id.attr,
+ &driver_attr_remove_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(pci_drv);
+
+/**
+ * pci_match_id - See if a pci device matches a given pci_id table
+ * @ids: array of PCI device id structures to search in
+ * @dev: the PCI device structure to match against.
+ *
+ * Used by a driver to check whether a PCI device present in the
+ * system is in its list of supported devices. Returns the matching
+ * pci_device_id structure or %NULL if there is no match.
+ *
+ * Deprecated, don't use this as it will not catch any dynamic ids
+ * that a driver might want to check for.
+ */
+const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
+ struct pci_dev *dev)
+{
+ if (ids) {
+ while (ids->vendor || ids->subvendor || ids->class_mask) {
+ if (pci_match_one_device(ids, dev))
+ return ids;
+ ids++;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(pci_match_id);
+
+static const struct pci_device_id pci_device_id_any = {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+};
+
+/**
+ * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
+ * @drv: the PCI driver to match against
+ * @dev: the PCI device structure to match against
+ *
+ * Used by a driver to check whether a PCI device present in the
+ * system is in its list of supported devices. Returns the matching
+ * pci_device_id structure or %NULL if there is no match.
+ */
+static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
+ struct pci_dev *dev)
+{
+ struct pci_dynid *dynid;
+ const struct pci_device_id *found_id = NULL;
+
+ /* When driver_override is set, only bind to the matching driver */
+ if (dev->driver_override && strcmp(dev->driver_override, drv->name))
+ return NULL;
+
+ /* Look at the dynamic ids first, before the static ones */
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry(dynid, &drv->dynids.list, node) {
+ if (pci_match_one_device(&dynid->id, dev)) {
+ found_id = &dynid->id;
+ break;
+ }
+ }
+ spin_unlock(&drv->dynids.lock);
+
+ if (!found_id)
+ found_id = pci_match_id(drv->id_table, dev);
+
+ /* driver_override will always match, send a dummy id */
+ if (!found_id && dev->driver_override)
+ found_id = &pci_device_id_any;
+
+ return found_id;
+}
+
+struct drv_dev_and_id {
+ struct pci_driver *drv;
+ struct pci_dev *dev;
+ const struct pci_device_id *id;
+};
+
+static long local_pci_probe(void *_ddi)
+{
+ struct drv_dev_and_id *ddi = _ddi;
+ struct pci_dev *pci_dev = ddi->dev;
+ struct pci_driver *pci_drv = ddi->drv;
+ struct device *dev = &pci_dev->dev;
+ int rc;
+
+ /*
+ * Unbound PCI devices are always put in D0, regardless of
+ * runtime PM status. During probe, the device is set to
+ * active and the usage count is incremented. If the driver
+ * supports runtime PM, it should call pm_runtime_put_noidle()
+ * in its probe routine and pm_runtime_get_noresume() in its
+ * remove routine.
+ */
+ pm_runtime_get_sync(dev);
+ pci_dev->driver = pci_drv;
+ rc = pci_drv->probe(pci_dev, ddi->id);
+ if (!rc)
+ return rc;
+ if (rc < 0) {
+ pci_dev->driver = NULL;
+ pm_runtime_put_sync(dev);
+ return rc;
+ }
+ /*
+ * Probe function should return < 0 for failure, 0 for success.
+ * Treat values > 0 as success, but warn.
+ */
+ dev_warn(dev, "Driver probe function unexpectedly returned %d\n", rc);
+ return 0;
+}
+
+static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ int error, node;
+ struct drv_dev_and_id ddi = { drv, dev, id };
+
+ /*
+ * Execute driver initialization on node where the device is
+ * attached. This way the driver likely allocates its local memory
+ * on the right node.
+ */
+ node = dev_to_node(&dev->dev);
+
+ /*
+ * On NUMA systems, we are likely to call a PF probe function using
+ * work_on_cpu(). If that probe calls pci_enable_sriov() (which
+ * adds the VF devices via pci_bus_add_device()), we may re-enter
+ * this function to call the VF probe function. Calling
+ * work_on_cpu() again will cause a lockdep warning. Since VFs are
+ * always on the same node as the PF, we can work around this by
+ * avoiding work_on_cpu() when we're already on the correct node.
+ *
+ * Preemption is enabled, so it's theoretically unsafe to use
+ * numa_node_id(), but even if we run the probe function on the
+ * wrong node, it should be functionally correct.
+ */
+ if (node >= 0 && node != numa_node_id()) {
+ int cpu;
+
+ get_online_cpus();
+ cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
+ if (cpu < nr_cpu_ids)
+ error = work_on_cpu(cpu, local_pci_probe, &ddi);
+ else
+ error = local_pci_probe(&ddi);
+ put_online_cpus();
+ } else
+ error = local_pci_probe(&ddi);
+
+ return error;
+}
+
+/**
+ * __pci_device_probe - check if a driver wants to claim a specific PCI device
+ * @drv: driver to call to check if it wants the PCI device
+ * @pci_dev: PCI device being probed
+ *
+ * Returns 0 on success, else error.
+ * Side effect: pci_dev->driver is set to drv when drv claims pci_dev.
+ */
+static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
+{
+ const struct pci_device_id *id;
+ int error = 0;
+
+ if (!pci_dev->driver && drv->probe) {
+ error = -ENODEV;
+
+ id = pci_match_device(drv, pci_dev);
+ if (id)
+ error = pci_call_probe(drv, pci_dev, id);
+ if (error >= 0)
+ error = 0;
+ }
+ return error;
+}
+
+static int pci_device_probe(struct device *dev)
+{
+ int error = 0;
+ struct pci_driver *drv;
+ struct pci_dev *pci_dev;
+
+ drv = to_pci_driver(dev->driver);
+ pci_dev = to_pci_dev(dev);
+ pci_dev_get(pci_dev);
+ error = __pci_device_probe(drv, pci_dev);
+ if (error)
+ pci_dev_put(pci_dev);
+
+ return error;
+}
+
+static int pci_device_remove(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
+
+ if (drv) {
+ if (drv->remove) {
+ pm_runtime_get_sync(dev);
+ drv->remove(pci_dev);
+ pm_runtime_put_noidle(dev);
+ }
+ pci_dev->driver = NULL;
+ }
+
+ /* Undo the runtime PM settings in local_pci_probe() */
+ pm_runtime_put_sync(dev);
+
+ /*
+ * If the device is still on, set the power state as "unknown",
+ * since it might change by the next time we load the driver.
+ */
+ if (pci_dev->current_state == PCI_D0)
+ pci_dev->current_state = PCI_UNKNOWN;
+
+ /*
+ * We would love to complain here if pci_dev->is_enabled is set, since
+ * the driver should have called pci_disable_device(), but the
+ * unfortunate fact is that there are too many odd BIOS and bridge setups
+ * that don't like drivers doing that all of the time.
+ * Oh well, we can dream of sane hardware when we sleep, no matter how
+ * horrible the crap we have to deal with is when we are awake...
+ */
+
+ pci_dev_put(pci_dev);
+ return 0;
+}
+
+static void pci_device_shutdown(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
+
+ pm_runtime_resume(dev);
+
+ if (drv && drv->shutdown)
+ drv->shutdown(pci_dev);
+ pci_msi_shutdown(pci_dev);
+ pci_msix_shutdown(pci_dev);
+
+#ifdef CONFIG_KEXEC
+ /*
+ * If this is a kexec reboot, turn off the Bus Master bit on the
+ * device to tell it not to continue doing DMA. Don't touch
+ * devices in D3cold or unknown states.
+ * If it is not a kexec reboot, firmware will hit the PCI
+ * devices with a big hammer and stop their DMA anyway.
+ */
+ if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
+ pci_clear_master(pci_dev);
+#endif
+}
+
+#ifdef CONFIG_PM
+
+/* Auxiliary functions used for system resume and run-time resume. */
+
+/**
+ * pci_restore_standard_config - restore standard config registers of PCI device
+ * @pci_dev: PCI device to handle
+ */
+static int pci_restore_standard_config(struct pci_dev *pci_dev)
+{
+ pci_update_current_state(pci_dev, PCI_UNKNOWN);
+
+ if (pci_dev->current_state != PCI_D0) {
+ int error = pci_set_power_state(pci_dev, PCI_D0);
+ if (error)
+ return error;
+ }
+
+ pci_restore_state(pci_dev);
+ return 0;
+}
+
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+
+static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
+{
+ pci_power_up(pci_dev);
+ pci_restore_state(pci_dev);
+ pci_fixup_device(pci_fixup_resume_early, pci_dev);
+}
+
+/*
+ * Default "suspend" method for devices that have no driver-provided suspend,
+ * or not even a driver at all (second part).
+ */
+static void pci_pm_set_unknown_state(struct pci_dev *pci_dev)
+{
+ /*
+ * Mark the device's power state as "unknown", since we don't know
+ * whether e.g. the BIOS will change its state when we suspend.
+ */
+ if (pci_dev->current_state == PCI_D0)
+ pci_dev->current_state = PCI_UNKNOWN;
+}
+
+/*
+ * Default "resume" method for devices that have no driver-provided resume,
+ * or not even a driver at all (second part).
+ */
+static int pci_pm_reenable_device(struct pci_dev *pci_dev)
+{
+ int retval;
+
+ /* If the device was enabled before suspend, re-enable it */
+ retval = pci_reenable_device(pci_dev);
+ /*
+ * If the device was a bus master before the suspend, make it a bus
+ * master again.
+ */
+ if (pci_dev->is_busmaster)
+ pci_set_master(pci_dev);
+
+ return retval;
+}
+
+static int pci_legacy_suspend(struct device *dev, pm_message_t state)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
+
+ if (drv && drv->suspend) {
+ pci_power_t prev = pci_dev->current_state;
+ int error;
+
+ error = drv->suspend(pci_dev, state);
+ suspend_report_result(drv->suspend, error);
+ if (error)
+ return error;
+
+ if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
+ && pci_dev->current_state != PCI_UNKNOWN) {
+ WARN_ONCE(pci_dev->current_state != prev,
+ "PCI PM: Device state not saved by %pF\n",
+ drv->suspend);
+ }
+ }
+
+ pci_fixup_device(pci_fixup_suspend, pci_dev);
+
+ return 0;
+}
+
+static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
+
+ if (drv && drv->suspend_late) {
+ pci_power_t prev = pci_dev->current_state;
+ int error;
+
+ error = drv->suspend_late(pci_dev, state);
+ suspend_report_result(drv->suspend_late, error);
+ if (error)
+ return error;
+
+ if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
+ && pci_dev->current_state != PCI_UNKNOWN) {
+ WARN_ONCE(pci_dev->current_state != prev,
+ "PCI PM: Device state not saved by %pF\n",
+ drv->suspend_late);
+ goto Fixup;
+ }
+ }
+
+ if (!pci_dev->state_saved)
+ pci_save_state(pci_dev);
+
+ pci_pm_set_unknown_state(pci_dev);
+
+Fixup:
+ pci_fixup_device(pci_fixup_suspend_late, pci_dev);
+
+ return 0;
+}
+
+static int pci_legacy_resume_early(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
+
+ return drv && drv->resume_early ?
+ drv->resume_early(pci_dev) : 0;
+}
+
+static int pci_legacy_resume(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
+
+ pci_fixup_device(pci_fixup_resume, pci_dev);
+
+ return drv && drv->resume ?
+ drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
+}
+
+/* Auxiliary functions used by the new power management framework */
+
+static void pci_pm_default_resume(struct pci_dev *pci_dev)
+{
+ pci_fixup_device(pci_fixup_resume, pci_dev);
+
+ if (!pci_has_subordinate(pci_dev))
+ pci_enable_wake(pci_dev, PCI_D0, false);
+}
+
+static void pci_pm_default_suspend(struct pci_dev *pci_dev)
+{
+ /* Disable non-bridge devices without PM support */
+ if (!pci_has_subordinate(pci_dev))
+ pci_disable_enabled_device(pci_dev);
+}
+
+static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
+{
+ struct pci_driver *drv = pci_dev->driver;
+ bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume
+ || drv->resume_early);
+
+ /*
+ * Legacy PM support is used by default, so warn if the new framework is
+ * supported as well. Drivers are supposed to support either the
+ * former, or the latter, but not both at the same time.
+ */
+ WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n",
+ drv->name, pci_dev->vendor, pci_dev->device);
+
+ return ret;
+}
+
+/* New power management framework */
+
+static int pci_pm_prepare(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+
+ /*
+ * Devices having power.ignore_children set may still be necessary for
+ * suspending their children in the next phase of device suspend.
+ */
+ if (dev->power.ignore_children)
+ pm_runtime_resume(dev);
+
+ if (drv && drv->pm && drv->pm->prepare) {
+ int error = drv->pm->prepare(dev);
+ if (error)
+ return error;
+ }
+ return pci_dev_keep_suspended(to_pci_dev(dev));
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define pci_pm_prepare NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_SUSPEND
+
+static int pci_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_suspend(dev, PMSG_SUSPEND);
+
+ if (!pm) {
+ pci_pm_default_suspend(pci_dev);
+ goto Fixup;
+ }
+
+ /*
+ * PCI devices suspended at run time need to be resumed at this point,
+ * because in general it is necessary to reconfigure them for system
+ * suspend. Namely, if the device is supposed to wake up the system
+ * from the sleep state, we may need to reconfigure it for this purpose.
+ * In turn, if the device is not supposed to wake up the system from the
+ * sleep state, we'll have to prevent it from signaling wake-up.
+ */
+ pm_runtime_resume(dev);
+
+ pci_dev->state_saved = false;
+ if (pm->suspend) {
+ pci_power_t prev = pci_dev->current_state;
+ int error;
+
+ error = pm->suspend(dev);
+ suspend_report_result(pm->suspend, error);
+ if (error)
+ return error;
+
+ if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
+ && pci_dev->current_state != PCI_UNKNOWN) {
+ WARN_ONCE(pci_dev->current_state != prev,
+ "PCI PM: State of device not saved by %pF\n",
+ pm->suspend);
+ }
+ }
+
+ Fixup:
+ pci_fixup_device(pci_fixup_suspend, pci_dev);
+
+ return 0;
+}
+
+static int pci_pm_suspend_noirq(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
+
+ if (!pm) {
+ pci_save_state(pci_dev);
+ goto Fixup;
+ }
+
+ if (pm->suspend_noirq) {
+ pci_power_t prev = pci_dev->current_state;
+ int error;
+
+ error = pm->suspend_noirq(dev);
+ suspend_report_result(pm->suspend_noirq, error);
+ if (error)
+ return error;
+
+ if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
+ && pci_dev->current_state != PCI_UNKNOWN) {
+ WARN_ONCE(pci_dev->current_state != prev,
+ "PCI PM: State of device not saved by %pF\n",
+ pm->suspend_noirq);
+ goto Fixup;
+ }
+ }
+
+ if (!pci_dev->state_saved) {
+ pci_save_state(pci_dev);
+ if (!pci_has_subordinate(pci_dev))
+ pci_prepare_to_sleep(pci_dev);
+ }
+
+ pci_pm_set_unknown_state(pci_dev);
+
+ /*
+ * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
+ * PCI COMMAND register isn't 0, the BIOS assumes that the controller
+ * hasn't been quiesced and tries to turn it off. If the controller
+ * is already in D3, this can hang or cause memory corruption.
+ *
+ * Since the value of the COMMAND register doesn't matter once the
+ * device has been suspended, we can safely set it to 0 here.
+ */
+ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+ pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+
+Fixup:
+ pci_fixup_device(pci_fixup_suspend_late, pci_dev);
+
+ return 0;
+}
+
+static int pci_pm_resume_noirq(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct device_driver *drv = dev->driver;
+ int error = 0;
+
+ pci_pm_default_resume_early(pci_dev);
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_resume_early(dev);
+
+ if (drv && drv->pm && drv->pm->resume_noirq)
+ error = drv->pm->resume_noirq(dev);
+
+ return error;
+}
+
+static int pci_pm_resume(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int error = 0;
+
+ /*
+ * This is necessary for the suspend error path in which resume is
+ * called without restoring the standard config registers of the device.
+ */
+ if (pci_dev->state_saved)
+ pci_restore_standard_config(pci_dev);
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_resume(dev);
+
+ pci_pm_default_resume(pci_dev);
+
+ if (pm) {
+ if (pm->resume)
+ error = pm->resume(dev);
+ } else {
+ pci_pm_reenable_device(pci_dev);
+ }
+
+ return error;
+}
+
+#else /* !CONFIG_SUSPEND */
+
+#define pci_pm_suspend NULL
+#define pci_pm_suspend_noirq NULL
+#define pci_pm_resume NULL
+#define pci_pm_resume_noirq NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+
+/*
+ * pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
+ * a hibernate transition
+ */
+struct dev_pm_ops __weak pcibios_pm_ops;
+
+static int pci_pm_freeze(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_suspend(dev, PMSG_FREEZE);
+
+ if (!pm) {
+ pci_pm_default_suspend(pci_dev);
+ return 0;
+ }
+
+ /*
+ * This used to be done in pci_pm_prepare() for all devices and some
+ * drivers may depend on it, so do it here. Ideally, runtime-suspended
+ * devices should not be touched during freeze/thaw transitions,
+ * however.
+ */
+ pm_runtime_resume(dev);
+
+ pci_dev->state_saved = false;
+ if (pm->freeze) {
+ int error;
+
+ error = pm->freeze(dev);
+ suspend_report_result(pm->freeze, error);
+ if (error)
+ return error;
+ }
+
+ if (pcibios_pm_ops.freeze)
+ return pcibios_pm_ops.freeze(dev);
+
+ return 0;
+}
+
+static int pci_pm_freeze_noirq(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct device_driver *drv = dev->driver;
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_suspend_late(dev, PMSG_FREEZE);
+
+ if (drv && drv->pm && drv->pm->freeze_noirq) {
+ int error;
+
+ error = drv->pm->freeze_noirq(dev);
+ suspend_report_result(drv->pm->freeze_noirq, error);
+ if (error)
+ return error;
+ }
+
+ if (!pci_dev->state_saved)
+ pci_save_state(pci_dev);
+
+ pci_pm_set_unknown_state(pci_dev);
+
+ if (pcibios_pm_ops.freeze_noirq)
+ return pcibios_pm_ops.freeze_noirq(dev);
+
+ return 0;
+}
+
+static int pci_pm_thaw_noirq(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct device_driver *drv = dev->driver;
+ int error = 0;
+
+ if (pcibios_pm_ops.thaw_noirq) {
+ error = pcibios_pm_ops.thaw_noirq(dev);
+ if (error)
+ return error;
+ }
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_resume_early(dev);
+
+ pci_update_current_state(pci_dev, PCI_D0);
+
+ if (drv && drv->pm && drv->pm->thaw_noirq)
+ error = drv->pm->thaw_noirq(dev);
+
+ return error;
+}
+
+static int pci_pm_thaw(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int error = 0;
+
+ if (pcibios_pm_ops.thaw) {
+ error = pcibios_pm_ops.thaw(dev);
+ if (error)
+ return error;
+ }
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_resume(dev);
+
+ if (pm) {
+ if (pm->thaw)
+ error = pm->thaw(dev);
+ } else {
+ pci_pm_reenable_device(pci_dev);
+ }
+
+ pci_dev->state_saved = false;
+
+ return error;
+}
+
+static int pci_pm_poweroff(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_suspend(dev, PMSG_HIBERNATE);
+
+ if (!pm) {
+ pci_pm_default_suspend(pci_dev);
+ goto Fixup;
+ }
+
+ /* The reason for doing this is the same as in pci_pm_suspend(). */
+ pm_runtime_resume(dev);
+
+ pci_dev->state_saved = false;
+ if (pm->poweroff) {
+ int error;
+
+ error = pm->poweroff(dev);
+ suspend_report_result(pm->poweroff, error);
+ if (error)
+ return error;
+ }
+
+ Fixup:
+ pci_fixup_device(pci_fixup_suspend, pci_dev);
+
+ if (pcibios_pm_ops.poweroff)
+ return pcibios_pm_ops.poweroff(dev);
+
+ return 0;
+}
+
+static int pci_pm_poweroff_noirq(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct device_driver *drv = dev->driver;
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
+
+ if (!drv || !drv->pm) {
+ pci_fixup_device(pci_fixup_suspend_late, pci_dev);
+ return 0;
+ }
+
+ if (drv->pm->poweroff_noirq) {
+ int error;
+
+ error = drv->pm->poweroff_noirq(dev);
+ suspend_report_result(drv->pm->poweroff_noirq, error);
+ if (error)
+ return error;
+ }
+
+ if (!pci_dev->state_saved && !pci_has_subordinate(pci_dev))
+ pci_prepare_to_sleep(pci_dev);
+
+ /*
+ * The reason for doing this here is the same as for the analogous code
+ * in pci_pm_suspend_noirq().
+ */
+ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+ pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+
+ pci_fixup_device(pci_fixup_suspend_late, pci_dev);
+
+ if (pcibios_pm_ops.poweroff_noirq)
+ return pcibios_pm_ops.poweroff_noirq(dev);
+
+ return 0;
+}
+
+static int pci_pm_restore_noirq(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct device_driver *drv = dev->driver;
+ int error = 0;
+
+ if (pcibios_pm_ops.restore_noirq) {
+ error = pcibios_pm_ops.restore_noirq(dev);
+ if (error)
+ return error;
+ }
+
+ pci_pm_default_resume_early(pci_dev);
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_resume_early(dev);
+
+ if (drv && drv->pm && drv->pm->restore_noirq)
+ error = drv->pm->restore_noirq(dev);
+
+ return error;
+}
+
+static int pci_pm_restore(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int error = 0;
+
+ if (pcibios_pm_ops.restore) {
+ error = pcibios_pm_ops.restore(dev);
+ if (error)
+ return error;
+ }
+
+ /*
+ * This is necessary for the hibernation error path in which restore is
+ * called without restoring the standard config registers of the device.
+ */
+ if (pci_dev->state_saved)
+ pci_restore_standard_config(pci_dev);
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_resume(dev);
+
+ pci_pm_default_resume(pci_dev);
+
+ if (pm) {
+ if (pm->restore)
+ error = pm->restore(dev);
+ } else {
+ pci_pm_reenable_device(pci_dev);
+ }
+
+ return error;
+}
+
+#else /* !CONFIG_HIBERNATE_CALLBACKS */
+
+#define pci_pm_freeze NULL
+#define pci_pm_freeze_noirq NULL
+#define pci_pm_thaw NULL
+#define pci_pm_thaw_noirq NULL
+#define pci_pm_poweroff NULL
+#define pci_pm_poweroff_noirq NULL
+#define pci_pm_restore NULL
+#define pci_pm_restore_noirq NULL
+
+#endif /* !CONFIG_HIBERNATE_CALLBACKS */
+
+#ifdef CONFIG_PM
+
+static int pci_pm_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ pci_power_t prev = pci_dev->current_state;
+ int error;
+
+ /*
+ * If pci_dev->driver is not set (unbound), the device should
+ * always remain in D0 regardless of the runtime PM status
+ */
+ if (!pci_dev->driver)
+ return 0;
+
+ if (!pm || !pm->runtime_suspend)
+ return -ENOSYS;
+
+ pci_dev->state_saved = false;
+ pci_dev->no_d3cold = false;
+ error = pm->runtime_suspend(dev);
+ suspend_report_result(pm->runtime_suspend, error);
+ if (error)
+ return error;
+ if (!pci_dev->d3cold_allowed)
+ pci_dev->no_d3cold = true;
+
+ pci_fixup_device(pci_fixup_suspend, pci_dev);
+
+ if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
+ && pci_dev->current_state != PCI_UNKNOWN) {
+ WARN_ONCE(pci_dev->current_state != prev,
+ "PCI PM: State of device not saved by %pF\n",
+ pm->runtime_suspend);
+ return 0;
+ }
+
+ if (!pci_dev->state_saved) {
+ pci_save_state(pci_dev);
+ pci_finish_runtime_suspend(pci_dev);
+ }
+
+ return 0;
+}
+
+static int pci_pm_runtime_resume(struct device *dev)
+{
+ int rc;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ /*
+ * If pci_dev->driver is not set (unbound), the device should
+ * always remain in D0 regardless of the runtime PM status
+ */
+ if (!pci_dev->driver)
+ return 0;
+
+ if (!pm || !pm->runtime_resume)
+ return -ENOSYS;
+
+ pci_restore_standard_config(pci_dev);
+ pci_fixup_device(pci_fixup_resume_early, pci_dev);
+ __pci_enable_wake(pci_dev, PCI_D0, true, false);
+ pci_fixup_device(pci_fixup_resume, pci_dev);
+
+ rc = pm->runtime_resume(dev);
+
+ pci_dev->runtime_d3cold = false;
+
+ return rc;
+}
+
+static int pci_pm_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret = 0;
+
+ /*
+ * If pci_dev->driver is not set (unbound), the device should
+ * always remain in D0 regardless of the runtime PM status
+ */
+ if (!pci_dev->driver)
+ return 0;
+
+ if (!pm)
+ return -ENOSYS;
+
+ if (pm->runtime_idle)
+ ret = pm->runtime_idle(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops pci_dev_pm_ops = {
+ .prepare = pci_pm_prepare,
+ .suspend = pci_pm_suspend,
+ .resume = pci_pm_resume,
+ .freeze = pci_pm_freeze,
+ .thaw = pci_pm_thaw,
+ .poweroff = pci_pm_poweroff,
+ .restore = pci_pm_restore,
+ .suspend_noirq = pci_pm_suspend_noirq,
+ .resume_noirq = pci_pm_resume_noirq,
+ .freeze_noirq = pci_pm_freeze_noirq,
+ .thaw_noirq = pci_pm_thaw_noirq,
+ .poweroff_noirq = pci_pm_poweroff_noirq,
+ .restore_noirq = pci_pm_restore_noirq,
+ .runtime_suspend = pci_pm_runtime_suspend,
+ .runtime_resume = pci_pm_runtime_resume,
+ .runtime_idle = pci_pm_runtime_idle,
+};
+
+#define PCI_PM_OPS_PTR (&pci_dev_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define pci_pm_runtime_suspend NULL
+#define pci_pm_runtime_resume NULL
+#define pci_pm_runtime_idle NULL
+
+#define PCI_PM_OPS_PTR NULL
+
+#endif /* !CONFIG_PM */
+
+/**
+ * __pci_register_driver - register a new pci driver
+ * @drv: the driver structure to register
+ * @owner: owner module of drv
+ * @mod_name: module name string
+ *
+ * Adds the driver structure to the list of registered drivers.
+ * Returns a negative value on error, otherwise 0.
+ * If no error occurred, the driver remains registered even if
+ * no device was claimed during registration.
+ */
+int __pci_register_driver(struct pci_driver *drv, struct module *owner,
+ const char *mod_name)
+{
+ /* initialize common driver fields */
+ drv->driver.name = drv->name;
+ drv->driver.bus = &pci_bus_type;
+ drv->driver.owner = owner;
+ drv->driver.mod_name = mod_name;
+
+ spin_lock_init(&drv->dynids.lock);
+ INIT_LIST_HEAD(&drv->dynids.list);
+
+ /* register with core */
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(__pci_register_driver);
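+
+/*
+ * Example (illustrative; "foo" names are hypothetical): modules normally
+ * call the pci_register_driver() wrapper, which supplies THIS_MODULE and
+ * KBUILD_MODNAME, or simply use:
+ *
+ *    module_pci_driver(foo_driver);
+ *
+ * which registers foo_driver on module init and unregisters it on exit.
+ */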
+
+/**
+ * pci_unregister_driver - unregister a pci driver
+ * @drv: the driver structure to unregister
+ *
+ * Deletes the driver structure from the list of registered PCI drivers,
+ * gives it a chance to clean up by calling its remove() function for
+ * each device it was responsible for, and marks those devices as
+ * driverless.
+ */
+void pci_unregister_driver(struct pci_driver *drv)
+{
+ driver_unregister(&drv->driver);
+ pci_free_dynids(drv);
+}
+EXPORT_SYMBOL(pci_unregister_driver);
+
+static struct pci_driver pci_compat_driver = {
+ .name = "compat"
+};
+
+/**
+ * pci_dev_driver - get the pci_driver of a device
+ * @dev: the device to query
+ *
+ * Returns the appropriate pci_driver structure or %NULL if there is no
+ * registered driver for the device.
+ */
+struct pci_driver *pci_dev_driver(const struct pci_dev *dev)
+{
+ if (dev->driver)
+ return dev->driver;
+ else {
+ int i;
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+ if (dev->resource[i].flags & IORESOURCE_BUSY)
+ return &pci_compat_driver;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(pci_dev_driver);
+
+/**
+ * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
+ * @dev: the PCI device structure to match against
+ * @drv: the device driver to search for matching PCI device id structures
+ *
+ * Used by the driver core to check whether a PCI device present in the
+ * system is in a driver's list of supported devices. Returns 1 if a
+ * matching pci_device_id is found, 0 otherwise.
+ */
+static int pci_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *pci_drv;
+ const struct pci_device_id *found_id;
+
+ if (!pci_dev->match_driver)
+ return 0;
+
+ pci_drv = to_pci_driver(drv);
+ found_id = pci_match_device(pci_drv, pci_dev);
+ if (found_id)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * pci_dev_get - increments the reference count of the pci device structure
+ * @dev: the device being referenced
+ *
+ * Each live reference to a device should be refcounted.
+ *
+ * Drivers for PCI devices should normally record such references in
+ * their probe() methods, when they bind to a device, and release
+ * them by calling pci_dev_put(), in their disconnect() methods.
+ *
+ * A pointer to the device with the incremented reference counter is returned.
+ */
+struct pci_dev *pci_dev_get(struct pci_dev *dev)
+{
+ if (dev)
+ get_device(&dev->dev);
+ return dev;
+}
+EXPORT_SYMBOL(pci_dev_get);
+
+/**
+ * pci_dev_put - release a use of the pci device structure
+ * @dev: device that's been disconnected
+ *
+ * Must be called when a user of a device is finished with it. When the last
+ * user of the device calls this function, the memory of the device is freed.
+ */
+void pci_dev_put(struct pci_dev *dev)
+{
+ if (dev)
+ put_device(&dev->dev);
+}
+EXPORT_SYMBOL(pci_dev_put);
+
+static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct pci_dev *pdev;
+
+ if (!dev)
+ return -ENODEV;
+
+ pdev = to_pci_dev(dev);
+
+ if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor,
+ pdev->subsystem_device))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device,
+ (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
+ (u8)(pdev->class)))
+ return -ENOMEM;
+
+ return 0;
+}
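+
+/*
+ * Example (hypothetical IDs): for vendor 0x8086, device 0x10f5, subsystem
+ * 0x17aa:0x20f0 and class 0x020000, the uevent above contains:
+ *
+ *    MODALIAS=pci:v00008086d000010F5sv000017AAsd000020F0bc02sc00i00
+ */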
+
+struct bus_type pci_bus_type = {
+ .name = "pci",
+ .match = pci_bus_match,
+ .uevent = pci_uevent,
+ .probe = pci_device_probe,
+ .remove = pci_device_remove,
+ .shutdown = pci_device_shutdown,
+ .dev_groups = pci_dev_groups,
+ .bus_groups = pci_bus_groups,
+ .drv_groups = pci_drv_groups,
+ .pm = PCI_PM_OPS_PTR,
+};
+EXPORT_SYMBOL(pci_bus_type);
+
+static int __init pci_driver_init(void)
+{
+ return bus_register(&pci_bus_type);
+}
+postcore_initcall(pci_driver_init);
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
new file mode 100644
index 000000000..024b5c179
--- /dev/null
+++ b/drivers/pci/pci-label.c
@@ -0,0 +1,306 @@
+/*
+ * Purpose: Export the firmware instance and label associated with
+ * a pci device to sysfs
+ * Copyright (C) 2010 Dell Inc.
+ * by Narendra K <Narendra_K@dell.com>,
+ * Jordan Hargrave <Jordan_Hargrave@dell.com>
+ *
+ * PCI Firmware Specification Revision 3.1 section 4.6.7 (DSM for Naming a
+ * PCI or PCI Express Device Under Operating Systems) defines an instance
+ * number and string name. This code retrieves them and exports them to sysfs.
+ * If the system firmware does not provide the ACPI _DSM (Device Specific
+ * Method), then the SMBIOS type 41 instance number and string is exported to
+ * sysfs.
+ *
+ * SMBIOS defines type 41 for onboard PCI devices; in that fallback case,
+ * the instance number and string are taken from the type 41 record.
+ *
+ * Please see http://linux.dell.com/wiki/index.php/Oss/libnetdevname for more
+ * information.
+ */
+
+#include <linux/dmi.h>
+#include <linux/sysfs.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/nls.h>
+#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
+#include "pci.h"
+
+#ifdef CONFIG_DMI
+enum smbios_attr_enum {
+ SMBIOS_ATTR_NONE = 0,
+ SMBIOS_ATTR_LABEL_SHOW,
+ SMBIOS_ATTR_INSTANCE_SHOW,
+};
+
+static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf,
+ enum smbios_attr_enum attribute)
+{
+ const struct dmi_device *dmi;
+ struct dmi_dev_onboard *donboard;
+ int bus;
+ int devfn;
+
+ bus = pdev->bus->number;
+ devfn = pdev->devfn;
+
+ dmi = NULL;
+ while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD,
+ NULL, dmi)) != NULL) {
+ donboard = dmi->device_data;
+ if (donboard && donboard->bus == bus &&
+ donboard->devfn == devfn) {
+ if (buf) {
+ if (attribute == SMBIOS_ATTR_INSTANCE_SHOW)
+ return scnprintf(buf, PAGE_SIZE,
+ "%d\n",
+ donboard->instance);
+ else if (attribute == SMBIOS_ATTR_LABEL_SHOW)
+ return scnprintf(buf, PAGE_SIZE,
+ "%s\n",
+ dmi->name);
+ }
+ return strlen(dmi->name);
+ }
+ }
+ return 0;
+}
+
+static umode_t smbios_instance_string_exist(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev;
+ struct pci_dev *pdev;
+
+ dev = container_of(kobj, struct device, kobj);
+ pdev = to_pci_dev(dev);
+
+ return find_smbios_instance_string(pdev, NULL, SMBIOS_ATTR_NONE) ?
+ S_IRUGO : 0;
+}
+
+static ssize_t smbioslabel_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev;
+ pdev = to_pci_dev(dev);
+
+ return find_smbios_instance_string(pdev, buf,
+ SMBIOS_ATTR_LABEL_SHOW);
+}
+
+static ssize_t smbiosinstance_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev;
+ pdev = to_pci_dev(dev);
+
+ return find_smbios_instance_string(pdev, buf,
+ SMBIOS_ATTR_INSTANCE_SHOW);
+}
+
+static struct device_attribute smbios_attr_label = {
+ .attr = {.name = "label", .mode = 0444},
+ .show = smbioslabel_show,
+};
+
+static struct device_attribute smbios_attr_instance = {
+ .attr = {.name = "index", .mode = 0444},
+ .show = smbiosinstance_show,
+};
+
+static struct attribute *smbios_attributes[] = {
+ &smbios_attr_label.attr,
+ &smbios_attr_instance.attr,
+ NULL,
+};
+
+static struct attribute_group smbios_attr_group = {
+ .attrs = smbios_attributes,
+ .is_visible = smbios_instance_string_exist,
+};
+
+static int pci_create_smbiosname_file(struct pci_dev *pdev)
+{
+ return sysfs_create_group(&pdev->dev.kobj, &smbios_attr_group);
+}
+
+static void pci_remove_smbiosname_file(struct pci_dev *pdev)
+{
+ sysfs_remove_group(&pdev->dev.kobj, &smbios_attr_group);
+}
+#else
+static inline int pci_create_smbiosname_file(struct pci_dev *pdev)
+{
+ return -1;
+}
+
+static inline void pci_remove_smbiosname_file(struct pci_dev *pdev)
+{
+}
+#endif
+
+#ifdef CONFIG_ACPI
+enum acpi_attr_enum {
+ ACPI_ATTR_LABEL_SHOW,
+ ACPI_ATTR_INDEX_SHOW,
+};
+
+static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
+{
+ int len;
+ len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer,
+ obj->buffer.length,
+ UTF16_LITTLE_ENDIAN,
+ buf, PAGE_SIZE - 1);
+ /* cap at PAGE_SIZE - 1 so the trailing newline cannot overflow buf */
+ buf[len] = '\n';
+}
+
+static int dsm_get_label(struct device *dev, char *buf,
+ enum acpi_attr_enum attr)
+{
+ acpi_handle handle;
+ union acpi_object *obj, *tmp;
+ int len = -1;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return -1;
+
+ obj = acpi_evaluate_dsm(handle, pci_acpi_dsm_uuid, 0x2,
+ DEVICE_LABEL_DSM, NULL);
+ if (!obj)
+ return -1;
+
+ tmp = obj->package.elements;
+ if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 2 &&
+ tmp[0].type == ACPI_TYPE_INTEGER &&
+ (tmp[1].type == ACPI_TYPE_STRING ||
+ tmp[1].type == ACPI_TYPE_BUFFER)) {
+ /*
+ * The second string element is optional even when
+ * this _DSM is implemented; when not implemented,
+ * this entry must return a null string.
+ */
+ if (attr == ACPI_ATTR_INDEX_SHOW) {
+ scnprintf(buf, PAGE_SIZE, "%llu\n", tmp->integer.value);
+ } else if (attr == ACPI_ATTR_LABEL_SHOW) {
+ if (tmp[1].type == ACPI_TYPE_STRING)
+ scnprintf(buf, PAGE_SIZE, "%s\n",
+ tmp[1].string.pointer);
+ else if (tmp[1].type == ACPI_TYPE_BUFFER)
+ dsm_label_utf16s_to_utf8s(tmp + 1, buf);
+ }
+ len = strlen(buf) > 0 ? strlen(buf) : -1;
+ }
+
+ ACPI_FREE(obj);
+
+ return len;
+}
+
+static bool device_has_dsm(struct device *dev)
+{
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return false;
+
+ return !!acpi_check_dsm(handle, pci_acpi_dsm_uuid, 0x2,
+ 1 << DEVICE_LABEL_DSM);
+}
+
+static umode_t acpi_index_string_exist(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev;
+
+ dev = container_of(kobj, struct device, kobj);
+
+ if (device_has_dsm(dev))
+ return S_IRUGO;
+
+ return 0;
+}
+
+static ssize_t acpilabel_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return dsm_get_label(dev, buf, ACPI_ATTR_LABEL_SHOW);
+}
+
+static ssize_t acpiindex_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return dsm_get_label(dev, buf, ACPI_ATTR_INDEX_SHOW);
+}
+
+static struct device_attribute acpi_attr_label = {
+ .attr = {.name = "label", .mode = 0444},
+ .show = acpilabel_show,
+};
+
+static struct device_attribute acpi_attr_index = {
+ .attr = {.name = "acpi_index", .mode = 0444},
+ .show = acpiindex_show,
+};
+
+static struct attribute *acpi_attributes[] = {
+ &acpi_attr_label.attr,
+ &acpi_attr_index.attr,
+ NULL,
+};
+
+static struct attribute_group acpi_attr_group = {
+ .attrs = acpi_attributes,
+ .is_visible = acpi_index_string_exist,
+};
+
+static int pci_create_acpi_index_label_files(struct pci_dev *pdev)
+{
+ return sysfs_create_group(&pdev->dev.kobj, &acpi_attr_group);
+}
+
+static int pci_remove_acpi_index_label_files(struct pci_dev *pdev)
+{
+ sysfs_remove_group(&pdev->dev.kobj, &acpi_attr_group);
+ return 0;
+}
+#else
+static inline int pci_create_acpi_index_label_files(struct pci_dev *pdev)
+{
+ return -1;
+}
+
+static inline int pci_remove_acpi_index_label_files(struct pci_dev *pdev)
+{
+ return -1;
+}
+
+static inline bool device_has_dsm(struct device *dev)
+{
+ return false;
+}
+#endif
+
+void pci_create_firmware_label_files(struct pci_dev *pdev)
+{
+ if (device_has_dsm(&pdev->dev))
+ pci_create_acpi_index_label_files(pdev);
+ else
+ pci_create_smbiosname_file(pdev);
+}
+
+void pci_remove_firmware_label_files(struct pci_dev *pdev)
+{
+ if (device_has_dsm(&pdev->dev))
+ pci_remove_acpi_index_label_files(pdev);
+ else
+ pci_remove_smbiosname_file(pdev);
+}
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c
new file mode 100644
index 000000000..886fb3570
--- /dev/null
+++ b/drivers/pci/pci-stub.c
@@ -0,0 +1,97 @@
+/* pci-stub - simple stub driver to reserve a pci device
+ *
+ * Copyright (C) 2008 Red Hat, Inc.
+ * Author:
+ * Chris Wright
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Usage is simple, allocate a new id to the stub driver and bind the
+ * device to it. For example:
+ *
+ * # echo "8086 10f5" > /sys/bus/pci/drivers/pci-stub/new_id
+ * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind
+ * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/pci-stub/bind
+ * # ls -l /sys/bus/pci/devices/0000:00:19.0/driver
+ * .../0000:00:19.0/driver -> ../../../bus/pci/drivers/pci-stub
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+static char ids[1024] __initdata;
+
+module_param_string(ids, ids, sizeof(ids), 0);
+MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the stub driver, format is "
+ "\"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\""
+ " and multiple comma separated entries can be specified");
+
+static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ dev_info(&dev->dev, "claimed by stub\n");
+ return 0;
+}
+
+static struct pci_driver stub_driver = {
+ .name = "pci-stub",
+ .id_table = NULL, /* only dynamic id's */
+ .probe = pci_stub_probe,
+};
+
+static int __init pci_stub_init(void)
+{
+ char *p, *id;
+ int rc;
+
+ rc = pci_register_driver(&stub_driver);
+ if (rc)
+ return rc;
+
+ /* no ids specified */
+ if (ids[0] == '\0')
+ return 0;
+
+ /* add ids specified in the module parameter */
+ p = ids;
+ while ((id = strsep(&p, ","))) {
+ unsigned int vendor, device, subvendor = PCI_ANY_ID,
+ subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
+ int fields;
+
+ if (!strlen(id))
+ continue;
+
+ fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
+ &vendor, &device, &subvendor, &subdevice,
+ &class, &class_mask);
+
+ if (fields < 2) {
+ printk(KERN_WARNING
+ "pci-stub: invalid id string \"%s\"\n", id);
+ continue;
+ }
+
+ printk(KERN_INFO
+ "pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n",
+ vendor, device, subvendor, subdevice, class, class_mask);
+
+ rc = pci_add_dynid(&stub_driver, vendor, device,
+ subvendor, subdevice, class, class_mask, 0);
+ if (rc)
+ printk(KERN_WARNING
+ "pci-stub: failed to add dynamic id (%d)\n", rc);
+ }
+
+ return 0;
+}
+
+static void __exit pci_stub_exit(void)
+{
+ pci_unregister_driver(&stub_driver);
+}
+
+module_init(pci_stub_init);
+module_exit(pci_stub_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chris Wright <chrisw@sous-sol.org>");
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
new file mode 100644
index 000000000..312f23a84
--- /dev/null
+++ b/drivers/pci/pci-sysfs.c
@@ -0,0 +1,1584 @@
+/*
+ * drivers/pci/pci-sysfs.c
+ *
+ * (C) Copyright 2002-2004 Greg Kroah-Hartman <greg@kroah.com>
+ * (C) Copyright 2002-2004 IBM Corp.
+ * (C) Copyright 2003 Matthew Wilcox
+ * (C) Copyright 2003 Hewlett-Packard
+ * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
+ * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
+ *
+ * File attributes for PCI devices
+ *
+ * Modeled after usb's driverfs.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/stat.h>
+#include <linux/export.h>
+#include <linux/topology.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/capability.h>
+#include <linux/security.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/vgaarb.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include "pci.h"
+
+static int sysfs_initialized; /* = 0 */
+
+/* show configuration fields */
+#define pci_config_attr(field, format_string) \
+static ssize_t \
+field##_show(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+ struct pci_dev *pdev; \
+ \
+ pdev = to_pci_dev(dev); \
+ return sprintf(buf, format_string, pdev->field); \
+} \
+static DEVICE_ATTR_RO(field)
+
+pci_config_attr(vendor, "0x%04x\n");
+pci_config_attr(device, "0x%04x\n");
+pci_config_attr(subsystem_vendor, "0x%04x\n");
+pci_config_attr(subsystem_device, "0x%04x\n");
+pci_config_attr(class, "0x%06x\n");
+pci_config_attr(irq, "%u\n");
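+
+/*
+ * For instance, pci_config_attr(vendor, "0x%04x\n") above expands to a
+ * vendor_show() routine printing pdev->vendor in that format, plus a
+ * read-only dev_attr_vendor exposed as the sysfs file "vendor".
+ */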
+
+static ssize_t broken_parity_status_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ return sprintf(buf, "%u\n", pdev->broken_parity_status);
+}
+
+static ssize_t broken_parity_status_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ pdev->broken_parity_status = !!val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(broken_parity_status);
+
+static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list,
+ struct device_attribute *attr, char *buf)
+{
+ const struct cpumask *mask;
+
+#ifdef CONFIG_NUMA
+ mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
+ cpumask_of_node(dev_to_node(dev));
+#else
+ mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
+#endif
+ return cpumap_print_to_pagebuf(list, buf, mask);
+}
+
+static ssize_t local_cpus_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return pci_dev_show_local_cpu(dev, false, attr, buf);
+}
+static DEVICE_ATTR_RO(local_cpus);
+
+static ssize_t local_cpulist_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return pci_dev_show_local_cpu(dev, true, attr, buf);
+}
+static DEVICE_ATTR_RO(local_cpulist);
+
+/*
+ * PCI Bus Class Devices
+ */
+static ssize_t cpuaffinity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
+
+ return cpumap_print_to_pagebuf(false, buf, cpumask);
+}
+static DEVICE_ATTR_RO(cpuaffinity);
+
+static ssize_t cpulistaffinity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask);
+}
+static DEVICE_ATTR_RO(cpulistaffinity);
+
+/* show resources */
+static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ char *str = buf;
+ int i;
+ int max;
+ resource_size_t start, end;
+
+ if (pci_dev->subordinate)
+ max = DEVICE_COUNT_RESOURCE;
+ else
+ max = PCI_BRIDGE_RESOURCES;
+
+ for (i = 0; i < max; i++) {
+ struct resource *res = &pci_dev->resource[i];
+ pci_resource_to_user(pci_dev, i, res, &start, &end);
+ str += sprintf(str, "0x%016llx 0x%016llx 0x%016llx\n",
+ (unsigned long long)start,
+ (unsigned long long)end,
+ (unsigned long long)res->flags);
+ }
+ return (str - buf);
+}
+static DEVICE_ATTR_RO(resource);
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
+ return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
+ pci_dev->vendor, pci_dev->device,
+ pci_dev->subsystem_vendor, pci_dev->subsystem_device,
+ (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
+ (u8)(pci_dev->class));
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ unsigned long val;
+ ssize_t result = kstrtoul(buf, 0, &val);
+
+ if (result < 0)
+ return result;
+
+ /* this can crash the machine when done on the "wrong" device */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (!val) {
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
+ else
+ result = -EIO;
+ } else
+ result = pci_enable_device(pdev);
+
+ return result < 0 ? result : count;
+}
+
+static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(dev);
+ return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt));
+}
+static DEVICE_ATTR_RW(enable);
+
+#ifdef CONFIG_NUMA
+static ssize_t numa_node_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int node, ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ret = kstrtoint(buf, 0, &node);
+ if (ret)
+ return ret;
+
+ if (!node_online(node))
+ return -EINVAL;
+
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+ dev_alert(&pdev->dev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.",
+ node);
+
+ dev->numa_node = node;
+ return count;
+}
+
+static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", dev->numa_node);
+}
+static DEVICE_ATTR_RW(numa_node);
+#endif
+
+static ssize_t dma_mask_bits_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%d\n", fls64(pdev->dma_mask));
+}
+static DEVICE_ATTR_RO(dma_mask_bits);
+
+static ssize_t consistent_dma_mask_bits_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", fls64(dev->coherent_dma_mask));
+}
+static DEVICE_ATTR_RO(consistent_dma_mask_bits);
+
+static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_bus *subordinate = pdev->subordinate;
+
+ return sprintf(buf, "%u\n", subordinate ?
+ !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI)
+ : !pdev->no_msi);
+}
+
+static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_bus *subordinate = pdev->subordinate;
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /*
+ * "no_msi" and "bus_flags" only affect what happens when a driver
+ * requests MSI or MSI-X. They don't affect any drivers that have
+ * already requested MSI or MSI-X.
+ */
+ if (!subordinate) {
+ pdev->no_msi = !val;
+ dev_info(&pdev->dev, "MSI/MSI-X %s for future drivers\n",
+ val ? "allowed" : "disallowed");
+ return count;
+ }
+
+ if (val)
+ subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI;
+ else
+ subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
+
+ dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n",
+ val ? "allowed" : "disallowed");
+ return count;
+}
+static DEVICE_ATTR_RW(msi_bus);
+
+static ssize_t bus_rescan_store(struct bus_type *bus, const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ struct pci_bus *b = NULL;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val) {
+ pci_lock_rescan_remove();
+ while ((b = pci_find_next_bus(b)) != NULL)
+ pci_rescan_bus(b);
+ pci_unlock_rescan_remove();
+ }
+ return count;
+}
+static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store);
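+
+/*
+ * Writing a non-zero value rescans every root bus, e.g.:
+ *
+ *    # echo 1 > /sys/bus/pci/rescan
+ */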
+
+static struct attribute *pci_bus_attrs[] = {
+ &bus_attr_rescan.attr,
+ NULL,
+};
+
+static const struct attribute_group pci_bus_group = {
+ .attrs = pci_bus_attrs,
+};
+
+const struct attribute_group *pci_bus_groups[] = {
+ &pci_bus_group,
+ NULL,
+};
+
+static ssize_t dev_rescan_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val) {
+ pci_lock_rescan_remove();
+ pci_rescan_bus(pdev->bus);
+ pci_unlock_rescan_remove();
+ }
+ return count;
+}
+static struct device_attribute dev_rescan_attr = __ATTR(rescan,
+ (S_IWUSR|S_IWGRP),
+ NULL, dev_rescan_store);
+
+static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val && device_remove_file_self(dev, attr))
+ pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
+ return count;
+}
+static struct device_attribute dev_remove_attr = __ATTR(remove,
+ (S_IWUSR|S_IWGRP),
+ NULL, remove_store);
+
+static ssize_t dev_bus_rescan_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long val;
+ struct pci_bus *bus = to_pci_bus(dev);
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val) {
+ pci_lock_rescan_remove();
+ if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
+ pci_rescan_bus_bridge_resize(bus->self);
+ else
+ pci_rescan_bus(bus);
+ pci_unlock_rescan_remove();
+ }
+ return count;
+}
+static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store);
+
+#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
+static ssize_t d3cold_allowed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ pdev->d3cold_allowed = !!val;
+ pm_runtime_resume(dev);
+
+ return count;
+}
+
+static ssize_t d3cold_allowed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ return sprintf(buf, "%u\n", pdev->d3cold_allowed);
+}
+static DEVICE_ATTR_RW(d3cold_allowed);
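+
+/*
+ * Example (hypothetical device address): keep a device out of D3cold:
+ *
+ *    # echo 0 > /sys/bus/pci/devices/0000:00:1c.0/d3cold_allowed
+ */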
+#endif
+
+#ifdef CONFIG_OF
+static ssize_t devspec_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct device_node *np = pci_device_to_OF_node(pdev);
+
+ if (np == NULL || np->full_name == NULL)
+ return 0;
+ return sprintf(buf, "%s", np->full_name);
+}
+static DEVICE_ATTR_RO(devspec);
+#endif
+
+#ifdef CONFIG_PCI_IOV
+static ssize_t sriov_totalvfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%u\n", pci_sriov_get_totalvfs(pdev));
+}
+
+static ssize_t sriov_numvfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
+}
+
+/*
+ * num_vfs > 0: number of VFs to enable
+ * num_vfs = 0: disable all VFs
+ *
+ * Note: the SR-IOV spec does not allow partial VF
+ *       disable, so it is all or none.
+ */
+static ssize_t sriov_numvfs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+ u16 num_vfs;
+
+ ret = kstrtou16(buf, 0, &num_vfs);
+ if (ret < 0)
+ return ret;
+
+ if (num_vfs > pci_sriov_get_totalvfs(pdev))
+ return -ERANGE;
+
+ if (num_vfs == pdev->sriov->num_VFs)
+ return count; /* no change */
+
+ /* is the PF driver loaded and does it provide sriov_configure()? */
+ if (!pdev->driver || !pdev->driver->sriov_configure) {
+ dev_info(&pdev->dev, "Driver doesn't support SRIOV configuration via sysfs\n");
+ return -ENOSYS;
+ }
+
+ if (num_vfs == 0) {
+ /* disable VFs */
+ ret = pdev->driver->sriov_configure(pdev, 0);
+ if (ret < 0)
+ return ret;
+ return count;
+ }
+
+ /* enable VFs */
+ if (pdev->sriov->num_VFs) {
+ dev_warn(&pdev->dev, "%d VFs already enabled. Disable before enabling %d VFs\n",
+ pdev->sriov->num_VFs, num_vfs);
+ return -EBUSY;
+ }
+
+ ret = pdev->driver->sriov_configure(pdev, num_vfs);
+ if (ret < 0)
+ return ret;
+
+ if (ret != num_vfs)
+ dev_warn(&pdev->dev, "%d VFs requested; only %d enabled\n",
+ num_vfs, ret);
+
+ return count;
+}
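+
+/*
+ * Example (hypothetical address): on a PF whose driver implements
+ * ->sriov_configure(), enable four VFs and later disable them all:
+ *
+ *    # echo 4 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
+ *    # echo 0 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
+ */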
+
+static struct device_attribute sriov_totalvfs_attr = __ATTR_RO(sriov_totalvfs);
+static struct device_attribute sriov_numvfs_attr =
+ __ATTR(sriov_numvfs, (S_IRUGO|S_IWUSR|S_IWGRP),
+ sriov_numvfs_show, sriov_numvfs_store);
+#endif /* CONFIG_PCI_IOV */
+
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ char *driver_override, *old = pdev->driver_override, *cp;
+
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ if (strlen(driver_override)) {
+ pdev->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ pdev->driver_override = NULL;
+ }
+
+ kfree(old);
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
+}
+static DEVICE_ATTR_RW(driver_override);
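+
+/*
+ * Example (hypothetical address): steer the next probe of a device to
+ * pci-stub, then trigger the probe:
+ *
+ *    # echo pci-stub > /sys/bus/pci/devices/0000:00:19.0/driver_override
+ *    # echo 0000:00:19.0 > /sys/bus/pci/drivers_probe
+ */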
+
+static struct attribute *pci_dev_attrs[] = {
+ &dev_attr_resource.attr,
+ &dev_attr_vendor.attr,
+ &dev_attr_device.attr,
+ &dev_attr_subsystem_vendor.attr,
+ &dev_attr_subsystem_device.attr,
+ &dev_attr_class.attr,
+ &dev_attr_irq.attr,
+ &dev_attr_local_cpus.attr,
+ &dev_attr_local_cpulist.attr,
+ &dev_attr_modalias.attr,
+#ifdef CONFIG_NUMA
+ &dev_attr_numa_node.attr,
+#endif
+ &dev_attr_dma_mask_bits.attr,
+ &dev_attr_consistent_dma_mask_bits.attr,
+ &dev_attr_enable.attr,
+ &dev_attr_broken_parity_status.attr,
+ &dev_attr_msi_bus.attr,
+#if defined(CONFIG_PM) && defined(CONFIG_ACPI)
+ &dev_attr_d3cold_allowed.attr,
+#endif
+#ifdef CONFIG_OF
+ &dev_attr_devspec.attr,
+#endif
+ &dev_attr_driver_override.attr,
+ NULL,
+};
+
+static const struct attribute_group pci_dev_group = {
+ .attrs = pci_dev_attrs,
+};
+
+const struct attribute_group *pci_dev_groups[] = {
+ &pci_dev_group,
+ NULL,
+};
+
+static struct attribute *pcibus_attrs[] = {
+ &dev_attr_rescan.attr,
+ &dev_attr_cpuaffinity.attr,
+ &dev_attr_cpulistaffinity.attr,
+ NULL,
+};
+
+static const struct attribute_group pcibus_group = {
+ .attrs = pcibus_attrs,
+};
+
+const struct attribute_group *pcibus_groups[] = {
+ &pcibus_group,
+ NULL,
+};
+
+static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *vga_dev = vga_default_device();
+
+ if (vga_dev)
+ return sprintf(buf, "%u\n", (pdev == vga_dev));
+
+ return sprintf(buf, "%u\n",
+ !!(pdev->resource[PCI_ROM_RESOURCE].flags &
+ IORESOURCE_ROM_SHADOW));
+}
+static struct device_attribute vga_attr = __ATTR_RO(boot_vga);
+
+static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct pci_dev *dev = to_pci_dev(container_of(kobj, struct device,
+ kobj));
+ unsigned int size = 64;
+ loff_t init_off = off;
+ u8 *data = (u8 *) buf;
+
+ /* Several chips lock up trying to read undefined config space */
+ if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0)
+ size = dev->cfg_size;
+ else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+ size = 128;
+
+ if (off > size)
+ return 0;
+ if (off + count > size) {
+ size -= off;
+ count = size;
+ } else {
+ size = count;
+ }
+
+ pci_config_pm_runtime_get(dev);
+
+ if ((off & 1) && size) {
+ u8 val;
+ pci_user_read_config_byte(dev, off, &val);
+ data[off - init_off] = val;
+ off++;
+ size--;
+ }
+
+ if ((off & 3) && size > 2) {
+ u16 val;
+ pci_user_read_config_word(dev, off, &val);
+ data[off - init_off] = val & 0xff;
+ data[off - init_off + 1] = (val >> 8) & 0xff;
+ off += 2;
+ size -= 2;
+ }
+
+ while (size > 3) {
+ u32 val;
+ pci_user_read_config_dword(dev, off, &val);
+ data[off - init_off] = val & 0xff;
+ data[off - init_off + 1] = (val >> 8) & 0xff;
+ data[off - init_off + 2] = (val >> 16) & 0xff;
+ data[off - init_off + 3] = (val >> 24) & 0xff;
+ off += 4;
+ size -= 4;
+ }
+
+ if (size >= 2) {
+ u16 val;
+ pci_user_read_config_word(dev, off, &val);
+ data[off - init_off] = val & 0xff;
+ data[off - init_off + 1] = (val >> 8) & 0xff;
+ off += 2;
+ size -= 2;
+ }
+
+ if (size > 0) {
+ u8 val;
+ pci_user_read_config_byte(dev, off, &val);
+ data[off - init_off] = val;
+ off++;
+ --size;
+ }
+
+ pci_config_pm_runtime_put(dev);
+
+ return count;
+}
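+
+/*
+ * Worked example: a 7-byte read at offset 1 is split by the code above
+ * into a byte access at offset 1, a word access at offset 2 and a dword
+ * access at offset 4, so no access crosses its natural alignment.
+ */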
+
+static ssize_t pci_write_config(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct pci_dev *dev = to_pci_dev(container_of(kobj, struct device,
+ kobj));
+ unsigned int size = count;
+ loff_t init_off = off;
+ u8 *data = (u8 *) buf;
+
+ if (off > dev->cfg_size)
+ return 0;
+ if (off + count > dev->cfg_size) {
+ size = dev->cfg_size - off;
+ count = size;
+ }
+
+ pci_config_pm_runtime_get(dev);
+
+ if ((off & 1) && size) {
+ pci_user_write_config_byte(dev, off, data[off - init_off]);
+ off++;
+ size--;
+ }
+
+ if ((off & 3) && size > 2) {
+ u16 val = data[off - init_off];
+ val |= (u16) data[off - init_off + 1] << 8;
+ pci_user_write_config_word(dev, off, val);
+ off += 2;
+ size -= 2;
+ }
+
+ while (size > 3) {
+ u32 val = data[off - init_off];
+ val |= (u32) data[off - init_off + 1] << 8;
+ val |= (u32) data[off - init_off + 2] << 16;
+ val |= (u32) data[off - init_off + 3] << 24;
+ pci_user_write_config_dword(dev, off, val);
+ off += 4;
+ size -= 4;
+ }
+
+ if (size >= 2) {
+ u16 val = data[off - init_off];
+ val |= (u16) data[off - init_off + 1] << 8;
+ pci_user_write_config_word(dev, off, val);
+ off += 2;
+ size -= 2;
+ }
+
+ if (size) {
+ pci_user_write_config_byte(dev, off, data[off - init_off]);
+ off++;
+ --size;
+ }
+
+ pci_config_pm_runtime_put(dev);
+
+ return count;
+}
+
+static ssize_t read_vpd_attr(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct pci_dev *dev =
+ to_pci_dev(container_of(kobj, struct device, kobj));
+
+ if (off > bin_attr->size)
+ count = 0;
+ else if (count > bin_attr->size - off)
+ count = bin_attr->size - off;
+
+ return pci_read_vpd(dev, off, count, buf);
+}
+
+static ssize_t write_vpd_attr(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct pci_dev *dev =
+ to_pci_dev(container_of(kobj, struct device, kobj));
+
+ if (off > bin_attr->size)
+ count = 0;
+ else if (count > bin_attr->size - off)
+ count = bin_attr->size - off;
+
+ return pci_write_vpd(dev, off, count, buf);
+}
+
+#ifdef HAVE_PCI_LEGACY
+/**
+ * pci_read_legacy_io - read byte(s) from legacy I/O port space
+ * @filp: open sysfs file
+ * @kobj: kobject corresponding to file to read from
+ * @bin_attr: struct bin_attribute for this file
+ * @buf: buffer to store results
+ * @off: offset into legacy I/O port space
+ * @count: number of bytes to read
+ *
+ * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific
+ * callback routine (pci_legacy_read).
+ */
+static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct pci_bus *bus = to_pci_bus(container_of(kobj, struct device,
+ kobj));
+
+ /* Only support 1, 2 or 4 byte accesses */
+ if (count != 1 && count != 2 && count != 4)
+ return -EINVAL;
+
+ return pci_legacy_read(bus, off, (u32 *)buf, count);
+}
+
+/**
+ * pci_write_legacy_io - write byte(s) to legacy I/O port space
+ * @filp: open sysfs file
+ * @kobj: kobject corresponding to file to write to
+ * @bin_attr: struct bin_attribute for this file
+ * @buf: buffer containing value to be written
+ * @off: offset into legacy I/O port space
+ * @count: number of bytes to write
+ *
+ * Writes 1, 2, or 4 bytes to legacy I/O port space using an arch specific
+ * callback routine (pci_legacy_write).
+ */
+static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct pci_bus *bus = to_pci_bus(container_of(kobj, struct device,
+ kobj));
+
+ /* Only support 1, 2 or 4 byte accesses */
+ if (count != 1 && count != 2 && count != 4)
+ return -EINVAL;
+
+ return pci_legacy_write(bus, off, *(u32 *)buf, count);
+}
+
+/**
+ * pci_mmap_legacy_mem - map legacy PCI memory into user memory space
+ * @filp: open sysfs file
+ * @kobj: kobject corresponding to device to be mapped
+ * @attr: struct bin_attribute for this file
+ * @vma: struct vm_area_struct passed to mmap
+ *
+ * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap
+ * legacy memory space (first meg of bus space) into application virtual
+ * memory space.
+ */
+static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ struct pci_bus *bus = to_pci_bus(container_of(kobj, struct device,
+ kobj));
+
+ return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem);
+}
+
+/**
+ * pci_mmap_legacy_io - map legacy PCI IO into user memory space
+ * @filp: open sysfs file
+ * @kobj: kobject corresponding to device to be mapped
+ * @attr: struct bin_attribute for this file
+ * @vma: struct vm_area_struct passed to mmap
+ *
+ * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap
+ * legacy IO space (first meg of bus space) into application virtual
+ * memory space. Returns -ENOSYS if the operation isn't supported
+ */
+static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ struct pci_bus *bus = to_pci_bus(container_of(kobj, struct device,
+ kobj));
+
+ return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io);
+}
+
+/**
+ * pci_adjust_legacy_attr - adjustment of legacy file attributes
+ * @b: bus to create files under
+ * @mmap_type: I/O port or memory
+ *
+ * Stub implementation. Can be overridden by arch if necessary.
+ */
+void __weak pci_adjust_legacy_attr(struct pci_bus *b,
+ enum pci_mmap_state mmap_type)
+{
+}
+
+/**
+ * pci_create_legacy_files - create legacy I/O port and memory files
+ * @b: bus to create files under
+ *
+ * Some platforms allow access to legacy I/O port and ISA memory space on
+ * a per-bus basis. This routine creates the files and ties them into
+ * their associated read, write and mmap files from pci-sysfs.c
+ *
+ * On error unwind, but don't propagate the error to the caller
+ * as it is ok to set up the PCI bus without these files.
+ */
+void pci_create_legacy_files(struct pci_bus *b)
+{
+ int error;
+
+ b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
+ GFP_ATOMIC);
+ if (!b->legacy_io)
+ goto kzalloc_err;
+
+ sysfs_bin_attr_init(b->legacy_io);
+ b->legacy_io->attr.name = "legacy_io";
+ b->legacy_io->size = 0xffff;
+ b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
+ b->legacy_io->read = pci_read_legacy_io;
+ b->legacy_io->write = pci_write_legacy_io;
+ b->legacy_io->mmap = pci_mmap_legacy_io;
+ pci_adjust_legacy_attr(b, pci_mmap_io);
+ error = device_create_bin_file(&b->dev, b->legacy_io);
+ if (error)
+ goto legacy_io_err;
+
+ /* Allocated above after the legacy_io struct */
+ b->legacy_mem = b->legacy_io + 1;
+ sysfs_bin_attr_init(b->legacy_mem);
+ b->legacy_mem->attr.name = "legacy_mem";
+ b->legacy_mem->size = 1024*1024;
+ b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
+ b->legacy_mem->mmap = pci_mmap_legacy_mem;
+ pci_adjust_legacy_attr(b, pci_mmap_mem);
+ error = device_create_bin_file(&b->dev, b->legacy_mem);
+ if (error)
+ goto legacy_mem_err;
+
+ return;
+
+legacy_mem_err:
+ device_remove_bin_file(&b->dev, b->legacy_io);
+legacy_io_err:
+ kfree(b->legacy_io);
+ b->legacy_io = NULL;
+kzalloc_err:
+ printk(KERN_WARNING "pci: warning: could not create legacy I/O port and ISA memory resources to sysfs\n");
+ return;
+}
+
+void pci_remove_legacy_files(struct pci_bus *b)
+{
+ if (b->legacy_io) {
+ device_remove_bin_file(&b->dev, b->legacy_io);
+ device_remove_bin_file(&b->dev, b->legacy_mem);
+ kfree(b->legacy_io); /* both are allocated here */
+ }
+}
+#endif /* HAVE_PCI_LEGACY */
+
+#ifdef HAVE_PCI_MMAP
+
+int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
+ enum pci_mmap_api mmap_api)
+{
+ unsigned long nr, start, size, pci_start;
+
+ if (pci_resource_len(pdev, resno) == 0)
+ return 0;
+ nr = vma_pages(vma);
+ start = vma->vm_pgoff;
+ size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
+ pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
+ pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
+ if (start >= pci_start && start < pci_start + size &&
+ start + nr <= pci_start + size)
+ return 1;
+ return 0;
+}
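+
+/*
+ * Worked example: with 4 KiB pages, a 16 KiB BAR yields size = 4 pages.
+ * A sysfs mmap (pci_start = 0) of nr = 4 pages at vm_pgoff = 0 fits,
+ * while nr = 2 pages at vm_pgoff = 3 does not, since 3 + 2 > 4.
+ */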
+
+/**
+ * pci_mmap_resource - map a PCI resource into user memory space
+ * @kobj: kobject for mapping
+ * @attr: struct bin_attribute for the file being mapped
+ * @vma: struct vm_area_struct passed into the mmap
+ * @write_combine: 1 for write_combine mapping
+ *
+ * Use the regular PCI mapping routines to map a PCI resource into userspace.
+ */
+static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
+ struct vm_area_struct *vma, int write_combine)
+{
+ struct pci_dev *pdev = to_pci_dev(container_of(kobj,
+ struct device, kobj));
+ struct resource *res = attr->private;
+ enum pci_mmap_state mmap_type;
+ resource_size_t start, end;
+ int i;
+
+ for (i = 0; i < PCI_ROM_RESOURCE; i++)
+ if (res == &pdev->resource[i])
+ break;
+ if (i >= PCI_ROM_RESOURCE)
+ return -ENODEV;
+
+ if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) {
+ WARN(1, "process \"%s\" tried to map 0x%08lx bytes at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n",
+ current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff,
+ pci_name(pdev), i,
+ (u64)pci_resource_start(pdev, i),
+ (u64)pci_resource_len(pdev, i));
+ return -EINVAL;
+ }
+
+ /* pci_mmap_page_range() expects the same kind of entry as coming
+ * from /proc/bus/pci/ which is a "user visible" value. If this is
+ * different from the resource itself, arch will do necessary fixup.
+ */
+ pci_resource_to_user(pdev, i, res, &start, &end);
+ vma->vm_pgoff += start >> PAGE_SHIFT;
+ mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
+
+ if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(start))
+ return -EINVAL;
+
+ return pci_mmap_page_range(pdev, vma, mmap_type, write_combine);
+}
+
+static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ return pci_mmap_resource(kobj, attr, vma, 0);
+}
+
+static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ struct vm_area_struct *vma)
+{
+ return pci_mmap_resource(kobj, attr, vma, 1);
+}
+
+static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count, bool write)
+{
+ struct pci_dev *pdev = to_pci_dev(container_of(kobj,
+ struct device, kobj));
+ struct resource *res = attr->private;
+ unsigned long port = off;
+ int i;
+
+ for (i = 0; i < PCI_ROM_RESOURCE; i++)
+ if (res == &pdev->resource[i])
+ break;
+ if (i >= PCI_ROM_RESOURCE)
+ return -ENODEV;
+
+ port += pci_resource_start(pdev, i);
+
+ if (port > pci_resource_end(pdev, i))
+ return 0;
+
+ if (port + count - 1 > pci_resource_end(pdev, i))
+ return -EINVAL;
+
+ switch (count) {
+ case 1:
+ if (write)
+ outb(*(u8 *)buf, port);
+ else
+ *(u8 *)buf = inb(port);
+ return 1;
+ case 2:
+ if (write)
+ outw(*(u16 *)buf, port);
+ else
+ *(u16 *)buf = inw(port);
+ return 2;
+ case 4:
+ if (write)
+ outl(*(u32 *)buf, port);
+ else
+ *(u32 *)buf = inl(port);
+ return 4;
+ }
+ return -EINVAL;
+}
+
+static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ return pci_resource_io(filp, kobj, attr, buf, off, count, false);
+}
+
+static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ return pci_resource_io(filp, kobj, attr, buf, off, count, true);
+}
+
+/**
+ * pci_remove_resource_files - cleanup resource files
+ * @pdev: dev to cleanup
+ *
+ * If we created resource files for @pdev, remove them from sysfs and
+ * free their resources.
+ */
+static void pci_remove_resource_files(struct pci_dev *pdev)
+{
+ int i;
+
+ for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ struct bin_attribute *res_attr;
+
+ res_attr = pdev->res_attr[i];
+ if (res_attr) {
+ sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
+ kfree(res_attr);
+ }
+
+ res_attr = pdev->res_attr_wc[i];
+ if (res_attr) {
+ sysfs_remove_bin_file(&pdev->dev.kobj, res_attr);
+ kfree(res_attr);
+ }
+ }
+}
+
+static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
+{
+ /* allocate attribute structure, piggyback attribute name */
+ int name_len = write_combine ? 13 : 10;
+ struct bin_attribute *res_attr;
+ int retval;
+
+ res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
+ if (res_attr) {
+ char *res_attr_name = (char *)(res_attr + 1);
+
+ sysfs_bin_attr_init(res_attr);
+ if (write_combine) {
+ pdev->res_attr_wc[num] = res_attr;
+ sprintf(res_attr_name, "resource%d_wc", num);
+ res_attr->mmap = pci_mmap_resource_wc;
+ } else {
+ pdev->res_attr[num] = res_attr;
+ sprintf(res_attr_name, "resource%d", num);
+ res_attr->mmap = pci_mmap_resource_uc;
+ }
+ if (pci_resource_flags(pdev, num) & IORESOURCE_IO) {
+ res_attr->read = pci_read_resource_io;
+ res_attr->write = pci_write_resource_io;
+ }
+ res_attr->attr.name = res_attr_name;
+ res_attr->attr.mode = S_IRUSR | S_IWUSR;
+ res_attr->size = pci_resource_len(pdev, num);
+ res_attr->private = &pdev->resource[num];
+ retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr);
+	} else {
+		retval = -ENOMEM;
+	}
+
+ return retval;
+}
+
+/**
+ * pci_create_resource_files - create resource files in sysfs for @pdev
+ * @pdev: dev in question
+ *
+ * Walk the resources in @pdev creating files for each resource available.
+ */
+static int pci_create_resource_files(struct pci_dev *pdev)
+{
+ int i;
+ int retval;
+
+ /* Expose the PCI resources from this device as files */
+ for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+
+ /* skip empty resources */
+ if (!pci_resource_len(pdev, i))
+ continue;
+
+ retval = pci_create_attr(pdev, i, 0);
+ /* for prefetchable resources, create a WC mappable file */
+ if (!retval && pdev->resource[i].flags & IORESOURCE_PREFETCH)
+ retval = pci_create_attr(pdev, i, 1);
+
+ if (retval) {
+ pci_remove_resource_files(pdev);
+ return retval;
+ }
+ }
+ return 0;
+}
+#else /* !HAVE_PCI_MMAP */
+int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
+void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
+#endif /* HAVE_PCI_MMAP */
+
+/**
+ * pci_write_rom - enable or disable access to the PCI ROM
+ * @filp: sysfs file
+ * @kobj: kernel object handle
+ * @bin_attr: struct bin_attribute for this file
+ * @buf: user input
+ * @off: file offset
+ * @count: number of bytes in input
+ *
+ * Writing anything except "0" enables ROM access; writing "0" disables it.
+ */
+static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
+
+ if ((off == 0) && (*buf == '0') && (count == 2))
+ pdev->rom_attr_enabled = 0;
+ else
+ pdev->rom_attr_enabled = 1;
+
+ return count;
+}
+
+/**
+ * pci_read_rom - read a PCI ROM
+ * @filp: sysfs file
+ * @kobj: kernel object handle
+ * @bin_attr: struct bin_attribute for this file
+ * @buf: where to put the data we read from the ROM
+ * @off: file offset
+ * @count: number of bytes to read
+ *
+ * Put @count bytes starting at @off into @buf from the ROM in the PCI
+ * device corresponding to @kobj.
+ */
+static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(container_of(kobj, struct device, kobj));
+ void __iomem *rom;
+ size_t size;
+
+ if (!pdev->rom_attr_enabled)
+ return -EINVAL;
+
+ rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */
+ if (!rom || !size)
+ return -EIO;
+
+ if (off >= size)
+ count = 0;
+ else {
+ if (off + count > size)
+ count = size - off;
+
+ memcpy_fromio(buf, rom + off, count);
+ }
+ pci_unmap_rom(pdev, rom);
+
+ return count;
+}
+
+static struct bin_attribute pci_config_attr = {
+ .attr = {
+ .name = "config",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .size = PCI_CFG_SPACE_SIZE,
+ .read = pci_read_config,
+ .write = pci_write_config,
+};
+
+static struct bin_attribute pcie_config_attr = {
+ .attr = {
+ .name = "config",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .size = PCI_CFG_SPACE_EXP_SIZE,
+ .read = pci_read_config,
+ .write = pci_write_config,
+};
+
+static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ unsigned long val;
+ ssize_t result = kstrtoul(buf, 0, &val);
+
+ if (result < 0)
+ return result;
+
+ if (val != 1)
+ return -EINVAL;
+
+ result = pci_reset_function(pdev);
+ if (result < 0)
+ return result;
+
+ return count;
+}
+
+static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store);
+
+static int pci_create_capabilities_sysfs(struct pci_dev *dev)
+{
+ int retval;
+ struct bin_attribute *attr;
+
+ /* If the device has VPD, try to expose it in sysfs. */
+ if (dev->vpd) {
+ attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
+ if (!attr)
+ return -ENOMEM;
+
+ sysfs_bin_attr_init(attr);
+ attr->size = dev->vpd->len;
+ attr->attr.name = "vpd";
+ attr->attr.mode = S_IRUSR | S_IWUSR;
+ attr->read = read_vpd_attr;
+ attr->write = write_vpd_attr;
+ retval = sysfs_create_bin_file(&dev->dev.kobj, attr);
+ if (retval) {
+ kfree(attr);
+ return retval;
+ }
+ dev->vpd->attr = attr;
+ }
+
+ /* Active State Power Management */
+ pcie_aspm_create_sysfs_dev_files(dev);
+
+ if (!pci_probe_reset_function(dev)) {
+ retval = device_create_file(&dev->dev, &reset_attr);
+ if (retval)
+ goto error;
+ dev->reset_fn = 1;
+ }
+ return 0;
+
+error:
+ pcie_aspm_remove_sysfs_dev_files(dev);
+ if (dev->vpd && dev->vpd->attr) {
+ sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
+ kfree(dev->vpd->attr);
+ }
+
+ return retval;
+}
+
+int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
+{
+ int retval;
+ int rom_size = 0;
+ struct bin_attribute *attr;
+
+ if (!sysfs_initialized)
+ return -EACCES;
+
+ if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
+ retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
+ else
+ retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr);
+ if (retval)
+ goto err;
+
+ retval = pci_create_resource_files(pdev);
+ if (retval)
+ goto err_config_file;
+
+ if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
+ rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
+ else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
+ rom_size = 0x20000;
+
+ /* If the device has a ROM, try to expose it in sysfs. */
+ if (rom_size) {
+ attr = kzalloc(sizeof(*attr), GFP_ATOMIC);
+ if (!attr) {
+ retval = -ENOMEM;
+ goto err_resource_files;
+ }
+ sysfs_bin_attr_init(attr);
+ attr->size = rom_size;
+ attr->attr.name = "rom";
+ attr->attr.mode = S_IRUSR | S_IWUSR;
+ attr->read = pci_read_rom;
+ attr->write = pci_write_rom;
+ retval = sysfs_create_bin_file(&pdev->dev.kobj, attr);
+ if (retval) {
+ kfree(attr);
+ goto err_resource_files;
+ }
+ pdev->rom_attr = attr;
+ }
+
+ /* add sysfs entries for various capabilities */
+ retval = pci_create_capabilities_sysfs(pdev);
+ if (retval)
+ goto err_rom_file;
+
+ pci_create_firmware_label_files(pdev);
+
+ return 0;
+
+err_rom_file:
+ if (rom_size) {
+ sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
+ kfree(pdev->rom_attr);
+ pdev->rom_attr = NULL;
+ }
+err_resource_files:
+ pci_remove_resource_files(pdev);
+err_config_file:
+ if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
+ sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
+ else
+ sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
+err:
+ return retval;
+}
+
+static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
+{
+ if (dev->vpd && dev->vpd->attr) {
+ sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
+ kfree(dev->vpd->attr);
+ }
+
+ pcie_aspm_remove_sysfs_dev_files(dev);
+ if (dev->reset_fn) {
+ device_remove_file(&dev->dev, &reset_attr);
+ dev->reset_fn = 0;
+ }
+}
+
+/**
+ * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files
+ * @pdev: device whose entries we should free
+ *
+ * Cleanup when @pdev is removed from sysfs.
+ */
+void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
+{
+ int rom_size = 0;
+
+ if (!sysfs_initialized)
+ return;
+
+ pci_remove_capabilities_sysfs(pdev);
+
+ if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
+ sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
+ else
+ sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
+
+ pci_remove_resource_files(pdev);
+
+ if (pci_resource_len(pdev, PCI_ROM_RESOURCE))
+ rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
+ else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
+ rom_size = 0x20000;
+
+ if (rom_size && pdev->rom_attr) {
+ sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr);
+ kfree(pdev->rom_attr);
+ }
+
+ pci_remove_firmware_label_files(pdev);
+}
+
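+/*
+ * Devices enumerated before this initcall runs were added while
+ * sysfs_initialized was still clear, so they have no sysfs entries yet;
+ * create them all here. Devices hot-added later get their files from
+ * the regular device-add path.
+ */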
+static int __init pci_sysfs_init(void)
+{
+ struct pci_dev *pdev = NULL;
+ int retval;
+
+ sysfs_initialized = 1;
+ for_each_pci_dev(pdev) {
+ retval = pci_create_sysfs_dev_files(pdev);
+ if (retval) {
+ pci_dev_put(pdev);
+ return retval;
+ }
+ }
+
+ return 0;
+}
+late_initcall(pci_sysfs_init);
+
+static struct attribute *pci_dev_dev_attrs[] = {
+ &vga_attr.attr,
+ NULL,
+};
+
+static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (a == &vga_attr.attr)
+ if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ return 0;
+
+ return a->mode;
+}
+
+static struct attribute *pci_dev_hp_attrs[] = {
+ &dev_remove_attr.attr,
+ &dev_rescan_attr.attr,
+ NULL,
+};
+
+static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (pdev->is_virtfn)
+ return 0;
+
+ return a->mode;
+}
+
+static struct attribute_group pci_dev_hp_attr_group = {
+ .attrs = pci_dev_hp_attrs,
+ .is_visible = pci_dev_hp_attrs_are_visible,
+};
+
+#ifdef CONFIG_PCI_IOV
+static struct attribute *sriov_dev_attrs[] = {
+ &sriov_totalvfs_attr.attr,
+ &sriov_numvfs_attr.attr,
+ NULL,
+};
+
+static umode_t sriov_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+
+ if (!dev_is_pf(dev))
+ return 0;
+
+ return a->mode;
+}
+
+static struct attribute_group sriov_dev_attr_group = {
+ .attrs = sriov_dev_attrs,
+ .is_visible = sriov_attrs_are_visible,
+};
+#endif /* CONFIG_PCI_IOV */
+
+static struct attribute_group pci_dev_attr_group = {
+ .attrs = pci_dev_dev_attrs,
+ .is_visible = pci_dev_attrs_are_visible,
+};
+
+static const struct attribute_group *pci_dev_attr_groups[] = {
+ &pci_dev_attr_group,
+ &pci_dev_hp_attr_group,
+#ifdef CONFIG_PCI_IOV
+ &sriov_dev_attr_group,
+#endif
+ NULL,
+};
+
+struct device_type pci_dev_type = {
+ .groups = pci_dev_attr_groups,
+};
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
new file mode 100644
index 000000000..c44393f26
--- /dev/null
+++ b/drivers/pci/pci.c
@@ -0,0 +1,4635 @@
+/*
+ * PCI Bus Services, see include/linux/pci.h for further explanation.
+ *
+ * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
+ * David Mosberger-Tang
+ *
+ * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/log2.h>
+#include <linux/pci-aspm.h>
+#include <linux/pm_wakeup.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pci_hotplug.h>
+#include <asm-generic/pci-bridge.h>
+#include <asm/setup.h>
+#include "pci.h"
+
+const char *pci_power_names[] = {
+ "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
+};
+EXPORT_SYMBOL_GPL(pci_power_names);
+
+int isa_dma_bridge_buggy;
+EXPORT_SYMBOL(isa_dma_bridge_buggy);
+
+int pci_pci_problems;
+EXPORT_SYMBOL(pci_pci_problems);
+
+unsigned int pci_pm_d3_delay;
+
+static void pci_pme_list_scan(struct work_struct *work);
+
+static LIST_HEAD(pci_pme_list);
+static DEFINE_MUTEX(pci_pme_list_mutex);
+static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
+
+struct pci_pme_device {
+ struct list_head list;
+ struct pci_dev *dev;
+};
+
+#define PME_TIMEOUT 1000 /* msecs between PME checks */
+
+static void pci_dev_d3_sleep(struct pci_dev *dev)
+{
+ unsigned int delay = dev->d3_delay;
+
+ if (delay < pci_pm_d3_delay)
+ delay = pci_pm_d3_delay;
+
+ msleep(delay);
+}
+
+#ifdef CONFIG_PCI_DOMAINS
+int pci_domains_supported = 1;
+#endif
+
+#define DEFAULT_CARDBUS_IO_SIZE (256)
+#define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
+/* pci=cbmemsize=nnM,cbiosize=nn can override this */
+unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
+unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
+
+#define DEFAULT_HOTPLUG_IO_SIZE (256)
+#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
+/* pci=hpmemsize=nnM,hpiosize=nn can override this */
+unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
+unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
+
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
+
+/*
+ * The default CLS is used if the arch didn't set CLS explicitly and not
+ * all PCI devices agree on the same value. The arch can override either
+ * the default or the actual value as it sees fit. Don't forget this is
+ * measured in 32-bit words, not bytes.
+ */
+u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
+u8 pci_cache_line_size;
+
+/*
+ * If we set up a device for bus mastering, we need to check the latency
+ * timer as certain BIOSes forget to set it properly.
+ */
+unsigned int pcibios_max_latency = 255;
+
+/* If set, the PCIe ARI capability will not be used. */
+static bool pcie_ari_disabled;
+
+/**
+ * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
+ * @bus: pointer to PCI bus structure to search
+ *
+ * Given a PCI bus, returns the highest PCI bus number present in the set
+ * including the given PCI bus and its list of child PCI buses.
+ */
+unsigned char pci_bus_max_busnr(struct pci_bus *bus)
+{
+ struct pci_bus *tmp;
+ unsigned char max, n;
+
+ max = bus->busn_res.end;
+ list_for_each_entry(tmp, &bus->children, node) {
+ n = pci_bus_max_busnr(tmp);
+ if (n > max)
+ max = n;
+ }
+ return max;
+}
+EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
+
+#ifdef CONFIG_HAS_IOMEM
+void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
+{
+ struct resource *res = &pdev->resource[bar];
+
+ /*
+ * Make sure the BAR is actually a memory resource, not an IO resource
+ */
+ if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
+ dev_warn(&pdev->dev, "can't ioremap BAR %d: %pR\n", bar, res);
+ return NULL;
+ }
+ return ioremap_nocache(res->start, resource_size(res));
+}
+EXPORT_SYMBOL_GPL(pci_ioremap_bar);
+#endif
+
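+/*
+ * Upper bound on capability-list walks: a malformed list (e.g. one that
+ * loops back on itself on broken hardware) terminates after this many
+ * entries instead of spinning forever.
+ */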
+#define PCI_FIND_CAP_TTL 48
+
+static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
+ u8 pos, int cap, int *ttl)
+{
+ u8 id;
+ u16 ent;
+
+ pci_bus_read_config_byte(bus, devfn, pos, &pos);
+
+ while ((*ttl)--) {
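+		/* Capabilities live above the 64-byte standard header */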
+ if (pos < 0x40)
+ break;
+ pos &= ~3;
+ pci_bus_read_config_word(bus, devfn, pos, &ent);
+
+ id = ent & 0xff;
+ if (id == 0xff)
+ break;
+ if (id == cap)
+ return pos;
+ pos = (ent >> 8);
+ }
+ return 0;
+}
+
+static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
+ u8 pos, int cap)
+{
+ int ttl = PCI_FIND_CAP_TTL;
+
+ return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
+}
+
+int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
+{
+ return __pci_find_next_cap(dev->bus, dev->devfn,
+ pos + PCI_CAP_LIST_NEXT, cap);
+}
+EXPORT_SYMBOL_GPL(pci_find_next_capability);
+
+static int __pci_bus_find_cap_start(struct pci_bus *bus,
+ unsigned int devfn, u8 hdr_type)
+{
+ u16 status;
+
+ pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0;
+
+ switch (hdr_type) {
+ case PCI_HEADER_TYPE_NORMAL:
+ case PCI_HEADER_TYPE_BRIDGE:
+ return PCI_CAPABILITY_LIST;
+ case PCI_HEADER_TYPE_CARDBUS:
+ return PCI_CB_CAPABILITY_LIST;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+/**
+ * pci_find_capability - query for devices' capabilities
+ * @dev: PCI device to query
+ * @cap: capability code
+ *
+ * Tell if a device supports a given PCI capability.
+ * Returns the address of the requested capability structure within the
+ * device's PCI configuration space or 0 in case the device does not
+ * support it. Possible values for @cap:
+ *
+ * %PCI_CAP_ID_PM Power Management
+ * %PCI_CAP_ID_AGP Accelerated Graphics Port
+ * %PCI_CAP_ID_VPD Vital Product Data
+ * %PCI_CAP_ID_SLOTID Slot Identification
+ * %PCI_CAP_ID_MSI Message Signalled Interrupts
+ * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
+ * %PCI_CAP_ID_PCIX PCI-X
+ * %PCI_CAP_ID_EXP PCI Express
+ */
+int pci_find_capability(struct pci_dev *dev, int cap)
+{
+ int pos;
+
+ pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
+ if (pos)
+ pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
+
+ return pos;
+}
+EXPORT_SYMBOL(pci_find_capability);
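+
+/*
+ * Illustrative use of pci_find_capability() (a sketch, not code from
+ * this file): locate a capability, then access its registers relative
+ * to the returned offset, e.g.
+ *
+ *	pos = pci_find_capability(dev, PCI_CAP_ID_PM);
+ *	if (pos)
+ *		pci_read_config_word(dev, pos + PCI_PM_CTRL, &pmcsr);
+ */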
+
+/**
+ * pci_bus_find_capability - query for devices' capabilities
+ * @bus: the PCI bus to query
+ * @devfn: PCI device to query
+ * @cap: capability code
+ *
+ * Like pci_find_capability() but works for pci devices that do not have a
+ * pci_dev structure set up yet.
+ *
+ * Returns the address of the requested capability structure within the
+ * device's PCI configuration space or 0 in case the device does not
+ * support it.
+ */
+int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
+{
+ int pos;
+ u8 hdr_type;
+
+ pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
+
+ pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
+ if (pos)
+ pos = __pci_find_next_cap(bus, devfn, pos, cap);
+
+ return pos;
+}
+EXPORT_SYMBOL(pci_bus_find_capability);
+
+/**
+ * pci_find_next_ext_capability - Find an extended capability
+ * @dev: PCI device to query
+ * @start: address at which to start looking (0 to start at beginning of list)
+ * @cap: capability code
+ *
+ * Returns the address of the next matching extended capability structure
+ * within the device's PCI configuration space or 0 if the device does
+ * not support it. Some capabilities can occur several times, e.g., the
+ * vendor-specific capability, and this provides a way to find them all.
+ */
+int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
+{
+ u32 header;
+ int ttl;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ /* minimum 8 bytes per capability */
+ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
+
+ if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
+ return 0;
+
+ if (start)
+ pos = start;
+
+ if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
+ return 0;
+
+ /*
+ * If we have no capabilities, this is indicated by cap ID,
+ * cap version and next pointer all being 0.
+ */
+ if (header == 0)
+ return 0;
+
+ while (ttl-- > 0) {
+ if (PCI_EXT_CAP_ID(header) == cap && pos != start)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (pos < PCI_CFG_SPACE_SIZE)
+ break;
+
+ if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
+
+/**
+ * pci_find_ext_capability - Find an extended capability
+ * @dev: PCI device to query
+ * @cap: capability code
+ *
+ * Returns the address of the requested extended capability structure
+ * within the device's PCI configuration space or 0 if the device does
+ * not support it. Possible values for @cap:
+ *
+ * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
+ * %PCI_EXT_CAP_ID_VC Virtual Channel
+ * %PCI_EXT_CAP_ID_DSN Device Serial Number
+ * %PCI_EXT_CAP_ID_PWR Power Budgeting
+ */
+int pci_find_ext_capability(struct pci_dev *dev, int cap)
+{
+ return pci_find_next_ext_capability(dev, 0, cap);
+}
+EXPORT_SYMBOL_GPL(pci_find_ext_capability);
+
+static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
+{
+ int rc, ttl = PCI_FIND_CAP_TTL;
+ u8 cap, mask;
+
+ if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
+ mask = HT_3BIT_CAP_MASK;
+ else
+ mask = HT_5BIT_CAP_MASK;
+
+ pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
+ PCI_CAP_ID_HT, &ttl);
+ while (pos) {
+ rc = pci_read_config_byte(dev, pos + 3, &cap);
+ if (rc != PCIBIOS_SUCCESSFUL)
+ return 0;
+
+ if ((cap & mask) == ht_cap)
+ return pos;
+
+ pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
+ pos + PCI_CAP_LIST_NEXT,
+ PCI_CAP_ID_HT, &ttl);
+ }
+
+ return 0;
+}
+
+/**
+ * pci_find_next_ht_capability - query a device's Hypertransport capabilities
+ * @dev: PCI device to query
+ * @pos: Position from which to continue searching
+ * @ht_cap: Hypertransport capability code
+ *
+ * To be used in conjunction with pci_find_ht_capability() to search for
+ * all capabilities matching @ht_cap. @pos should always be a value returned
+ * from pci_find_ht_capability().
+ *
+ * NB. To be 100% safe against broken PCI devices, the caller should take
+ * steps to avoid an infinite loop.
+ */
+int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
+{
+ return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
+}
+EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
+
+/**
+ * pci_find_ht_capability - query a device's Hypertransport capabilities
+ * @dev: PCI device to query
+ * @ht_cap: Hypertransport capability code
+ *
+ * Tell if a device supports a given Hypertransport capability.
+ * Returns an address within the device's PCI configuration space
+ * or 0 in case the device does not support the requested capability.
+ * The address points to the PCI capability, of type PCI_CAP_ID_HT,
+ * which has a Hypertransport capability matching @ht_cap.
+ */
+int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
+{
+ int pos;
+
+ pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
+ if (pos)
+ pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
+
+ return pos;
+}
+EXPORT_SYMBOL_GPL(pci_find_ht_capability);
+
+/**
+ * pci_find_parent_resource - return resource region of parent bus of given region
+ * @dev: PCI device structure whose resources are to be searched
+ * @res: child resource record for which parent is sought
+ *
+ * For given resource region of given device, return the resource
+ * region of parent bus the given region is contained in.
+ */
+struct resource *pci_find_parent_resource(const struct pci_dev *dev,
+ struct resource *res)
+{
+ const struct pci_bus *bus = dev->bus;
+ struct resource *r;
+ int i;
+
+ pci_bus_for_each_resource(bus, r, i) {
+ if (!r)
+ continue;
+ if (res->start && resource_contains(r, res)) {
+
+ /*
+ * If the window is prefetchable but the BAR is
+ * not, the allocator made a mistake.
+ */
+ if (r->flags & IORESOURCE_PREFETCH &&
+ !(res->flags & IORESOURCE_PREFETCH))
+ return NULL;
+
+ /*
+ * If we're below a transparent bridge, there may
+ * be both a positively-decoded aperture and a
+ * subtractively-decoded region that contain the BAR.
+ * We want the positively-decoded one, so this depends
+ * on pci_bus_for_each_resource() giving us those
+ * first.
+ */
+ return r;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(pci_find_parent_resource);
+
+/**
+ * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
+ * @dev: the PCI device to operate on
+ * @pos: config space offset of status word
+ * @mask: mask of bit(s) to care about in status word
+ *
+ * Return 1 when mask bit(s) in status word clear, 0 otherwise.
+ */
+int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
+{
+ int i;
+
+	/*
+	 * Wait for the Transaction Pending bit to clear, backing off
+	 * exponentially (100, 200, then 400 ms) between reads.
+	 */
+ for (i = 0; i < 4; i++) {
+ u16 status;
+ if (i)
+ msleep((1 << (i - 1)) * 100);
+
+ pci_read_config_word(dev, pos, &status);
+ if (!(status & mask))
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
+ * @dev: PCI device to have its BARs restored
+ *
+ * Restore the BAR values for a given device, so as to make it
+ * accessible by its driver.
+ */
+static void pci_restore_bars(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
+ pci_update_resource(dev, i);
+}
+
+static struct pci_platform_pm_ops *pci_platform_pm;
+
+int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
+{
+ if (!ops->is_manageable || !ops->set_state || !ops->choose_state
+ || !ops->sleep_wake)
+ return -EINVAL;
+ pci_platform_pm = ops;
+ return 0;
+}
+
+static inline bool platform_pci_power_manageable(struct pci_dev *dev)
+{
+ return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
+}
+
+static inline int platform_pci_set_power_state(struct pci_dev *dev,
+ pci_power_t t)
+{
+ return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
+}
+
+static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
+{
+ return pci_platform_pm ?
+ pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
+}
+
+static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
+{
+ return pci_platform_pm ?
+ pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
+}
+
+static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
+{
+ return pci_platform_pm ?
+ pci_platform_pm->run_wake(dev, enable) : -ENODEV;
+}
+
+static inline bool platform_pci_need_resume(struct pci_dev *dev)
+{
+ return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
+}
+
+/**
+ * pci_raw_set_power_state - Use PCI PM registers to set the power state of
+ * given PCI device
+ * @dev: PCI device to handle.
+ * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
+ *
+ * RETURN VALUE:
+ * -EINVAL if the requested state is invalid.
+ * -EIO if device does not support PCI PM or its PM capabilities register has a
+ * wrong version, or device doesn't support the requested state.
+ * 0 if device already is in the requested state.
+ * 0 if device's power state has been successfully changed.
+ */
+static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
+{
+ u16 pmcsr;
+ bool need_restore = false;
+
+ /* Check if we're already there */
+ if (dev->current_state == state)
+ return 0;
+
+ if (!dev->pm_cap)
+ return -EIO;
+
+ if (state < PCI_D0 || state > PCI_D3hot)
+ return -EINVAL;
+
+	/*
+	 * Validate the transition: we can always enter D0, but otherwise
+	 * we may only move to a deeper low-power state, never back to a
+	 * shallower one.
+	 */
+ if (state != PCI_D0 && dev->current_state <= PCI_D3cold
+ && dev->current_state > state) {
+ dev_err(&dev->dev, "invalid power transition (from state %d to %d)\n",
+ dev->current_state, state);
+ return -EINVAL;
+ }
+
+ /* check if this device supports the desired state */
+ if ((state == PCI_D1 && !dev->d1_support)
+ || (state == PCI_D2 && !dev->d2_support))
+ return -EIO;
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+ /* If we're (effectively) in D3, force entire word to 0.
+ * This doesn't affect PME_Status, disables PME_En, and
+ * sets PowerState to 0.
+ */
+ switch (dev->current_state) {
+ case PCI_D0:
+ case PCI_D1:
+ case PCI_D2:
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ pmcsr |= state;
+ break;
+ case PCI_D3hot:
+ case PCI_D3cold:
+ case PCI_UNKNOWN: /* Boot-up */
+ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
+ && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
+ need_restore = true;
+ /* Fall-through: force to D0 */
+ default:
+ pmcsr = 0;
+ break;
+ }
+
+ /* enter specified state */
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
+
+	/*
+	 * Mandatory power management transition delays; see
+	 * PCI PM 1.1, sec 5.6.1, table 18.
+	 */
+ if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
+ pci_dev_d3_sleep(dev);
+ else if (state == PCI_D2 || dev->current_state == PCI_D2)
+ udelay(PCI_PM_D2_DELAY);
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+ if (dev->current_state != state && printk_ratelimit())
+ dev_info(&dev->dev, "Refused to change power state, currently in D%d\n",
+ dev->current_state);
+
+ /*
+ * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
+ * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
+ * from D3hot to D0 _may_ perform an internal reset, thereby
+ * going to "D0 Uninitialized" rather than "D0 Initialized".
+ * For example, at least some versions of the 3c905B and the
+ * 3c556B exhibit this behaviour.
+ *
+ * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
+ * devices in a D3hot state at boot. Consequently, we need to
+ * restore at least the BARs so that the device will be
+ * accessible to its driver.
+ */
+ if (need_restore)
+ pci_restore_bars(dev);
+
+ if (dev->bus->self)
+ pcie_aspm_pm_state_change(dev->bus->self);
+
+ return 0;
+}
+
+/**
+ * pci_update_current_state - Read PCI power state of given device from its
+ * PCI PM registers and cache it
+ * @dev: PCI device to handle.
+ * @state: State to cache in case the device doesn't have the PM capability
+ */
+void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
+{
+ if (dev->pm_cap) {
+ u16 pmcsr;
+
+ /*
+ * Configuration space is not accessible for device in
+ * D3cold, so just keep or set D3cold for safety
+ */
+ if (dev->current_state == PCI_D3cold)
+ return;
+ if (state == PCI_D3cold) {
+ dev->current_state = PCI_D3cold;
+ return;
+ }
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+ } else {
+ dev->current_state = state;
+ }
+}
+
+/**
+ * pci_power_up - Put the given device into D0 forcibly
+ * @dev: PCI device to power up
+ */
+void pci_power_up(struct pci_dev *dev)
+{
+ if (platform_pci_power_manageable(dev))
+ platform_pci_set_power_state(dev, PCI_D0);
+
+ pci_raw_set_power_state(dev, PCI_D0);
+ pci_update_current_state(dev, PCI_D0);
+}
+
+/**
+ * pci_platform_power_transition - Use platform to change device power state
+ * @dev: PCI device to handle.
+ * @state: State to put the device into.
+ */
+static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
+{
+ int error;
+
+ if (platform_pci_power_manageable(dev)) {
+ error = platform_pci_set_power_state(dev, state);
+ if (!error)
+ pci_update_current_state(dev, state);
+ } else
+ error = -ENODEV;
+
+ if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
+ dev->current_state = PCI_D0;
+
+ return error;
+}
+
+/**
+ * pci_wakeup - Wake up a PCI device
+ * @pci_dev: Device to handle.
+ * @ign: ignored parameter
+ */
+static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
+{
+ pci_wakeup_event(pci_dev);
+ pm_request_resume(&pci_dev->dev);
+ return 0;
+}
+
+/**
+ * pci_wakeup_bus - Walk given bus and wake up devices on it
+ * @bus: Top bus of the subtree to walk.
+ */
+static void pci_wakeup_bus(struct pci_bus *bus)
+{
+ if (bus)
+ pci_walk_bus(bus, pci_wakeup, NULL);
+}
+
+/**
+ * __pci_start_power_transition - Start power transition of a PCI device
+ * @dev: PCI device to handle.
+ * @state: State to put the device into.
+ */
+static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
+{
+ if (state == PCI_D0) {
+ pci_platform_power_transition(dev, PCI_D0);
+ /*
+ * Mandatory power management transition delays, see
+ * PCI Express Base Specification Revision 2.0 Section
+ * 6.6.1: Conventional Reset. Do not delay for
+ * devices powered on/off by corresponding bridge,
+ * because have already delayed for the bridge.
+ */
+ if (dev->runtime_d3cold) {
+ msleep(dev->d3cold_delay);
+ /*
+ * When powering on a bridge from D3cold, the
+ * whole hierarchy may be powered on into
+ * D0uninitialized state, resume them to give
+ * them a chance to suspend again
+ */
+ pci_wakeup_bus(dev->subordinate);
+ }
+ }
+}
+
+/**
+ * __pci_dev_set_current_state - Set current state of a PCI device
+ * @dev: Device to handle
+ * @data: pointer to state to be set
+ */
+static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
+{
+ pci_power_t state = *(pci_power_t *)data;
+
+ dev->current_state = state;
+ return 0;
+}
+
+/**
+ * __pci_bus_set_current_state - Walk given bus and set current state of devices
+ * @bus: Top bus of the subtree to walk.
+ * @state: state to be set
+ */
+static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
+{
+ if (bus)
+ pci_walk_bus(bus, __pci_dev_set_current_state, &state);
+}
+
+/**
+ * __pci_complete_power_transition - Complete power transition of a PCI device
+ * @dev: PCI device to handle.
+ * @state: State to put the device into.
+ *
+ * This function should not be called directly by device drivers.
+ */
+int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
+{
+ int ret;
+
+ if (state <= PCI_D0)
+ return -EINVAL;
+ ret = pci_platform_power_transition(dev, state);
+	/* Powering off a bridge may power off the whole hierarchy below it */
+ if (!ret && state == PCI_D3cold)
+ __pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
+
+/**
+ * pci_set_power_state - Set the power state of a PCI device
+ * @dev: PCI device to handle.
+ * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
+ *
+ * Transition a device to a new power state, using the platform firmware and/or
+ * the device's PCI PM registers.
+ *
+ * RETURN VALUE:
+ * -EINVAL if the requested state is invalid.
+ * -EIO if device does not support PCI PM or its PM capabilities register has a
+ * wrong version, or device doesn't support the requested state.
+ * 0 if device already is in the requested state.
+ * 0 if device's power state has been successfully changed.
+ */
+int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+{
+ int error;
+
+ /* bound the state we're entering */
+ if (state > PCI_D3cold)
+ state = PCI_D3cold;
+ else if (state < PCI_D0)
+ state = PCI_D0;
+ else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
+ /*
+ * If the device or the parent bridge do not support PCI PM,
+ * ignore the request if we're doing anything other than putting
+ * it into D0 (which would only happen on boot).
+ */
+ return 0;
+
+ /* Check if we're already there */
+ if (dev->current_state == state)
+ return 0;
+
+ __pci_start_power_transition(dev, state);
+
+	/*
+	 * This device is quirked not to be put into D3, so don't put it
+	 * into D3.
+	 */
+ if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
+ return 0;
+
+	/*
+	 * To put the device in D3cold, first put it into D3hot the native
+	 * way, then move it into D3cold via platform ops.
+	 */
+ error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
+ PCI_D3hot : state);
+
+ if (!__pci_complete_power_transition(dev, state))
+ error = 0;
+
+ return error;
+}
+EXPORT_SYMBOL(pci_set_power_state);
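+
+/*
+ * Illustrative use from a legacy driver suspend path (a sketch, not
+ * code from this file):
+ *
+ *	pci_save_state(pdev);
+ *	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ */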
+
+/**
+ * pci_choose_state - Choose the power state of a PCI device
+ * @dev: PCI device to be suspended
+ * @state: target sleep state for the whole system. This is the value
+ * that is passed to the suspend() function.
+ *
+ * Returns PCI power state suitable for given device and given system
+ * message.
+ */
+pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
+{
+ pci_power_t ret;
+
+ if (!dev->pm_cap)
+ return PCI_D0;
+
+ ret = platform_pci_choose_state(dev);
+ if (ret != PCI_POWER_ERROR)
+ return ret;
+
+ switch (state.event) {
+ case PM_EVENT_ON:
+ return PCI_D0;
+ case PM_EVENT_FREEZE:
+ case PM_EVENT_PRETHAW:
+ /* REVISIT both freeze and pre-thaw "should" use D0 */
+ case PM_EVENT_SUSPEND:
+ case PM_EVENT_HIBERNATE:
+ return PCI_D3hot;
+ default:
+ dev_info(&dev->dev, "unrecognized suspend event %d\n",
+ state.event);
+ BUG();
+ }
+ return PCI_D0;
+}
+EXPORT_SYMBOL(pci_choose_state);
+
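+/*
+ * Number of 16-bit PCIe control registers saved/restored below:
+ * DEVCTL, LNKCTL, SLTCTL, RTCTL, DEVCTL2, LNKCTL2, SLTCTL2.
+ */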
+#define PCI_EXP_SAVE_REGS 7
+
+static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
+ u16 cap, bool extended)
+{
+ struct pci_cap_saved_state *tmp;
+
+ hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
+ if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
+ return tmp;
+ }
+ return NULL;
+}
+
+struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
+{
+ return _pci_find_saved_cap(dev, cap, false);
+}
+
+struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
+{
+ return _pci_find_saved_cap(dev, cap, true);
+}
+
+static int pci_save_pcie_state(struct pci_dev *dev)
+{
+ int i = 0;
+ struct pci_cap_saved_state *save_state;
+ u16 *cap;
+
+ if (!pci_is_pcie(dev))
+ return 0;
+
+ save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
+ if (!save_state) {
+ dev_err(&dev->dev, "buffer not found in %s\n", __func__);
+ return -ENOMEM;
+ }
+
+ cap = (u16 *)&save_state->cap.data[0];
+ pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
+ pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
+ pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
+ pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
+ pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
+ pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
+
+ return 0;
+}
+
+static void pci_restore_pcie_state(struct pci_dev *dev)
+{
+ int i = 0;
+ struct pci_cap_saved_state *save_state;
+ u16 *cap;
+
+ save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
+ if (!save_state)
+ return;
+
+ cap = (u16 *)&save_state->cap.data[0];
+ pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
+ pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
+ pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
+ pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
+ pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
+ pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
+ pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
+}
+
+
+static int pci_save_pcix_state(struct pci_dev *dev)
+{
+ int pos;
+ struct pci_cap_saved_state *save_state;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (pos <= 0)
+ return 0;
+
+ save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
+ if (!save_state) {
+ dev_err(&dev->dev, "buffer not found in %s\n", __func__);
+ return -ENOMEM;
+ }
+
+ pci_read_config_word(dev, pos + PCI_X_CMD,
+ (u16 *)save_state->cap.data);
+
+ return 0;
+}
+
+static void pci_restore_pcix_state(struct pci_dev *dev)
+{
+ int i = 0, pos;
+ struct pci_cap_saved_state *save_state;
+ u16 *cap;
+
+ save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
+ pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (!save_state || pos <= 0)
+ return;
+ cap = (u16 *)&save_state->cap.data[0];
+
+ pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
+}
+
+
+/**
+ * pci_save_state - save the PCI configuration space of a device before suspending
+ * @dev: PCI device that we're dealing with
+ */
+int pci_save_state(struct pci_dev *dev)
+{
+ int i;
+ /* XXX: 100% dword access ok here? */
+ for (i = 0; i < 16; i++)
+ pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
+ dev->state_saved = true;
+
+ i = pci_save_pcie_state(dev);
+ if (i != 0)
+ return i;
+
+ i = pci_save_pcix_state(dev);
+ if (i != 0)
+ return i;
+
+ return pci_save_vc_state(dev);
+}
+EXPORT_SYMBOL(pci_save_state);
+
+static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
+ u32 saved_val, int retry)
+{
+ u32 val;
+
+ pci_read_config_dword(pdev, offset, &val);
+ if (val == saved_val)
+ return;
+
+ for (;;) {
+ dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
+ offset, val, saved_val);
+ pci_write_config_dword(pdev, offset, saved_val);
+ if (retry-- <= 0)
+ return;
+
+ pci_read_config_dword(pdev, offset, &val);
+ if (val == saved_val)
+ return;
+
+ mdelay(1);
+ }
+}
+
+static void pci_restore_config_space_range(struct pci_dev *pdev,
+ int start, int end, int retry)
+{
+ int index;
+
+ for (index = end; index >= start; index--)
+ pci_restore_config_dword(pdev, 4 * index,
+ pdev->saved_config_space[index],
+ retry);
+}
+
+static void pci_restore_config_space(struct pci_dev *pdev)
+{
+ if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
+ pci_restore_config_space_range(pdev, 10, 15, 0);
+ /* Restore BARs before the command register. */
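+		/*
+		 * The retry count allows for devices that are slow to
+		 * accept config writes right after a reset or power-up.
+		 */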
+ pci_restore_config_space_range(pdev, 4, 9, 10);
+ pci_restore_config_space_range(pdev, 0, 3, 0);
+ } else {
+ pci_restore_config_space_range(pdev, 0, 15, 0);
+ }
+}
+
+/**
+ * pci_restore_state - Restore the saved state of a PCI device
+ * @dev: PCI device that we're dealing with
+ */
+void pci_restore_state(struct pci_dev *dev)
+{
+ if (!dev->state_saved)
+ return;
+
+ /* PCI Express register must be restored first */
+ pci_restore_pcie_state(dev);
+ pci_restore_ats_state(dev);
+ pci_restore_vc_state(dev);
+
+ pci_restore_config_space(dev);
+
+ pci_restore_pcix_state(dev);
+ pci_restore_msi_state(dev);
+ pci_restore_iov_state(dev);
+
+ dev->state_saved = false;
+}
+EXPORT_SYMBOL(pci_restore_state);
+
+struct pci_saved_state {
+ u32 config_space[16];
+ struct pci_cap_saved_data cap[0];
+};
+
+/**
+ * pci_store_saved_state - Allocate and return an opaque struct containing
+ *			   the device's saved state.
+ * @dev: PCI device that we're dealing with
+ *
+ * Return NULL if no state or error.
+ */
+struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
+{
+ struct pci_saved_state *state;
+ struct pci_cap_saved_state *tmp;
+ struct pci_cap_saved_data *cap;
+ size_t size;
+
+ if (!dev->state_saved)
+ return NULL;
+
+ size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
+
+ hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
+ size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
+
+ state = kzalloc(size, GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ memcpy(state->config_space, dev->saved_config_space,
+ sizeof(state->config_space));
+
+ cap = state->cap;
+ hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
+ size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
+ memcpy(cap, &tmp->cap, len);
+ cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
+ }
+	/* A zero-size cap entry terminates the list */
+
+ return state;
+}
+EXPORT_SYMBOL_GPL(pci_store_saved_state);
+
+/**
+ * pci_load_saved_state - Reload the provided save state into struct pci_dev.
+ * @dev: PCI device that we're dealing with
+ * @state: Saved state returned from pci_store_saved_state()
+ */
+int pci_load_saved_state(struct pci_dev *dev,
+ struct pci_saved_state *state)
+{
+ struct pci_cap_saved_data *cap;
+
+ dev->state_saved = false;
+
+ if (!state)
+ return 0;
+
+ memcpy(dev->saved_config_space, state->config_space,
+ sizeof(state->config_space));
+
+ cap = state->cap;
+ while (cap->size) {
+ struct pci_cap_saved_state *tmp;
+
+ tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
+ if (!tmp || tmp->cap.size != cap->size)
+ return -EINVAL;
+
+ memcpy(tmp->cap.data, cap->data, tmp->cap.size);
+ cap = (struct pci_cap_saved_data *)((u8 *)cap +
+ sizeof(struct pci_cap_saved_data) + cap->size);
+ }
+
+ dev->state_saved = true;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_load_saved_state);
+
+/**
+ * pci_load_and_free_saved_state - Reload the saved state pointed to by @state,
+ * and free the memory allocated for it.
+ * @dev: PCI device that we're dealing with
+ * @state: Pointer to saved state returned from pci_store_saved_state()
+ */
+int pci_load_and_free_saved_state(struct pci_dev *dev,
+ struct pci_saved_state **state)
+{
+ int ret = pci_load_saved_state(dev, *state);
+ kfree(*state);
+ *state = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
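+
+/*
+ * Illustrative pairing (a sketch): a caller that resets a device can
+ * snapshot and reapply its state with
+ *
+ *	struct pci_saved_state *state = pci_store_saved_state(pdev);
+ *	...reset the device...
+ *	pci_load_and_free_saved_state(pdev, &state);
+ *	pci_restore_state(pdev);
+ */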
+
+int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
+{
+ return pci_enable_resources(dev, bars);
+}
+
+static int do_pci_enable_device(struct pci_dev *dev, int bars)
+{
+ int err;
+ struct pci_dev *bridge;
+ u16 cmd;
+ u8 pin;
+
+ err = pci_set_power_state(dev, PCI_D0);
+ if (err < 0 && err != -EIO)
+ return err;
+
+ bridge = pci_upstream_bridge(dev);
+ if (bridge)
+ pcie_aspm_powersave_config_link(bridge);
+
+ err = pcibios_enable_device(dev, bars);
+ if (err < 0)
+ return err;
+ pci_fixup_device(pci_fixup_enable, dev);
+
+ if (dev->msi_enabled || dev->msix_enabled)
+ return 0;
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ if (pin) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & PCI_COMMAND_INTX_DISABLE)
+ pci_write_config_word(dev, PCI_COMMAND,
+ cmd & ~PCI_COMMAND_INTX_DISABLE);
+ }
+
+ return 0;
+}
+
+/**
+ * pci_reenable_device - Resume abandoned device
+ * @dev: PCI device to be resumed
+ *
+ * Note this function is a backend of pci_default_resume and is not supposed
+ * to be called by normal code; write a proper resume handler and use that
+ * instead.
+ */
+int pci_reenable_device(struct pci_dev *dev)
+{
+ if (pci_is_enabled(dev))
+ return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
+ return 0;
+}
+EXPORT_SYMBOL(pci_reenable_device);
+
+static void pci_enable_bridge(struct pci_dev *dev)
+{
+ struct pci_dev *bridge;
+ int retval;
+
+ bridge = pci_upstream_bridge(dev);
+ if (bridge)
+ pci_enable_bridge(bridge);
+
+ if (pci_is_enabled(dev)) {
+ if (!dev->is_busmaster)
+ pci_set_master(dev);
+ return;
+ }
+
+ retval = pci_enable_device(dev);
+ if (retval)
+ dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
+ retval);
+ pci_set_master(dev);
+}
+
+static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
+{
+ struct pci_dev *bridge;
+ int err;
+ int i, bars = 0;
+
+ /*
+ * Power state could be unknown at this point, either due to a fresh
+ * boot or a device removal call. So get the current power state
+ * so that things like MSI message writing will behave as expected
+ * (e.g. if the device really is in D0 at enable time).
+ */
+ if (dev->pm_cap) {
+ u16 pmcsr;
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+ }
+
+ if (atomic_inc_return(&dev->enable_cnt) > 1)
+ return 0; /* already enabled */
+
+ bridge = pci_upstream_bridge(dev);
+ if (bridge)
+ pci_enable_bridge(bridge);
+
+	/* walk all resources except the SR-IOV ones */
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+ if (dev->resource[i].flags & flags)
+ bars |= (1 << i);
+ for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
+ if (dev->resource[i].flags & flags)
+ bars |= (1 << i);
+
+ err = do_pci_enable_device(dev, bars);
+ if (err < 0)
+ atomic_dec(&dev->enable_cnt);
+ return err;
+}
+
+/**
+ * pci_enable_device_io - Initialize a device for use with IO space
+ * @dev: PCI device to be initialized
+ *
+ * Initialize device before it's used by a driver. Ask low-level code
+ * to enable I/O resources. Wake up the device if it was suspended.
+ * Beware, this function can fail.
+ */
+int pci_enable_device_io(struct pci_dev *dev)
+{
+ return pci_enable_device_flags(dev, IORESOURCE_IO);
+}
+EXPORT_SYMBOL(pci_enable_device_io);
+
+/**
+ * pci_enable_device_mem - Initialize a device for use with Memory space
+ * @dev: PCI device to be initialized
+ *
+ * Initialize device before it's used by a driver. Ask low-level code
+ * to enable Memory resources. Wake up the device if it was suspended.
+ * Beware, this function can fail.
+ */
+int pci_enable_device_mem(struct pci_dev *dev)
+{
+ return pci_enable_device_flags(dev, IORESOURCE_MEM);
+}
+EXPORT_SYMBOL(pci_enable_device_mem);
+
+/**
+ * pci_enable_device - Initialize device before it's used by a driver.
+ * @dev: PCI device to be initialized
+ *
+ * Initialize device before it's used by a driver. Ask low-level code
+ * to enable I/O and memory. Wake up the device if it was suspended.
+ * Beware, this function can fail.
+ *
+ * Note we don't actually enable the device many times if we call
+ * this function repeatedly (we just increment the count).
+ */
+int pci_enable_device(struct pci_dev *dev)
+{
+ return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
+}
+EXPORT_SYMBOL(pci_enable_device);
+
+/*
+ * Managed PCI resources. This manages device on/off, intx/msi/msix
+ * on/off and BAR regions. pci_dev itself records msi/msix status, so
+ * there's no need to track it separately. pci_devres is initialized
+ * when a device is enabled using managed PCI device enable interface.
+ */
+struct pci_devres {
+ unsigned int enabled:1;
+ unsigned int pinned:1;
+ unsigned int orig_intx:1;
+ unsigned int restore_intx:1;
+ u32 region_mask;
+};
+
+static void pcim_release(struct device *gendev, void *res)
+{
+ struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
+ struct pci_devres *this = res;
+ int i;
+
+ if (dev->msi_enabled)
+ pci_disable_msi(dev);
+ if (dev->msix_enabled)
+ pci_disable_msix(dev);
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ if (this->region_mask & (1 << i))
+ pci_release_region(dev, i);
+
+ if (this->restore_intx)
+ pci_intx(dev, this->orig_intx);
+
+ if (this->enabled && !this->pinned)
+ pci_disable_device(dev);
+}
+
+static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
+{
+ struct pci_devres *dr, *new_dr;
+
+ dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
+ if (dr)
+ return dr;
+
+ new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
+ if (!new_dr)
+ return NULL;
+ return devres_get(&pdev->dev, new_dr, NULL, NULL);
+}
+
+static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
+{
+ if (pci_is_managed(pdev))
+ return devres_find(&pdev->dev, pcim_release, NULL, NULL);
+ return NULL;
+}
+
+/**
+ * pcim_enable_device - Managed pci_enable_device()
+ * @pdev: PCI device to be initialized
+ *
+ * Managed pci_enable_device().
+ */
+int pcim_enable_device(struct pci_dev *pdev)
+{
+ struct pci_devres *dr;
+ int rc;
+
+ dr = get_pci_dr(pdev);
+ if (unlikely(!dr))
+ return -ENOMEM;
+ if (dr->enabled)
+ return 0;
+
+ rc = pci_enable_device(pdev);
+ if (!rc) {
+ pdev->is_managed = 1;
+ dr->enabled = 1;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(pcim_enable_device);
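+
+/*
+ * Illustrative use from a hypothetical probe routine (sketch only):
+ * resources acquired through the managed interface are released
+ * automatically on driver detach.
+ *
+ *	static int foo_probe(struct pci_dev *pdev,
+ *			     const struct pci_device_id *id)
+ *	{
+ *		int rc = pcim_enable_device(pdev);
+ *		if (rc)
+ *			return rc;
+ *		...
+ *	}
+ */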
+
+/**
+ * pcim_pin_device - Pin managed PCI device
+ * @pdev: PCI device to pin
+ *
+ * Pin managed PCI device @pdev. Pinned device won't be disabled on
+ * driver detach. @pdev must have been enabled with
+ * pcim_enable_device().
+ */
+void pcim_pin_device(struct pci_dev *pdev)
+{
+ struct pci_devres *dr;
+
+ dr = find_pci_dr(pdev);
+ WARN_ON(!dr || !dr->enabled);
+ if (dr)
+ dr->pinned = 1;
+}
+EXPORT_SYMBOL(pcim_pin_device);
+
+/**
+ * pcibios_add_device - provide arch specific hooks when adding device dev
+ * @dev: the PCI device being added
+ *
+ * Permits the platform to provide architecture specific functionality when
+ * devices are added. This is the default implementation. Architecture
+ * implementations can override this.
+ */
+int __weak pcibios_add_device(struct pci_dev *dev)
+{
+ return 0;
+}
+
+/**
+ * pcibios_release_device - provide arch specific hooks when releasing device dev
+ * @dev: the PCI device being released
+ *
+ * Permits the platform to provide architecture specific functionality when
+ * devices are released. This is the default implementation. Architecture
+ * implementations can override this.
+ */
+void __weak pcibios_release_device(struct pci_dev *dev) {}
+
+/**
+ * pcibios_disable_device - disable arch specific PCI resources for device dev
+ * @dev: the PCI device to disable
+ *
+ * Disables architecture specific PCI resources for the device. This
+ * is the default implementation. Architecture implementations can
+ * override this.
+ */
+void __weak pcibios_disable_device(struct pci_dev *dev) {}
+
+/**
+ * pcibios_penalize_isa_irq - penalize an ISA IRQ
+ * @irq: ISA IRQ to penalize
+ * @active: IRQ active or not
+ *
+ * Permits the platform to provide architecture-specific functionality when
+ * penalizing ISA IRQs. This is the default implementation. Architecture
+ * implementations can override this.
+ */
+void __weak pcibios_penalize_isa_irq(int irq, int active) {}
+
+static void do_pci_disable_device(struct pci_dev *dev)
+{
+ u16 pci_command;
+
+ pci_read_config_word(dev, PCI_COMMAND, &pci_command);
+ if (pci_command & PCI_COMMAND_MASTER) {
+ pci_command &= ~PCI_COMMAND_MASTER;
+ pci_write_config_word(dev, PCI_COMMAND, pci_command);
+ }
+
+ pcibios_disable_device(dev);
+}
+
+/**
+ * pci_disable_enabled_device - Disable device without updating enable_cnt
+ * @dev: PCI device to disable
+ *
+ * NOTE: This function is a backend of PCI power management routines and is
+ * not supposed to be called by drivers.
+ */
+void pci_disable_enabled_device(struct pci_dev *dev)
+{
+ if (pci_is_enabled(dev))
+ do_pci_disable_device(dev);
+}
+
+/**
+ * pci_disable_device - Disable PCI device after use
+ * @dev: PCI device to be disabled
+ *
+ * Signal to the system that the PCI device is not in use by the system
+ * anymore. This only involves disabling PCI bus-mastering, if active.
+ *
+ * Note we don't actually disable the device until all callers of
+ * pci_enable_device() have called pci_disable_device().
+ */
+void pci_disable_device(struct pci_dev *dev)
+{
+ struct pci_devres *dr;
+
+ dr = find_pci_dr(dev);
+ if (dr)
+ dr->enabled = 0;
+
+ dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
+ "disabling already-disabled device");
+
+ if (atomic_dec_return(&dev->enable_cnt) != 0)
+ return;
+
+ do_pci_disable_device(dev);
+
+ dev->is_busmaster = 0;
+}
+EXPORT_SYMBOL(pci_disable_device);
+
+/**
+ * pcibios_set_pcie_reset_state - set reset state for device dev
+ * @dev: the PCIe device to reset
+ * @state: Reset state to enter into
+ *
+ * Sets the PCIe reset state for the device. This is the default
+ * implementation. Architecture implementations can override this.
+ */
+int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
+ enum pcie_reset_state state)
+{
+ return -EINVAL;
+}
+
+/**
+ * pci_set_pcie_reset_state - set reset state for device dev
+ * @dev: the PCIe device to reset
+ * @state: Reset state to enter into
+ *
+ * Sets the PCI reset state for the device.
+ */
+int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
+{
+ return pcibios_set_pcie_reset_state(dev, state);
+}
+EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
+
+/**
+ * pci_check_pme_status - Check if given device has generated PME.
+ * @dev: Device to check.
+ *
+ * Check the PME status of the device and if set, clear it and clear PME enable
+ * (if set). Return 'true' if PME status and PME enable were both set or
+ * 'false' otherwise.
+ */
+bool pci_check_pme_status(struct pci_dev *dev)
+{
+ int pmcsr_pos;
+ u16 pmcsr;
+ bool ret = false;
+
+ if (!dev->pm_cap)
+ return false;
+
+ pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
+ pci_read_config_word(dev, pmcsr_pos, &pmcsr);
+ if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
+ return false;
+
+ /* Clear PME status. */
+ pmcsr |= PCI_PM_CTRL_PME_STATUS;
+ if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
+ /* Disable PME to avoid interrupt flood. */
+ pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
+ ret = true;
+ }
+
+ pci_write_config_word(dev, pmcsr_pos, pmcsr);
+
+ return ret;
+}
+
+/**
+ * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
+ * @dev: Device to handle.
+ * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
+ *
+ * Check if @dev has generated PME and queue a resume request for it in that
+ * case.
+ */
+static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
+{
+ if (pme_poll_reset && dev->pme_poll)
+ dev->pme_poll = false;
+
+ if (pci_check_pme_status(dev)) {
+ pci_wakeup_event(dev);
+ pm_request_resume(&dev->dev);
+ }
+ return 0;
+}
+
+/**
+ * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
+ * @bus: Top bus of the subtree to walk.
+ */
+void pci_pme_wakeup_bus(struct pci_bus *bus)
+{
+ if (bus)
+ pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
+}
+
+
+/**
+ * pci_pme_capable - check the capability of PCI device to generate PME#
+ * @dev: PCI device to handle.
+ * @state: PCI state from which device will issue PME#.
+ */
+bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
+{
+ if (!dev->pm_cap)
+ return false;
+
+ return !!(dev->pme_support & (1 << state));
+}
+EXPORT_SYMBOL(pci_pme_capable);
+
+static void pci_pme_list_scan(struct work_struct *work)
+{
+ struct pci_pme_device *pme_dev, *n;
+
+ mutex_lock(&pci_pme_list_mutex);
+ list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
+ if (pme_dev->dev->pme_poll) {
+ struct pci_dev *bridge;
+
+ bridge = pme_dev->dev->bus->self;
+ /*
+			 * If the bridge is in a low-power state, the
+			 * configuration space of its subordinate devices
+			 * may not be accessible.
+ */
+ if (bridge && bridge->current_state != PCI_D0)
+ continue;
+ pci_pme_wakeup(pme_dev->dev, NULL);
+ } else {
+ list_del(&pme_dev->list);
+ kfree(pme_dev);
+ }
+ }
+ if (!list_empty(&pci_pme_list))
+ schedule_delayed_work(&pci_pme_work,
+ msecs_to_jiffies(PME_TIMEOUT));
+ mutex_unlock(&pci_pme_list_mutex);
+}
+
+/**
+ * pci_pme_active - enable or disable PCI device's PME# function
+ * @dev: PCI device to handle.
+ * @enable: 'true' to enable PME# generation; 'false' to disable it.
+ *
+ * The caller must verify that the device is capable of generating PME# before
+ * calling this function with @enable equal to 'true'.
+ */
+void pci_pme_active(struct pci_dev *dev, bool enable)
+{
+ u16 pmcsr;
+
+ if (!dev->pme_support)
+ return;
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ /* Clear PME_Status by writing 1 to it and enable PME# */
+ pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
+ if (!enable)
+ pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
+
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
+
+ /*
+ * PCI (as opposed to PCIe) PME requires that the device have
+ * its PME# line hooked up correctly. Not all hardware vendors
+ * do this, so the PME never gets delivered and the device
+ * remains asleep. The easiest way around this is to
+ * periodically walk the list of suspended devices and check
+ * whether any have their PME flag set. The assumption is that
+ * we'll wake up often enough anyway that this won't be a huge
+ * hit, and the power savings from the devices will still be a
+ * win.
+ *
+ * Although PCIe uses in-band PME message instead of PME# line
+ * to report PME, PME does not work for some PCIe devices in
+ * reality. For example, there are devices that set their PME
+ * status bits, but don't really bother to send a PME message;
+ * there are PCI Express Root Ports that don't bother to
+ * trigger interrupts when they receive PME messages from the
+ * devices below. So PME poll is used for PCIe devices too.
+ */
+
+ if (dev->pme_poll) {
+ struct pci_pme_device *pme_dev;
+ if (enable) {
+ pme_dev = kmalloc(sizeof(struct pci_pme_device),
+ GFP_KERNEL);
+ if (!pme_dev) {
+ dev_warn(&dev->dev, "can't enable PME#\n");
+ return;
+ }
+ pme_dev->dev = dev;
+ mutex_lock(&pci_pme_list_mutex);
+ list_add(&pme_dev->list, &pci_pme_list);
+ if (list_is_singular(&pci_pme_list))
+ schedule_delayed_work(&pci_pme_work,
+ msecs_to_jiffies(PME_TIMEOUT));
+ mutex_unlock(&pci_pme_list_mutex);
+ } else {
+ mutex_lock(&pci_pme_list_mutex);
+ list_for_each_entry(pme_dev, &pci_pme_list, list) {
+ if (pme_dev->dev == dev) {
+ list_del(&pme_dev->list);
+ kfree(pme_dev);
+ break;
+ }
+ }
+ mutex_unlock(&pci_pme_list_mutex);
+ }
+ }
+
+ dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
+}
+EXPORT_SYMBOL(pci_pme_active);
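+
+/*
+ * Usage sketch (illustrative only, not part of this file): a hypothetical
+ * driver arming PME# before entering D3hot, pairing pci_pme_capable() with
+ * pci_pme_active():
+ *
+ *	if (pci_pme_capable(pdev, PCI_D3hot))
+ *		pci_pme_active(pdev, true);
+ *
+ * Most drivers should use the higher-level pci_enable_wake() helpers, which
+ * also invoke the platform hooks.
+ */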
+
+/**
+ * __pci_enable_wake - enable PCI device as wakeup event source
+ * @dev: PCI device affected
+ * @state: PCI state from which device will issue wakeup events
+ * @runtime: True if the events are to be generated at run time
+ * @enable: True to enable event generation; false to disable
+ *
+ * This enables the device as a wakeup event source, or disables it.
+ * When such events involve platform-specific hooks, those hooks are
+ * called automatically by this routine.
+ *
+ * Devices with legacy power management (no standard PCI PM capabilities)
+ * always require such platform hooks.
+ *
+ * RETURN VALUE:
+ * 0 is returned on success.
+ * -EINVAL is returned if the device is not supposed to wake up the system.
+ * An error code depending on the platform is returned if both the platform
+ * and the native mechanism fail to enable the generation of wake-up events.
+ */
+int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
+ bool runtime, bool enable)
+{
+ int ret = 0;
+
+ if (enable && !runtime && !device_may_wakeup(&dev->dev))
+ return -EINVAL;
+
+ /* Don't do the same thing twice in a row for one device. */
+ if (!!enable == !!dev->wakeup_prepared)
+ return 0;
+
+ /*
+ * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
+ * Anderson we should be doing PME# wake enable followed by ACPI wake
+ * enable. To disable wake-up we call the platform first, for symmetry.
+ */
+
+ if (enable) {
+ int error;
+
+ if (pci_pme_capable(dev, state))
+ pci_pme_active(dev, true);
+ else
+ ret = 1;
+ error = runtime ? platform_pci_run_wake(dev, true) :
+ platform_pci_sleep_wake(dev, true);
+ if (ret)
+ ret = error;
+ if (!ret)
+ dev->wakeup_prepared = true;
+ } else {
+ if (runtime)
+ platform_pci_run_wake(dev, false);
+ else
+ platform_pci_sleep_wake(dev, false);
+ pci_pme_active(dev, false);
+ dev->wakeup_prepared = false;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(__pci_enable_wake);
+
+/**
+ * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
+ * @dev: PCI device to prepare
+ * @enable: True to enable wake-up event generation; false to disable
+ *
+ * Many drivers want the device to wake up the system from D3_hot or D3_cold
+ * and this function allows them to set that up cleanly - pci_enable_wake()
+ * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
+ * ordering constraints.
+ *
+ * This function only returns error code if the device is not capable of
+ * generating PME# from both D3_hot and D3_cold, and the platform is unable to
+ * enable wake-up power for it.
+ */
+int pci_wake_from_d3(struct pci_dev *dev, bool enable)
+{
+ return pci_pme_capable(dev, PCI_D3cold) ?
+ pci_enable_wake(dev, PCI_D3cold, enable) :
+ pci_enable_wake(dev, PCI_D3hot, enable);
+}
+EXPORT_SYMBOL(pci_wake_from_d3);
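+
+/*
+ * Usage sketch (illustrative; foo_suspend() is hypothetical): a driver
+ * enabling wake-up, e.g. Wake-on-LAN, from D3 in its legacy suspend path:
+ *
+ *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
+ *	{
+ *		pci_save_state(pdev);
+ *		pci_wake_from_d3(pdev, true);
+ *		return pci_set_power_state(pdev, PCI_D3hot);
+ *	}
+ */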
+
+/**
+ * pci_target_state - find an appropriate low power state for a given PCI dev
+ * @dev: PCI device
+ *
+ * Use underlying platform code to find a supported low power state for @dev.
+ * If the platform can't manage @dev, return the deepest state from which it
+ * can generate wake events, based on any available PME info.
+ */
+static pci_power_t pci_target_state(struct pci_dev *dev)
+{
+ pci_power_t target_state = PCI_D3hot;
+
+ if (platform_pci_power_manageable(dev)) {
+ /*
+ * Call the platform to choose the target state of the device
+ * and enable wake-up from this state if supported.
+ */
+ pci_power_t state = platform_pci_choose_state(dev);
+
+ switch (state) {
+ case PCI_POWER_ERROR:
+ case PCI_UNKNOWN:
+ break;
+ case PCI_D1:
+ case PCI_D2:
+ if (pci_no_d1d2(dev))
+ break;
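+ /* fall through to accept the platform-chosen state */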
+ default:
+ target_state = state;
+ }
+ } else if (!dev->pm_cap) {
+ target_state = PCI_D0;
+ } else if (device_may_wakeup(&dev->dev)) {
+ /*
+ * Find the deepest state from which the device can generate
+ * wake-up events, make it the target state and enable device
+ * to generate PME#.
+ */
+ if (dev->pme_support) {
+ while (target_state
+ && !(dev->pme_support & (1 << target_state)))
+ target_state--;
+ }
+ }
+
+ return target_state;
+}
+
+/**
+ * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
+ * @dev: Device to handle.
+ *
+ * Choose the power state appropriate for the device depending on whether
+ * it can wake up the system and/or is power manageable by the platform
+ * (PCI_D3hot is the default) and put the device into that state.
+ */
+int pci_prepare_to_sleep(struct pci_dev *dev)
+{
+ pci_power_t target_state = pci_target_state(dev);
+ int error;
+
+ if (target_state == PCI_POWER_ERROR)
+ return -EIO;
+
+ pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
+
+ error = pci_set_power_state(dev, target_state);
+
+ if (error)
+ pci_enable_wake(dev, target_state, false);
+
+ return error;
+}
+EXPORT_SYMBOL(pci_prepare_to_sleep);
+
+/**
+ * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
+ * @dev: Device to handle.
+ *
+ * Disable device's system wake-up capability and put it into D0.
+ */
+int pci_back_from_sleep(struct pci_dev *dev)
+{
+ pci_enable_wake(dev, PCI_D0, false);
+ return pci_set_power_state(dev, PCI_D0);
+}
+EXPORT_SYMBOL(pci_back_from_sleep);
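+
+/*
+ * Usage sketch (illustrative; the bar_*() callbacks are hypothetical):
+ * pci_prepare_to_sleep() and pci_back_from_sleep() used as a matched pair
+ * in legacy suspend/resume callbacks:
+ *
+ *	static int bar_suspend(struct pci_dev *pdev, pm_message_t state)
+ *	{
+ *		pci_save_state(pdev);
+ *		return pci_prepare_to_sleep(pdev);
+ *	}
+ *
+ *	static int bar_resume(struct pci_dev *pdev)
+ *	{
+ *		pci_back_from_sleep(pdev);
+ *		pci_restore_state(pdev);
+ *		return 0;
+ *	}
+ */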
+
+/**
+ * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
+ * @dev: PCI device being suspended.
+ *
+ * Prepare @dev to generate wake-up events at run time and put it into a low
+ * power state.
+ */
+int pci_finish_runtime_suspend(struct pci_dev *dev)
+{
+ pci_power_t target_state = pci_target_state(dev);
+ int error;
+
+ if (target_state == PCI_POWER_ERROR)
+ return -EIO;
+
+ dev->runtime_d3cold = target_state == PCI_D3cold;
+
+ __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
+
+ error = pci_set_power_state(dev, target_state);
+
+ if (error) {
+ __pci_enable_wake(dev, target_state, true, false);
+ dev->runtime_d3cold = false;
+ }
+
+ return error;
+}
+
+/**
+ * pci_dev_run_wake - Check if device can generate run-time wake-up events.
+ * @dev: Device to check.
+ *
+ * Return true if the device itself is capable of generating wake-up events
+ * (through the platform or using the native PCIe PME) or if the device supports
+ * PME and one of its upstream bridges can generate wake-up events.
+ */
+bool pci_dev_run_wake(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->bus;
+
+ if (device_run_wake(&dev->dev))
+ return true;
+
+ if (!dev->pme_support)
+ return false;
+
+ while (bus->parent) {
+ struct pci_dev *bridge = bus->self;
+
+ if (device_run_wake(&bridge->dev))
+ return true;
+
+ bus = bus->parent;
+ }
+
+ /* We have reached the root bus. */
+ if (bus->bridge)
+ return device_run_wake(bus->bridge);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(pci_dev_run_wake);
+
+/**
+ * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
+ * @pci_dev: Device to check.
+ *
+ * Return 'true' if the device is runtime-suspended, does not have to be
+ * reconfigured due to wakeup-setting differences between system and runtime
+ * suspend, and its current power state is suitable for the upcoming (system)
+ * transition.
+ */
+bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
+{
+ struct device *dev = &pci_dev->dev;
+
+ if (!pm_runtime_suspended(dev)
+ || (device_can_wakeup(dev) && !device_may_wakeup(dev))
+ || platform_pci_need_resume(pci_dev))
+ return false;
+
+ return pci_target_state(pci_dev) == pci_dev->current_state;
+}
+
+void pci_config_pm_runtime_get(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+
+ if (parent)
+ pm_runtime_get_sync(parent);
+ pm_runtime_get_noresume(dev);
+ /*
+ * pdev->current_state is set to PCI_D3cold during suspending,
+ * so wait until suspending completes
+ */
+ pm_runtime_barrier(dev);
+ /*
+ * Only need to resume devices in D3cold, because config
+ * registers are still accessible for devices suspended but
+ * not in D3cold.
+ */
+ if (pdev->current_state == PCI_D3cold)
+ pm_runtime_resume(dev);
+}
+
+void pci_config_pm_runtime_put(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+
+ pm_runtime_put(dev);
+ if (parent)
+ pm_runtime_put_sync(parent);
+}
+
+/**
+ * pci_pm_init - Initialize PM functions of given PCI device
+ * @dev: PCI device to handle.
+ */
+void pci_pm_init(struct pci_dev *dev)
+{
+ int pm;
+ u16 pmc;
+
+ pm_runtime_forbid(&dev->dev);
+ pm_runtime_set_active(&dev->dev);
+ pm_runtime_enable(&dev->dev);
+ device_enable_async_suspend(&dev->dev);
+ dev->wakeup_prepared = false;
+
+ dev->pm_cap = 0;
+ dev->pme_support = 0;
+
+ /* find PCI PM capability in list */
+ pm = pci_find_capability(dev, PCI_CAP_ID_PM);
+ if (!pm)
+ return;
+ /* Check device's ability to generate PME# */
+ pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
+
+ if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
+ dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
+ pmc & PCI_PM_CAP_VER_MASK);
+ return;
+ }
+
+ dev->pm_cap = pm;
+ dev->d3_delay = PCI_PM_D3_WAIT;
+ dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
+ dev->d3cold_allowed = true;
+
+ dev->d1_support = false;
+ dev->d2_support = false;
+ if (!pci_no_d1d2(dev)) {
+ if (pmc & PCI_PM_CAP_D1)
+ dev->d1_support = true;
+ if (pmc & PCI_PM_CAP_D2)
+ dev->d2_support = true;
+
+ if (dev->d1_support || dev->d2_support)
+ dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
+ dev->d1_support ? " D1" : "",
+ dev->d2_support ? " D2" : "");
+ }
+
+ pmc &= PCI_PM_CAP_PME_MASK;
+ if (pmc) {
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "PME# supported from%s%s%s%s%s\n",
+ (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
+ (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
+ (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
+ (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
+ (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
+ dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
+ dev->pme_poll = true;
+ /*
+ * Make the device's PM flags reflect the wake-up capability, but
+ * let user space enable it to wake up the system as needed.
+ */
+ device_set_wakeup_capable(&dev->dev, true);
+ /* Disable the PME# generation functionality */
+ pci_pme_active(dev, false);
+ }
+}
+
+static void pci_add_saved_cap(struct pci_dev *pci_dev,
+ struct pci_cap_saved_state *new_cap)
+{
+ hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
+}
+
+/**
+ * _pci_add_cap_save_buffer - allocate buffer for saving given
+ * capability registers
+ * @dev: the PCI device
+ * @cap: the capability to allocate the buffer for
+ * @extended: Standard or Extended capability ID
+ * @size: requested size of the buffer
+ */
+static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
+ bool extended, unsigned int size)
+{
+ int pos;
+ struct pci_cap_saved_state *save_state;
+
+ if (extended)
+ pos = pci_find_ext_capability(dev, cap);
+ else
+ pos = pci_find_capability(dev, cap);
+
+ if (pos <= 0)
+ return 0;
+
+ save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
+ if (!save_state)
+ return -ENOMEM;
+
+ save_state->cap.cap_nr = cap;
+ save_state->cap.cap_extended = extended;
+ save_state->cap.size = size;
+ pci_add_saved_cap(dev, save_state);
+
+ return 0;
+}
+
+int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
+{
+ return _pci_add_cap_save_buffer(dev, cap, false, size);
+}
+
+int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
+{
+ return _pci_add_cap_save_buffer(dev, cap, true, size);
+}
+
+/**
+ * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
+ * @dev: the PCI device
+ */
+void pci_allocate_cap_save_buffers(struct pci_dev *dev)
+{
+ int error;
+
+ error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
+ PCI_EXP_SAVE_REGS * sizeof(u16));
+ if (error)
+ dev_err(&dev->dev,
+ "unable to preallocate PCI Express save buffer\n");
+
+ error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
+ if (error)
+ dev_err(&dev->dev,
+ "unable to preallocate PCI-X save buffer\n");
+
+ pci_allocate_vc_save_buffers(dev);
+}
+
+void pci_free_cap_save_buffers(struct pci_dev *dev)
+{
+ struct pci_cap_saved_state *tmp;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
+ kfree(tmp);
+}
+
+/**
+ * pci_configure_ari - enable or disable ARI forwarding
+ * @dev: the PCI device
+ *
+ * If @dev and its upstream bridge both support ARI, enable ARI in the
+ * bridge. Otherwise, disable ARI in the bridge.
+ */
+void pci_configure_ari(struct pci_dev *dev)
+{
+ u32 cap;
+ struct pci_dev *bridge;
+
+ if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
+ return;
+
+ bridge = dev->bus->self;
+ if (!bridge)
+ return;
+
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
+ if (!(cap & PCI_EXP_DEVCAP2_ARI))
+ return;
+
+ if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
+ pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ARI);
+ bridge->ari_enabled = 1;
+ } else {
+ pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ARI);
+ bridge->ari_enabled = 0;
+ }
+}
+
+static int pci_acs_enable;
+
+/**
+ * pci_request_acs - ask for ACS to be enabled if supported
+ */
+void pci_request_acs(void)
+{
+ pci_acs_enable = 1;
+}
+
+/**
+ * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
+ * @dev: the PCI device
+ */
+static int pci_std_enable_acs(struct pci_dev *dev)
+{
+ int pos;
+ u16 cap;
+ u16 ctrl;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
+ pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
+
+ /* Source Validation */
+ ctrl |= (cap & PCI_ACS_SV);
+
+ /* P2P Request Redirect */
+ ctrl |= (cap & PCI_ACS_RR);
+
+ /* P2P Completion Redirect */
+ ctrl |= (cap & PCI_ACS_CR);
+
+ /* Upstream Forwarding */
+ ctrl |= (cap & PCI_ACS_UF);
+
+ pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+
+ return 0;
+}
+
+/**
+ * pci_enable_acs - enable ACS if the hardware supports it
+ * @dev: the PCI device
+ */
+void pci_enable_acs(struct pci_dev *dev)
+{
+ if (!pci_acs_enable)
+ return;
+
+ if (!pci_std_enable_acs(dev))
+ return;
+
+ pci_dev_specific_enable_acs(dev);
+}
+
+static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
+{
+ int pos;
+ u16 cap, ctrl;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return false;
+
+ /*
+ * Except for egress control, capabilities are either required
+ * or only required if controllable. Features missing from the
+ * capability field can therefore be assumed as hard-wired enabled.
+ */
+ pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
+ acs_flags &= (cap | PCI_ACS_EC);
+
+ pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
+ return (ctrl & acs_flags) == acs_flags;
+}
+
+/**
+ * pci_acs_enabled - test ACS against required flags for a given device
+ * @pdev: device to test
+ * @acs_flags: required PCI ACS flags
+ *
+ * Return true if the device supports the provided flags. Automatically
+ * filters out flags that are not implemented on multifunction devices.
+ *
+ * Note that this interface checks the effective ACS capabilities of the
+ * device rather than the actual capabilities. For instance, most single
+ * function endpoints are not required to support ACS because they have no
+ * opportunity for peer-to-peer access. We therefore return 'true'
+ * regardless of whether the device exposes an ACS capability. This makes
+ * it much easier for callers of this function to ignore the actual type
+ * or topology of the device when testing ACS support.
+ */
+bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
+{
+ int ret;
+
+ ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
+ if (ret >= 0)
+ return ret > 0;
+
+ /*
+ * Conventional PCI and PCI-X devices never support ACS, either
+ * effectively or actually. The shared bus topology implies that
+ * any device on the bus can receive or snoop DMA.
+ */
+ if (!pci_is_pcie(pdev))
+ return false;
+
+ switch (pci_pcie_type(pdev)) {
+ /*
+ * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
+ * but since their primary interface is PCI/X, we conservatively
+ * handle them as we would a non-PCIe device.
+ */
+ case PCI_EXP_TYPE_PCIE_BRIDGE:
+ /*
+ * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
+ * applicable... must never implement an ACS Extended Capability...".
+ * This seems arbitrary, but we take a conservative interpretation
+ * of this statement.
+ */
+ case PCI_EXP_TYPE_PCI_BRIDGE:
+ case PCI_EXP_TYPE_RC_EC:
+ return false;
+ /*
+ * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
+ * implement ACS in order to indicate their peer-to-peer capabilities,
+ * regardless of whether they are single- or multi-function devices.
+ */
+ case PCI_EXP_TYPE_DOWNSTREAM:
+ case PCI_EXP_TYPE_ROOT_PORT:
+ return pci_acs_flags_enabled(pdev, acs_flags);
+ /*
+ * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
+ * implemented by the remaining PCIe types to indicate peer-to-peer
+ * capabilities, but only when they are part of a multifunction
+ * device. The footnote for section 6.12 indicates the specific
+ * PCIe types included here.
+ */
+ case PCI_EXP_TYPE_ENDPOINT:
+ case PCI_EXP_TYPE_UPSTREAM:
+ case PCI_EXP_TYPE_LEG_END:
+ case PCI_EXP_TYPE_RC_END:
+ if (!pdev->multifunction)
+ break;
+
+ return pci_acs_flags_enabled(pdev, acs_flags);
+ }
+
+ /*
+ * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
+ * to single function devices with the exception of downstream ports.
+ */
+ return true;
+}
+
+/**
+ * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
+ * @start: starting downstream device
+ * @end: ending upstream device or NULL to search to the root bus
+ * @acs_flags: required flags
+ *
+ * Walk up a device tree from start to end testing PCI ACS support. If
+ * any step along the way does not support the required flags, return false.
+ */
+bool pci_acs_path_enabled(struct pci_dev *start,
+ struct pci_dev *end, u16 acs_flags)
+{
+ struct pci_dev *pdev, *parent = start;
+
+ do {
+ pdev = parent;
+
+ if (!pci_acs_enabled(pdev, acs_flags))
+ return false;
+
+ if (pci_is_root_bus(pdev->bus))
+ return (end == NULL);
+
+ parent = pdev->bus->self;
+ } while (pdev != end);
+
+ return true;
+}
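+
+/*
+ * Usage sketch (illustrative only): an IOMMU-grouping style check that every
+ * bridge between a device and the root redirects peer-to-peer traffic
+ * upstream, so DMA cannot bypass the IOMMU:
+ *
+ *	bool isolated = pci_acs_path_enabled(pdev, NULL,
+ *					     PCI_ACS_SV | PCI_ACS_RR |
+ *					     PCI_ACS_CR | PCI_ACS_UF);
+ */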
+
+/**
+ * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
+ * @dev: the PCI device
+ * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
+ *
+ * Perform INTx swizzling for a device behind one level of bridge. This is
+ * required by section 9.1 of the PCI-to-PCI bridge specification for devices
+ * behind bridges on add-in cards. For devices with ARI enabled, the slot
+ * number is always 0 (see the Implementation Note in section 2.2.8.1 of
+ * the PCI Express Base Specification, Revision 2.1)
+ */
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
+{
+ int slot;
+
+ if (pci_ari_enabled(dev->bus))
+ slot = 0;
+ else
+ slot = PCI_SLOT(dev->devfn);
+
+ return (((pin - 1) + slot) % 4) + 1;
+}
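+
+/*
+ * Worked example: a device in slot 2 behind one bridge, asserting INTB
+ * (pin 2), swizzles to ((2 - 1) + 2) % 4 + 1 = 4, i.e. INTD on the
+ * bridge's primary side.
+ */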
+
+int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
+{
+ u8 pin;
+
+ pin = dev->pin;
+ if (!pin)
+ return -1;
+
+ while (!pci_is_root_bus(dev->bus)) {
+ pin = pci_swizzle_interrupt_pin(dev, pin);
+ dev = dev->bus->self;
+ }
+ *bridge = dev;
+ return pin;
+}
+
+/**
+ * pci_common_swizzle - swizzle INTx all the way to root bridge
+ * @dev: the PCI device
+ * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
+ *
+ * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
+ * bridges all the way up to a PCI root bus.
+ */
+u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ u8 pin = *pinp;
+
+ while (!pci_is_root_bus(dev->bus)) {
+ pin = pci_swizzle_interrupt_pin(dev, pin);
+ dev = dev->bus->self;
+ }
+ *pinp = pin;
+ return PCI_SLOT(dev->devfn);
+}
+EXPORT_SYMBOL_GPL(pci_common_swizzle);
+
+/**
+ * pci_release_region - Release a PCI BAR
+ * @pdev: PCI device whose resources were previously reserved by pci_request_region
+ * @bar: BAR to release
+ *
+ * Releases the PCI I/O and memory resources previously reserved by a
+ * successful call to pci_request_region. Call this function only
+ * after all use of the PCI regions has ceased.
+ */
+void pci_release_region(struct pci_dev *pdev, int bar)
+{
+ struct pci_devres *dr;
+
+ if (pci_resource_len(pdev, bar) == 0)
+ return;
+ if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
+ release_region(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
+ else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
+ release_mem_region(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
+
+ dr = find_pci_dr(pdev);
+ if (dr)
+ dr->region_mask &= ~(1 << bar);
+}
+EXPORT_SYMBOL(pci_release_region);
+
+/**
+ * __pci_request_region - Reserve PCI I/O and memory resource
+ * @pdev: PCI device whose resources are to be reserved
+ * @bar: BAR to be reserved
+ * @res_name: Name to be associated with resource.
+ * @exclusive: whether the region access is exclusive or not
+ *
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as
+ * being reserved by owner @res_name. Do not access any
+ * address inside the PCI regions unless this call returns
+ * successfully.
+ *
+ * If @exclusive is set, then the region is marked so that userspace
+ * is explicitly not allowed to map the resource via /dev/mem or
+ * sysfs MMIO access.
+ *
+ * Returns 0 on success, or %EBUSY on error. A warning
+ * message is also printed on failure.
+ */
+static int __pci_request_region(struct pci_dev *pdev, int bar,
+ const char *res_name, int exclusive)
+{
+ struct pci_devres *dr;
+
+ if (pci_resource_len(pdev, bar) == 0)
+ return 0;
+
+ if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
+ if (!request_region(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar), res_name))
+ goto err_out;
+ } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
+ if (!__request_mem_region(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar), res_name,
+ exclusive))
+ goto err_out;
+ }
+
+ dr = find_pci_dr(pdev);
+ if (dr)
+ dr->region_mask |= 1 << bar;
+
+ return 0;
+
+err_out:
+ dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
+ &pdev->resource[bar]);
+ return -EBUSY;
+}
+
+/**
+ * pci_request_region - Reserve PCI I/O and memory resource
+ * @pdev: PCI device whose resources are to be reserved
+ * @bar: BAR to be reserved
+ * @res_name: Name to be associated with resource
+ *
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as
+ * being reserved by owner @res_name. Do not access any
+ * address inside the PCI regions unless this call returns
+ * successfully.
+ *
+ * Returns 0 on success, or %EBUSY on error. A warning
+ * message is also printed on failure.
+ */
+int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
+{
+ return __pci_request_region(pdev, bar, res_name, 0);
+}
+EXPORT_SYMBOL(pci_request_region);
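+
+/*
+ * Usage sketch (illustrative; BAR number and error handling abridged):
+ * reserving and mapping a single memory BAR:
+ *
+ *	void __iomem *regs;
+ *
+ *	if (pci_request_region(pdev, 0, "foo_driver"))
+ *		return -EBUSY;
+ *	regs = pci_iomap(pdev, 0, 0);
+ *	if (!regs) {
+ *		pci_release_region(pdev, 0);
+ *		return -ENOMEM;
+ *	}
+ */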
+
+/**
+ * pci_request_region_exclusive - Reserve PCI I/O and memory resource
+ * @pdev: PCI device whose resources are to be reserved
+ * @bar: BAR to be reserved
+ * @res_name: Name to be associated with resource.
+ *
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as
+ * being reserved by owner @res_name. Do not access any
+ * address inside the PCI regions unless this call returns
+ * successfully.
+ *
+ * Returns 0 on success, or %EBUSY on error. A warning
+ * message is also printed on failure.
+ *
+ * The key difference that _exclusive makes is that userspace is
+ * explicitly not allowed to map the resource via /dev/mem or
+ * sysfs.
+ */
+int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
+ const char *res_name)
+{
+ return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
+}
+EXPORT_SYMBOL(pci_request_region_exclusive);
+
+/**
+ * pci_release_selected_regions - Release selected PCI I/O and memory resources
+ * @pdev: PCI device whose resources were previously reserved
+ * @bars: Bitmask of BARs to be released
+ *
+ * Release selected PCI I/O and memory resources previously reserved.
+ * Call this function only after all use of the PCI regions has ceased.
+ */
+void pci_release_selected_regions(struct pci_dev *pdev, int bars)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ if (bars & (1 << i))
+ pci_release_region(pdev, i);
+}
+EXPORT_SYMBOL(pci_release_selected_regions);
+
+static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
+ const char *res_name, int excl)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ if (bars & (1 << i))
+ if (__pci_request_region(pdev, i, res_name, excl))
+ goto err_out;
+ return 0;
+
+err_out:
+ while (--i >= 0)
+ if (bars & (1 << i))
+ pci_release_region(pdev, i);
+
+ return -EBUSY;
+}
+
+/**
+ * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
+ * @pdev: PCI device whose resources are to be reserved
+ * @bars: Bitmask of BARs to be requested
+ * @res_name: Name to be associated with resource
+ */
+int pci_request_selected_regions(struct pci_dev *pdev, int bars,
+ const char *res_name)
+{
+ return __pci_request_selected_regions(pdev, bars, res_name, 0);
+}
+EXPORT_SYMBOL(pci_request_selected_regions);
+
+int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
+ const char *res_name)
+{
+ return __pci_request_selected_regions(pdev, bars, res_name,
+ IORESOURCE_EXCLUSIVE);
+}
+EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
+
+/**
+ * pci_release_regions - Release reserved PCI I/O and memory resources
+ * @pdev: PCI device whose resources were previously reserved by pci_request_regions
+ *
+ * Releases all PCI I/O and memory resources previously reserved by a
+ * successful call to pci_request_regions. Call this function only
+ * after all use of the PCI regions has ceased.
+ */
+void pci_release_regions(struct pci_dev *pdev)
+{
+ pci_release_selected_regions(pdev, (1 << 6) - 1);
+}
+EXPORT_SYMBOL(pci_release_regions);
+
+/**
+ * pci_request_regions - Reserve PCI I/O and memory resources
+ * @pdev: PCI device whose resources are to be reserved
+ * @res_name: Name to be associated with resource.
+ *
+ * Mark all PCI regions associated with PCI device @pdev as
+ * being reserved by owner @res_name. Do not access any
+ * address inside the PCI regions unless this call returns
+ * successfully.
+ *
+ * Returns 0 on success, or %EBUSY on error. A warning
+ * message is also printed on failure.
+ */
+int pci_request_regions(struct pci_dev *pdev, const char *res_name)
+{
+ return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
+}
+EXPORT_SYMBOL(pci_request_regions);
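+
+/*
+ * Usage sketch (illustrative; foo_probe() is hypothetical): the typical
+ * probe-time sequence around pci_request_regions():
+ *
+ *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ *	{
+ *		int err = pci_enable_device(pdev);
+ *
+ *		if (err)
+ *			return err;
+ *		err = pci_request_regions(pdev, "foo_driver");
+ *		if (err) {
+ *			pci_disable_device(pdev);
+ *			return err;
+ *		}
+ *		pci_set_master(pdev);
+ *		return 0;
+ *	}
+ */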
+
+/**
+ * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
+ * @pdev: PCI device whose resources are to be reserved
+ * @res_name: Name to be associated with resource.
+ *
+ * Mark all PCI regions associated with PCI device @pdev as
+ * being reserved by owner @res_name. Do not access any
+ * address inside the PCI regions unless this call returns
+ * successfully.
+ *
+ * pci_request_regions_exclusive() will mark the region so that
+ * /dev/mem and the sysfs MMIO access will not be allowed.
+ *
+ * Returns 0 on success, or %EBUSY on error. A warning
+ * message is also printed on failure.
+ */
+int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
+{
+ return pci_request_selected_regions_exclusive(pdev,
+ ((1 << 6) - 1), res_name);
+}
+EXPORT_SYMBOL(pci_request_regions_exclusive);
+
+/**
+ * pci_remap_iospace - Remap the memory mapped I/O space
+ * @res: Resource describing the I/O space
+ * @phys_addr: physical address of range to be mapped
+ *
+ * Remap the memory mapped I/O space described by the @res
+ * and the CPU physical address @phys_addr into virtual address space.
+ * Only architectures that have memory mapped IO functions defined
+ * (and the PCI_IOBASE value defined) should call this function.
+ */
+int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
+{
+#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
+ unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
+
+ if (!(res->flags & IORESOURCE_IO))
+ return -EINVAL;
+
+ if (res->end > IO_SPACE_LIMIT)
+ return -EINVAL;
+
+ return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
+ pgprot_device(PAGE_KERNEL));
+#else
+ /*
+ * This architecture does not have memory mapped I/O space,
+ * so this function should never be called.
+ */
+ WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
+ return -ENODEV;
+#endif
+}
+
+static void __pci_set_master(struct pci_dev *dev, bool enable)
+{
+ u16 old_cmd, cmd;
+
+ pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
+ if (enable)
+ cmd = old_cmd | PCI_COMMAND_MASTER;
+ else
+ cmd = old_cmd & ~PCI_COMMAND_MASTER;
+ if (cmd != old_cmd) {
+ dev_dbg(&dev->dev, "%s bus mastering\n",
+ enable ? "enabling" : "disabling");
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ dev->is_busmaster = enable;
+}
+
+/**
+ * pcibios_setup - process "pci=" kernel boot arguments
+ * @str: string used to pass in "pci=" kernel boot arguments
+ *
+ * Process kernel boot arguments. This is the default implementation.
+ * Architecture specific implementations can override this as necessary.
+ */
+char * __weak __init pcibios_setup(char *str)
+{
+ return str;
+}
+
+/**
+ * pcibios_set_master - enable PCI bus-mastering for device dev
+ * @dev: the PCI device to enable
+ *
+ * Enables PCI bus-mastering for the device. This is the default
+ * implementation. Architecture specific implementations can override
+ * this if necessary.
+ */
+void __weak pcibios_set_master(struct pci_dev *dev)
+{
+ u8 lat;
+
+ /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
+ if (pci_is_pcie(dev))
+ return;
+
+ pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
+ if (lat < 16)
+ lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
+ else if (lat > pcibios_max_latency)
+ lat = pcibios_max_latency;
+ else
+ return;
+
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
+}
+
+/**
+ * pci_set_master - enables bus-mastering for device dev
+ * @dev: the PCI device to enable
+ *
+ * Enables bus-mastering on the device and calls pcibios_set_master()
+ * to do the needed arch specific settings.
+ */
+void pci_set_master(struct pci_dev *dev)
+{
+ __pci_set_master(dev, true);
+ pcibios_set_master(dev);
+}
+EXPORT_SYMBOL(pci_set_master);
+
+/**
+ * pci_clear_master - disables bus-mastering for device dev
+ * @dev: the PCI device to disable
+ */
+void pci_clear_master(struct pci_dev *dev)
+{
+ __pci_set_master(dev, false);
+}
+EXPORT_SYMBOL(pci_clear_master);
+
+/**
+ * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
+ * @dev: the PCI device for which MWI is to be enabled
+ *
+ * Helper function for pci_set_mwi.
+ * Originally copied from drivers/net/acenic.c.
+ * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int pci_set_cacheline_size(struct pci_dev *dev)
+{
+ u8 cacheline_size;
+
+ if (!pci_cache_line_size)
+ return -EINVAL;
+
+ /*
+ * Validate current setting: the PCI_CACHE_LINE_SIZE must be
+ * equal to or a multiple of the right value.
+ */
+ pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
+ if (cacheline_size >= pci_cache_line_size &&
+ (cacheline_size % pci_cache_line_size) == 0)
+ return 0;
+
+ /* Write the correct value. */
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
+ /* Read it back. */
+ pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
+ if (cacheline_size == pci_cache_line_size)
+ return 0;
+
+ dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n",
+ pci_cache_line_size << 2);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
+
+/**
+ * pci_set_mwi - enables memory-write-invalidate PCI transaction
+ * @dev: the PCI device for which MWI is enabled
+ *
+ * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int pci_set_mwi(struct pci_dev *dev)
+{
+#ifdef PCI_DISABLE_MWI
+ return 0;
+#else
+ int rc;
+ u16 cmd;
+
+ rc = pci_set_cacheline_size(dev);
+ if (rc)
+ return rc;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (!(cmd & PCI_COMMAND_INVALIDATE)) {
+ dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
+ cmd |= PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
+#endif
+}
+EXPORT_SYMBOL(pci_set_mwi);
+
+/**
+ * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
+ * @dev: the PCI device for which MWI is enabled
+ *
+ * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
+ * Callers are not required to check the return value.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int pci_try_set_mwi(struct pci_dev *dev)
+{
+#ifdef PCI_DISABLE_MWI
+ return 0;
+#else
+ return pci_set_mwi(dev);
+#endif
+}
+EXPORT_SYMBOL(pci_try_set_mwi);
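+
+/*
+ * Usage sketch (illustrative): drivers for which MWI is a pure optimization
+ * call the _try_ variant and need not check the result; pci_set_mwi() is for
+ * hardware that depends on MWI being enabled:
+ *
+ *	pci_try_set_mwi(pdev);
+ */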
+
+/**
+ * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
+ * @dev: the PCI device to disable
+ *
+ * Disables PCI Memory-Write-Invalidate transaction on the device
+ */
+void pci_clear_mwi(struct pci_dev *dev)
+{
+#ifndef PCI_DISABLE_MWI
+ u16 cmd;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & PCI_COMMAND_INVALIDATE) {
+ cmd &= ~PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+#endif
+}
+EXPORT_SYMBOL(pci_clear_mwi);
+
+/**
+ * pci_intx - enables/disables PCI INTx for device dev
+ * @pdev: the PCI device to operate on
+ * @enable: boolean: whether to enable or disable PCI INTx
+ *
+ * Enables/disables PCI INTx for device dev
+ */
+void pci_intx(struct pci_dev *pdev, int enable)
+{
+ u16 pci_command, new;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+
+ if (enable)
+ new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
+ else
+ new = pci_command | PCI_COMMAND_INTX_DISABLE;
+
+ if (new != pci_command) {
+ struct pci_devres *dr;
+
+ pci_write_config_word(pdev, PCI_COMMAND, new);
+
+ dr = find_pci_dr(pdev);
+ if (dr && !dr->restore_intx) {
+ dr->restore_intx = 1;
+ dr->orig_intx = !enable;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(pci_intx);
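+
+/*
+ * Usage sketch (illustrative): masking legacy INTx assertion while the
+ * device operates in MSI mode, and restoring it on teardown:
+ *
+ *	pci_intx(pdev, 0);
+ *	...
+ *	pci_intx(pdev, 1);
+ */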
+
+/**
+ * pci_intx_mask_supported - probe for INTx masking support
+ * @dev: the PCI device to operate on
+ *
+ * Check if the device dev supports INTx masking via the config space
+ * command word.
+ */
+bool pci_intx_mask_supported(struct pci_dev *dev)
+{
+ bool mask_supported = false;
+ u16 orig, new;
+
+ if (dev->broken_intx_masking)
+ return false;
+
+ pci_cfg_access_lock(dev);
+
+ pci_read_config_word(dev, PCI_COMMAND, &orig);
+ pci_write_config_word(dev, PCI_COMMAND,
+ orig ^ PCI_COMMAND_INTX_DISABLE);
+ pci_read_config_word(dev, PCI_COMMAND, &new);
+
+ /*
+ * There's no way to protect against hardware bugs or detect them
+ * reliably, but as long as we know what the value should be, let's
+ * go ahead and check it.
+ */
+ if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
+ dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n",
+ orig, new);
+ } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
+ mask_supported = true;
+ pci_write_config_word(dev, PCI_COMMAND, orig);
+ }
+
+ pci_cfg_access_unlock(dev);
+ return mask_supported;
+}
+EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
+
+static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
+{
+ struct pci_bus *bus = dev->bus;
+ bool mask_updated = true;
+ u32 cmd_status_dword;
+ u16 origcmd, newcmd;
+ unsigned long flags;
+ bool irq_pending;
+
+ /*
+ * We do a single dword read to retrieve both command and status.
+ * Document assumptions that make this possible.
+ */
+ BUILD_BUG_ON(PCI_COMMAND % 4);
+ BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
+
+ raw_spin_lock_irqsave(&pci_lock, flags);
+
+ bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
+
+ irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
+
+ /*
+ * Check interrupt status register to see whether our device
+ * triggered the interrupt (when masking) or the next IRQ is
+ * already pending (when unmasking).
+ */
+ if (mask != irq_pending) {
+ mask_updated = false;
+ goto done;
+ }
+
+ origcmd = cmd_status_dword;
+ newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
+ if (mask)
+ newcmd |= PCI_COMMAND_INTX_DISABLE;
+ if (newcmd != origcmd)
+ bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
+
+done:
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
+
+ return mask_updated;
+}
+
+/**
+ * pci_check_and_mask_intx - mask INTx on pending interrupt
+ * @dev: the PCI device to operate on
+ *
+ * Check if the device dev has its INTx line asserted, mask it and
+ * return true in that case. False is returned if no interrupt was
+ * pending.
+ */
+bool pci_check_and_mask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, true);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
+
+/**
+ * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
+ * @dev: the PCI device to operate on
+ *
+ * Check if the device dev has its INTx line asserted, unmask it if not
+ * and return true. False is returned and the mask remains active if
+ * there was still an interrupt pending.
+ */
+bool pci_check_and_unmask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, false);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
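+
+/*
+ * Usage sketch (illustrative; foo_irq() and foo_work are hypothetical): a
+ * handler for a shared INTx line that masks the interrupt at the device
+ * until a deferred consumer has serviced it:
+ *
+ *	static irqreturn_t foo_irq(int irq, void *data)
+ *	{
+ *		struct pci_dev *pdev = data;
+ *
+ *		if (!pci_check_and_mask_intx(pdev))
+ *			return IRQ_NONE;
+ *
+ *		schedule_work(&foo_work);
+ *		return IRQ_HANDLED;
+ *	}
+ *
+ * where the worker services the device and then re-enables the line with
+ * pci_check_and_unmask_intx().
+ */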
+
+/**
+ * pci_msi_off - disables any MSI or MSI-X capabilities
+ * @dev: the PCI device to operate on
+ *
+ * If you want to use MSI, see pci_enable_msi() and friends.
+ * This is a lower-level primitive that allows us to disable
+ * MSI operation at the device level.
+ */
+void pci_msi_off(struct pci_dev *dev)
+{
+ int pos;
+ u16 control;
+
+ /*
+ * This looks like it could go in msi.c, but we need it even when
+ * CONFIG_PCI_MSI=n. For the same reason, we can't use
+ * dev->msi_cap or dev->msix_cap here.
+ */
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
+ }
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ }
+}
+EXPORT_SYMBOL_GPL(pci_msi_off);
+
+int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
+{
+ return dma_set_max_seg_size(&dev->dev, size);
+}
+EXPORT_SYMBOL(pci_set_dma_max_seg_size);
+
+int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
+{
+ return dma_set_seg_boundary(&dev->dev, mask);
+}
+EXPORT_SYMBOL(pci_set_dma_seg_boundary);
+
+/**
+ * pci_wait_for_pending_transaction - wait for pending transactions to complete
+ * @dev: the PCI device to operate on
+ *
+ * Return 0 if a transaction is still pending, 1 otherwise.
+ */
+int pci_wait_for_pending_transaction(struct pci_dev *dev)
+{
+ if (!pci_is_pcie(dev))
+ return 1;
+
+ return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
+ PCI_EXP_DEVSTA_TRPND);
+}
+EXPORT_SYMBOL(pci_wait_for_pending_transaction);
+
+static int pcie_flr(struct pci_dev *dev, int probe)
+{
+ u32 cap;
+
+ pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
+ if (!(cap & PCI_EXP_DEVCAP_FLR))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
+
+ pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+ msleep(100);
+ return 0;
+}
+
+static int pci_af_flr(struct pci_dev *dev, int probe)
+{
+ int pos;
+ u8 cap;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_AF);
+ if (!pos)
+ return -ENOTTY;
+
+ pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
+ if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ /*
+ * Wait for the Transaction Pending bit to clear. A word-aligned test
+ * is used, so we use the control offset rather than status and shift
+ * the test bit to match.
+ */
+ if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
+ PCI_AF_STATUS_TP << 8))
+ dev_err(&dev->dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
+
+ pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
+ msleep(100);
+ return 0;
+}
+
+/**
+ * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
+ * @dev: Device to reset.
+ * @probe: If set, only check if the device can be reset this way.
+ *
+ * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
+ * unset, it will be reinitialized internally when going from PCI_D3hot to
+ * PCI_D0. If that's the case and the device is not in a low-power state
+ * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
+ *
+ * NOTE: This causes the caller to sleep for twice the device power transition
+ * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
+ * by default (i.e. unless the @dev's d3_delay field has a different value).
+ * Moreover, only devices in D0 can be reset by this function.
+ */
+static int pci_pm_reset(struct pci_dev *dev, int probe)
+{
+ u16 csr;
+
+ if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
+ return -ENOTTY;
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
+ if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ if (dev->current_state != PCI_D0)
+ return -EINVAL;
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D3hot;
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
+ pci_dev_d3_sleep(dev);
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D0;
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
+ pci_dev_d3_sleep(dev);
+
+ return 0;
+}
+
+void pci_reset_secondary_bus(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
+ ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+ /*
+ * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
+ * this to 2ms to ensure that we meet the minimum requirement.
+ */
+ msleep(2);
+
+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+
+ /*
+ * Trhfa for conventional PCI is 2^25 clock cycles.
+ * Assuming a minimum 33MHz clock this results in a 1s
+ * delay before we can consider subordinate devices to
+ * be re-initialized. PCIe has some ways to shorten this,
+ * but we don't make use of them yet.
+ */
+ ssleep(1);
+}
+
+void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
+{
+ pci_reset_secondary_bus(dev);
+}
+
+/**
+ * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
+ * @dev: Bridge device
+ *
+ * Use the bridge control register to assert reset on the secondary bus.
+ * Devices on the secondary bus are left in power-on state.
+ */
+void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
+{
+ pcibios_reset_secondary_bus(dev);
+}
+EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
+
+static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+{
+ struct pci_dev *pdev;
+
+ if (pci_is_root_bus(dev->bus) || dev->subordinate ||
+ !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
+ return -ENOTTY;
+
+ list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+ if (pdev != dev)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_reset_bridge_secondary_bus(dev->bus->self);
+
+ return 0;
+}
+
+static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
+{
+ int rc = -ENOTTY;
+
+ if (!hotplug || !try_module_get(hotplug->ops->owner))
+ return rc;
+
+ if (hotplug->ops->reset_slot)
+ rc = hotplug->ops->reset_slot(hotplug, probe);
+
+ module_put(hotplug->ops->owner);
+
+ return rc;
+}
+
+static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
+{
+ struct pci_dev *pdev;
+
+ if (dev->subordinate || !dev->slot ||
+ dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
+ return -ENOTTY;
+
+ list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+ if (pdev != dev && pdev->slot == dev->slot)
+ return -ENOTTY;
+
+ return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
+}
+
+static int __pci_dev_reset(struct pci_dev *dev, int probe)
+{
+ int rc;
+
+ might_sleep();
+
+ rc = pci_dev_specific_reset(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pcie_flr(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pci_af_flr(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pci_pm_reset(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pci_dev_reset_slot_function(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pci_parent_bus_reset(dev, probe);
+done:
+ return rc;
+}
+
+static void pci_dev_lock(struct pci_dev *dev)
+{
+ pci_cfg_access_lock(dev);
+ /* block PM suspend, driver probe, etc. */
+ device_lock(&dev->dev);
+}
+
+/* Return 1 on successful lock, 0 on contention */
+static int pci_dev_trylock(struct pci_dev *dev)
+{
+ if (pci_cfg_access_trylock(dev)) {
+ if (device_trylock(&dev->dev))
+ return 1;
+ pci_cfg_access_unlock(dev);
+ }
+
+ return 0;
+}
+
+static void pci_dev_unlock(struct pci_dev *dev)
+{
+ device_unlock(&dev->dev);
+ pci_cfg_access_unlock(dev);
+}
+
+/**
+ * pci_reset_notify - notify device driver of reset
+ * @dev: device to be notified of reset
+ * @prepare: 'true' if device is about to be reset; 'false' if reset attempt
+ * completed
+ *
+ * Must be called prior to device access being disabled and after device
+ * access is restored.
+ */
+static void pci_reset_notify(struct pci_dev *dev, bool prepare)
+{
+ const struct pci_error_handlers *err_handler =
+ dev->driver ? dev->driver->err_handler : NULL;
+ if (err_handler && err_handler->reset_notify)
+ err_handler->reset_notify(dev, prepare);
+}
+
+static void pci_dev_save_and_disable(struct pci_dev *dev)
+{
+ pci_reset_notify(dev, true);
+
+ /*
+ * Wake-up device prior to save. PM registers default to D0 after
+ * reset and a simple register restore doesn't reliably return
+ * to a non-D0 state anyway.
+ */
+ pci_set_power_state(dev, PCI_D0);
+
+ pci_save_state(dev);
+ /*
+ * Disable the device by clearing the Command register, except for
+ * INTx-disable which is set. This not only disables MMIO and I/O port
+ * BARs, but also prevents the device from being Bus Master, preventing
+ * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
+ * compliant devices, INTx-disable prevents legacy interrupts.
+ */
+ pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+}
+
+static void pci_dev_restore(struct pci_dev *dev)
+{
+ pci_restore_state(dev);
+ pci_reset_notify(dev, false);
+}
+
+static int pci_dev_reset(struct pci_dev *dev, int probe)
+{
+ int rc;
+
+ if (!probe)
+ pci_dev_lock(dev);
+
+ rc = __pci_dev_reset(dev, probe);
+
+ if (!probe)
+ pci_dev_unlock(dev);
+
+ return rc;
+}
+
+/**
+ * __pci_reset_function - reset a PCI device function
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * The device function is presumed to be unused when this function is called.
+ * Resetting the device will make the contents of PCI configuration space
+ * random, so any caller of this must be prepared to reinitialise the
+ * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
+ * etc.
+ *
+ * Returns 0 if the device function was successfully reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int __pci_reset_function(struct pci_dev *dev)
+{
+ return pci_dev_reset(dev, 0);
+}
+EXPORT_SYMBOL_GPL(__pci_reset_function);
+
+/**
+ * __pci_reset_function_locked - reset a PCI device function while holding
+ * the @dev mutex lock.
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * The device function is presumed to be unused and the caller is holding
+ * the device mutex lock when this function is called.
+ * Resetting the device will make the contents of PCI configuration space
+ * random, so any caller of this must be prepared to reinitialise the
+ * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
+ * etc.
+ *
+ * Returns 0 if the device function was successfully reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int __pci_reset_function_locked(struct pci_dev *dev)
+{
+ return __pci_dev_reset(dev, 0);
+}
+EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
+
+/**
+ * pci_probe_reset_function - check whether the device can be safely reset
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * Returns 0 if the device function can be reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int pci_probe_reset_function(struct pci_dev *dev)
+{
+ return pci_dev_reset(dev, 1);
+}
+
+/**
+ * pci_reset_function - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * This function does not just reset the PCI portion of a device, but
+ * clears all the state associated with the device. This function differs
+ * from __pci_reset_function in that it saves and restores device state
+ * over the reset.
+ *
+ * Returns 0 if the device function was successfully reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int pci_reset_function(struct pci_dev *dev)
+{
+ int rc;
+
+ rc = pci_dev_reset(dev, 1);
+ if (rc)
+ return rc;
+
+ pci_dev_save_and_disable(dev);
+
+ rc = pci_dev_reset(dev, 0);
+
+ pci_dev_restore(dev);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_function);
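+
+/*
+ * Usage sketch (illustrative): a caller resetting a function and treating a
+ * negative return as "no reset method is supported":
+ *
+ *	int err = pci_reset_function(pdev);
+ *
+ *	if (err)
+ *		dev_warn(&pdev->dev, "function reset failed: %d\n", err);
+ */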
+
+/**
+ * pci_try_reset_function - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
+ *
+ * Same as above, except return -EAGAIN if unable to lock device.
+ */
+int pci_try_reset_function(struct pci_dev *dev)
+{
+ int rc;
+
+ rc = pci_dev_reset(dev, 1);
+ if (rc)
+ return rc;
+
+ pci_dev_save_and_disable(dev);
+
+ if (pci_dev_trylock(dev)) {
+ rc = __pci_dev_reset(dev, 0);
+ pci_dev_unlock(dev);
+ } else
+ rc = -EAGAIN;
+
+ pci_dev_restore(dev);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_try_reset_function);
+
+/* Do any devices on or below this bus prevent a bus reset? */
+static bool pci_bus_resetable(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+ return false;
+ }
+
+ return true;
+}
+
+/* Lock devices from the top of the tree down */
+static void pci_bus_lock(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_dev_lock(dev);
+ if (dev->subordinate)
+ pci_bus_lock(dev->subordinate);
+ }
+}
+
+/* Unlock devices from the bottom of the tree up */
+static void pci_bus_unlock(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+ pci_dev_unlock(dev);
+ }
+}
+
+/* Return 1 on successful lock, 0 on contention */
+static int pci_bus_trylock(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (!pci_dev_trylock(dev))
+ goto unlock;
+ if (dev->subordinate) {
+ if (!pci_bus_trylock(dev->subordinate)) {
+ pci_dev_unlock(dev);
+ goto unlock;
+ }
+ }
+ }
+ return 1;
+
+unlock:
+ list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+ pci_dev_unlock(dev);
+ }
+ return 0;
+}
+
+/* Do any devices on or below this slot prevent a bus reset? */
+static bool pci_slot_resetable(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+ return false;
+ }
+
+ return true;
+}
+
+/* Lock devices from the top of the tree down */
+static void pci_slot_lock(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ pci_dev_lock(dev);
+ if (dev->subordinate)
+ pci_bus_lock(dev->subordinate);
+ }
+}
+
+/* Unlock devices from the bottom of the tree up */
+static void pci_slot_unlock(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+ pci_dev_unlock(dev);
+ }
+}
+
+/* Return 1 on successful lock, 0 on contention */
+static int pci_slot_trylock(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ if (!pci_dev_trylock(dev))
+ goto unlock;
+ if (dev->subordinate) {
+ if (!pci_bus_trylock(dev->subordinate)) {
+ pci_dev_unlock(dev);
+ goto unlock;
+ }
+ }
+ }
+ return 1;
+
+unlock:
+ list_for_each_entry_continue_reverse(dev,
+ &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+ pci_dev_unlock(dev);
+ }
+ return 0;
+}
+
+/* Save and disable devices from the top of the tree down */
+static void pci_bus_save_and_disable(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_dev_save_and_disable(dev);
+ if (dev->subordinate)
+ pci_bus_save_and_disable(dev->subordinate);
+ }
+}
+
+/*
+ * Restore devices from top of the tree down - parent bridges need to be
+ * restored before we can get to subordinate devices.
+ */
+static void pci_bus_restore(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_dev_restore(dev);
+ if (dev->subordinate)
+ pci_bus_restore(dev->subordinate);
+ }
+}
+
+/* Save and disable devices from the top of the tree down */
+static void pci_slot_save_and_disable(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ pci_dev_save_and_disable(dev);
+ if (dev->subordinate)
+ pci_bus_save_and_disable(dev->subordinate);
+ }
+}
+
+/*
+ * Restore devices from top of the tree down - parent bridges need to be
+ * restored before we can get to subordinate devices.
+ */
+static void pci_slot_restore(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ pci_dev_restore(dev);
+ if (dev->subordinate)
+ pci_bus_restore(dev->subordinate);
+ }
+}
+
+static int pci_slot_reset(struct pci_slot *slot, int probe)
+{
+ int rc;
+
+ if (!slot || !pci_slot_resetable(slot))
+ return -ENOTTY;
+
+ if (!probe)
+ pci_slot_lock(slot);
+
+ might_sleep();
+
+ rc = pci_reset_hotplug_slot(slot->hotplug, probe);
+
+ if (!probe)
+ pci_slot_unlock(slot);
+
+ return rc;
+}
+
+/**
+ * pci_probe_reset_slot - probe whether a PCI slot can be reset
+ * @slot: PCI slot to probe
+ *
+ * Return 0 if slot can be reset, negative if a slot reset is not supported.
+ */
+int pci_probe_reset_slot(struct pci_slot *slot)
+{
+ return pci_slot_reset(slot, 1);
+}
+EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
+
+/**
+ * pci_reset_slot - reset a PCI slot
+ * @slot: PCI slot to reset
+ *
+ * A PCI bus may host multiple slots; each slot may support a reset mechanism
+ * independent of other slots. For instance, some slots may support slot power
+ * control. In the case of a 1:1 bus-to-slot architecture, this function may
+ * wrap the bus reset to avoid spurious slot-related events such as hotplug.
+ * Generally a slot reset should be attempted before a bus reset. All of the
+ * functions of the slot and any subordinate buses behind the slot are reset
+ * through this function. PCI config space of all devices in the slot and
+ * behind the slot is saved before and restored after the reset.
+ *
+ * Return 0 on success, non-zero on error.
+ */
+int pci_reset_slot(struct pci_slot *slot)
+{
+ int rc;
+
+ rc = pci_slot_reset(slot, 1);
+ if (rc)
+ return rc;
+
+ pci_slot_save_and_disable(slot);
+
+ rc = pci_slot_reset(slot, 0);
+
+ pci_slot_restore(slot);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_slot);
+
+/**
+ * pci_try_reset_slot - Try to reset a PCI slot
+ * @slot: PCI slot to reset
+ *
+ * Same as pci_reset_slot(), except it returns -EAGAIN if the slot cannot be locked.
+ */
+int pci_try_reset_slot(struct pci_slot *slot)
+{
+ int rc;
+
+ rc = pci_slot_reset(slot, 1);
+ if (rc)
+ return rc;
+
+ pci_slot_save_and_disable(slot);
+
+ if (pci_slot_trylock(slot)) {
+ might_sleep();
+ rc = pci_reset_hotplug_slot(slot->hotplug, 0);
+ pci_slot_unlock(slot);
+ } else
+ rc = -EAGAIN;
+
+ pci_slot_restore(slot);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_try_reset_slot);
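
A caller sketch for these two entry points (error handling trimmed; the function name is hypothetical): probe first, then use the blocking or the try variant depending on whether -EAGAIN can be tolerated.

/* Sketch only: reset the slot that 'pdev' sits in, if one exists. */
static int slot_reset_example(struct pci_dev *pdev)
{
	struct pci_slot *slot = pdev->slot;

	if (!slot || pci_probe_reset_slot(slot))
		return -ENOTTY;			/* no slot reset method */

	return pci_try_reset_slot(slot);	/* -EAGAIN on lock contention */
}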
+
+static int pci_bus_reset(struct pci_bus *bus, int probe)
+{
+ if (!bus->self || !pci_bus_resetable(bus))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_bus_lock(bus);
+
+ might_sleep();
+
+ pci_reset_bridge_secondary_bus(bus->self);
+
+ pci_bus_unlock(bus);
+
+ return 0;
+}
+
+/**
+ * pci_probe_reset_bus - probe whether a PCI bus can be reset
+ * @bus: PCI bus to probe
+ *
+ * Return 0 if bus can be reset, negative if a bus reset is not supported.
+ */
+int pci_probe_reset_bus(struct pci_bus *bus)
+{
+ return pci_bus_reset(bus, 1);
+}
+EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
+
+/**
+ * pci_reset_bus - reset a PCI bus
+ * @bus: top level PCI bus to reset
+ *
+ * Do a bus reset on the given bus and any subordinate buses, saving
+ * and restoring state of all devices.
+ *
+ * Return 0 on success, non-zero on error.
+ */
+int pci_reset_bus(struct pci_bus *bus)
+{
+ int rc;
+
+ rc = pci_bus_reset(bus, 1);
+ if (rc)
+ return rc;
+
+ pci_bus_save_and_disable(bus);
+
+ rc = pci_bus_reset(bus, 0);
+
+ pci_bus_restore(bus);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_bus);
+
+/**
+ * pci_try_reset_bus - Try to reset a PCI bus
+ * @bus: top level PCI bus to reset
+ *
+ * Same as pci_reset_bus(), except it returns -EAGAIN if the bus cannot be locked.
+ */
+int pci_try_reset_bus(struct pci_bus *bus)
+{
+ int rc;
+
+ rc = pci_bus_reset(bus, 1);
+ if (rc)
+ return rc;
+
+ pci_bus_save_and_disable(bus);
+
+ if (pci_bus_trylock(bus)) {
+ might_sleep();
+ pci_reset_bridge_secondary_bus(bus->self);
+ pci_bus_unlock(bus);
+ } else
+ rc = -EAGAIN;
+
+ pci_bus_restore(bus);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_try_reset_bus);
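
As with slots, a hedged usage sketch (this mirrors how callers such as VFIO drive the API; the function below is hypothetical):

/* Sketch only: secondary bus reset of everything behind pdev's bridge. */
static int bus_reset_example(struct pci_dev *pdev)
{
	if (pci_probe_reset_bus(pdev->bus))
		return -ENOTTY;			/* no upstream bridge */

	return pci_try_reset_bus(pdev->bus);	/* -EAGAIN on lock contention */
}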
+
+/**
+ * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
+ * @dev: PCI device to query
+ *
+ * Returns mmrbc: maximum designed memory read count in bytes
+ * or appropriate error value.
+ */
+int pcix_get_max_mmrbc(struct pci_dev *dev)
+{
+ int cap;
+ u32 stat;
+
+ cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (!cap)
+ return -EINVAL;
+
+ if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
+ return -EINVAL;
+
+ return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
+}
+EXPORT_SYMBOL(pcix_get_max_mmrbc);
+
+/**
+ * pcix_get_mmrbc - get PCI-X maximum memory read byte count
+ * @dev: PCI device to query
+ *
+ * Returns mmrbc: maximum memory read count in bytes
+ * or appropriate error value.
+ */
+int pcix_get_mmrbc(struct pci_dev *dev)
+{
+ int cap;
+ u16 cmd;
+
+ cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (!cap)
+ return -EINVAL;
+
+ if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
+ return -EINVAL;
+
+ return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
+}
+EXPORT_SYMBOL(pcix_get_mmrbc);
+
+/**
+ * pcix_set_mmrbc - set PCI-X maximum memory read byte count
+ * @dev: PCI device to query
+ * @mmrbc: maximum memory read count in bytes
+ * valid values are 512, 1024, 2048, 4096
+ *
+ * If possible, sets the maximum memory read byte count; some bridges have
+ * errata that prevent this.
+ */
+int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
+{
+ int cap;
+ u32 stat, v, o;
+ u16 cmd;
+
+ if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
+ return -EINVAL;
+
+ v = ffs(mmrbc) - 10;
+
+ cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (!cap)
+ return -EINVAL;
+
+ if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
+ return -EINVAL;
+
+ if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
+ return -E2BIG;
+
+ if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
+ return -EINVAL;
+
+ o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
+ if (o != v) {
+ if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
+ return -EIO;
+
+ cmd &= ~PCI_X_CMD_MAX_READ;
+ cmd |= v << 2;
+ if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(pcix_set_mmrbc);
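
The PCI-X encoding round-trip is easiest to check with a worked value (illustrative only):

/* Sketch: MMRBC encoding used by pcix_get_mmrbc()/pcix_set_mmrbc(). */
int v = ffs(2048) - 10;	/* ffs(2048) == 12, so the register field is 2 */
int bytes = 512 << v;	/* decoding reverses it: 512 << 2 == 2048      */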
+
+/**
+ * pcie_get_readrq - get PCI Express read request size
+ * @dev: PCI device to query
+ *
+ * Returns maximum memory read request in bytes
+ * or appropriate error value.
+ */
+int pcie_get_readrq(struct pci_dev *dev)
+{
+ u16 ctl;
+
+ pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
+
+ return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+}
+EXPORT_SYMBOL(pcie_get_readrq);
+
+/**
+ * pcie_set_readrq - set PCI Express maximum memory read request
+ * @dev: PCI device to query
+ * @rq: maximum memory read count in bytes
+ * valid values are 128, 256, 512, 1024, 2048, 4096
+ *
+ * If possible, sets the maximum memory read request size in bytes.
+ */
+int pcie_set_readrq(struct pci_dev *dev, int rq)
+{
+ u16 v;
+
+ if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
+ return -EINVAL;
+
+ /*
+ * If using the "performance" PCIe config, we clamp the
+ * read rq size to the max packet size to prevent the
+ * host bridge generating requests larger than we can
+ * cope with
+ */
+ if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
+ int mps = pcie_get_mps(dev);
+
+ if (mps < rq)
+ rq = mps;
+ }
+
+ v = (ffs(rq) - 8) << 12;
+
+ return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_READRQ, v);
+}
+EXPORT_SYMBOL(pcie_set_readrq);
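
A driver issuing many small DMA reads sometimes clamps the read request size at probe time; a hedged sketch (the threshold is illustrative):

/* Sketch only: cap Max_Read_Request_Size at 512 bytes. */
static void readrq_tune_example(struct pci_dev *pdev)
{
	if (pcie_get_readrq(pdev) > 512)
		pcie_set_readrq(pdev, 512);	/* encoded as (ffs(512) - 8) << 12 */
}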
+
+/**
+ * pcie_get_mps - get PCI Express maximum payload size
+ * @dev: PCI device to query
+ *
+ * Returns maximum payload size in bytes
+ */
+int pcie_get_mps(struct pci_dev *dev)
+{
+ u16 ctl;
+
+ pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
+
+ return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+}
+EXPORT_SYMBOL(pcie_get_mps);
+
+/**
+ * pcie_set_mps - set PCI Express maximum payload size
+ * @dev: PCI device to query
+ * @mps: maximum payload size in bytes
+ * valid values are 128, 256, 512, 1024, 2048, 4096
+ *
+ * If possible, sets the maximum payload size.
+ */
+int pcie_set_mps(struct pci_dev *dev, int mps)
+{
+ u16 v;
+
+ if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
+ return -EINVAL;
+
+ v = ffs(mps) - 8;
+ if (v > dev->pcie_mpss)
+ return -EINVAL;
+ v <<= 5;
+
+ return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_PAYLOAD, v);
+}
+EXPORT_SYMBOL(pcie_set_mps);
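
A worked value for the MPS encoding (illustrative): for mps = 256, ffs(256) is 9, so v = 1; the call fails with -EINVAL if 1 exceeds dev->pcie_mpss (the maximum the device advertises), otherwise v is shifted into bits 7:5 of the Device Control register.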
+
+/**
+ * pcie_get_minimum_link - determine minimum link settings of a PCI device
+ * @dev: PCI device to query
+ * @speed: storage for minimum speed
+ * @width: storage for minimum width
+ *
+ * This function will walk up the PCI device chain and determine the minimum
+ * link width and speed of the device.
+ */
+int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ int ret;
+
+ *speed = PCI_SPEED_UNKNOWN;
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+ while (dev) {
+ u16 lnksta;
+ enum pci_bus_speed next_speed;
+ enum pcie_link_width next_width;
+
+ ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+ if (ret)
+ return ret;
+
+ next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+ next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+ PCI_EXP_LNKSTA_NLW_SHIFT;
+
+ if (next_speed < *speed)
+ *speed = next_speed;
+
+ if (next_width < *width)
+ *width = next_width;
+
+ dev = dev->bus->self;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pcie_get_minimum_link);
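
A sketch of a bandwidth check built on this helper (thresholds illustrative; the function is hypothetical):

/* Sketch only: warn when the slowest upstream link limits the device. */
static void link_check_example(struct pci_dev *pdev)
{
	enum pci_bus_speed speed;
	enum pcie_link_width width;

	if (pcie_get_minimum_link(pdev, &speed, &width))
		return;
	if (speed < PCIE_SPEED_8_0GT || width < PCIE_LNK_X8)
		dev_warn(&pdev->dev, "PCIe link limits device throughput\n");
}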
+
+/**
+ * pci_select_bars - Make BAR mask from the type of resource
+ * @dev: the PCI device for which BAR mask is made
+ * @flags: resource type mask to be selected
+ *
+ * This helper routine makes a BAR mask from the given resource type.
+ */
+int pci_select_bars(struct pci_dev *dev, unsigned long flags)
+{
+ int i, bars = 0;
+ for (i = 0; i < PCI_NUM_RESOURCES; i++)
+ if (pci_resource_flags(dev, i) & flags)
+ bars |= (1 << i);
+ return bars;
+}
+EXPORT_SYMBOL(pci_select_bars);
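
The mask pairs naturally with pci_request_selected_regions(); a sketch (driver name illustrative):

/* Sketch only: claim just the MMIO BARs of a device. */
static int claim_mem_bars_example(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "example-drv");
}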
+
+/**
+ * pci_resource_bar - get position of the BAR associated with a resource
+ * @dev: the PCI device
+ * @resno: the resource number
+ * @type: the BAR type to be filled in
+ *
+ * Returns BAR position in config space, or 0 if the BAR is invalid.
+ */
+int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
+{
+ int reg;
+
+ if (resno < PCI_ROM_RESOURCE) {
+ *type = pci_bar_unknown;
+ return PCI_BASE_ADDRESS_0 + 4 * resno;
+ } else if (resno == PCI_ROM_RESOURCE) {
+ *type = pci_bar_mem32;
+ return dev->rom_base_reg;
+ } else if (resno < PCI_BRIDGE_RESOURCES) {
+ /* device specific resource */
+ *type = pci_bar_unknown;
+ reg = pci_iov_resource_bar(dev, resno);
+ if (reg)
+ return reg;
+ }
+
+ dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
+ return 0;
+}
+
+/* Some architectures require additional programming to enable VGA */
+static arch_set_vga_state_t arch_set_vga_state;
+
+void __init pci_register_set_vga_state(arch_set_vga_state_t func)
+{
+ arch_set_vga_state = func; /* NULL disables */
+}
+
+static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
+ unsigned int command_bits, u32 flags)
+{
+ if (arch_set_vga_state)
+ return arch_set_vga_state(dev, decode, command_bits,
+ flags);
+ return 0;
+}
+
+/**
+ * pci_set_vga_state - set VGA decode state on device and parents if requested
+ * @dev: the PCI device
+ * @decode: true = enable decoding, false = disable decoding
+ * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
+ * @flags: PCI_VGA_STATE_CHANGE_DECODES to update the device's command bits,
+ *	PCI_VGA_STATE_CHANGE_BRIDGE to traverse ancestors and update bridges
+ */
+int pci_set_vga_state(struct pci_dev *dev, bool decode,
+ unsigned int command_bits, u32 flags)
+{
+ struct pci_bus *bus;
+ struct pci_dev *bridge;
+ u16 cmd;
+ int rc;
+
+	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
+		(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
+
+ /* ARCH specific VGA enables */
+ rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
+ if (rc)
+ return rc;
+
+ if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+		if (decode)
+ cmd |= command_bits;
+ else
+ cmd &= ~command_bits;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+
+ if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
+ return 0;
+
+ bus = dev->bus;
+ while (bus) {
+ bridge = bus->self;
+ if (bridge) {
+ pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
+ &cmd);
+			if (decode)
+ cmd |= PCI_BRIDGE_CTL_VGA;
+ else
+ cmd &= ~PCI_BRIDGE_CTL_VGA;
+ pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
+ cmd);
+ }
+ bus = bus->parent;
+ }
+ return 0;
+}
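
A VGA-arbiter-style caller would use this roughly as follows (sketch; the flag combination is illustrative):

/* Sketch only: stop 'pdev' itself from decoding legacy VGA cycles. */
static int vga_mute_example(struct pci_dev *pdev)
{
	return pci_set_vga_state(pdev, false,
				 PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
				 PCI_VGA_STATE_CHANGE_DECODES);
}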
+
+bool pci_device_is_present(struct pci_dev *pdev)
+{
+ u32 v;
+
+ return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
+}
+EXPORT_SYMBOL_GPL(pci_device_is_present);
+
+void pci_ignore_hotplug(struct pci_dev *dev)
+{
+ struct pci_dev *bridge = dev->bus->self;
+
+ dev->ignore_hotplug = 1;
+ /* Propagate the "ignore hotplug" setting to the parent bridge. */
+ if (bridge)
+ bridge->ignore_hotplug = 1;
+}
+EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
+
+#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
+static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
+static DEFINE_SPINLOCK(resource_alignment_lock);
+
+/**
+ * pci_specified_resource_alignment - get resource alignment specified by user.
+ * @dev: the PCI device to check
+ *
+ * RETURNS: Resource alignment if it is specified.
+ * Zero if it is not specified.
+ */
+static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
+{
+ int seg, bus, slot, func, align_order, count;
+ resource_size_t align = 0;
+ char *p;
+
+ spin_lock(&resource_alignment_lock);
+ p = resource_alignment_param;
+ while (*p) {
+ count = 0;
+ if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
+ p[count] == '@') {
+ p += count + 1;
+ } else {
+ align_order = -1;
+ }
+ if (sscanf(p, "%x:%x:%x.%x%n",
+ &seg, &bus, &slot, &func, &count) != 4) {
+ seg = 0;
+ if (sscanf(p, "%x:%x.%x%n",
+ &bus, &slot, &func, &count) != 3) {
+ /* Invalid format */
+ printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
+ p);
+ break;
+ }
+ }
+ p += count;
+ if (seg == pci_domain_nr(dev->bus) &&
+ bus == dev->bus->number &&
+ slot == PCI_SLOT(dev->devfn) &&
+ func == PCI_FUNC(dev->devfn)) {
+ if (align_order == -1)
+ align = PAGE_SIZE;
+ else
+ align = 1 << align_order;
+ /* Found */
+ break;
+ }
+ if (*p != ';' && *p != ',') {
+ /* End of param or invalid format */
+ break;
+ }
+ p++;
+ }
+ spin_unlock(&resource_alignment_lock);
+ return align;
+}
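
For reference, the format parsed above is [<order>@][<domain>:]<bus>:<slot>.<func>, with entries separated by ';' or ','. For example, pci=resource_alignment=20@0000:01:00.0 requests 2^20-byte (1 MiB) alignment for that device; omitting the order falls back to PAGE_SIZE.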
+
+/*
+ * This function disables memory decoding and releases memory resources
+ * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
+ * It also rounds up the resource size to the specified alignment.
+ * Later on, the kernel will assign page-aligned memory resource back
+ * to the device.
+ */
+void pci_reassigndev_resource_alignment(struct pci_dev *dev)
+{
+ int i;
+ struct resource *r;
+ resource_size_t align, size;
+ u16 command;
+
+ /* check if specified PCI is target device to reassign */
+ align = pci_specified_resource_alignment(dev);
+ if (!align)
+ return;
+
+ if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
+ (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
+ dev_warn(&dev->dev,
+ "Can't reassign resources to host bridge.\n");
+ return;
+ }
+
+ dev_info(&dev->dev,
+ "Disabling memory decoding and releasing memory resources.\n");
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+ command &= ~PCI_COMMAND_MEMORY;
+ pci_write_config_word(dev, PCI_COMMAND, command);
+
+ for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
+ r = &dev->resource[i];
+ if (!(r->flags & IORESOURCE_MEM))
+ continue;
+ size = resource_size(r);
+ if (size < align) {
+ size = align;
+ dev_info(&dev->dev,
+ "Rounding up size of resource #%d to %#llx.\n",
+ i, (unsigned long long)size);
+ }
+ r->flags |= IORESOURCE_UNSET;
+ r->end = size - 1;
+ r->start = 0;
+ }
+	/*
+	 * Need to disable the bridge's resource window to let the kernel
+	 * reassign a new resource window later on.
+	 */
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
+ (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
+ r = &dev->resource[i];
+ if (!(r->flags & IORESOURCE_MEM))
+ continue;
+ r->flags |= IORESOURCE_UNSET;
+ r->end = resource_size(r) - 1;
+ r->start = 0;
+ }
+ pci_disable_bridge_window(dev);
+ }
+}
+
+static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
+{
+ if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
+ count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
+ spin_lock(&resource_alignment_lock);
+ strncpy(resource_alignment_param, buf, count);
+ resource_alignment_param[count] = '\0';
+ spin_unlock(&resource_alignment_lock);
+ return count;
+}
+
+static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
+{
+ size_t count;
+ spin_lock(&resource_alignment_lock);
+ count = snprintf(buf, size, "%s", resource_alignment_param);
+ spin_unlock(&resource_alignment_lock);
+ return count;
+}
+
+static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
+{
+ return pci_get_resource_alignment_param(buf, PAGE_SIZE);
+}
+
+static ssize_t pci_resource_alignment_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ return pci_set_resource_alignment_param(buf, count);
+}
+
+BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
+ pci_resource_alignment_store);
+
+static int __init pci_resource_alignment_sysfs_init(void)
+{
+ return bus_create_file(&pci_bus_type,
+ &bus_attr_resource_alignment);
+}
+late_initcall(pci_resource_alignment_sysfs_init);
+
+static void pci_no_domains(void)
+{
+#ifdef CONFIG_PCI_DOMAINS
+ pci_domains_supported = 0;
+#endif
+}
+
+#ifdef CONFIG_PCI_DOMAINS
+static atomic_t __domain_nr = ATOMIC_INIT(-1);
+
+int pci_get_new_domain_nr(void)
+{
+ return atomic_inc_return(&__domain_nr);
+}
+
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
+{
+ static int use_dt_domains = -1;
+ int domain = of_get_pci_domain_nr(parent->of_node);
+
+ /*
+ * Check DT domain and use_dt_domains values.
+ *
+ * If DT domain property is valid (domain >= 0) and
+ * use_dt_domains != 0, the DT assignment is valid since this means
+ * we have not previously allocated a domain number by using
+ * pci_get_new_domain_nr(); we should also update use_dt_domains to
+ * 1, to indicate that we have just assigned a domain number from
+ * DT.
+ *
+ * If DT domain property value is not valid (ie domain < 0), and we
+ * have not previously assigned a domain number from DT
+ * (use_dt_domains != 1) we should assign a domain number by
+ * using the:
+ *
+ * pci_get_new_domain_nr()
+ *
+ * API and update the use_dt_domains value to keep track of method we
+ * are using to assign domain numbers (use_dt_domains = 0).
+ *
+ * All other combinations imply we have a platform that is trying
+ * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
+ * which is a recipe for domain mishandling and it is prevented by
+ * invalidating the domain value (domain = -1) and printing a
+ * corresponding error.
+ */
+ if (domain >= 0 && use_dt_domains) {
+ use_dt_domains = 1;
+ } else if (domain < 0 && use_dt_domains != 1) {
+ use_dt_domains = 0;
+ domain = pci_get_new_domain_nr();
+ } else {
+ dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n",
+ parent->of_node->full_name);
+ domain = -1;
+ }
+
+ bus->domain_nr = domain;
+}
+#endif
+#endif
+
+/**
+ * pci_ext_cfg_avail - can we access extended PCI config space?
+ *
+ * Returns 1 if we can access PCI extended config space (offsets
+ * greater than 0xff). This is the default implementation. Architecture
+ * implementations can override this.
+ */
+int __weak pci_ext_cfg_avail(void)
+{
+ return 1;
+}
+
+void __weak pci_fixup_cardbus(struct pci_bus *bus)
+{
+}
+EXPORT_SYMBOL(pci_fixup_cardbus);
+
+static int __init pci_setup(char *str)
+{
+ while (str) {
+ char *k = strchr(str, ',');
+ if (k)
+ *k++ = 0;
+ if (*str && (str = pcibios_setup(str)) && *str) {
+ if (!strcmp(str, "nomsi")) {
+ pci_no_msi();
+ } else if (!strcmp(str, "noaer")) {
+ pci_no_aer();
+ } else if (!strncmp(str, "realloc=", 8)) {
+ pci_realloc_get_opt(str + 8);
+ } else if (!strncmp(str, "realloc", 7)) {
+ pci_realloc_get_opt("on");
+ } else if (!strcmp(str, "nodomains")) {
+ pci_no_domains();
+ } else if (!strncmp(str, "noari", 5)) {
+ pcie_ari_disabled = true;
+ } else if (!strncmp(str, "cbiosize=", 9)) {
+ pci_cardbus_io_size = memparse(str + 9, &str);
+ } else if (!strncmp(str, "cbmemsize=", 10)) {
+ pci_cardbus_mem_size = memparse(str + 10, &str);
+ } else if (!strncmp(str, "resource_alignment=", 19)) {
+ pci_set_resource_alignment_param(str + 19,
+ strlen(str + 19));
+ } else if (!strncmp(str, "ecrc=", 5)) {
+ pcie_ecrc_get_policy(str + 5);
+ } else if (!strncmp(str, "hpiosize=", 9)) {
+ pci_hotplug_io_size = memparse(str + 9, &str);
+ } else if (!strncmp(str, "hpmemsize=", 10)) {
+ pci_hotplug_mem_size = memparse(str + 10, &str);
+ } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
+ pcie_bus_config = PCIE_BUS_TUNE_OFF;
+ } else if (!strncmp(str, "pcie_bus_safe", 13)) {
+ pcie_bus_config = PCIE_BUS_SAFE;
+ } else if (!strncmp(str, "pcie_bus_perf", 13)) {
+ pcie_bus_config = PCIE_BUS_PERFORMANCE;
+ } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
+ pcie_bus_config = PCIE_BUS_PEER2PEER;
+ } else if (!strncmp(str, "pcie_scan_all", 13)) {
+ pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
+ } else {
+ printk(KERN_ERR "PCI: Unknown option `%s'\n",
+ str);
+ }
+ }
+ str = k;
+ }
+ return 0;
+}
+early_param("pci", pci_setup);
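
Options combine as a comma-separated list after pci=, each token dispatched by the strcmp()/strncmp() chain above; an illustrative command line: pci=nomsi,pcie_bus_safe,hpmemsize=8M,resource_alignment=12@0000:03:00.0 (memparse() accepts the K/M/G suffixes).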
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
new file mode 100644
index 000000000..9bd762c23
--- /dev/null
+++ b/drivers/pci/pci.h
@@ -0,0 +1,328 @@
+#ifndef DRIVERS_PCI_H
+#define DRIVERS_PCI_H
+
+#define PCI_CFG_SPACE_SIZE 256
+#define PCI_CFG_SPACE_EXP_SIZE 4096
+
+extern const unsigned char pcie_link_speed[];
+
+bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
+
+/* Functions internal to the PCI core code */
+
+int pci_create_sysfs_dev_files(struct pci_dev *pdev);
+void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
+#if !defined(CONFIG_DMI) && !defined(CONFIG_ACPI)
+static inline void pci_create_firmware_label_files(struct pci_dev *pdev)
+{ return; }
+static inline void pci_remove_firmware_label_files(struct pci_dev *pdev)
+{ return; }
+#else
+void pci_create_firmware_label_files(struct pci_dev *pdev);
+void pci_remove_firmware_label_files(struct pci_dev *pdev);
+#endif
+void pci_cleanup_rom(struct pci_dev *dev);
+#ifdef HAVE_PCI_MMAP
+enum pci_mmap_api {
+ PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */
+ PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */
+};
+int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai,
+ enum pci_mmap_api mmap_api);
+#endif
+int pci_probe_reset_function(struct pci_dev *dev);
+
+/**
+ * struct pci_platform_pm_ops - Firmware PM callbacks
+ *
+ * @is_manageable: returns 'true' if given device is power manageable by the
+ * platform firmware
+ *
+ * @set_state: invokes the platform firmware to set the device's power state
+ *
+ * @choose_state: returns PCI power state of given device preferred by the
+ * platform; to be used during system-wide transitions from a
+ * sleeping state to the working state and vice versa
+ *
+ * @sleep_wake: enables/disables the system wake up capability of given device
+ *
+ * @run_wake: enables/disables the platform to generate run-time wake-up events
+ * for given device (the device's wake-up capability has to be
+ * enabled by @sleep_wake for this feature to work)
+ *
+ * @need_resume: returns 'true' if the given device (which is currently
+ * suspended) needs to be resumed to be configured for system
+ * wakeup.
+ *
+ * If given platform is generally capable of power managing PCI devices, all of
+ * these callbacks are mandatory.
+ */
+struct pci_platform_pm_ops {
+ bool (*is_manageable)(struct pci_dev *dev);
+ int (*set_state)(struct pci_dev *dev, pci_power_t state);
+ pci_power_t (*choose_state)(struct pci_dev *dev);
+ int (*sleep_wake)(struct pci_dev *dev, bool enable);
+ int (*run_wake)(struct pci_dev *dev, bool enable);
+ bool (*need_resume)(struct pci_dev *dev);
+};
+
+int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
+void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
+void pci_power_up(struct pci_dev *dev);
+void pci_disable_enabled_device(struct pci_dev *dev);
+int pci_finish_runtime_suspend(struct pci_dev *dev);
+int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
+bool pci_dev_keep_suspended(struct pci_dev *dev);
+void pci_config_pm_runtime_get(struct pci_dev *dev);
+void pci_config_pm_runtime_put(struct pci_dev *dev);
+void pci_pm_init(struct pci_dev *dev);
+void pci_allocate_cap_save_buffers(struct pci_dev *dev);
+void pci_free_cap_save_buffers(struct pci_dev *dev);
+
+static inline void pci_wakeup_event(struct pci_dev *dev)
+{
+ /* Wait 100 ms before the system can be put into a sleep state. */
+ pm_wakeup_event(&dev->dev, 100);
+}
+
+static inline bool pci_has_subordinate(struct pci_dev *pci_dev)
+{
+ return !!(pci_dev->subordinate);
+}
+
+struct pci_vpd_ops {
+ ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
+ ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
+ void (*release)(struct pci_dev *dev);
+};
+
+struct pci_vpd {
+ unsigned int len;
+ const struct pci_vpd_ops *ops;
+ struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
+};
+
+int pci_vpd_pci22_init(struct pci_dev *dev);
+static inline void pci_vpd_release(struct pci_dev *dev)
+{
+ if (dev->vpd)
+ dev->vpd->ops->release(dev);
+}
+
+/* PCI /proc functions */
+#ifdef CONFIG_PROC_FS
+int pci_proc_attach_device(struct pci_dev *dev);
+int pci_proc_detach_device(struct pci_dev *dev);
+int pci_proc_detach_bus(struct pci_bus *bus);
+#else
+static inline int pci_proc_attach_device(struct pci_dev *dev) { return 0; }
+static inline int pci_proc_detach_device(struct pci_dev *dev) { return 0; }
+static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; }
+#endif
+
+/* Functions for PCI Hotplug drivers to use */
+int pci_hp_add_bridge(struct pci_dev *dev);
+
+#ifdef HAVE_PCI_LEGACY
+void pci_create_legacy_files(struct pci_bus *bus);
+void pci_remove_legacy_files(struct pci_bus *bus);
+#else
+static inline void pci_create_legacy_files(struct pci_bus *bus) { return; }
+static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; }
+#endif
+
+/* Lock for read/write access to pci device and bus lists */
+extern struct rw_semaphore pci_bus_sem;
+
+extern raw_spinlock_t pci_lock;
+
+extern unsigned int pci_pm_d3_delay;
+
+#ifdef CONFIG_PCI_MSI
+void pci_no_msi(void);
+void pci_msi_init_pci_dev(struct pci_dev *dev);
+#else
+static inline void pci_no_msi(void) { }
+static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
+#endif
+
+void pci_realloc_get_opt(char *);
+
+static inline int pci_no_d1d2(struct pci_dev *dev)
+{
+ unsigned int parent_dstates = 0;
+
+ if (dev->bus->self)
+ parent_dstates = dev->bus->self->no_d1d2;
+	return (dev->no_d1d2 || parent_dstates);
+}
+extern const struct attribute_group *pci_dev_groups[];
+extern const struct attribute_group *pcibus_groups[];
+extern struct device_type pci_dev_type;
+extern const struct attribute_group *pci_bus_groups[];
+
+/**
+ * pci_match_one_device - Tell if a PCI device structure has a matching
+ * PCI device id structure
+ * @id: single PCI device id structure to match
+ * @dev: the PCI device structure to match against
+ *
+ * Returns the matching pci_device_id structure or %NULL if there is no match.
+ */
+static inline const struct pci_device_id *
+pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
+{
+ if ((id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) &&
+ (id->device == PCI_ANY_ID || id->device == dev->device) &&
+ (id->subvendor == PCI_ANY_ID || id->subvendor == dev->subsystem_vendor) &&
+ (id->subdevice == PCI_ANY_ID || id->subdevice == dev->subsystem_device) &&
+ !((id->class ^ dev->class) & id->class_mask))
+ return id;
+ return NULL;
+}
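
Each field treats PCI_ANY_ID as a wildcard, and the class comparison is masked; a sketch of an id table this helper would match (values illustrative):

/* Sketch only: any device from vendor 0x1234, plus any AHCI-class
 * device (class 01:06:01, fully masked) from any vendor. */
static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1234, PCI_ANY_ID) },
	{ PCI_DEVICE_CLASS(0x010601, 0xffffff) },
	{ }	/* terminator */
};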
+
+/* PCI slot sysfs helper code */
+#define to_pci_slot(s) container_of(s, struct pci_slot, kobj)
+
+extern struct kset *pci_slots_kset;
+
+struct pci_slot_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct pci_slot *, char *);
+ ssize_t (*store)(struct pci_slot *, const char *, size_t);
+};
+#define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr)
+
+enum pci_bar_type {
+ pci_bar_unknown, /* Standard PCI BAR probe */
+ pci_bar_io, /* An io port BAR */
+ pci_bar_mem32, /* A 32-bit memory BAR */
+ pci_bar_mem64, /* A 64-bit memory BAR */
+};
+
+bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
+ int crs_timeout);
+int pci_setup_device(struct pci_dev *dev);
+int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ struct resource *res, unsigned int reg);
+int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
+void pci_configure_ari(struct pci_dev *dev);
+void __pci_bus_size_bridges(struct pci_bus *bus,
+ struct list_head *realloc_head);
+void __pci_bus_assign_resources(const struct pci_bus *bus,
+ struct list_head *realloc_head,
+ struct list_head *fail_head);
+bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
+
+/**
+ * pci_ari_enabled - query ARI forwarding status
+ * @bus: the PCI bus
+ *
+ * Returns 1 if ARI forwarding is enabled; otherwise returns 0.
+ */
+static inline int pci_ari_enabled(struct pci_bus *bus)
+{
+ return bus->self && bus->self->ari_enabled;
+}
+
+void pci_reassigndev_resource_alignment(struct pci_dev *dev);
+void pci_disable_bridge_window(struct pci_dev *dev);
+
+/* Single Root I/O Virtualization */
+struct pci_sriov {
+ int pos; /* capability position */
+ int nres; /* number of resources */
+ u32 cap; /* SR-IOV Capabilities */
+ u16 ctrl; /* SR-IOV Control */
+ u16 total_VFs; /* total VFs associated with the PF */
+ u16 initial_VFs; /* initial VFs associated with the PF */
+ u16 num_VFs; /* number of VFs available */
+ u16 offset; /* first VF Routing ID offset */
+ u16 stride; /* following VF stride */
+ u32 pgsz; /* page size for BAR alignment */
+ u8 link; /* Function Dependency Link */
+ u8 max_VF_buses; /* max buses consumed by VFs */
+ u16 driver_max_VFs; /* max num VFs driver supports */
+ struct pci_dev *dev; /* lowest numbered PF */
+ struct pci_dev *self; /* this PF */
+ struct mutex lock; /* lock for VF bus */
+ resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */
+};
+
+#ifdef CONFIG_PCI_ATS
+void pci_restore_ats_state(struct pci_dev *dev);
+#else
+static inline void pci_restore_ats_state(struct pci_dev *dev)
+{
+}
+#endif /* CONFIG_PCI_ATS */
+
+#ifdef CONFIG_PCI_IOV
+int pci_iov_init(struct pci_dev *dev);
+void pci_iov_release(struct pci_dev *dev);
+int pci_iov_resource_bar(struct pci_dev *dev, int resno);
+resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
+void pci_restore_iov_state(struct pci_dev *dev);
+int pci_iov_bus_range(struct pci_bus *bus);
+
+#else
+static inline int pci_iov_init(struct pci_dev *dev)
+{
+ return -ENODEV;
+}
+static inline void pci_iov_release(struct pci_dev *dev)
+{
+}
+static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno)
+{
+ return 0;
+}
+static inline void pci_restore_iov_state(struct pci_dev *dev)
+{
+}
+static inline int pci_iov_bus_range(struct pci_bus *bus)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PCI_IOV */
+
+unsigned long pci_cardbus_resource_alignment(struct resource *);
+
+static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
+ struct resource *res)
+{
+#ifdef CONFIG_PCI_IOV
+ int resno = res - dev->resource;
+
+ if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+ return pci_sriov_resource_alignment(dev, resno);
+#endif
+ if (dev->class >> 8 == PCI_CLASS_BRIDGE_CARDBUS)
+ return pci_cardbus_resource_alignment(res);
+ return resource_alignment(res);
+}
+
+void pci_enable_acs(struct pci_dev *dev);
+
+struct pci_dev_reset_methods {
+ u16 vendor;
+ u16 device;
+ int (*reset)(struct pci_dev *dev, int probe);
+};
+
+#ifdef CONFIG_PCI_QUIRKS
+int pci_dev_specific_reset(struct pci_dev *dev, int probe);
+#else
+static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
+{
+ return -ENOTTY;
+}
+#endif
+
+struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
+
+#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
new file mode 100644
index 000000000..e294713c8
--- /dev/null
+++ b/drivers/pci/pcie/Kconfig
@@ -0,0 +1,82 @@
+#
+# PCI Express Port Bus Configuration
+#
+config PCIEPORTBUS
+ bool "PCI Express Port Bus support"
+ depends on PCI
+ help
+ This automatically enables PCI Express Port Bus support. Users can
+ choose Native Hot-Plug support, Advanced Error Reporting support,
+ Power Management Event support and Virtual Channel support to run
+ on PCI Express Ports (Root or Switch).
+
+#
+# Include service Kconfig here
+#
+config HOTPLUG_PCI_PCIE
+ bool "PCI Express Hotplug driver"
+ depends on HOTPLUG_PCI && PCIEPORTBUS
+ help
+ Say Y here if you have a motherboard that supports PCI Express Native
+	  Hotplug.
+
+ When in doubt, say N.
+
+source "drivers/pci/pcie/aer/Kconfig"
+
+#
+# PCI Express ASPM
+#
+config PCIEASPM
+ bool "PCI Express ASPM control" if EXPERT
+ depends on PCI && PCIEPORTBUS
+ default y
+ help
+ This enables OS control over PCI Express ASPM (Active State
+ Power Management) and Clock Power Management. ASPM supports
+ state L0/L0s/L1.
+
+ ASPM is initially set up by the firmware. With this option enabled,
+ Linux can modify this state in order to disable ASPM on known-bad
+ hardware or configurations and enable it when known-safe.
+
+ ASPM can be disabled or enabled at runtime via
+ /sys/module/pcie_aspm/parameters/policy
+
+	  When in doubt, say Y.
+
+config PCIEASPM_DEBUG
+ bool "Debug PCI Express ASPM"
+ depends on PCIEASPM
+ default n
+ help
+ This enables PCI Express ASPM debug support. It will add per-device
+ interface to control ASPM.
+
+choice
+ prompt "Default ASPM policy"
+ default PCIEASPM_DEFAULT
+ depends on PCIEASPM
+
+config PCIEASPM_DEFAULT
+ bool "BIOS default"
+ depends on PCIEASPM
+ help
+ Use the BIOS defaults for PCI Express ASPM.
+
+config PCIEASPM_POWERSAVE
+ bool "Powersave"
+ depends on PCIEASPM
+ help
+ Enable PCI Express ASPM L0s and L1 where possible, even if the
+ BIOS did not.
+
+config PCIEASPM_PERFORMANCE
+ bool "Performance"
+ depends on PCIEASPM
+ help
+ Disable PCI Express ASPM L0s and L1, even if the BIOS enabled them.
+endchoice
+
+config PCIE_PME
+ def_bool y
+ depends on PCIEPORTBUS && PM
diff --git a/drivers/pci/pcie/Makefile b/drivers/pci/pcie/Makefile
new file mode 100644
index 000000000..00c62df5a
--- /dev/null
+++ b/drivers/pci/pcie/Makefile
@@ -0,0 +1,16 @@
+#
+# Makefile for PCI-Express PORT Driver
+#
+
+# Build PCI Express ASPM if needed
+obj-$(CONFIG_PCIEASPM) += aspm.o
+
+pcieportdrv-y := portdrv_core.o portdrv_pci.o portdrv_bus.o
+pcieportdrv-$(CONFIG_ACPI) += portdrv_acpi.o
+
+obj-$(CONFIG_PCIEPORTBUS) += pcieportdrv.o
+
+# Build PCI Express AER if needed
+obj-$(CONFIG_PCIEAER) += aer/
+
+obj-$(CONFIG_PCIE_PME) += pme.o
diff --git a/drivers/pci/pcie/aer/Kconfig b/drivers/pci/pcie/aer/Kconfig
new file mode 100644
index 000000000..7d1437b01
--- /dev/null
+++ b/drivers/pci/pcie/aer/Kconfig
@@ -0,0 +1,28 @@
+#
+# PCI Express Root Port Device AER Configuration
+#
+
+config PCIEAER
+ bool "Root Port Advanced Error Reporting support"
+ depends on PCIEPORTBUS
+ select RAS
+ default y
+ help
+ This enables PCI Express Root Port Advanced Error Reporting
+	  (AER) driver support. Error reporting messages sent to the Root
+	  Port will be handled by the PCI Express AER driver.
+
+#
+# PCI Express ECRC
+#
+config PCIE_ECRC
+ bool "PCI Express ECRC settings control"
+ depends on PCIEAER
+ help
+ Used to override firmware/bios settings for PCI Express ECRC
+ (transaction layer end-to-end CRC checking).
+
+ When in doubt, say N.
+
+source "drivers/pci/pcie/aer/Kconfig.debug"
diff --git a/drivers/pci/pcie/aer/Kconfig.debug b/drivers/pci/pcie/aer/Kconfig.debug
new file mode 100644
index 000000000..914294973
--- /dev/null
+++ b/drivers/pci/pcie/aer/Kconfig.debug
@@ -0,0 +1,18 @@
+#
+# PCI Express Root Port Device AER Debug Configuration
+#
+
+config PCIEAER_INJECT
+ tristate "PCIe AER error injector support"
+ depends on PCIEAER
+ default n
+ help
+ This enables PCI Express Root Port Advanced Error Reporting
+ (AER) software error injector.
+
+ Debugging PCIe AER code is quite difficult because it is hard
+ to trigger various real hardware errors. Software based
+	  error injection can fake almost all kinds of errors with the
+	  help of a user space helper tool aer-inject, which is
+	  available from:
+ http://www.kernel.org/pub/linux/utils/pci/aer-inject/
diff --git a/drivers/pci/pcie/aer/Makefile b/drivers/pci/pcie/aer/Makefile
new file mode 100644
index 000000000..2cba67510
--- /dev/null
+++ b/drivers/pci/pcie/aer/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for PCI-Express Root Port Advanced Error Reporting Driver
+#
+
+obj-$(CONFIG_PCIEAER) += aerdriver.o
+
+obj-$(CONFIG_PCIE_ECRC) += ecrc.o
+
+aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o
+aerdriver-$(CONFIG_ACPI) += aerdrv_acpi.o
+
+obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
new file mode 100644
index 000000000..182224ace
--- /dev/null
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -0,0 +1,536 @@
+/*
+ * PCIe AER software error injection support.
+ *
+ * Debugging PCIe AER code is quite difficult because it is hard to
+ * trigger various real hardware errors. Software based error
+ * injection can fake almost all kinds of errors with the help of a
+ * user space helper tool aer-inject, which is available from:
+ * http://www.kernel.org/pub/linux/utils/pci/aer-inject/
+ *
+ * Copyright 2009 Intel Corporation.
+ * Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/stddef.h>
+#include "aerdrv.h"
+
+/* Override the existing corrected and uncorrected error masks */
+static bool aer_mask_override;
+module_param(aer_mask_override, bool, 0);
+
+struct aer_error_inj {
+ u8 bus;
+ u8 dev;
+ u8 fn;
+ u32 uncor_status;
+ u32 cor_status;
+ u32 header_log0;
+ u32 header_log1;
+ u32 header_log2;
+ u32 header_log3;
+ u16 domain;
+};
+
+struct aer_error {
+ struct list_head list;
+ u16 domain;
+ unsigned int bus;
+ unsigned int devfn;
+ int pos_cap_err;
+
+ u32 uncor_status;
+ u32 cor_status;
+ u32 header_log0;
+ u32 header_log1;
+ u32 header_log2;
+ u32 header_log3;
+ u32 root_status;
+ u32 source_id;
+};
+
+struct pci_bus_ops {
+ struct list_head list;
+ struct pci_bus *bus;
+ struct pci_ops *ops;
+};
+
+static LIST_HEAD(einjected);
+
+static LIST_HEAD(pci_bus_ops_list);
+
+/* Protect einjected and pci_bus_ops_list */
+static DEFINE_SPINLOCK(inject_lock);
+
+static void aer_error_init(struct aer_error *err, u16 domain,
+ unsigned int bus, unsigned int devfn,
+ int pos_cap_err)
+{
+ INIT_LIST_HEAD(&err->list);
+ err->domain = domain;
+ err->bus = bus;
+ err->devfn = devfn;
+ err->pos_cap_err = pos_cap_err;
+}
+
+/* inject_lock must be held before calling */
+static struct aer_error *__find_aer_error(u16 domain, unsigned int bus,
+ unsigned int devfn)
+{
+ struct aer_error *err;
+
+ list_for_each_entry(err, &einjected, list) {
+ if (domain == err->domain &&
+ bus == err->bus &&
+ devfn == err->devfn)
+ return err;
+ }
+ return NULL;
+}
+
+/* inject_lock must be held before calling */
+static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
+{
+ int domain = pci_domain_nr(dev->bus);
+ if (domain < 0)
+ return NULL;
+ return __find_aer_error((u16)domain, dev->bus->number, dev->devfn);
+}
+
+/* inject_lock must be held before calling */
+static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
+{
+ struct pci_bus_ops *bus_ops;
+
+ list_for_each_entry(bus_ops, &pci_bus_ops_list, list) {
+ if (bus_ops->bus == bus)
+ return bus_ops->ops;
+ }
+ return NULL;
+}
+
+static struct pci_bus_ops *pci_bus_ops_pop(void)
+{
+ unsigned long flags;
+ struct pci_bus_ops *bus_ops = NULL;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (list_empty(&pci_bus_ops_list))
+ bus_ops = NULL;
+ else {
+ struct list_head *lh = pci_bus_ops_list.next;
+ list_del(lh);
+ bus_ops = list_entry(lh, struct pci_bus_ops, list);
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return bus_ops;
+}
+
+static u32 *find_pci_config_dword(struct aer_error *err, int where,
+ int *prw1cs)
+{
+ int rw1cs = 0;
+ u32 *target = NULL;
+
+ if (err->pos_cap_err == -1)
+ return NULL;
+
+ switch (where - err->pos_cap_err) {
+ case PCI_ERR_UNCOR_STATUS:
+ target = &err->uncor_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_COR_STATUS:
+ target = &err->cor_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_HEADER_LOG:
+ target = &err->header_log0;
+ break;
+ case PCI_ERR_HEADER_LOG+4:
+ target = &err->header_log1;
+ break;
+ case PCI_ERR_HEADER_LOG+8:
+ target = &err->header_log2;
+ break;
+ case PCI_ERR_HEADER_LOG+12:
+ target = &err->header_log3;
+ break;
+ case PCI_ERR_ROOT_STATUS:
+ target = &err->root_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_ROOT_ERR_SRC:
+ target = &err->source_id;
+ break;
+ }
+ if (prw1cs)
+ *prw1cs = rw1cs;
+ return target;
+}
+
+static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ u32 *sim;
+ struct aer_error *err;
+ unsigned long flags;
+ struct pci_ops *ops;
+ int domain;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (size != sizeof(u32))
+ goto out;
+ domain = pci_domain_nr(bus);
+ if (domain < 0)
+ goto out;
+ err = __find_aer_error((u16)domain, bus->number, devfn);
+ if (!err)
+ goto out;
+
+ sim = find_pci_config_dword(err, where, NULL);
+ if (sim) {
+ *val = *sim;
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return 0;
+ }
+out:
+ ops = __find_pci_bus_ops(bus);
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return ops->read(bus, devfn, where, size, val);
+}
+
+static int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 val)
+{
+ u32 *sim;
+ struct aer_error *err;
+ unsigned long flags;
+ int rw1cs;
+ struct pci_ops *ops;
+ int domain;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (size != sizeof(u32))
+ goto out;
+ domain = pci_domain_nr(bus);
+ if (domain < 0)
+ goto out;
+ err = __find_aer_error((u16)domain, bus->number, devfn);
+ if (!err)
+ goto out;
+
+ sim = find_pci_config_dword(err, where, &rw1cs);
+ if (sim) {
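+		/*
+		 * Emulate write-1-to-clear semantics: XOR clears the bits
+		 * written back as 1, on the assumption that callers only
+		 * write back bits they previously read as set.
+		 */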
+ if (rw1cs)
+ *sim ^= val;
+ else
+ *sim = val;
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return 0;
+ }
+out:
+ ops = __find_pci_bus_ops(bus);
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return ops->write(bus, devfn, where, size, val);
+}
+
+static struct pci_ops pci_ops_aer = {
+ .read = pci_read_aer,
+ .write = pci_write_aer,
+};
+
+static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
+ struct pci_bus *bus,
+ struct pci_ops *ops)
+{
+ INIT_LIST_HEAD(&bus_ops->list);
+ bus_ops->bus = bus;
+ bus_ops->ops = ops;
+}
+
+static int pci_bus_set_aer_ops(struct pci_bus *bus)
+{
+ struct pci_ops *ops;
+ struct pci_bus_ops *bus_ops;
+ unsigned long flags;
+
+ bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+ ops = pci_bus_set_ops(bus, &pci_ops_aer);
+ spin_lock_irqsave(&inject_lock, flags);
+ if (ops == &pci_ops_aer)
+ goto out;
+ pci_bus_ops_init(bus_ops, bus, ops);
+ list_add(&bus_ops->list, &pci_bus_ops_list);
+ bus_ops = NULL;
+out:
+ spin_unlock_irqrestore(&inject_lock, flags);
+ kfree(bus_ops);
+ return 0;
+}
+
+static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+{
+ while (1) {
+ if (!pci_is_pcie(dev))
+ break;
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+ return dev;
+ if (!dev->bus->self)
+ break;
+ dev = dev->bus->self;
+ }
+ return NULL;
+}
+
+static int find_aer_device_iter(struct device *device, void *data)
+{
+ struct pcie_device **result = data;
+ struct pcie_device *pcie_dev;
+
+ if (device->bus == &pcie_port_bus_type) {
+ pcie_dev = to_pcie_device(device);
+ if (pcie_dev->service & PCIE_PORT_SERVICE_AER) {
+ *result = pcie_dev;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int find_aer_device(struct pci_dev *dev, struct pcie_device **result)
+{
+ return device_for_each_child(&dev->dev, result, find_aer_device_iter);
+}
+
+static int aer_inject(struct aer_error_inj *einj)
+{
+ struct aer_error *err, *rperr;
+ struct aer_error *err_alloc = NULL, *rperr_alloc = NULL;
+ struct pci_dev *dev, *rpdev;
+ struct pcie_device *edev;
+ unsigned long flags;
+ unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
+ int pos_cap_err, rp_pos_cap_err;
+ u32 sever, cor_mask, uncor_mask, cor_mask_orig = 0, uncor_mask_orig = 0;
+ int ret = 0;
+
+ dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn);
+ if (!dev)
+ return -ENODEV;
+ rpdev = pcie_find_root_port(dev);
+ if (!rpdev) {
+ ret = -ENODEV;
+ goto out_put;
+ }
+
+ pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos_cap_err) {
+ ret = -EPERM;
+ goto out_put;
+ }
+ pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
+ pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &cor_mask);
+ pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
+ &uncor_mask);
+
+ rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
+ if (!rp_pos_cap_err) {
+ ret = -EPERM;
+ goto out_put;
+ }
+
+ err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
+ if (!err_alloc) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+ rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
+ if (!rperr_alloc) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ if (aer_mask_override) {
+ cor_mask_orig = cor_mask;
+		cor_mask &= ~einj->cor_status;	/* bitwise NOT, not logical '!' */
+ pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK,
+ cor_mask);
+
+ uncor_mask_orig = uncor_mask;
+		uncor_mask &= ~einj->uncor_status;	/* bitwise NOT, not logical '!' */
+ pci_write_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
+ uncor_mask);
+ }
+
+ spin_lock_irqsave(&inject_lock, flags);
+
+ err = __find_aer_error_by_dev(dev);
+ if (!err) {
+ err = err_alloc;
+ err_alloc = NULL;
+ aer_error_init(err, einj->domain, einj->bus, devfn,
+ pos_cap_err);
+ list_add(&err->list, &einjected);
+ }
+ err->uncor_status |= einj->uncor_status;
+ err->cor_status |= einj->cor_status;
+ err->header_log0 = einj->header_log0;
+ err->header_log1 = einj->header_log1;
+ err->header_log2 = einj->header_log2;
+ err->header_log3 = einj->header_log3;
+
+ if (!aer_mask_override && einj->cor_status &&
+ !(einj->cor_status & ~cor_mask)) {
+ ret = -EINVAL;
+ printk(KERN_WARNING "The correctable error(s) is masked by device\n");
+ spin_unlock_irqrestore(&inject_lock, flags);
+ goto out_put;
+ }
+ if (!aer_mask_override && einj->uncor_status &&
+ !(einj->uncor_status & ~uncor_mask)) {
+ ret = -EINVAL;
+ printk(KERN_WARNING "The uncorrectable error(s) is masked by device\n");
+ spin_unlock_irqrestore(&inject_lock, flags);
+ goto out_put;
+ }
+
+ rperr = __find_aer_error_by_dev(rpdev);
+ if (!rperr) {
+ rperr = rperr_alloc;
+ rperr_alloc = NULL;
+ aer_error_init(rperr, pci_domain_nr(rpdev->bus),
+ rpdev->bus->number, rpdev->devfn,
+ rp_pos_cap_err);
+ list_add(&rperr->list, &einjected);
+ }
+ if (einj->cor_status) {
+ if (rperr->root_status & PCI_ERR_ROOT_COR_RCV)
+ rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
+ else
+ rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
+ rperr->source_id &= 0xffff0000;
+ rperr->source_id |= (einj->bus << 8) | devfn;
+ }
+ if (einj->uncor_status) {
+ if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
+ rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
+ if (sever & einj->uncor_status) {
+ rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
+ if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
+ rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
+ } else
+ rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
+ rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
+ rperr->source_id &= 0x0000ffff;
+ rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+
+ if (aer_mask_override) {
+ pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK,
+ cor_mask_orig);
+ pci_write_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
+ uncor_mask_orig);
+ }
+
+ ret = pci_bus_set_aer_ops(dev->bus);
+ if (ret)
+ goto out_put;
+ ret = pci_bus_set_aer_ops(rpdev->bus);
+ if (ret)
+ goto out_put;
+
+ if (find_aer_device(rpdev, &edev)) {
+ if (!get_service_data(edev)) {
+ printk(KERN_WARNING "AER service is not initialized\n");
+ ret = -EINVAL;
+ goto out_put;
+ }
+ aer_irq(-1, edev);
+ } else
+ ret = -EINVAL;
+out_put:
+ kfree(err_alloc);
+ kfree(rperr_alloc);
+ pci_dev_put(dev);
+ return ret;
+}
+
+static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
+ size_t usize, loff_t *off)
+{
+ struct aer_error_inj einj;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (usize < offsetof(struct aer_error_inj, domain) ||
+ usize > sizeof(einj))
+ return -EINVAL;
+
+ memset(&einj, 0, sizeof(einj));
+ if (copy_from_user(&einj, ubuf, usize))
+ return -EFAULT;
+
+ ret = aer_inject(&einj);
+ return ret ? ret : usize;
+}
+
+static const struct file_operations aer_inject_fops = {
+ .write = aer_inject_write,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice aer_inject_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "aer_inject",
+ .fops = &aer_inject_fops,
+};
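
From user space, injection is a single structured write to this misc device; a hedged sketch of what the aer-inject helper tool effectively does (field values illustrative, and the struct layout, including padding, must match struct aer_error_inj above on the same architecture):

/* Userspace sketch, not part of this patch: inject a corrected error
 * for device 0000:01:00.0 via /dev/aer_inject. */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

struct aer_error_inj_user {		/* mirrors struct aer_error_inj */
	uint8_t bus, dev, fn;
	uint32_t uncor_status, cor_status;
	uint32_t header_log0, header_log1, header_log2, header_log3;
	uint16_t domain;
};

int inject_cor_example(void)
{
	struct aer_error_inj_user einj = {
		.bus = 1,			/* bus 01, dev/fn 00.0 */
		.cor_status = 0x00000040,	/* PCI_ERR_COR_BAD_TLP */
	};
	int fd = open("/dev/aer_inject", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, &einj, sizeof(einj)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}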
+
+static int __init aer_inject_init(void)
+{
+ return misc_register(&aer_inject_device);
+}
+
+static void __exit aer_inject_exit(void)
+{
+ struct aer_error *err, *err_next;
+ unsigned long flags;
+ struct pci_bus_ops *bus_ops;
+
+ misc_deregister(&aer_inject_device);
+
+ while ((bus_ops = pci_bus_ops_pop())) {
+ pci_bus_set_ops(bus_ops->bus, bus_ops->ops);
+ kfree(bus_ops);
+ }
+
+ spin_lock_irqsave(&inject_lock, flags);
+ list_for_each_entry_safe(err, err_next, &einjected, list) {
+ list_del(&err->list);
+ kfree(err);
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+}
+
+module_init(aer_inject_init);
+module_exit(aer_inject_exit);
+
+MODULE_DESCRIPTION("PCIe AER software error injector");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
new file mode 100644
index 000000000..0bf82a20a
--- /dev/null
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -0,0 +1,434 @@
+/*
+ * drivers/pci/pcie/aer/aerdrv.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This file implements the AER root port service driver. The driver will
+ * register an irq handler. When root port triggers an AER interrupt, the irq
+ * handler will collect root port status and schedule a work.
+ *
+ * Copyright (C) 2006 Intel Corp.
+ * Tom Long Nguyen (tom.l.nguyen@intel.com)
+ * Zhang Yanmin (yanmin.zhang@intel.com)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pm.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/pcieport_if.h>
+#include <linux/slab.h>
+
+#include "aerdrv.h"
+#include "../../pci.h"
+
+/*
+ * Version Information
+ */
+#define DRIVER_VERSION "v1.0"
+#define DRIVER_AUTHOR "tom.l.nguyen@intel.com"
+#define DRIVER_DESC "Root Port Advanced Error Reporting Driver"
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+static int aer_probe(struct pcie_device *dev);
+static void aer_remove(struct pcie_device *dev);
+static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
+ enum pci_channel_state error);
+static void aer_error_resume(struct pci_dev *dev);
+static pci_ers_result_t aer_root_reset(struct pci_dev *dev);
+
+static const struct pci_error_handlers aer_error_handlers = {
+ .error_detected = aer_error_detected,
+ .resume = aer_error_resume,
+};
+
+static struct pcie_port_service_driver aerdriver = {
+ .name = "aer",
+ .port_type = PCI_EXP_TYPE_ROOT_PORT,
+ .service = PCIE_PORT_SERVICE_AER,
+
+ .probe = aer_probe,
+ .remove = aer_remove,
+
+ .err_handler = &aer_error_handlers,
+
+ .reset_link = aer_root_reset,
+};
+
+static int pcie_aer_disable;
+
+void pci_no_aer(void)
+{
+ pcie_aer_disable = 1; /* has priority over 'forceload' */
+}
+
+bool pci_aer_available(void)
+{
+ return !pcie_aer_disable && pci_msi_enabled();
+}
+
+static int set_device_error_reporting(struct pci_dev *dev, void *data)
+{
+ bool enable = *((bool *)data);
+ int type = pci_pcie_type(dev);
+
+ if ((type == PCI_EXP_TYPE_ROOT_PORT) ||
+ (type == PCI_EXP_TYPE_UPSTREAM) ||
+ (type == PCI_EXP_TYPE_DOWNSTREAM)) {
+ if (enable)
+ pci_enable_pcie_error_reporting(dev);
+ else
+ pci_disable_pcie_error_reporting(dev);
+ }
+
+ if (enable)
+ pcie_set_ecrc_checking(dev);
+
+ return 0;
+}
+
+/**
+ * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
+ * @dev: pointer to root port's pci_dev data structure
+ * @enable: true = enable error reporting, false = disable error reporting.
+ */
+static void set_downstream_devices_error_reporting(struct pci_dev *dev,
+ bool enable)
+{
+ set_device_error_reporting(dev, &enable);
+
+ if (!dev->subordinate)
+ return;
+ pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
+}
+
+/**
+ * aer_enable_rootport - enable Root Port's interrupts when receiving messages
+ * @rpc: pointer to a Root Port data structure
+ *
+ * Invoked when PCIe bus loads AER service driver.
+ */
+static void aer_enable_rootport(struct aer_rpc *rpc)
+{
+ struct pci_dev *pdev = rpc->rpd->port;
+ int aer_pos;
+ u16 reg16;
+ u32 reg32;
+
+ /* Clear PCIe Capability's Device Status */
+ pcie_capability_read_word(pdev, PCI_EXP_DEVSTA, &reg16);
+ pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, reg16);
+
+ /* Disable system error generation in response to error messages */
+ pcie_capability_clear_word(pdev, PCI_EXP_RTCTL,
+ SYSTEM_ERROR_INTR_ON_MESG_MASK);
+
+ aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ /* Clear error status */
+ pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
+ pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32);
+ pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
+ pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);
+
+ /*
+ * Enable error reporting for the root port device and downstream port
+ * devices.
+ */
+ set_downstream_devices_error_reporting(pdev, true);
+
+ /* Enable Root Port's interrupt in response to error messages */
+ pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, reg32);
+}
+
+/**
+ * aer_disable_rootport - disable Root Port's interrupts when receiving messages
+ * @rpc: pointer to a Root Port data structure
+ *
+ * Invoked when PCIe bus unloads AER service driver.
+ */
+static void aer_disable_rootport(struct aer_rpc *rpc)
+{
+ struct pci_dev *pdev = rpc->rpd->port;
+ u32 reg32;
+ int pos;
+
+ /*
+ * Disable error reporting for the root port device and downstream port
+ * devices.
+ */
+ set_downstream_devices_error_reporting(pdev, false);
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ /* Disable Root's interrupt in response to error messages */
+ pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, reg32);
+
+ /* Clear Root's error status reg */
+ pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32);
+}
+
+/**
+ * aer_irq - Root Port's ISR
+ * @irq: IRQ assigned to Root Port
+ * @context: pointer to Root Port data structure
+ *
+ * Invoked when Root Port detects AER messages.
+ */
+irqreturn_t aer_irq(int irq, void *context)
+{
+ unsigned int status, id;
+ struct pcie_device *pdev = (struct pcie_device *)context;
+ struct aer_rpc *rpc = get_service_data(pdev);
+ int next_prod_idx;
+ unsigned long flags;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev->port, PCI_EXT_CAP_ID_ERR);
+ /*
+ * Must lock access to Root Error Status Reg, Root Error ID Reg,
+ * and Root error producer/consumer index
+ */
+ spin_lock_irqsave(&rpc->e_lock, flags);
+
+ /* Read error status */
+ pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, &status);
+ if (!(status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV))) {
+ spin_unlock_irqrestore(&rpc->e_lock, flags);
+ return IRQ_NONE;
+ }
+
+ /* Read error source and clear error status */
+ pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_ERR_SRC, &id);
+ pci_write_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, status);
+
+ /* Store error source for later DPC handler */
+ next_prod_idx = rpc->prod_idx + 1;
+ if (next_prod_idx == AER_ERROR_SOURCES_MAX)
+ next_prod_idx = 0;
+ if (next_prod_idx == rpc->cons_idx) {
+ /*
+ * Error storm condition - the queue is full, possibly because
+ * the same error keeps recurring. Drop this error.
+ */
+ spin_unlock_irqrestore(&rpc->e_lock, flags);
+ return IRQ_HANDLED;
+ }
+ rpc->e_sources[rpc->prod_idx].status = status;
+ rpc->e_sources[rpc->prod_idx].id = id;
+ rpc->prod_idx = next_prod_idx;
+ spin_unlock_irqrestore(&rpc->e_lock, flags);
+
+ /* Invoke DPC handler */
+ schedule_work(&rpc->dpc_handler);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(aer_irq);
+
+/**
+ * aer_alloc_rpc - allocate Root Port data structure
+ * @dev: pointer to the pcie_dev data structure
+ *
+ * Invoked when Root Port's AER service is loaded.
+ */
+static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
+{
+ struct aer_rpc *rpc;
+
+ rpc = kzalloc(sizeof(struct aer_rpc), GFP_KERNEL);
+ if (!rpc)
+ return NULL;
+
+ /* Initialize e_lock, which serializes access to the Root Error Status Reg */
+ spin_lock_init(&rpc->e_lock);
+
+ rpc->rpd = dev;
+ INIT_WORK(&rpc->dpc_handler, aer_isr);
+ mutex_init(&rpc->rpc_mutex);
+ init_waitqueue_head(&rpc->wait_release);
+
+ /* Use PCIe bus function to store rpc into PCIe device */
+ set_service_data(dev, rpc);
+
+ return rpc;
+}
+
+/**
+ * aer_remove - clean up resources
+ * @dev: pointer to the pcie_dev data structure
+ *
+ * Invoked when PCI Express bus unloads or AER probe fails.
+ */
+static void aer_remove(struct pcie_device *dev)
+{
+ struct aer_rpc *rpc = get_service_data(dev);
+
+ if (rpc) {
+ /* If the interrupt service was registered, it must be freed. */
+ if (rpc->isr)
+ free_irq(dev->irq, dev);
+
+ wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
+
+ aer_disable_rootport(rpc);
+ kfree(rpc);
+ set_service_data(dev, NULL);
+ }
+}
+
+/**
+ * aer_probe - initialize resources
+ * @dev: pointer to the pcie_dev data structure
+ * @id: pointer to the service id data structure
+ *
+ * Invoked when PCI Express bus loads AER service driver.
+ */
+static int aer_probe(struct pcie_device *dev)
+{
+ int status;
+ struct aer_rpc *rpc;
+ struct device *device = &dev->device;
+
+ /* Init */
+ status = aer_init(dev);
+ if (status)
+ return status;
+
+ /* Alloc rpc data structure */
+ rpc = aer_alloc_rpc(dev);
+ if (!rpc) {
+ dev_printk(KERN_DEBUG, device, "alloc rpc failed\n");
+ aer_remove(dev);
+ return -ENOMEM;
+ }
+
+ /* Request IRQ ISR */
+ status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev);
+ if (status) {
+ dev_printk(KERN_DEBUG, device, "request IRQ failed\n");
+ aer_remove(dev);
+ return status;
+ }
+
+ rpc->isr = 1;
+
+ aer_enable_rootport(rpc);
+
+ return status;
+}
+
+/**
+ * aer_root_reset - reset link on Root Port
+ * @dev: pointer to Root Port's pci_dev data structure
+ *
+ * Invoked by Port Bus driver when performing link reset at Root Port.
+ */
+static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
+{
+ u32 reg32;
+ int pos;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+
+ /* Disable Root's interrupt in response to error messages */
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
+
+ pci_reset_bridge_secondary_bus(dev);
+ dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n");
+
+ /* Clear Root Error Status */
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32);
+
+ /* Enable Root Port's interrupt in response to error messages */
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
+ reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * aer_error_detected - update severity status
+ * @dev: pointer to Root Port's pci_dev data structure
+ * @error: error severity being notified by port bus
+ *
+ * Invoked by Port Bus driver during error recovery.
+ */
+static pci_ers_result_t aer_error_detected(struct pci_dev *dev,
+ enum pci_channel_state error)
+{
+ /* Root Port has no impact. Always recovers. */
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
+/**
+ * aer_error_resume - clean up corresponding error status bits
+ * @dev: pointer to Root Port's pci_dev data structure
+ *
+ * Invoked by Port Bus driver during nonfatal recovery.
+ */
+static void aer_error_resume(struct pci_dev *dev)
+{
+ int pos;
+ u32 status, mask;
+ u16 reg16;
+
+ /* Clean up Root device status */
+ pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &reg16);
+ pcie_capability_write_word(dev, PCI_EXP_DEVSTA, reg16);
+
+ /* Clean AER Root Error Status */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
+ if (dev->error_state == pci_channel_io_normal)
+ status &= ~mask; /* Clear corresponding nonfatal bits */
+ else
+ status &= mask; /* Clear corresponding fatal bits */
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+}
+
+/**
+ * aer_service_init - register AER root service driver
+ *
+ * Invoked when AER root service driver is loaded.
+ */
+static int __init aer_service_init(void)
+{
+ if (!pci_aer_available() || aer_acpi_firmware_first())
+ return -ENXIO;
+ return pcie_port_service_register(&aerdriver);
+}
+
+/**
+ * aer_service_exit - unregister AER root service driver
+ *
+ * Invoked when AER root service driver is unloaded.
+ */
+static void __exit aer_service_exit(void)
+{
+ pcie_port_service_unregister(&aerdriver);
+}
+
+module_init(aer_service_init);
+module_exit(aer_service_exit);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
new file mode 100644
index 000000000..84420b7c9
--- /dev/null
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2006 Intel Corp.
+ * Tom Long Nguyen (tom.l.nguyen@intel.com)
+ * Zhang Yanmin (yanmin.zhang@intel.com)
+ *
+ */
+
+#ifndef _AERDRV_H_
+#define _AERDRV_H_
+
+#include <linux/workqueue.h>
+#include <linux/pcieport_if.h>
+#include <linux/aer.h>
+#include <linux/interrupt.h>
+
+#define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \
+ PCI_EXP_RTCTL_SENFEE| \
+ PCI_EXP_RTCTL_SEFEE)
+#define ROOT_PORT_INTR_ON_MESG_MASK (PCI_ERR_ROOT_CMD_COR_EN| \
+ PCI_ERR_ROOT_CMD_NONFATAL_EN| \
+ PCI_ERR_ROOT_CMD_FATAL_EN)
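+/*
+ * The Root Error Source ID register packs the correctable source ID in
+ * its low 16 bits and the uncorrectable source ID in its high 16 bits.
+ */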
+#define ERR_COR_ID(d) (d & 0xffff)
+#define ERR_UNCOR_ID(d) (d >> 16)
+
+#define AER_ERROR_SOURCES_MAX 100
+
+#define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \
+ PCI_ERR_UNC_ECRC| \
+ PCI_ERR_UNC_UNSUP| \
+ PCI_ERR_UNC_COMP_ABORT| \
+ PCI_ERR_UNC_UNX_COMP| \
+ PCI_ERR_UNC_MALF_TLP)
+
+#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
+struct aer_err_info {
+ struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
+ int error_dev_num;
+
+ unsigned int id:16;
+
+ unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */
+ unsigned int __pad1:5;
+ unsigned int multi_error_valid:1;
+
+ unsigned int first_error:5;
+ unsigned int __pad2:2;
+ unsigned int tlp_header_valid:1;
+
+ unsigned int status; /* COR/UNCOR Error Status */
+ unsigned int mask; /* COR/UNCOR Error Mask */
+ struct aer_header_log_regs tlp; /* TLP Header */
+};
+
+struct aer_err_source {
+ unsigned int status;
+ unsigned int id;
+};
+
+struct aer_rpc {
+ struct pcie_device *rpd; /* Root Port device */
+ struct work_struct dpc_handler;
+ struct aer_err_source e_sources[AER_ERROR_SOURCES_MAX];
+ unsigned short prod_idx; /* Error Producer Index */
+ unsigned short cons_idx; /* Error Consumer Index */
+ int isr;
+ spinlock_t e_lock; /*
+ * Lock access to Error Status/ID Regs
+ * and error producer/consumer index
+ */
+ struct mutex rpc_mutex; /*
+ * only one thread could do
+ * recovery on the same
+ * root port hierarchy
+ */
+ wait_queue_head_t wait_release;
+};
+
+struct aer_broadcast_data {
+ enum pci_channel_state state;
+ enum pci_ers_result result;
+};
+
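+/*
+ * Merge two recovery votes: NO_AER_DRIVER always wins, NONE defers to the
+ * current result, CAN_RECOVER/RECOVERED adopt the new vote, and DISCONNECT
+ * is only overridden by NEED_RESET.
+ */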
+static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
+ enum pci_ers_result new)
+{
+ if (new == PCI_ERS_RESULT_NO_AER_DRIVER)
+ return PCI_ERS_RESULT_NO_AER_DRIVER;
+
+ if (new == PCI_ERS_RESULT_NONE)
+ return orig;
+
+ switch (orig) {
+ case PCI_ERS_RESULT_CAN_RECOVER:
+ case PCI_ERS_RESULT_RECOVERED:
+ orig = new;
+ break;
+ case PCI_ERS_RESULT_DISCONNECT:
+ if (new == PCI_ERS_RESULT_NEED_RESET)
+ orig = PCI_ERS_RESULT_NEED_RESET;
+ break;
+ default:
+ break;
+ }
+
+ return orig;
+}
+
+extern struct bus_type pcie_port_bus_type;
+int aer_init(struct pcie_device *dev);
+void aer_isr(struct work_struct *work);
+void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info);
+irqreturn_t aer_irq(int irq, void *context);
+
+#ifdef CONFIG_ACPI_APEI
+int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
+#else
+static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
+{
+ if (pci_dev->__aer_firmware_first_valid)
+ return pci_dev->__aer_firmware_first;
+ return 0;
+}
+#endif
+
+static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
+ int enable)
+{
+ pci_dev->__aer_firmware_first = !!enable;
+ pci_dev->__aer_firmware_first_valid = 1;
+}
+#endif /* _AERDRV_H_ */
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
new file mode 100644
index 000000000..01906576a
--- /dev/null
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -0,0 +1,141 @@
+/*
+ * Access ACPI _OSC method
+ *
+ * Copyright (C) 2006 Intel Corp.
+ * Tom Long Nguyen (tom.l.nguyen@intel.com)
+ * Zhang Yanmin (yanmin.zhang@intel.com)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pm.h>
+#include <linux/suspend.h>
+#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
+#include <linux/delay.h>
+#include <acpi/apei.h>
+#include "aerdrv.h"
+
+#ifdef CONFIG_ACPI_APEI
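+/* Match a HEST AER source entry against a device's segment/bus/dev/fn */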
+static inline int hest_match_pci(struct acpi_hest_aer_common *p,
+ struct pci_dev *pci)
+{
+ return ACPI_HEST_SEGMENT(p->bus) == pci_domain_nr(pci->bus) &&
+ ACPI_HEST_BUS(p->bus) == pci->bus->number &&
+ p->device == PCI_SLOT(pci->devfn) &&
+ p->function == PCI_FUNC(pci->devfn);
+}
+
+static inline bool hest_match_type(struct acpi_hest_header *hest_hdr,
+ struct pci_dev *dev)
+{
+ u16 hest_type = hest_hdr->type;
+ u8 pcie_type = pci_pcie_type(dev);
+
+ if ((hest_type == ACPI_HEST_TYPE_AER_ROOT_PORT &&
+ pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
+ (hest_type == ACPI_HEST_TYPE_AER_ENDPOINT &&
+ pcie_type == PCI_EXP_TYPE_ENDPOINT) ||
+ (hest_type == ACPI_HEST_TYPE_AER_BRIDGE &&
+ (dev->class >> 16) == PCI_BASE_CLASS_BRIDGE))
+ return true;
+ return false;
+}
+
+struct aer_hest_parse_info {
+ struct pci_dev *pci_dev;
+ int firmware_first;
+};
+
+static int hest_source_is_pcie_aer(struct acpi_hest_header *hest_hdr)
+{
+ if (hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT ||
+ hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT ||
+ hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE)
+ return 1;
+ return 0;
+}
+
+static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
+{
+ struct aer_hest_parse_info *info = data;
+ struct acpi_hest_aer_common *p;
+ int ff;
+
+ if (!hest_source_is_pcie_aer(hest_hdr))
+ return 0;
+
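+	/* The AER-specific fields immediately follow the common HEST header */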
+ p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
+ ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+
+ /*
+ * If no specific device is supplied, determine whether
+ * FIRMWARE_FIRST is set for *any* PCIe device.
+ */
+ if (!info->pci_dev) {
+ info->firmware_first |= ff;
+ return 0;
+ }
+
+ /* Otherwise, check the specific device */
+ if (p->flags & ACPI_HEST_GLOBAL) {
+ if (hest_match_type(hest_hdr, info->pci_dev))
+ info->firmware_first = ff;
+ } else if (hest_match_pci(p, info->pci_dev))
+ info->firmware_first = ff;
+
+ return 0;
+}
+
+static void aer_set_firmware_first(struct pci_dev *pci_dev)
+{
+ int rc;
+ struct aer_hest_parse_info info = {
+ .pci_dev = pci_dev,
+ .firmware_first = 0,
+ };
+
+ rc = apei_hest_parse(aer_hest_parse, &info);
+
+ if (rc)
+ pci_dev->__aer_firmware_first = 0;
+ else
+ pci_dev->__aer_firmware_first = info.firmware_first;
+ pci_dev->__aer_firmware_first_valid = 1;
+}
+
+int pcie_aer_get_firmware_first(struct pci_dev *dev)
+{
+ if (!pci_is_pcie(dev))
+ return 0;
+
+ if (!dev->__aer_firmware_first_valid)
+ aer_set_firmware_first(dev);
+ return dev->__aer_firmware_first;
+}
+
+static bool aer_firmware_first;
+
+/**
+ * aer_acpi_firmware_first - Check if APEI should control AER.
+ */
+bool aer_acpi_firmware_first(void)
+{
+ static bool parsed = false;
+ struct aer_hest_parse_info info = {
+ .pci_dev = NULL, /* Check all PCIe devices */
+ .firmware_first = 0,
+ };
+
+ if (!parsed) {
+ apei_hest_parse(aer_hest_parse, &info);
+ aer_firmware_first = info.firmware_first;
+ parsed = true;
+ }
+ return aer_firmware_first;
+}
+#endif
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
new file mode 100644
index 000000000..5653ea945
--- /dev/null
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -0,0 +1,805 @@
+/*
+ * drivers/pci/pcie/aer/aerdrv_core.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * This file implements the core part of PCI Express AER. When a PCI Express
+ * error is delivered, an error message is collected and printed to the
+ * console, then an error recovery procedure is executed following the PCI
+ * error recovery rules.
+ *
+ * Copyright (C) 2006 Intel Corp.
+ * Tom Long Nguyen (tom.l.nguyen@intel.com)
+ * Zhang Yanmin (yanmin.zhang@intel.com)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pm.h>
+#include <linux/suspend.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/kfifo.h>
+#include "aerdrv.h"
+
+static bool forceload;
+static bool nosourceid;
+module_param(forceload, bool, 0);
+module_param(nosourceid, bool, 0);
+
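+/*
+ * Device Control bits that enable reporting of correctable, non-fatal,
+ * fatal, and unsupported request errors.
+ */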
+#define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
+ PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
+
+int pci_enable_pcie_error_reporting(struct pci_dev *dev)
+{
+ if (pcie_aer_get_firmware_first(dev))
+ return -EIO;
+
+ if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
+ return -EIO;
+
+ return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
+}
+EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
+
+int pci_disable_pcie_error_reporting(struct pci_dev *dev)
+{
+ if (pcie_aer_get_firmware_first(dev))
+ return -EIO;
+
+ return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
+ PCI_EXP_AER_FLAGS);
+}
+EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
+
+int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
+{
+ int pos;
+ u32 status;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return -EIO;
+
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ if (status)
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
+
+/**
+ * add_error_device - list device to be handled
+ * @e_info: pointer to error info
+ * @dev: pointer to pci_dev to be added
+ */
+static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
+{
+ if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
+ e_info->dev[e_info->error_dev_num] = dev;
+ e_info->error_dev_num++;
+ return 0;
+ }
+ return -ENOSPC;
+}
+
+/**
+ * is_error_source - check whether the device is source of reported error
+ * @dev: pointer to pci_dev to be checked
+ * @e_info: pointer to reported error info
+ */
+static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
+{
+ int pos;
+ u32 status, mask;
+ u16 reg16;
+
+ /*
+ * When the bus ID is 0, it might be a bogus ID
+ * reported by the root port.
+ */
+ if (!nosourceid && (PCI_BUS_NUM(e_info->id) != 0)) {
+ /* Device ID match? */
+ if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
+ return true;
+
+ /* Continue id comparing if there is no multiple error */
+ if (!e_info->multi_error_valid)
+ return false;
+ }
+
+ /*
+ * We check the AER status registers to find the possible reporter
+ * when any of the following holds:
+ * 1) nosourceid==y;
+ * 2) the bus ID is 0 (some ports might lose the bus ID of the
+ * error source);
+ * 3) there are multiple errors and the prior ID comparison failed.
+ */
+ if (atomic_read(&dev->enable_cnt) == 0)
+ return false;
+
+ /* Check if AER is enabled */
+ pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
+ if (!(reg16 & PCI_EXP_AER_FLAGS))
+ return false;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return false;
+
+ /* Check if error is recorded */
+ if (e_info->severity == AER_CORRECTABLE) {
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
+ } else {
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
+ }
+ if (status & ~mask)
+ return true;
+
+ return false;
+}
+
+static int find_device_iter(struct pci_dev *dev, void *data)
+{
+ struct aer_err_info *e_info = (struct aer_err_info *)data;
+
+ if (is_error_source(dev, e_info)) {
+ /* List this device */
+ if (add_error_device(e_info, dev)) {
+ /* We cannot handle more... Stop iteration */
+ /* TODO: Should print error message here? */
+ return 1;
+ }
+
+ /* If there is only a single error, stop iteration */
+ if (!e_info->multi_error_valid)
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * find_source_device - search through device hierarchy for source device
+ * @parent: pointer to Root Port pci_dev data structure
+ * @e_info: detailed error information, such as the error source ID
+ *
+ * Return true if found.
+ *
+ * Invoked by DPC when error is detected at the Root Port.
+ * Caller of this function must set id, severity, and multi_error_valid of
+ * struct aer_err_info pointed by @e_info properly. This function must fill
+ * e_info->error_dev_num and e_info->dev[], based on the given information.
+ */
+static bool find_source_device(struct pci_dev *parent,
+ struct aer_err_info *e_info)
+{
+ struct pci_dev *dev = parent;
+ int result;
+
+ /* Must reset in this function */
+ e_info->error_dev_num = 0;
+
+ /* Is Root Port an agent that sends error message? */
+ result = find_device_iter(dev, e_info);
+ if (result)
+ return true;
+
+ pci_walk_bus(parent->subordinate, find_device_iter, e_info);
+
+ if (!e_info->error_dev_num) {
+ dev_printk(KERN_DEBUG, &parent->dev,
+ "can't find device of ID%04x\n",
+ e_info->id);
+ return false;
+ }
+ return true;
+}
+
+static int report_error_detected(struct pci_dev *dev, void *data)
+{
+ pci_ers_result_t vote;
+ const struct pci_error_handlers *err_handler;
+ struct aer_broadcast_data *result_data;
+ result_data = (struct aer_broadcast_data *) data;
+
+ device_lock(&dev->dev);
+ dev->error_state = result_data->state;
+
+ if (!dev->driver ||
+ !dev->driver->err_handler ||
+ !dev->driver->err_handler->error_detected) {
+ if (result_data->state == pci_channel_io_frozen &&
+ !(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) {
+ /*
+ * In case of fatal recovery, if one of the
+ * downstream devices has no driver, we might
+ * be unable to recover because a later insmod
+ * of a driver for this device would be unaware
+ * of its hw state.
+ */
+ dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n",
+ dev->driver ?
+ "no AER-aware driver" : "no driver");
+ }
+
+ /*
+ * If there's any device in the subtree that does not
+ * have an error_detected callback, returning
+ * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
+ * the subsequent mmio_enabled/slot_reset/resume
+ * callbacks of "any" device in the subtree. All the
+ * devices in the subtree are left in the error state
+ * without recovery.
+ */
+
+ if (!(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
+ vote = PCI_ERS_RESULT_NO_AER_DRIVER;
+ else
+ vote = PCI_ERS_RESULT_NONE;
+ } else {
+ err_handler = dev->driver->err_handler;
+ vote = err_handler->error_detected(dev, result_data->state);
+ }
+
+ result_data->result = merge_result(result_data->result, vote);
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+static int report_mmio_enabled(struct pci_dev *dev, void *data)
+{
+ pci_ers_result_t vote;
+ const struct pci_error_handlers *err_handler;
+ struct aer_broadcast_data *result_data;
+ result_data = (struct aer_broadcast_data *) data;
+
+ device_lock(&dev->dev);
+ if (!dev->driver ||
+ !dev->driver->err_handler ||
+ !dev->driver->err_handler->mmio_enabled)
+ goto out;
+
+ err_handler = dev->driver->err_handler;
+ vote = err_handler->mmio_enabled(dev);
+ result_data->result = merge_result(result_data->result, vote);
+out:
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+static int report_slot_reset(struct pci_dev *dev, void *data)
+{
+ pci_ers_result_t vote;
+ const struct pci_error_handlers *err_handler;
+ struct aer_broadcast_data *result_data;
+ result_data = (struct aer_broadcast_data *) data;
+
+ device_lock(&dev->dev);
+ if (!dev->driver ||
+ !dev->driver->err_handler ||
+ !dev->driver->err_handler->slot_reset)
+ goto out;
+
+ err_handler = dev->driver->err_handler;
+ vote = err_handler->slot_reset(dev);
+ result_data->result = merge_result(result_data->result, vote);
+out:
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+static int report_resume(struct pci_dev *dev, void *data)
+{
+ const struct pci_error_handlers *err_handler;
+
+ device_lock(&dev->dev);
+ dev->error_state = pci_channel_io_normal;
+
+ if (!dev->driver ||
+ !dev->driver->err_handler ||
+ !dev->driver->err_handler->resume)
+ goto out;
+
+ err_handler = dev->driver->err_handler;
+ err_handler->resume(dev);
+out:
+ device_unlock(&dev->dev);
+ return 0;
+}
+
+/**
+ * broadcast_error_message - handle message broadcast to downstream drivers
+ * @dev: pointer to the device from which the message is broadcast down the hierarchy
+ * @state: error state
+ * @error_mesg: message to print
+ * @cb: callback to be broadcasted
+ *
+ * Invoked during the error recovery process. The error severity is
+ * broadcast to all downstream drivers in the hierarchy in question.
+ */
+static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
+ enum pci_channel_state state,
+ char *error_mesg,
+ int (*cb)(struct pci_dev *, void *))
+{
+ struct aer_broadcast_data result_data;
+
+ dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg);
+ result_data.state = state;
+ if (cb == report_error_detected)
+ result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
+ else
+ result_data.result = PCI_ERS_RESULT_RECOVERED;
+
+ if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
+ /*
+ * If the error is reported by a bridge, we think this error
+ * is related to the downstream link of the bridge, so we
+ * do error recovery on all subordinates of the bridge instead
+ * of the bridge and clear the error status of the bridge.
+ */
+ if (cb == report_error_detected)
+ dev->error_state = state;
+ pci_walk_bus(dev->subordinate, cb, &result_data);
+ if (cb == report_resume) {
+ pci_cleanup_aer_uncorrect_error_status(dev);
+ dev->error_state = pci_channel_io_normal;
+ }
+ } else {
+ /*
+ * If the error is reported by an end point, we think this
+ * error is related to the upstream link of the end point.
+ */
+ pci_walk_bus(dev->bus, cb, &result_data);
+ }
+
+ return result_data.result;
+}
+
+/**
+ * default_reset_link - default reset function
+ * @dev: pointer to pci_dev data structure
+ *
+ * Invoked when performing link reset on a Downstream Port or a
+ * Root Port with no aer driver.
+ */
+static pci_ers_result_t default_reset_link(struct pci_dev *dev)
+{
+ pci_reset_bridge_secondary_bus(dev);
+ dev_printk(KERN_DEBUG, &dev->dev, "downstream link has been reset\n");
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static int find_aer_service_iter(struct device *device, void *data)
+{
+ struct pcie_port_service_driver *service_driver, **drv;
+
+ drv = (struct pcie_port_service_driver **) data;
+
+ if (device->bus == &pcie_port_bus_type && device->driver) {
+ service_driver = to_service_driver(device->driver);
+ if (service_driver->service == PCIE_PORT_SERVICE_AER) {
+ *drv = service_driver;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
+{
+ struct pcie_port_service_driver *drv = NULL;
+
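+	/* Walk the port's child service devices for one bound to the AER driver */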
+ device_for_each_child(&dev->dev, &drv, find_aer_service_iter);
+
+ return drv;
+}
+
+static pci_ers_result_t reset_link(struct pci_dev *dev)
+{
+ struct pci_dev *udev;
+ pci_ers_result_t status;
+ struct pcie_port_service_driver *driver;
+
+ if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
+ /* Reset this port for all subordinates */
+ udev = dev;
+ } else {
+ /* Reset the upstream component (likely downstream port) */
+ udev = dev->bus->self;
+ }
+
+ /* Try the component's own AER driver first */
+ driver = find_aer_service(udev);
+
+ if (driver && driver->reset_link) {
+ status = driver->reset_link(udev);
+ } else if (pci_pcie_type(udev) == PCI_EXP_TYPE_DOWNSTREAM ||
+ pci_pcie_type(udev) == PCI_EXP_TYPE_ROOT_PORT) {
+ status = default_reset_link(udev);
+ } else {
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "no link-reset support at upstream device %s\n",
+ pci_name(udev));
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (status != PCI_ERS_RESULT_RECOVERED) {
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "link reset at upstream device %s failed\n",
+ pci_name(udev));
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return status;
+}
+
+/**
+ * do_recovery - handle nonfatal/fatal error recovery process
+ * @dev: pointer to a pci_dev data structure of agent detecting an error
+ * @severity: error severity type
+ *
+ * Invoked when an error is nonfatal/fatal. Broadcasts an error_detected
+ * message to all downstream drivers within the hierarchy in question and
+ * carries out the recovery steps based on the aggregated result.
+ */
+static void do_recovery(struct pci_dev *dev, int severity)
+{
+ pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
+ enum pci_channel_state state;
+
+ if (severity == AER_FATAL)
+ state = pci_channel_io_frozen;
+ else
+ state = pci_channel_io_normal;
+
+ status = broadcast_error_message(dev,
+ state,
+ "error_detected",
+ report_error_detected);
+
+ if (severity == AER_FATAL) {
+ result = reset_link(dev);
+ if (result != PCI_ERS_RESULT_RECOVERED)
+ goto failed;
+ }
+
+ if (status == PCI_ERS_RESULT_CAN_RECOVER)
+ status = broadcast_error_message(dev,
+ state,
+ "mmio_enabled",
+ report_mmio_enabled);
+
+ if (status == PCI_ERS_RESULT_NEED_RESET) {
+ /*
+ * TODO: Should call platform-specific
+ * functions to reset slot before calling
+ * drivers' slot_reset callbacks?
+ */
+ status = broadcast_error_message(dev,
+ state,
+ "slot_reset",
+ report_slot_reset);
+ }
+
+ if (status != PCI_ERS_RESULT_RECOVERED)
+ goto failed;
+
+ broadcast_error_message(dev,
+ state,
+ "resume",
+ report_resume);
+
+ dev_info(&dev->dev, "AER: Device recovery successful\n");
+ return;
+
+failed:
+ /* TODO: Should kernel panic here? */
+ dev_info(&dev->dev, "AER: Device recovery failed\n");
+}
+
+/**
+ * handle_error_source - handle logging error into an event log
+ * @aerdev: pointer to pcie_device data structure of the root port
+ * @dev: pointer to pci_dev data structure of error source device
+ * @info: comprehensive error information
+ *
+ * Invoked when an error is detected by the Root Port.
+ */
+static void handle_error_source(struct pcie_device *aerdev,
+ struct pci_dev *dev,
+ struct aer_err_info *info)
+{
+ int pos;
+
+ if (info->severity == AER_CORRECTABLE) {
+ /*
+ * Correctable error does not need software intervention.
+ * No need to go through error recovery process.
+ */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (pos)
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
+ info->status);
+ } else
+ do_recovery(dev, info->severity);
+}
+
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+static void aer_recover_work_func(struct work_struct *work);
+
+#define AER_RECOVER_RING_ORDER 4
+#define AER_RECOVER_RING_SIZE (1 << AER_RECOVER_RING_ORDER)
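+/* kfifo requires a power-of-two size; this ring holds 16 entries */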
+
+struct aer_recover_entry {
+ u8 bus;
+ u8 devfn;
+ u16 domain;
+ int severity;
+ struct aer_capability_regs *regs;
+};
+
+static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
+ AER_RECOVER_RING_SIZE);
+/*
+ * Mutual exclusion for writers of aer_recover_ring; the reader side needs
+ * no lock because there is only one reader, and no lock is needed between
+ * the reader and a writer.
+ */
+static DEFINE_SPINLOCK(aer_recover_ring_lock);
+static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
+
+void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
+ int severity, struct aer_capability_regs *aer_regs)
+{
+ unsigned long flags;
+ struct aer_recover_entry entry = {
+ .bus = bus,
+ .devfn = devfn,
+ .domain = domain,
+ .severity = severity,
+ .regs = aer_regs,
+ };
+
+ spin_lock_irqsave(&aer_recover_ring_lock, flags);
+ if (kfifo_put(&aer_recover_ring, entry))
+ schedule_work(&aer_recover_work);
+ else
+ pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
+ domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
+}
+EXPORT_SYMBOL_GPL(aer_recover_queue);
+
+static void aer_recover_work_func(struct work_struct *work)
+{
+ struct aer_recover_entry entry;
+ struct pci_dev *pdev;
+
+ while (kfifo_get(&aer_recover_ring, &entry)) {
+ pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
+ entry.devfn);
+ if (!pdev) {
+ pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
+ entry.domain, entry.bus,
+ PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
+ continue;
+ }
+ cper_print_aer(pdev, entry.severity, entry.regs);
+ do_recovery(pdev, entry.severity);
+ pci_dev_put(pdev);
+ }
+}
+#endif
+
+/**
+ * get_device_error_info - read error status from dev and store it to info
+ * @dev: pointer to the device expected to have an error record
+ * @info: pointer to structure to store the error record
+ *
+ * Return 1 on success, 0 on error.
+ *
+ * Note that @info is reused among all error devices. Clear fields properly.
+ */
+static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
+{
+ int pos, temp;
+
+ /* Must reset in this function */
+ info->status = 0;
+ info->tlp_header_valid = 0;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+
+ /* The device might not support AER */
+ if (!pos)
+ return 1;
+
+ if (info->severity == AER_CORRECTABLE) {
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
+ &info->status);
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
+ &info->mask);
+ if (!(info->status & ~info->mask))
+ return 0;
+ } else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE ||
+ info->severity == AER_NONFATAL) {
+
+ /* Link is still healthy for IO reads */
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
+ &info->status);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
+ &info->mask);
+ if (!(info->status & ~info->mask))
+ return 0;
+
+ /* Get First Error Pointer */
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
+ info->first_error = PCI_ERR_CAP_FEP(temp);
+
+ if (info->status & AER_LOG_TLP_MASKS) {
+ info->tlp_header_valid = 1;
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
+ }
+ }
+
+ return 1;
+}
+
+static inline void aer_process_err_devices(struct pcie_device *p_device,
+ struct aer_err_info *e_info)
+{
+ int i;
+
+ /* Report all errors before handling them, so records are not lost to resets etc. */
+ for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
+ if (get_device_error_info(e_info->dev[i], e_info))
+ aer_print_error(e_info->dev[i], e_info);
+ }
+ for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
+ if (get_device_error_info(e_info->dev[i], e_info))
+ handle_error_source(p_device, e_info->dev[i], e_info);
+ }
+}
+
+/**
+ * aer_isr_one_error - consume an error detected by root port
+ * @p_device: pointer to error root port service device
+ * @e_src: pointer to an error source
+ */
+static void aer_isr_one_error(struct pcie_device *p_device,
+ struct aer_err_source *e_src)
+{
+ struct aer_err_info *e_info;
+
+ /* struct aer_err_info might be big, so we allocate it with slab */
+ e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
+ if (!e_info) {
+ dev_printk(KERN_DEBUG, &p_device->port->dev,
+ "Can't allocate mem when processing AER errors\n");
+ return;
+ }
+
+ /*
+ * Both a correctable error and an uncorrectable error may be
+ * logged at the same time. Report the correctable error first.
+ */
+ if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
+ e_info->id = ERR_COR_ID(e_src->id);
+ e_info->severity = AER_CORRECTABLE;
+
+ if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
+ e_info->multi_error_valid = 1;
+ else
+ e_info->multi_error_valid = 0;
+
+ aer_print_port_info(p_device->port, e_info);
+
+ if (find_source_device(p_device->port, e_info))
+ aer_process_err_devices(p_device, e_info);
+ }
+
+ if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
+ e_info->id = ERR_UNCOR_ID(e_src->id);
+
+ if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
+ e_info->severity = AER_FATAL;
+ else
+ e_info->severity = AER_NONFATAL;
+
+ if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
+ e_info->multi_error_valid = 1;
+ else
+ e_info->multi_error_valid = 0;
+
+ aer_print_port_info(p_device->port, e_info);
+
+ if (find_source_device(p_device->port, e_info))
+ aer_process_err_devices(p_device, e_info);
+ }
+
+ kfree(e_info);
+}
+
+/**
+ * get_e_source - retrieve an error source
+ * @rpc: pointer to the root port which holds an error
+ * @e_src: pointer to store retrieved error source
+ *
+ * Return 1 if an error source is retrieved, otherwise 0.
+ *
+ * Invoked by DPC handler to consume an error.
+ */
+static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
+{
+ unsigned long flags;
+
+ /* Lock access to Root error producer/consumer index */
+ spin_lock_irqsave(&rpc->e_lock, flags);
+ if (rpc->prod_idx == rpc->cons_idx) {
+ spin_unlock_irqrestore(&rpc->e_lock, flags);
+ return 0;
+ }
+
+ *e_src = rpc->e_sources[rpc->cons_idx];
+ rpc->cons_idx++;
+ if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
+ rpc->cons_idx = 0;
+ spin_unlock_irqrestore(&rpc->e_lock, flags);
+
+ return 1;
+}
+
+/**
+ * aer_isr - consume errors detected by root port
+ * @work: definition of this work item
+ *
+ * Invoked, as the DPC, when the root port records a newly detected error.
+ */
+void aer_isr(struct work_struct *work)
+{
+ struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
+ struct pcie_device *p_device = rpc->rpd;
+ struct aer_err_source uninitialized_var(e_src);
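+	/*
+	 * get_e_source() fills e_src before it is used; uninitialized_var()
+	 * only silences a false-positive compiler warning.
+	 */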
+
+ mutex_lock(&rpc->rpc_mutex);
+ while (get_e_source(rpc, &e_src))
+ aer_isr_one_error(p_device, &e_src);
+ mutex_unlock(&rpc->rpc_mutex);
+
+ wake_up(&rpc->wait_release);
+}
+
+/**
+ * aer_init - provide AER initialization
+ * @dev: pointer to AER pcie device
+ *
+ * Invoked when AER service driver is loaded.
+ */
+int aer_init(struct pcie_device *dev)
+{
+ if (forceload) {
+ dev_printk(KERN_DEBUG, &dev->device,
+ "aerdrv forceload requested.\n");
+ pcie_aer_force_firmware_first(dev->port, 0);
+ }
+ return 0;
+}
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
new file mode 100644
index 000000000..167fe411c
--- /dev/null
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -0,0 +1,262 @@
+/*
+ * drivers/pci/pcie/aer/aerdrv_errprint.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Format error messages and print them to console.
+ *
+ * Copyright (C) 2006 Intel Corp.
+ * Tom Long Nguyen (tom.l.nguyen@intel.com)
+ * Zhang Yanmin (yanmin.zhang@intel.com)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pm.h>
+#include <linux/suspend.h>
+#include <linux/cper.h>
+
+#include "aerdrv.h"
+#include <ras/ras_event.h>
+
+#define AER_AGENT_RECEIVER 0
+#define AER_AGENT_REQUESTER 1
+#define AER_AGENT_COMPLETER 2
+#define AER_AGENT_TRANSMITTER 3
+
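+/*
+ * Given a severity t and error status e, the masks below classify which
+ * agent (receiver, requester, completer, or transmitter) the logged
+ * source ID refers to.
+ */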
+#define AER_AGENT_REQUESTER_MASK(t) ((t == AER_CORRECTABLE) ? \
+ 0 : (PCI_ERR_UNC_COMP_TIME|PCI_ERR_UNC_UNSUP))
+#define AER_AGENT_COMPLETER_MASK(t) ((t == AER_CORRECTABLE) ? \
+ 0 : PCI_ERR_UNC_COMP_ABORT)
+#define AER_AGENT_TRANSMITTER_MASK(t) ((t == AER_CORRECTABLE) ? \
+ (PCI_ERR_COR_REP_ROLL|PCI_ERR_COR_REP_TIMER) : 0)
+
+#define AER_GET_AGENT(t, e) \
+ ((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER : \
+ (e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER : \
+ (e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER : \
+ AER_AGENT_RECEIVER)
+
+#define AER_PHYSICAL_LAYER_ERROR 0
+#define AER_DATA_LINK_LAYER_ERROR 1
+#define AER_TRANSACTION_LAYER_ERROR 2
+
+#define AER_PHYSICAL_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \
+ PCI_ERR_COR_RCVR : 0)
+#define AER_DATA_LINK_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ? \
+ (PCI_ERR_COR_BAD_TLP| \
+ PCI_ERR_COR_BAD_DLLP| \
+ PCI_ERR_COR_REP_ROLL| \
+ PCI_ERR_COR_REP_TIMER) : PCI_ERR_UNC_DLP)
+
+#define AER_GET_LAYER_ERROR(t, e) \
+ ((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \
+ (e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
+ AER_TRANSACTION_LAYER_ERROR)
+
+/*
+ * AER error strings
+ */
+static const char *aer_error_severity_string[] = {
+ "Uncorrected (Non-Fatal)",
+ "Uncorrected (Fatal)",
+ "Corrected"
+};
+
+static const char *aer_error_layer[] = {
+ "Physical Layer",
+ "Data Link Layer",
+ "Transaction Layer"
+};
+
+static const char *aer_correctable_error_string[] = {
+ "Receiver Error", /* Bit Position 0 */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "Bad TLP", /* Bit Position 6 */
+ "Bad DLLP", /* Bit Position 7 */
+ "RELAY_NUM Rollover", /* Bit Position 8 */
+ NULL,
+ NULL,
+ NULL,
+ "Replay Timer Timeout", /* Bit Position 12 */
+ "Advisory Non-Fatal", /* Bit Position 13 */
+ "Corrected Internal Error", /* Bit Position 14 */
+ "Header Log Overflow", /* Bit Position 15 */
+};
+
+static const char *aer_uncorrectable_error_string[] = {
+ "Undefined", /* Bit Position 0 */
+ NULL,
+ NULL,
+ NULL,
+ "Data Link Protocol", /* Bit Position 4 */
+ "Surprise Down Error", /* Bit Position 5 */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "Poisoned TLP", /* Bit Position 12 */
+ "Flow Control Protocol", /* Bit Position 13 */
+ "Completion Timeout", /* Bit Position 14 */
+ "Completer Abort", /* Bit Position 15 */
+ "Unexpected Completion", /* Bit Position 16 */
+ "Receiver Overflow", /* Bit Position 17 */
+ "Malformed TLP", /* Bit Position 18 */
+ "ECRC", /* Bit Position 19 */
+ "Unsupported Request", /* Bit Position 20 */
+ "ACS Violation", /* Bit Position 21 */
+ "Uncorrectable Internal Error", /* Bit Position 22 */
+ "MC Blocked TLP", /* Bit Position 23 */
+ "AtomicOp Egress Blocked", /* Bit Position 24 */
+ "TLP Prefix Blocked Error", /* Bit Position 25 */
+};
+
+static const char *aer_agent_string[] = {
+ "Receiver ID",
+ "Requester ID",
+ "Completer ID",
+ "Transmitter ID"
+};
+
+static void __print_tlp_header(struct pci_dev *dev,
+ struct aer_header_log_regs *t)
+{
+ dev_err(&dev->dev, " TLP Header: %08x %08x %08x %08x\n",
+ t->dw0, t->dw1, t->dw2, t->dw3);
+}
+
+static void __aer_print_error(struct pci_dev *dev,
+ struct aer_err_info *info)
+{
+ int i, status;
+ const char *errmsg = NULL;
+ status = (info->status & ~info->mask);
+
+ for (i = 0; i < 32; i++) {
+ if (!(status & (1 << i)))
+ continue;
+
+ if (info->severity == AER_CORRECTABLE)
+ errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ?
+ aer_correctable_error_string[i] : NULL;
+ else
+ errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ?
+ aer_uncorrectable_error_string[i] : NULL;
+
+ if (errmsg)
+ dev_err(&dev->dev, " [%2d] %-22s%s\n", i, errmsg,
+ info->first_error == i ? " (First)" : "");
+ else
+ dev_err(&dev->dev, " [%2d] Unknown Error Bit%s\n",
+ i, info->first_error == i ? " (First)" : "");
+ }
+}
+
+void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
+{
+ int layer, agent;
+ int id = ((dev->bus->number << 8) | dev->devfn);
+
+ if (!info->status) {
+ dev_err(&dev->dev, "PCIe Bus Error: severity=%s, type=Unaccessible, id=%04x(Unregistered Agent ID)\n",
+ aer_error_severity_string[info->severity], id);
+ goto out;
+ }
+
+ layer = AER_GET_LAYER_ERROR(info->severity, info->status);
+ agent = AER_GET_AGENT(info->severity, info->status);
+
+ dev_err(&dev->dev, "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+ aer_error_severity_string[info->severity],
+ aer_error_layer[layer], id, aer_agent_string[agent]);
+
+ dev_err(&dev->dev, " device [%04x:%04x] error status/mask=%08x/%08x\n",
+ dev->vendor, dev->device,
+ info->status, info->mask);
+
+ __aer_print_error(dev, info);
+
+ if (info->tlp_header_valid)
+ __print_tlp_header(dev, &info->tlp);
+
+out:
+ if (info->id && info->error_dev_num > 1 && info->id == id)
+ dev_err(&dev->dev, " Error of this Agent(%04x) is reported first\n", id);
+
+ trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
+ info->severity);
+}
+
+void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
+{
+ dev_info(&dev->dev, "AER: %s%s error received: id=%04x\n",
+ info->multi_error_valid ? "Multiple " : "",
+ aer_error_severity_string[info->severity], info->id);
+}
+
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+int cper_severity_to_aer(int cper_severity)
+{
+ switch (cper_severity) {
+ case CPER_SEV_RECOVERABLE:
+ return AER_NONFATAL;
+ case CPER_SEV_FATAL:
+ return AER_FATAL;
+ default:
+ return AER_CORRECTABLE;
+ }
+}
+EXPORT_SYMBOL_GPL(cper_severity_to_aer);
+
+void cper_print_aer(struct pci_dev *dev, int cper_severity,
+ struct aer_capability_regs *aer)
+{
+ int aer_severity, layer, agent, status_strs_size, tlp_header_valid = 0;
+ u32 status, mask;
+ const char **status_strs;
+
+ aer_severity = cper_severity_to_aer(cper_severity);
+
+ if (aer_severity == AER_CORRECTABLE) {
+ status = aer->cor_status;
+ mask = aer->cor_mask;
+ status_strs = aer_correctable_error_string;
+ status_strs_size = ARRAY_SIZE(aer_correctable_error_string);
+ } else {
+ status = aer->uncor_status;
+ mask = aer->uncor_mask;
+ status_strs = aer_uncorrectable_error_string;
+ status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string);
+ tlp_header_valid = status & AER_LOG_TLP_MASKS;
+ }
+
+ layer = AER_GET_LAYER_ERROR(aer_severity, status);
+ agent = AER_GET_AGENT(aer_severity, status);
+
+ dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
+ cper_print_bits("", status, status_strs, status_strs_size);
+ dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n",
+ aer_error_layer[layer], aer_agent_string[agent]);
+
+ if (aer_severity != AER_CORRECTABLE)
+ dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n",
+ aer->uncor_severity);
+
+ if (tlp_header_valid)
+ __print_tlp_header(dev, &aer->header_log);
+
+ trace_aer_event(dev_name(&dev->dev), (status & ~mask),
+ aer_severity);
+}
+#endif
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
new file mode 100644
index 000000000..a2747a663
--- /dev/null
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -0,0 +1,131 @@
+/*
+ * Enables/disables PCIe ECRC checking.
+ *
+ * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
+ * Andrew Patterson <andrew.patterson@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/errno.h>
+#include "../../pci.h"
+
+#define ECRC_POLICY_DEFAULT 0 /* ECRC set by BIOS */
+#define ECRC_POLICY_OFF 1 /* ECRC off for performance */
+#define ECRC_POLICY_ON 2 /* ECRC on for data integrity */
+
+static int ecrc_policy = ECRC_POLICY_DEFAULT;
+
+static const char *ecrc_policy_str[] = {
+ [ECRC_POLICY_DEFAULT] = "bios",
+ [ECRC_POLICY_OFF] = "off",
+ [ECRC_POLICY_ON] = "on"
+};
+
+/**
+ * enable_ecrc_checking - enable PCIe ECRC checking for a device
+ * @dev: the PCI device
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+static int enable_ecrc_checking(struct pci_dev *dev)
+{
+ int pos;
+ u32 reg32;
+
+ if (!pci_is_pcie(dev))
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ if (reg32 & PCI_ERR_CAP_ECRC_GENC)
+ reg32 |= PCI_ERR_CAP_ECRC_GENE;
+ if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
+ reg32 |= PCI_ERR_CAP_ECRC_CHKE;
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+ return 0;
+}
+
+/**
+ * disable_ecrc_checking - disable PCIe ECRC checking for a device
+ * @dev: the PCI device
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+static int disable_ecrc_checking(struct pci_dev *dev)
+{
+ int pos;
+ u32 reg32;
+
+ if (!pci_is_pcie(dev))
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+ return 0;
+}
+
+/**
+ * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
+ * @dev: the PCI device
+ */
+void pcie_set_ecrc_checking(struct pci_dev *dev)
+{
+ switch (ecrc_policy) {
+ case ECRC_POLICY_DEFAULT:
+ return;
+ case ECRC_POLICY_OFF:
+ disable_ecrc_checking(dev);
+ break;
+ case ECRC_POLICY_ON:
+ enable_ecrc_checking(dev);
+ break;
+ default:
+ return;
+ }
+}
+
+/**
+ * pcie_ecrc_get_policy - parse the kernel command-line ecrc option
+ * @str: ECRC policy name, one of "bios", "off", or "on"
+ */
+void pcie_ecrc_get_policy(char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++)
+ if (!strncmp(str, ecrc_policy_str[i],
+ strlen(ecrc_policy_str[i])))
+ break;
+ if (i >= ARRAY_SIZE(ecrc_policy_str))
+ return;
+
+ ecrc_policy = i;
+}
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
new file mode 100644
index 000000000..7d4fcdc51
--- /dev/null
+++ b/drivers/pci/pcie/aspm.c
@@ -0,0 +1,975 @@
+/*
+ * File: drivers/pci/pcie/aspm.c
+ * Enabling PCIe link L0s/L1 state and Clock Power Management
+ *
+ * Copyright (C) 2007 Intel
+ * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
+ * Copyright (C) Shaohua Li (shaohua.li@intel.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/errno.h>
+#include <linux/pm.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/pci-aspm.h>
+#include "../pci.h"
+
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "pcie_aspm."
+
+/* Note: those are not register definitions */
+#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
+#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
+#define ASPM_STATE_L1 (4) /* L1 state */
+#define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
+#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
+
+struct aspm_latency {
+ u32 l0s; /* L0s latency (nsec) */
+ u32 l1; /* L1 latency (nsec) */
+};
+
+struct pcie_link_state {
+ struct pci_dev *pdev; /* Upstream component of the Link */
+ struct pcie_link_state *root; /* pointer to the root port link */
+ struct pcie_link_state *parent; /* pointer to the parent Link state */
+ struct list_head sibling; /* node in link_list */
+ struct list_head children; /* list of child link states */
+ struct list_head link; /* node in parent's children list */
+
+ /* ASPM state */
+ u32 aspm_support:3; /* Supported ASPM state */
+ u32 aspm_enabled:3; /* Enabled ASPM state */
+ u32 aspm_capable:3; /* Capable ASPM state with latency */
+ u32 aspm_default:3; /* Default ASPM state by BIOS */
+ u32 aspm_disable:3; /* Disabled ASPM state */
+
+ /* Clock PM state */
+ u32 clkpm_capable:1; /* Clock PM capable? */
+ u32 clkpm_enabled:1; /* Current Clock PM state */
+ u32 clkpm_default:1; /* Default Clock PM state by BIOS */
+
+ /* Exit latencies */
+ struct aspm_latency latency_up; /* Upstream direction exit latency */
+ struct aspm_latency latency_dw; /* Downstream direction exit latency */
+ /*
+ * Endpoint acceptable latencies. A PCIe downstream port only
+ * has one slot under it, so at most there are 8 functions.
+ */
+ struct aspm_latency acceptable[8];
+};
+
+static int aspm_disabled, aspm_force;
+static bool aspm_support_enabled = true;
+static DEFINE_MUTEX(aspm_lock);
+static LIST_HEAD(link_list);
+
+#define POLICY_DEFAULT 0 /* BIOS default setting */
+#define POLICY_PERFORMANCE 1 /* high performance */
+#define POLICY_POWERSAVE 2 /* high power saving */
+
+#ifdef CONFIG_PCIEASPM_PERFORMANCE
+static int aspm_policy = POLICY_PERFORMANCE;
+#elif defined CONFIG_PCIEASPM_POWERSAVE
+static int aspm_policy = POLICY_POWERSAVE;
+#else
+static int aspm_policy;
+#endif
+
+static const char *policy_str[] = {
+ [POLICY_DEFAULT] = "default",
+ [POLICY_PERFORMANCE] = "performance",
+ [POLICY_POWERSAVE] = "powersave"
+};
+
+#define LINK_RETRAIN_TIMEOUT HZ
+
+static int policy_to_aspm_state(struct pcie_link_state *link)
+{
+ switch (aspm_policy) {
+ case POLICY_PERFORMANCE:
+ /* Disable ASPM and Clock PM */
+ return 0;
+ case POLICY_POWERSAVE:
+ /* Enable ASPM L0s/L1 */
+ return ASPM_STATE_ALL;
+ case POLICY_DEFAULT:
+ return link->aspm_default;
+ }
+ return 0;
+}
+
+static int policy_to_clkpm_state(struct pcie_link_state *link)
+{
+ switch (aspm_policy) {
+ case POLICY_PERFORMANCE:
+ /* Disable ASPM and Clock PM */
+ return 0;
+ case POLICY_POWERSAVE:
+ /* Disable Clock PM */
+ return 1;
+ case POLICY_DEFAULT:
+ return link->clkpm_default;
+ }
+ return 0;
+}
+
+static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
+{
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
+
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if (enable)
+ pcie_capability_set_word(child, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CLKREQ_EN);
+ else
+ pcie_capability_clear_word(child, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_CLKREQ_EN);
+ }
+ link->clkpm_enabled = !!enable;
+}
+
+static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
+{
+ /* Don't enable Clock PM if the link is not Clock PM capable */
+ if (!link->clkpm_capable && enable)
+ enable = 0;
+ /* Nothing to do if the requested state equals the current state */
+ if (link->clkpm_enabled == enable)
+ return;
+ pcie_set_clkpm_nocheck(link, enable);
+}
+
+static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
+{
+ int capable = 1, enabled = 1;
+ u32 reg32;
+ u16 reg16;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
+
+ /* All functions should have the same cap and state, take the worst */
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
+ if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
+ capable = 0;
+ enabled = 0;
+ break;
+ }
+ pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
+ if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
+ enabled = 0;
+ }
+ link->clkpm_enabled = enabled;
+ link->clkpm_default = enabled;
+ link->clkpm_capable = (blacklist) ? 0 : capable;
+}
+
+/*
+ * pcie_aspm_configure_common_clock: check if the two ends of a link
+ * could use a common clock. If they can, configure them to do so,
+ * which reduces the ASPM state exit latency.
+ */
+static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
+{
+ int same_clock = 1;
+ u16 reg16, parent_reg, child_reg[8];
+ unsigned long start_jiffies;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
+ /*
+ * All functions of a slot should have the same Slot Clock
+ * Configuration, so just check one function
+ */
+ child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
+ BUG_ON(!pci_is_pcie(child));
+
+ /* Check downstream component if bit Slot Clock Configuration is 1 */
+ pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
+ if (!(reg16 & PCI_EXP_LNKSTA_SLC))
+ same_clock = 0;
+
+ /* Check upstream component if bit Slot Clock Configuration is 1 */
+ pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
+ if (!(reg16 & PCI_EXP_LNKSTA_SLC))
+ same_clock = 0;
+
+ /* Configure downstream component, all functions */
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
+ child_reg[PCI_FUNC(child->devfn)] = reg16;
+ if (same_clock)
+ reg16 |= PCI_EXP_LNKCTL_CCC;
+ else
+ reg16 &= ~PCI_EXP_LNKCTL_CCC;
+ pcie_capability_write_word(child, PCI_EXP_LNKCTL, reg16);
+ }
+
+ /* Configure upstream component */
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
+ parent_reg = reg16;
+ if (same_clock)
+ reg16 |= PCI_EXP_LNKCTL_CCC;
+ else
+ reg16 &= ~PCI_EXP_LNKCTL_CCC;
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+
+ /* Retrain link */
+ reg16 |= PCI_EXP_LNKCTL_RL;
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
+
+ /* Wait for link training end. Break out after waiting for timeout */
+ start_jiffies = jiffies;
+ for (;;) {
+ pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ break;
+ if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
+ break;
+ msleep(1);
+ }
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ return;
+
+ /* Training failed. Restore common clock configurations */
+ dev_err(&parent->dev, "ASPM: Could not configure common clock\n");
+ list_for_each_entry(child, &linkbus->devices, bus_list)
+ pcie_capability_write_word(child, PCI_EXP_LNKCTL,
+ child_reg[PCI_FUNC(child->devfn)]);
+ pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
+}
+
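+/*
+ * Latency encodings follow the PCIe Link Capabilities fields: an L0s
+ * encoding n means 64ns << n and an L1 encoding n means 1us << n, with
+ * 0x7 meaning "more than the largest encodable value".
+ */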
+/* Convert L0s latency encoding to ns */
+static u32 calc_l0s_latency(u32 encoding)
+{
+ if (encoding == 0x7)
+ return (5 * 1000); /* > 4us */
+ return (64 << encoding);
+}
+
+/* Convert L0s acceptable latency encoding to ns */
+static u32 calc_l0s_acceptable(u32 encoding)
+{
+ if (encoding == 0x7)
+ return -1U;
+ return (64 << encoding);
+}
+
+/* Convert L1 latency encoding to ns */
+static u32 calc_l1_latency(u32 encoding)
+{
+ if (encoding == 0x7)
+ return (65 * 1000); /* > 64us */
+ return (1000 << encoding);
+}
+
+/* Convert L1 acceptable latency encoding to ns */
+static u32 calc_l1_acceptable(u32 encoding)
+{
+ if (encoding == 0x7)
+ return -1U;
+ return (1000 << encoding);
+}
+
+struct aspm_register_info {
+ u32 support:2;
+ u32 enabled:2;
+ u32 latency_encoding_l0s;
+ u32 latency_encoding_l1;
+};
+
+static void pcie_get_aspm_reg(struct pci_dev *pdev,
+ struct aspm_register_info *info)
+{
+ u16 reg16;
+ u32 reg32;
+
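+	/*
+	 * LNKCAP: ASPM Support is bits 11:10, L0s Exit Latency bits 14:12,
+	 * L1 Exit Latency bits 17:15; hence the shifts below.
+	 */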
+ pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &reg32);
+ info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
+ info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
+ info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &reg16);
+ info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC;
+}
+
+static void pcie_aspm_check_latency(struct pci_dev *endpoint)
+{
+ u32 latency, l1_switch_latency = 0;
+ struct aspm_latency *acceptable;
+ struct pcie_link_state *link;
+
+ /* Device not in D0 doesn't need latency check */
+ if ((endpoint->current_state != PCI_D0) &&
+ (endpoint->current_state != PCI_UNKNOWN))
+ return;
+
+ link = endpoint->bus->self->link_state;
+ acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
+
+ while (link) {
+ /* Check upstream direction L0s latency */
+ if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
+ (link->latency_up.l0s > acceptable->l0s))
+ link->aspm_capable &= ~ASPM_STATE_L0S_UP;
+
+ /* Check downstream direction L0s latency */
+ if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
+ (link->latency_dw.l0s > acceptable->l0s))
+ link->aspm_capable &= ~ASPM_STATE_L0S_DW;
+ /*
+ * Check L1 latency.
+ * Every switch on the path to the root complex needs one
+ * more microsecond for L1. The spec doesn't mention L0s.
+ */
+ latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1);
+ if ((link->aspm_capable & ASPM_STATE_L1) &&
+ (latency + l1_switch_latency > acceptable->l1))
+ link->aspm_capable &= ~ASPM_STATE_L1;
+ l1_switch_latency += 1000;
+
+ link = link->parent;
+ }
+}
+
+static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
+{
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
+ struct aspm_register_info upreg, dwreg;
+
+ if (blacklist) {
+ /* Mark all states enabled and disabled so that ASPM is turned off later */
+ link->aspm_enabled = ASPM_STATE_ALL;
+ link->aspm_disable = ASPM_STATE_ALL;
+ return;
+ }
+
+ /* Configure common clock before checking latencies */
+ pcie_aspm_configure_common_clock(link);
+
+ /* Get upstream/downstream components' register state */
+ pcie_get_aspm_reg(parent, &upreg);
+ child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
+ pcie_get_aspm_reg(child, &dwreg);
+
+ /*
+ * Setup L0s state
+ *
+ * Note that we must not enable L0s in either direction on a
+ * given link unless components on both sides of the link each
+ * support L0s.
+ */
+ if (dwreg.support & upreg.support & PCIE_LINK_STATE_L0S)
+ link->aspm_support |= ASPM_STATE_L0S;
+ if (dwreg.enabled & PCIE_LINK_STATE_L0S)
+ link->aspm_enabled |= ASPM_STATE_L0S_UP;
+ if (upreg.enabled & PCIE_LINK_STATE_L0S)
+ link->aspm_enabled |= ASPM_STATE_L0S_DW;
+ link->latency_up.l0s = calc_l0s_latency(upreg.latency_encoding_l0s);
+ link->latency_dw.l0s = calc_l0s_latency(dwreg.latency_encoding_l0s);
+
+ /* Setup L1 state */
+ if (upreg.support & dwreg.support & PCIE_LINK_STATE_L1)
+ link->aspm_support |= ASPM_STATE_L1;
+ if (upreg.enabled & dwreg.enabled & PCIE_LINK_STATE_L1)
+ link->aspm_enabled |= ASPM_STATE_L1;
+ link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1);
+ link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1);
+
+ /* Save default state */
+ link->aspm_default = link->aspm_enabled;
+
+ /* Setup initial capable state. Will be updated later */
+ link->aspm_capable = link->aspm_support;
+ /*
+ * If the downstream component has a PCI bridge function, don't
+ * do ASPM for now.
+ */
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if (pci_pcie_type(child) == PCI_EXP_TYPE_PCI_BRIDGE) {
+ link->aspm_disable = ASPM_STATE_ALL;
+ break;
+ }
+ }
+
+ /* Get and check endpoint acceptable latencies */
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ u32 reg32, encoding;
+ struct aspm_latency *acceptable =
+ &link->acceptable[PCI_FUNC(child->devfn)];
+
+ if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
+ pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
+ continue;
+
+ pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
+ /* Calculate endpoint L0s acceptable latency */
+ encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
+ acceptable->l0s = calc_l0s_acceptable(encoding);
+ /* Calculate endpoint L1 acceptable latency */
+ encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
+ acceptable->l1 = calc_l1_acceptable(encoding);
+
+ pcie_aspm_check_latency(child);
+ }
+}
+
+static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
+{
+ pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC, val);
+}
+
+static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
+{
+ u32 upstream = 0, dwstream = 0;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
+
+ /* Nothing to do if the link is already in the requested state */
+ state &= (link->aspm_capable & ~link->aspm_disable);
+ if (link->aspm_enabled == state)
+ return;
+ /* Convert ASPM state to upstream/downstream ASPM register state */
+ if (state & ASPM_STATE_L0S_UP)
+ dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
+ if (state & ASPM_STATE_L0S_DW)
+ upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
+ if (state & ASPM_STATE_L1) {
+ upstream |= PCI_EXP_LNKCTL_ASPM_L1;
+ dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
+ }
+ /*
+ * Spec 2.0 suggests that all functions should be configured with
+ * the same ASPM setting. ASPM L1 should be enabled on the upstream
+ * component first and then on the downstream one, and vice versa
+ * when disabling ASPM L1. The spec doesn't mention L0s.
+ */
+ if (state & ASPM_STATE_L1)
+ pcie_config_aspm_dev(parent, upstream);
+ list_for_each_entry(child, &linkbus->devices, bus_list)
+ pcie_config_aspm_dev(child, dwstream);
+ if (!(state & ASPM_STATE_L1))
+ pcie_config_aspm_dev(parent, upstream);
+
+ link->aspm_enabled = state;
+}
+
+static void pcie_config_aspm_path(struct pcie_link_state *link)
+{
+ while (link) {
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+ link = link->parent;
+ }
+}
+
+static void free_link_state(struct pcie_link_state *link)
+{
+ link->pdev->link_state = NULL;
+ kfree(link);
+}
+
+static int pcie_aspm_sanity_check(struct pci_dev *pdev)
+{
+ struct pci_dev *child;
+ u32 reg32;
+
+ /*
+ * Some functions in a slot might not be PCIe functions at all,
+ * which is very strange. Disable ASPM for the whole slot.
+ */
+ list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
+ if (!pci_is_pcie(child))
+ return -EINVAL;
+
+ /*
+ * If ASPM is disabled then we're not going to change
+ * the BIOS state. It's safe to continue even if it's a
+ * pre-1.1 device
+ */
+
+ if (aspm_disabled)
+ continue;
+
+ /*
+ * Disable ASPM for pre-1.1 PCIe devices. Following Microsoft, we
+ * use the RBER bit to determine whether a function is a 1.1 device.
+ */
+ pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
+ if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
+ dev_info(&child->dev, "disabling ASPM on pre-1.1 PCIe device. You can enable it with 'pcie_aspm=force'\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
+{
+ struct pcie_link_state *link;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return NULL;
+ INIT_LIST_HEAD(&link->sibling);
+ INIT_LIST_HEAD(&link->children);
+ INIT_LIST_HEAD(&link->link);
+ link->pdev = pdev;
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM) {
+ struct pcie_link_state *parent;
+ parent = pdev->bus->parent->self->link_state;
+ if (!parent) {
+ kfree(link);
+ return NULL;
+ }
+ link->parent = parent;
+ list_add(&link->link, &parent->children);
+ }
+ /* Setup a pointer to the root port link */
+ if (!link->parent)
+ link->root = link;
+ else
+ link->root = link->parent->root;
+
+ list_add(&link->sibling, &link_list);
+ pdev->link_state = link;
+ return link;
+}
+
+/*
+ * pcie_aspm_init_link_state: Initialize PCI Express link state.
+ * It is called after a PCIe port and its child devices are scanned.
+ * @pdev: the root port or switch downstream port
+ */
+void pcie_aspm_init_link_state(struct pci_dev *pdev)
+{
+ struct pcie_link_state *link;
+ int blacklist = !!pcie_aspm_sanity_check(pdev);
+
+ if (!aspm_support_enabled)
+ return;
+
+ if (!pci_is_pcie(pdev) || pdev->link_state)
+ return;
+ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
+ pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+
+ /* VIA has a strange chipset where the root port sits under a bridge */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT &&
+ pdev->bus->self)
+ return;
+
+ down_read(&pci_bus_sem);
+ if (list_empty(&pdev->subordinate->devices))
+ goto out;
+
+ mutex_lock(&aspm_lock);
+ link = alloc_pcie_link_state(pdev);
+ if (!link)
+ goto unlock;
+ /*
+ * Setup initial ASPM state. Note that we need to configure
+ * upstream links as well, because their capable state can be
+ * updated through pcie_aspm_cap_init().
+ */
+ pcie_aspm_cap_init(link, blacklist);
+
+ /* Setup initial Clock PM state */
+ pcie_clkpm_cap_init(link, blacklist);
+
+ /*
+ * At this stage drivers haven't had an opportunity to change the
+ * link policy setting. Enabling ASPM on broken hardware can cripple
+ * it even before the driver has had a chance to disable ASPM, so
+ * default to a safe level right now. If we're enabling ASPM beyond
+ * the BIOS's expectation, we'll do so once pci_enable_device() is
+ * called.
+ */
+ if (aspm_policy != POLICY_POWERSAVE) {
+ pcie_config_aspm_path(link);
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
+ }
+
+unlock:
+ mutex_unlock(&aspm_lock);
+out:
+ up_read(&pci_bus_sem);
+}
+
+/* Recheck latencies and update aspm_capable for links under the root */
+static void pcie_update_aspm_capable(struct pcie_link_state *root)
+{
+ struct pcie_link_state *link;
+ BUG_ON(root->parent);
+ list_for_each_entry(link, &link_list, sibling) {
+ if (link->root != root)
+ continue;
+ link->aspm_capable = link->aspm_support;
+ }
+ list_for_each_entry(link, &link_list, sibling) {
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
+ if (link->root != root)
+ continue;
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
+ (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
+ continue;
+ pcie_aspm_check_latency(child);
+ }
+ }
+}
+
+/* @pdev: the endpoint device */
+void pcie_aspm_exit_link_state(struct pci_dev *pdev)
+{
+ struct pci_dev *parent = pdev->bus->self;
+ struct pcie_link_state *link, *root, *parent_link;
+
+ if (!parent || !parent->link_state)
+ return;
+
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ /*
+ * All PCIe functions are in one slot; removing one function removes
+ * the whole slot, so just wait until we are the last function left.
+ */
+ if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices))
+ goto out;
+
+ link = parent->link_state;
+ root = link->root;
+ parent_link = link->parent;
+
+ /* All functions are removed, so just disable ASPM for the link */
+ pcie_config_aspm_link(link, 0);
+ list_del(&link->sibling);
+ list_del(&link->link);
+ /* Clock PM is for endpoint devices only */
+ free_link_state(link);
+
+ /* Recheck latencies and configure upstream links */
+ if (parent_link) {
+ pcie_update_aspm_capable(root);
+ pcie_config_aspm_path(parent_link);
+ }
+out:
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
+}
+
+/* @pdev: the root port or switch downstream port */
+void pcie_aspm_pm_state_change(struct pci_dev *pdev)
+{
+ struct pcie_link_state *link = pdev->link_state;
+
+ if (aspm_disabled || !pci_is_pcie(pdev) || !link)
+ return;
+ if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM))
+ return;
+ /*
+ * A device changed its PM state, so recheck whether the latency
+ * meets all functions' requirements.
+ */
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ pcie_update_aspm_capable(link->root);
+ pcie_config_aspm_path(link);
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
+}
+
+void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
+{
+ struct pcie_link_state *link = pdev->link_state;
+
+ if (aspm_disabled || !pci_is_pcie(pdev) || !link)
+ return;
+
+ if (aspm_policy != POLICY_POWERSAVE)
+ return;
+
+ if ((pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM))
+ return;
+
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ pcie_config_aspm_path(link);
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
+}
+
+static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem,
+ bool force)
+{
+ struct pci_dev *parent = pdev->bus->self;
+ struct pcie_link_state *link;
+
+ if (!pci_is_pcie(pdev))
+ return;
+
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM)
+ parent = pdev;
+ if (!parent || !parent->link_state)
+ return;
+
+ /*
+ * A driver requested that ASPM be disabled on this device, but
+ * if we don't have permission to manage ASPM (e.g., on ACPI
+ * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
+ * the _OSC method), we can't honor that request. Windows has
+ * a similar mechanism using "PciASPMOptOut", which is also
+ * ignored in this situation.
+ */
+ if (aspm_disabled && !force) {
+ dev_warn(&pdev->dev, "can't disable ASPM; OS doesn't have ASPM control\n");
+ return;
+ }
+
+ if (sem)
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ link = parent->link_state;
+ if (state & PCIE_LINK_STATE_L0S)
+ link->aspm_disable |= ASPM_STATE_L0S;
+ if (state & PCIE_LINK_STATE_L1)
+ link->aspm_disable |= ASPM_STATE_L1;
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+
+ if (state & PCIE_LINK_STATE_CLKPM) {
+ link->clkpm_capable = 0;
+ pcie_set_clkpm(link, 0);
+ }
+ mutex_unlock(&aspm_lock);
+ if (sem)
+ up_read(&pci_bus_sem);
+}
+
+void pci_disable_link_state_locked(struct pci_dev *pdev, int state)
+{
+ __pci_disable_link_state(pdev, state, false, false);
+}
+EXPORT_SYMBOL(pci_disable_link_state_locked);
+
+/**
+ * pci_disable_link_state - Disable device's link state, so the link will
+ * never enter specific states. Note that if the BIOS didn't grant ASPM
+ * control to the OS, this does nothing because we can't touch the LNKCTL
+ * register.
+ *
+ * @pdev: PCI device
+ * @state: ASPM link state to disable
+ */
+void pci_disable_link_state(struct pci_dev *pdev, int state)
+{
+ __pci_disable_link_state(pdev, state, true, false);
+}
+EXPORT_SYMBOL(pci_disable_link_state);
+
+static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
+{
+ int i;
+ struct pcie_link_state *link;
+
+ if (aspm_disabled)
+ return -EPERM;
+ for (i = 0; i < ARRAY_SIZE(policy_str); i++)
+ if (!strncmp(val, policy_str[i], strlen(policy_str[i])))
+ break;
+ if (i >= ARRAY_SIZE(policy_str))
+ return -EINVAL;
+ if (i == aspm_policy)
+ return 0;
+
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ aspm_policy = i;
+ list_for_each_entry(link, &link_list, sibling) {
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
+ }
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
+ return 0;
+}
+
+static int pcie_aspm_get_policy(char *buffer, struct kernel_param *kp)
+{
+ int i, cnt = 0;
+ for (i = 0; i < ARRAY_SIZE(policy_str); i++)
+ if (i == aspm_policy)
+ cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]);
+ else
+ cnt += sprintf(buffer + cnt, "%s ", policy_str[i]);
+ return cnt;
+}
+
+module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
+ NULL, 0644);
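+
+/*
+ * The policy parameter is typically exposed as
+ * /sys/module/pcie_aspm/parameters/policy; writing one of the strings in
+ * policy_str[] reconfigures every link known to the driver.
+ */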
+
+#ifdef CONFIG_PCIEASPM_DEBUG
+static ssize_t link_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pci_device = to_pci_dev(dev);
+ struct pcie_link_state *link_state = pci_device->link_state;
+
+ return sprintf(buf, "%d\n", link_state->aspm_enabled);
+}
+
+static ssize_t link_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t n)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pcie_link_state *link, *root = pdev->link_state->root;
+ u32 val, state = 0;
+
+ if (kstrtouint(buf, 10, &val))
+ return -EINVAL;
+
+ if (aspm_disabled)
+ return -EPERM;
+ if (n < 1 || val > 3)
+ return -EINVAL;
+
+ /* Convert requested state to ASPM state */
+ if (val & PCIE_LINK_STATE_L0S)
+ state |= ASPM_STATE_L0S;
+ if (val & PCIE_LINK_STATE_L1)
+ state |= ASPM_STATE_L1;
+
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ list_for_each_entry(link, &link_list, sibling) {
+ if (link->root != root)
+ continue;
+ pcie_config_aspm_link(link, state);
+ }
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
+ return n;
+}
+
+static ssize_t clk_ctl_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pci_device = to_pci_dev(dev);
+ struct pcie_link_state *link_state = pci_device->link_state;
+
+ return sprintf(buf, "%d\n", link_state->clkpm_enabled);
+}
+
+static ssize_t clk_ctl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t n)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ bool state;
+
+ if (strtobool(buf, &state))
+ return -EINVAL;
+
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ pcie_set_clkpm_nocheck(pdev->link_state, state);
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
+
+ return n;
+}
+
+static DEVICE_ATTR(link_state, 0644, link_state_show, link_state_store);
+static DEVICE_ATTR(clk_ctl, 0644, clk_ctl_show, clk_ctl_store);
+
+static char power_group[] = "power";
+void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
+{
+ struct pcie_link_state *link_state = pdev->link_state;
+
+ if (!pci_is_pcie(pdev) ||
+ (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
+ pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
+ return;
+
+ if (link_state->aspm_support)
+ sysfs_add_file_to_group(&pdev->dev.kobj,
+ &dev_attr_link_state.attr, power_group);
+ if (link_state->clkpm_capable)
+ sysfs_add_file_to_group(&pdev->dev.kobj,
+ &dev_attr_clk_ctl.attr, power_group);
+}
+
+void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
+{
+ struct pcie_link_state *link_state = pdev->link_state;
+
+ if (!pci_is_pcie(pdev) ||
+ (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
+ pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
+ return;
+
+ if (link_state->aspm_support)
+ sysfs_remove_file_from_group(&pdev->dev.kobj,
+ &dev_attr_link_state.attr, power_group);
+ if (link_state->clkpm_capable)
+ sysfs_remove_file_from_group(&pdev->dev.kobj,
+ &dev_attr_clk_ctl.attr, power_group);
+}
+#endif
+
+static int __init pcie_aspm_disable(char *str)
+{
+ if (!strcmp(str, "off")) {
+ aspm_policy = POLICY_DEFAULT;
+ aspm_disabled = 1;
+ aspm_support_enabled = false;
+ printk(KERN_INFO "PCIe ASPM is disabled\n");
+ } else if (!strcmp(str, "force")) {
+ aspm_force = 1;
+ printk(KERN_INFO "PCIe ASPM is forcibly enabled\n");
+ }
+ return 1;
+}
+
+__setup("pcie_aspm=", pcie_aspm_disable);
+
+void pcie_no_aspm(void)
+{
+ /*
+ * Disabling ASPM is intended to prevent the kernel from modifying
+ * existing hardware state, not to clear existing state. To that end:
+ * (a) set policy to POLICY_DEFAULT in order to avoid changing state
+ * (b) prevent userspace from changing policy
+ */
+ if (!aspm_force) {
+ aspm_policy = POLICY_DEFAULT;
+ aspm_disabled = 1;
+ }
+}
+
+bool pcie_aspm_support_enabled(void)
+{
+ return aspm_support_enabled;
+}
+EXPORT_SYMBOL(pcie_aspm_support_enabled);
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
new file mode 100644
index 000000000..63fc63911
--- /dev/null
+++ b/drivers/pci/pcie/pme.c
@@ -0,0 +1,481 @@
+/*
+ * PCIe Native PME support
+ *
+ * Copyright (C) 2007 - 2009 Intel Corp
+ * Copyright (C) 2007 - 2009 Shaohua Li <shaohua.li@intel.com>
+ * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License V2. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/pcieport_if.h>
+#include <linux/pm_runtime.h>
+
+#include "../pci.h"
+#include "portdrv.h"
+
+/*
+ * If this switch is set, MSI will not be used for PCIe PME signaling. This
+ * causes the PCIe port driver to use INTx interrupts only, but it turns out
+ * that using MSI for PCIe PME signaling doesn't play well with PCIe PME-based
+ * wake-up from system sleep states.
+ */
+bool pcie_pme_msi_disabled;
+
+static int __init pcie_pme_setup(char *str)
+{
+ if (!strncmp(str, "nomsi", 5))
+ pcie_pme_msi_disabled = true;
+
+ return 1;
+}
+__setup("pcie_pme=", pcie_pme_setup);
+
+enum pme_suspend_level {
+ PME_SUSPEND_NONE = 0,
+ PME_SUSPEND_WAKEUP,
+ PME_SUSPEND_NOIRQ,
+};
+
+struct pcie_pme_service_data {
+ spinlock_t lock;
+ struct pcie_device *srv;
+ struct work_struct work;
+ enum pme_suspend_level suspend_level;
+};
+
+/**
+ * pcie_pme_interrupt_enable - Enable/disable PCIe PME interrupt generation.
+ * @dev: PCIe root port or event collector.
+ * @enable: Enable or disable the interrupt.
+ */
+void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable)
+{
+ if (enable)
+ pcie_capability_set_word(dev, PCI_EXP_RTCTL,
+ PCI_EXP_RTCTL_PMEIE);
+ else
+ pcie_capability_clear_word(dev, PCI_EXP_RTCTL,
+ PCI_EXP_RTCTL_PMEIE);
+}
+
+/**
+ * pcie_pme_walk_bus - Scan a PCI bus for devices asserting PME#.
+ * @bus: PCI bus to scan.
+ *
+ * Scan given PCI bus and all buses under it for devices asserting PME#.
+ */
+static bool pcie_pme_walk_bus(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+ bool ret = false;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ /* Skip PCIe devices in case we started from a root port. */
+ if (!pci_is_pcie(dev) && pci_check_pme_status(dev)) {
+ if (dev->pme_poll)
+ dev->pme_poll = false;
+
+ pci_wakeup_event(dev);
+ pm_request_resume(&dev->dev);
+ ret = true;
+ }
+
+ if (dev->subordinate && pcie_pme_walk_bus(dev->subordinate))
+ ret = true;
+ }
+
+ return ret;
+}
+
+/**
+ * pcie_pme_from_pci_bridge - Check if PCIe-PCI bridge generated a PME.
+ * @bus: Secondary bus of the bridge.
+ * @devfn: Device/function number to check.
+ *
+ * PME from PCI devices under a PCIe-PCI bridge may be converted to an in-band
+ * PCIe PME message. In that case the bridge should use the Requester ID
+ * of device/function number 0 on its secondary bus.
+ */
+static bool pcie_pme_from_pci_bridge(struct pci_bus *bus, u8 devfn)
+{
+ struct pci_dev *dev;
+ bool found = false;
+
+ if (devfn)
+ return false;
+
+ dev = pci_dev_get(bus->self);
+ if (!dev)
+ return false;
+
+ if (pci_is_pcie(dev) && pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE) {
+ down_read(&pci_bus_sem);
+ if (pcie_pme_walk_bus(bus))
+ found = true;
+ up_read(&pci_bus_sem);
+ }
+
+ pci_dev_put(dev);
+ return found;
+}
+
+/**
+ * pcie_pme_handle_request - Find device that generated PME and handle it.
+ * @port: Root port or event collector that generated the PME interrupt.
+ * @req_id: PCIe Requester ID of the device that generated the PME.
+ */
+static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id)
+{
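+ /* Requester ID: bus number in bits 15:8, devfn in bits 7:0 */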
+ u8 busnr = req_id >> 8, devfn = req_id & 0xff;
+ struct pci_bus *bus;
+ struct pci_dev *dev;
+ bool found = false;
+
+ /* First, check if the PME is from the root port itself. */
+ if (port->devfn == devfn && port->bus->number == busnr) {
+ if (port->pme_poll)
+ port->pme_poll = false;
+
+ if (pci_check_pme_status(port)) {
+ pm_request_resume(&port->dev);
+ found = true;
+ } else {
+ /*
+ * Apparently, the root port generated the PME on behalf
+ * of a non-PCIe device downstream. If this is done by
+ * a root port, the Requester ID field in its status
+ * register may contain either the root port's or the
+ * source device's information (PCI Express Base
+ * Specification, Rev. 2.0, Section 6.1.9).
+ */
+ down_read(&pci_bus_sem);
+ found = pcie_pme_walk_bus(port->subordinate);
+ up_read(&pci_bus_sem);
+ }
+ goto out;
+ }
+
+ /* Second, find the bus the source device is on. */
+ bus = pci_find_bus(pci_domain_nr(port->bus), busnr);
+ if (!bus)
+ goto out;
+
+ /* Next, check if the PME is from a PCIe-PCI bridge. */
+ found = pcie_pme_from_pci_bridge(bus, devfn);
+ if (found)
+ goto out;
+
+ /* Finally, try to find the PME source on the bus. */
+ down_read(&pci_bus_sem);
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_dev_get(dev);
+ if (dev->devfn == devfn) {
+ found = true;
+ break;
+ }
+ pci_dev_put(dev);
+ }
+ up_read(&pci_bus_sem);
+
+ if (found) {
+ /* The device is there, but we have to check its PME status. */
+ found = pci_check_pme_status(dev);
+ if (found) {
+ if (dev->pme_poll)
+ dev->pme_poll = false;
+
+ pci_wakeup_event(dev);
+ pm_request_resume(&dev->dev);
+ }
+ pci_dev_put(dev);
+ } else if (devfn) {
+ /*
+ * The device is not there, but we can still try to recover by
+ * assuming that the PME was reported by a PCIe-PCI bridge that
+ * used devfn different from zero.
+ */
+ dev_dbg(&port->dev, "PME interrupt generated for non-existent device %02x:%02x.%d\n",
+ busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ found = pcie_pme_from_pci_bridge(bus, 0);
+ }
+
+ out:
+ if (!found)
+ dev_dbg(&port->dev, "Spurious native PME interrupt!\n");
+}
+
+/**
+ * pcie_pme_work_fn - Work handler for PCIe PME interrupt.
+ * @work: Work structure giving access to service data.
+ */
+static void pcie_pme_work_fn(struct work_struct *work)
+{
+ struct pcie_pme_service_data *data =
+ container_of(work, struct pcie_pme_service_data, work);
+ struct pci_dev *port = data->srv->port;
+ u32 rtsta;
+
+ spin_lock_irq(&data->lock);
+
+ for (;;) {
+ if (data->suspend_level != PME_SUSPEND_NONE)
+ break;
+
+ pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
+ if (rtsta & PCI_EXP_RTSTA_PME) {
+ /*
+ * Clear PME status of the port. If there are other
+ * pending PMEs, the status will be set again.
+ */
+ pcie_clear_root_pme_status(port);
+
+ spin_unlock_irq(&data->lock);
+ pcie_pme_handle_request(port, rtsta & 0xffff);
+ spin_lock_irq(&data->lock);
+
+ continue;
+ }
+
+ /* No need to loop if there are no more PMEs pending. */
+ if (!(rtsta & PCI_EXP_RTSTA_PENDING))
+ break;
+
+ spin_unlock_irq(&data->lock);
+ cpu_relax();
+ spin_lock_irq(&data->lock);
+ }
+
+ if (data->suspend_level == PME_SUSPEND_NONE)
+ pcie_pme_interrupt_enable(port, true);
+
+ spin_unlock_irq(&data->lock);
+}
+
+/**
+ * pcie_pme_irq - Interrupt handler for PCIe root port PME interrupt.
+ * @irq: Interrupt vector.
+ * @context: Interrupt context pointer.
+ */
+static irqreturn_t pcie_pme_irq(int irq, void *context)
+{
+ struct pci_dev *port;
+ struct pcie_pme_service_data *data;
+ u32 rtsta;
+ unsigned long flags;
+
+ port = ((struct pcie_device *)context)->port;
+ data = get_service_data((struct pcie_device *)context);
+
+ spin_lock_irqsave(&data->lock, flags);
+ pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
+
+ if (!(rtsta & PCI_EXP_RTSTA_PME)) {
+ spin_unlock_irqrestore(&data->lock, flags);
+ return IRQ_NONE;
+ }
+
+ pcie_pme_interrupt_enable(port, false);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* We don't use pm_wq, because it's freezable. */
+ schedule_work(&data->work);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * pcie_pme_set_native - Set the PME interrupt flag for given device.
+ * @dev: PCI device to handle.
+ * @ign: Ignored.
+ */
+static int pcie_pme_set_native(struct pci_dev *dev, void *ign)
+{
+ dev_info(&dev->dev, "Signaling PME through PCIe PME interrupt\n");
+
+ device_set_run_wake(&dev->dev, true);
+ dev->pme_interrupt = true;
+ return 0;
+}
+
+/**
+ * pcie_pme_mark_devices - Set the PME interrupt flag for devices below a port.
+ * @port: PCIe root port or event collector to handle.
+ *
+ * For each device below the given root port, including the port itself (or
+ * for each root complex integrated endpoint if @port is a root complex event
+ * collector), set the flag indicating that it can signal run-time wake-up
+ * events via PCIe PME interrupts.
+ */
+static void pcie_pme_mark_devices(struct pci_dev *port)
+{
+ pcie_pme_set_native(port, NULL);
+ if (port->subordinate) {
+ pci_walk_bus(port->subordinate, pcie_pme_set_native, NULL);
+ } else {
+ struct pci_bus *bus = port->bus;
+ struct pci_dev *dev;
+
+ /* Check if this is a root complex event collector. */
+ if (pci_pcie_type(port) != PCI_EXP_TYPE_RC_EC || !bus)
+ return;
+
+ down_read(&pci_bus_sem);
+ list_for_each_entry(dev, &bus->devices, bus_list)
+ if (pci_is_pcie(dev)
+ && pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END)
+ pcie_pme_set_native(dev, NULL);
+ up_read(&pci_bus_sem);
+ }
+}
+
+/**
+ * pcie_pme_probe - Initialize PCIe PME service for given root port.
+ * @srv: PCIe service to initialize.
+ */
+static int pcie_pme_probe(struct pcie_device *srv)
+{
+ struct pci_dev *port;
+ struct pcie_pme_service_data *data;
+ int ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ spin_lock_init(&data->lock);
+ INIT_WORK(&data->work, pcie_pme_work_fn);
+ data->srv = srv;
+ set_service_data(srv, data);
+
+ port = srv->port;
+ pcie_pme_interrupt_enable(port, false);
+ pcie_clear_root_pme_status(port);
+
+ ret = request_irq(srv->irq, pcie_pme_irq, IRQF_SHARED, "PCIe PME", srv);
+ if (ret) {
+ kfree(data);
+ } else {
+ pcie_pme_mark_devices(port);
+ pcie_pme_interrupt_enable(port, true);
+ }
+
+ return ret;
+}
+
+static bool pcie_pme_check_wakeup(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ if (!bus)
+ return false;
+
+ list_for_each_entry(dev, &bus->devices, bus_list)
+ if (device_may_wakeup(&dev->dev)
+ || pcie_pme_check_wakeup(dev->subordinate))
+ return true;
+
+ return false;
+}
+
+/**
+ * pcie_pme_suspend - Suspend PCIe PME service device.
+ * @srv: PCIe service device to suspend.
+ */
+static int pcie_pme_suspend(struct pcie_device *srv)
+{
+ struct pcie_pme_service_data *data = get_service_data(srv);
+ struct pci_dev *port = srv->port;
+ bool wakeup;
+ int ret;
+
+ if (device_may_wakeup(&port->dev)) {
+ wakeup = true;
+ } else {
+ down_read(&pci_bus_sem);
+ wakeup = pcie_pme_check_wakeup(port->subordinate);
+ up_read(&pci_bus_sem);
+ }
+ spin_lock_irq(&data->lock);
+ if (wakeup) {
+ ret = enable_irq_wake(srv->irq);
+ data->suspend_level = PME_SUSPEND_WAKEUP;
+ }
+ if (!wakeup || ret) {
+ pcie_pme_interrupt_enable(port, false);
+ pcie_clear_root_pme_status(port);
+ data->suspend_level = PME_SUSPEND_NOIRQ;
+ }
+ spin_unlock_irq(&data->lock);
+
+ synchronize_irq(srv->irq);
+
+ return 0;
+}
+
+/**
+ * pcie_pme_resume - Resume PCIe PME service device.
+ * @srv: PCIe service device to resume.
+ */
+static int pcie_pme_resume(struct pcie_device *srv)
+{
+ struct pcie_pme_service_data *data = get_service_data(srv);
+
+ spin_lock_irq(&data->lock);
+ if (data->suspend_level == PME_SUSPEND_NOIRQ) {
+ struct pci_dev *port = srv->port;
+
+ pcie_clear_root_pme_status(port);
+ pcie_pme_interrupt_enable(port, true);
+ } else {
+ disable_irq_wake(srv->irq);
+ }
+ data->suspend_level = PME_SUSPEND_NONE;
+ spin_unlock_irq(&data->lock);
+
+ return 0;
+}
+
+/**
+ * pcie_pme_remove - Prepare PCIe PME service device for removal.
+ * @srv: PCIe service device to remove.
+ */
+static void pcie_pme_remove(struct pcie_device *srv)
+{
+ pcie_pme_suspend(srv);
+ free_irq(srv->irq, srv);
+ kfree(get_service_data(srv));
+}
+
+static struct pcie_port_service_driver pcie_pme_driver = {
+ .name = "pcie_pme",
+ .port_type = PCI_EXP_TYPE_ROOT_PORT,
+ .service = PCIE_PORT_SERVICE_PME,
+
+ .probe = pcie_pme_probe,
+ .suspend = pcie_pme_suspend,
+ .resume = pcie_pme_resume,
+ .remove = pcie_pme_remove,
+};
+
+/**
+ * pcie_pme_service_init - Register the PCIe PME service driver.
+ */
+static int __init pcie_pme_service_init(void)
+{
+ return pcie_port_service_register(&pcie_pme_driver);
+}
+
+module_init(pcie_pme_service_init);
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
new file mode 100644
index 000000000..d52554840
--- /dev/null
+++ b/drivers/pci/pcie/portdrv.h
@@ -0,0 +1,83 @@
+/*
+ * File: portdrv.h
+ * Purpose: PCI Express Port Bus Driver's Internal Data Structures
+ *
+ * Copyright (C) 2004 Intel
+ * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
+ */
+
+#ifndef _PORTDRV_H_
+#define _PORTDRV_H_
+
+#include <linux/compiler.h>
+
+#define PCIE_PORT_DEVICE_MAXSERVICES 4
+/*
+ * According to the PCI Express Base Specification 2.0, the indices of
+ * the MSI-X table entries used by port services must not exceed 31.
+ */
+#define PCIE_PORT_MAX_MSIX_ENTRIES 32
+
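+/* Port types start at PCI_EXP_TYPE_ROOT_PORT (4), hence the "type - 4" bias */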
+#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
+
+extern struct bus_type pcie_port_bus_type;
+int pcie_port_device_register(struct pci_dev *dev);
+#ifdef CONFIG_PM
+int pcie_port_device_suspend(struct device *dev);
+int pcie_port_device_resume(struct device *dev);
+#endif
+void pcie_port_device_remove(struct pci_dev *dev);
+int __must_check pcie_port_bus_register(void);
+void pcie_port_bus_unregister(void);
+
+struct pci_dev;
+
+void pcie_clear_root_pme_status(struct pci_dev *dev);
+
+#ifdef CONFIG_HOTPLUG_PCI_PCIE
+extern bool pciehp_msi_disabled;
+
+static inline bool pciehp_no_msi(void)
+{
+ return pciehp_msi_disabled;
+}
+
+#else /* !CONFIG_HOTPLUG_PCI_PCIE */
+static inline bool pciehp_no_msi(void) { return false; }
+#endif /* !CONFIG_HOTPLUG_PCI_PCIE */
+
+#ifdef CONFIG_PCIE_PME
+extern bool pcie_pme_msi_disabled;
+
+static inline void pcie_pme_disable_msi(void)
+{
+ pcie_pme_msi_disabled = true;
+}
+
+static inline bool pcie_pme_no_msi(void)
+{
+ return pcie_pme_msi_disabled;
+}
+
+void pcie_pme_interrupt_enable(struct pci_dev *dev, bool enable);
+#else /* !CONFIG_PCIE_PME */
+static inline void pcie_pme_disable_msi(void) {}
+static inline bool pcie_pme_no_msi(void) { return false; }
+static inline void pcie_pme_interrupt_enable(struct pci_dev *dev, bool en) {}
+#endif /* !CONFIG_PCIE_PME */
+
+#ifdef CONFIG_ACPI
+int pcie_port_acpi_setup(struct pci_dev *port, int *mask);
+
+static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+{
+ return pcie_port_acpi_setup(port, mask);
+}
+#else /* !CONFIG_ACPI */
+static inline int pcie_port_platform_notify(struct pci_dev *port, int *mask)
+{
+ return 0;
+}
+#endif /* !CONFIG_ACPI */
+
+#endif /* _PORTDRV_H_ */
diff --git a/drivers/pci/pcie/portdrv_acpi.c b/drivers/pci/pcie/portdrv_acpi.c
new file mode 100644
index 000000000..b4d2894ee
--- /dev/null
+++ b/drivers/pci/pcie/portdrv_acpi.c
@@ -0,0 +1,63 @@
+/*
+ * PCIe Port Native Services Support, ACPI-Related Part
+ *
+ * Copyright (C) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License V2. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/pci-acpi.h>
+#include <linux/pcieport_if.h>
+
+#include "aer/aerdrv.h"
+#include "../pci.h"
+#include "portdrv.h"
+
+/**
+ * pcie_port_acpi_setup - Request the BIOS to release control of PCIe services.
+ * @port: PCIe Port service for a root port or event collector.
+ * @srv_mask: Bit mask of services that can be enabled for @port.
+ *
+ * Invoked when @port is identified as a PCIe port device. To avoid conflicts
+ * with the BIOS, PCIe port native services support requires the BIOS to yield
+ * control of these services to the kernel. The mask of services that the BIOS
+ * allows to be enabled for @port is written to @srv_mask.
+ *
+ * NOTE: It turns out that we cannot do that for individual port services
+ * separately, because that would make some systems work incorrectly.
+ */
+int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
+{
+ struct acpi_pci_root *root;
+ acpi_handle handle;
+ u32 flags;
+
+ if (acpi_pci_disabled)
+ return 0;
+
+ handle = acpi_find_root_bridge_handle(port);
+ if (!handle)
+ return -EINVAL;
+
+ root = acpi_pci_find_root(handle);
+ if (!root)
+ return -ENODEV;
+
+ flags = root->osc_control_set;
+
+ *srv_mask = PCIE_PORT_SERVICE_VC;
+ if (flags & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
+ *srv_mask |= PCIE_PORT_SERVICE_HP;
+ if (flags & OSC_PCI_EXPRESS_PME_CONTROL)
+ *srv_mask |= PCIE_PORT_SERVICE_PME;
+ if (flags & OSC_PCI_EXPRESS_AER_CONTROL)
+ *srv_mask |= PCIE_PORT_SERVICE_AER;
+
+ return 0;
+}
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
new file mode 100644
index 000000000..87e79a6ff
--- /dev/null
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -0,0 +1,55 @@
+/*
+ * File: portdrv_bus.c
+ * Purpose: PCI Express Port Bus Driver's Bus Overloading Functions
+ *
+ * Copyright (C) 2004 Intel
+ * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pm.h>
+
+#include <linux/pcieport_if.h>
+#include "portdrv.h"
+
+static int pcie_port_bus_match(struct device *dev, struct device_driver *drv);
+
+struct bus_type pcie_port_bus_type = {
+ .name = "pci_express",
+ .match = pcie_port_bus_match,
+};
+EXPORT_SYMBOL_GPL(pcie_port_bus_type);
+
+static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct pcie_device *pciedev;
+ struct pcie_port_service_driver *driver;
+
+ if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
+ return 0;
+
+ pciedev = to_pcie_device(dev);
+ driver = to_service_driver(drv);
+
+ if (driver->service != pciedev->service)
+ return 0;
+
+ if ((driver->port_type != PCIE_ANY_PORT) &&
+ (driver->port_type != pci_pcie_type(pciedev->port)))
+ return 0;
+
+ return 1;
+}
+
+int pcie_port_bus_register(void)
+{
+ return bus_register(&pcie_port_bus_type);
+}
+
+void pcie_port_bus_unregister(void)
+{
+ bus_unregister(&pcie_port_bus_type);
+}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
new file mode 100644
index 000000000..2f0ce668a
--- /dev/null
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -0,0 +1,578 @@
+/*
+ * File: portdrv_core.c
+ * Purpose: PCI Express Port Bus Driver's Core Functions
+ *
+ * Copyright (C) 2004 Intel
+ * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pm.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/pcieport_if.h>
+#include <linux/aer.h>
+
+#include "../pci.h"
+#include "portdrv.h"
+
+bool pciehp_msi_disabled;
+
+static int __init pciehp_setup(char *str)
+{
+ if (!strncmp(str, "nomsi", 5))
+ pciehp_msi_disabled = true;
+
+ return 1;
+}
+__setup("pcie_hp=", pciehp_setup);
+
+/**
+ * release_pcie_device - free PCI Express port service device structure
+ * @dev: Port service device to release
+ *
+ * Invoked automatically when the device is being removed in response to
+ * device_unregister(dev). Release all resources being claimed.
+ */
+static void release_pcie_device(struct device *dev)
+{
+ kfree(to_pcie_device(dev));
+}
+
+/**
+ * pcie_port_msix_add_entry - add entry to given array of MSI-X entries
+ * @entries: Array of MSI-X entries
+ * @new_entry: Index of the entry to add to the array
+ * @nr_entries: Number of entries already in the array
+ *
+ * Return value: Position of the added entry in the array
+ */
+static int pcie_port_msix_add_entry(
+ struct msix_entry *entries, int new_entry, int nr_entries)
+{
+ int j;
+
+ for (j = 0; j < nr_entries; j++)
+ if (entries[j].entry == new_entry)
+ return j;
+
+ entries[j].entry = new_entry;
+ return j;
+}
+
+/**
+ * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port
+ * @dev: PCI Express port to handle
+ * @vectors: Array of interrupt vectors to populate
+ * @mask: Bitmask of port capabilities returned by get_port_device_capability()
+ *
+ * Return value: 0 on success, error code on failure
+ */
+static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
+{
+ struct msix_entry *msix_entries;
+ int idx[PCIE_PORT_DEVICE_MAXSERVICES];
+ int nr_entries, status, pos, i, nvec;
+ u16 reg16;
+ u32 reg32;
+
+ nr_entries = pci_msix_vec_count(dev);
+ if (nr_entries < 0)
+ return nr_entries;
+ BUG_ON(!nr_entries);
+ if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES)
+ nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES;
+
+ msix_entries = kzalloc(sizeof(*msix_entries) * nr_entries, GFP_KERNEL);
+ if (!msix_entries)
+ return -ENOMEM;
+
+ /*
+ * Allocate as many entries as the port wants, so that we can check
+ * which of them will be useful. Moreover, if nr_entries happens to equal
+ * the number of entries this port actually uses, we'll happily go
+ * through without any tricks.
+ */
+ for (i = 0; i < nr_entries; i++)
+ msix_entries[i].entry = i;
+
+ status = pci_enable_msix_exact(dev, msix_entries, nr_entries);
+ if (status)
+ goto Exit;
+
+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+ idx[i] = -1;
+ status = -EIO;
+ nvec = 0;
+
+ if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
+ int entry;
+
+ /*
+ * The code below follows the PCI Express Base Specification 2.0
+ * stating in Section 6.1.6 that "PME and Hot-Plug Event
+ * interrupts (when both are implemented) always share the same
+ * MSI or MSI-X vector, as indicated by the Interrupt Message
+ * Number field in the PCI Express Capabilities register", where
+ * according to Section 7.8.2 of the specification "For MSI-X,
+ * the value in this field indicates which MSI-X Table entry is
+ * used to generate the interrupt message."
+ */
+ pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
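+ /* The Interrupt Message Number occupies bits 13:9 (PCI_EXP_FLAGS_IRQ) */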
+ entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
+ if (entry >= nr_entries)
+ goto Error;
+
+ i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
+ if (i == nvec)
+ nvec++;
+
+ idx[PCIE_PORT_SERVICE_PME_SHIFT] = i;
+ idx[PCIE_PORT_SERVICE_HP_SHIFT] = i;
+ }
+
+ if (mask & PCIE_PORT_SERVICE_AER) {
+ int entry;
+
+ /*
+ * The code below follows Section 7.10.10 of the PCI Express
+ * Base Specification 2.0 stating that bits 31-27 of the Root
+ * Error Status Register contain a value indicating which of the
+ * MSI/MSI-X vectors assigned to the port is going to be used
+ * for AER, where "For MSI-X, the value in this register
+ * indicates which MSI-X Table entry is used to generate the
+ * interrupt message."
+ */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
+ entry = reg32 >> 27;
+ if (entry >= nr_entries)
+ goto Error;
+
+ i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
+ if (i == nvec)
+ nvec++;
+
+ idx[PCIE_PORT_SERVICE_AER_SHIFT] = i;
+ }
+
+ /*
+ * If nvec is equal to the allocated number of entries, we can just use
+ * what we have. Otherwise, the port has some extra entries not for the
+ * services we know and we need to work around that.
+ */
+ if (nvec == nr_entries) {
+ status = 0;
+ } else {
+ /* Drop the temporary MSI-X setup */
+ pci_disable_msix(dev);
+
+ /* Now allocate the MSI-X vectors for real */
+ status = pci_enable_msix_exact(dev, msix_entries, nvec);
+ if (status)
+ goto Exit;
+ }
+
+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+ vectors[i] = idx[i] >= 0 ? msix_entries[idx[i]].vector : -1;
+
+ Exit:
+ kfree(msix_entries);
+ return status;
+
+ Error:
+ pci_disable_msix(dev);
+ goto Exit;
+}
+
+/**
+ * init_service_irqs - initialize irqs for PCI Express port services
+ * @dev: PCI Express port to handle
+ * @irqs: Array of irqs to populate
+ * @mask: Bitmask of port capabilities returned by get_port_device_capability()
+ *
+ * Return value: 0 on success, error code on failure
+ */
+static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
+{
+ int i, irq = -1;
+
+ /*
+ * If MSI cannot be used for PCIe PME or hotplug, we have to use
+ * INTx or other interrupts, e.g. a system shared interrupt.
+ */
+ if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) ||
+ ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) {
+ if (dev->irq)
+ irq = dev->irq;
+ goto no_msi;
+ }
+
+ /* Try to use MSI-X if supported */
+ if (!pcie_port_enable_msix(dev, irqs, mask))
+ return 0;
+
+ /*
+ * We're not going to use MSI-X, so try MSI and fall back to INTx.
+ * If neither MSI/MSI-X nor INTx is available, try another interrupt. On
+ * some platforms, the root port doesn't support MSI/MSI-X/INTx in RC mode.
+ */
+ if (!pci_enable_msi(dev) || dev->irq)
+ irq = dev->irq;
+
+ no_msi:
+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+ irqs[i] = irq;
+ irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
+
+ if (irq < 0)
+ return -ENODEV;
+ return 0;
+}
+
+static void cleanup_service_irqs(struct pci_dev *dev)
+{
+ if (dev->msix_enabled)
+ pci_disable_msix(dev);
+ else if (dev->msi_enabled)
+ pci_disable_msi(dev);
+}
+
+/**
+ * get_port_device_capability - discover capabilities of a PCI Express port
+ * @dev: PCI Express port to examine
+ *
+ * The capabilities are read from the port's PCI Express configuration registers
+ * as described in PCI Express Base Specification 1.0a sections 7.8.2, 7.8.9 and
+ * 7.9 - 7.11.
+ *
+ * Return value: Bitmask of discovered port capabilities
+ */
+static int get_port_device_capability(struct pci_dev *dev)
+{
+ int services = 0;
+ u32 reg32;
+ int cap_mask = 0;
+ int err;
+
+ if (pcie_ports_disabled)
+ return 0;
+
+ cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP
+ | PCIE_PORT_SERVICE_VC;
+ if (pci_aer_available())
+ cap_mask |= PCIE_PORT_SERVICE_AER;
+
+ if (pcie_ports_auto) {
+ err = pcie_port_platform_notify(dev, &cap_mask);
+ if (err)
+ return 0;
+ }
+
+ /* Hot-Plug Capable */
+ if ((cap_mask & PCIE_PORT_SERVICE_HP) &&
+ pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT) {
+ pcie_capability_read_dword(dev, PCI_EXP_SLTCAP, &reg32);
+ if (reg32 & PCI_EXP_SLTCAP_HPC) {
+ services |= PCIE_PORT_SERVICE_HP;
+ /*
+ * Disable hot-plug interrupts in case they have been
+ * enabled by the BIOS and the hot-plug service driver
+ * is not loaded.
+ */
+ pcie_capability_clear_word(dev, PCI_EXP_SLTCTL,
+ PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE);
+ }
+ }
+ /* AER capable */
+ if ((cap_mask & PCIE_PORT_SERVICE_AER)
+ && pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) {
+ services |= PCIE_PORT_SERVICE_AER;
+ /*
+ * Disable AER on this port in case it's been enabled by the
+ * BIOS (the AER service driver will enable it when necessary).
+ */
+ pci_disable_pcie_error_reporting(dev);
+ }
+ /* VC support */
+ if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
+ services |= PCIE_PORT_SERVICE_VC;
+ /* Root ports are capable of generating PME too */
+ if ((cap_mask & PCIE_PORT_SERVICE_PME)
+ && pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
+ services |= PCIE_PORT_SERVICE_PME;
+ /*
+ * Disable PME interrupt on this port in case it's been enabled
+ * by the BIOS (the PME service driver will enable it when
+ * necessary).
+ */
+ pcie_pme_interrupt_enable(dev, false);
+ }
+
+ return services;
+}
+
+/**
+ * pcie_device_init - allocate and initialize PCI Express port service device
+ * @pdev: PCI Express port to associate the service device with
+ * @service: Type of service to associate with the service device
+ * @irq: Interrupt vector to associate with the service device
+ */
+static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
+{
+ int retval;
+ struct pcie_device *pcie;
+ struct device *device;
+
+ pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+ pcie->port = pdev;
+ pcie->irq = irq;
+ pcie->service = service;
+
+ /* Initialize generic device interface */
+ device = &pcie->device;
+ device->bus = &pcie_port_bus_type;
+ device->release = release_pcie_device; /* callback to free pcie dev */
+ dev_set_name(device, "%s:pcie%02x",
+ pci_name(pdev),
+ get_descriptor_id(pci_pcie_type(pdev), service));
+ device->parent = &pdev->dev;
+ device_enable_async_suspend(device);
+
+ retval = device_register(device);
+ if (retval) {
+ put_device(device);
+ return retval;
+ }
+
+ return 0;
+}
+
+/**
+ * pcie_port_device_register - register PCI Express port
+ * @dev: PCI Express port to register
+ *
+ * Allocate the port extension structure and register services associated with
+ * the port.
+ */
+int pcie_port_device_register(struct pci_dev *dev)
+{
+ int status, capabilities, i, nr_service;
+ int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
+
+ /* Enable PCI Express port device */
+ status = pci_enable_device(dev);
+ if (status)
+ return status;
+
+ /* Get and check PCI Express port services */
+ capabilities = get_port_device_capability(dev);
+ if (!capabilities)
+ return 0;
+
+ pci_set_master(dev);
+ /*
+ * Initialize service irqs. Don't use service devices that
+ * require interrupts if there is no way to generate them.
+ * However, some drivers may have a polling mode (e.g. pciehp_poll_mode)
+ * that can be used in the absence of irqs. Allow them to determine
+ * if that is to be used.
+ */
+ status = init_service_irqs(dev, irqs, capabilities);
+ if (status) {
+ capabilities &= PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_HP;
+ if (!capabilities)
+ goto error_disable;
+ }
+
+ /* Allocate child services if any */
+ status = -ENODEV;
+ nr_service = 0;
+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
+ int service = 1 << i;
+ if (!(capabilities & service))
+ continue;
+ if (!pcie_device_init(dev, service, irqs[i]))
+ nr_service++;
+ }
+ if (!nr_service)
+ goto error_cleanup_irqs;
+
+ return 0;
+
+error_cleanup_irqs:
+ cleanup_service_irqs(dev);
+error_disable:
+ pci_disable_device(dev);
+ return status;
+}
+
+#ifdef CONFIG_PM
+static int suspend_iter(struct device *dev, void *data)
+{
+ struct pcie_port_service_driver *service_driver;
+
+ if ((dev->bus == &pcie_port_bus_type) && dev->driver) {
+ service_driver = to_service_driver(dev->driver);
+ if (service_driver->suspend)
+ service_driver->suspend(to_pcie_device(dev));
+ }
+ return 0;
+}
+
+/**
+ * pcie_port_device_suspend - suspend port services associated with a PCIe port
+ * @dev: PCI Express port to handle
+ */
+int pcie_port_device_suspend(struct device *dev)
+{
+ return device_for_each_child(dev, NULL, suspend_iter);
+}
+
+static int resume_iter(struct device *dev, void *data)
+{
+ struct pcie_port_service_driver *service_driver;
+
+ if ((dev->bus == &pcie_port_bus_type) &&
+ (dev->driver)) {
+ service_driver = to_service_driver(dev->driver);
+ if (service_driver->resume)
+ service_driver->resume(to_pcie_device(dev));
+ }
+ return 0;
+}
+
+/**
+ * pcie_port_device_resume - resume port services associated with a PCIe port
+ * @dev: PCI Express port to handle
+ */
+int pcie_port_device_resume(struct device *dev)
+{
+ return device_for_each_child(dev, NULL, resume_iter);
+}
+#endif /* PM */
+
+static int remove_iter(struct device *dev, void *data)
+{
+ if (dev->bus == &pcie_port_bus_type)
+ device_unregister(dev);
+ return 0;
+}
+
+/**
+ * pcie_port_device_remove - unregister PCI Express port service devices
+ * @dev: PCI Express port the service devices to unregister are associated with
+ *
+ * Remove PCI Express port service devices associated with given port and
+ * disable MSI-X or MSI for the port.
+ */
+void pcie_port_device_remove(struct pci_dev *dev)
+{
+ device_for_each_child(&dev->dev, NULL, remove_iter);
+ cleanup_service_irqs(dev);
+ pci_disable_device(dev);
+}
+
+/**
+ * pcie_port_probe_service - probe driver for given PCI Express port service
+ * @dev: PCI Express port service device to probe against
+ *
+ * If a PCI Express port service driver is registered with
+ * pcie_port_service_register(), this function will be called by the driver core
+ * whenever a match is found between the driver and a port service device.
+ */
+static int pcie_port_probe_service(struct device *dev)
+{
+ struct pcie_device *pciedev;
+ struct pcie_port_service_driver *driver;
+ int status;
+
+ if (!dev || !dev->driver)
+ return -ENODEV;
+
+ driver = to_service_driver(dev->driver);
+ if (!driver || !driver->probe)
+ return -ENODEV;
+
+ pciedev = to_pcie_device(dev);
+ status = driver->probe(pciedev);
+ if (status)
+ return status;
+
+ dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", driver->name);
+ get_device(dev);
+ return 0;
+}
+
+/**
+ * pcie_port_remove_service - detach driver from given PCI Express port service
+ * @dev: PCI Express port service device to handle
+ *
+ * If a PCI Express port service driver is registered with
+ * pcie_port_service_register(), this function will be called by the driver core
+ * when device_unregister() is called for the port service device associated
+ * with the driver.
+ */
+static int pcie_port_remove_service(struct device *dev)
+{
+ struct pcie_device *pciedev;
+ struct pcie_port_service_driver *driver;
+
+ if (!dev || !dev->driver)
+ return 0;
+
+ pciedev = to_pcie_device(dev);
+ driver = to_service_driver(dev->driver);
+ if (driver && driver->remove) {
+ dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n",
+ driver->name);
+ driver->remove(pciedev);
+ put_device(dev);
+ }
+ return 0;
+}
+
+/**
+ * pcie_port_shutdown_service - shut down given PCI Express port service
+ * @dev: PCI Express port service device to handle
+ *
+ * If a PCI Express port service driver is registered with
+ * pcie_port_service_register(), this function will be called by the driver core
+ * when device_shutdown() is called for the port service device associated
+ * with the driver.
+ */
+static void pcie_port_shutdown_service(struct device *dev) {}
+
+/**
+ * pcie_port_service_register - register PCI Express port service driver
+ * @new: PCI Express port service driver to register
+ */
+int pcie_port_service_register(struct pcie_port_service_driver *new)
+{
+ if (pcie_ports_disabled)
+ return -ENODEV;
+
+ new->driver.name = new->name;
+ new->driver.bus = &pcie_port_bus_type;
+ new->driver.probe = pcie_port_probe_service;
+ new->driver.remove = pcie_port_remove_service;
+ new->driver.shutdown = pcie_port_shutdown_service;
+
+ return driver_register(&new->driver);
+}
+EXPORT_SYMBOL(pcie_port_service_register);
+
+/**
+ * pcie_port_service_unregister - unregister PCI Express port service driver
+ * @drv: PCI Express port service driver to unregister
+ */
+void pcie_port_service_unregister(struct pcie_port_service_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(pcie_port_service_unregister);
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
new file mode 100644
index 000000000..be35da2e1
--- /dev/null
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -0,0 +1,364 @@
+/*
+ * File: portdrv_pci.c
+ * Purpose: PCI Express Port Bus Driver
+ *
+ * Copyright (C) 2004 Intel
+ * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/init.h>
+#include <linux/pcieport_if.h>
+#include <linux/aer.h>
+#include <linux/dmi.h>
+#include <linux/pci-aspm.h>
+
+#include "portdrv.h"
+#include "aer/aerdrv.h"
+
+/*
+ * Version Information
+ */
+#define DRIVER_VERSION "v1.0"
+#define DRIVER_AUTHOR "tom.l.nguyen@intel.com"
+#define DRIVER_DESC "PCIe Port Bus Driver"
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+/* If this switch is set, PCIe port native services should not be enabled. */
+bool pcie_ports_disabled;
+
+/*
+ * If this switch is set, ACPI _OSC will be used to determine whether or not to
+ * enable PCIe port native services.
+ */
+bool pcie_ports_auto = true;
+
+static int __init pcie_port_setup(char *str)
+{
+ if (!strncmp(str, "compat", 6)) {
+ pcie_ports_disabled = true;
+ } else if (!strncmp(str, "native", 6)) {
+ pcie_ports_disabled = false;
+ pcie_ports_auto = false;
+ } else if (!strncmp(str, "auto", 4)) {
+ pcie_ports_disabled = false;
+ pcie_ports_auto = true;
+ }
+
+ return 1;
+}
+__setup("pcie_ports=", pcie_port_setup);
+
+/* global data */
+
+/**
+ * pcie_clear_root_pme_status - Clear root port PME interrupt status.
+ * @dev: PCIe root port or event collector.
+ */
+void pcie_clear_root_pme_status(struct pci_dev *dev)
+{
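+ /* The PME Status bit is RW1C, so writing 1 to it clears it */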
+ pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
+}
+
+static int pcie_portdrv_restore_config(struct pci_dev *dev)
+{
+ int retval;
+
+ retval = pci_enable_device(dev);
+ if (retval)
+ return retval;
+ pci_set_master(dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int pcie_port_resume_noirq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ /*
+ * Some BIOSes forget to clear Root PME Status bits after system wakeup,
+ * which breaks ACPI-based runtime wakeup on PCI Express, so clear those
+ * bits now just in case (shouldn't hurt).
+ */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
+ pcie_clear_root_pme_status(pdev);
+ return 0;
+}
+
+static const struct dev_pm_ops pcie_portdrv_pm_ops = {
+ .suspend = pcie_port_device_suspend,
+ .resume = pcie_port_device_resume,
+ .freeze = pcie_port_device_suspend,
+ .thaw = pcie_port_device_resume,
+ .poweroff = pcie_port_device_suspend,
+ .restore = pcie_port_device_resume,
+ .resume_noirq = pcie_port_resume_noirq,
+};
+
+#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
+
+#else /* !PM */
+
+#define PCIE_PORTDRV_PM_OPS NULL
+#endif /* !PM */
+
+/*
+ * pcie_portdrv_probe - Probe PCI Express port devices
+ * @dev: PCI Express port device being probed
+ *
+ * If a PCI Express port device is detected, invoke
+ * pcie_port_device_register() for it.
+ */
+static int pcie_portdrv_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ int status;
+
+ if (!pci_is_pcie(dev) ||
+ ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM) &&
+ (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
+ return -ENODEV;
+
+ status = pcie_port_device_register(dev);
+ if (status)
+ return status;
+
+ pci_save_state(dev);
+ /*
+	 * D3cold may not work properly on some PCIe ports, so disable
+	 * it by default.
+ */
+ dev->d3cold_allowed = false;
+ return 0;
+}
+
+static void pcie_portdrv_remove(struct pci_dev *dev)
+{
+ pcie_port_device_remove(dev);
+}
+
+static int error_detected_iter(struct device *device, void *data)
+{
+ struct pcie_device *pcie_device;
+ struct pcie_port_service_driver *driver;
+ struct aer_broadcast_data *result_data;
+ pci_ers_result_t status;
+
+ result_data = (struct aer_broadcast_data *) data;
+
+ if (device->bus == &pcie_port_bus_type && device->driver) {
+ driver = to_service_driver(device->driver);
+ if (!driver ||
+ !driver->err_handler ||
+ !driver->err_handler->error_detected)
+ return 0;
+
+ pcie_device = to_pcie_device(device);
+
+ /* Forward error detected message to service drivers */
+ status = driver->err_handler->error_detected(
+ pcie_device->port,
+ result_data->state);
+ result_data->result =
+ merge_result(result_data->result, status);
+ }
+
+ return 0;
+}
+
+static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
+ enum pci_channel_state error)
+{
+ struct aer_broadcast_data data = {error, PCI_ERS_RESULT_CAN_RECOVER};
+
+ /* get true return value from &data */
+ device_for_each_child(&dev->dev, &data, error_detected_iter);
+ return data.result;
+}
+
+static int mmio_enabled_iter(struct device *device, void *data)
+{
+ struct pcie_device *pcie_device;
+ struct pcie_port_service_driver *driver;
+ pci_ers_result_t status, *result;
+
+ result = (pci_ers_result_t *) data;
+
+ if (device->bus == &pcie_port_bus_type && device->driver) {
+ driver = to_service_driver(device->driver);
+ if (driver &&
+ driver->err_handler &&
+ driver->err_handler->mmio_enabled) {
+ pcie_device = to_pcie_device(device);
+
+ /* Forward error message to service drivers */
+ status = driver->err_handler->mmio_enabled(
+ pcie_device->port);
+ *result = merge_result(*result, status);
+ }
+ }
+
+ return 0;
+}
+
+static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev)
+{
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+
+ /* get true return value from &status */
+ device_for_each_child(&dev->dev, &status, mmio_enabled_iter);
+ return status;
+}
+
+static int slot_reset_iter(struct device *device, void *data)
+{
+ struct pcie_device *pcie_device;
+ struct pcie_port_service_driver *driver;
+ pci_ers_result_t status, *result;
+
+ result = (pci_ers_result_t *) data;
+
+ if (device->bus == &pcie_port_bus_type && device->driver) {
+ driver = to_service_driver(device->driver);
+ if (driver &&
+ driver->err_handler &&
+ driver->err_handler->slot_reset) {
+ pcie_device = to_pcie_device(device);
+
+ /* Forward error message to service drivers */
+ status = driver->err_handler->slot_reset(
+ pcie_device->port);
+ *result = merge_result(*result, status);
+ }
+ }
+
+ return 0;
+}
+
+static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
+{
+ pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+
+ /* If fatal, restore cfg space for possible link reset at upstream */
+ if (dev->error_state == pci_channel_io_frozen) {
+ dev->state_saved = true;
+ pci_restore_state(dev);
+ pcie_portdrv_restore_config(dev);
+ pci_enable_pcie_error_reporting(dev);
+ }
+
+ /* get true return value from &status */
+ device_for_each_child(&dev->dev, &status, slot_reset_iter);
+ return status;
+}
+
+static int resume_iter(struct device *device, void *data)
+{
+ struct pcie_device *pcie_device;
+ struct pcie_port_service_driver *driver;
+
+ if (device->bus == &pcie_port_bus_type && device->driver) {
+ driver = to_service_driver(device->driver);
+ if (driver &&
+ driver->err_handler &&
+ driver->err_handler->resume) {
+ pcie_device = to_pcie_device(device);
+
+ /* Forward error message to service drivers */
+ driver->err_handler->resume(pcie_device->port);
+ }
+ }
+
+ return 0;
+}
+
+static void pcie_portdrv_err_resume(struct pci_dev *dev)
+{
+ device_for_each_child(&dev->dev, NULL, resume_iter);
+}
+
+/*
+ * LINUX Device Driver Model
+ */
+static const struct pci_device_id port_pci_ids[] = { {
+ /* handle any PCI-Express port */
+ PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0),
+ }, { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, port_pci_ids);
+
+static const struct pci_error_handlers pcie_portdrv_err_handler = {
+ .error_detected = pcie_portdrv_error_detected,
+ .mmio_enabled = pcie_portdrv_mmio_enabled,
+ .slot_reset = pcie_portdrv_slot_reset,
+ .resume = pcie_portdrv_err_resume,
+};
+
+static struct pci_driver pcie_portdriver = {
+ .name = "pcieport",
+ .id_table = &port_pci_ids[0],
+
+ .probe = pcie_portdrv_probe,
+ .remove = pcie_portdrv_remove,
+
+ .err_handler = &pcie_portdrv_err_handler,
+
+ .driver.pm = PCIE_PORTDRV_PM_OPS,
+};
+
+static int __init dmi_pcie_pme_disable_msi(const struct dmi_system_id *d)
+{
+ pr_notice("%s detected: will not use MSI for PCIe PME signaling\n",
+ d->ident);
+ pcie_pme_disable_msi();
+ return 0;
+}
+
+static struct dmi_system_id __initdata pcie_portdrv_dmi_table[] = {
+ /*
+ * Boxes that should not use MSI for PCIe PME signaling.
+ */
+ {
+ .callback = dmi_pcie_pme_disable_msi,
+ .ident = "MSI Wind U-100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U-100"),
+ },
+ },
+ {}
+};
+
+static int __init pcie_portdrv_init(void)
+{
+ int retval;
+
+ if (pcie_ports_disabled)
+ return pci_register_driver(&pcie_portdriver);
+
+ dmi_check_system(pcie_portdrv_dmi_table);
+
+ retval = pcie_port_bus_register();
+ if (retval) {
+ printk(KERN_WARNING "PCIE: bus_register error: %d\n", retval);
+ goto out;
+ }
+ retval = pci_register_driver(&pcie_portdriver);
+ if (retval)
+ pcie_port_bus_unregister();
+ out:
+ return retval;
+}
+
+module_init(pcie_portdrv_init);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
new file mode 100644
index 000000000..c91185721
--- /dev/null
+++ b/drivers/pci/probe.c
@@ -0,0 +1,2220 @@
+/*
+ * probe.c - PCI detection and setup code
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/of_pci.h>
+#include <linux/pci_hotplug.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/cpumask.h>
+#include <linux/pci-aspm.h>
+#include <asm-generic/pci-bridge.h>
+#include "pci.h"
+
+#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
+#define CARDBUS_RESERVE_BUSNR 3
+
+static struct resource busn_resource = {
+ .name = "PCI busn",
+ .start = 0,
+ .end = 255,
+ .flags = IORESOURCE_BUS,
+};
+
+/* Ugh. Need to stop exporting this to modules. */
+LIST_HEAD(pci_root_buses);
+EXPORT_SYMBOL(pci_root_buses);
+
+static LIST_HEAD(pci_domain_busn_res_list);
+
+struct pci_domain_busn_res {
+ struct list_head list;
+ struct resource res;
+ int domain_nr;
+};
+
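+/*
+ * Look up the bus number resource for a PCI domain, allocating a
+ * fresh 00-ff range the first time the domain is seen.
+ */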
+static struct resource *get_pci_domain_busn_res(int domain_nr)
+{
+ struct pci_domain_busn_res *r;
+
+ list_for_each_entry(r, &pci_domain_busn_res_list, list)
+ if (r->domain_nr == domain_nr)
+ return &r->res;
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return NULL;
+
+ r->domain_nr = domain_nr;
+ r->res.start = 0;
+ r->res.end = 0xff;
+ r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
+
+ list_add_tail(&r->list, &pci_domain_busn_res_list);
+
+ return &r->res;
+}
+
+static int find_anything(struct device *dev, void *data)
+{
+ return 1;
+}
+
+/*
+ * Some device drivers need to know if PCI has been initialized.
+ * Basically, we consider PCI uninitialized when no device can be
+ * found on the pci_bus_type bus.
+ */
+int no_pci_devices(void)
+{
+ struct device *dev;
+ int no_devices;
+
+ dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
+ no_devices = (dev == NULL);
+ put_device(dev);
+ return no_devices;
+}
+EXPORT_SYMBOL(no_pci_devices);
+
+/*
+ * PCI Bus Class
+ */
+static void release_pcibus_dev(struct device *dev)
+{
+ struct pci_bus *pci_bus = to_pci_bus(dev);
+
+ put_device(pci_bus->bridge);
+ pci_bus_remove_resources(pci_bus);
+ pci_release_bus_of_node(pci_bus);
+ kfree(pci_bus);
+}
+
+static struct class pcibus_class = {
+ .name = "pci_bus",
+ .dev_release = &release_pcibus_dev,
+ .dev_groups = pcibus_groups,
+};
+
+static int __init pcibus_class_init(void)
+{
+ return class_register(&pcibus_class);
+}
+postcore_initcall(pcibus_class_init);
+
+static u64 pci_size(u64 base, u64 maxbase, u64 mask)
+{
+ u64 size = mask & maxbase; /* Find the significant bits */
+ if (!size)
+ return 0;
+
+	/* Isolate the lowest set bit to find the decode size,
+	   and subtract 1 to get the extent (size - 1). */
+ size = (size & ~(size-1)) - 1;
+
+ /* base == maxbase can be valid only if the BAR has
+ already been programmed with all 1s. */
+ if (base == maxbase && ((base | size) & mask) != mask)
+ return 0;
+
+ return size;
+}
+
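+/*
+ * Translate the low BAR bits (I/O vs memory, prefetchable, 32/64-bit
+ * memory type) into the corresponding IORESOURCE_* flags.
+ */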
+static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
+{
+ u32 mem_type;
+ unsigned long flags;
+
+ if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
+ flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
+ flags |= IORESOURCE_IO;
+ return flags;
+ }
+
+ flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
+ flags |= IORESOURCE_MEM;
+ if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
+ flags |= IORESOURCE_PREFETCH;
+
+ mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+ switch (mem_type) {
+ case PCI_BASE_ADDRESS_MEM_TYPE_32:
+ break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+ /* 1M mem BAR treated as 32-bit BAR */
+ break;
+ case PCI_BASE_ADDRESS_MEM_TYPE_64:
+ flags |= IORESOURCE_MEM_64;
+ break;
+ default:
+ /* mem unknown type treated as 32-bit BAR */
+ break;
+ }
+ return flags;
+}
+
+#define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
+
+/**
+ * __pci_read_base - read a PCI BAR
+ * @dev: the PCI device
+ * @type: type of the BAR
+ * @res: resource buffer to be filled in
+ * @pos: BAR position in the config space
+ *
+ * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
+ */
+int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ struct resource *res, unsigned int pos)
+{
+ u32 l, sz, mask;
+ u64 l64, sz64, mask64;
+ u16 orig_cmd;
+ struct pci_bus_region region, inverted_region;
+
+ mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+
+ /* No printks while decoding is disabled! */
+ if (!dev->mmio_always_on) {
+ pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
+ if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
+ pci_write_config_word(dev, PCI_COMMAND,
+ orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
+ }
+ }
+
+ res->name = pci_name(dev);
+
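+	/*
+	 * Size the BAR: save the original value, write all 1s, read back
+	 * the size mask, then restore the original value.
+	 */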
+ pci_read_config_dword(dev, pos, &l);
+ pci_write_config_dword(dev, pos, l | mask);
+ pci_read_config_dword(dev, pos, &sz);
+ pci_write_config_dword(dev, pos, l);
+
+ /*
+ * All bits set in sz means the device isn't working properly.
+ * If the BAR isn't implemented, all bits must be 0. If it's a
+	 * memory BAR or a ROM, bit 0 must be clear; if it's an I/O BAR, bit
+	 * 1 must be clear.
+ */
+ if (sz == 0xffffffff)
+ sz = 0;
+
+ /*
+ * I don't know how l can have all bits set. Copied from old code.
+ * Maybe it fixes a bug on some ancient platform.
+ */
+ if (l == 0xffffffff)
+ l = 0;
+
+ if (type == pci_bar_unknown) {
+ res->flags = decode_bar(dev, l);
+ res->flags |= IORESOURCE_SIZEALIGN;
+ if (res->flags & IORESOURCE_IO) {
+ l64 = l & PCI_BASE_ADDRESS_IO_MASK;
+ sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
+ mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
+ } else {
+ l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
+ sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
+ mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+ }
+ } else {
+ res->flags |= (l & IORESOURCE_ROM_ENABLE);
+ l64 = l & PCI_ROM_ADDRESS_MASK;
+ sz64 = sz & PCI_ROM_ADDRESS_MASK;
+ mask64 = (u32)PCI_ROM_ADDRESS_MASK;
+ }
+
+ if (res->flags & IORESOURCE_MEM_64) {
+ pci_read_config_dword(dev, pos + 4, &l);
+ pci_write_config_dword(dev, pos + 4, ~0);
+ pci_read_config_dword(dev, pos + 4, &sz);
+ pci_write_config_dword(dev, pos + 4, l);
+
+ l64 |= ((u64)l << 32);
+ sz64 |= ((u64)sz << 32);
+ mask64 |= ((u64)~0 << 32);
+ }
+
+ if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
+ pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
+
+ if (!sz64)
+ goto fail;
+
+ sz64 = pci_size(l64, sz64, mask64);
+ if (!sz64) {
+ dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
+ pos);
+ goto fail;
+ }
+
+ if (res->flags & IORESOURCE_MEM_64) {
+ if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
+ && sz64 > 0x100000000ULL) {
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+ res->start = 0;
+ res->end = 0;
+ dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
+ pos, (unsigned long long)sz64);
+ goto out;
+ }
+
+ if ((sizeof(pci_bus_addr_t) < 8) && l) {
+ /* Above 32-bit boundary; try to reallocate */
+ res->flags |= IORESOURCE_UNSET;
+ res->start = 0;
+ res->end = sz64;
+ dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
+ pos, (unsigned long long)l64);
+ goto out;
+ }
+ }
+
+ region.start = l64;
+ region.end = l64 + sz64;
+
+ pcibios_bus_to_resource(dev->bus, res, &region);
+ pcibios_resource_to_bus(dev->bus, &inverted_region, res);
+
+ /*
+ * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
+	 * the corresponding resource address (the physical address used by
+	 * the CPU). Converting that resource address back to a bus address
+ * should yield the original BAR value:
+ *
+ * resource_to_bus(bus_to_resource(A)) == A
+ *
+ * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
+ * be claimed by the device.
+ */
+ if (inverted_region.start != region.start) {
+ res->flags |= IORESOURCE_UNSET;
+ res->start = 0;
+ res->end = region.end - region.start;
+ dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
+ pos, (unsigned long long)region.start);
+ }
+
+ goto out;
+
+
+fail:
+ res->flags = 0;
+out:
+ if (res->flags)
+ dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
+
+ return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
+}
+
+static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
+{
+ unsigned int pos, reg;
+
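+	/*
+	 * __pci_read_base() returns 1 for a 64-bit BAR, so the extra
+	 * increment of 'pos' skips the upper-half register of such BARs.
+	 */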
+ for (pos = 0; pos < howmany; pos++) {
+ struct resource *res = &dev->resource[pos];
+ reg = PCI_BASE_ADDRESS_0 + (pos << 2);
+ pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
+ }
+
+ if (rom) {
+ struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
+ dev->rom_base_reg = rom;
+ res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
+ IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
+ IORESOURCE_SIZEALIGN;
+ __pci_read_base(dev, pci_bar_mem32, res, rom);
+ }
+}
+
+static void pci_read_bridge_io(struct pci_bus *child)
+{
+ struct pci_dev *dev = child->self;
+ u8 io_base_lo, io_limit_lo;
+ unsigned long io_mask, io_granularity, base, limit;
+ struct pci_bus_region region;
+ struct resource *res;
+
+ io_mask = PCI_IO_RANGE_MASK;
+ io_granularity = 0x1000;
+ if (dev->io_window_1k) {
+ /* Support 1K I/O space granularity */
+ io_mask = PCI_IO_1K_RANGE_MASK;
+ io_granularity = 0x400;
+ }
+
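+	/*
+	 * The I/O Base/Limit registers encode address bits 15:12 (15:10
+	 * with 1K granularity) in their upper bits; the shift by 8
+	 * reconstructs the window address.
+	 */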
+ res = child->resource[0];
+ pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
+ pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
+ base = (io_base_lo & io_mask) << 8;
+ limit = (io_limit_lo & io_mask) << 8;
+
+ if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
+ u16 io_base_hi, io_limit_hi;
+
+ pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
+ pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
+ base |= ((unsigned long) io_base_hi << 16);
+ limit |= ((unsigned long) io_limit_hi << 16);
+ }
+
+ if (base <= limit) {
+ res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
+ region.start = base;
+ region.end = limit + io_granularity - 1;
+ pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
+ }
+}
+
+static void pci_read_bridge_mmio(struct pci_bus *child)
+{
+ struct pci_dev *dev = child->self;
+ u16 mem_base_lo, mem_limit_lo;
+ unsigned long base, limit;
+ struct pci_bus_region region;
+ struct resource *res;
+
+ res = child->resource[1];
+ pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
+ pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
+ base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
+ limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
+ if (base <= limit) {
+ res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
+ region.start = base;
+ region.end = limit + 0xfffff;
+ pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
+ }
+}
+
+static void pci_read_bridge_mmio_pref(struct pci_bus *child)
+{
+ struct pci_dev *dev = child->self;
+ u16 mem_base_lo, mem_limit_lo;
+ u64 base64, limit64;
+ pci_bus_addr_t base, limit;
+ struct pci_bus_region region;
+ struct resource *res;
+
+ res = child->resource[2];
+ pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
+ pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
+ base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
+ limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
+
+ if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
+ u32 mem_base_hi, mem_limit_hi;
+
+ pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
+ pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
+
+ /*
+ * Some bridges set the base > limit by default, and some
+ * (broken) BIOSes do not initialize them. If we find
+ * this, just assume they are not being used.
+ */
+ if (mem_base_hi <= mem_limit_hi) {
+ base64 |= (u64) mem_base_hi << 32;
+ limit64 |= (u64) mem_limit_hi << 32;
+ }
+ }
+
+ base = (pci_bus_addr_t) base64;
+ limit = (pci_bus_addr_t) limit64;
+
+ if (base != base64) {
+ dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
+ (unsigned long long) base64);
+ return;
+ }
+
+ if (base <= limit) {
+ res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
+ IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ if (res->flags & PCI_PREF_RANGE_TYPE_64)
+ res->flags |= IORESOURCE_MEM_64;
+ region.start = base;
+ region.end = limit + 0xfffff;
+ pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
+ }
+}
+
+void pci_read_bridge_bases(struct pci_bus *child)
+{
+ struct pci_dev *dev = child->self;
+ struct resource *res;
+ int i;
+
+ if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
+ return;
+
+ dev_info(&dev->dev, "PCI bridge to %pR%s\n",
+ &child->busn_res,
+ dev->transparent ? " (subtractive decode)" : "");
+
+ pci_bus_remove_resources(child);
+ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
+ child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
+
+ pci_read_bridge_io(child);
+ pci_read_bridge_mmio(child);
+ pci_read_bridge_mmio_pref(child);
+
+ if (dev->transparent) {
+ pci_bus_for_each_resource(child->parent, res, i) {
+ if (res && res->flags) {
+ pci_bus_add_resource(child, res,
+ PCI_SUBTRACTIVE_DECODE);
+ dev_printk(KERN_DEBUG, &dev->dev,
+ " bridge window %pR (subtractive decode)\n",
+ res);
+ }
+ }
+ }
+}
+
+static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
+{
+ struct pci_bus *b;
+
+ b = kzalloc(sizeof(*b), GFP_KERNEL);
+ if (!b)
+ return NULL;
+
+ INIT_LIST_HEAD(&b->node);
+ INIT_LIST_HEAD(&b->children);
+ INIT_LIST_HEAD(&b->devices);
+ INIT_LIST_HEAD(&b->slots);
+ INIT_LIST_HEAD(&b->resources);
+ b->max_bus_speed = PCI_SPEED_UNKNOWN;
+ b->cur_bus_speed = PCI_SPEED_UNKNOWN;
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ if (parent)
+ b->domain_nr = parent->domain_nr;
+#endif
+ return b;
+}
+
+static void pci_release_host_bridge_dev(struct device *dev)
+{
+ struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
+
+ if (bridge->release_fn)
+ bridge->release_fn(bridge);
+
+ pci_free_resource_list(&bridge->windows);
+
+ kfree(bridge);
+}
+
+static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
+{
+ struct pci_host_bridge *bridge;
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return NULL;
+
+ INIT_LIST_HEAD(&bridge->windows);
+ bridge->bus = b;
+ return bridge;
+}
+
+static const unsigned char pcix_bus_speed[] = {
+ PCI_SPEED_UNKNOWN, /* 0 */
+ PCI_SPEED_66MHz_PCIX, /* 1 */
+ PCI_SPEED_100MHz_PCIX, /* 2 */
+ PCI_SPEED_133MHz_PCIX, /* 3 */
+ PCI_SPEED_UNKNOWN, /* 4 */
+ PCI_SPEED_66MHz_PCIX_ECC, /* 5 */
+ PCI_SPEED_100MHz_PCIX_ECC, /* 6 */
+ PCI_SPEED_133MHz_PCIX_ECC, /* 7 */
+ PCI_SPEED_UNKNOWN, /* 8 */
+ PCI_SPEED_66MHz_PCIX_266, /* 9 */
+ PCI_SPEED_100MHz_PCIX_266, /* A */
+ PCI_SPEED_133MHz_PCIX_266, /* B */
+ PCI_SPEED_UNKNOWN, /* C */
+ PCI_SPEED_66MHz_PCIX_533, /* D */
+ PCI_SPEED_100MHz_PCIX_533, /* E */
+ PCI_SPEED_133MHz_PCIX_533 /* F */
+};
+
+const unsigned char pcie_link_speed[] = {
+ PCI_SPEED_UNKNOWN, /* 0 */
+ PCIE_SPEED_2_5GT, /* 1 */
+ PCIE_SPEED_5_0GT, /* 2 */
+ PCIE_SPEED_8_0GT, /* 3 */
+ PCI_SPEED_UNKNOWN, /* 4 */
+ PCI_SPEED_UNKNOWN, /* 5 */
+ PCI_SPEED_UNKNOWN, /* 6 */
+ PCI_SPEED_UNKNOWN, /* 7 */
+ PCI_SPEED_UNKNOWN, /* 8 */
+ PCI_SPEED_UNKNOWN, /* 9 */
+ PCI_SPEED_UNKNOWN, /* A */
+ PCI_SPEED_UNKNOWN, /* B */
+ PCI_SPEED_UNKNOWN, /* C */
+ PCI_SPEED_UNKNOWN, /* D */
+ PCI_SPEED_UNKNOWN, /* E */
+ PCI_SPEED_UNKNOWN /* F */
+};
+
+void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
+{
+ bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
+}
+EXPORT_SYMBOL_GPL(pcie_update_link_speed);
+
+static unsigned char agp_speeds[] = {
+ AGP_UNKNOWN,
+ AGP_1X,
+ AGP_2X,
+ AGP_4X,
+ AGP_8X
+};
+
+static enum pci_bus_speed agp_speed(int agp3, int agpstat)
+{
+ int index = 0;
+
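+	/*
+	 * agpstat bits 0-2 select 1X/2X/4X; in AGP3 mode the same bits
+	 * mean 4X/8X, hence the offset of 2 (index 5 has no speed).
+	 */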
+ if (agpstat & 4)
+ index = 3;
+ else if (agpstat & 2)
+ index = 2;
+ else if (agpstat & 1)
+ index = 1;
+ else
+ goto out;
+
+ if (agp3) {
+ index += 2;
+ if (index == 5)
+ index = 0;
+ }
+
+ out:
+ return agp_speeds[index];
+}
+
+static void pci_set_bus_speed(struct pci_bus *bus)
+{
+ struct pci_dev *bridge = bus->self;
+ int pos;
+
+ pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
+ if (!pos)
+ pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
+ if (pos) {
+ u32 agpstat, agpcmd;
+
+ pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
+ bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
+
+ pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
+ bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
+ }
+
+ pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
+ if (pos) {
+ u16 status;
+ enum pci_bus_speed max;
+
+ pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
+ &status);
+
+ if (status & PCI_X_SSTATUS_533MHZ) {
+ max = PCI_SPEED_133MHz_PCIX_533;
+ } else if (status & PCI_X_SSTATUS_266MHZ) {
+ max = PCI_SPEED_133MHz_PCIX_266;
+ } else if (status & PCI_X_SSTATUS_133MHZ) {
+ if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
+ max = PCI_SPEED_133MHz_PCIX_ECC;
+ else
+ max = PCI_SPEED_133MHz_PCIX;
+ } else {
+ max = PCI_SPEED_66MHz_PCIX;
+ }
+
+ bus->max_bus_speed = max;
+ bus->cur_bus_speed = pcix_bus_speed[
+ (status & PCI_X_SSTATUS_FREQ) >> 6];
+
+ return;
+ }
+
+ if (pci_is_pcie(bridge)) {
+ u32 linkcap;
+ u16 linksta;
+
+ pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
+ bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
+
+ pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
+ pcie_update_link_speed(bus, linksta);
+ }
+}
+
+static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
+ struct pci_dev *bridge, int busnr)
+{
+ struct pci_bus *child;
+ int i;
+ int ret;
+
+ /*
+	 * Allocate a new bus and inherit settings from the parent.
+ */
+ child = pci_alloc_bus(parent);
+ if (!child)
+ return NULL;
+
+ child->parent = parent;
+ child->ops = parent->ops;
+ child->msi = parent->msi;
+ child->sysdata = parent->sysdata;
+ child->bus_flags = parent->bus_flags;
+
+	/* Initialize some portions of the bus device, but don't register
+	 * it now, as the parent is not properly set up yet.
+ */
+ child->dev.class = &pcibus_class;
+ dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
+
+ /*
+ * Set up the primary, secondary and subordinate
+ * bus numbers.
+ */
+ child->number = child->busn_res.start = busnr;
+ child->primary = parent->busn_res.start;
+ child->busn_res.end = 0xff;
+
+ if (!bridge) {
+ child->dev.parent = parent->bridge;
+ goto add_dev;
+ }
+
+ child->self = bridge;
+ child->bridge = get_device(&bridge->dev);
+ child->dev.parent = child->bridge;
+ pci_set_bus_of_node(child);
+ pci_set_bus_speed(child);
+
+	/* Set up default resource pointers and names. */
+ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
+ child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
+ child->resource[i]->name = child->name;
+ }
+ bridge->subordinate = child;
+
+add_dev:
+ ret = device_register(&child->dev);
+ WARN_ON(ret < 0);
+
+ pcibios_add_bus(child);
+
+ /* Create legacy_io and legacy_mem files for this bus */
+ pci_create_legacy_files(child);
+
+ return child;
+}
+
+struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
+ int busnr)
+{
+ struct pci_bus *child;
+
+ child = pci_alloc_child_bus(parent, dev, busnr);
+ if (child) {
+ down_write(&pci_bus_sem);
+ list_add_tail(&child->node, &parent->children);
+ up_write(&pci_bus_sem);
+ }
+ return child;
+}
+EXPORT_SYMBOL(pci_add_new_bus);
+
+static void pci_enable_crs(struct pci_dev *pdev)
+{
+ u16 root_cap = 0;
+
+ /* Enable CRS Software Visibility if supported */
+ pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
+ if (root_cap & PCI_EXP_RTCAP_CRSVIS)
+ pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
+ PCI_EXP_RTCTL_CRSSVE);
+}
+
+/*
+ * If it's a bridge, configure it and scan the bus behind it.
+ * For CardBus bridges, we don't scan behind as the devices will
+ * be handled by the bridge driver itself.
+ *
+ * We need to process bridges in two passes -- first we scan those
+ * already configured by the BIOS and after we are done with all of
+ * them, we proceed to assigning numbers to the remaining buses in
+ * order to avoid overlaps between old and new bus numbers.
+ */
+int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
+{
+ struct pci_bus *child;
+ int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
+ u32 buses, i, j = 0;
+ u16 bctl;
+ u8 primary, secondary, subordinate;
+ int broken = 0;
+
+ pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
+ primary = buses & 0xFF;
+ secondary = (buses >> 8) & 0xFF;
+ subordinate = (buses >> 16) & 0xFF;
+
+ dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
+ secondary, subordinate, pass);
+
+ if (!primary && (primary != bus->number) && secondary && subordinate) {
+ dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
+ primary = bus->number;
+ }
+
+ /* Check if setup is sensible at all */
+ if (!pass &&
+ (primary != bus->number || secondary <= bus->number ||
+ secondary > subordinate)) {
+ dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
+ secondary, subordinate);
+ broken = 1;
+ }
+
+	/* Disable Master-Abort Mode during probing to avoid reporting
+	   of bus errors on some architectures */
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
+ bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
+
+ pci_enable_crs(dev);
+
+ if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
+ !is_cardbus && !broken) {
+ unsigned int cmax;
+ /*
+ * Bus already configured by firmware, process it in the first
+ * pass and just note the configuration.
+ */
+ if (pass)
+ goto out;
+
+ /*
+ * The bus might already exist for two reasons: Either we are
+ * rescanning the bus or the bus is reachable through more than
+ * one bridge. The second case can happen with the i450NX
+ * chipset.
+ */
+ child = pci_find_bus(pci_domain_nr(bus), secondary);
+ if (!child) {
+ child = pci_add_new_bus(bus, dev, secondary);
+ if (!child)
+ goto out;
+ child->primary = primary;
+ pci_bus_insert_busn_res(child, secondary, subordinate);
+ child->bridge_ctl = bctl;
+ }
+
+ cmax = pci_scan_child_bus(child);
+ if (cmax > subordinate)
+ dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
+ subordinate, cmax);
+ /* subordinate should equal child->busn_res.end */
+ if (subordinate > max)
+ max = subordinate;
+ } else {
+ /*
+ * We need to assign a number to this bus which we always
+ * do in the second pass.
+ */
+ if (!pass) {
+ if (pcibios_assign_all_busses() || broken || is_cardbus)
+ /* Temporarily disable forwarding of the
+ configuration cycles on all bridges in
+ this bus segment to avoid possible
+ conflicts in the second pass between two
+ bridges programmed with overlapping
+ bus ranges. */
+ pci_write_config_dword(dev, PCI_PRIMARY_BUS,
+ buses & ~0xffffff);
+ goto out;
+ }
+
+ /* Clear errors */
+ pci_write_config_word(dev, PCI_STATUS, 0xffff);
+
+ /* Prevent assigning a bus number that already exists.
+ * This can happen when a bridge is hot-plugged, so in
+ * this case we only re-scan this bus. */
+ child = pci_find_bus(pci_domain_nr(bus), max+1);
+ if (!child) {
+ child = pci_add_new_bus(bus, dev, max+1);
+ if (!child)
+ goto out;
+ pci_bus_insert_busn_res(child, max+1, 0xff);
+ }
+ max++;
+ buses = (buses & 0xff000000)
+ | ((unsigned int)(child->primary) << 0)
+ | ((unsigned int)(child->busn_res.start) << 8)
+ | ((unsigned int)(child->busn_res.end) << 16);
+
+ /*
+ * yenta.c forces a secondary latency timer of 176.
+ * Copy that behaviour here.
+ */
+ if (is_cardbus) {
+ buses &= ~0xff000000;
+ buses |= CARDBUS_LATENCY_TIMER << 24;
+ }
+
+ /*
+ * We need to blast all three values with a single write.
+ */
+ pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
+
+ if (!is_cardbus) {
+ child->bridge_ctl = bctl;
+ max = pci_scan_child_bus(child);
+ } else {
+ /*
+ * For CardBus bridges, we leave 4 bus numbers
+ * as cards with a PCI-to-PCI bridge can be
+ * inserted later.
+ */
+ for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
+ struct pci_bus *parent = bus;
+ if (pci_find_bus(pci_domain_nr(bus),
+ max+i+1))
+ break;
+ while (parent->parent) {
+ if ((!pcibios_assign_all_busses()) &&
+ (parent->busn_res.end > max) &&
+ (parent->busn_res.end <= max+i)) {
+ j = 1;
+ }
+ parent = parent->parent;
+ }
+ if (j) {
+ /*
+ * Often, there are two cardbus bridges
+ * -- try to leave one valid bus number
+ * for each one.
+ */
+ i /= 2;
+ break;
+ }
+ }
+ max += i;
+ }
+ /*
+ * Set the subordinate bus number to its real value.
+ */
+ pci_bus_update_busn_res_end(child, max);
+ pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
+ }
+
+ sprintf(child->name,
+ (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
+ pci_domain_nr(bus), child->number);
+
+ /* Has only triggered on CardBus, fixup is in yenta_socket */
+ while (bus->parent) {
+ if ((child->busn_res.end > bus->busn_res.end) ||
+ (child->number > bus->busn_res.end) ||
+ (child->number < bus->number) ||
+ (child->busn_res.end < bus->number)) {
+ dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
+ &child->busn_res,
+ (bus->number > child->busn_res.end &&
+ bus->busn_res.end < child->number) ?
+ "wholly" : "partially",
+ bus->self->transparent ? " transparent" : "",
+ dev_name(&bus->dev),
+ &bus->busn_res);
+ }
+ bus = bus->parent;
+ }
+
+out:
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
+
+ return max;
+}
+EXPORT_SYMBOL(pci_scan_bridge);
+
+/*
+ * Read interrupt line and base address registers.
+ * The architecture-dependent code can tweak these, of course.
+ */
+static void pci_read_irq(struct pci_dev *dev)
+{
+ unsigned char irq;
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
+ dev->pin = irq;
+ if (irq)
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
+ dev->irq = irq;
+}
+
+void set_pcie_port_type(struct pci_dev *pdev)
+{
+ int pos;
+ u16 reg16;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return;
+ pdev->pcie_cap = pos;
+ pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+ pdev->pcie_flags_reg = reg16;
+ pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
+ pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
+}
+
+void set_pcie_hotplug_bridge(struct pci_dev *pdev)
+{
+ u32 reg32;
+
+ pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
+ if (reg32 & PCI_EXP_SLTCAP_HPC)
+ pdev->is_hotplug_bridge = 1;
+}
+
+/**
+ * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
+ * @dev: PCI device
+ *
+ * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
+ * when forwarding a type1 configuration request the bridge must check that
+ * the extended register address field is zero. The bridge is not permitted
+ * to forward the transaction and must handle it as an Unsupported Request.
+ * Some bridges do not follow this rule and simply drop the extended register
+ * bits, resulting in the standard config space being aliased, every 256
+ * bytes across the entire configuration space. Test for this condition by
+ * comparing the first dword of each potential alias to the vendor/device ID.
+ * Known offenders:
+ * ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
+ * AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
+ */
+static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
+{
+#ifdef CONFIG_PCI_QUIRKS
+ int pos;
+ u32 header, tmp;
+
+ pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
+
+ for (pos = PCI_CFG_SPACE_SIZE;
+ pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
+ if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
+ || header != tmp)
+ return false;
+ }
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+/**
+ * pci_cfg_space_size_ext - get the configuration space size of the PCI device.
+ * @dev: PCI device
+ *
+ * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
+ * have 4096 bytes. Even if the device is capable, that doesn't mean we can
+ * access it. Maybe we don't have a way to generate extended config space
+ * accesses, or the device is behind a reverse Express bridge. So we try
+ * reading the dword at 0x100 which must either be 0 or a valid extended
+ * capability header.
+ */
+static int pci_cfg_space_size_ext(struct pci_dev *dev)
+{
+ u32 status;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
+ goto fail;
+ if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
+ goto fail;
+
+ return PCI_CFG_SPACE_EXP_SIZE;
+
+ fail:
+ return PCI_CFG_SPACE_SIZE;
+}
+
+int pci_cfg_space_size(struct pci_dev *dev)
+{
+ int pos;
+ u32 status;
+ u16 class;
+
+ class = dev->class >> 8;
+ if (class == PCI_CLASS_BRIDGE_HOST)
+ return pci_cfg_space_size_ext(dev);
+
+ if (!pci_is_pcie(dev)) {
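+		/*
+		 * Only PCI-X Mode 2 (266/533 MHz capable) devices have
+		 * extended config space; plain PCI-X and conventional
+		 * PCI devices are limited to 256 bytes.
+		 */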
+ pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (!pos)
+ goto fail;
+
+ pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
+ if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
+ goto fail;
+ }
+
+ return pci_cfg_space_size_ext(dev);
+
+ fail:
+ return PCI_CFG_SPACE_SIZE;
+}
+
+#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
+
+/**
+ * pci_setup_device - fill in class and map information of a device
+ * @dev: the device structure to fill
+ *
+ * Initialize the device structure with information about the device's
+ * vendor, class, memory and I/O-space addresses, IRQ lines, etc.
+ * Called at initialisation of the PCI subsystem and by CardBus services.
+ * Returns 0 on success and negative if unknown type of device (not normal,
+ * bridge or CardBus).
+ */
+int pci_setup_device(struct pci_dev *dev)
+{
+ u32 class;
+ u8 hdr_type;
+ struct pci_slot *slot;
+ int pos = 0;
+ struct pci_bus_region region;
+ struct resource *res;
+
+ if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
+ return -EIO;
+
+ dev->sysdata = dev->bus->sysdata;
+ dev->dev.parent = dev->bus->bridge;
+ dev->dev.bus = &pci_bus_type;
+ dev->hdr_type = hdr_type & 0x7f;
+ dev->multifunction = !!(hdr_type & 0x80);
+ dev->error_state = pci_channel_io_normal;
+ set_pcie_port_type(dev);
+
+ list_for_each_entry(slot, &dev->bus->slots, list)
+ if (PCI_SLOT(dev->devfn) == slot->number)
+ dev->slot = slot;
+
+ /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
+ set this higher, assuming the system even supports it. */
+ dev->dma_mask = 0xffffffff;
+
+ dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
+ dev->bus->number, PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+
+ pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
+ dev->revision = class & 0xff;
+ dev->class = class >> 8; /* upper 3 bytes */
+
+ dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
+ dev->vendor, dev->device, dev->hdr_type, dev->class);
+
+ /* need to have dev->class ready */
+ dev->cfg_size = pci_cfg_space_size(dev);
+
+ /* "Unknown power state" */
+ dev->current_state = PCI_UNKNOWN;
+
+ /* Early fixups, before probing the BARs */
+ pci_fixup_device(pci_fixup_early, dev);
+ /* device class may be changed after fixup */
+ class = dev->class >> 8;
+
+ switch (dev->hdr_type) { /* header type */
+ case PCI_HEADER_TYPE_NORMAL: /* standard header */
+ if (class == PCI_CLASS_BRIDGE_PCI)
+ goto bad;
+ pci_read_irq(dev);
+ pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
+ pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
+ pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
+
+ /*
+ * Do the ugly legacy mode stuff here rather than broken chip
+ * quirk code. Legacy mode ATA controllers have fixed
+ * addresses. These are not always echoed in BAR0-3, and
+ * BAR0-3 in a few cases contain junk!
+ */
+ if (class == PCI_CLASS_STORAGE_IDE) {
+ u8 progif;
+ pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
+ if ((progif & 1) == 0) {
+ region.start = 0x1F0;
+ region.end = 0x1F7;
+ res = &dev->resource[0];
+ res->flags = LEGACY_IO_RESOURCE;
+ pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
+ res);
+ region.start = 0x3F6;
+ region.end = 0x3F6;
+ res = &dev->resource[1];
+ res->flags = LEGACY_IO_RESOURCE;
+ pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
+ res);
+ }
+ if ((progif & 4) == 0) {
+ region.start = 0x170;
+ region.end = 0x177;
+ res = &dev->resource[2];
+ res->flags = LEGACY_IO_RESOURCE;
+ pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
+ res);
+ region.start = 0x376;
+ region.end = 0x376;
+ res = &dev->resource[3];
+ res->flags = LEGACY_IO_RESOURCE;
+ pcibios_bus_to_resource(dev->bus, res, &region);
+ dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
+ res);
+ }
+ }
+ break;
+
+ case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
+ if (class != PCI_CLASS_BRIDGE_PCI)
+ goto bad;
+		/* The PCI-to-PCI bridge spec requires that a subtractive
+		   decoding (i.e. transparent) bridge have a programming
+		   interface code of 0x01. */
+ pci_read_irq(dev);
+ dev->transparent = ((dev->class & 0xff) == 1);
+ pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
+ set_pcie_hotplug_bridge(dev);
+ pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
+ pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
+ }
+ break;
+
+ case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
+ if (class != PCI_CLASS_BRIDGE_CARDBUS)
+ goto bad;
+ pci_read_irq(dev);
+ pci_read_bases(dev, 1, 0);
+ pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
+ pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
+ break;
+
+ default: /* unknown header */
+ dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
+ dev->hdr_type);
+ return -EIO;
+
+ bad:
+ dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
+ dev->class, dev->hdr_type);
+ dev->class = PCI_CLASS_NOT_DEFINED;
+ }
+
+ /* We found a fine healthy device, go go go... */
+ return 0;
+}
+
+static struct hpp_type0 pci_default_type0 = {
+ .revision = 1,
+ .cache_line_size = 8,
+ .latency_timer = 0x40,
+ .enable_serr = 0,
+ .enable_perr = 0,
+};
+
+static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
+{
+ u16 pci_cmd, pci_bctl;
+
+ if (!hpp)
+ hpp = &pci_default_type0;
+
+ if (hpp->revision > 1) {
+ dev_warn(&dev->dev,
+ "PCI settings rev %d not supported; using defaults\n",
+ hpp->revision);
+ hpp = &pci_default_type0;
+ }
+
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
+ pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
+ if (hpp->enable_serr)
+ pci_cmd |= PCI_COMMAND_SERR;
+ if (hpp->enable_perr)
+ pci_cmd |= PCI_COMMAND_PARITY;
+ pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
+
+ /* Program bridge control value */
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
+ hpp->latency_timer);
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
+ if (hpp->enable_serr)
+ pci_bctl |= PCI_BRIDGE_CTL_SERR;
+ if (hpp->enable_perr)
+ pci_bctl |= PCI_BRIDGE_CTL_PARITY;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
+ }
+}
+
+static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
+{
+ if (hpp)
+ dev_warn(&dev->dev, "PCI-X settings not supported\n");
+}
+
+static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
+{
+ int pos;
+ u32 reg32;
+
+ if (!hpp)
+ return;
+
+ if (hpp->revision > 1) {
+ dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
+ hpp->revision);
+ return;
+ }
+
+ /*
+ * Don't allow _HPX to change MPS or MRRS settings. We manage
+ * those to make sure they're consistent with the rest of the
+ * platform.
+ */
+ hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
+ PCI_EXP_DEVCTL_READRQ;
+ hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
+ PCI_EXP_DEVCTL_READRQ);
+
+ /* Initialize Device Control Register */
+ pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
+ ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
+
+ /* Initialize Link Control Register */
+ if (pcie_cap_has_lnkctl(dev))
+ pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
+ ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
+
+ /* Find Advanced Error Reporting Enhanced Capability */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return;
+
+ /* Initialize Uncorrectable Error Mask Register */
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
+ reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
+
+ /* Initialize Uncorrectable Error Severity Register */
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
+ reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
+
+ /* Initialize Correctable Error Mask Register */
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
+ reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
+
+ /* Initialize Advanced Error Capabilities and Control Register */
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+ /*
+ * FIXME: The following two registers are not supported yet.
+ *
+ * o Secondary Uncorrectable Error Severity Register
+ * o Secondary Uncorrectable Error Mask Register
+ */
+}
+
+static void pci_configure_device(struct pci_dev *dev)
+{
+ struct hotplug_params hpp;
+ int ret;
+
+ memset(&hpp, 0, sizeof(hpp));
+ ret = pci_get_hp_params(dev, &hpp);
+ if (ret)
+ return;
+
+ program_hpp_type2(dev, hpp.t2);
+ program_hpp_type1(dev, hpp.t1);
+ program_hpp_type0(dev, hpp.t0);
+}
+
+static void pci_release_capabilities(struct pci_dev *dev)
+{
+ pci_vpd_release(dev);
+ pci_iov_release(dev);
+ pci_free_cap_save_buffers(dev);
+}
+
+/**
+ * pci_release_dev - free a pci device structure when all users of it are finished.
+ * @dev: device that's been disconnected
+ *
+ * Will be called only by the device core when all users of this pci device are
+ * done.
+ */
+static void pci_release_dev(struct device *dev)
+{
+ struct pci_dev *pci_dev;
+
+ pci_dev = to_pci_dev(dev);
+ pci_release_capabilities(pci_dev);
+ pci_release_of_node(pci_dev);
+ pcibios_release_device(pci_dev);
+ pci_bus_put(pci_dev->bus);
+ kfree(pci_dev->driver_override);
+ kfree(pci_dev);
+}
+
+struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ INIT_LIST_HEAD(&dev->bus_list);
+ dev->dev.type = &pci_dev_type;
+ dev->bus = pci_bus_get(bus);
+
+ return dev;
+}
+EXPORT_SYMBOL(pci_alloc_dev);
+
+bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
+ int crs_timeout)
+{
+ int delay = 1;
+
+ if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
+ return false;
+
+ /* some broken boards return 0 or ~0 if a slot is empty: */
+ if (*l == 0xffffffff || *l == 0x00000000 ||
+ *l == 0x0000ffff || *l == 0xffff0000)
+ return false;
+
+ /*
+ * Configuration Request Retry Status. Some root ports return the
+ * actual device ID instead of the synthetic ID (0xFFFF) required
+ * by the PCIe spec. Ignore the device ID and only check for
+ * (vendor id == 1).
+ */
+ while ((*l & 0xffff) == 0x0001) {
+ if (!crs_timeout)
+ return false;
+
+ msleep(delay);
+ delay *= 2;
+ if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
+ return false;
+ /* Card hasn't responded in 60 seconds? Must be stuck. */
+ if (delay > crs_timeout) {
+ printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
+ pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn));
+ return false;
+ }
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
+
+/*
+ * Read the config data for a PCI device, sanity-check it
+ * and fill in the dev structure...
+ */
+static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
+{
+ struct pci_dev *dev;
+ u32 l;
+
+ if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
+ return NULL;
+
+ dev = pci_alloc_dev(bus);
+ if (!dev)
+ return NULL;
+
+ dev->devfn = devfn;
+ dev->vendor = l & 0xffff;
+ dev->device = (l >> 16) & 0xffff;
+
+ pci_set_of_node(dev);
+
+ if (pci_setup_device(dev)) {
+ pci_bus_put(dev->bus);
+ kfree(dev);
+ return NULL;
+ }
+
+ return dev;
+}
+
+static void pci_init_capabilities(struct pci_dev *dev)
+{
+ /* MSI/MSI-X list */
+ pci_msi_init_pci_dev(dev);
+
+ /* Buffers for saving PCIe and PCI-X capabilities */
+ pci_allocate_cap_save_buffers(dev);
+
+ /* Power Management */
+ pci_pm_init(dev);
+
+ /* Vital Product Data */
+ pci_vpd_pci22_init(dev);
+
+ /* Alternative Routing-ID Forwarding */
+ pci_configure_ari(dev);
+
+ /* Single Root I/O Virtualization */
+ pci_iov_init(dev);
+
+ /* Enable ACS P2P upstream forwarding */
+ pci_enable_acs(dev);
+}
+
+void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
+{
+ int ret;
+
+ pci_configure_device(dev);
+
+ device_initialize(&dev->dev);
+ dev->dev.release = pci_release_dev;
+
+ set_dev_node(&dev->dev, pcibus_to_node(bus));
+ dev->dev.dma_mask = &dev->dma_mask;
+ dev->dev.dma_parms = &dev->dma_parms;
+ dev->dev.coherent_dma_mask = 0xffffffffull;
+ of_pci_dma_configure(dev);
+
+ pci_set_dma_max_seg_size(dev, 65536);
+ pci_set_dma_seg_boundary(dev, 0xffffffff);
+
+ /* Fix up broken headers */
+ pci_fixup_device(pci_fixup_header, dev);
+
+ /* moved out from quirk header fixup code */
+ pci_reassigndev_resource_alignment(dev);
+
+ /* Clear the state_saved flag. */
+ dev->state_saved = false;
+
+ /* Initialize various capabilities */
+ pci_init_capabilities(dev);
+
+ /*
+ * Add the device to our list of discovered devices
+ * and the bus list for fixup functions, etc.
+ */
+ down_write(&pci_bus_sem);
+ list_add_tail(&dev->bus_list, &bus->devices);
+ up_write(&pci_bus_sem);
+
+ ret = pcibios_add_device(dev);
+ WARN_ON(ret < 0);
+
+ /* Notifier could use PCI capabilities */
+ dev->match_driver = false;
+ ret = device_add(&dev->dev);
+ WARN_ON(ret < 0);
+}
+
+struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
+{
+ struct pci_dev *dev;
+
+ dev = pci_get_slot(bus, devfn);
+ if (dev) {
+ pci_dev_put(dev);
+ return dev;
+ }
+
+ dev = pci_scan_device(bus, devfn);
+ if (!dev)
+ return NULL;
+
+ pci_device_add(dev, bus);
+
+ return dev;
+}
+EXPORT_SYMBOL(pci_scan_single_device);
+
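+/*
+ * Return the number of the next function to scan: with ARI enabled,
+ * follow the Next Function Number chain in the ARI capability;
+ * otherwise iterate over the eight traditional function numbers.
+ */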
+static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
+{
+ int pos;
+ u16 cap = 0;
+ unsigned next_fn;
+
+ if (pci_ari_enabled(bus)) {
+ if (!dev)
+ return 0;
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
+ if (!pos)
+ return 0;
+
+ pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
+ next_fn = PCI_ARI_CAP_NFN(cap);
+ if (next_fn <= fn)
+ return 0; /* protect against malformed list */
+
+ return next_fn;
+ }
+
+ /* dev may be NULL for non-contiguous multifunction devices */
+ if (!dev || dev->multifunction)
+ return (fn + 1) % 8;
+
+ return 0;
+}
+
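+/*
+ * A PCIe root port or downstream port leads to a point-to-point link,
+ * so only device 0 can exist below it; scanning the other device
+ * numbers would be wasted effort.
+ */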
+static int only_one_child(struct pci_bus *bus)
+{
+ struct pci_dev *parent = bus->self;
+
+ if (!parent || !pci_is_pcie(parent))
+ return 0;
+ if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
+ return 1;
+ if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
+ !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
+ return 1;
+ return 0;
+}
+
+/**
+ * pci_scan_slot - scan a PCI slot on a bus for devices.
+ * @bus: PCI bus to scan
+ * @devfn: slot number to scan (must have zero function).
+ *
+ * Scan a PCI slot on the specified PCI bus for devices, adding
+ * discovered devices to the @bus->devices list. New devices
+ * will not have is_added set.
+ *
+ * Returns the number of new devices found.
+ */
+int pci_scan_slot(struct pci_bus *bus, int devfn)
+{
+ unsigned fn, nr = 0;
+ struct pci_dev *dev;
+
+ if (only_one_child(bus) && (devfn > 0))
+ return 0; /* Already scanned the entire slot */
+
+ dev = pci_scan_single_device(bus, devfn);
+ if (!dev)
+ return 0;
+ if (!dev->is_added)
+ nr++;
+
+ for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
+ dev = pci_scan_single_device(bus, devfn + fn);
+ if (dev) {
+ if (!dev->is_added)
+ nr++;
+ dev->multifunction = 1;
+ }
+ }
+
+	/* A PCIe link has only one device below it; init its ASPM state now */
+ if (bus->self && nr)
+ pcie_aspm_init_link_state(bus->self);
+
+ return nr;
+}
+EXPORT_SYMBOL(pci_scan_slot);
+
+static int pcie_find_smpss(struct pci_dev *dev, void *data)
+{
+ u8 *smpss = data;
+
+ if (!pci_is_pcie(dev))
+ return 0;
+
+ /*
+ * We don't have a way to change MPS settings on devices that have
+ * drivers attached. A hot-added device might support only the minimum
+ * MPS setting (MPS=128). Therefore, if the fabric contains a bridge
+ * where devices may be hot-added, we limit the fabric MPS to 128 so
+ * hot-added devices will work correctly.
+ *
+ * However, if we hot-add a device to a slot directly below a Root
+ * Port, it's impossible for there to be other existing devices below
+ * the port. We don't limit the MPS in this case because we can
+ * reconfigure MPS on both the Root Port and the hot-added device,
+ * and there are no other devices involved.
+ *
+ * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
+ */
+ if (dev->is_hotplug_bridge &&
+ pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+ *smpss = 0;
+
+ if (*smpss > dev->pcie_mpss)
+ *smpss = dev->pcie_mpss;
+
+ return 0;
+}
+
+static void pcie_write_mps(struct pci_dev *dev, int mps)
+{
+ int rc;
+
+ if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
+ mps = 128 << dev->pcie_mpss;
+
+ if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
+ dev->bus->self)
+ /* For "Performance", the assumption is made that
+ * downstream communication will never be larger than
+ * the MRRS. So, the MPS only needs to be configured
+ * for the upstream communication. This being the case,
+ * walk from the top down and set the MPS of the child
+ * to that of the parent bus.
+ *
+ * Configure the device MPS with the smaller of the
+ * device MPSS or the bridge MPS (which is assumed to be
+ * properly configured at this point to the largest
+ * allowable MPS based on its parent bus).
+ */
+ mps = min(mps, pcie_get_mps(dev->bus->self));
+ }
+
+ rc = pcie_set_mps(dev, mps);
+ if (rc)
+ dev_err(&dev->dev, "Failed attempting to set the MPS\n");
+}
+
+static void pcie_write_mrrs(struct pci_dev *dev)
+{
+ int rc, mrrs;
+
+ /* In the "safe" case, do not configure the MRRS. There appear to be
+ * issues with setting MRRS to 0 on a number of devices.
+ */
+ if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
+ return;
+
+ /* For Max performance, the MRRS must be set to the largest supported
+ * value. However, it cannot be configured larger than the MPS the
+ * device or the bus can support. This should already be properly
+ * configured by a prior call to pcie_write_mps.
+ */
+ mrrs = pcie_get_mps(dev);
+
+	/* MRRS is an R/W register. Invalid values can be written, but a
+ * subsequent read will verify if the value is acceptable or not.
+ * If the MRRS value provided is not acceptable (e.g., too large),
+ * shrink the value until it is acceptable to the HW.
+ */
+ while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
+ rc = pcie_set_readrq(dev, mrrs);
+ if (!rc)
+ break;
+
+ dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
+ mrrs /= 2;
+ }
+
+ if (mrrs < 128)
+ dev_err(&dev->dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n");
+}
+
+static void pcie_bus_detect_mps(struct pci_dev *dev)
+{
+ struct pci_dev *bridge = dev->bus->self;
+ int mps, p_mps;
+
+ if (!bridge)
+ return;
+
+ mps = pcie_get_mps(dev);
+ p_mps = pcie_get_mps(bridge);
+
+ if (mps != p_mps)
+ dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+ mps, pci_name(bridge), p_mps);
+}
+
+static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
+{
+ int mps, orig_mps;
+
+ if (!pci_is_pcie(dev))
+ return 0;
+
+ if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
+ pcie_bus_detect_mps(dev);
+ return 0;
+ }
+
+ mps = 128 << *(u8 *)data;
+ orig_mps = pcie_get_mps(dev);
+
+ pcie_write_mps(dev, mps);
+ pcie_write_mrrs(dev);
+
+ dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
+ pcie_get_mps(dev), 128 << dev->pcie_mpss,
+ orig_mps, pcie_get_readrq(dev));
+
+ return 0;
+}
+
+/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
+ * parents then children fashion. If this changes, then this code will not
+ * work as designed.
+ */
+void pcie_bus_configure_settings(struct pci_bus *bus)
+{
+ u8 smpss = 0;
+
+ if (!bus->self)
+ return;
+
+ if (!pci_is_pcie(bus->self))
+ return;
+
+ /* FIXME - Peer to peer DMA is possible, though the endpoint would need
+ * to be aware of the MPS of the destination. To work around this,
+ * simply force the MPS of the entire system to the smallest possible.
+ */
+ if (pcie_bus_config == PCIE_BUS_PEER2PEER)
+ smpss = 0;
+
+ if (pcie_bus_config == PCIE_BUS_SAFE) {
+ smpss = bus->self->pcie_mpss;
+
+ pcie_find_smpss(bus->self, &smpss);
+ pci_walk_bus(bus, pcie_find_smpss, &smpss);
+ }
+
+ pcie_bus_configure_set(bus->self, &smpss);
+ pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
+}
+EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
+
+unsigned int pci_scan_child_bus(struct pci_bus *bus)
+{
+ unsigned int devfn, pass, max = bus->busn_res.start;
+ struct pci_dev *dev;
+
+ dev_dbg(&bus->dev, "scanning bus\n");
+
+ /* Go find them, Rover! */
+ for (devfn = 0; devfn < 0x100; devfn += 8)
+ pci_scan_slot(bus, devfn);
+
+ /* Reserve buses for SR-IOV capability. */
+ max += pci_iov_bus_range(bus);
+
+ /*
+ * After performing arch-dependent fixup of the bus, look behind
+ * all PCI-to-PCI bridges on this bus.
+ */
+ if (!bus->is_added) {
+ dev_dbg(&bus->dev, "fixups for bus\n");
+ pcibios_fixup_bus(bus);
+ bus->is_added = 1;
+ }
+
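+	/*
+	 * Two passes over the bridges: pass 0 processes bridges already
+	 * configured by firmware, pass 1 assigns bus numbers to the rest
+	 * (see pci_scan_bridge()).
+	 */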
+ for (pass = 0; pass < 2; pass++)
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (pci_is_bridge(dev))
+ max = pci_scan_bridge(bus, dev, max, pass);
+ }
+
+ /*
+ * We've scanned the bus and so we know all about what's on
+ * the other side of any bridges that may be on this bus plus
+ * any devices.
+ *
+ * Return how far we've got finding sub-buses.
+ */
+ dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
+ return max;
+}
+EXPORT_SYMBOL_GPL(pci_scan_child_bus);
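+
+/*
+ * Note on the two passes above (an assumption about the contract of
+ * pci_scan_bridge(), which is defined elsewhere in this file): pass 0
+ * is expected to handle bridges the firmware already configured, and
+ * pass 1 to assign bus numbers to the remaining ones.
+ */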
+
+/**
+ * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
+ * @bridge: Host bridge to set up.
+ *
+ * Default empty implementation. Replace with an architecture-specific setup
+ * routine, if necessary.
+ */
+int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ return 0;
+}
+
+void __weak pcibios_add_bus(struct pci_bus *bus)
+{
+}
+
+void __weak pcibios_remove_bus(struct pci_bus *bus)
+{
+}
+
+struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata, struct list_head *resources)
+{
+ int error;
+ struct pci_host_bridge *bridge;
+ struct pci_bus *b, *b2;
+ struct resource_entry *window, *n;
+ struct resource *res;
+ resource_size_t offset;
+ char bus_addr[64];
+ char *fmt;
+
+ b = pci_alloc_bus(NULL);
+ if (!b)
+ return NULL;
+
+ b->sysdata = sysdata;
+ b->ops = ops;
+ b->number = b->busn_res.start = bus;
+ pci_bus_assign_domain_nr(b, parent);
+ b2 = pci_find_bus(pci_domain_nr(b), bus);
+ if (b2) {
+ /* If we already got to this bus through a different bridge, ignore it */
+ dev_dbg(&b2->dev, "bus already known\n");
+ goto err_out;
+ }
+
+ bridge = pci_alloc_host_bridge(b);
+ if (!bridge)
+ goto err_out;
+
+ bridge->dev.parent = parent;
+ bridge->dev.release = pci_release_host_bridge_dev;
+ dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
+ error = pcibios_root_bridge_prepare(bridge);
+ if (error) {
+ kfree(bridge);
+ goto err_out;
+ }
+
+ error = device_register(&bridge->dev);
+ if (error) {
+ put_device(&bridge->dev);
+ goto err_out;
+ }
+ b->bridge = get_device(&bridge->dev);
+ device_enable_async_suspend(b->bridge);
+ pci_set_bus_of_node(b);
+
+ if (!parent)
+ set_dev_node(b->bridge, pcibus_to_node(b));
+
+ b->dev.class = &pcibus_class;
+ b->dev.parent = b->bridge;
+ dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
+ error = device_register(&b->dev);
+ if (error)
+ goto class_dev_reg_err;
+
+ pcibios_add_bus(b);
+
+ /* Create legacy_io and legacy_mem files for this bus */
+ pci_create_legacy_files(b);
+
+ if (parent)
+ dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
+ else
+ printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
+
+ /* Add initial resources to the bus */
+ resource_list_for_each_entry_safe(window, n, resources) {
+ list_move_tail(&window->node, &bridge->windows);
+ res = window->res;
+ offset = window->offset;
+ if (res->flags & IORESOURCE_BUS)
+ pci_bus_insert_busn_res(b, bus, res->end);
+ else
+ pci_bus_add_resource(b, res, 0);
+ if (offset) {
+ if (resource_type(res) == IORESOURCE_IO)
+ fmt = " (bus address [%#06llx-%#06llx])";
+ else
+ fmt = " (bus address [%#010llx-%#010llx])";
+ snprintf(bus_addr, sizeof(bus_addr), fmt,
+ (unsigned long long) (res->start - offset),
+ (unsigned long long) (res->end - offset));
+ } else
+ bus_addr[0] = '\0';
+ dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
+ }
+
+ down_write(&pci_bus_sem);
+ list_add_tail(&b->node, &pci_root_buses);
+ up_write(&pci_bus_sem);
+
+ return b;
+
+class_dev_reg_err:
+ put_device(&bridge->dev);
+ device_unregister(&bridge->dev);
+err_out:
+ kfree(b);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pci_create_root_bus);
+
+int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
+{
+ struct resource *res = &b->busn_res;
+ struct resource *parent_res, *conflict;
+
+ res->start = bus;
+ res->end = bus_max;
+ res->flags = IORESOURCE_BUS;
+
+ if (!pci_is_root_bus(b))
+ parent_res = &b->parent->busn_res;
+ else {
+ parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
+ res->flags |= IORESOURCE_PCI_FIXED;
+ }
+
+ conflict = request_resource_conflict(parent_res, res);
+
+ if (conflict)
+ dev_printk(KERN_DEBUG, &b->dev,
+ "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
+ res, pci_is_root_bus(b) ? "domain " : "",
+ parent_res, conflict->name, conflict);
+
+ return conflict == NULL;
+}
+
+int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
+{
+ struct resource *res = &b->busn_res;
+ struct resource old_res = *res;
+ resource_size_t size;
+ int ret;
+
+ if (res->start > bus_max)
+ return -EINVAL;
+
+ size = bus_max - res->start + 1;
+ ret = adjust_resource(res, res->start, size);
+ dev_printk(KERN_DEBUG, &b->dev,
+ "busn_res: %pR end %s updated to %02x\n",
+ &old_res, ret ? "can not be" : "is", bus_max);
+
+ if (!ret && !res->parent)
+ pci_bus_insert_busn_res(b, res->start, res->end);
+
+ return ret;
+}
+
+void pci_bus_release_busn_res(struct pci_bus *b)
+{
+ struct resource *res = &b->busn_res;
+ int ret;
+
+ if (!res->flags || !res->parent)
+ return;
+
+ ret = release_resource(res);
+ dev_printk(KERN_DEBUG, &b->dev,
+ "busn_res: %pR %s released\n",
+ res, ret ? "can not be" : "is");
+}
+
+struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata, struct list_head *resources)
+{
+ struct resource_entry *window;
+ bool found = false;
+ struct pci_bus *b;
+ int max;
+
+ resource_list_for_each_entry(window, resources)
+ if (window->res->flags & IORESOURCE_BUS) {
+ found = true;
+ break;
+ }
+
+ b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
+ if (!b)
+ return NULL;
+
+ if (!found) {
+ dev_info(&b->dev,
+ "No busn resource found for root bus, will use [bus %02x-ff]\n",
+ bus);
+ pci_bus_insert_busn_res(b, bus, 255);
+ }
+
+ max = pci_scan_child_bus(b);
+
+ if (!found)
+ pci_bus_update_busn_res_end(b, max);
+
+ return b;
+}
+EXPORT_SYMBOL(pci_scan_root_bus);
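+
+/*
+ * Usage sketch (assumed caller context): build a resource list, then
+ * create and scan the root bus in one call.  Without an IORESOURCE_BUS
+ * entry, the function defaults to [bus 00-ff] as shown above:
+ *
+ *	LIST_HEAD(resources);
+ *	pci_add_resource(&resources, &ioport_resource);
+ *	pci_add_resource(&resources, &iomem_resource);
+ *	b = pci_scan_root_bus(parent, 0, &ops, sysdata, &resources);
+ */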
+
+/* Deprecated; use pci_scan_root_bus() instead */
+struct pci_bus *pci_scan_bus_parented(struct device *parent,
+ int bus, struct pci_ops *ops, void *sysdata)
+{
+ LIST_HEAD(resources);
+ struct pci_bus *b;
+
+ pci_add_resource(&resources, &ioport_resource);
+ pci_add_resource(&resources, &iomem_resource);
+ pci_add_resource(&resources, &busn_resource);
+ b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
+ if (b)
+ pci_scan_child_bus(b);
+ else
+ pci_free_resource_list(&resources);
+ return b;
+}
+EXPORT_SYMBOL(pci_scan_bus_parented);
+
+struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
+ void *sysdata)
+{
+ LIST_HEAD(resources);
+ struct pci_bus *b;
+
+ pci_add_resource(&resources, &ioport_resource);
+ pci_add_resource(&resources, &iomem_resource);
+ pci_add_resource(&resources, &busn_resource);
+ b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
+ if (b) {
+ pci_scan_child_bus(b);
+ } else {
+ pci_free_resource_list(&resources);
+ }
+ return b;
+}
+EXPORT_SYMBOL(pci_scan_bus);
+
+/**
+ * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
+ * @bridge: PCI bridge for the bus to scan
+ *
+ * Scan a PCI bus and child buses for new devices, add them,
+ * and enable them, resizing the bridge mmio/io resources if necessary
+ * and possible. The caller must ensure the child devices are already
+ * removed for resizing to occur.
+ *
+ * Returns the highest subordinate bus number discovered.
+ */
+unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
+{
+ unsigned int max;
+ struct pci_bus *bus = bridge->subordinate;
+
+ max = pci_scan_child_bus(bus);
+
+ pci_assign_unassigned_bridge_resources(bridge);
+
+ pci_bus_add_devices(bus);
+
+ return max;
+}
+
+/**
+ * pci_rescan_bus - scan a PCI bus for devices.
+ * @bus: PCI bus to scan
+ *
+ * Scan a PCI bus and child buses for new devices, add them,
+ * and enable them.
+ *
+ * Returns the highest subordinate bus number discovered.
+ */
+unsigned int pci_rescan_bus(struct pci_bus *bus)
+{
+ unsigned int max;
+
+ max = pci_scan_child_bus(bus);
+ pci_assign_unassigned_bus_resources(bus);
+ pci_bus_add_devices(bus);
+
+ return max;
+}
+EXPORT_SYMBOL_GPL(pci_rescan_bus);
+
+/*
+ * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
+ * routines should always be executed under this mutex.
+ */
+static DEFINE_MUTEX(pci_rescan_remove_lock);
+
+void pci_lock_rescan_remove(void)
+{
+ mutex_lock(&pci_rescan_remove_lock);
+}
+EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
+
+void pci_unlock_rescan_remove(void)
+{
+ mutex_unlock(&pci_rescan_remove_lock);
+}
+EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
+
+static int __init pci_sort_bf_cmp(const struct device *d_a,
+ const struct device *d_b)
+{
+ const struct pci_dev *a = to_pci_dev(d_a);
+ const struct pci_dev *b = to_pci_dev(d_b);
+
+ if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
+ else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
+
+ if (a->bus->number < b->bus->number) return -1;
+ else if (a->bus->number > b->bus->number) return 1;
+
+ if (a->devfn < b->devfn) return -1;
+ else if (a->devfn > b->devfn) return 1;
+
+ return 0;
+}
+
+void __init pci_sort_breadthfirst(void)
+{
+ bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
+}
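+
+/*
+ * The comparator above orders by domain, then bus, then devfn; e.g.
+ * 0000:00:1f.0 sorts before 0000:01:00.0, which in turn sorts before
+ * 0001:00:00.0.
+ */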
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
new file mode 100644
index 000000000..3f155e785
--- /dev/null
+++ b/drivers/pci/proc.c
@@ -0,0 +1,446 @@
+/*
+ * Procfs interface for the PCI bus.
+ *
+ * Copyright (c) 1997--1999 Martin Mares <mj@ucw.cz>
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/capability.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+#include "pci.h"
+
+static int proc_initialized; /* = 0 */
+
+static loff_t proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
+{
+ struct pci_dev *dev = PDE_DATA(file_inode(file));
+ return fixed_size_llseek(file, off, whence, dev->cfg_size);
+}
+
+static ssize_t proc_bus_pci_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct pci_dev *dev = PDE_DATA(file_inode(file));
+ unsigned int pos = *ppos;
+ unsigned int cnt, size;
+
+ /*
+ * Normal users can read only the standardized portion of the
+ * configuration space as several chips lock up when trying to read
+ * undefined locations (think of Intel PIIX4 as a typical example).
+ */
+
+ if (capable(CAP_SYS_ADMIN))
+ size = dev->cfg_size;
+ else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+ size = 128;
+ else
+ size = 64;
+
+ if (pos >= size)
+ return 0;
+ if (nbytes >= size)
+ nbytes = size;
+ if (pos + nbytes > size)
+ nbytes = size - pos;
+ cnt = nbytes;
+
+ if (!access_ok(VERIFY_WRITE, buf, cnt))
+ return -EINVAL;
+
+ pci_config_pm_runtime_get(dev);
+
+ if ((pos & 1) && cnt) {
+ unsigned char val;
+ pci_user_read_config_byte(dev, pos, &val);
+ __put_user(val, buf);
+ buf++;
+ pos++;
+ cnt--;
+ }
+
+ if ((pos & 3) && cnt > 2) {
+ unsigned short val;
+ pci_user_read_config_word(dev, pos, &val);
+ __put_user(cpu_to_le16(val), (__le16 __user *) buf);
+ buf += 2;
+ pos += 2;
+ cnt -= 2;
+ }
+
+ while (cnt >= 4) {
+ unsigned int val;
+ pci_user_read_config_dword(dev, pos, &val);
+ __put_user(cpu_to_le32(val), (__le32 __user *) buf);
+ buf += 4;
+ pos += 4;
+ cnt -= 4;
+ }
+
+ if (cnt >= 2) {
+ unsigned short val;
+ pci_user_read_config_word(dev, pos, &val);
+ __put_user(cpu_to_le16(val), (__le16 __user *) buf);
+ buf += 2;
+ pos += 2;
+ cnt -= 2;
+ }
+
+ if (cnt) {
+ unsigned char val;
+ pci_user_read_config_byte(dev, pos, &val);
+ __put_user(val, buf);
+ buf++;
+ pos++;
+ cnt--;
+ }
+
+ pci_config_pm_runtime_put(dev);
+
+ *ppos = pos;
+ return nbytes;
+}
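+
+/*
+ * Userspace sketch (illustrative path): the handler above serves raw
+ * little-endian config bytes, so the vendor ID, for example, can be
+ * fetched with:
+ *
+ *	int fd = open("/proc/bus/pci/00/1f.0", O_RDONLY);
+ *	unsigned short vendor;
+ *	pread(fd, &vendor, sizeof(vendor), 0);
+ */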
+
+static ssize_t proc_bus_pci_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct inode *ino = file_inode(file);
+ struct pci_dev *dev = PDE_DATA(ino);
+ int pos = *ppos;
+ int size = dev->cfg_size;
+ int cnt;
+
+ if (pos >= size)
+ return 0;
+ if (nbytes >= size)
+ nbytes = size;
+ if (pos + nbytes > size)
+ nbytes = size - pos;
+ cnt = nbytes;
+
+ if (!access_ok(VERIFY_READ, buf, cnt))
+ return -EINVAL;
+
+ pci_config_pm_runtime_get(dev);
+
+ if ((pos & 1) && cnt) {
+ unsigned char val;
+ __get_user(val, buf);
+ pci_user_write_config_byte(dev, pos, val);
+ buf++;
+ pos++;
+ cnt--;
+ }
+
+ if ((pos & 3) && cnt > 2) {
+ __le16 val;
+ __get_user(val, (__le16 __user *) buf);
+ pci_user_write_config_word(dev, pos, le16_to_cpu(val));
+ buf += 2;
+ pos += 2;
+ cnt -= 2;
+ }
+
+ while (cnt >= 4) {
+ __le32 val;
+ __get_user(val, (__le32 __user *) buf);
+ pci_user_write_config_dword(dev, pos, le32_to_cpu(val));
+ buf += 4;
+ pos += 4;
+ cnt -= 4;
+ }
+
+ if (cnt >= 2) {
+ __le16 val;
+ __get_user(val, (__le16 __user *) buf);
+ pci_user_write_config_word(dev, pos, le16_to_cpu(val));
+ buf += 2;
+ pos += 2;
+ cnt -= 2;
+ }
+
+ if (cnt) {
+ unsigned char val;
+ __get_user(val, buf);
+ pci_user_write_config_byte(dev, pos, val);
+ buf++;
+ pos++;
+ cnt--;
+ }
+
+ pci_config_pm_runtime_put(dev);
+
+ *ppos = pos;
+ i_size_write(ino, dev->cfg_size);
+ return nbytes;
+}
+
+struct pci_filp_private {
+ enum pci_mmap_state mmap_state;
+ int write_combine;
+};
+
+static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct pci_dev *dev = PDE_DATA(file_inode(file));
+#ifdef HAVE_PCI_MMAP
+ struct pci_filp_private *fpriv = file->private_data;
+#endif /* HAVE_PCI_MMAP */
+ int ret = 0;
+
+ switch (cmd) {
+ case PCIIOC_CONTROLLER:
+ ret = pci_domain_nr(dev->bus);
+ break;
+
+#ifdef HAVE_PCI_MMAP
+ case PCIIOC_MMAP_IS_IO:
+ fpriv->mmap_state = pci_mmap_io;
+ break;
+
+ case PCIIOC_MMAP_IS_MEM:
+ fpriv->mmap_state = pci_mmap_mem;
+ break;
+
+ case PCIIOC_WRITE_COMBINE:
+ if (arg)
+ fpriv->write_combine = 1;
+ else
+ fpriv->write_combine = 0;
+ break;
+
+#endif /* HAVE_PCI_MMAP */
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
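+
+/*
+ * Userspace sketch (illustrative): a caller would select the mmap mode
+ * via these ioctls before mapping a BAR, e.g.
+ *
+ *	ioctl(fd, PCIIOC_MMAP_IS_MEM);
+ *	ioctl(fd, PCIIOC_WRITE_COMBINE, 1);
+ *	mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ */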
+
+#ifdef HAVE_PCI_MMAP
+static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct pci_dev *dev = PDE_DATA(file_inode(file));
+ struct pci_filp_private *fpriv = file->private_data;
+ int i, ret;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ /* Make sure the caller is mapping a real resource for this device */
+ for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
+ break;
+ }
+
+ if (i >= PCI_ROM_RESOURCE)
+ return -ENODEV;
+
+ ret = pci_mmap_page_range(dev, vma,
+ fpriv->mmap_state,
+ fpriv->write_combine);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int proc_bus_pci_open(struct inode *inode, struct file *file)
+{
+ struct pci_filp_private *fpriv = kmalloc(sizeof(*fpriv), GFP_KERNEL);
+
+ if (!fpriv)
+ return -ENOMEM;
+
+ fpriv->mmap_state = pci_mmap_io;
+ fpriv->write_combine = 0;
+
+ file->private_data = fpriv;
+
+ return 0;
+}
+
+static int proc_bus_pci_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ file->private_data = NULL;
+
+ return 0;
+}
+#endif /* HAVE_PCI_MMAP */
+
+static const struct file_operations proc_bus_pci_operations = {
+ .owner = THIS_MODULE,
+ .llseek = proc_bus_pci_lseek,
+ .read = proc_bus_pci_read,
+ .write = proc_bus_pci_write,
+ .unlocked_ioctl = proc_bus_pci_ioctl,
+ .compat_ioctl = proc_bus_pci_ioctl,
+#ifdef HAVE_PCI_MMAP
+ .open = proc_bus_pci_open,
+ .release = proc_bus_pci_release,
+ .mmap = proc_bus_pci_mmap,
+#ifdef HAVE_ARCH_PCI_GET_UNMAPPED_AREA
+ .get_unmapped_area = get_pci_unmapped_area,
+#endif /* HAVE_ARCH_PCI_GET_UNMAPPED_AREA */
+#endif /* HAVE_PCI_MMAP */
+};
+
+/* iterator */
+static void *pci_seq_start(struct seq_file *m, loff_t *pos)
+{
+ struct pci_dev *dev = NULL;
+ loff_t n = *pos;
+
+ for_each_pci_dev(dev) {
+ if (!n--)
+ break;
+ }
+ return dev;
+}
+
+static void *pci_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct pci_dev *dev = v;
+
+ (*pos)++;
+ dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
+ return dev;
+}
+
+static void pci_seq_stop(struct seq_file *m, void *v)
+{
+ if (v) {
+ struct pci_dev *dev = v;
+ pci_dev_put(dev);
+ }
+}
+
+static int show_device(struct seq_file *m, void *v)
+{
+ const struct pci_dev *dev = v;
+ const struct pci_driver *drv;
+ int i;
+
+ if (dev == NULL)
+ return 0;
+
+ drv = pci_dev_driver(dev);
+ seq_printf(m, "%02x%02x\t%04x%04x\t%x",
+ dev->bus->number,
+ dev->devfn,
+ dev->vendor,
+ dev->device,
+ dev->irq);
+
+ /* only print standard and ROM resources to preserve compatibility */
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+ resource_size_t start, end;
+ pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
+ seq_printf(m, "\t%16llx",
+ (unsigned long long)(start |
+ (dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
+ }
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+ resource_size_t start, end;
+ pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
+ seq_printf(m, "\t%16llx",
+ dev->resource[i].start < dev->resource[i].end ?
+ (unsigned long long)(end - start) + 1 : 0);
+ }
+ seq_putc(m, '\t');
+ if (drv)
+ seq_printf(m, "%s", drv->name);
+ seq_putc(m, '\n');
+ return 0;
+}
+
+static const struct seq_operations proc_bus_pci_devices_op = {
+ .start = pci_seq_start,
+ .next = pci_seq_next,
+ .stop = pci_seq_stop,
+ .show = show_device
+};
+
+static struct proc_dir_entry *proc_bus_pci_dir;
+
+int pci_proc_attach_device(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->bus;
+ struct proc_dir_entry *e;
+ char name[16];
+
+ if (!proc_initialized)
+ return -EACCES;
+
+ if (!bus->procdir) {
+ if (pci_proc_domain(bus)) {
+ sprintf(name, "%04x:%02x", pci_domain_nr(bus),
+ bus->number);
+ } else {
+ sprintf(name, "%02x", bus->number);
+ }
+ bus->procdir = proc_mkdir(name, proc_bus_pci_dir);
+ if (!bus->procdir)
+ return -ENOMEM;
+ }
+
+ sprintf(name, "%02x.%x", PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
+ e = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir,
+ &proc_bus_pci_operations, dev);
+ if (!e)
+ return -ENOMEM;
+ proc_set_size(e, dev->cfg_size);
+ dev->procent = e;
+
+ return 0;
+}
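+
+/*
+ * Resulting layout (derived from the sprintf formats above): with PCI
+ * domains the entry is e.g. /proc/bus/pci/0000:02/03.0; without them,
+ * simply /proc/bus/pci/02/03.0.
+ */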
+
+int pci_proc_detach_device(struct pci_dev *dev)
+{
+ proc_remove(dev->procent);
+ dev->procent = NULL;
+ return 0;
+}
+
+int pci_proc_detach_bus(struct pci_bus *bus)
+{
+ proc_remove(bus->procdir);
+ return 0;
+}
+
+static int proc_bus_pci_dev_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &proc_bus_pci_devices_op);
+}
+
+static const struct file_operations proc_bus_pci_dev_operations = {
+ .owner = THIS_MODULE,
+ .open = proc_bus_pci_dev_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init pci_proc_init(void)
+{
+ struct pci_dev *dev = NULL;
+ proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
+ proc_create("devices", 0, proc_bus_pci_dir,
+ &proc_bus_pci_dev_operations);
+ proc_initialized = 1;
+ for_each_pci_dev(dev)
+ pci_proc_attach_device(dev);
+
+ return 0;
+}
+device_initcall(pci_proc_init);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
new file mode 100644
index 000000000..c6dc1dfd2
--- /dev/null
+++ b/drivers/pci/quirks.c
@@ -0,0 +1,4015 @@
+/*
+ * This file contains work-arounds for many known PCI hardware
+ * bugs. Devices present only on certain architectures (host
+ * bridges et cetera) should be handled in arch-specific code.
+ *
+ * Note: any quirks for hotpluggable devices must _NOT_ be declared __init.
+ *
+ * Copyright (c) 1999 Martin Mares <mj@ucw.cz>
+ *
+ * Init/reset quirks for USB host controllers should be in the
+ * USB quirks file, where their drivers can access and reuse them.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/acpi.h>
+#include <linux/kallsyms.h>
+#include <linux/dmi.h>
+#include <linux/pci-aspm.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/ktime.h>
+#include <linux/mm.h>
+#include <asm/dma.h> /* isa_dma_bridge_buggy */
+#include "pci.h"
+
+/*
+ * Decoding should be disabled for a PCI device during BAR sizing to avoid
+ * conflicts. But doing so may cause problems on the host bridge and perhaps
+ * other key system devices. For devices that need to have mmio decoding
+ * always-on, we need to set the dev->mmio_always_on bit.
+ */
+static void quirk_mmio_always_on(struct pci_dev *dev)
+{
+ dev->mmio_always_on = 1;
+}
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
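+
+/*
+ * Pattern note (quirk_example and the IDs are hypothetical): every quirk
+ * below is a plain function taking a struct pci_dev *, bound to matching
+ * devices by a DECLARE_PCI_FIXUP_* macro that names the fixup phase
+ * (EARLY/HEADER/FINAL/ENABLE/RESUME/...):
+ *
+ *	static void quirk_example(struct pci_dev *dev) { ... }
+ *	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FOO, 0x1234, quirk_example);
+ */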
+
+/* The Mellanox Tavor device gives false positive parity errors.
+ * Mark this device with a broken_parity_status to allow
+ * PCI scanning code to "skip" this now blacklisted device.
+ */
+static void quirk_mellanox_tavor(struct pci_dev *dev)
+{
+ dev->broken_parity_status = 1; /* This device gives false positives */
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);
+
+/* Deal with broken BIOSes that neglect to enable passive release,
+ which can cause problems in combination with the 82441FX/PPro MTRRs */
+static void quirk_passive_release(struct pci_dev *dev)
+{
+ struct pci_dev *d = NULL;
+ unsigned char dlc;
+
+ /* We have to make sure a particular bit is set in the PIIX3
+ ISA bridge, so we have to go out and find it. */
+ while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
+ pci_read_config_byte(d, 0x82, &dlc);
+ if (!(dlc & 1<<1)) {
+ dev_info(&d->dev, "PIIX3: Enabling Passive Release\n");
+ dlc |= 1<<1;
+ pci_write_config_byte(d, 0x82, dlc);
+ }
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
+
+/* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround
+   but VIA don't answer queries. If you happen to have good contacts at VIA,
+   ask them for me please -- Alan
+
+   This appears not to depend on the BIOS version, so presumably a
+   chipset-level fix is needed. */
+
+static void quirk_isa_dma_hangs(struct pci_dev *dev)
+{
+ if (!isa_dma_bridge_buggy) {
+ isa_dma_bridge_buggy = 1;
+ dev_info(&dev->dev, "Activating ISA DMA hang workarounds\n");
+ }
+}
+/*
+ * It's not totally clear which chipsets are the problematic ones.
+ * We know the 82C586 and 82C596 variants are affected.
+ */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
+
+/*
+ * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
+ * for some HT machines to use C4 w/o hanging.
+ */
+static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
+{
+ u32 pmbase;
+ u16 pm1a;
+
+ pci_read_config_dword(dev, 0x40, &pmbase);
+ pmbase = pmbase & 0xff80;
+ pm1a = inw(pmbase);
+
+ if (pm1a & 0x10) {
+ dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
+ outw(0x10, pmbase);
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
+
+/*
+ * Chipsets where PCI->PCI transfers vanish or hang
+ */
+static void quirk_nopcipci(struct pci_dev *dev)
+{
+ if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
+ dev_info(&dev->dev, "Disabling direct PCI/PCI transfers\n");
+ pci_pci_problems |= PCIPCI_FAIL;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci);
+
+static void quirk_nopciamd(struct pci_dev *dev)
+{
+ u8 rev;
+ pci_read_config_byte(dev, 0x08, &rev);
+ if (rev == 0x13) {
+ /* Erratum 24 */
+ dev_info(&dev->dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n");
+ pci_pci_problems |= PCIAGP_FAIL;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd);
+
+/*
+ * Triton requires workarounds to be used by the drivers
+ */
+static void quirk_triton(struct pci_dev *dev)
+{
+ if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
+ dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
+ pci_pci_problems |= PCIPCI_TRITON;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
+
+/*
+ * VIA Apollo KT133 needs a PCI latency patch,
+ * made according to a Windows-driver-based patch by George E. Breese;
+ * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
+ * and http://www.georgebreese.com/net/software/#PCI
+ * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
+ * the info on which Mr Breese based his work.
+ *
+ * Updated based on further information from the site and also on
+ * information provided by VIA
+ */
+static void quirk_vialatency(struct pci_dev *dev)
+{
+ struct pci_dev *p;
+ u8 busarb;
+ /* Ok we have a potential problem chipset here. Now see if we have
+ a buggy southbridge */
+
+ p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
+ if (p != NULL) {
+ /* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */
+ /* Check for buggy part revisions */
+ if (p->revision < 0x40 || p->revision > 0x42)
+ goto exit;
+ } else {
+ p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
+ if (p == NULL) /* No problem parts */
+ goto exit;
+ /* Check for buggy part revisions */
+ if (p->revision < 0x10 || p->revision > 0x12)
+ goto exit;
+ }
+
+ /*
+ * Ok we have the problem. Now set the PCI master grant to
+ * occur every master grant. The apparent bug is that under high
+ * PCI load (quite common in Linux of course) you can get data
+	 * loss when the CPU is held off the bus for 3 bus master requests.
+	 * This happens to include the IDE controllers....
+	 *
+	 * VIA only applies this fix when an SB Live! is present, but under
+	 * both Linux and Windows this isn't enough, and we have seen
+	 * corruption without SB Live! but with things like 3 UDMA IDE
+	 * controllers. So we ignore that bit of the VIA recommendation.
+ */
+
+ pci_read_config_byte(dev, 0x76, &busarb);
+	/* Set bits 5:4 of byte 0x76 to 0b01:
+	   "Master priority rotation on every PCI master grant" */
+ busarb &= ~(1<<5);
+ busarb |= (1<<4);
+ pci_write_config_byte(dev, 0x76, busarb);
+ dev_info(&dev->dev, "Applying VIA southbridge workaround\n");
+exit:
+ pci_dev_put(p);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
+/* Must restore this on a resume from RAM */
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
+
+/*
+ * VIA Apollo VP3 needs ETBF on BT848/878
+ */
+static void quirk_viaetbf(struct pci_dev *dev)
+{
+ if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
+ dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
+ pci_pci_problems |= PCIPCI_VIAETBF;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf);
+
+static void quirk_vsfx(struct pci_dev *dev)
+{
+ if ((pci_pci_problems&PCIPCI_VSFX) == 0) {
+ dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
+ pci_pci_problems |= PCIPCI_VSFX;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx);
+
+/*
+ * Ali Magik requires workarounds to be used by the drivers
+ * that DMA to AGP space. Latency must be set to 0xA and the Triton
+ * workaround applied too.
+ * [Info kindly provided by ALi]
+ */
+static void quirk_alimagik(struct pci_dev *dev)
+{
+ if ((pci_pci_problems&PCIPCI_ALIMAGIK) == 0) {
+ dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
+ pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
+
+/*
+ * Natoma has some interesting boundary conditions with Zoran stuff,
+ * at least.
+ */
+static void quirk_natoma(struct pci_dev *dev)
+{
+ if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
+ dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
+ pci_pci_problems |= PCIPCI_NATOMA;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
+
+/*
+ * This chip can cause PCI parity errors if config register 0xA0 is read
+ * while DMAs are occurring.
+ */
+static void quirk_citrine(struct pci_dev *dev)
+{
+ dev->cfg_size = 0xA0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
+
+/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
+static void quirk_extend_bar_to_page(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
+ r->end = PAGE_SIZE - 1;
+ r->start = 0;
+ r->flags |= IORESOURCE_UNSET;
+ dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n",
+ i, r);
+ }
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
+
+/*
+ * S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
+ * If it's needed, re-allocate the region.
+ */
+static void quirk_s3_64M(struct pci_dev *dev)
+{
+ struct resource *r = &dev->resource[0];
+
+ if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
+ r->flags |= IORESOURCE_UNSET;
+ r->start = 0;
+ r->end = 0x3ffffff;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
+
+static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
+ const char *name)
+{
+ u32 region;
+ struct pci_bus_region bus_region;
+ struct resource *res = dev->resource + pos;
+
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
+
+ if (!region)
+ return;
+
+ res->name = pci_name(dev);
+ res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
+ res->flags |=
+ (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
+ region &= ~(size - 1);
+
+ /* Convert from PCI bus to resource space */
+ bus_region.start = region;
+ bus_region.end = region + size - 1;
+ pcibios_bus_to_resource(dev->bus, res, &bus_region);
+
+ dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
+ name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
+}
+
+/*
+ * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
+ * ver. 1.33 20070103) don't set the correct ISA PCI region header info.
+ * BAR0 should be 8 bytes; instead, it may be set to something like 8k
+ * (which conflicts w/ BAR1's memory range).
+ *
+ * CS553x's ISA PCI BARs may also be read-only (ref:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward).
+ */
+static void quirk_cs5536_vsa(struct pci_dev *dev)
+{
+ static char *name = "CS5536 ISA bridge";
+
+ if (pci_resource_len(dev, 0) != 8) {
+ quirk_io(dev, 0, 8, name); /* SMB */
+ quirk_io(dev, 1, 256, name); /* GPIO */
+ quirk_io(dev, 2, 64, name); /* MFGPT */
+ dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n",
+ name);
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
+
+static void quirk_io_region(struct pci_dev *dev, int port,
+ unsigned size, int nr, const char *name)
+{
+ u16 region;
+ struct pci_bus_region bus_region;
+ struct resource *res = dev->resource + nr;
+
+ pci_read_config_word(dev, port, &region);
+ region &= ~(size - 1);
+
+ if (!region)
+ return;
+
+ res->name = pci_name(dev);
+ res->flags = IORESOURCE_IO;
+
+ /* Convert from PCI bus to resource space */
+ bus_region.start = region;
+ bus_region.end = region + size - 1;
+ pcibios_bus_to_resource(dev->bus, res, &bus_region);
+
+ if (!pci_claim_resource(dev, nr))
+ dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name);
+}
+
+/*
+ * The ATI Northbridge sets up an MCE on the processor if you even
+ * read anywhere between 0x3b0 and 0x3bb, or read 0x3d3.
+ */
+static void quirk_ati_exploding_mce(struct pci_dev *dev)
+{
+ dev_info(&dev->dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n");
+	/* We must not look at these I/O locations */
+ request_region(0x3b0, 0x0C, "RadeonIGP");
+ request_region(0x3d3, 0x01, "RadeonIGP");
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce);
+
+/*
+ * In the AMD NL platform, this device ([1022:7912]) has a class code of
+ * PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
+ * claim it.
+ * But the dwc3 driver is a more specific driver for this device, and we'd
+ * prefer to use it instead of xhci. To prevent xhci from claiming the
+ * device, change the class code to 0x0c03fe, which the PCI r3.0 spec
+ * defines as "USB device (not host controller)". The dwc3 driver can then
+ * claim it based on its Vendor and Device ID.
+ */
+static void quirk_amd_nl_class(struct pci_dev *pdev)
+{
+	/*
+	 * Use 'USB Device' (0x0c03fe) instead of the class the PCI header provides
+	 */
+ pdev->class = 0x0c03fe;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
+ quirk_amd_nl_class);
+
+/*
+ * Let's make the southbridge information explicit instead
+ * of having to worry about people probing the ACPI areas,
+ * for example.. (Yes, it happens, and if you read the wrong
+ * ACPI register it will put the machine to sleep with no
+ * way of waking it up again. Bummer).
+ *
+ * ALI M7101: Two IO regions pointed to by words at
+ * 0xE0 (64 bytes of ACPI registers)
+ * 0xE2 (32 bytes of SMB registers)
+ */
+static void quirk_ali7101_acpi(struct pci_dev *dev)
+{
+ quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
+ quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi);
+
+static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
+{
+ u32 devres;
+ u32 mask, size, base;
+
+ pci_read_config_dword(dev, port, &devres);
+ if ((devres & enable) != enable)
+ return;
+ mask = (devres >> 16) & 15;
+ base = devres & 0xffff;
+ size = 16;
+ for (;;) {
+ unsigned bit = size >> 1;
+ if ((bit & mask) == bit)
+ break;
+ size = bit;
+ }
+ /*
+ * For now we only print it out. Eventually we'll want to
+ * reserve it (at least if it's in the 0x1000+ range), but
+ * let's get enough confirmation reports first.
+ */
+ base &= -size;
+ dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base,
+ base + size - 1);
+}
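+
+/*
+ * Worked example for the sizing loop above: with mask = 0x3 the probe
+ * tries bit = 8 (not set), bit = 4 (not set), then bit = 2 (2 & 0x3 ==
+ * 2, stop), leaving size = 4 bytes.
+ */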
+
+static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
+{
+ u32 devres;
+ u32 mask, size, base;
+
+ pci_read_config_dword(dev, port, &devres);
+ if ((devres & enable) != enable)
+ return;
+ base = devres & 0xffff0000;
+ mask = (devres & 0x3f) << 16;
+ size = 128 << 16;
+ for (;;) {
+ unsigned bit = size >> 1;
+ if ((bit & mask) == bit)
+ break;
+ size = bit;
+ }
+ /*
+ * For now we only print it out. Eventually we'll want to
+ * reserve it, but let's get enough confirmation reports first.
+ */
+ base &= -size;
+ dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base,
+ base + size - 1);
+}
+
+/*
+ * PIIX4 ACPI: Two IO regions pointed to by longwords at
+ * 0x40 (64 bytes of ACPI registers)
+ * 0x90 (16 bytes of SMB registers)
+ * and a few strange programmable PIIX4 device resources.
+ */
+static void quirk_piix4_acpi(struct pci_dev *dev)
+{
+ u32 res_a;
+
+ quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
+ quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");
+
+ /* Device resource A has enables for some of the other ones */
+ pci_read_config_dword(dev, 0x5c, &res_a);
+
+ piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
+ piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);
+
+ /* Device resource D is just bitfields for static resources */
+
+ /* Device 12 enabled? */
+ if (res_a & (1 << 29)) {
+ piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
+ piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
+ }
+ /* Device 13 enabled? */
+ if (res_a & (1 << 30)) {
+ piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
+ piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
+ }
+ piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
+ piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi);
+
+#define ICH_PMBASE 0x40
+#define ICH_ACPI_CNTL 0x44
+#define ICH4_ACPI_EN 0x10
+#define ICH6_ACPI_EN 0x80
+#define ICH4_GPIOBASE 0x58
+#define ICH4_GPIO_CNTL 0x5c
+#define ICH4_GPIO_EN 0x10
+#define ICH6_GPIOBASE 0x48
+#define ICH6_GPIO_CNTL 0x4c
+#define ICH6_GPIO_EN 0x10
+
+/*
+ * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at
+ * 0x40 (128 bytes of ACPI, GPIO & TCO registers)
+ * 0x58 (64 bytes of GPIO I/O space)
+ */
+static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
+{
+ u8 enable;
+
+ /*
+ * The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict
+ * with low legacy (and fixed) ports. We don't know the decoding
+ * priority and can't tell whether the legacy device or the one created
+ * here is really at that address. This happens on boards with broken
+ * BIOSes.
+ */
+
+ pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
+ if (enable & ICH4_ACPI_EN)
+ quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
+ "ICH4 ACPI/GPIO/TCO");
+
+ pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
+ if (enable & ICH4_GPIO_EN)
+ quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
+ "ICH4 GPIO");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, quirk_ich4_lpc_acpi);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, quirk_ich4_lpc_acpi);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, quirk_ich4_lpc_acpi);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, quirk_ich4_lpc_acpi);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi);
+
+static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
+{
+ u8 enable;
+
+ pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
+ if (enable & ICH6_ACPI_EN)
+ quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
+ "ICH6 ACPI/GPIO/TCO");
+
+ pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
+ if (enable & ICH6_GPIO_EN)
+ quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
+ "ICH6 GPIO");
+}
+
+static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize)
+{
+ u32 val;
+ u32 size, base;
+
+ pci_read_config_dword(dev, reg, &val);
+
+ /* Enabled? */
+ if (!(val & 1))
+ return;
+ base = val & 0xfffc;
+ if (dynsize) {
+ /*
+ * This is not correct. It is 16, 32 or 64 bytes depending on
+ * register D31:F0:ADh bits 5:4.
+ *
+ * But this gets us at least _part_ of it.
+ */
+ size = 16;
+ } else {
+ size = 128;
+ }
+ base &= ~(size-1);
+
+ /* Just print it out for now. We should reserve it after more debugging */
+ dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
+}
+
+static void quirk_ich6_lpc(struct pci_dev *dev)
+{
+ /* Shared ACPI/GPIO decode with all ICH6+ */
+ ich6_lpc_acpi_gpio(dev);
+
+ /* ICH6-specific generic IO decode */
+ ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0);
+ ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
+
+static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name)
+{
+ u32 val;
+ u32 mask, base;
+
+ pci_read_config_dword(dev, reg, &val);
+
+ /* Enabled? */
+ if (!(val & 1))
+ return;
+
+ /*
+ * IO base in bits 15:2, mask in bits 23:18, both
+ * are dword-based
+ */
+ base = val & 0xfffc;
+ mask = (val >> 16) & 0xfc;
+ mask |= 3;
+
+ /* Just print it out for now. We should reserve it after more debugging */
+ dev_info(&dev->dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
+}
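+
+/*
+ * Worked example for the decode above (register value assumed): with
+ * val = 0x00040291 the range is enabled (bit 0 set), base = 0x0291 &
+ * 0xfffc = 0x0290, mask = ((val >> 16) & 0xfc) | 3 = 0x07, and the
+ * message reads "PIO at 0290 (mask 0007)".
+ */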
+
+/* ICH7-10 has the same common LPC generic IO decode registers */
+static void quirk_ich7_lpc(struct pci_dev *dev)
+{
+ /* We share the common ACPI/GPIO decode with ICH6 */
+ ich6_lpc_acpi_gpio(dev);
+
+ /* And have 4 ICH7+ generic decodes */
+ ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1");
+ ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2");
+ ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3");
+ ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc);
+
+/*
+ * VIA ACPI: One IO region pointed to by longword at
+ * 0x48 or 0x20 (256 bytes of ACPI registers)
+ */
+static void quirk_vt82c586_acpi(struct pci_dev *dev)
+{
+ if (dev->revision & 0x10)
+ quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES,
+ "vt82c586 ACPI");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi);
+
+/*
+ * VIA VT82C686 ACPI: Three IO regions pointed to by (long)words at
+ * 0x48 (256 bytes of ACPI registers)
+ * 0x70 (128 bytes of hardware monitoring register)
+ * 0x90 (16 bytes of SMB registers)
+ */
+static void quirk_vt82c686_acpi(struct pci_dev *dev)
+{
+ quirk_vt82c586_acpi(dev);
+
+ quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1,
+ "vt82c686 HW-mon");
+
+ quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi);
+
+/*
+ * VIA VT8235 ISA Bridge: Two IO regions pointed to by words at
+ * 0x88 (128 bytes of power management registers)
+ * 0xd0 (16 bytes of SMB registers)
+ */
+static void quirk_vt8235_acpi(struct pci_dev *dev)
+{
+ quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
+ quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
+
+/*
+ * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast back-to-back:
+ * Disable fast back-to-back on the secondary bus segment
+ */
+static void quirk_xio2000a(struct pci_dev *dev)
+{
+ struct pci_dev *pdev;
+ u16 command;
+
+ dev_warn(&dev->dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n");
+ list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
+ pci_read_config_word(pdev, PCI_COMMAND, &command);
+ if (command & PCI_COMMAND_FAST_BACK)
+ pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK);
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
+ quirk_xio2000a);
+
+#ifdef CONFIG_X86_IO_APIC
+
+#include <asm/io_apic.h>
+
+/*
+ * VIA 686A/B: If an IO-APIC is active, we need to route all on-chip
+ * devices to the external APIC.
+ *
+ * TODO: When we have device-specific interrupt routers,
+ * this code will go away from quirks.
+ */
+static void quirk_via_ioapic(struct pci_dev *dev)
+{
+ u8 tmp;
+
+ if (nr_ioapics < 1)
+ tmp = 0; /* nothing routed to external APIC */
+ else
+ tmp = 0x1f; /* all known bits (4-0) routed to external APIC */
+
+ dev_info(&dev->dev, "%sbling VIA external APIC routing\n",
+ tmp == 0 ? "Disa" : "Ena");
+
+ /* Offset 0x58: External APIC IRQ output control */
+ pci_write_config_byte(dev, 0x58, tmp);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
+
+/*
+ * VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' Bit.
+ * This leads to doubled level interrupt rates.
+ * Set this bit to get rid of cycle wastage.
+ * Otherwise uncritical.
+ */
+static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
+{
+ u8 misc_control2;
+#define BYPASS_APIC_DEASSERT 8
+
+ pci_read_config_byte(dev, 0x5B, &misc_control2);
+ if (!(misc_control2 & BYPASS_APIC_DEASSERT)) {
+ dev_info(&dev->dev, "Bypassing VIA 8237 APIC De-Assert Message\n");
+ pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
+
+/*
+ * The AMD IO-APIC can hang the box when an APIC IRQ is masked.
+ * We check all revs >= B0 (but not the pre-production parts!) as the bug
+ * is currently marked NoFix.
+ *
+ * We have multiple reports of hangs with this chipset that went away with
+ * noapic specified. For the moment we assume it's the erratum. We may be
+ * wrong, of course; however, the advice is demonstrably good even if so.
+ */
+static void quirk_amd_ioapic(struct pci_dev *dev)
+{
+ if (dev->revision >= 0x02) {
+ dev_warn(&dev->dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
+ dev_warn(&dev->dev, " : booting with the \"noapic\" option\n");
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
+
+static void quirk_ioapic_rmw(struct pci_dev *dev)
+{
+ if (dev->devfn == 0 && dev->bus->number == 0)
+ sis_apic_bug = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw);
+#endif /* CONFIG_X86_IO_APIC */
+
+/*
+ * Some settings of MMRBC can lead to data corruption so block changes.
+ * See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide
+ */
+static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
+{
+ if (dev->subordinate && dev->revision <= 0x12) {
+ dev_info(&dev->dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n",
+ dev->revision);
+ dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
+
+/*
+ * FIXME: it is questionable that quirk_via_acpi
+ * is needed. It shows up as an ISA bridge, and does not
+ * support the PCI_INTERRUPT_LINE register at all. Therefore
+ * it seems like setting the pci_dev's 'irq' to the
+ * value of the ACPI SCI interrupt is only done for convenience.
+ * -jgarzik
+ */
+static void quirk_via_acpi(struct pci_dev *d)
+{
+ /*
+ * VIA ACPI device: SCI IRQ line in PCI config byte 0x42
+ */
+ u8 irq;
+ pci_read_config_byte(d, 0x42, &irq);
+ irq &= 0xf;
+ if (irq && (irq != 2))
+ d->irq = irq;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi);
+
+
+/*
+ * VIA bridges which have VLink
+ */
+
+static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;
+
+static void quirk_via_bridge(struct pci_dev *dev)
+{
+ /* See what bridge we have and find the device ranges */
+ switch (dev->device) {
+ case PCI_DEVICE_ID_VIA_82C686:
+		/* The VT82C686 is special; it attaches to PCI and can have
+		   any device number. All its subdevices are functions of
+		   that single device. */
+ via_vlink_dev_lo = PCI_SLOT(dev->devfn);
+ via_vlink_dev_hi = PCI_SLOT(dev->devfn);
+ break;
+ case PCI_DEVICE_ID_VIA_8237:
+ case PCI_DEVICE_ID_VIA_8237A:
+ via_vlink_dev_lo = 15;
+ break;
+ case PCI_DEVICE_ID_VIA_8235:
+ via_vlink_dev_lo = 16;
+ break;
+ case PCI_DEVICE_ID_VIA_8231:
+ case PCI_DEVICE_ID_VIA_8233_0:
+ case PCI_DEVICE_ID_VIA_8233A:
+ case PCI_DEVICE_ID_VIA_8233C_0:
+ via_vlink_dev_lo = 17;
+ break;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_bridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, quirk_via_bridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_0, quirk_via_bridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233A, quirk_via_bridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233C_0, quirk_via_bridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_bridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge);
+
+/**
+ * quirk_via_vlink - VIA VLink IRQ number update
+ * @dev: PCI device
+ *
+ * If the device we are dealing with is on a PIC IRQ, we need to
+ * ensure that the IRQ line register, which usually is not relevant
+ * for PCI cards, is actually written so that interrupts get sent
+ * to the right place.
+ * We only do this on systems where a VIA south bridge was detected,
+ * and only for VIA devices on the motherboard (see quirk_via_bridge
+ * above).
+ */
+
+static void quirk_via_vlink(struct pci_dev *dev)
+{
+ u8 irq, new_irq;
+
+ /* Check if we have VLink at all */
+ if (via_vlink_dev_lo == -1)
+ return;
+
+ new_irq = dev->irq;
+
+ /* Don't quirk interrupts outside the legacy IRQ range */
+ if (!new_irq || new_irq > 15)
+ return;
+
+ /* Internal device ? */
+ if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi ||
+ PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
+ return;
+
+ /* This is an internal VLink device on a PIC interrupt. The BIOS
+ ought to have set this but may not have, so we redo it */
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
+ if (new_irq != irq) {
+ dev_info(&dev->dev, "VIA VLink IRQ fixup, from %d to %d\n",
+ irq, new_irq);
+ udelay(15); /* unknown if delay really needed */
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
+ }
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
+
+/*
+ * VIA VT82C598 has its device ID settable and many BIOSes
+ * set it to the ID of VT82C597 for backward compatibility.
+ * We need to switch it off to be able to recognize the real
+ * type of the chip.
+ */
+static void quirk_vt82c598_id(struct pci_dev *dev)
+{
+ pci_write_config_byte(dev, 0xfc, 0);
+ pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id);
+
+/*
+ * CardBus controllers have a legacy base address that enables them
+ * to respond as i82365 pcmcia controllers. We don't want them to
+ * do this even if the Linux CardBus driver is not loaded, because
+ * the Linux i82365 driver does not (and should not) handle CardBus.
+ */
+static void quirk_cardbus_legacy(struct pci_dev *dev)
+{
+ pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
+DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
+
+/*
+ * Following the PCI ordering rules is optional on the AMD762. I'm not
+ * sure what the designers were smoking but let's not inhale...
+ *
+ * To be fair to AMD, it follows the spec by default; it's BIOS people
+ * who turn it off!
+ */
+static void quirk_amd_ordering(struct pci_dev *dev)
+{
+ u32 pcic;
+ pci_read_config_dword(dev, 0x4C, &pcic);
+ if ((pcic & 6) != 6) {
+ pcic |= 6;
+ dev_warn(&dev->dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
+ pci_write_config_dword(dev, 0x4C, pcic);
+ pci_read_config_dword(dev, 0x84, &pcic);
+ pcic |= (1 << 23); /* Required in this mode */
+ pci_write_config_dword(dev, 0x84, pcic);
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
+
+/*
+ * DreamWorks-provided workaround for the Dunord I-3000 problem
+ *
+ * This card decodes and responds to addresses not apparently
+ * assigned to it. We force a larger allocation to ensure that
+ * nothing gets put too close to it.
+ */
+static void quirk_dunord(struct pci_dev *dev)
+{
+ struct resource *r = &dev->resource[1];
+
+ r->flags |= IORESOURCE_UNSET;
+ r->start = 0;
+ r->end = 0xffffff;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord);
+
+/*
+ * i82380FB mobile docking controller: its PCI-to-PCI bridge
+ * is subtractive decoding (transparent), and does indicate this
+ * in the ProgIf. Unfortunately, the ProgIf value is wrong - 0x80
+ * instead of 0x01.
+ */
+static void quirk_transparent_bridge(struct pci_dev *dev)
+{
+ dev->transparent = 1;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge);
+
+/*
+ * Common misconfiguration of the MediaGX/Geode PCI master that will
+ * reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1
+ * datasheets found at http://www.national.com/analog for info on what
+ * these bits do. <christer@weinigel.se>
+ */
+static void quirk_mediagx_master(struct pci_dev *dev)
+{
+ u8 reg;
+
+ pci_read_config_byte(dev, 0x41, &reg);
+ if (reg & 2) {
+ reg &= ~2;
+ dev_info(&dev->dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n",
+ reg);
+ pci_write_config_byte(dev, 0x41, reg);
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
+
+/*
+ * Ensure C0 rev restreaming is off. This is normally done by
+ * the BIOS, but in the odd case it is not, the result is corruption,
+ * hence the presence of a Linux check.
+ */
+static void quirk_disable_pxb(struct pci_dev *pdev)
+{
+ u16 config;
+
+ if (pdev->revision != 0x04) /* Only C0 requires this */
+ return;
+ pci_read_config_word(pdev, 0x40, &config);
+ if (config & (1<<6)) {
+ config &= ~(1<<6);
+ pci_write_config_word(pdev, 0x40, config);
+ dev_info(&pdev->dev, "C0 revision 450NX. Disabling PCI restreaming\n");
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
+
+static void quirk_amd_ide_mode(struct pci_dev *pdev)
+{
+ /* set SBX00/Hudson-2 SATA in IDE mode to AHCI mode */
+ u8 tmp;
+
+ pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
+ if (tmp == 0x01) { /* sub-class: IDE */
+ /* bit 0 of reg 0x40 appears to gate writes to the class code bytes */
+ pci_read_config_byte(pdev, 0x40, &tmp);
+ pci_write_config_byte(pdev, 0x40, tmp|1);
+ pci_write_config_byte(pdev, 0x9, 1); /* prog-if: AHCI */
+ pci_write_config_byte(pdev, 0xa, 6); /* sub-class: SATA */
+ pci_write_config_byte(pdev, 0x40, tmp); /* restore reg 0x40 */
+
+ pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
+ dev_info(&pdev->dev, "set SATA to AHCI mode\n");
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
+
+/*
+ * Serverworks CSB5 IDE does not fully support native mode
+ */
+static void quirk_svwks_csb5ide(struct pci_dev *pdev)
+{
+ u8 prog;
+ pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
+ if (prog & 5) {
+ prog &= ~5;
+ pdev->class &= ~5;
+ pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
+ /* PCI layer will sort out resources */
+ }
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
+
+/*
+ * Intel 82801CAM ICH3-M datasheet says IDE modes must be the same
+ */
+static void quirk_ide_samemode(struct pci_dev *pdev)
+{
+ u8 prog;
+
+ pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
+
+ if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) {
+ dev_info(&pdev->dev, "IDE mode mismatch; forcing legacy mode\n");
+ prog &= ~5;
+ pdev->class &= ~5;
+ pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
+ }
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
+
+/*
+ * Some ATA devices break if put into D3
+ */
+
+static void quirk_no_ata_d3(struct pci_dev *pdev)
+{
+ pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
+}
+/* Quirk the legacy ATA devices only. The AHCI ones are ok */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
+/* ALi loses some register settings that we cannot then restore */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
+/* VIA comes back fine but we need to keep it alive or ACPI GTM failures
+   occur during mode detection */
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
+ PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
+
+/* This was originally an Alpha-specific thing, but it really fits here.
+ * The i82375 PCI/EISA bridge appears as non-classified. Fix that.
+ */
+static void quirk_eisa_bridge(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_EISA << 8;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge);
+
+
+/*
+ * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
+ * is not activated. The myth is that Asus said that they do not want the
+ * users to be irritated by just another PCI Device in the Win98 device
+ * manager. (see the file prog/hotplug/README.p4b in the lm_sensors
+ * package 2.7.0 for details)
+ *
+ * The SMBus PCI Device can be activated by setting a bit in the ICH LPC
+ * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it
+ * becomes necessary to do this tweak in two steps -- the chosen trigger
+ * is either the Host bridge (preferred) or on-board VGA controller.
+ *
+ * Note that we used to unhide the SMBus that way on Toshiba laptops
+ * (Satellite A40 and Tecra M2) but then found that the thermal management
+ * was done by SMM code, which could cause unsynchronized concurrent
+ * accesses to the SMBus registers, with potentially bad effects. Thus you
+ * should be very careful when adding new entries: if SMM is accessing the
+ * Intel SMBus, this is a very good reason to leave it hidden.
+ *
+ * Likewise, many recent laptops use ACPI for thermal management. If the
+ * ACPI DSDT code accesses the SMBus, then Linux should not access it
+ * natively, and keeping the SMBus hidden is the right thing to do. If you
+ * are about to add an entry in the table below, please first disassemble
+ * the DSDT and double-check that there is no code accessing the SMBus.
+ */
+static int asus_hides_smbus;
+
+static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
+{
+ if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
+ if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
+ switch (dev->subsystem_device) {
+ case 0x8025: /* P4B-LX */
+ case 0x8070: /* P4B */
+ case 0x8088: /* P4B533 */
+ case 0x1626: /* L3C notebook */
+ asus_hides_smbus = 1;
+ }
+ else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
+ switch (dev->subsystem_device) {
+ case 0x80b1: /* P4GE-V */
+ case 0x80b2: /* P4PE */
+ case 0x8093: /* P4B533-V */
+ asus_hides_smbus = 1;
+ }
+ else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
+ switch (dev->subsystem_device) {
+ case 0x8030: /* P4T533 */
+ asus_hides_smbus = 1;
+ }
+ else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
+ switch (dev->subsystem_device) {
+ case 0x8070: /* P4G8X Deluxe */
+ asus_hides_smbus = 1;
+ }
+ else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
+ switch (dev->subsystem_device) {
+ case 0x80c9: /* PU-DLS */
+ asus_hides_smbus = 1;
+ }
+ else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
+ switch (dev->subsystem_device) {
+ case 0x1751: /* M2N notebook */
+ case 0x1821: /* M5N notebook */
+ case 0x1897: /* A6L notebook */
+ asus_hides_smbus = 1;
+ }
+ else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
+ switch (dev->subsystem_device) {
+ case 0x184b: /* W1N notebook */
+ case 0x186a: /* M6Ne notebook */
+ asus_hides_smbus = 1;
+ }
+ else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
+ switch (dev->subsystem_device) {
+ case 0x80f2: /* P4P800-X */
+ asus_hides_smbus = 1;
+ }
+ else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
+ switch (dev->subsystem_device) {
+ case 0x1882: /* M6V notebook */
+ case 0x1977: /* A6VA notebook */
+ asus_hides_smbus = 1;
+ }
+ } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
+ if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
+ switch (dev->subsystem_device) {
+ case 0x088C: /* HP Compaq nc8000 */
+ case 0x0890: /* HP Compaq nc6000 */
+ asus_hides_smbus = 1;
+ }
+ else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
+ switch (dev->subsystem_device) {
+ case 0x12bc: /* HP D330L */
+ case 0x12bd: /* HP D530 */
+ case 0x006a: /* HP Compaq nx9500 */
+ asus_hides_smbus = 1;
+ }
+ else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
+ switch (dev->subsystem_device) {
+ case 0x12bf: /* HP xw4100 */
+ asus_hides_smbus = 1;
+ }
+ } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
+ if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
+ switch (dev->subsystem_device) {
+ case 0xC00C: /* Samsung P35 notebook */
+ asus_hides_smbus = 1;
+ }
+ } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
+ if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
+ switch (dev->subsystem_device) {
+ case 0x0058: /* Compaq Evo N620c */
+ asus_hides_smbus = 1;
+ }
+ else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3)
+ switch (dev->subsystem_device) {
+ case 0xB16C: /* Compaq Deskpro EP 401963-001 (PCA# 010174) */
+ /* Motherboard doesn't have Host bridge
+ * subvendor/subdevice IDs, therefore checking
+ * its on-board VGA controller */
+ asus_hides_smbus = 1;
+ }
+ else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
+ switch (dev->subsystem_device) {
+ case 0x00b8: /* Compaq Evo D510 CMT */
+ case 0x00b9: /* Compaq Evo D510 SFF */
+ case 0x00ba: /* Compaq Evo D510 USDT */
+ /* Motherboard doesn't have Host bridge
+ * subvendor/subdevice IDs and on-board VGA
+ * controller is disabled if an AGP card is
+ * inserted, therefore checking USB UHCI
+ * Controller #1 */
+ asus_hides_smbus = 1;
+ }
+ else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
+ switch (dev->subsystem_device) {
+ case 0x001A: /* Compaq Deskpro EN SSF P667 815E */
+ /* Motherboard doesn't have host bridge
+ * subvendor/subdevice IDs, therefore checking
+ * its on-board VGA controller */
+ asus_hides_smbus = 1;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asus_hides_smbus_hostbridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, asus_hides_smbus_hostbridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
+
+static void asus_hides_smbus_lpc(struct pci_dev *dev)
+{
+ u16 val;
+
+ if (likely(!asus_hides_smbus))
+ return;
+
+ pci_read_config_word(dev, 0xF2, &val);
+ if (val & 0x8) {
+ pci_write_config_word(dev, 0xF2, val & (~0x8));
+ pci_read_config_word(dev, 0xF2, &val);
+ if (val & 0x8)
+ dev_info(&dev->dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n",
+ val);
+ else
+ dev_info(&dev->dev, "Enabled i801 SMBus device\n");
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
+
+/* It appears there is just one such device; if not, the WARN_ON below fires */
+static void __iomem *asus_rcba_base;
+static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
+{
+ u32 rcba;
+
+ if (likely(!asus_hides_smbus))
+ return;
+ WARN_ON(asus_rcba_base);
+
+ pci_read_config_dword(dev, 0xF0, &rcba);
+ /* use bits 31:14, 16 kB aligned */
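+ /* (Masking with 0xFFFFC000 keeps bits 31:14; e.g. a raw RCBA value of
+ * 0xFED1C001 yields the 16 kB-aligned base 0xFED1C000, which is then
+ * mapped for 0x4000 bytes.) */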
+ asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000);
+ if (asus_rcba_base == NULL)
+ return;
+}
+
+static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
+{
+ u32 val;
+
+ if (likely(!asus_hides_smbus || !asus_rcba_base))
+ return;
+ /* read the Function Disable register, dword mode only */
+ val = readl(asus_rcba_base + 0x3418);
+ writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418); /* enable the SMBus device */
+}
+
+static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
+{
+ if (likely(!asus_hides_smbus || !asus_rcba_base))
+ return;
+ iounmap(asus_rcba_base);
+ asus_rcba_base = NULL;
+ dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n");
+}
+
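+/*
+ * At boot-time enumeration the HEADER fixup below simply runs all three
+ * phases back to back: map the RCBA, clear the SMBus disable bit in the
+ * Function Disable register, then unmap again.
+ */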
+static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
+{
+ asus_hides_smbus_lpc_ich6_suspend(dev);
+ asus_hides_smbus_lpc_ich6_resume_early(dev);
+ asus_hides_smbus_lpc_ich6_resume(dev);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6);
+DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
+
+/*
+ * SiS 96x south bridge: BIOS typically hides SMBus device...
+ */
+static void quirk_sis_96x_smbus(struct pci_dev *dev)
+{
+ u8 val = 0;
+ pci_read_config_byte(dev, 0x77, &val);
+ if (val & 0x10) {
+ dev_info(&dev->dev, "Enabling SiS 96x SMBus\n");
+ pci_write_config_byte(dev, 0x77, val & ~0x10);
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
+
+/*
+ * ... This is further complicated by the fact that some SiS96x south
+ * bridges pretend to be 85C503/5513 instead. In that case, see if we
+ * spotted a compatible north bridge to make sure.
+ * (pci_find_device doesn't work yet)
+ *
+ * We can also enable the sis96x bit in the discovery register.
+ */
+#define SIS_DETECT_REGISTER 0x40
+
+static void quirk_sis_503(struct pci_dev *dev)
+{
+ u8 reg;
+ u16 devid;
+
+ pci_read_config_byte(dev, SIS_DETECT_REGISTER, &reg);
+ pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6));
+ pci_read_config_word(dev, PCI_DEVICE_ID, &devid);
+ if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) {
+ pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg);
+ return;
+ }
+
+ /*
+ * OK, it now shows up as a 96x: run the 96x SMBus quirk by hand,
+ * since its fixup entry may already have been processed by the
+ * time we rewrite the device ID.
+ * (depends on link order, which is apparently not guaranteed)
+ */
+ dev->device = devid;
+ quirk_sis_96x_smbus(dev);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
+
+
+/*
+ * On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller
+ * and MC97 modem controller are disabled when a second PCI soundcard is
+ * present. This patch, tweaking the VT8237 ISA bridge, enables them.
+ * -- bjd
+ */
+static void asus_hides_ac97_lpc(struct pci_dev *dev)
+{
+ u8 val;
+ int asus_hides_ac97 = 0;
+
+ if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
+ if (dev->device == PCI_DEVICE_ID_VIA_8237)
+ asus_hides_ac97 = 1;
+ }
+
+ if (!asus_hides_ac97)
+ return;
+
+ pci_read_config_byte(dev, 0x50, &val);
+ if (val & 0xc0) {
+ pci_write_config_byte(dev, 0x50, val & (~0xc0));
+ pci_read_config_byte(dev, 0x50, &val);
+ if (val & 0xc0)
+ dev_info(&dev->dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n",
+ val);
+ else
+ dev_info(&dev->dev, "Enabled onboard AC97/MC97 devices\n");
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
+
+#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
+
+/*
+ * If we are using libata we can drive this chip properly but must
+ * do this early on to make the additional device appear during
+ * the PCI scanning.
+ */
+static void quirk_jmicron_ata(struct pci_dev *pdev)
+{
+ u32 conf1, conf5, class;
+ u8 hdr;
+
+ /* Only poke fn 0 */
+ if (PCI_FUNC(pdev->devfn))
+ return;
+
+ pci_read_config_dword(pdev, 0x40, &conf1);
+ pci_read_config_dword(pdev, 0x80, &conf5);
+
+ conf1 &= ~0x00CFF302; /* Clear bit 1, 8, 9, 12-19, 22, 23 */
+ conf5 &= ~(1 << 24); /* Clear bit 24 */
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */
+ case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */
+ case PCI_DEVICE_ID_JMICRON_JMB364: /* SATA dual ports */
+ /* The controller should be in single-function AHCI mode */
+ conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */
+ break;
+
+ case PCI_DEVICE_ID_JMICRON_JMB365:
+ case PCI_DEVICE_ID_JMICRON_JMB366:
+ /* Redirect IDE second PATA port to the right spot */
+ conf5 |= (1 << 24);
+ /* Fall through */
+ case PCI_DEVICE_ID_JMICRON_JMB361:
+ case PCI_DEVICE_ID_JMICRON_JMB363:
+ case PCI_DEVICE_ID_JMICRON_JMB369:
+ /* Enable dual-function mode, AHCI on fn 0, IDE on fn 1 */
+ /* Set the class codes correctly and then direct IDE 0 */
+ conf1 |= 0x00C2A1B3; /* Set 0, 1, 4, 5, 7, 8, 13, 15, 17, 22, 23 */
+ break;
+
+ case PCI_DEVICE_ID_JMICRON_JMB368:
+ /* The controller should be in single-function IDE mode */
+ conf1 |= 0x00C00000; /* Set 22, 23 */
+ break;
+ }
+
+ pci_write_config_dword(pdev, 0x40, conf1);
+ pci_write_config_dword(pdev, 0x80, conf5);
+
+ /* Update pdev accordingly */
+ pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
+ pdev->hdr_type = hdr & 0x7f;
+ pdev->multifunction = !!(hdr & 0x80);
+
+ pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class);
+ pdev->class = class >> 8;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
+
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+static void quirk_alder_ioapic(struct pci_dev *pdev)
+{
+ int i;
+
+ if ((pdev->class >> 8) != 0xff00)
+ return;
+
+ /* the first BAR is the location of the IO APIC...we must
+ * not touch this (and it's already covered by the fixmap), so
+ * forcibly insert it into the resource tree */
+ if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
+ insert_resource(&iomem_resource, &pdev->resource[0]);
+
+ /* The next five BARs all seem to be rubbish, so just clean
+ * them out */
+ for (i = 1; i < 6; i++)
+ memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
+#endif
+
+static void quirk_pcie_mch(struct pci_dev *pdev)
+{
+ pci_msi_off(pdev);
+ pdev->no_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);
+
+
+/*
+ * It's possible for MSI to get corrupted if SHPC and ACPI
+ * are used together on certain PXH-based systems.
+ */
+static void quirk_pcie_pxh(struct pci_dev *dev)
+{
+ pci_msi_off(dev);
+ dev->no_msi = 1;
+ dev_warn(&dev->dev, "PXH quirk detected; SHPC device MSI disabled\n");
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh);
+
+/*
+ * Some Intel PCI Express chipsets have trouble with downstream
+ * device power management.
+ */
+static void quirk_intel_pcie_pm(struct pci_dev *dev)
+{
+ pci_pm_d3_delay = 120;
+ dev->no_d1d2 = 1;
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2601, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2602, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2603, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2604, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2605, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2606, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2607, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2608, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
+
+#ifdef CONFIG_X86_IO_APIC
+/*
+ * Boot interrupts on some chipsets cannot be turned off. For these chipsets,
+ * remap the original interrupt in the Linux kernel to the boot interrupt, so
+ * that a PCI device's interrupt handler is installed on the boot interrupt
+ * line instead.
+ */
+static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
+{
+ if (noioapicquirk || noioapicreroute)
+ return;
+
+ dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
+ dev_info(&dev->dev, "rerouting interrupts for [%04x:%04x]\n",
+ dev->vendor, dev->device);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
+
+/*
+ * On some chipsets we can disable the generation of legacy INTx boot
+ * interrupts.
+ */
+
+/*
+ * IO-APIC1 on the 6300ESB generates boot interrupts; see Intel order no.
+ * 300641-004US, section 5.7.3.
+ */
+#define INTEL_6300_IOAPIC_ABAR 0x40
+#define INTEL_6300_DISABLE_BOOT_IRQ (1<<14)
+
+static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
+{
+ u16 pci_config_word;
+
+ if (noioapicquirk)
+ return;
+
+ pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word);
+ pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
+ pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
+
+ dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
+ dev->vendor, dev->device);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
+
+/*
+ * Disable boot interrupts on the HT-1000
+ */
+#define BC_HT1000_FEATURE_REG 0x64
+#define BC_HT1000_PIC_REGS_ENABLE (1<<0)
+#define BC_HT1000_MAP_IDX 0xC00
+#define BC_HT1000_MAP_DATA 0xC01
+
+static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
+{
+ u32 pci_config_dword;
+ u8 irq;
+
+ if (noioapicquirk)
+ return;
+
+ pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword);
+ pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword |
+ BC_HT1000_PIC_REGS_ENABLE);
+
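+ /* The PIC routing entries are exposed through an index/data port pair:
+ * write the entry number to port 0xC00, then its value to port 0xC01.
+ * Entries 0x10-0x2f are cleared here to stop the boot interrupt routing. */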
+ for (irq = 0x10; irq < 0x10 + 32; irq++) {
+ outb(irq, BC_HT1000_MAP_IDX);
+ outb(0x00, BC_HT1000_MAP_DATA);
+ }
+
+ pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);
+
+ dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
+ dev->vendor, dev->device);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
+
+/*
+ * Disable boot interrupts on AMD and ATI chipsets
+ */
+/*
+ * NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131
+ * rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode
+ * (due to an erratum).
+ */
+#define AMD_813X_MISC 0x40
+#define AMD_813X_NOIOAMODE (1<<0)
+#define AMD_813X_REV_B1 0x12
+#define AMD_813X_REV_B2 0x13
+
+static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
+{
+ u32 pci_config_dword;
+
+ if (noioapicquirk)
+ return;
+ if ((dev->revision == AMD_813X_REV_B1) ||
+ (dev->revision == AMD_813X_REV_B2))
+ return;
+
+ pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
+ pci_config_dword &= ~AMD_813X_NOIOAMODE;
+ pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);
+
+ dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
+ dev->vendor, dev->device);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
+
+#define AMD_8111_PCI_IRQ_ROUTING 0x56
+
+static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
+{
+ u16 pci_config_word;
+
+ if (noioapicquirk)
+ return;
+
+ pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
+ if (!pci_config_word) {
+ dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] already disabled\n",
+ dev->vendor, dev->device);
+ return;
+ }
+ pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
+ dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
+ dev->vendor, dev->device);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
+DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
+#endif /* CONFIG_X86_IO_APIC */
+
+/*
+ * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size,
+ * but the PIO transfers won't work if BAR0 falls on an odd 8-byte boundary.
+ * Re-allocate the region if needed...
+ */
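+/*
+ * (Marking the resource IORESOURCE_UNSET with start 0 and end 0xf leaves
+ * a 16-byte size hint; the PCI core reassigns the BAR on its next
+ * allocation pass, and a 16-byte region is 16-byte aligned, so it can no
+ * longer start on an odd 8-byte boundary.)
+ */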
+static void quirk_tc86c001_ide(struct pci_dev *dev)
+{
+ struct resource *r = &dev->resource[0];
+
+ if (r->start & 0x8) {
+ r->flags |= IORESOURCE_UNSET;
+ r->start = 0;
+ r->end = 0xf;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
+ PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
+ quirk_tc86c001_ide);
+
+/*
+ * The PLX PCI 9050 PCI Target bridge controller has an erratum that
+ * prevents the local configuration registers, accessible via BAR0
+ * (memory) or BAR1 (I/O), from being read correctly if bit 7 of the
+ * base address is set.
+ * The BAR0 or BAR1 region may be disabled (size 0) or enabled (size 128).
+ * Re-allocate the regions to a 256-byte boundary if necessary.
+ */
+static void quirk_plx_pci9050(struct pci_dev *dev)
+{
+ unsigned int bar;
+
+ /* Fixed in revision 2 (PCI 9052). */
+ if (dev->revision >= 2)
+ return;
+ for (bar = 0; bar <= 1; bar++)
+ if (pci_resource_len(dev, bar) == 0x80 &&
+ (pci_resource_start(dev, bar) & 0x80)) {
+ struct resource *r = &dev->resource[bar];
+ dev_info(&dev->dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
+ bar);
+ r->flags |= IORESOURCE_UNSET;
+ r->start = 0;
+ r->end = 0xff;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ quirk_plx_pci9050);
+/*
+ * The following Meilhaus (vendor ID 0x1402) device IDs (amongst others)
+ * may be using the PLX PCI 9050: 0x0630, 0x0940, 0x0950, 0x0960, 0x100b,
+ * 0x1400, 0x140a, 0x140b, 0x14e0, 0x14ea, 0x14eb, 0x1604, 0x1608, 0x160c,
+ * 0x168f, 0x2000, 0x2600, 0x3000, 0x810a, 0x810b.
+ *
+ * Currently, device IDs 0x2000 and 0x2600 are used by the Comedi "me_daq"
+ * driver.
+ */
+DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2000, quirk_plx_pci9050);
+DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2600, quirk_plx_pci9050);
+
+static void quirk_netmos(struct pci_dev *dev)
+{
+ unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
+ unsigned int num_serial = dev->subsystem_device & 0xf;
+
+ /*
+ * These Netmos parts are multiport serial devices with optional
+ * parallel ports. Even when parallel ports are present, they
+ * are identified as class SERIAL, which means the serial driver
+ * will claim them. To prevent this, mark them as class OTHER.
+ * These combo devices should be claimed by parport_serial.
+ *
+ * The subdevice ID is of the form 0x00PS, where <P> is the number
+ * of parallel ports and <S> is the number of serial ports.
+ */
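+ /*
+ * Worked example: a subsystem device ID of 0x0014 decodes as
+ * num_parallel = (0x0014 & 0xf0) >> 4 = 1 parallel port and
+ * num_serial = 0x0014 & 0xf = 4 serial ports.
+ */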
+ switch (dev->device) {
+ case PCI_DEVICE_ID_NETMOS_9835:
+ /* Well, this rule doesn't hold for the following 9835 device */
+ if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
+ dev->subsystem_device == 0x0299)
+ return;
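+ /* fall through */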
+ case PCI_DEVICE_ID_NETMOS_9735:
+ case PCI_DEVICE_ID_NETMOS_9745:
+ case PCI_DEVICE_ID_NETMOS_9845:
+ case PCI_DEVICE_ID_NETMOS_9855:
+ if (num_parallel) {
+ dev_info(&dev->dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n",
+ dev->device, num_parallel, num_serial);
+ dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
+ (dev->class & 0xff);
+ }
+ }
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
+
+static void quirk_e100_interrupt(struct pci_dev *dev)
+{
+ u16 command, pmcsr;
+ u8 __iomem *csr;
+ u8 cmd_hi;
+
+ switch (dev->device) {
+ /* PCI IDs taken from drivers/net/e100.c */
+ case 0x1029:
+ case 0x1030 ... 0x1034:
+ case 0x1038 ... 0x103E:
+ case 0x1050 ... 0x1057:
+ case 0x1059:
+ case 0x1064 ... 0x106B:
+ case 0x1091 ... 0x1095:
+ case 0x1209:
+ case 0x1229:
+ case 0x2449:
+ case 0x2459:
+ case 0x245D:
+ case 0x27DC:
+ break;
+ default:
+ return;
+ }
+
+ /*
+ * Some firmware hands off the e100 with interrupts enabled,
+ * which can cause a flood of interrupts if packets are
+ * received before the driver attaches to the device. So
+ * disable all e100 interrupts here. The driver will
+ * re-enable them when it's ready.
+ */
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+
+ if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
+ return;
+
+ /*
+ * Check that the device is in the D0 power state. If it's not,
+ * there is no point in looking any further.
+ */
+ if (dev->pm_cap) {
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0)
+ return;
+ }
+
+ /* Convert from PCI bus to resource space. */
+ csr = ioremap(pci_resource_start(dev, 0), 8);
+ if (!csr) {
+ dev_warn(&dev->dev, "Can't map e100 registers\n");
+ return;
+ }
+
+ cmd_hi = readb(csr + 3);
+ if (cmd_hi == 0) {
+ dev_warn(&dev->dev, "Firmware left e100 interrupts enabled; disabling\n");
+ writeb(1, csr + 3);
+ }
+
+ iounmap(csr);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ PCI_CLASS_NETWORK_ETHERNET, 8, quirk_e100_interrupt);
+
+/*
+ * The 82575 and 82598 may experience data corruption issues when
+ * transitioning out of L0s. To prevent this, disable L0s on the PCIe link.
+ */
+static void quirk_disable_aspm_l0s(struct pci_dev *dev)
+{
+ dev_info(&dev->dev, "Disabling L0s\n");
+ pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+
+static void fixup_rev1_53c810(struct pci_dev *dev)
+{
+ /* rev 1 ncr53c810 chips don't set the class at all, which means
+ * they don't get their resources remapped. Fix that here.
+ */
+
+ if (dev->class == PCI_CLASS_NOT_DEFINED) {
+ dev_info(&dev->dev, "NCR 53c810 rev 1 detected; setting PCI class\n");
+ dev->class = PCI_CLASS_STORAGE_SCSI;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
+
+/* Enable 1k I/O space granularity on the Intel P64H2 */
+static void quirk_p64h2_1k_io(struct pci_dev *dev)
+{
+ u16 en1k;
+
+ pci_read_config_word(dev, 0x40, &en1k);
+
+ if (en1k & 0x200) {
+ dev_info(&dev->dev, "Enable I/O Space to 1KB granularity\n");
+ dev->io_window_1k = 1;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
+
+/* Under some circumstances, AER is not linked into the extended capability
+ * chain. Force it to be linked by setting the corresponding control bit
+ * in config space.
+ */
+static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
+{
+ uint8_t b;
+ if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
+ if (!(b & 0x20)) {
+ pci_write_config_byte(dev, 0xf41, b | 0x20);
+ dev_info(&dev->dev, "Linking AER extended capability\n");
+ }
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
+ quirk_nvidia_ck804_pcie_aer_ext_cap);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
+ quirk_nvidia_ck804_pcie_aer_ext_cap);
+
+static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
+{
+ /*
+ * Disable PCI Bus Parking and PCI Master read caching on the CX700,
+ * which cause unspecified timing errors with a VT6212L on the PCI
+ * bus, leading to USB 2.0 packet loss.
+ *
+ * This quirk is only enabled if a second (on the external PCI bus)
+ * VT6212L is found -- the CX700 core itself also contains a USB
+ * host controller with the same PCI ID as the VT6212L.
+ */
+
+ /* Count VT6212L instances */
+ struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
+ uint8_t b;
+
+ /* p should contain the first (internal) VT6212L -- see if we have
+ an external one by searching again */
+ p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
+ if (!p)
+ return;
+ pci_dev_put(p);
+
+ if (pci_read_config_byte(dev, 0x76, &b) == 0) {
+ if (b & 0x40) {
+ /* Turn off PCI Bus Parking */
+ pci_write_config_byte(dev, 0x76, b ^ 0x40);
+
+ dev_info(&dev->dev, "Disabling VIA CX700 PCI parking\n");
+ }
+ }
+
+ if (pci_read_config_byte(dev, 0x72, &b) == 0) {
+ if (b != 0) {
+ /* Turn off PCI Master read caching */
+ pci_write_config_byte(dev, 0x72, 0x0);
+
+ /* Set PCI Master Bus time-out to "1x16 PCLK" */
+ pci_write_config_byte(dev, 0x75, 0x1);
+
+ /* Disable "Read FIFO Timer" */
+ pci_write_config_byte(dev, 0x77, 0x0);
+
+ dev_info(&dev->dev, "Disabling VIA CX700 PCI caching\n");
+ }
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
+
+/*
+ * For Broadcom 5706, 5708, 5709 rev. A NICs, any read beyond the
+ * VPD end tag will hang the device. This problem was initially
+ * observed when a vpd entry was created in sysfs
+ * ('/sys/bus/pci/devices/<id>/vpd'). A read to this sysfs entry
+ * will dump 32k of data. Reading a full 32k will cause an access
+ * beyond the VPD end tag causing the device to hang. Once the device
+ * is hung, the bnx2 driver will not be able to reset the device.
+ * We believe that it is legal to read beyond the end tag and
+ * therefore the solution is to limit the read/write length.
+ */
+static void quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
+{
+ /*
+ * Only disable the VPD capability for 5706, 5706S, 5708,
+ * 5708S and 5709 rev. A
+ */
+ if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
+ (dev->device == PCI_DEVICE_ID_NX2_5706S) ||
+ (dev->device == PCI_DEVICE_ID_NX2_5708) ||
+ (dev->device == PCI_DEVICE_ID_NX2_5708S) ||
+ ((dev->device == PCI_DEVICE_ID_NX2_5709) &&
+ (dev->revision & 0xf0) == 0x0)) {
+ if (dev->vpd)
+ dev->vpd->len = 0x80;
+ }
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5706,
+ quirk_brcm_570x_limit_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5706S,
+ quirk_brcm_570x_limit_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5708,
+ quirk_brcm_570x_limit_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5708S,
+ quirk_brcm_570x_limit_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5709,
+ quirk_brcm_570x_limit_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_NX2_5709S,
+ quirk_brcm_570x_limit_vpd);
+
+static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
+{
+ u32 rev;
+
+ pci_read_config_dword(dev, 0xf4, &rev);
+
+ /* Only cap the Max Read Request Size (MRRS) if the device is a 5719 A0 */
+ if (rev == 0x05719000) {
+ int readrq = pcie_get_readrq(dev);
+ if (readrq > 2048)
+ pcie_set_readrq(dev, 2048);
+ }
+}
+
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_TIGON3_5719,
+ quirk_brcm_5719_limit_mrrs);
+
+/* Originally in EDAC sources for i82875P:
+ * Intel tells BIOS developers to hide device 6, which
+ * configures the overflow device access containing
+ * the DRBs; this is where we expose device 6.
+ * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
+ */
+static void quirk_unhide_mch_dev6(struct pci_dev *dev)
+{
+ u8 reg;
+
+ if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) {
+ dev_info(&dev->dev, "Enabling MCH 'Overflow' Device\n");
+ pci_write_config_byte(dev, 0xF4, reg | 0x02);
+ }
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
+ quirk_unhide_mch_dev6);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
+ quirk_unhide_mch_dev6);
+
+#ifdef CONFIG_TILEPRO
+/*
+ * The Tilera TILEmpower tilepro platform needs to set the link speed
+ * to 2.5 GT/s (gigatransfers per second, i.e. Gen 1). The default link
+ * speed setting is 5 GT/s (Gen 2). 0x98 is the Link Control 2 PCIe
+ * capability register of the PEX8624 PCIe switch. The switch
+ * supports link speed auto negotiation, but falsely sets
+ * the link speed to 5GT/s.
+ */
+static void quirk_tile_plx_gen1(struct pci_dev *dev)
+{
+ if (tile_plx_gen1) {
+ pci_write_config_dword(dev, 0x98, 0x1);
+ mdelay(50);
+ }
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1);
+#endif /* CONFIG_TILEPRO */
+
+#ifdef CONFIG_PCI_MSI
+/* Some chipsets do not support MSI. We cannot easily rely on setting
+ * PCI_BUS_FLAGS_NO_MSI in their bus flags because the chipset may also
+ * control other buses that Linux is not aware of. Instead of setting
+ * the flag on all buses in the machine, simply disable MSI globally.
+ */
+static void quirk_disable_all_msi(struct pci_dev *dev)
+{
+ pci_no_msi();
+ dev_warn(&dev->dev, "MSI quirk detected; MSI disabled\n");
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
+
+/* Disable MSI on chipsets that are known to not support it */
+static void quirk_disable_msi(struct pci_dev *dev)
+{
+ if (dev->subordinate) {
+ dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
+ dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
+
+/*
+ * The APC bridge device in AMD 780 family northbridges has some random
+ * OEM subsystem ID in its vendor ID register (erratum 18), so instead
+ * we use the possible vendor/device IDs of the host bridge for the
+ * declared quirk, and search for the APC bridge by slot number.
+ */
+static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
+{
+ struct pci_dev *apc_bridge;
+
+ apc_bridge = pci_get_slot(host_bridge->bus, PCI_DEVFN(1, 0));
+ if (apc_bridge) {
+ if (apc_bridge->device == 0x9602)
+ quirk_disable_msi(apc_bridge);
+ pci_dev_put(apc_bridge);
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
+
+/* Go through the list of HyperTransport capabilities and
+ * return 1 if an HT MSI capability is found and enabled.
+ * The ttl counter bounds the walk in case a broken device
+ * presents a looping capability list. */
+static int msi_ht_cap_enabled(struct pci_dev *dev)
+{
+ int pos, ttl = 48;
+
+ pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
+ while (pos && ttl--) {
+ u8 flags;
+
+ if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
+ &flags) == 0) {
+ dev_info(&dev->dev, "Found %s HT MSI Mapping\n",
+ flags & HT_MSI_FLAGS_ENABLE ?
+ "enabled" : "disabled");
+ return (flags & HT_MSI_FLAGS_ENABLE) != 0;
+ }
+
+ pos = pci_find_next_ht_capability(dev, pos,
+ HT_CAPTYPE_MSI_MAPPING);
+ }
+ return 0;
+}
+
+/* Check the hypertransport MSI mapping to know whether MSI is enabled or not */
+static void quirk_msi_ht_cap(struct pci_dev *dev)
+{
+ if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
+ dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
+ dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
+ quirk_msi_ht_cap);
+
+/* The nVidia CK804 chipset may have two HT MSI mappings.
+ * MSI is supported if the MSI capability is set in any of these mappings.
+ */
+static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
+{
+ struct pci_dev *pdev;
+
+ if (!dev->subordinate)
+ return;
+
+ /* Check the HT MSI cap on this chipset and on the root one;
+ * a single one having MSI enabled is enough to be sure that
+ * MSI is supported.
+ */
+ pdev = pci_get_slot(dev->bus, 0);
+ if (!pdev)
+ return;
+ if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) {
+ dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
+ dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
+ }
+ pci_dev_put(pdev);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
+ quirk_nvidia_ck804_msi_ht_cap);
+
+/* Force enable MSI mapping capability on HT bridges */
+static void ht_enable_msi_mapping(struct pci_dev *dev)
+{
+ int pos, ttl = 48;
+
+ pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
+ while (pos && ttl--) {
+ u8 flags;
+
+ if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
+ &flags) == 0) {
+ dev_info(&dev->dev, "Enabling HT MSI Mapping\n");
+
+ pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
+ flags | HT_MSI_FLAGS_ENABLE);
+ }
+ pos = pci_find_next_ht_capability(dev, pos,
+ HT_CAPTYPE_MSI_MAPPING);
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
+ PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
+ ht_enable_msi_mapping);
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
+ ht_enable_msi_mapping);
+
+/* The P5N32-SLI motherboards from Asus have a problem with MSI
+ * for the MCP55 NIC. It is not yet determined whether the MSI problem
+ * also affects other devices. For now, turn off MSI for this device.
+ */
+static void nvenet_msi_disable(struct pci_dev *dev)
+{
+ const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
+
+ if (board_name &&
+ (strstr(board_name, "P5N32-SLI PREMIUM") ||
+ strstr(board_name, "P5N32-E SLI"))) {
+ dev_info(&dev->dev, "Disabling msi for MCP55 NIC on P5N32-SLI\n");
+ dev->no_msi = 1;
+ }
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
+ PCI_DEVICE_ID_NVIDIA_NVENET_15,
+ nvenet_msi_disable);
+
+/*
+ * Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing
+ * config register. This register controls the routing of legacy
+ * interrupts from devices that route through the MCP55. If this register
+ * is misprogrammed, interrupts are only sent to the BSP, unlike
+ * conventional systems where the IRQ is broadcast to all online CPUs. Not
+ * having this register set properly prevents kdump from booting up
+ * properly, so let's make sure that we have it set correctly.
+ * Note that this is an undocumented register.
+ */
+static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
+{
+ u32 cfg;
+
+ if (!pci_find_capability(dev, PCI_CAP_ID_HT))
+ return;
+
+ pci_read_config_dword(dev, 0x74, &cfg);
+
+ if (cfg & ((1 << 2) | (1 << 15))) {
+ printk(KERN_INFO "Rewriting irq routing register on MCP55\n");
+ cfg &= ~((1 << 2) | (1 << 15));
+ pci_write_config_dword(dev, 0x74, cfg);
+ }
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
+ PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
+ nvbridge_check_legacy_irq_routing);
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
+ PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
+ nvbridge_check_legacy_irq_routing);
+
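+/*
+ * Returns 0 if no HT MSI capability is present on the device, 1 if a
+ * capability is present but not enabled, and 2 if an enabled mapping
+ * was found.
+ */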
+static int ht_check_msi_mapping(struct pci_dev *dev)
+{
+ int pos, ttl = 48;
+ int found = 0;
+
+ /* check whether an HT MSI cap is present on this device, and whether it is enabled */
+ pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
+ while (pos && ttl--) {
+ u8 flags;
+
+ if (found < 1)
+ found = 1;
+ if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
+ &flags) == 0) {
+ if (flags & HT_MSI_FLAGS_ENABLE) {
+ if (found < 2) {
+ found = 2;
+ break;
+ }
+ }
+ }
+ pos = pci_find_next_ht_capability(dev, pos,
+ HT_CAPTYPE_MSI_MAPPING);
+ }
+
+ return found;
+}
+
+static int host_bridge_with_leaf(struct pci_dev *host_bridge)
+{
+ struct pci_dev *dev;
+ int pos;
+ int i, dev_no;
+ int found = 0;
+
+ dev_no = host_bridge->devfn >> 3;
+ for (i = dev_no + 1; i < 0x20; i++) {
+ dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
+ if (!dev)
+ continue;
+
+ /* found the next host bridge? */
+ pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
+ if (pos != 0) {
+ pci_dev_put(dev);
+ break;
+ }
+
+ if (ht_check_msi_mapping(dev)) {
+ found = 1;
+ pci_dev_put(dev);
+ break;
+ }
+ pci_dev_put(dev);
+ }
+
+ return found;
+}
+
+#define PCI_HT_CAP_SLAVE_CTRL0 4 /* link control for link 0 */
+#define PCI_HT_CAP_SLAVE_CTRL1 8 /* link control for link 1 */
+
+static int is_end_of_ht_chain(struct pci_dev *dev)
+{
+ int pos, ctrl_off;
+ int end = 0;
+ u16 flags, ctrl;
+
+ pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
+
+ if (!pos)
+ goto out;
+
+ pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);
+
+ ctrl_off = ((flags >> 10) & 1) ?
+ PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
+ pci_read_config_word(dev, pos + ctrl_off, &ctrl);
+
+ if (ctrl & (1 << 6))
+ end = 1;
+
+out:
+ return end;
+}
+
+static void nv_ht_enable_msi_mapping(struct pci_dev *dev)
+{
+ struct pci_dev *host_bridge;
+ int pos;
+ int i, dev_no;
+ int found = 0;
+
+ dev_no = dev->devfn >> 3;
+ for (i = dev_no; i >= 0; i--) {
+ host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
+ if (!host_bridge)
+ continue;
+
+ pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
+ if (pos != 0) {
+ found = 1;
+ break;
+ }
+ pci_dev_put(host_bridge);
+ }
+
+ if (!found)
+ return;
+
+ /* don't enable an end device/host bridge with a leaf directly here */
+ if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
+ host_bridge_with_leaf(host_bridge))
+ goto out;
+
+ /* the root bridge already did that */
+ if (msi_ht_cap_enabled(host_bridge))
+ goto out;
+
+ ht_enable_msi_mapping(dev);
+
+out:
+ pci_dev_put(host_bridge);
+}
+
+static void ht_disable_msi_mapping(struct pci_dev *dev)
+{
+ int pos, ttl = 48;
+
+ pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
+ while (pos && ttl--) {
+ u8 flags;
+
+ if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
+ &flags) == 0) {
+ dev_info(&dev->dev, "Disabling HT MSI Mapping\n");
+
+ pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
+ flags & ~HT_MSI_FLAGS_ENABLE);
+ }
+ pos = pci_find_next_ht_capability(dev, pos,
+ HT_CAPTYPE_MSI_MAPPING);
+ }
+}
+
+static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
+{
+ struct pci_dev *host_bridge;
+ int pos;
+ int found;
+
+ if (!pci_msi_enabled())
+ return;
+
+ /* check if there is HT MSI cap or enabled on this device */
+ found = ht_check_msi_mapping(dev);
+
+ /* no HT MSI CAP */
+ if (found == 0)
+ return;
+
+ /*
+ * HT MSI mapping should be disabled on devices that are below
+ * a non-Hypertransport host bridge. Locate the host bridge...
+ */
+ host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+ if (host_bridge == NULL) {
+ dev_warn(&dev->dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n");
+ return;
+ }
+
+ pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
+ if (pos != 0) {
+ /* Host bridge is an HT bridge */
+ if (found == 1) {
+ /* it is not enabled, try to enable it */
+ if (all)
+ ht_enable_msi_mapping(dev);
+ else
+ nv_ht_enable_msi_mapping(dev);
+ }
+ goto out;
+ }
+
+ /* HT MSI cap is present but not enabled; leave it alone */
+ if (found == 1)
+ goto out;
+
+ /* Host bridge is not HT; disable HT MSI mapping on this device */
+ ht_disable_msi_mapping(dev);
+
+out:
+ pci_dev_put(host_bridge);
+}
+
+static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
+{
+ return __nv_msi_ht_cap_quirk(dev, 1);
+}
+
+static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
+{
+ return __nv_msi_ht_cap_quirk(dev, 0);
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
+
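+/*
+ * PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG marks devices whose MSI delivery
+ * breaks when the INTx Disable bit in the PCI command register is set;
+ * the MSI core checks this flag and leaves INTx enabled on such devices.
+ */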
+static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
+{
+ dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
+}
+static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
+{
+ struct pci_dev *p;
+
+ /* The SB700 MSI issue is fixed at the hardware level from revision
+ * A21; we need to check the PCI revision ID of the SMBus controller
+ * to determine the SB700 revision.
+ */
+ p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
+ NULL);
+ if (!p)
+ return;
+
+ if ((p->revision < 0x3B) && (p->revision >= 0x30))
+ dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
+ pci_dev_put(p);
+}
+static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
+{
+ /* AR816X/AR817X/E210X MSI is fixed at HW level from revision 0x18 */
+ if (dev->revision < 0x18) {
+ dev_info(&dev->dev, "set MSI_INTX_DISABLE_BUG flag\n");
+ dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_TIGON3_5780,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_TIGON3_5780S,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_TIGON3_5714,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_TIGON3_5714S,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_TIGON3_5715,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
+ PCI_DEVICE_ID_TIGON3_5715S,
+ quirk_msi_intx_disable_bug);
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4390,
+ quirk_msi_intx_disable_ati_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4391,
+ quirk_msi_intx_disable_ati_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4392,
+ quirk_msi_intx_disable_ati_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4393,
+ quirk_msi_intx_disable_ati_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4394,
+ quirk_msi_intx_disable_ati_bug);
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4373,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
+ quirk_msi_intx_disable_bug);
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
+ quirk_msi_intx_disable_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1090,
+ quirk_msi_intx_disable_qca_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1091,
+ quirk_msi_intx_disable_qca_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a0,
+ quirk_msi_intx_disable_qca_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1,
+ quirk_msi_intx_disable_qca_bug);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
+ quirk_msi_intx_disable_qca_bug);
+#endif /* CONFIG_PCI_MSI */
+
+/* Allow manual resource allocation for PCI hotplug bridges
+ * via the pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For
+ * some PCI-PCI hotplug bridges, like the PLX 6254 (formerly HINT HB6),
+ * the kernel fails to allocate resources when a hotplug device is
+ * inserted and the PCI bus is rescanned.
+ */
+static void quirk_hotplug_bridge(struct pci_dev *dev)
+{
+ dev->is_hotplug_bridge = 1;
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
+
+/*
+ * This is a quirk for the Ricoh MMC controller found as a part of
+ * some multifunction chips.
+ *
+ * This is very similar to and based on the ricoh_mmc driver written by
+ * Philip Langdale. Thank you for these magic sequences.
+ *
+ * These chips implement the four main memory card controllers (SD, MMC, MS, xD)
+ * and one or both of CardBus or FireWire.
+ *
+ * It happens that they implement SD and MMC
+ * support as separate controllers (and PCI functions). The Linux SDHCI
+ * driver supports MMC cards, but the chip detects MMC cards in hardware
+ * and directs them to the MMC controller - so the SDHCI driver never sees
+ * them.
+ *
+ * To get around this, we must disable the useless MMC controller.
+ * At that point, the SDHCI controller will start seeing them.
+ * It seems to be the case that the relevant PCI registers to deactivate the
+ * MMC controller live on PCI function 0, which might be the CardBus controller
+ * or the FireWire controller, depending on the particular chip in question.
+ *
+ * This has to be done early, because as soon as we disable the MMC controller,
+ * other PCI functions shift up one level, e.g. function #2 becomes function
+ * #1, and this will confuse the PCI core.
+ */
+
+#ifdef CONFIG_MMC_RICOH_MMC
+static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
+{
+ /* disable via cardbus interface */
+ u8 write_enable;
+ u8 write_target;
+ u8 disable;
+
+ /* disable must be done via function #0 */
+ if (PCI_FUNC(dev->devfn))
+ return;
+
+ pci_read_config_byte(dev, 0xB7, &disable);
+ if (disable & 0x02)
+ return;
+
+ pci_read_config_byte(dev, 0x8E, &write_enable);
+ pci_write_config_byte(dev, 0x8E, 0xAA);
+ pci_read_config_byte(dev, 0x8D, &write_target);
+ pci_write_config_byte(dev, 0x8D, 0xB7);
+ pci_write_config_byte(dev, 0xB7, disable | 0x02);
+ pci_write_config_byte(dev, 0x8E, write_enable);
+ pci_write_config_byte(dev, 0x8D, write_target);
+
+ dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via cardbus function)\n");
+ dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
+
+static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
+{
+ /* disable via firewire interface */
+ u8 write_enable;
+ u8 disable;
+
+ /* disable must be done via function #0 */
+ if (PCI_FUNC(dev->devfn))
+ return;
+ /*
+ * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize
+ * certain types of SD/MMC cards. Lowering the SD base
+ * clock frequency from 200MHz to 50MHz fixes this issue.
+ *
+ * 0x150 - SD2.0 mode enable for changing base clock
+ * frequency to 50MHz
+ * 0xe1 - Base clock frequency
+ * 0x32 - New 50MHz clock frequency
+ * 0xf9 - Key register for 0x150
+ * 0xfc - Key register for 0xe1
+ */
+ if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
+ dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
+ pci_write_config_byte(dev, 0xf9, 0xfc);
+ pci_write_config_byte(dev, 0x150, 0x10);
+ pci_write_config_byte(dev, 0xf9, 0x00);
+ pci_write_config_byte(dev, 0xfc, 0x01);
+ pci_write_config_byte(dev, 0xe1, 0x32);
+ pci_write_config_byte(dev, 0xfc, 0x00);
+
+ dev_notice(&dev->dev, "MMC controller base frequency changed to 50MHz.\n");
+ }
+
+ pci_read_config_byte(dev, 0xCB, &disable);
+
+ if (disable & 0x02)
+ return;
+
+ pci_read_config_byte(dev, 0xCA, &write_enable);
+ pci_write_config_byte(dev, 0xCA, 0x57);
+ pci_write_config_byte(dev, 0xCB, disable | 0x02);
+ pci_write_config_byte(dev, 0xCA, write_enable);
+
+ dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
+ dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
+
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
+#endif /*CONFIG_MMC_RICOH_MMC*/
+
+#ifdef CONFIG_DMAR_TABLE
+#define VTUNCERRMSK_REG 0x1ac
+#define VTD_MSK_SPEC_ERRORS (1 << 31)
+/*
+ * This is a quirk for masking VT-d spec-defined errors to platform error
+ * handling logic. Without this, platforms using Intel 7500 and 5500 chipsets
+ * (and derivative chipsets like X58 etc.) seem to generate NMI/SMI (based
+ * on the RAS config settings of the platform) when a VT-d fault happens.
+ * The resulting SMI causes the system to hang.
+ *
+ * VT-d spec-related errors are already handled by the VT-d OS code, so there
+ * is no need to report the same error through other channels.
+ */
+static void vtd_mask_spec_errors(struct pci_dev *dev)
+{
+ u32 word;
+
+ pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
+ pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
+#endif
+
+static void fixup_ti816x_class(struct pci_dev *dev)
+{
+ /* TI 816x devices do not have class code set when in PCIe boot mode */
+ dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
+ dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
+}
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
+ PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
+
+/* Some PCIe devices do not work reliably at the maximum payload
+ * size they claim to support.
+ */
+static void fixup_mpss_256(struct pci_dev *dev)
+{
+ dev->pcie_mpss = 1; /* 256 bytes */
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
+ PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
+
+/* Intel 5000 and 5100 memory controllers have an erratum with read completion
+ * coalescing (which is enabled by default on some BIOSes) and an MPS of 256B.
+ * Since there is no way of knowing what the PCIe MPS on each fabric will be
+ * until all of the devices are discovered and the buses walked, read completion
+ * coalescing must be disabled. Unfortunately, it cannot be re-enabled because
+ * it is possible to hotplug a device with an MPS of 256B.
+ */
+static void quirk_intel_mc_errata(struct pci_dev *dev)
+{
+ int err;
+ u16 rcc;
+
+ if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
+ return;
+
+ /* The Intel erratum specifies bits to change but does not say what they
+ * are. Keep them magical until the registers and values can be
+ * explained.
+ */
+ err = pci_read_config_word(dev, 0x48, &rcc);
+ if (err) {
+ dev_err(&dev->dev, "Error attempting to read the read completion coalescing register\n");
+ return;
+ }
+
+ if (!(rcc & (1 << 10)))
+ return;
+
+ rcc &= ~(1 << 10);
+
+ err = pci_write_config_word(dev, 0x48, rcc);
+ if (err) {
+ dev_err(&dev->dev, "Error attempting to write the read completion coalescing register\n");
+ return;
+ }
+
+ pr_info_once("Read completion coalescing disabled due to hardware errata relating to 256B MPS\n");
+}
+/* Intel 5000 series memory controllers and ports 2-7 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata);
+/* Intel 5100 series memory controllers and ports 2-7 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
+
+
+/*
+ * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum. To
+ * work around this, query the size it should be configured to by the device and
+ * modify the resource end to correspond to this new size.
+ */
+static void quirk_intel_ntb(struct pci_dev *dev)
+{
+ int rc;
+ u8 val;
+
+ rc = pci_read_config_byte(dev, 0x00D0, &val);
+ if (rc)
+ return;
+
+ dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1;
+
+ rc = pci_read_config_byte(dev, 0x00D1, &val);
+ if (rc)
+ return;
+
+ dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
+
+static ktime_t fixup_debug_start(struct pci_dev *dev,
+ void (*fn)(struct pci_dev *dev))
+{
+ ktime_t calltime = ktime_set(0, 0);
+
+ dev_dbg(&dev->dev, "calling %pF\n", fn);
+ if (initcall_debug) {
+ pr_debug("calling %pF @ %i for %s\n",
+ fn, task_pid_nr(current), dev_name(&dev->dev));
+ calltime = ktime_get();
+ }
+
+ return calltime;
+}
+
+static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
+ void (*fn)(struct pci_dev *dev))
+{
+ ktime_t delta, rettime;
+ unsigned long long duration;
+
+ if (initcall_debug) {
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+ pr_debug("pci fixup %pF returned after %lld usecs for %s\n",
+ fn, duration, dev_name(&dev->dev));
+ }
+}
+
+/*
+ * Some BIOS implementations leave the Intel GPU interrupts enabled,
+ * even though no one is handling them (e.g. the i915 driver is never loaded).
+ * Additionally, the interrupt destination is not set up properly
+ * and the interrupt ends up -somewhere-.
+ *
+ * These spurious interrupts are "sticky" and the kernel disables
+ * the (shared) interrupt line after 100,000+ generated interrupts.
+ *
+ * Fix it by disabling the still-enabled interrupts.
+ * This resolves crashes often seen on monitor unplug.
+ */
+#define I915_DEIER_REG 0x4400c
+static void disable_igfx_irq(struct pci_dev *dev)
+{
+ void __iomem *regs = pci_iomap(dev, 0, 0);
+ if (regs == NULL) {
+ dev_warn(&dev->dev, "igfx quirk: Can't iomap PCI device\n");
+ return;
+ }
+
+ /* Check if any interrupt line is still enabled */
+ if (readl(regs + I915_DEIER_REG) != 0) {
+ dev_warn(&dev->dev, "BIOS left Intel GPU interrupts enabled; disabling\n");
+
+ writel(0, regs + I915_DEIER_REG);
+ }
+
+ pci_iounmap(dev, regs);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
+
+/*
+ * PCI devices on certain Intel chipsets can skip the 10 ms delay
+ * before entering D3 mode.
+ */
+static void quirk_remove_d3_delay(struct pci_dev *dev)
+{
+ dev->d3_delay = 0;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
+
+/*
+ * Some devices may pass our check in pci_intx_mask_supported() because
+ * PCI_COMMAND_INTX_DISABLE appears to work, even though they do not
+ * actually support INTx masking properly.
+ */
+static void quirk_broken_intx_masking(struct pci_dev *dev)
+{
+ dev->broken_intx_masking = 1;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
+ quirk_broken_intx_masking);
+/*
+ * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
+ * Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC
+ *
+ * RTL8110SC - Fails under PCI device assignment using DisINTx masking.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
+ quirk_broken_intx_masking);
+
+static void quirk_no_bus_reset(struct pci_dev *dev)
+{
+ dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
+}
+
+/*
+ * Atheros AR93xx chips do not behave correctly after a bus reset. The device
+ * will throw a Link Down error on AER-capable systems, and regardless of AER,
+ * the device's config space is never accessible again, typically causing the
+ * system to hang or reset when access is attempted.
+ * http://www.spinics.net/lists/linux-pci/msg34797.html
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
+
+static void quirk_no_pm_reset(struct pci_dev *dev)
+{
+ /*
+ * We can't do a bus reset on root bus devices, but an ineffective
+ * PM reset may be better than nothing.
+ */
+ if (!pci_is_root_bus(dev->bus))
+ dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET;
+}
+
+/*
+ * Some AMD/ATI GPUs (HD8570 - Oland) report that a D3hot->D0 transition
+ * causes a reset (i.e., they advertise NoSoftRst-). This transition seems
+ * to have no effect on the device: it retains the framebuffer contents and
+ * monitor sync. Advertising this support makes other layers, like VFIO,
+ * assume pci_reset_function() is viable for this device. Mark it as
+ * unavailable to skip it when testing reset methods.
+ */
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+ PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
+
+#ifdef CONFIG_ACPI
+/*
+ * Apple: Shut down the Cactus Ridge Thunderbolt controller.
+ *
+ * On Apple hardware the Cactus Ridge Thunderbolt controller needs to be
+ * shut down before suspend. Otherwise the native host interface (NHI) will
+ * not be present after resume if a device was plugged in before suspend.
+ *
+ * The Thunderbolt controller consists of a PCIe switch with downstream
+ * bridges leading to the NHI and to the tunnel PCI bridges.
+ *
+ * This quirk cuts power to the whole chip. Therefore we have to apply it
+ * during suspend_noirq of the upstream bridge.
+ *
+ * Power is automagically restored before resume. No action is needed.
+ */
+static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
+{
+ acpi_handle bridge, SXIO, SXFP, SXLV;
+
+ if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
+ return;
+ if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
+ return;
+ bridge = ACPI_HANDLE(&dev->dev);
+ if (!bridge)
+ return;
+ /*
+ * SXIO and SXLV are present only on machines requiring this quirk.
+ * Thunderbolt bridges in external devices might have the same device ID
+ * as those on the host, but they will not have the associated ACPI
+ * methods. This implicitly checks that we are at the right bridge.
+ */
+ if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
+ || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
+ || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
+ return;
+ dev_info(&dev->dev, "quirk: cutting power to thunderbolt controller...\n");
+
+ /* magic sequence */
+ acpi_execute_simple_method(SXIO, NULL, 1);
+ acpi_execute_simple_method(SXFP, NULL, 0);
+ msleep(300);
+ acpi_execute_simple_method(SXLV, NULL, 0);
+ acpi_execute_simple_method(SXIO, NULL, 0);
+ acpi_execute_simple_method(SXLV, NULL, 0);
+}
+DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, 0x1547,
+ quirk_apple_poweroff_thunderbolt);
+
+/*
+ * Apple: Wait for the Thunderbolt controller to reestablish PCI tunnels.
+ *
+ * During suspend the Thunderbolt controller is reset and all PCI
+ * tunnels are lost. The NHI driver will try to reestablish all tunnels
+ * during resume. We have to manually wait for the NHI since there is
+ * no parent-child relationship between the NHI and the tunneled
+ * bridges.
+ */
+static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
+{
+ struct pci_dev *sibling = NULL;
+ struct pci_dev *nhi = NULL;
+
+ if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
+ return;
+ if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+ /*
+ * Find the NHI and confirm that we are a bridge on the tb host
+ * controller and not on a tb endpoint.
+ */
+ sibling = pci_get_slot(dev->bus, 0x0);
+ if (sibling == dev)
+ goto out; /* we are the downstream bridge to the NHI */
+ if (!sibling || !sibling->subordinate)
+ goto out;
+ nhi = pci_get_slot(sibling->subordinate, 0x0);
+ if (!nhi)
+ goto out;
+ if (nhi->vendor != PCI_VENDOR_ID_INTEL
+ || (nhi->device != 0x1547 && nhi->device != 0x156c)
+ || nhi->subsystem_vendor != 0x2222
+ || nhi->subsystem_device != 0x1111)
+ goto out;
+ dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
+ device_pm_wait_for_dev(&dev->dev, &nhi->dev);
+out:
+ pci_dev_put(nhi);
+ pci_dev_put(sibling);
+}
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1547,
+ quirk_apple_wait_for_thunderbolt);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x156d,
+ quirk_apple_wait_for_thunderbolt);
+#endif
+
+static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
+ struct pci_fixup *end)
+{
+ ktime_t calltime;
+
+ for (; f < end; f++)
+ if ((f->class == (u32) (dev->class >> f->class_shift) ||
+ f->class == (u32) PCI_ANY_ID) &&
+ (f->vendor == dev->vendor ||
+ f->vendor == (u16) PCI_ANY_ID) &&
+ (f->device == dev->device ||
+ f->device == (u16) PCI_ANY_ID)) {
+ calltime = fixup_debug_start(dev, f->hook);
+ f->hook(dev);
+ fixup_debug_report(dev, calltime, f->hook);
+ }
+}
+
+extern struct pci_fixup __start_pci_fixups_early[];
+extern struct pci_fixup __end_pci_fixups_early[];
+extern struct pci_fixup __start_pci_fixups_header[];
+extern struct pci_fixup __end_pci_fixups_header[];
+extern struct pci_fixup __start_pci_fixups_final[];
+extern struct pci_fixup __end_pci_fixups_final[];
+extern struct pci_fixup __start_pci_fixups_enable[];
+extern struct pci_fixup __end_pci_fixups_enable[];
+extern struct pci_fixup __start_pci_fixups_resume[];
+extern struct pci_fixup __end_pci_fixups_resume[];
+extern struct pci_fixup __start_pci_fixups_resume_early[];
+extern struct pci_fixup __end_pci_fixups_resume_early[];
+extern struct pci_fixup __start_pci_fixups_suspend[];
+extern struct pci_fixup __end_pci_fixups_suspend[];
+extern struct pci_fixup __start_pci_fixups_suspend_late[];
+extern struct pci_fixup __end_pci_fixups_suspend_late[];
+
+static bool pci_apply_fixup_final_quirks;
+
+void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
+{
+ struct pci_fixup *start, *end;
+
+ switch (pass) {
+ case pci_fixup_early:
+ start = __start_pci_fixups_early;
+ end = __end_pci_fixups_early;
+ break;
+
+ case pci_fixup_header:
+ start = __start_pci_fixups_header;
+ end = __end_pci_fixups_header;
+ break;
+
+ case pci_fixup_final:
+ if (!pci_apply_fixup_final_quirks)
+ return;
+ start = __start_pci_fixups_final;
+ end = __end_pci_fixups_final;
+ break;
+
+ case pci_fixup_enable:
+ start = __start_pci_fixups_enable;
+ end = __end_pci_fixups_enable;
+ break;
+
+ case pci_fixup_resume:
+ start = __start_pci_fixups_resume;
+ end = __end_pci_fixups_resume;
+ break;
+
+ case pci_fixup_resume_early:
+ start = __start_pci_fixups_resume_early;
+ end = __end_pci_fixups_resume_early;
+ break;
+
+ case pci_fixup_suspend:
+ start = __start_pci_fixups_suspend;
+ end = __end_pci_fixups_suspend;
+ break;
+
+ case pci_fixup_suspend_late:
+ start = __start_pci_fixups_suspend_late;
+ end = __end_pci_fixups_suspend_late;
+ break;
+
+ default:
+ /* stupid compiler warning, you would think with an enum... */
+ return;
+ }
+ pci_do_fixups(dev, start, end);
+}
+EXPORT_SYMBOL(pci_fixup_device);
+
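+/*
+ * Illustrative sketch: a quirk is registered with one of the
+ * DECLARE_PCI_FIXUP_* macros, which places a struct pci_fixup in the
+ * matching linker section walked by pci_do_fixups() above. The vendor
+ * ID, device ID, and function name below are hypothetical placeholders:
+ *
+ *	static void quirk_example(struct pci_dev *dev)
+ *	{
+ *		dev_info(&dev->dev, "example quirk at the final pass\n");
+ *	}
+ *	DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_example);
+ */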
+
+static int __init pci_apply_final_quirks(void)
+{
+ struct pci_dev *dev = NULL;
+ u8 cls = 0;
+ u8 tmp;
+
+ if (pci_cache_line_size)
+ printk(KERN_DEBUG "PCI: CLS %u bytes\n",
+ pci_cache_line_size << 2);
+
+ pci_apply_fixup_final_quirks = true;
+ for_each_pci_dev(dev) {
+ pci_fixup_device(pci_fixup_final, dev);
+ /*
+ * If arch hasn't set it explicitly yet, use the CLS
+ * value shared by all PCI devices. If there's a
+ * mismatch, fall back to the default value.
+ */
+ if (!pci_cache_line_size) {
+ pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
+ if (!cls)
+ cls = tmp;
+ if (!tmp || cls == tmp)
+ continue;
+
+ printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n",
+ cls << 2, tmp << 2,
+ pci_dfl_cache_line_size << 2);
+ pci_cache_line_size = pci_dfl_cache_line_size;
+ }
+ }
+
+ if (!pci_cache_line_size) {
+ printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
+ cls << 2, pci_dfl_cache_line_size << 2);
+ pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
+ }
+
+ return 0;
+}
+
+fs_initcall_sync(pci_apply_final_quirks);
+
+/*
+ * The following are device-specific reset methods which can be used to
+ * reset a single function if other methods (e.g. FLR, PM D0->D3) are
+ * not available.
+ */
+static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
+{
+ int pos;
+
+ /* only implement PCI_CLASS_SERIAL_USB at present */
+ if (dev->class == PCI_CLASS_SERIAL_USB) {
+ pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
+ if (!pos)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_write_config_byte(dev, pos + 0x4, 1);
+ msleep(100);
+
+ return 0;
+ } else {
+ return -ENOTTY;
+ }
+}
+
+static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
+{
+ /*
+ * http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf
+ *
+ * The 82599 supports FLR on VFs, but FLR support is reported only
+ * in the PF DEVCAP (sec 9.3.10.4), not in the VF DEVCAP (sec 9.5).
+ * Therefore, we can't use pcie_flr(), which checks the VF DEVCAP.
+ */
+
+ if (probe)
+ return 0;
+
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
+
+ pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+
+ msleep(100);
+
+ return 0;
+}
+
+#include "../gpu/drm/i915/i915_reg.h"
+#define MSG_CTL 0x45010
+#define NSDE_PWR_STATE 0xd0100
+#define IGD_OPERATION_TIMEOUT 10000 /* set timeout 10 seconds */
+
+static int reset_ivb_igd(struct pci_dev *dev, int probe)
+{
+ void __iomem *mmio_base;
+ unsigned long timeout;
+ u32 val;
+
+ if (probe)
+ return 0;
+
+ mmio_base = pci_iomap(dev, 0, 0);
+ if (!mmio_base)
+ return -ENOMEM;
+
+ iowrite32(0x00000002, mmio_base + MSG_CTL);
+
+ /*
+ * Clobbering the SOUTH_CHICKEN2 register is fine only if the next
+ * driver loaded sets the right bits. However, this is a reset and
+ * the bits have been set by i915 previously, so we clobber the
+ * SOUTH_CHICKEN2 register directly here.
+ */
+ iowrite32(0x00000005, mmio_base + SOUTH_CHICKEN2);
+
+ val = ioread32(mmio_base + PCH_PP_CONTROL) & 0xfffffffe;
+ iowrite32(val, mmio_base + PCH_PP_CONTROL);
+
+ timeout = jiffies + msecs_to_jiffies(IGD_OPERATION_TIMEOUT);
+ do {
+ val = ioread32(mmio_base + PCH_PP_STATUS);
+ if ((val & 0xb0000000) == 0)
+ goto reset_complete;
+ msleep(10);
+ } while (time_before(jiffies, timeout));
+ dev_warn(&dev->dev, "timeout during reset\n");
+
+reset_complete:
+ iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE);
+
+ pci_iounmap(dev, mmio_base);
+ return 0;
+}
+
+/*
+ * Device-specific reset method for Chelsio T4-based adapters.
+ */
+static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
+{
+ u16 old_command;
+ u16 msix_flags;
+
+ /*
+ * If this isn't a Chelsio T4-based device, return -ENOTTY indicating
+ * that we have no device-specific reset method.
+ */
+ if ((dev->device & 0xf000) != 0x4000)
+ return -ENOTTY;
+
+ /*
+ * If this is the "probe" phase, return 0 indicating that we can
+ * reset this device.
+ */
+ if (probe)
+ return 0;
+
+ /*
+ * T4 can wedge if there are DMAs in flight within the chip and Bus
+ * Master has been disabled. We need to have it on until the Function
+ * Level Reset completes. (Bus Master is disabled in
+ * pci_reset_function().)
+ */
+ pci_read_config_word(dev, PCI_COMMAND, &old_command);
+ pci_write_config_word(dev, PCI_COMMAND,
+ old_command | PCI_COMMAND_MASTER);
+
+ /*
+ * Perform the actual device function reset, saving and restoring
+ * configuration information around the reset.
+ */
+ pci_save_state(dev);
+
+ /*
+ * T4 also suffers a Head-Of-Line blocking problem if MSI-X interrupts
+ * are disabled when an MSI-X interrupt message needs to be delivered.
+ * So we briefly re-enable MSI-X interrupts for the duration of the
+ * FLR. The pci_restore_state() below will restore the original
+ * MSI-X state.
+ */
+ pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags);
+ if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0)
+ pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS,
+ msix_flags |
+ PCI_MSIX_FLAGS_ENABLE |
+ PCI_MSIX_FLAGS_MASKALL);
+
+ /*
+ * Start of pcie_flr() code sequence. This reset code is a copy of
+ * the guts of pcie_flr() because that's not an exported function.
+ */
+
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
+
+ pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+ msleep(100);
+
+ /*
+ * End of pcie_flr() code sequence.
+ */
+
+ /*
+ * Restore the configuration information (BAR values, etc.) including
+ * the original PCI Configuration Space Command word, and return
+ * success.
+ */
+ pci_restore_state(dev);
+ pci_write_config_word(dev, PCI_COMMAND, old_command);
+ return 0;
+}
+
+#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed
+#define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156
+#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166
+
+static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
+ reset_intel_82599_sfp_virtfn },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA,
+ reset_ivb_igd },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
+ reset_ivb_igd },
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ reset_intel_generic_dev },
+ { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+ reset_chelsio_generic_dev },
+ { 0 }
+};
+
+/*
+ * These device-specific reset methods are here rather than in a driver
+ * because when a host assigns a device to a guest VM, the host may need
+ * to reset the device but probably doesn't have a driver for it.
+ */
+int pci_dev_specific_reset(struct pci_dev *dev, int probe)
+{
+ const struct pci_dev_reset_methods *i;
+
+ for (i = pci_dev_reset_methods; i->reset; i++) {
+ if ((i->vendor == dev->vendor ||
+ i->vendor == (u16)PCI_ANY_ID) &&
+ (i->device == dev->device ||
+ i->device == (u16)PCI_ANY_ID))
+ return i->reset(dev, probe);
+ }
+
+ return -ENOTTY;
+}
+
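+/*
+ * Illustrative calling convention (the caller shape is assumed, not
+ * taken from this file): probe with probe=1 to ask whether a
+ * device-specific method exists, then call with probe=0 to perform
+ * the reset.
+ *
+ *	if (pci_dev_specific_reset(dev, 1) == 0)
+ *		rc = pci_dev_specific_reset(dev, 0);
+ */
+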
+static void quirk_dma_func0_alias(struct pci_dev *dev)
+{
+ if (PCI_FUNC(dev->devfn) != 0) {
+ dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
+ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+ }
+}
+
+/*
+ * https://bugzilla.redhat.com/show_bug.cgi?id=605888
+ *
+ * Some Ricoh devices use function 0 as the PCIe requester ID for DMA.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
+
+static void quirk_dma_func1_alias(struct pci_dev *dev)
+{
+ if (PCI_FUNC(dev->devfn) != 1) {
+ dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 1);
+ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+ }
+}
+
+/*
+ * Marvell 88SE9123 uses function 1 as the requester ID for DMA. In some
+ * SKUs function 1 is present and is a legacy IDE controller; in other
+ * SKUs this function is not present, making this a ghost requester.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=42679
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
+ quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
+ quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
+ quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c59 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
+ quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
+ quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
+ quirk_dma_func1_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
+ quirk_dma_func1_alias);
+/* https://bugs.gentoo.org/show_bug.cgi?id=497630 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
+ PCI_DEVICE_ID_JMICRON_JMB388_ESD,
+ quirk_dma_func1_alias);
+
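+/*
+ * Sketch of how an alias is consumed, assuming the
+ * pci_for_each_dma_alias() helper; attach_alias_to_domain() is a
+ * hypothetical IOMMU-side function. The walk visits the device itself
+ * and then dma_alias_devfn when PCI_DEV_FLAGS_DMA_ALIAS_DEVFN is set,
+ * so DMA from the ghost requester ID hits the same translation context.
+ *
+ *	static int add_alias(struct pci_dev *pdev, u16 alias, void *data)
+ *	{
+ *		return attach_alias_to_domain(data, alias);
+ *	}
+ *
+ *	pci_for_each_dma_alias(pdev, add_alias, domain);
+ */
+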
+/*
+ * Some devices DMA with the wrong devfn, not just the wrong function.
+ * quirk_fixed_dma_alias() uses this table to create fixed aliases, where
+ * the alias is "fixed" and independent of the device devfn.
+ *
+ * For example, the Adaptec 3405 is a PCIe card with an Intel 80333 I/O
+ * processor. To software, this appears as a PCIe-to-PCI/X bridge with a
+ * single device on the secondary bus. In reality, the single exposed
+ * device at 0e.0 is the Address Translation Unit (ATU) of the controller
+ * that provides a bridge to the internal bus of the I/O processor. The
+ * controller supports private devices, which can be hidden from PCI config
+ * space. In the case of the Adaptec 3405, a private device at 01.0
+ * appears to be the DMA engine, which therefore needs to become a DMA
+ * alias for the device.
+ */
+static const struct pci_device_id fixed_dma_alias_tbl[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
+ PCI_VENDOR_ID_ADAPTEC2, 0x02bb), /* Adaptec 3405 */
+ .driver_data = PCI_DEVFN(1, 0) },
+ { 0 }
+};
+
+static void quirk_fixed_dma_alias(struct pci_dev *dev)
+{
+ const struct pci_device_id *id;
+
+ id = pci_match_id(fixed_dma_alias_tbl, dev);
+ if (id) {
+ dev->dma_alias_devfn = id->driver_data;
+ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+ dev_info(&dev->dev, "Enabling fixed DMA alias to %02x.%d\n",
+ PCI_SLOT(dev->dma_alias_devfn),
+ PCI_FUNC(dev->dma_alias_devfn));
+ }
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
+
+/*
+ * A few PCIe-to-PCI bridges fail to expose a PCIe capability, resulting in
+ * the wrong DMA alias being used for the device. Some of these devices can
+ * be used as either forward or reverse bridges, so we need to test whether
+ * the device is operating in the correct mode. We could probably apply this
+ * quirk to PCI_ANY_ID, but for now we'll just use known offenders. The test:
+ * if @pdev is a non-root, non-PCIe bridge whose upstream device is PCIe but
+ * is not a PCIe-to-PCI bridge, then @pdev is actually a PCIe-to-PCI bridge.
+ */
+static void quirk_use_pcie_bridge_dma_alias(struct pci_dev *pdev)
+{
+ if (!pci_is_root_bus(pdev->bus) &&
+ pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
+ !pci_is_pcie(pdev) && pci_is_pcie(pdev->bus->self) &&
+ pci_pcie_type(pdev->bus->self) != PCI_EXP_TYPE_PCI_BRIDGE)
+ pdev->dev_flags |= PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS;
+}
+/* ASM1083/1085, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c46 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
+ quirk_use_pcie_bridge_dma_alias);
+/* Tundra 8113, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c43 */
+DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);
+/* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */
+DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
+/* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */
+DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
+
+/*
+ * AMD has indicated that the devices below do not support peer-to-peer
+ * in any system where they are found in the southbridge along with an
+ * AMD IOMMU. Multifunction devices that do not support
+ * peer-to-peer between functions can claim to support a subset of ACS.
+ * Such devices effectively enable request redirect (RR) and completion
+ * redirect (CR) since all transactions are redirected to the upstream
+ * root complex.
+ *
+ * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94086
+ * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94102
+ * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/99402
+ *
+ * 1002:4385 SBx00 SMBus Controller
+ * 1002:439c SB7x0/SB8x0/SB9x0 IDE Controller
+ * 1002:4383 SBx00 Azalia (Intel HDA)
+ * 1002:439d SB7x0/SB8x0/SB9x0 LPC host controller
+ * 1002:4384 SBx00 PCI to PCI Bridge
+ * 1002:4399 SB7x0/SB8x0/SB9x0 USB OHCI2 Controller
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=81841#c15
+ *
+ * 1022:780f [AMD] FCH PCI Bridge
+ * 1022:7809 [AMD] FCH USB OHCI Controller
+ */
+static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
+{
+#ifdef CONFIG_ACPI
+ struct acpi_table_header *header = NULL;
+ acpi_status status;
+
+ /* Targeting multifunction devices on the SB (appears on root bus) */
+ if (!dev->multifunction || !pci_is_root_bus(dev->bus))
+ return -ENODEV;
+
+ /* The IVRS table describes the AMD IOMMU */
+ status = acpi_get_table("IVRS", 0, &header);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ /* Filter out flags not applicable to multifunction */
+ acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
+
+ return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
+#else
+ return -ENODEV;
+#endif
+}
+
+/*
+ * Many Intel PCH root ports do provide ACS-like features to disable peer
+ * transactions and validate bus numbers in requests, but do not provide an
+ * actual PCIe ACS capability. This is the list of device IDs known to fall
+ * into that category as provided by Intel in Red Hat bugzilla 1037684.
+ */
+static const u16 pci_quirk_intel_pch_acs_ids[] = {
+ /* Ibexpeak PCH */
+ 0x3b42, 0x3b43, 0x3b44, 0x3b45, 0x3b46, 0x3b47, 0x3b48, 0x3b49,
+ 0x3b4a, 0x3b4b, 0x3b4c, 0x3b4d, 0x3b4e, 0x3b4f, 0x3b50, 0x3b51,
+ /* Cougarpoint PCH */
+ 0x1c10, 0x1c11, 0x1c12, 0x1c13, 0x1c14, 0x1c15, 0x1c16, 0x1c17,
+ 0x1c18, 0x1c19, 0x1c1a, 0x1c1b, 0x1c1c, 0x1c1d, 0x1c1e, 0x1c1f,
+ /* Pantherpoint PCH */
+ 0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14, 0x1e15, 0x1e16, 0x1e17,
+ 0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d, 0x1e1e, 0x1e1f,
+ /* Lynxpoint-H PCH */
+ 0x8c10, 0x8c11, 0x8c12, 0x8c13, 0x8c14, 0x8c15, 0x8c16, 0x8c17,
+ 0x8c18, 0x8c19, 0x8c1a, 0x8c1b, 0x8c1c, 0x8c1d, 0x8c1e, 0x8c1f,
+ /* Lynxpoint-LP PCH */
+ 0x9c10, 0x9c11, 0x9c12, 0x9c13, 0x9c14, 0x9c15, 0x9c16, 0x9c17,
+ 0x9c18, 0x9c19, 0x9c1a, 0x9c1b,
+ /* Wildcat PCH */
+ 0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97,
+ 0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
+ /* Patsburg (X79) PCH */
+ 0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,
+ /* Wellsburg (X99) PCH */
+ 0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17,
+ 0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e,
+};
+
+static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
+{
+ int i;
+
+ /* Filter out a few obvious non-matches first */
+ if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(pci_quirk_intel_pch_acs_ids); i++)
+ if (pci_quirk_intel_pch_acs_ids[i] == dev->device)
+ return true;
+
+ return false;
+}
+
+#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV)
+
+static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ?
+ INTEL_PCH_ACS_FLAGS : 0;
+
+ if (!pci_quirk_intel_pch_acs_match(dev))
+ return -ENOTTY;
+
+ return acs_flags & ~flags ? 0 : 1;
+}
+
+static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ /*
+ * SV, TB, and UF are not relevant to multifunction endpoints.
+ *
+ * Multifunction devices are only required to implement RR, CR, and DT
+ * in their ACS capability if they support peer-to-peer transactions.
+ * Devices matching this quirk have been verified by the vendor to not
+ * perform peer-to-peer with other functions, allowing us to mask out
+ * these bits as if they were unimplemented in the ACS capability.
+ */
+ acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
+ PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+
+ return acs_flags ? 0 : 1;
+}
+
+static const struct pci_dev_acs_enabled {
+ u16 vendor;
+ u16 device;
+ int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
+} pci_dev_acs_enabled[] = {
+ { PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_AMD, 0x780f, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
+ /* 82580 */
+ { PCI_VENDOR_ID_INTEL, 0x1509, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x150E, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x150F, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1510, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1511, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1516, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1527, pci_quirk_mf_endpoint_acs },
+ /* 82576 */
+ { PCI_VENDOR_ID_INTEL, 0x10C9, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10E6, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10E7, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10E8, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x150A, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x150D, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1518, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1526, pci_quirk_mf_endpoint_acs },
+ /* 82575 */
+ { PCI_VENDOR_ID_INTEL, 0x10A7, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10A9, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10D6, pci_quirk_mf_endpoint_acs },
+ /* I350 */
+ { PCI_VENDOR_ID_INTEL, 0x1521, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1522, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1523, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1524, pci_quirk_mf_endpoint_acs },
+ /* 82571 (Quads omitted due to non-ACS switch) */
+ { PCI_VENDOR_ID_INTEL, 0x105E, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x105F, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x1060, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_INTEL, 0x10D9, pci_quirk_mf_endpoint_acs },
+ /* Intel PCH root ports */
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
+ { 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
+ { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
+ { 0 }
+};
+
+int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
+{
+ const struct pci_dev_acs_enabled *i;
+ int ret;
+
+ /*
+ * Allow devices that do not expose standard PCIe ACS capabilities
+ * or control to indicate their support here. Multifunction express
+ * devices that do not allow internal peer-to-peer between functions,
+ * but do not implement PCIe ACS, may wish to return true here.
+ */
+ for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
+ if ((i->vendor == dev->vendor ||
+ i->vendor == (u16)PCI_ANY_ID) &&
+ (i->device == dev->device ||
+ i->device == (u16)PCI_ANY_ID)) {
+ ret = i->acs_enabled(dev, acs_flags);
+ if (ret >= 0)
+ return ret;
+ }
+ }
+
+ return -ENOTTY;
+}
+
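+/*
+ * Sketch of the intended fallback, with pci_std_acs_enabled() as a
+ * hypothetical stand-in for the standard ACS capability test: the
+ * quirk table is consulted first, and -ENOTTY means "no quirk claims
+ * this device, use the capability".
+ *
+ *	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
+ *	if (ret >= 0)
+ *		return ret > 0;
+ *	return pci_std_acs_enabled(pdev, acs_flags);
+ */
+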
+/* Config space offset of Root Complex Base Address register */
+#define INTEL_LPC_RCBA_REG 0xf0
+/* 31:14 RCBA address */
+#define INTEL_LPC_RCBA_MASK 0xffffc000
+/* RCBA Enable */
+#define INTEL_LPC_RCBA_ENABLE (1 << 0)
+
+/* Backbone Scratch Pad Register */
+#define INTEL_BSPR_REG 0x1104
+/* Backbone Peer Non-Posted Disable */
+#define INTEL_BSPR_REG_BPNPD (1 << 8)
+/* Backbone Peer Posted Disable */
+#define INTEL_BSPR_REG_BPPD (1 << 9)
+
+/* Upstream Peer Decode Configuration Register */
+#define INTEL_UPDCR_REG 0x1114
+/* 5:0 Peer Decode Enable bits */
+#define INTEL_UPDCR_REG_MASK 0x3f
+
+static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
+{
+ u32 rcba, bspr, updcr;
+ void __iomem *rcba_mem;
+
+ /*
+ * Read the RCBA register from the LPC (D31:F0). PCH root ports
+ * are D28:F* and therefore get probed before LPC; thus we can't
+ * use pci_get_slot()/pci_read_config_dword() here.
+ */
+ pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
+ INTEL_LPC_RCBA_REG, &rcba);
+ if (!(rcba & INTEL_LPC_RCBA_ENABLE))
+ return -EINVAL;
+
+ rcba_mem = ioremap_nocache(rcba & INTEL_LPC_RCBA_MASK,
+ PAGE_ALIGN(INTEL_UPDCR_REG));
+ if (!rcba_mem)
+ return -ENOMEM;
+
+ /*
+ * The BSPR can disallow peer cycles, but it's set by soft strap and
+ * therefore read-only. If both posted and non-posted peer cycles are
+ * disallowed, we're OK. If either is allowed, then we need to use
+ * the UPDCR to disable peer decodes for each port. This provides the
+ * PCIe ACS equivalent of PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF.
+ */
+ bspr = readl(rcba_mem + INTEL_BSPR_REG);
+ bspr &= INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD;
+ if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) {
+ updcr = readl(rcba_mem + INTEL_UPDCR_REG);
+ if (updcr & INTEL_UPDCR_REG_MASK) {
+ dev_info(&dev->dev, "Disabling UPDCR peer decodes\n");
+ updcr &= ~INTEL_UPDCR_REG_MASK;
+ writel(updcr, rcba_mem + INTEL_UPDCR_REG);
+ }
+ }
+
+ iounmap(rcba_mem);
+ return 0;
+}
+
+/* Miscellaneous Port Configuration register */
+#define INTEL_MPC_REG 0xd8
+/* MPC: Invalid Receive Bus Number Check Enable */
+#define INTEL_MPC_REG_IRBNCE (1 << 26)
+
+static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
+{
+ u32 mpc;
+
+ /*
+ * The IRBNCE bit of the MPC register enables the equivalent of
+ * PCI ACS Source Validation (PCI_ACS_SV), which ensures that
+ * requester IDs fall within the bus number range of the bridge.
+ * Enable it if it is not already set.
+ */
+ pci_read_config_dword(dev, INTEL_MPC_REG, &mpc);
+ if (!(mpc & INTEL_MPC_REG_IRBNCE)) {
+ dev_info(&dev->dev, "Enabling MPC IRBNCE\n");
+ mpc |= INTEL_MPC_REG_IRBNCE;
+ /* IRBNCE is bit 26, so a word write can't set it; write a dword */
+ pci_write_config_dword(dev, INTEL_MPC_REG, mpc);
+ }
+}
+
+static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
+{
+ if (!pci_quirk_intel_pch_acs_match(dev))
+ return -ENOTTY;
+
+ if (pci_quirk_enable_intel_lpc_acs(dev)) {
+ dev_warn(&dev->dev, "Failed to enable Intel PCH ACS quirk\n");
+ return 0;
+ }
+
+ pci_quirk_enable_intel_rp_mpc_acs(dev);
+
+ dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK;
+
+ dev_info(&dev->dev, "Intel PCH root port ACS workaround enabled\n");
+
+ return 0;
+}
+
+static const struct pci_dev_enable_acs {
+ u16 vendor;
+ u16 device;
+ int (*enable_acs)(struct pci_dev *dev);
+} pci_dev_enable_acs[] = {
+ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs },
+ { 0 }
+};
+
+void pci_dev_specific_enable_acs(struct pci_dev *dev)
+{
+ const struct pci_dev_enable_acs *i;
+ int ret;
+
+ for (i = pci_dev_enable_acs; i->enable_acs; i++) {
+ if ((i->vendor == dev->vendor ||
+ i->vendor == (u16)PCI_ANY_ID) &&
+ (i->device == dev->device ||
+ i->device == (u16)PCI_ANY_ID)) {
+ ret = i->enable_acs(dev);
+ if (ret >= 0)
+ return;
+ }
+ }
+}
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
new file mode 100644
index 000000000..8a280e9c2
--- /dev/null
+++ b/drivers/pci/remove.c
@@ -0,0 +1,162 @@
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/pci-aspm.h>
+#include "pci.h"
+
+static void pci_free_resources(struct pci_dev *dev)
+{
+ int i;
+
+ pci_cleanup_rom(dev);
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *res = dev->resource + i;
+ if (res->parent)
+ release_resource(res);
+ }
+}
+
+static void pci_stop_dev(struct pci_dev *dev)
+{
+ pci_pme_active(dev, false);
+
+ if (dev->is_added) {
+ pci_proc_detach_device(dev);
+ pci_remove_sysfs_dev_files(dev);
+ device_release_driver(&dev->dev);
+ dev->is_added = 0;
+ }
+
+ if (dev->bus->self)
+ pcie_aspm_exit_link_state(dev);
+}
+
+static void pci_destroy_dev(struct pci_dev *dev)
+{
+ if (!dev->dev.kobj.parent)
+ return;
+
+ device_del(&dev->dev);
+
+ down_write(&pci_bus_sem);
+ list_del(&dev->bus_list);
+ up_write(&pci_bus_sem);
+
+ pci_free_resources(dev);
+ put_device(&dev->dev);
+}
+
+void pci_remove_bus(struct pci_bus *bus)
+{
+ pci_proc_detach_bus(bus);
+
+ down_write(&pci_bus_sem);
+ list_del(&bus->node);
+ pci_bus_release_busn_res(bus);
+ up_write(&pci_bus_sem);
+ pci_remove_legacy_files(bus);
+ pcibios_remove_bus(bus);
+ device_unregister(&bus->dev);
+}
+EXPORT_SYMBOL(pci_remove_bus);
+
+static void pci_stop_bus_device(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->subordinate;
+ struct pci_dev *child, *tmp;
+
+ /*
+ * Stopping an SR-IOV PF device removes all the associated VFs,
+ * which will update the bus->devices list and confuse the
+ * iterator. Therefore, iterate in reverse so we remove the VFs
+ * first, then the PF.
+ */
+ if (bus) {
+ list_for_each_entry_safe_reverse(child, tmp,
+ &bus->devices, bus_list)
+ pci_stop_bus_device(child);
+ }
+
+ pci_stop_dev(dev);
+}
+
+static void pci_remove_bus_device(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->subordinate;
+ struct pci_dev *child, *tmp;
+
+ if (bus) {
+ list_for_each_entry_safe(child, tmp,
+ &bus->devices, bus_list)
+ pci_remove_bus_device(child);
+
+ pci_remove_bus(bus);
+ dev->subordinate = NULL;
+ }
+
+ pci_destroy_dev(dev);
+}
+
+/**
+ * pci_stop_and_remove_bus_device - remove a PCI device and any children
+ * @dev: the device to remove
+ *
+ * Remove a PCI device from the device lists, informing the drivers
+ * that the device has been removed. We also remove any subordinate
+ * buses and children in a depth-first manner.
+ *
+ * For each device we remove, delete the device structure from the
+ * device lists, remove the /proc entry, and notify userspace
+ * (/sbin/hotplug).
+ */
+void pci_stop_and_remove_bus_device(struct pci_dev *dev)
+{
+ pci_stop_bus_device(dev);
+ pci_remove_bus_device(dev);
+}
+EXPORT_SYMBOL(pci_stop_and_remove_bus_device);
+
+void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev)
+{
+ pci_lock_rescan_remove();
+ pci_stop_and_remove_bus_device(dev);
+ pci_unlock_rescan_remove();
+}
+EXPORT_SYMBOL_GPL(pci_stop_and_remove_bus_device_locked);
+
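+/*
+ * Usage sketch: a caller that does not already hold the rescan/remove
+ * lock (e.g. a hotplug driver's disable path) uses the _locked variant,
+ *
+ *	pci_stop_and_remove_bus_device_locked(dev);
+ *
+ * while code already under pci_lock_rescan_remove() calls
+ * pci_stop_and_remove_bus_device() directly.
+ */
+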
+void pci_stop_root_bus(struct pci_bus *bus)
+{
+ struct pci_dev *child, *tmp;
+ struct pci_host_bridge *host_bridge;
+
+ if (!pci_is_root_bus(bus))
+ return;
+
+ host_bridge = to_pci_host_bridge(bus->bridge);
+ list_for_each_entry_safe_reverse(child, tmp,
+ &bus->devices, bus_list)
+ pci_stop_bus_device(child);
+
+ /* stop the host bridge */
+ device_release_driver(&host_bridge->dev);
+}
+EXPORT_SYMBOL_GPL(pci_stop_root_bus);
+
+void pci_remove_root_bus(struct pci_bus *bus)
+{
+ struct pci_dev *child, *tmp;
+ struct pci_host_bridge *host_bridge;
+
+ if (!pci_is_root_bus(bus))
+ return;
+
+ host_bridge = to_pci_host_bridge(bus->bridge);
+ list_for_each_entry_safe(child, tmp,
+ &bus->devices, bus_list)
+ pci_remove_bus_device(child);
+ pci_remove_bus(bus);
+ host_bridge->bus = NULL;
+
+ /* remove the host bridge */
+ device_unregister(&host_bridge->dev);
+}
+EXPORT_SYMBOL_GPL(pci_remove_root_bus);
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
new file mode 100644
index 000000000..eb0ad530d
--- /dev/null
+++ b/drivers/pci/rom.c
@@ -0,0 +1,230 @@
+/*
+ * drivers/pci/rom.c
+ *
+ * (C) Copyright 2004 Jon Smirl <jonsmirl@yahoo.com>
+ * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <jbarnes@sgi.com>
+ *
+ * PCI ROM access routines
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "pci.h"
+
+/**
+ * pci_enable_rom - enable ROM decoding for a PCI device
+ * @pdev: PCI device to enable
+ *
+ * Enable ROM decoding on @pdev. This involves simply turning on the
+ * enable bit of the PCI ROM BAR. Note that some cards may share address
+ * decoders between the ROM and other resources, so enabling it may
+ * disable access to MMIO registers or other card memory.
+ */
+int pci_enable_rom(struct pci_dev *pdev)
+{
+ struct resource *res = pdev->resource + PCI_ROM_RESOURCE;
+ struct pci_bus_region region;
+ u32 rom_addr;
+
+ if (!res->flags)
+ return -ENODEV;
+
+ pcibios_resource_to_bus(pdev->bus, &region, res);
+ pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
+ rom_addr &= ~PCI_ROM_ADDRESS_MASK;
+ rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE;
+ pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_enable_rom);
+
+/**
+ * pci_disable_rom - disable ROM decoding for a PCI device
+ * @pdev: PCI device to disable
+ *
+ * Disable ROM decoding on a PCI device by turning off the last bit in the
+ * ROM BAR.
+ */
+void pci_disable_rom(struct pci_dev *pdev)
+{
+ u32 rom_addr;
+ pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
+ rom_addr &= ~PCI_ROM_ADDRESS_ENABLE;
+ pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
+}
+EXPORT_SYMBOL_GPL(pci_disable_rom);
+
+/**
+ * pci_get_rom_size - obtain the actual size of the ROM image
+ * @pdev: target PCI device
+ * @rom: kernel virtual pointer to image of ROM
+ * @size: size of PCI window
+ * Return: size of actual ROM image
+ *
+ * Determine the actual length of the ROM image.
+ * The PCI window size could be much larger than the
+ * actual image size.
+ */
+size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
+{
+ void __iomem *image;
+ int last_image;
+ unsigned int length;
+
+ image = rom;
+ do {
+ void __iomem *pds;
+ /* Standard PCI ROMs start with the signature bytes 0x55 0xAA */
+ if (readb(image) != 0x55) {
+ dev_err(&pdev->dev, "Invalid ROM contents\n");
+ break;
+ }
+ if (readb(image + 1) != 0xAA)
+ break;
+ /* get the PCI data structure and check its signature */
+ pds = image + readw(image + 24);
+ if (readb(pds) != 'P')
+ break;
+ if (readb(pds + 1) != 'C')
+ break;
+ if (readb(pds + 2) != 'I')
+ break;
+ if (readb(pds + 3) != 'R')
+ break;
+ last_image = readb(pds + 21) & 0x80;
+ length = readw(pds + 16);
+ image += length * 512;
+ } while (length && !last_image);
+
+ /* never return a size larger than the PCI resource window */
+ /* there are known ROMs that get the size wrong */
+ return min((size_t)(image - rom), size);
+}
+
+/**
+ * pci_map_rom - map a PCI ROM to kernel space
+ * @pdev: pointer to pci device struct
+ * @size: pointer to receive size of pci window over ROM
+ *
+ * Return: kernel virtual pointer to image of ROM
+ *
+ * Map a PCI ROM into kernel space. If the ROM is the boot video ROM,
+ * the shadow BIOS copy will be returned instead of the actual ROM.
+ */
+void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
+{
+ struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
+ loff_t start;
+ void __iomem *rom;
+
+ /*
+ * IORESOURCE_ROM_SHADOW is set on x86, x86_64 and IA64 to support the
+ * legacy memory map when the VGA enable bit of the Bridge Control
+ * register is set for embedded VGA.
+ */
+ if (res->flags & IORESOURCE_ROM_SHADOW) {
+ /* primary video rom always starts here */
+ start = (loff_t)0xC0000;
+ *size = 0x20000; /* cover C000:0 through E000:0 */
+ } else {
+ if (res->flags &
+ (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
+ *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
+ return (void __iomem *)(unsigned long)
+ pci_resource_start(pdev, PCI_ROM_RESOURCE);
+ } else {
+ /* assign the ROM an address if it doesn't have one */
+ if (res->parent == NULL &&
+ pci_assign_resource(pdev, PCI_ROM_RESOURCE))
+ return NULL;
+ start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
+ *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
+ if (*size == 0)
+ return NULL;
+
+ /* Enable ROM space decodes */
+ if (pci_enable_rom(pdev))
+ return NULL;
+ }
+ }
+
+ rom = ioremap(start, *size);
+ if (!rom) {
+ /* restore enable if ioremap fails */
+ if (!(res->flags & (IORESOURCE_ROM_ENABLE |
+ IORESOURCE_ROM_SHADOW |
+ IORESOURCE_ROM_COPY)))
+ pci_disable_rom(pdev);
+ return NULL;
+ }
+
+ /*
+ * Try to find the true size of the ROM since sometimes the PCI window
+ * size is much larger than the actual size of the ROM.
+ * True size is important if the ROM is going to be copied.
+ */
+ *size = pci_get_rom_size(pdev, rom, *size);
+ return rom;
+}
+EXPORT_SYMBOL(pci_map_rom);
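+
+/*
+ * Usage sketch (hypothetical driver probe path; "buf" is illustrative):
+ * map the ROM, copy it out, then unmap. pci_map_rom() already trims
+ * *size down to the actual image length.
+ *
+ *	size_t size;
+ *	void __iomem *rom = pci_map_rom(pdev, &size);
+ *
+ *	if (rom) {
+ *		memcpy_fromio(buf, rom, size);
+ *		pci_unmap_rom(pdev, rom);
+ *	}
+ */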
+
+/**
+ * pci_unmap_rom - unmap the ROM from kernel space
+ * @pdev: pointer to pci device struct
+ * @rom: virtual address of the previous mapping
+ *
+ * Remove a mapping of a previously mapped ROM.
+ */
+void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
+{
+ struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
+
+ if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY))
+ return;
+
+ iounmap(rom);
+
+ /* Disable again before continuing, leave enabled if pci=rom */
+ if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW)))
+ pci_disable_rom(pdev);
+}
+EXPORT_SYMBOL(pci_unmap_rom);
+
+/**
+ * pci_cleanup_rom - free the ROM copy created by pci_map_rom_copy
+ * @pdev: pointer to pci device struct
+ *
+ * Free the copied ROM if we allocated one.
+ */
+void pci_cleanup_rom(struct pci_dev *pdev)
+{
+ struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
+
+ if (res->flags & IORESOURCE_ROM_COPY) {
+ kfree((void *)(unsigned long)res->start);
+ res->flags |= IORESOURCE_UNSET;
+ res->flags &= ~IORESOURCE_ROM_COPY;
+ res->start = 0;
+ res->end = 0;
+ }
+}
+
+/**
+ * pci_platform_rom - provides a pointer to any ROM image provided by the
+ * platform
+ * @pdev: pointer to pci device struct
+ * @size: pointer to receive size of pci window over ROM
+ */
+void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size)
+{
+ if (pdev->rom && pdev->romlen) {
+ *size = pdev->romlen;
+ return phys_to_virt((phys_addr_t)pdev->rom);
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(pci_platform_rom);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
new file mode 100644
index 000000000..a20ce7d5e
--- /dev/null
+++ b/drivers/pci/search.c
@@ -0,0 +1,386 @@
+/*
+ * PCI searching functions.
+ *
+ * Copyright (C) 1993 -- 1997 Drew Eckhardt, Frederic Potter,
+ * David Mosberger-Tang
+ * Copyright (C) 1997 -- 2000 Martin Mares <mj@ucw.cz>
+ * Copyright (C) 2003 -- 2004 Greg Kroah-Hartman <greg@kroah.com>
+ */
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include "pci.h"
+
+DECLARE_RWSEM(pci_bus_sem);
+EXPORT_SYMBOL_GPL(pci_bus_sem);
+
+/**
+ * pci_for_each_dma_alias - Iterate over DMA aliases for a device
+ * @pdev: starting downstream device
+ * @fn: function to call for each alias
+ * @data: opaque data to pass to @fn
+ *
+ * Starting from @pdev, walk up the bus calling @fn for each possible
+ * alias of @pdev as seen from the root bus.
+ */
+int pci_for_each_dma_alias(struct pci_dev *pdev,
+ int (*fn)(struct pci_dev *pdev,
+ u16 alias, void *data), void *data)
+{
+ struct pci_bus *bus;
+ int ret;
+
+ ret = fn(pdev, PCI_DEVID(pdev->bus->number, pdev->devfn), data);
+ if (ret)
+ return ret;
+
+ /*
+ * If the device is broken and uses an alias requester ID for
+ * DMA, iterate over that too.
+ */
+ if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)) {
+ ret = fn(pdev, PCI_DEVID(pdev->bus->number,
+ pdev->dma_alias_devfn), data);
+ if (ret)
+ return ret;
+ }
+
+ for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
+ struct pci_dev *tmp;
+
+ /* Skip virtual buses */
+ if (!bus->self)
+ continue;
+
+ tmp = bus->self;
+
+ /*
+ * PCIe-to-PCI/X bridges alias transactions from downstream
+ * devices using the subordinate bus number (PCI Express to
+ * PCI/PCI-X Bridge Spec, rev 1.0, sec 2.3). For all cases
+ * where the upstream bus is PCI/X we alias to the bridge
+ * (there are various conditions in the previous reference
+ * where the bridge may take ownership of transactions, even
+ * when the secondary interface is PCI-X).
+ */
+ if (pci_is_pcie(tmp)) {
+ switch (pci_pcie_type(tmp)) {
+ case PCI_EXP_TYPE_ROOT_PORT:
+ case PCI_EXP_TYPE_UPSTREAM:
+ case PCI_EXP_TYPE_DOWNSTREAM:
+ continue;
+ case PCI_EXP_TYPE_PCI_BRIDGE:
+ ret = fn(tmp,
+ PCI_DEVID(tmp->subordinate->number,
+ PCI_DEVFN(0, 0)), data);
+ if (ret)
+ return ret;
+ continue;
+ case PCI_EXP_TYPE_PCIE_BRIDGE:
+ ret = fn(tmp,
+ PCI_DEVID(tmp->bus->number,
+ tmp->devfn), data);
+ if (ret)
+ return ret;
+ continue;
+ }
+ } else {
+ if (tmp->dev_flags & PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS)
+ ret = fn(tmp,
+ PCI_DEVID(tmp->subordinate->number,
+ PCI_DEVFN(0, 0)), data);
+ else
+ ret = fn(tmp,
+ PCI_DEVID(tmp->bus->number,
+ tmp->devfn), data);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
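+
+/*
+ * Callback sketch matching @fn above (hypothetical, e.g. IOMMU code
+ * registering each alias; "example_add_alias" is an illustrative name):
+ *
+ *	static int example_add_alias(struct pci_dev *pdev, u16 alias,
+ *				     void *data)
+ *	{
+ *		dev_info(&pdev->dev, "alias %02x:%02x.%d\n",
+ *			 PCI_BUS_NUM(alias), PCI_SLOT(alias & 0xff),
+ *			 PCI_FUNC(alias & 0xff));
+ *		return 0;	(non-zero stops the walk)
+ *	}
+ *
+ *	pci_for_each_dma_alias(pdev, example_add_alias, NULL);
+ */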
+
+static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr)
+{
+ struct pci_bus *child;
+ struct pci_bus *tmp;
+
+ if (bus->number == busnr)
+ return bus;
+
+ list_for_each_entry(tmp, &bus->children, node) {
+ child = pci_do_find_bus(tmp, busnr);
+ if (child)
+ return child;
+ }
+ return NULL;
+}
+
+/**
+ * pci_find_bus - locate PCI bus from a given domain and bus number
+ * @domain: number of PCI domain to search
+ * @busnr: number of desired PCI bus
+ *
+ * Given a PCI bus number and domain number, the desired PCI bus is located
+ * in the global list of PCI buses. If the bus is found, a pointer to its
+ * data structure is returned. If no bus is found, %NULL is returned.
+ */
+struct pci_bus *pci_find_bus(int domain, int busnr)
+{
+ struct pci_bus *bus = NULL;
+ struct pci_bus *tmp_bus;
+
+ while ((bus = pci_find_next_bus(bus)) != NULL) {
+ if (pci_domain_nr(bus) != domain)
+ continue;
+ tmp_bus = pci_do_find_bus(bus, busnr);
+ if (tmp_bus)
+ return tmp_bus;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(pci_find_bus);
+
+/**
+ * pci_find_next_bus - begin or continue searching for a PCI bus
+ * @from: Previous PCI bus found, or %NULL for new search.
+ *
+ * Iterates through the list of known PCI buses. A new search is
+ * initiated by passing %NULL as the @from argument. Otherwise if
+ * @from is not %NULL, searches continue from the next bus on the
+ * global list.
+ */
+struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
+{
+ struct list_head *n;
+ struct pci_bus *b = NULL;
+
+ WARN_ON(in_interrupt());
+ down_read(&pci_bus_sem);
+ n = from ? from->node.next : pci_root_buses.next;
+ if (n != &pci_root_buses)
+ b = list_entry(n, struct pci_bus, node);
+ up_read(&pci_bus_sem);
+ return b;
+}
+EXPORT_SYMBOL(pci_find_next_bus);
+
+/**
+ * pci_get_slot - locate PCI device for a given PCI slot
+ * @bus: PCI bus on which desired PCI device resides
+ * @devfn: encodes number of PCI slot in which the desired PCI
+ * device resides and the logical device number within that slot
+ * in case of multi-function devices.
+ *
+ * Given a PCI bus and slot/function number, the desired PCI device
+ * is located in the list of PCI devices.
+ * If the device is found, its reference count is increased and this
+ * function returns a pointer to its data structure. The caller must
+ * decrement the reference count by calling pci_dev_put().
+ * If no device is found, %NULL is returned.
+ */
+struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn)
+{
+ struct pci_dev *dev;
+
+ WARN_ON(in_interrupt());
+ down_read(&pci_bus_sem);
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->devfn == devfn)
+ goto out;
+ }
+
+ dev = NULL;
+ out:
+ pci_dev_get(dev);
+ up_read(&pci_bus_sem);
+ return dev;
+}
+EXPORT_SYMBOL(pci_get_slot);
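+
+/*
+ * Usage sketch (hypothetical caller): look up function 0 of device 3 on
+ * @bus, remembering to drop the reference when done.
+ *
+ *	struct pci_dev *dev = pci_get_slot(bus, PCI_DEVFN(3, 0));
+ *
+ *	if (dev) {
+ *		... use dev ...
+ *		pci_dev_put(dev);
+ *	}
+ */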
+
+/**
+ * pci_get_domain_bus_and_slot - locate PCI device for a given PCI domain (segment), bus, and slot
+ * @domain: PCI domain/segment on which the PCI device resides.
+ * @bus: PCI bus on which desired PCI device resides
+ * @devfn: encodes number of PCI slot in which the desired PCI device
+ * resides and the logical device number within that slot in case of
+ * multi-function devices.
+ *
+ * Given a PCI domain, bus, and slot/function number, the desired PCI
+ * device is located in the list of PCI devices. If the device is
+ * found, its reference count is increased and this function returns a
+ * pointer to its data structure. The caller must decrement the
+ * reference count by calling pci_dev_put(). If no device is found,
+ * %NULL is returned.
+ */
+struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
+ unsigned int devfn)
+{
+ struct pci_dev *dev = NULL;
+
+ for_each_pci_dev(dev) {
+ if (pci_domain_nr(dev->bus) == domain &&
+ (dev->bus->number == bus && dev->devfn == devfn))
+ return dev;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(pci_get_domain_bus_and_slot);
+
+static int match_pci_dev_by_id(struct device *dev, void *data)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_device_id *id = data;
+
+ if (pci_match_one_device(id, pdev))
+ return 1;
+ return 0;
+}
+
+/**
+ * pci_get_dev_by_id - begin or continue searching for a PCI device by id
+ * @id: pointer to struct pci_device_id to match for the device
+ * @from: Previous PCI device found in search, or %NULL for new search.
+ *
+ * Iterates through the list of known PCI devices. If a PCI device is found
+ * with a matching id a pointer to its device structure is returned, and the
+ * reference count to the device is incremented. Otherwise, %NULL is returned.
+ * A new search is initiated by passing %NULL as the @from argument. Otherwise
+ * if @from is not %NULL, searches continue from next device on the global
+ * list. The reference count for @from is always decremented if it is not
+ * %NULL.
+ *
+ * This is an internal function for use by the other search functions in
+ * this file.
+ */
+static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id,
+ struct pci_dev *from)
+{
+ struct device *dev;
+ struct device *dev_start = NULL;
+ struct pci_dev *pdev = NULL;
+
+ WARN_ON(in_interrupt());
+ if (from)
+ dev_start = &from->dev;
+ dev = bus_find_device(&pci_bus_type, dev_start, (void *)id,
+ match_pci_dev_by_id);
+ if (dev)
+ pdev = to_pci_dev(dev);
+ pci_dev_put(from);
+ return pdev;
+}
+
+/**
+ * pci_get_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id
+ * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
+ * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
+ * @ss_vendor: PCI subsystem vendor id to match, or %PCI_ANY_ID to match all vendor ids
+ * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids
+ * @from: Previous PCI device found in search, or %NULL for new search.
+ *
+ * Iterates through the list of known PCI devices. If a PCI device is found
+ * with a matching @vendor, @device, @ss_vendor and @ss_device, a pointer to its
+ * device structure is returned, and the reference count to the device is
+ * incremented. Otherwise, %NULL is returned. A new search is initiated by
+ * passing %NULL as the @from argument. Otherwise if @from is not %NULL,
+ * searches continue from next device on the global list.
+ * The reference count for @from is always decremented if it is not %NULL.
+ */
+struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
+ unsigned int ss_vendor, unsigned int ss_device,
+ struct pci_dev *from)
+{
+ struct pci_device_id id = {
+ .vendor = vendor,
+ .device = device,
+ .subvendor = ss_vendor,
+ .subdevice = ss_device,
+ };
+
+ return pci_get_dev_by_id(&id, from);
+}
+EXPORT_SYMBOL(pci_get_subsys);
+
+/**
+ * pci_get_device - begin or continue searching for a PCI device by vendor/device id
+ * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
+ * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
+ * @from: Previous PCI device found in search, or %NULL for new search.
+ *
+ * Iterates through the list of known PCI devices. If a PCI device is
+ * found with a matching @vendor and @device, the reference count to the
+ * device is incremented and a pointer to its device structure is returned.
+ * Otherwise, %NULL is returned. A new search is initiated by passing %NULL
+ * as the @from argument. Otherwise if @from is not %NULL, searches continue
+ * from next device on the global list. The reference count for @from is
+ * always decremented if it is not %NULL.
+ */
+struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
+ struct pci_dev *from)
+{
+ return pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
+}
+EXPORT_SYMBOL(pci_get_device);
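+
+/*
+ * Usage sketch: the canonical enumeration loop. Passing the previous
+ * device back in releases its reference, so no explicit pci_dev_put()
+ * is needed unless the loop exits early.
+ *
+ *	struct pci_dev *dev = NULL;
+ *
+ *	while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ *				     PCI_ANY_ID, dev)) != NULL) {
+ *		... use dev; breaking out requires pci_dev_put(dev) ...
+ *	}
+ */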
+
+/**
+ * pci_get_class - begin or continue searching for a PCI device by class
+ * @class: search for a PCI device with this class designation
+ * @from: Previous PCI device found in search, or %NULL for new search.
+ *
+ * Iterates through the list of known PCI devices. If a PCI device is
+ * found with a matching @class, the reference count to the device is
+ * incremented and a pointer to its device structure is returned.
+ * Otherwise, %NULL is returned.
+ * A new search is initiated by passing %NULL as the @from argument.
+ * Otherwise if @from is not %NULL, searches continue from next device
+ * on the global list. The reference count for @from is always decremented
+ * if it is not %NULL.
+ */
+struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
+{
+ struct pci_device_id id = {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class_mask = PCI_ANY_ID,
+ .class = class,
+ };
+
+ return pci_get_dev_by_id(&id, from);
+}
+EXPORT_SYMBOL(pci_get_class);
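+
+/*
+ * Usage sketch: @class is the full 24-bit class word, so a base/sub
+ * class constant must be shifted into place, e.g. to walk VGA devices:
+ *
+ *	struct pci_dev *vga = NULL;
+ *
+ *	while ((vga = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, vga)))
+ *		... use vga ...
+ */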
+
+/**
+ * pci_dev_present - Returns 1 if device matching the device list is present, 0 if not.
+ * @ids: A pointer to a null terminated list of struct pci_device_id structures
+ * that describe the type of PCI device the caller is trying to find.
+ *
+ * Obvious fact: You do not have a reference to any device that might be found
+ * by this function, so if that device is removed from the system right after
+ * this function is finished, the value will be stale. Use this function to
+ * find devices that are usually built into a system, or for a general hint as
+ * to if another device happens to be present at this specific moment in time.
+ */
+int pci_dev_present(const struct pci_device_id *ids)
+{
+ struct pci_dev *found = NULL;
+
+ WARN_ON(in_interrupt());
+ while (ids->vendor || ids->subvendor || ids->class_mask) {
+ found = pci_get_dev_by_id(ids, NULL);
+ if (found) {
+ pci_dev_put(found);
+ return 1;
+ }
+ ids++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pci_dev_present);
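+
+/*
+ * Usage sketch (hypothetical table; the device ID shown is illustrative
+ * only): check for a chipset before applying a workaround.
+ *
+ *	static const struct pci_device_id example_tbl[] = {
+ *		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },
+ *		{ }
+ *	};
+ *
+ *	if (pci_dev_present(example_tbl))
+ *		... apply workaround ...
+ */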
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
new file mode 100644
index 000000000..508cc5613
--- /dev/null
+++ b/drivers/pci/setup-bus.c
@@ -0,0 +1,1819 @@
+/*
+ * drivers/pci/setup-bus.c
+ *
+ * Extruded from code written by
+ * Dave Rusling (david.rusling@reo.mts.dec.com)
+ * David Mosberger (davidm@cs.arizona.edu)
+ * David Miller (davem@redhat.com)
+ *
+ * Support routines for initializing a PCI subsystem.
+ */
+
+/*
+ * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
+ * PCI-PCI bridges cleanup, sorted resource allocation.
+ * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
+ * Converted to allocation in 3 passes, which gives
+ * tighter packing. Prefetchable range support.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/cache.h>
+#include <linux/slab.h>
+#include <asm-generic/pci-bridge.h>
+#include "pci.h"
+
+unsigned int pci_flags;
+
+struct pci_dev_resource {
+ struct list_head list;
+ struct resource *res;
+ struct pci_dev *dev;
+ resource_size_t start;
+ resource_size_t end;
+ resource_size_t add_size;
+ resource_size_t min_align;
+ unsigned long flags;
+};
+
+static void free_list(struct list_head *head)
+{
+ struct pci_dev_resource *dev_res, *tmp;
+
+ list_for_each_entry_safe(dev_res, tmp, head, list) {
+ list_del(&dev_res->list);
+ kfree(dev_res);
+ }
+}
+
+/**
+ * add_to_list() - add a new resource tracker to the list
+ * @head: Head of the list
+ * @dev: device to which the resource belongs
+ * @res: The resource to be tracked
+ * @add_size: additional size to be optionally added to the resource
+ * @min_align: minimum memory window alignment
+ */
+static int add_to_list(struct list_head *head,
+ struct pci_dev *dev, struct resource *res,
+ resource_size_t add_size, resource_size_t min_align)
+{
+ struct pci_dev_resource *tmp;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ pr_warn("add_to_list: kzalloc() failed!\n");
+ return -ENOMEM;
+ }
+
+ tmp->res = res;
+ tmp->dev = dev;
+ tmp->start = res->start;
+ tmp->end = res->end;
+ tmp->flags = res->flags;
+ tmp->add_size = add_size;
+ tmp->min_align = min_align;
+
+ list_add(&tmp->list, head);
+
+ return 0;
+}
+
+static void remove_from_list(struct list_head *head,
+ struct resource *res)
+{
+ struct pci_dev_resource *dev_res, *tmp;
+
+ list_for_each_entry_safe(dev_res, tmp, head, list) {
+ if (dev_res->res == res) {
+ list_del(&dev_res->list);
+ kfree(dev_res);
+ break;
+ }
+ }
+}
+
+static struct pci_dev_resource *res_to_dev_res(struct list_head *head,
+ struct resource *res)
+{
+ struct pci_dev_resource *dev_res;
+
+ list_for_each_entry(dev_res, head, list) {
+ if (dev_res->res == res) {
+ int idx = res - &dev_res->dev->resource[0];
+
+ dev_printk(KERN_DEBUG, &dev_res->dev->dev,
+ "res[%d]=%pR res_to_dev_res add_size %llx min_align %llx\n",
+ idx, dev_res->res,
+ (unsigned long long)dev_res->add_size,
+ (unsigned long long)dev_res->min_align);
+
+ return dev_res;
+ }
+ }
+
+ return NULL;
+}
+
+static resource_size_t get_res_add_size(struct list_head *head,
+ struct resource *res)
+{
+ struct pci_dev_resource *dev_res;
+
+ dev_res = res_to_dev_res(head, res);
+ return dev_res ? dev_res->add_size : 0;
+}
+
+static resource_size_t get_res_add_align(struct list_head *head,
+ struct resource *res)
+{
+ struct pci_dev_resource *dev_res;
+
+ dev_res = res_to_dev_res(head, res);
+ return dev_res ? dev_res->min_align : 0;
+}
+
+/* Sort resources by alignment */
+static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
+{
+ int i;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *r;
+ struct pci_dev_resource *dev_res, *tmp;
+ resource_size_t r_align;
+ struct list_head *n;
+
+ r = &dev->resource[i];
+
+ if (r->flags & IORESOURCE_PCI_FIXED)
+ continue;
+
+ if (!(r->flags) || r->parent)
+ continue;
+
+ r_align = pci_resource_alignment(dev, r);
+ if (!r_align) {
+ dev_warn(&dev->dev, "BAR %d: %pR has bogus alignment\n",
+ i, r);
+ continue;
+ }
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ panic("pdev_sort_resources(): kzalloc() failed!\n");
+ tmp->res = r;
+ tmp->dev = dev;
+
+ /* fallback is smallest one, or list is empty */
+ n = head;
+ list_for_each_entry(dev_res, head, list) {
+ resource_size_t align;
+
+ align = pci_resource_alignment(dev_res->dev,
+ dev_res->res);
+
+ if (r_align > align) {
+ n = &dev_res->list;
+ break;
+ }
+ }
+ /* Insert it just before n */
+ list_add_tail(&tmp->list, n);
+ }
+}
+
+static void __dev_sort_resources(struct pci_dev *dev,
+ struct list_head *head)
+{
+ u16 class = dev->class >> 8;
+
+ /* Don't touch classless devices or host bridges or ioapics. */
+ if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
+ return;
+
+ /* Don't touch ioapic devices already enabled by firmware */
+ if (class == PCI_CLASS_SYSTEM_PIC) {
+ u16 command;
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+ if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
+ return;
+ }
+
+ pdev_sort_resources(dev, head);
+}
+
+static inline void reset_resource(struct resource *res)
+{
+ res->start = 0;
+ res->end = 0;
+ res->flags = 0;
+}
+
+/**
+ * reassign_resources_sorted() - satisfy any additional resource requests
+ *
+ * @realloc_head : head of the list tracking requests requiring additional
+ * resources
+ * @head : head of the list tracking requests with allocated
+ * resources
+ *
+ * Walk through each element of the realloc_head and try to procure
+ * additional resources for the element, provided the element
+ * is in the head list.
+ */
+static void reassign_resources_sorted(struct list_head *realloc_head,
+ struct list_head *head)
+{
+ struct resource *res;
+ struct pci_dev_resource *add_res, *tmp;
+ struct pci_dev_resource *dev_res;
+ resource_size_t add_size, align;
+ int idx;
+
+ list_for_each_entry_safe(add_res, tmp, realloc_head, list) {
+ bool found_match = false;
+
+ res = add_res->res;
+ /* skip resource that has been reset */
+ if (!res->flags)
+ goto out;
+
+ /* skip this resource if not found in head list */
+ list_for_each_entry(dev_res, head, list) {
+ if (dev_res->res == res) {
+ found_match = true;
+ break;
+ }
+ }
+ if (!found_match)/* just skip */
+ continue;
+
+ idx = res - &add_res->dev->resource[0];
+ add_size = add_res->add_size;
+ align = add_res->min_align;
+ if (!resource_size(res)) {
+ res->start = align;
+ res->end = res->start + add_size - 1;
+ if (pci_assign_resource(add_res->dev, idx))
+ reset_resource(res);
+ } else {
+ res->flags |= add_res->flags &
+ (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
+ if (pci_reassign_resource(add_res->dev, idx,
+ add_size, align))
+ dev_printk(KERN_DEBUG, &add_res->dev->dev,
+ "failed to add %llx res[%d]=%pR\n",
+ (unsigned long long)add_size,
+ idx, res);
+ }
+out:
+ list_del(&add_res->list);
+ kfree(add_res);
+ }
+}
+
+/**
+ * assign_requested_resources_sorted() - satisfy resource requests
+ *
+ * @head : head of the list tracking requests for resources
+ * @fail_head : head of the list tracking requests that could
+ * not be allocated
+ *
+ * Satisfy resource requests of each element in the list. Add
+ * requests that could not be satisfied to the fail_head list.
+ */
+static void assign_requested_resources_sorted(struct list_head *head,
+ struct list_head *fail_head)
+{
+ struct resource *res;
+ struct pci_dev_resource *dev_res;
+ int idx;
+
+ list_for_each_entry(dev_res, head, list) {
+ res = dev_res->res;
+ idx = res - &dev_res->dev->resource[0];
+ if (resource_size(res) &&
+ pci_assign_resource(dev_res->dev, idx)) {
+ if (fail_head) {
+ /*
+ * if the failed res is for ROM BAR, and it will
+ * be enabled later, don't add it to the list
+ */
+ if (!((idx == PCI_ROM_RESOURCE) &&
+ (!(res->flags & IORESOURCE_ROM_ENABLE))))
+ add_to_list(fail_head,
+ dev_res->dev, res,
+ 0 /* don't care */,
+ 0 /* don't care */);
+ }
+ reset_resource(res);
+ }
+ }
+}
+
+static unsigned long pci_fail_res_type_mask(struct list_head *fail_head)
+{
+ struct pci_dev_resource *fail_res;
+ unsigned long mask = 0;
+
+ /* check failed type */
+ list_for_each_entry(fail_res, fail_head, list)
+ mask |= fail_res->flags;
+
+ /*
+ * One failed prefetchable resource will set IORESOURCE_MEM, as we can
+ * allocate prefetchable resources in a non-prefetchable range.
+ * All assigned non-prefetchable sibling resources will be released
+ * according to that bit.
+ */
+ return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH);
+}
+
+static bool pci_need_to_release(unsigned long mask, struct resource *res)
+{
+ if (res->flags & IORESOURCE_IO)
+ return !!(mask & IORESOURCE_IO);
+
+ /* check pref at first */
+ if (res->flags & IORESOURCE_PREFETCH) {
+ if (mask & IORESOURCE_PREFETCH)
+ return true;
+ /* count pref if its parent is non-pref */
+ else if ((mask & IORESOURCE_MEM) &&
+ !(res->parent->flags & IORESOURCE_PREFETCH))
+ return true;
+ else
+ return false;
+ }
+
+ if (res->flags & IORESOURCE_MEM)
+ return !!(mask & IORESOURCE_MEM);
+
+ return false; /* should not get here */
+}
+
+static void __assign_resources_sorted(struct list_head *head,
+ struct list_head *realloc_head,
+ struct list_head *fail_head)
+{
+ /*
+ * We should not assign requested resources first. They could be
+ * adjacent, so later reassignment could not reallocate them one by
+ * one in the parent resource window.
+ * Try to assign requested + add_size at the beginning; if we can do
+ * that, we can get out early. If we cannot, we still try to assign
+ * requested at first, then try to reassign add_size for some
+ * resources.
+ *
+ * Separately check the three resource types to decide whether we need
+ * to release assigned resources after the requested + add_size try:
+ * 1. if there is an io port assignment failure, release assigned
+ * io ports.
+ * 2. if there is a pref mmio assignment failure, release assigned
+ * pref mmio.
+ * if an assigned pref mmio's parent is non-pref mmio and there
+ * is a non-pref mmio assignment failure, release that assigned
+ * pref mmio too.
+ * 3. if there is a non-pref mmio assignment failure or a pref mmio
+ * assignment failure, release assigned non-pref mmio.
+ */
+ LIST_HEAD(save_head);
+ LIST_HEAD(local_fail_head);
+ struct pci_dev_resource *save_res;
+ struct pci_dev_resource *dev_res, *tmp_res, *dev_res2;
+ unsigned long fail_type;
+ resource_size_t add_align, align;
+
+ /* Check if optional add_size is there */
+ if (!realloc_head || list_empty(realloc_head))
+ goto requested_and_reassign;
+
+ /* Save original start, end, flags etc at first */
+ list_for_each_entry(dev_res, head, list) {
+ if (add_to_list(&save_head, dev_res->dev, dev_res->res, 0, 0)) {
+ free_list(&save_head);
+ goto requested_and_reassign;
+ }
+ }
+
+ /* Update res in head list with add_size in realloc_head list */
+ list_for_each_entry_safe(dev_res, tmp_res, head, list) {
+ dev_res->res->end += get_res_add_size(realloc_head,
+ dev_res->res);
+
+ /*
+ * There are two kinds of additional resources in the list:
+ * 1. bridge resource -- IORESOURCE_STARTALIGN
+ * 2. SR-IOV resource -- IORESOURCE_SIZEALIGN
+ * Here just fix the additional alignment for bridge
+ */
+ if (!(dev_res->res->flags & IORESOURCE_STARTALIGN))
+ continue;
+
+ add_align = get_res_add_align(realloc_head, dev_res->res);
+
+ /*
+ * The "head" list is sorted by the alignment to make sure
+ * resources with bigger alignment will be assigned first.
+ * After we change the alignment of a dev_res in "head" list,
+ * we need to reorder the list by alignment to make it
+ * consistent.
+ */
+ if (add_align > dev_res->res->start) {
+ resource_size_t r_size = resource_size(dev_res->res);
+
+ dev_res->res->start = add_align;
+ dev_res->res->end = add_align + r_size - 1;
+
+ list_for_each_entry(dev_res2, head, list) {
+ align = pci_resource_alignment(dev_res2->dev,
+ dev_res2->res);
+ if (add_align > align) {
+ list_move_tail(&dev_res->list,
+ &dev_res2->list);
+ break;
+ }
+ }
+ }
+
+ }
+
+ /* Try updated head list with add_size added */
+ assign_requested_resources_sorted(head, &local_fail_head);
+
+ /* Were all resources assigned with add_size? */
+ if (list_empty(&local_fail_head)) {
+ /* Remove head list from realloc_head list */
+ list_for_each_entry(dev_res, head, list)
+ remove_from_list(realloc_head, dev_res->res);
+ free_list(&save_head);
+ free_list(head);
+ return;
+ }
+
+ /* check failed type */
+ fail_type = pci_fail_res_type_mask(&local_fail_head);
+ /* Remove assigned resources that need not be released from the head list etc. */
+ list_for_each_entry_safe(dev_res, tmp_res, head, list)
+ if (dev_res->res->parent &&
+ !pci_need_to_release(fail_type, dev_res->res)) {
+ /* remove it from realloc_head list */
+ remove_from_list(realloc_head, dev_res->res);
+ remove_from_list(&save_head, dev_res->res);
+ list_del(&dev_res->list);
+ kfree(dev_res);
+ }
+
+ free_list(&local_fail_head);
+ /* Release assigned resource */
+ list_for_each_entry(dev_res, head, list)
+ if (dev_res->res->parent)
+ release_resource(dev_res->res);
+ /* Restore start/end/flags from saved list */
+ list_for_each_entry(save_res, &save_head, list) {
+ struct resource *res = save_res->res;
+
+ res->start = save_res->start;
+ res->end = save_res->end;
+ res->flags = save_res->flags;
+ }
+ free_list(&save_head);
+
+requested_and_reassign:
+ /* Satisfy the must-have resource requests */
+ assign_requested_resources_sorted(head, fail_head);
+
+ /* Try to satisfy any additional optional resource requests */
+ if (realloc_head)
+ reassign_resources_sorted(realloc_head, head);
+ free_list(head);
+}
+
+static void pdev_assign_resources_sorted(struct pci_dev *dev,
+ struct list_head *add_head,
+ struct list_head *fail_head)
+{
+ LIST_HEAD(head);
+
+ __dev_sort_resources(dev, &head);
+ __assign_resources_sorted(&head, add_head, fail_head);
+}
+
+static void pbus_assign_resources_sorted(const struct pci_bus *bus,
+ struct list_head *realloc_head,
+ struct list_head *fail_head)
+{
+ struct pci_dev *dev;
+ LIST_HEAD(head);
+
+ list_for_each_entry(dev, &bus->devices, bus_list)
+ __dev_sort_resources(dev, &head);
+
+ __assign_resources_sorted(&head, realloc_head, fail_head);
+}
+
+void pci_setup_cardbus(struct pci_bus *bus)
+{
+ struct pci_dev *bridge = bus->self;
+ struct resource *res;
+ struct pci_bus_region region;
+
+ dev_info(&bridge->dev, "CardBus bridge to %pR\n",
+ &bus->busn_res);
+
+ res = bus->resource[0];
+ pcibios_resource_to_bus(bridge->bus, &region, res);
+ if (res->flags & IORESOURCE_IO) {
+ /*
+ * The IO resource is allocated a range twice as large as it
+ * would normally need. This allows us to set both IO regs.
+ */
+ dev_info(&bridge->dev, " bridge window %pR\n", res);
+ pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
+ region.start);
+ pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
+ region.end);
+ }
+
+ res = bus->resource[1];
+ pcibios_resource_to_bus(bridge->bus, &region, res);
+ if (res->flags & IORESOURCE_IO) {
+ dev_info(&bridge->dev, " bridge window %pR\n", res);
+ pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
+ region.start);
+ pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
+ region.end);
+ }
+
+ res = bus->resource[2];
+ pcibios_resource_to_bus(bridge->bus, &region, res);
+ if (res->flags & IORESOURCE_MEM) {
+ dev_info(&bridge->dev, " bridge window %pR\n", res);
+ pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
+ region.start);
+ pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
+ region.end);
+ }
+
+ res = bus->resource[3];
+ pcibios_resource_to_bus(bridge->bus, &region, res);
+ if (res->flags & IORESOURCE_MEM) {
+ dev_info(&bridge->dev, " bridge window %pR\n", res);
+ pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
+ region.start);
+ pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
+ region.end);
+ }
+}
+EXPORT_SYMBOL(pci_setup_cardbus);
+
+/* Initialize bridges with base/limit values we have collected.
+   PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
+   requires that if there are no I/O ports or memory behind the
+   bridge, the corresponding range must be turned off by writing a
+   base value greater than the limit to the bridge's base/limit
+   registers.
+
+   Note: care must be taken when updating I/O base/limit registers
+   of bridges which support 32-bit I/O. This update requires two
+   config space writes, so it's quite possible that an I/O window of
+   the bridge will have some undesirable address (e.g. 0) after the
+   first write. Ditto 64-bit prefetchable MMIO. */
+static void pci_setup_bridge_io(struct pci_dev *bridge)
+{
+ struct resource *res;
+ struct pci_bus_region region;
+ unsigned long io_mask;
+ u8 io_base_lo, io_limit_lo;
+ u16 l;
+ u32 io_upper16;
+
+ io_mask = PCI_IO_RANGE_MASK;
+ if (bridge->io_window_1k)
+ io_mask = PCI_IO_1K_RANGE_MASK;
+
+ /* Set up the top and bottom of the PCI I/O segment for this bus. */
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
+ pcibios_resource_to_bus(bridge->bus, &region, res);
+ if (res->flags & IORESOURCE_IO) {
+ pci_read_config_word(bridge, PCI_IO_BASE, &l);
+ io_base_lo = (region.start >> 8) & io_mask;
+ io_limit_lo = (region.end >> 8) & io_mask;
+ l = ((u16) io_limit_lo << 8) | io_base_lo;
+ /* Set up upper 16 bits of I/O base/limit. */
+ io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
+ dev_info(&bridge->dev, " bridge window %pR\n", res);
+ } else {
+ /* Clear upper 16 bits of I/O base/limit. */
+ io_upper16 = 0;
+ l = 0x00f0;
+ }
+ /* Temporarily disable the I/O range before updating PCI_IO_BASE. */
+ pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
+ /* Update lower 16 bits of I/O base/limit. */
+ pci_write_config_word(bridge, PCI_IO_BASE, l);
+ /* Update upper 16 bits of I/O base/limit. */
+ pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
+}
+
+static void pci_setup_bridge_mmio(struct pci_dev *bridge)
+{
+ struct resource *res;
+ struct pci_bus_region region;
+ u32 l;
+
+ /* Set up the top and bottom of the PCI Memory segment for this bus. */
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
+ pcibios_resource_to_bus(bridge->bus, &region, res);
+ if (res->flags & IORESOURCE_MEM) {
+ l = (region.start >> 16) & 0xfff0;
+ l |= region.end & 0xfff00000;
+ dev_info(&bridge->dev, " bridge window %pR\n", res);
+ } else {
+ l = 0x0000fff0;
+ }
+ pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
+}
+
+static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
+{
+ struct resource *res;
+ struct pci_bus_region region;
+ u32 l, bu, lu;
+
+ /* Clear out the upper 32 bits of PREF limit.
+ If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
+ disables PREF range, which is ok. */
+ pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
+
+ /* Set up PREF base/limit. */
+ bu = lu = 0;
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
+ pcibios_resource_to_bus(bridge->bus, &region, res);
+ if (res->flags & IORESOURCE_PREFETCH) {
+ l = (region.start >> 16) & 0xfff0;
+ l |= region.end & 0xfff00000;
+ if (res->flags & IORESOURCE_MEM_64) {
+ bu = upper_32_bits(region.start);
+ lu = upper_32_bits(region.end);
+ }
+ dev_info(&bridge->dev, " bridge window %pR\n", res);
+ } else {
+ l = 0x0000fff0;
+ }
+ pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
+
+ /* Set the upper 32 bits of PREF base & limit. */
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
+ pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+}
+
+static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
+{
+ struct pci_dev *bridge = bus->self;
+
+ dev_info(&bridge->dev, "PCI bridge to %pR\n",
+ &bus->busn_res);
+
+ if (type & IORESOURCE_IO)
+ pci_setup_bridge_io(bridge);
+
+ if (type & IORESOURCE_MEM)
+ pci_setup_bridge_mmio(bridge);
+
+ if (type & IORESOURCE_PREFETCH)
+ pci_setup_bridge_mmio_pref(bridge);
+
+ pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
+}
+
+void pci_setup_bridge(struct pci_bus *bus)
+{
+ unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
+ IORESOURCE_PREFETCH;
+
+ __pci_setup_bridge(bus, type);
+}
+
+int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
+{
+ if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
+ return 0;
+
+ if (pci_claim_resource(bridge, i) == 0)
+ return 0; /* claimed the window */
+
+ if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+ return 0;
+
+ if (!pci_bus_clip_resource(bridge, i))
+ return -EINVAL; /* clipping didn't change anything */
+
+ switch (i - PCI_BRIDGE_RESOURCES) {
+ case 0:
+ pci_setup_bridge_io(bridge);
+ break;
+ case 1:
+ pci_setup_bridge_mmio(bridge);
+ break;
+ case 2:
+ pci_setup_bridge_mmio_pref(bridge);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (pci_claim_resource(bridge, i) == 0)
+ return 0; /* claimed a smaller window */
+
+ return -EINVAL;
+}
+
+/* Check whether the bridge supports optional I/O and
+ prefetchable memory ranges. If not, the respective
+ base/limit registers must be read-only and read as 0. */
+static void pci_bridge_check_ranges(struct pci_bus *bus)
+{
+ u16 io;
+ u32 pmem;
+ struct pci_dev *bridge = bus->self;
+ struct resource *b_res;
+
+ b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
+ b_res[1].flags |= IORESOURCE_MEM;
+
+ pci_read_config_word(bridge, PCI_IO_BASE, &io);
+ if (!io) {
+ pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);
+ pci_read_config_word(bridge, PCI_IO_BASE, &io);
+ pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
+ }
+ if (io)
+ b_res[0].flags |= IORESOURCE_IO;
+
+ /* DECchip 21050 pass 2 errata: the bridge may miss an address
+ disconnect boundary by one PCI data phase.
+ Workaround: do not use prefetching on this device. */
+ if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
+ return;
+
+ pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
+ if (!pmem) {
+ pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
+ 0xffe0fff0);
+ pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
+ pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
+ }
+ if (pmem) {
+ b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
+ PCI_PREF_RANGE_TYPE_64) {
+ b_res[2].flags |= IORESOURCE_MEM_64;
+ b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
+ }
+ }
+
+ /* double-check whether the bridge really supports a 64-bit pref window */
+ if (b_res[2].flags & IORESOURCE_MEM_64) {
+ u32 mem_base_hi, tmp;
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ &mem_base_hi);
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ 0xffffffff);
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
+ if (!tmp)
+ b_res[2].flags &= ~IORESOURCE_MEM_64;
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ mem_base_hi);
+ }
+}
+
+/* Helper function for sizing routines: find first available
+ bus resource of a given type. Note: we intentionally skip
+ the bus resources which have already been assigned (that is,
+ have non-NULL parent resource). */
+static struct resource *find_free_bus_resource(struct pci_bus *bus,
+ unsigned long type_mask, unsigned long type)
+{
+ int i;
+ struct resource *r;
+
+ pci_bus_for_each_resource(bus, r, i) {
+ if (r == &ioport_resource || r == &iomem_resource)
+ continue;
+ if (r && (r->flags & type_mask) == type && !r->parent)
+ return r;
+ }
+ return NULL;
+}
+
+static resource_size_t calculate_iosize(resource_size_t size,
+ resource_size_t min_size,
+ resource_size_t size1,
+ resource_size_t old_size,
+ resource_size_t align)
+{
+ if (size < min_size)
+ size = min_size;
+ if (old_size == 1)
+ old_size = 0;
+ /* To be fixed in 2.5: we should have a sort of HAVE_ISA
+ flag in struct pci_bus. */
+#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
+ size = (size & 0xff) + ((size & ~0xffUL) << 2);
+#endif
+ size = ALIGN(size + size1, align);
+ if (size < old_size)
+ size = old_size;
+ return size;
+}
+
+static resource_size_t calculate_memsize(resource_size_t size,
+ resource_size_t min_size,
+ resource_size_t size1,
+ resource_size_t old_size,
+ resource_size_t align)
+{
+ if (size < min_size)
+ size = min_size;
+ if (old_size == 1)
+ old_size = 0;
+ if (size < old_size)
+ size = old_size;
+ size = ALIGN(size + size1, align);
+ return size;
+}
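+
+/*
+ * Worked example (illustrative numbers): with size = 0x300000 of child
+ * resources, min_size = 0, size1 = 0, old_size = 0x200000 and a 1MB
+ * align, calculate_memsize() keeps the larger 0x300000 and returns
+ * ALIGN(0x300000, 0x100000) == 0x300000.
+ */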
+
+resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
+ unsigned long type)
+{
+ return 1;
+}
+
+#define PCI_P2P_DEFAULT_MEM_ALIGN 0x100000 /* 1MiB */
+#define PCI_P2P_DEFAULT_IO_ALIGN 0x1000 /* 4KiB */
+#define PCI_P2P_DEFAULT_IO_ALIGN_1K 0x400 /* 1KiB */
+
+static resource_size_t window_alignment(struct pci_bus *bus,
+ unsigned long type)
+{
+ resource_size_t align = 1, arch_align;
+
+ if (type & IORESOURCE_MEM)
+ align = PCI_P2P_DEFAULT_MEM_ALIGN;
+ else if (type & IORESOURCE_IO) {
+ /*
+ * Per spec, I/O windows are 4K-aligned, but some
+ * bridges have an extension to support 1K alignment.
+ */
+ if (bus->self->io_window_1k)
+ align = PCI_P2P_DEFAULT_IO_ALIGN_1K;
+ else
+ align = PCI_P2P_DEFAULT_IO_ALIGN;
+ }
+
+ arch_align = pcibios_window_alignment(bus, type);
+ return max(align, arch_align);
+}
+
+/**
+ * pbus_size_io() - size the io window of a given bus
+ *
+ * @bus : the bus
+ * @min_size : the minimum io window that must be allocated
+ * @add_size : additional optional io window
+ * @realloc_head : track the additional io window on this list
+ *
+ * Sizing the IO windows of the PCI-PCI bridge is trivial,
+ * since these windows have 1K or 4K granularity and the IO ranges
+ * of non-bridge PCI devices are limited to 256 bytes.
+ * We must be careful with the ISA aliasing though.
+ */
+static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
+ resource_size_t add_size, struct list_head *realloc_head)
+{
+ struct pci_dev *dev;
+ struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO,
+ IORESOURCE_IO);
+ resource_size_t size = 0, size0 = 0, size1 = 0;
+ resource_size_t children_add_size = 0;
+ resource_size_t min_align, align;
+
+ if (!b_res)
+ return;
+
+ min_align = window_alignment(bus, IORESOURCE_IO);
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ int i;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *r = &dev->resource[i];
+ unsigned long r_size;
+
+ if (r->parent || !(r->flags & IORESOURCE_IO))
+ continue;
+ r_size = resource_size(r);
+
+ if (r_size < 0x400)
+ /* Might be re-aligned for ISA */
+ size += r_size;
+ else
+ size1 += r_size;
+
+ align = pci_resource_alignment(dev, r);
+ if (align > min_align)
+ min_align = align;
+
+ if (realloc_head)
+ children_add_size += get_res_add_size(realloc_head, r);
+ }
+ }
+
+ size0 = calculate_iosize(size, min_size, size1,
+ resource_size(b_res), min_align);
+ if (children_add_size > add_size)
+ add_size = children_add_size;
+ size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
+ calculate_iosize(size, min_size, add_size + size1,
+ resource_size(b_res), min_align);
+ if (!size0 && !size1) {
+ if (b_res->start || b_res->end)
+ dev_info(&bus->self->dev, "disabling bridge window %pR to %pR (unused)\n",
+ b_res, &bus->busn_res);
+ b_res->flags = 0;
+ return;
+ }
+
+ b_res->start = min_align;
+ b_res->end = b_res->start + size0 - 1;
+ b_res->flags |= IORESOURCE_STARTALIGN;
+ if (size1 > size0 && realloc_head) {
+ add_to_list(realloc_head, bus->self, b_res, size1-size0,
+ min_align);
+ dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window %pR to %pR add_size %llx\n",
+ b_res, &bus->busn_res,
+ (unsigned long long)size1-size0);
+ }
+}
+
+static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
+ int max_order)
+{
+ resource_size_t align = 0;
+ resource_size_t min_align = 0;
+ int order;
+
+ for (order = 0; order <= max_order; order++) {
+ resource_size_t align1 = 1;
+
+ align1 <<= (order + 20);
+
+ if (!align)
+ min_align = align1;
+ else if (ALIGN(align + min_align, min_align) < align1)
+ min_align = align1 >> 1;
+ align += aligns[order];
+ }
+
+ return min_align;
+}
+
+/**
+ * pbus_size_mem() - size the memory window of a given bus
+ *
+ * @bus : the bus
+ * @mask: mask applied to resource flags before comparing with @type
+ * @type: the type of free resource from bridge
+ * @type2: second match type
+ * @type3: third match type
+ * @min_size : the minimum memory window that must be allocated
+ * @add_size : additional optional memory window
+ * @realloc_head : track the additional memory window on this list
+ *
+ * Calculate the size of the bus and minimal alignment which
+ * guarantees that all child resources fit in this size.
+ *
+ * Returns -ENOSPC if there's no available bus resource of the desired type.
+ * Otherwise, sets the bus resource start/end to indicate the required
+ * size, adds things to realloc_head (if supplied), and returns 0.
+ */
+static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
+ unsigned long type, unsigned long type2,
+ unsigned long type3,
+ resource_size_t min_size, resource_size_t add_size,
+ struct list_head *realloc_head)
+{
+ struct pci_dev *dev;
+ resource_size_t min_align, align, size, size0, size1;
+ resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */
+ int order, max_order;
+ struct resource *b_res = find_free_bus_resource(bus,
+ mask | IORESOURCE_PREFETCH, type);
+ resource_size_t children_add_size = 0;
+ resource_size_t children_add_align = 0;
+ resource_size_t add_align = 0;
+
+ if (!b_res)
+ return -ENOSPC;
+
+ memset(aligns, 0, sizeof(aligns));
+ max_order = 0;
+ size = 0;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ int i;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *r = &dev->resource[i];
+ resource_size_t r_size;
+
+ if (r->parent || ((r->flags & mask) != type &&
+ (r->flags & mask) != type2 &&
+ (r->flags & mask) != type3))
+ continue;
+ r_size = resource_size(r);
+#ifdef CONFIG_PCI_IOV
+ /* put SR-IOV requested resources on the optional list */
+ if (realloc_head && i >= PCI_IOV_RESOURCES &&
+ i <= PCI_IOV_RESOURCE_END) {
+ add_align = max(pci_resource_alignment(dev, r), add_align);
+ r->end = r->start - 1;
+ add_to_list(realloc_head, dev, r, r_size, 0/* don't care */);
+ children_add_size += r_size;
+ continue;
+ }
+#endif
+ /*
+ * aligns[0] is for 1MB (since bridge memory
+ * windows are always at least 1MB aligned), so
+ * keep "order" from being negative for smaller
+ * resources.
+ */
+ align = pci_resource_alignment(dev, r);
+ order = __ffs(align) - 20;
+ if (order < 0)
+ order = 0;
+ if (order >= ARRAY_SIZE(aligns)) {
+ dev_warn(&dev->dev, "disabling BAR %d: %pR (bad alignment %#llx)\n",
+ i, r, (unsigned long long) align);
+ r->flags = 0;
+ continue;
+ }
+ size += r_size;
+ /* Exclude ranges with size > align from
+ calculation of the alignment. */
+ if (r_size == align)
+ aligns[order] += align;
+ if (order > max_order)
+ max_order = order;
+
+ if (realloc_head) {
+ children_add_size += get_res_add_size(realloc_head, r);
+ children_add_align = get_res_add_align(realloc_head, r);
+ add_align = max(add_align, children_add_align);
+ }
+ }
+ }
+
+ min_align = calculate_mem_align(aligns, max_order);
+ min_align = max(min_align, window_alignment(bus, b_res->flags));
+ size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
+ add_align = max(min_align, add_align);
+ if (children_add_size > add_size)
+ add_size = children_add_size;
+ size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
+ calculate_memsize(size, min_size, add_size,
+ resource_size(b_res), add_align);
+ if (!size0 && !size1) {
+ if (b_res->start || b_res->end)
+ dev_info(&bus->self->dev, "disabling bridge window %pR to %pR (unused)\n",
+ b_res, &bus->busn_res);
+ b_res->flags = 0;
+ return 0;
+ }
+ b_res->start = min_align;
+ b_res->end = size0 + min_align - 1;
+ b_res->flags |= IORESOURCE_STARTALIGN;
+ if (size1 > size0 && realloc_head) {
+ add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
+ dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window %pR to %pR add_size %llx add_align %llx\n",
+ b_res, &bus->busn_res,
+ (unsigned long long) (size1 - size0),
+ (unsigned long long) add_align);
+ }
+ return 0;
+}
+
+unsigned long pci_cardbus_resource_alignment(struct resource *res)
+{
+ if (res->flags & IORESOURCE_IO)
+ return pci_cardbus_io_size;
+ if (res->flags & IORESOURCE_MEM)
+ return pci_cardbus_mem_size;
+ return 0;
+}
+
+static void pci_bus_size_cardbus(struct pci_bus *bus,
+ struct list_head *realloc_head)
+{
+ struct pci_dev *bridge = bus->self;
+ struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
+ resource_size_t b_res_3_size = pci_cardbus_mem_size * 2;
+ u16 ctrl;
+
+ if (b_res[0].parent)
+ goto handle_b_res_1;
+ /*
+ * Reserve some resources for CardBus. We reserve
+ * a fixed amount of bus space for CardBus bridges.
+ */
+ b_res[0].start = pci_cardbus_io_size;
+ b_res[0].end = b_res[0].start + pci_cardbus_io_size - 1;
+ b_res[0].flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
+ if (realloc_head) {
+ b_res[0].end -= pci_cardbus_io_size;
+ add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size,
+ pci_cardbus_io_size);
+ }
+
+handle_b_res_1:
+ if (b_res[1].parent)
+ goto handle_b_res_2;
+ b_res[1].start = pci_cardbus_io_size;
+ b_res[1].end = b_res[1].start + pci_cardbus_io_size - 1;
+ b_res[1].flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
+ if (realloc_head) {
+ b_res[1].end -= pci_cardbus_io_size;
+ add_to_list(realloc_head, bridge, b_res+1, pci_cardbus_io_size,
+ pci_cardbus_io_size);
+ }
+
+handle_b_res_2:
+ /* MEM1 must not be pref mmio */
+ pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
+ if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM1) {
+ ctrl &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1;
+ pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
+ pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
+ }
+
+ /*
+ * Check whether prefetchable memory is supported
+ * by this bridge.
+ */
+ pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
+ if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
+ ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
+ pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
+ pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
+ }
+
+ if (b_res[2].parent)
+ goto handle_b_res_3;
+ /*
+ * If we have prefetchable memory support, allocate
+ * two regions. Otherwise, allocate one region of
+ * twice the size.
+ */
+ if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
+ b_res[2].start = pci_cardbus_mem_size;
+ b_res[2].end = b_res[2].start + pci_cardbus_mem_size - 1;
+ b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH |
+ IORESOURCE_STARTALIGN;
+ if (realloc_head) {
+ b_res[2].end -= pci_cardbus_mem_size;
+ add_to_list(realloc_head, bridge, b_res+2,
+ pci_cardbus_mem_size, pci_cardbus_mem_size);
+ }
+
+ /* reduce that to half */
+ b_res_3_size = pci_cardbus_mem_size;
+ }
+
+handle_b_res_3:
+ if (b_res[3].parent)
+ goto handle_done;
+ b_res[3].start = pci_cardbus_mem_size;
+ b_res[3].end = b_res[3].start + b_res_3_size - 1;
+ b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN;
+ if (realloc_head) {
+ b_res[3].end -= b_res_3_size;
+ add_to_list(realloc_head, bridge, b_res+3, b_res_3_size,
+ pci_cardbus_mem_size);
+ }
+
+handle_done:
+ ;
+}
+
+void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
+{
+ struct pci_dev *dev;
+ unsigned long mask, prefmask, type2 = 0, type3 = 0;
+ resource_size_t additional_mem_size = 0, additional_io_size = 0;
+ struct resource *b_res;
+ int ret;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ struct pci_bus *b = dev->subordinate;
+ if (!b)
+ continue;
+
+ switch (dev->class >> 8) {
+ case PCI_CLASS_BRIDGE_CARDBUS:
+ pci_bus_size_cardbus(b, realloc_head);
+ break;
+
+ case PCI_CLASS_BRIDGE_PCI:
+ default:
+ __pci_bus_size_bridges(b, realloc_head);
+ break;
+ }
+ }
+
+ /* The root bus? */
+ if (pci_is_root_bus(bus))
+ return;
+
+ switch (bus->self->class >> 8) {
+ case PCI_CLASS_BRIDGE_CARDBUS:
+ /* don't size cardbuses yet. */
+ break;
+
+ case PCI_CLASS_BRIDGE_PCI:
+ pci_bridge_check_ranges(bus);
+ if (bus->self->is_hotplug_bridge) {
+ additional_io_size = pci_hotplug_io_size;
+ additional_mem_size = pci_hotplug_mem_size;
+ }
+ /* Fall through */
+ default:
+ pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
+ additional_io_size, realloc_head);
+
+ /*
+ * If there's a 64-bit prefetchable MMIO window, compute
+ * the size required to put all 64-bit prefetchable
+ * resources in it.
+ */
+ b_res = &bus->self->resource[PCI_BRIDGE_RESOURCES];
+ mask = IORESOURCE_MEM;
+ prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ if (b_res[2].flags & IORESOURCE_MEM_64) {
+ prefmask |= IORESOURCE_MEM_64;
+ ret = pbus_size_mem(bus, prefmask, prefmask,
+ prefmask, prefmask,
+ realloc_head ? 0 : additional_mem_size,
+ additional_mem_size, realloc_head);
+
+ /*
+ * If successful, all non-prefetchable resources
+ * and any 32-bit prefetchable resources will go in
+ * the non-prefetchable window.
+ */
+ if (ret == 0) {
+ mask = prefmask;
+ type2 = prefmask & ~IORESOURCE_MEM_64;
+ type3 = prefmask & ~IORESOURCE_PREFETCH;
+ }
+ }
+
+ /*
+ * If there is no 64-bit prefetchable window, compute the
+ * size required to put all prefetchable resources in the
+ * 32-bit prefetchable window (if there is one).
+ */
+ if (!type2) {
+ prefmask &= ~IORESOURCE_MEM_64;
+ ret = pbus_size_mem(bus, prefmask, prefmask,
+ prefmask, prefmask,
+ realloc_head ? 0 : additional_mem_size,
+ additional_mem_size, realloc_head);
+
+ /*
+ * If successful, only non-prefetchable resources
+ * will go in the non-prefetchable window.
+ */
+ if (ret == 0)
+ mask = prefmask;
+ else
+ additional_mem_size += additional_mem_size;
+
+ type2 = type3 = IORESOURCE_MEM;
+ }
+
+ /*
+ * Compute the size required to put everything else in the
+ * non-prefetchable window. This includes:
+ *
+ * - all non-prefetchable resources
+ * - 32-bit prefetchable resources if there's a 64-bit
+ * prefetchable window or no prefetchable window at all
+ * - 64-bit prefetchable resources if there's no
+ * prefetchable window at all
+ *
+ * Note that the strategy in __pci_assign_resource() must
+ * match that used here. Specifically, we cannot put a
+ * 32-bit prefetchable resource in a 64-bit prefetchable
+ * window.
+ */
+ pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
+ realloc_head ? 0 : additional_mem_size,
+ additional_mem_size, realloc_head);
+ break;
+ }
+}
+
+void pci_bus_size_bridges(struct pci_bus *bus)
+{
+ __pci_bus_size_bridges(bus, NULL);
+}
+EXPORT_SYMBOL(pci_bus_size_bridges);
+
+void __pci_bus_assign_resources(const struct pci_bus *bus,
+ struct list_head *realloc_head,
+ struct list_head *fail_head)
+{
+ struct pci_bus *b;
+ struct pci_dev *dev;
+
+ pbus_assign_resources_sorted(bus, realloc_head, fail_head);
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ b = dev->subordinate;
+ if (!b)
+ continue;
+
+ __pci_bus_assign_resources(b, realloc_head, fail_head);
+
+ switch (dev->class >> 8) {
+ case PCI_CLASS_BRIDGE_PCI:
+ if (!pci_is_enabled(dev))
+ pci_setup_bridge(b);
+ break;
+
+ case PCI_CLASS_BRIDGE_CARDBUS:
+ pci_setup_cardbus(b);
+ break;
+
+ default:
+ dev_info(&dev->dev, "not setting up bridge for bus %04x:%02x\n",
+ pci_domain_nr(b), b->number);
+ break;
+ }
+ }
+}
+
+void pci_bus_assign_resources(const struct pci_bus *bus)
+{
+ __pci_bus_assign_resources(bus, NULL, NULL);
+}
+EXPORT_SYMBOL(pci_bus_assign_resources);
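+
+/*
+ * Usage sketch: the typical rescan sequence after a bus has been
+ * (re)populated, as hotplug paths do:
+ *
+ *	pci_scan_child_bus(bus);
+ *	pci_bus_size_bridges(bus);
+ *	pci_bus_assign_resources(bus);
+ *	pci_bus_add_devices(bus);
+ */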
+
+static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
+ struct list_head *add_head,
+ struct list_head *fail_head)
+{
+ struct pci_bus *b;
+
+ pdev_assign_resources_sorted((struct pci_dev *)bridge,
+ add_head, fail_head);
+
+ b = bridge->subordinate;
+ if (!b)
+ return;
+
+ __pci_bus_assign_resources(b, add_head, fail_head);
+
+ switch (bridge->class >> 8) {
+ case PCI_CLASS_BRIDGE_PCI:
+ pci_setup_bridge(b);
+ break;
+
+ case PCI_CLASS_BRIDGE_CARDBUS:
+ pci_setup_cardbus(b);
+ break;
+
+ default:
+ dev_info(&bridge->dev, "not setting up bridge for bus %04x:%02x\n",
+ pci_domain_nr(b), b->number);
+ break;
+ }
+}
+static void pci_bridge_release_resources(struct pci_bus *bus,
+ unsigned long type)
+{
+ struct pci_dev *dev = bus->self;
+ struct resource *r;
+ unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
+ IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
+ unsigned old_flags = 0;
+ struct resource *b_res;
+ int idx = 1;
+
+ b_res = &dev->resource[PCI_BRIDGE_RESOURCES];
+
+ /*
+ * 1. if there is an io port assignment failure, release the bridge's
+ * io port window.
+ * 2. if there is a non-pref mmio assignment failure, release the
+ * bridge's non-pref mmio window.
+ * 3. if there is a 64-bit pref mmio assignment failure, and the
+ * bridge's pref window is 64-bit, release the bridge's pref mmio.
+ * 4. if there is a pref mmio assignment failure, and the bridge's
+ * pref window is 32-bit mmio, release the bridge's pref mmio.
+ * 5. if there is a pref mmio assignment failure, and the bridge's
+ * pref window is not assigned, release the bridge's non-pref mmio.
+ */
+ if (type & IORESOURCE_IO)
+ idx = 0;
+ else if (!(type & IORESOURCE_PREFETCH))
+ idx = 1;
+ else if ((type & IORESOURCE_MEM_64) &&
+ (b_res[2].flags & IORESOURCE_MEM_64))
+ idx = 2;
+ else if (!(b_res[2].flags & IORESOURCE_MEM_64) &&
+ (b_res[2].flags & IORESOURCE_PREFETCH))
+ idx = 2;
+ else
+ idx = 1;
+
+ r = &b_res[idx];
+
+ if (!r->parent)
+ return;
+
+ /*
+ * If there are children under this resource, release them all.
+ */
+ release_child_resources(r);
+ if (!release_resource(r)) {
+ type = old_flags = r->flags & type_mask;
+ dev_printk(KERN_DEBUG, &dev->dev, "resource %d %pR released\n",
+ PCI_BRIDGE_RESOURCES + idx, r);
+ /* keep the old size */
+ r->end = resource_size(r) - 1;
+ r->start = 0;
+ r->flags = 0;
+
+		/* avoid touching the window without PREF */
+ if (type & IORESOURCE_PREFETCH)
+ type = IORESOURCE_PREFETCH;
+ __pci_setup_bridge(bus, type);
+		/* for the next child resource under the same bridge */
+ r->flags = old_flags;
+ }
+}
+
+enum release_type {
+ leaf_only,
+ whole_subtree,
+};
+
+/*
+ * Try to release PCI bridge resources from leaf bridges, so we can
+ * allocate a bigger window later.
+ */
+static void pci_bus_release_bridge_resources(struct pci_bus *bus,
+ unsigned long type,
+ enum release_type rel_type)
+{
+ struct pci_dev *dev;
+ bool is_leaf_bridge = true;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ struct pci_bus *b = dev->subordinate;
+ if (!b)
+ continue;
+
+ is_leaf_bridge = false;
+
+ if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+ continue;
+
+ if (rel_type == whole_subtree)
+ pci_bus_release_bridge_resources(b, type,
+ whole_subtree);
+ }
+
+ if (pci_is_root_bus(bus))
+ return;
+
+ if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+ return;
+
+ if ((rel_type == whole_subtree) || is_leaf_bridge)
+ pci_bridge_release_resources(bus, type);
+}
+
+static void pci_bus_dump_res(struct pci_bus *bus)
+{
+ struct resource *res;
+ int i;
+
+ pci_bus_for_each_resource(bus, res, i) {
+ if (!res || !res->end || !res->flags)
+ continue;
+
+ dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
+ }
+}
+
+static void pci_bus_dump_resources(struct pci_bus *bus)
+{
+ struct pci_bus *b;
+ struct pci_dev *dev;
+
+ pci_bus_dump_res(bus);
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ b = dev->subordinate;
+ if (!b)
+ continue;
+
+ pci_bus_dump_resources(b);
+ }
+}
+
+static int pci_bus_get_depth(struct pci_bus *bus)
+{
+ int depth = 0;
+ struct pci_bus *child_bus;
+
+ list_for_each_entry(child_bus, &bus->children, node) {
+ int ret;
+
+ ret = pci_bus_get_depth(child_bus);
+ if (ret + 1 > depth)
+ depth = ret + 1;
+ }
+
+ return depth;
+}
+
+/*
+ * -1: undefined, will auto detect later
+ * 0: disabled by user
+ * 1: disabled by auto detect
+ * 2: enabled by user
+ * 3: enabled by auto detect
+ */
+enum enable_type {
+ undefined = -1,
+ user_disabled,
+ auto_disabled,
+ user_enabled,
+ auto_enabled,
+};
+
+static enum enable_type pci_realloc_enable = undefined;
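+
+/*
+ * Fed from the "pci=realloc" kernel parameter: booting with
+ * "pci=realloc=off" selects user_disabled and "pci=realloc=on" selects
+ * user_enabled (how the option string reaches here is arch-specific).
+ */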
+void __init pci_realloc_get_opt(char *str)
+{
+ if (!strncmp(str, "off", 3))
+ pci_realloc_enable = user_disabled;
+ else if (!strncmp(str, "on", 2))
+ pci_realloc_enable = user_enabled;
+}
+
+static bool pci_realloc_enabled(enum enable_type enable)
+{
+ return enable >= user_enabled;
+}
+
+#if defined(CONFIG_PCI_IOV) && defined(CONFIG_PCI_REALLOC_ENABLE_AUTO)
+static int iov_resources_unassigned(struct pci_dev *dev, void *data)
+{
+ int i;
+ bool *unassigned = data;
+
+ for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++) {
+ struct resource *r = &dev->resource[i];
+ struct pci_bus_region region;
+
+ /* Not assigned or rejected by kernel? */
+ if (!r->flags)
+ continue;
+
+ pcibios_resource_to_bus(dev->bus, &region, r);
+ if (!region.start) {
+ *unassigned = true;
+ return 1; /* return early from pci_walk_bus() */
+ }
+ }
+
+ return 0;
+}
+
+static enum enable_type pci_realloc_detect(struct pci_bus *bus,
+ enum enable_type enable_local)
+{
+ bool unassigned = false;
+
+ if (enable_local != undefined)
+ return enable_local;
+
+ pci_walk_bus(bus, iov_resources_unassigned, &unassigned);
+ if (unassigned)
+ return auto_enabled;
+
+ return enable_local;
+}
+#else
+static enum enable_type pci_realloc_detect(struct pci_bus *bus,
+ enum enable_type enable_local)
+{
+ return enable_local;
+}
+#endif
+
+/*
+ * The first try will not touch PCI bridge resources; the second and
+ * later tries will release small leaf bridge resources.  We stop once
+ * we reach the maximum bus depth without finding a good assignment.
+ */
+void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
+{
+ LIST_HEAD(realloc_head); /* list of resources that
+ want additional resources */
+ struct list_head *add_list = NULL;
+ int tried_times = 0;
+ enum release_type rel_type = leaf_only;
+ LIST_HEAD(fail_head);
+ struct pci_dev_resource *fail_res;
+ unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
+ IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
+ int pci_try_num = 1;
+ enum enable_type enable_local;
+
+	/* Determine whether reallocation is enabled (by user or auto-detection) */
+ enable_local = pci_realloc_detect(bus, pci_realloc_enable);
+ if (pci_realloc_enabled(enable_local)) {
+ int max_depth = pci_bus_get_depth(bus);
+
+ pci_try_num = max_depth + 1;
+ dev_printk(KERN_DEBUG, &bus->dev,
+ "max bus depth: %d pci_try_num: %d\n",
+ max_depth, pci_try_num);
+ }
+
+again:
+	/*
+	 * Only the last try uses add_list; earlier tries treat "good to
+	 * have" sizes as "must have", so parent bridge resources can be
+	 * reallocated.
+	 */
+ if (tried_times + 1 == pci_try_num)
+ add_list = &realloc_head;
+	/*
+	 * Depth first, calculate sizes and alignments of all
+	 * subordinate buses.
+	 */
+ __pci_bus_size_bridges(bus, add_list);
+
+ /* Depth last, allocate resources and update the hardware. */
+ __pci_bus_assign_resources(bus, add_list, &fail_head);
+ if (add_list)
+ BUG_ON(!list_empty(add_list));
+ tried_times++;
+
+	/* Did any device complain? */
+ if (list_empty(&fail_head))
+ goto dump;
+
+ if (tried_times >= pci_try_num) {
+ if (enable_local == undefined)
+ dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
+ else if (enable_local == auto_enabled)
+ dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
+
+ free_list(&fail_head);
+ goto dump;
+ }
+
+	dev_printk(KERN_DEBUG, &bus->dev,
+		   "attempt %d to assign unassigned resources\n", tried_times + 1);
+
+	/* The third and later tries do not check whether the bridge is a leaf */
+ if ((tried_times + 1) > 2)
+ rel_type = whole_subtree;
+
+	/*
+	 * Try to release leaf bridge resources that don't fit the
+	 * resources of child devices under that bridge.
+	 */
+ list_for_each_entry(fail_res, &fail_head, list)
+ pci_bus_release_bridge_resources(fail_res->dev->bus,
+ fail_res->flags & type_mask,
+ rel_type);
+
+ /* restore size and flags */
+ list_for_each_entry(fail_res, &fail_head, list) {
+ struct resource *res = fail_res->res;
+
+ res->start = fail_res->start;
+ res->end = fail_res->end;
+ res->flags = fail_res->flags;
+ if (fail_res->dev->subordinate)
+ res->flags = 0;
+ }
+ free_list(&fail_head);
+
+ goto again;
+
+dump:
+ /* dump the resource on buses */
+ pci_bus_dump_resources(bus);
+}
+
+void __init pci_assign_unassigned_resources(void)
+{
+ struct pci_bus *root_bus;
+
+ list_for_each_entry(root_bus, &pci_root_buses, node)
+ pci_assign_unassigned_root_bus_resources(root_bus);
+}
+
+void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
+{
+ struct pci_bus *parent = bridge->subordinate;
+ LIST_HEAD(add_list); /* list of resources that
+ want additional resources */
+ int tried_times = 0;
+ LIST_HEAD(fail_head);
+ struct pci_dev_resource *fail_res;
+ int retval;
+ unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
+ IORESOURCE_PREFETCH | IORESOURCE_MEM_64;
+
+again:
+ __pci_bus_size_bridges(parent, &add_list);
+ __pci_bridge_assign_resources(bridge, &add_list, &fail_head);
+ BUG_ON(!list_empty(&add_list));
+ tried_times++;
+
+ if (list_empty(&fail_head))
+ goto enable_all;
+
+ if (tried_times >= 2) {
+		/* Still failing; no need to try more */
+ free_list(&fail_head);
+ goto enable_all;
+ }
+
+	printk(KERN_DEBUG "PCI: attempt %d to assign unassigned resources\n",
+	       tried_times + 1);
+
+	/*
+	 * Try to release leaf bridge resources that don't fit the
+	 * resources of child devices under that bridge.
+	 */
+ list_for_each_entry(fail_res, &fail_head, list)
+ pci_bus_release_bridge_resources(fail_res->dev->bus,
+ fail_res->flags & type_mask,
+ whole_subtree);
+
+ /* restore size and flags */
+ list_for_each_entry(fail_res, &fail_head, list) {
+ struct resource *res = fail_res->res;
+
+ res->start = fail_res->start;
+ res->end = fail_res->end;
+ res->flags = fail_res->flags;
+ if (fail_res->dev->subordinate)
+ res->flags = 0;
+ }
+ free_list(&fail_head);
+
+ goto again;
+
+enable_all:
+ retval = pci_reenable_device(bridge);
+ if (retval)
+ dev_err(&bridge->dev, "Error reenabling bridge (%d)\n", retval);
+ pci_set_master(bridge);
+}
+EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
+
+void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+ LIST_HEAD(add_list); /* list of resources that
+ want additional resources */
+
+ down_read(&pci_bus_sem);
+ list_for_each_entry(dev, &bus->devices, bus_list)
+ if (pci_is_bridge(dev) && pci_has_subordinate(dev))
+ __pci_bus_size_bridges(dev->subordinate,
+ &add_list);
+ up_read(&pci_bus_sem);
+ __pci_bus_assign_resources(bus, &add_list, NULL);
+ BUG_ON(!list_empty(&add_list));
+}
+EXPORT_SYMBOL_GPL(pci_assign_unassigned_bus_resources);
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c
new file mode 100644
index 000000000..95c225be4
--- /dev/null
+++ b/drivers/pci/setup-irq.c
@@ -0,0 +1,68 @@
+/*
+ * drivers/pci/setup-irq.c
+ *
+ * Extruded from code written by
+ * Dave Rusling (david.rusling@reo.mts.dec.com)
+ * David Mosberger (davidm@cs.arizona.edu)
+ * David Miller (davem@redhat.com)
+ *
+ * Support routines for initializing a PCI subsystem.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/cache.h>
+
+void __weak pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+ dev_dbg(&dev->dev, "assigning IRQ %02d\n", irq);
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+static void pdev_fixup_irq(struct pci_dev *dev,
+ u8 (*swizzle)(struct pci_dev *, u8 *),
+ int (*map_irq)(const struct pci_dev *, u8, u8))
+{
+ u8 pin, slot;
+ int irq = 0;
+
+	/*
+	 * If this device is not on the primary bus, we need to figure out
+	 * which interrupt pin it will come in on.  We know which slot it
+	 * will come in on because that slot is where the bridge is.  Each
+	 * time the interrupt line passes through a PCI-PCI bridge we must
+	 * apply the swizzle function.
+	 */
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+	/* Cope with illegal values; valid pins are 1 (INTA) through 4 (INTD) */
+ if (pin > 4)
+ pin = 1;
+
+ if (pin != 0) {
+ /* Follow the chain of bridges, swizzling as we go. */
+ slot = (*swizzle)(dev, &pin);
+
+ irq = (*map_irq)(dev, slot, pin);
+ if (irq == -1)
+ irq = 0;
+ }
+ dev->irq = irq;
+
+ dev_dbg(&dev->dev, "fixup irq: got %d\n", dev->irq);
+
+	/*
+	 * Always tell the device, so the driver knows what the real IRQ
+	 * to use is; the device itself does not use it.
+	 */
+ pcibios_update_irq(dev, irq);
+}
+
+void pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *),
+ int (*map_irq)(const struct pci_dev *, u8, u8))
+{
+ struct pci_dev *dev = NULL;
+
+ for_each_pci_dev(dev)
+ pdev_fixup_irq(dev, swizzle, map_irq);
+}
+EXPORT_SYMBOL_GPL(pci_fixup_irqs);
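+
+/*
+ * Typical use from architecture setup code (a sketch, not part of this
+ * file; board_map_irq and BOARD_IRQ_BASE are hypothetical, while
+ * pci_common_swizzle is the stock bridge-swizzling helper):
+ *
+ *	static int board_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+ *	{
+ *		return BOARD_IRQ_BASE + slot;	(board-specific routing)
+ *	}
+ *
+ *	pci_fixup_irqs(pci_common_swizzle, board_map_irq);
+ */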
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
new file mode 100644
index 000000000..232f9254c
--- /dev/null
+++ b/drivers/pci/setup-res.c
@@ -0,0 +1,390 @@
+/*
+ * drivers/pci/setup-res.c
+ *
+ * Extruded from code written by
+ * Dave Rusling (david.rusling@reo.mts.dec.com)
+ * David Mosberger (davidm@cs.arizona.edu)
+ * David Miller (davem@redhat.com)
+ *
+ * Support routines for initializing a PCI subsystem.
+ */
+
+/* fixed for multiple pci buses, 1999 Andrea Arcangeli <andrea@suse.de> */
+
+/*
+ * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
+ * Resource sorting
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/cache.h>
+#include <linux/slab.h>
+#include "pci.h"
+
+
+void pci_update_resource(struct pci_dev *dev, int resno)
+{
+ struct pci_bus_region region;
+ bool disable;
+ u16 cmd;
+ u32 new, check, mask;
+ int reg;
+ enum pci_bar_type type;
+ struct resource *res = dev->resource + resno;
+
+ /*
+ * Ignore resources for unimplemented BARs and unused resource slots
+ * for 64 bit BARs.
+ */
+ if (!res->flags)
+ return;
+
+ if (res->flags & IORESOURCE_UNSET)
+ return;
+
+ /*
+ * Ignore non-moveable resources. This might be legacy resources for
+ * which no functional BAR register exists or another important
+ * system resource we shouldn't move around.
+ */
+ if (res->flags & IORESOURCE_PCI_FIXED)
+ return;
+
+ pcibios_resource_to_bus(dev->bus, &region, res);
+
+ new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
+ if (res->flags & IORESOURCE_IO)
+ mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
+ else
+ mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+
+ reg = pci_resource_bar(dev, resno, &type);
+ if (!reg)
+ return;
+ if (type != pci_bar_unknown) {
+ if (!(res->flags & IORESOURCE_ROM_ENABLE))
+ return;
+ new |= PCI_ROM_ADDRESS_ENABLE;
+ }
+
+ /*
+ * We can't update a 64-bit BAR atomically, so when possible,
+ * disable decoding so that a half-updated BAR won't conflict
+ * with another device.
+ */
+ disable = (res->flags & IORESOURCE_MEM_64) && !dev->mmio_always_on;
+ if (disable) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ pci_write_config_word(dev, PCI_COMMAND,
+ cmd & ~PCI_COMMAND_MEMORY);
+ }
+
+ pci_write_config_dword(dev, reg, new);
+ pci_read_config_dword(dev, reg, &check);
+
+ if ((new ^ check) & mask) {
+ dev_err(&dev->dev, "BAR %d: error updating (%#08x != %#08x)\n",
+ resno, new, check);
+ }
+
+ if (res->flags & IORESOURCE_MEM_64) {
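+		/*
+		 * The double 16-bit shift (rather than ">> 32") avoids
+		 * undefined behaviour when resource_size_t is only 32 bits
+		 * wide.
+		 */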
+ new = region.start >> 16 >> 16;
+ pci_write_config_dword(dev, reg + 4, new);
+ pci_read_config_dword(dev, reg + 4, &check);
+ if (check != new) {
+ dev_err(&dev->dev, "BAR %d: error updating (high %#08x != %#08x)\n",
+ resno, new, check);
+ }
+ }
+
+ if (disable)
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+}
+
+int pci_claim_resource(struct pci_dev *dev, int resource)
+{
+ struct resource *res = &dev->resource[resource];
+ struct resource *root, *conflict;
+
+ if (res->flags & IORESOURCE_UNSET) {
+ dev_info(&dev->dev, "can't claim BAR %d %pR: no address assigned\n",
+ resource, res);
+ return -EINVAL;
+ }
+
+ root = pci_find_parent_resource(dev, res);
+ if (!root) {
+ dev_info(&dev->dev, "can't claim BAR %d %pR: no compatible bridge window\n",
+ resource, res);
+ res->flags |= IORESOURCE_UNSET;
+ return -EINVAL;
+ }
+
+ conflict = request_resource_conflict(root, res);
+ if (conflict) {
+ dev_info(&dev->dev, "can't claim BAR %d %pR: address conflict with %s %pR\n",
+ resource, res, conflict->name, conflict);
+ res->flags |= IORESOURCE_UNSET;
+ return -EBUSY;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pci_claim_resource);
+
+void pci_disable_bridge_window(struct pci_dev *dev)
+{
+ dev_info(&dev->dev, "disabling bridge mem windows\n");
+
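+	/*
+	 * A bridge window is disabled when its base is programmed above
+	 * its limit: the dword write below sets the 16-bit memory base
+	 * to 0xfff0 and the memory limit to 0x0000.  The prefetchable
+	 * window gets the same treatment, with its 64-bit base pushed
+	 * above the limit via the upper-32-bit registers.
+	 */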
+ /* MMIO Base/Limit */
+ pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0);
+
+ /* Prefetchable MMIO Base/Limit */
+ pci_write_config_dword(dev, PCI_PREF_LIMIT_UPPER32, 0);
+ pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0x0000fff0);
+ pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 0xffffffff);
+}
+
+/*
+ * Generic function that returns a value indicating that the device's
+ * original BIOS BAR address was not saved and so is not available for
+ * reinstatement.
+ *
+ * Can be overridden by architecture-specific code that implements
+ * reinstatement functionality rather than leaving it disabled when
+ * normal allocation attempts fail.
+ */
+resource_size_t __weak pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx)
+{
+ return 0;
+}
+
+static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
+ int resno, resource_size_t size)
+{
+ struct resource *root, *conflict;
+ resource_size_t fw_addr, start, end;
+
+ fw_addr = pcibios_retrieve_fw_addr(dev, resno);
+ if (!fw_addr)
+ return -ENOMEM;
+
+ start = res->start;
+ end = res->end;
+ res->start = fw_addr;
+ res->end = res->start + size - 1;
+
+ root = pci_find_parent_resource(dev, res);
+ if (!root) {
+ if (res->flags & IORESOURCE_IO)
+ root = &ioport_resource;
+ else
+ root = &iomem_resource;
+ }
+
+ dev_info(&dev->dev, "BAR %d: trying firmware assignment %pR\n",
+ resno, res);
+ conflict = request_resource_conflict(root, res);
+ if (conflict) {
+ dev_info(&dev->dev, "BAR %d: %pR conflicts with %s %pR\n",
+ resno, res, conflict->name, conflict);
+ res->start = start;
+ res->end = end;
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
+ int resno, resource_size_t size, resource_size_t align)
+{
+ struct resource *res = dev->resource + resno;
+ resource_size_t min;
+ int ret;
+
+ min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
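+	/*
+	 * PCIBIOS_MIN_IO and PCIBIOS_MIN_MEM are per-architecture floors
+	 * that keep allocations clear of legacy ranges.
+	 */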
+
+ /*
+	 * First, try an exact prefetchability match.  Even if a 64-bit
+ * prefetchable bridge window is below 4GB, we can't put a 32-bit
+ * prefetchable resource in it because pbus_size_mem() assumes a
+ * 64-bit window will contain no 32-bit resources. If we assign
+ * things differently than they were sized, not everything will fit.
+ */
+ ret = pci_bus_alloc_resource(bus, res, size, align, min,
+ IORESOURCE_PREFETCH | IORESOURCE_MEM_64,
+ pcibios_align_resource, dev);
+ if (ret == 0)
+ return 0;
+
+ /*
+ * If the prefetchable window is only 32 bits wide, we can put
+ * 64-bit prefetchable resources in it.
+ */
+ if ((res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) ==
+ (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) {
+ ret = pci_bus_alloc_resource(bus, res, size, align, min,
+ IORESOURCE_PREFETCH,
+ pcibios_align_resource, dev);
+ if (ret == 0)
+ return 0;
+ }
+
+ /*
+ * If we didn't find a better match, we can put any memory resource
+ * in a non-prefetchable window. If this resource is 32 bits and
+ * non-prefetchable, the first call already tried the only possibility
+ * so we don't need to try again.
+ */
+ if (res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64))
+ ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
+ pcibios_align_resource, dev);
+
+ return ret;
+}
+
+static int _pci_assign_resource(struct pci_dev *dev, int resno,
+ resource_size_t size, resource_size_t min_align)
+{
+ struct pci_bus *bus;
+ int ret;
+
+ bus = dev->bus;
+ while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) {
+ if (!bus->parent || !bus->self->transparent)
+ break;
+ bus = bus->parent;
+ }
+
+ return ret;
+}
+
+int pci_assign_resource(struct pci_dev *dev, int resno)
+{
+ struct resource *res = dev->resource + resno;
+ resource_size_t align, size;
+ int ret;
+
+ res->flags |= IORESOURCE_UNSET;
+ align = pci_resource_alignment(dev, res);
+ if (!align) {
+ dev_info(&dev->dev, "BAR %d: can't assign %pR (bogus alignment)\n",
+ resno, res);
+ return -EINVAL;
+ }
+
+ size = resource_size(res);
+ ret = _pci_assign_resource(dev, resno, size, align);
+
+ /*
+ * If we failed to assign anything, let's try the address
+ * where firmware left it. That at least has a chance of
+ * working, which is better than just leaving it disabled.
+ */
+ if (ret < 0) {
+ dev_info(&dev->dev, "BAR %d: no space for %pR\n", resno, res);
+ ret = pci_revert_fw_address(res, dev, resno, size);
+ }
+
+ if (ret < 0) {
+ dev_info(&dev->dev, "BAR %d: failed to assign %pR\n", resno,
+ res);
+ return ret;
+ }
+
+ res->flags &= ~IORESOURCE_UNSET;
+ res->flags &= ~IORESOURCE_STARTALIGN;
+ dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
+ if (resno < PCI_BRIDGE_RESOURCES)
+ pci_update_resource(dev, resno);
+
+ return 0;
+}
+EXPORT_SYMBOL(pci_assign_resource);
+
+int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
+ resource_size_t min_align)
+{
+ struct resource *res = dev->resource + resno;
+ unsigned long flags;
+ resource_size_t new_size;
+ int ret;
+
+ flags = res->flags;
+ res->flags |= IORESOURCE_UNSET;
+ if (!res->parent) {
+ dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR\n",
+ resno, res);
+ return -EINVAL;
+ }
+
+ /* already aligned with min_align */
+ new_size = resource_size(res) + addsize;
+ ret = _pci_assign_resource(dev, resno, new_size, min_align);
+ if (ret) {
+ res->flags = flags;
+ dev_info(&dev->dev, "BAR %d: %pR (failed to expand by %#llx)\n",
+ resno, res, (unsigned long long) addsize);
+ return ret;
+ }
+
+ res->flags &= ~IORESOURCE_UNSET;
+ res->flags &= ~IORESOURCE_STARTALIGN;
+ dev_info(&dev->dev, "BAR %d: reassigned %pR (expanded by %#llx)\n",
+ resno, res, (unsigned long long) addsize);
+ if (resno < PCI_BRIDGE_RESOURCES)
+ pci_update_resource(dev, resno);
+
+ return 0;
+}
+
+int pci_enable_resources(struct pci_dev *dev, int mask)
+{
+ u16 cmd, old_cmd;
+ int i;
+ struct resource *r;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ if (!(mask & (1 << i)))
+ continue;
+
+ r = &dev->resource[i];
+
+ if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
+ continue;
+ if ((i == PCI_ROM_RESOURCE) &&
+ (!(r->flags & IORESOURCE_ROM_ENABLE)))
+ continue;
+
+ if (r->flags & IORESOURCE_UNSET) {
+ dev_err(&dev->dev, "can't enable device: BAR %d %pR not assigned\n",
+ i, r);
+ return -EINVAL;
+ }
+
+ if (!r->parent) {
+ dev_err(&dev->dev, "can't enable device: BAR %d %pR not claimed\n",
+ i, r);
+ return -EINVAL;
+ }
+
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+
+ if (cmd != old_cmd) {
+ dev_info(&dev->dev, "enabling device (%04x -> %04x)\n",
+ old_cmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
+}
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
new file mode 100644
index 000000000..396c200b9
--- /dev/null
+++ b/drivers/pci/slot.c
@@ -0,0 +1,397 @@
+/*
+ * drivers/pci/slot.c
+ * Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx>
+ * Copyright (C) 2006-2009 Hewlett-Packard Development Company, L.P.
+ * Alex Chiang <achiang@hp.com>
+ */
+
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/err.h>
+#include "pci.h"
+
+struct kset *pci_slots_kset;
+EXPORT_SYMBOL_GPL(pci_slots_kset);
+
+static ssize_t pci_slot_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct pci_slot *slot = to_pci_slot(kobj);
+ struct pci_slot_attribute *attribute = to_pci_slot_attr(attr);
+ return attribute->show ? attribute->show(slot, buf) : -EIO;
+}
+
+static ssize_t pci_slot_attr_store(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t len)
+{
+ struct pci_slot *slot = to_pci_slot(kobj);
+ struct pci_slot_attribute *attribute = to_pci_slot_attr(attr);
+ return attribute->store ? attribute->store(slot, buf, len) : -EIO;
+}
+
+static const struct sysfs_ops pci_slot_sysfs_ops = {
+ .show = pci_slot_attr_show,
+ .store = pci_slot_attr_store,
+};
+
+static ssize_t address_read_file(struct pci_slot *slot, char *buf)
+{
+ if (slot->number == 0xff)
+ return sprintf(buf, "%04x:%02x\n",
+ pci_domain_nr(slot->bus),
+ slot->bus->number);
+ else
+ return sprintf(buf, "%04x:%02x:%02x\n",
+ pci_domain_nr(slot->bus),
+ slot->bus->number,
+ slot->number);
+}
+
+/* these strings match up with the values in pci_bus_speed */
+static const char *pci_bus_speed_strings[] = {
+ "33 MHz PCI", /* 0x00 */
+ "66 MHz PCI", /* 0x01 */
+ "66 MHz PCI-X", /* 0x02 */
+ "100 MHz PCI-X", /* 0x03 */
+ "133 MHz PCI-X", /* 0x04 */
+ NULL, /* 0x05 */
+ NULL, /* 0x06 */
+ NULL, /* 0x07 */
+ NULL, /* 0x08 */
+ "66 MHz PCI-X 266", /* 0x09 */
+ "100 MHz PCI-X 266", /* 0x0a */
+ "133 MHz PCI-X 266", /* 0x0b */
+ "Unknown AGP", /* 0x0c */
+ "1x AGP", /* 0x0d */
+ "2x AGP", /* 0x0e */
+ "4x AGP", /* 0x0f */
+ "8x AGP", /* 0x10 */
+ "66 MHz PCI-X 533", /* 0x11 */
+ "100 MHz PCI-X 533", /* 0x12 */
+ "133 MHz PCI-X 533", /* 0x13 */
+ "2.5 GT/s PCIe", /* 0x14 */
+ "5.0 GT/s PCIe", /* 0x15 */
+ "8.0 GT/s PCIe", /* 0x16 */
+};
+
+static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf)
+{
+ const char *speed_string;
+
+ if (speed < ARRAY_SIZE(pci_bus_speed_strings))
+ speed_string = pci_bus_speed_strings[speed];
+ else
+ speed_string = "Unknown";
+
+ return sprintf(buf, "%s\n", speed_string);
+}
+
+static ssize_t max_speed_read_file(struct pci_slot *slot, char *buf)
+{
+ return bus_speed_read(slot->bus->max_bus_speed, buf);
+}
+
+static ssize_t cur_speed_read_file(struct pci_slot *slot, char *buf)
+{
+ return bus_speed_read(slot->bus->cur_bus_speed, buf);
+}
+
+static void pci_slot_release(struct kobject *kobj)
+{
+ struct pci_dev *dev;
+ struct pci_slot *slot = to_pci_slot(kobj);
+
+ dev_dbg(&slot->bus->dev, "dev %02x, released physical slot %s\n",
+ slot->number, pci_slot_name(slot));
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list)
+ if (PCI_SLOT(dev->devfn) == slot->number)
+ dev->slot = NULL;
+
+ list_del(&slot->list);
+
+ kfree(slot);
+}
+
+static struct pci_slot_attribute pci_slot_attr_address =
+ __ATTR(address, S_IRUGO, address_read_file, NULL);
+static struct pci_slot_attribute pci_slot_attr_max_speed =
+ __ATTR(max_bus_speed, S_IRUGO, max_speed_read_file, NULL);
+static struct pci_slot_attribute pci_slot_attr_cur_speed =
+ __ATTR(cur_bus_speed, S_IRUGO, cur_speed_read_file, NULL);
+
+static struct attribute *pci_slot_default_attrs[] = {
+ &pci_slot_attr_address.attr,
+ &pci_slot_attr_max_speed.attr,
+ &pci_slot_attr_cur_speed.attr,
+ NULL,
+};
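+
+/*
+ * With the "slots" kset created in pci_slot_init() below, these default
+ * attributes appear in sysfs as
+ * /sys/bus/pci/slots/<name>/{address,max_bus_speed,cur_bus_speed}.
+ */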
+
+static struct kobj_type pci_slot_ktype = {
+ .sysfs_ops = &pci_slot_sysfs_ops,
+ .release = &pci_slot_release,
+ .default_attrs = pci_slot_default_attrs,
+};
+
+static char *make_slot_name(const char *name)
+{
+ char *new_name;
+ int len, max, dup;
+
+ new_name = kstrdup(name, GFP_KERNEL);
+ if (!new_name)
+ return NULL;
+
+ /*
+ * Make sure we hit the realloc case the first time through the
+ * loop. 'len' will be strlen(name) + 3 at that point which is
+ * enough space for "name-X" and the trailing NUL.
+ */
+ len = strlen(name) + 2;
+ max = 1;
+ dup = 1;
+
+ for (;;) {
+ struct kobject *dup_slot;
+ dup_slot = kset_find_obj(pci_slots_kset, new_name);
+ if (!dup_slot)
+ break;
+ kobject_put(dup_slot);
+ if (dup == max) {
+ len++;
+ max *= 10;
+ kfree(new_name);
+ new_name = kmalloc(len, GFP_KERNEL);
+ if (!new_name)
+ break;
+ }
+ sprintf(new_name, "%s-%d", name, dup++);
+ }
+
+ return new_name;
+}
+
+static int rename_slot(struct pci_slot *slot, const char *name)
+{
+ int result = 0;
+ char *slot_name;
+
+ if (strcmp(pci_slot_name(slot), name) == 0)
+ return result;
+
+ slot_name = make_slot_name(name);
+ if (!slot_name)
+ return -ENOMEM;
+
+ result = kobject_rename(&slot->kobj, slot_name);
+ kfree(slot_name);
+
+ return result;
+}
+
+static struct pci_slot *get_slot(struct pci_bus *parent, int slot_nr)
+{
+ struct pci_slot *slot;
+	/* We already hold pci_bus_sem, so no extra locking is needed */
+ list_for_each_entry(slot, &parent->slots, list)
+ if (slot->number == slot_nr) {
+ kobject_get(&slot->kobj);
+ return slot;
+ }
+
+ return NULL;
+}
+
+/**
+ * pci_create_slot - create or increment refcount for physical PCI slot
+ * @parent: struct pci_bus of parent bridge
+ * @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder
+ * @name: user visible string presented in /sys/bus/pci/slots/<name>
+ * @hotplug: set if caller is hotplug driver, NULL otherwise
+ *
+ * PCI slots have first class attributes such as address, speed, width,
+ * and a &struct pci_slot is used to manage them. This interface will
+ * either return a new &struct pci_slot to the caller, or if the pci_slot
+ * already exists, its refcount will be incremented.
+ *
+ * Slots are uniquely identified by a @pci_bus, @slot_nr tuple.
+ *
+ * There are known platforms with broken firmware that assign the same
+ * name to multiple slots.  Work around these broken platforms by renaming
+ * the slots on behalf of the caller.  If firmware assigns name N to
+ * multiple slots:
+ *
+ * The first slot is assigned N
+ * The second slot is assigned N-1
+ * The third slot is assigned N-2
+ * etc.
+ *
+ * Placeholder slots:
+ * In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify
+ * a slot. There is one notable exception - pSeries (rpaphp), where the
+ * @slot_nr cannot be determined until a device is actually inserted into
+ * the slot. In this scenario, the caller may pass -1 for @slot_nr.
+ *
+ * The following semantics are imposed when the caller passes @slot_nr ==
+ * -1. First, we no longer check for an existing %struct pci_slot, as there
+ * may be many slots with @slot_nr of -1.  The other change in semantics
+ * is user-visible: the 'address' file presented in sysfs consists solely
+ * of a dddd:bb tuple, where dddd is the PCI domain of the %struct pci_bus
+ * and bb is the bus number.  In other words, the devfn of the
+ * 'placeholder' slot will not be displayed.
+ */
+struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
+ const char *name,
+ struct hotplug_slot *hotplug)
+{
+ struct pci_dev *dev;
+ struct pci_slot *slot;
+ int err = 0;
+ char *slot_name = NULL;
+
+ down_write(&pci_bus_sem);
+
+ if (slot_nr == -1)
+ goto placeholder;
+
+ /*
+ * Hotplug drivers are allowed to rename an existing slot,
+ * but only if not already claimed.
+ */
+ slot = get_slot(parent, slot_nr);
+ if (slot) {
+ if (hotplug) {
+ if ((err = slot->hotplug ? -EBUSY : 0)
+ || (err = rename_slot(slot, name))) {
+ kobject_put(&slot->kobj);
+ slot = NULL;
+ goto err;
+ }
+ }
+ goto out;
+ }
+
+placeholder:
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ slot->bus = parent;
+ slot->number = slot_nr;
+
+ slot->kobj.kset = pci_slots_kset;
+
+ slot_name = make_slot_name(name);
+ if (!slot_name) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
+ "%s", slot_name);
+ if (err)
+ goto err;
+
+ INIT_LIST_HEAD(&slot->list);
+ list_add(&slot->list, &parent->slots);
+
+ list_for_each_entry(dev, &parent->devices, bus_list)
+ if (PCI_SLOT(dev->devfn) == slot_nr)
+ dev->slot = slot;
+
+ dev_dbg(&parent->dev, "dev %02x, created physical slot %s\n",
+ slot_nr, pci_slot_name(slot));
+
+out:
+ kfree(slot_name);
+ up_write(&pci_bus_sem);
+ return slot;
+err:
+ kfree(slot);
+ slot = ERR_PTR(err);
+ goto out;
+}
+EXPORT_SYMBOL_GPL(pci_create_slot);
+
+/**
+ * pci_destroy_slot - decrement refcount for physical PCI slot
+ * @slot: struct pci_slot to decrement
+ *
+ * %struct pci_slot is refcounted, so destroying one is really easy; we
+ * just call kobject_put() on its kobj and let our release methods do the
+ * rest.
+ */
+void pci_destroy_slot(struct pci_slot *slot)
+{
+ dev_dbg(&slot->bus->dev, "dev %02x, dec refcount to %d\n",
+ slot->number, atomic_read(&slot->kobj.kref.refcount) - 1);
+
+ down_write(&pci_bus_sem);
+ kobject_put(&slot->kobj);
+ up_write(&pci_bus_sem);
+}
+EXPORT_SYMBOL_GPL(pci_destroy_slot);
+
+#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
+#include <linux/pci_hotplug.h>
+/**
+ * pci_hp_create_module_link - create symbolic link to the hotplug driver module.
+ * @pci_slot: struct pci_slot
+ *
+ * Helper function for pci_hotplug_core.c to create symbolic link to
+ * the hotplug driver module.
+ */
+void pci_hp_create_module_link(struct pci_slot *pci_slot)
+{
+ struct hotplug_slot *slot = pci_slot->hotplug;
+ struct kobject *kobj = NULL;
+ int ret;
+
+ if (!slot || !slot->ops)
+ return;
+ kobj = kset_find_obj(module_kset, slot->ops->mod_name);
+ if (!kobj)
+ return;
+ ret = sysfs_create_link(&pci_slot->kobj, kobj, "module");
+ if (ret)
+ dev_err(&pci_slot->bus->dev, "Error creating sysfs link (%d)\n",
+ ret);
+ kobject_put(kobj);
+}
+EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
+
+/**
+ * pci_hp_remove_module_link - remove symbolic link to the hotplug driver module.
+ * @pci_slot: struct pci_slot
+ *
+ * Helper function for pci_hotplug_core.c to remove symbolic link to
+ * the hotplug driver module.
+ */
+void pci_hp_remove_module_link(struct pci_slot *pci_slot)
+{
+ sysfs_remove_link(&pci_slot->kobj, "module");
+}
+EXPORT_SYMBOL_GPL(pci_hp_remove_module_link);
+#endif
+
+static int pci_slot_init(void)
+{
+ struct kset *pci_bus_kset;
+
+ pci_bus_kset = bus_get_kset(&pci_bus_type);
+ pci_slots_kset = kset_create_and_add("slots", NULL,
+ &pci_bus_kset->kobj);
+ if (!pci_slots_kset) {
+ printk(KERN_ERR "PCI: Slot initialization failure\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+subsys_initcall(pci_slot_init);
diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
new file mode 100644
index 000000000..b91c4da68
--- /dev/null
+++ b/drivers/pci/syscall.c
@@ -0,0 +1,136 @@
+/*
+ * pci_syscall.c
+ *
+ * For architectures where we want to allow direct access
+ * to the PCI config stuff - it would probably be preferable
+ * on PCs too, but there people just do it by hand with the
+ * magic northbridge registers..
+ */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/syscalls.h>
+#include <asm/uaccess.h>
+#include "pci.h"
+
+SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
+ unsigned long, off, unsigned long, len, void __user *, buf)
+{
+ struct pci_dev *dev;
+ u8 byte;
+ u16 word;
+ u32 dword;
+ long err;
+ long cfg_ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = -ENODEV;
+ dev = pci_get_bus_and_slot(bus, dfn);
+ if (!dev)
+ goto error;
+
+ switch (len) {
+ case 1:
+ cfg_ret = pci_user_read_config_byte(dev, off, &byte);
+ break;
+ case 2:
+ cfg_ret = pci_user_read_config_word(dev, off, &word);
+ break;
+ case 4:
+ cfg_ret = pci_user_read_config_dword(dev, off, &dword);
+ break;
+ default:
+ err = -EINVAL;
+ goto error;
+ }
+
+ err = -EIO;
+ if (cfg_ret != PCIBIOS_SUCCESSFUL)
+ goto error;
+
+ switch (len) {
+ case 1:
+ err = put_user(byte, (unsigned char __user *)buf);
+ break;
+ case 2:
+ err = put_user(word, (unsigned short __user *)buf);
+ break;
+ case 4:
+ err = put_user(dword, (unsigned int __user *)buf);
+ break;
+ }
+ pci_dev_put(dev);
+ return err;
+
+error:
+ /* ??? XFree86 doesn't even check the return value. They
+ just look for 0xffffffff in the output, since that's what
+ they get instead of a machine check on x86. */
+ switch (len) {
+ case 1:
+ put_user(-1, (unsigned char __user *)buf);
+ break;
+ case 2:
+ put_user(-1, (unsigned short __user *)buf);
+ break;
+ case 4:
+ put_user(-1, (unsigned int __user *)buf);
+ break;
+ }
+ pci_dev_put(dev);
+ return err;
+}
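+
+/*
+ * Illustrative userspace call (a sketch; only architectures that wire
+ * up __NR_pciconfig_read expose this syscall):
+ *
+ *	u32 id;
+ *	syscall(__NR_pciconfig_read, 0, 0, 0x00, 4, &id);
+ *
+ * reads the vendor/device ID dword of device 00.0 on bus 0.
+ */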
+
+SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
+ unsigned long, off, unsigned long, len, void __user *, buf)
+{
+ struct pci_dev *dev;
+ u8 byte;
+ u16 word;
+ u32 dword;
+ int err = 0;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ dev = pci_get_bus_and_slot(bus, dfn);
+ if (!dev)
+ return -ENODEV;
+
+ switch (len) {
+ case 1:
+ err = get_user(byte, (u8 __user *)buf);
+ if (err)
+ break;
+ err = pci_user_write_config_byte(dev, off, byte);
+ if (err != PCIBIOS_SUCCESSFUL)
+ err = -EIO;
+ break;
+
+ case 2:
+ err = get_user(word, (u16 __user *)buf);
+ if (err)
+ break;
+ err = pci_user_write_config_word(dev, off, word);
+ if (err != PCIBIOS_SUCCESSFUL)
+ err = -EIO;
+ break;
+
+ case 4:
+ err = get_user(dword, (u32 __user *)buf);
+ if (err)
+ break;
+ err = pci_user_write_config_dword(dev, off, dword);
+ if (err != PCIBIOS_SUCCESSFUL)
+ err = -EIO;
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+ pci_dev_put(dev);
+ return err;
+}
diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c
new file mode 100644
index 000000000..7e1304d2e
--- /dev/null
+++ b/drivers/pci/vc.c
@@ -0,0 +1,434 @@
+/*
+ * PCI Virtual Channel support
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/types.h>
+
+/**
+ * pci_vc_save_restore_dwords - Save or restore a series of dwords
+ * @dev: device
+ * @pos: starting config space position
+ * @buf: buffer to save to or restore from
+ * @dwords: number of dwords to save/restore
+ * @save: whether to save or restore
+ */
+static void pci_vc_save_restore_dwords(struct pci_dev *dev, int pos,
+ u32 *buf, int dwords, bool save)
+{
+ int i;
+
+ for (i = 0; i < dwords; i++, buf++) {
+ if (save)
+ pci_read_config_dword(dev, pos + (i * 4), buf);
+ else
+ pci_write_config_dword(dev, pos + (i * 4), *buf);
+ }
+}
+
+/**
+ * pci_vc_load_arb_table - load and wait for VC arbitration table
+ * @dev: device
+ * @pos: starting position of VC capability (VC/VC9/MFVC)
+ *
+ * Set Load VC Arbitration Table bit requesting hardware to apply the VC
+ * Arbitration Table (previously loaded). When the VC Arbitration Table
+ * Status clears, hardware has latched the table into VC arbitration logic.
+ */
+static void pci_vc_load_arb_table(struct pci_dev *dev, int pos)
+{
+ u16 ctrl;
+
+ pci_read_config_word(dev, pos + PCI_VC_PORT_CTRL, &ctrl);
+ pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
+ ctrl | PCI_VC_PORT_CTRL_LOAD_TABLE);
+ if (pci_wait_for_pending(dev, pos + PCI_VC_PORT_STATUS,
+ PCI_VC_PORT_STATUS_TABLE))
+ return;
+
+ dev_err(&dev->dev, "VC arbitration table failed to load\n");
+}
+
+/**
+ * pci_vc_load_port_arb_table - Load and wait for VC port arbitration table
+ * @dev: device
+ * @pos: starting position of VC capability (VC/VC9/MFVC)
+ * @res: VC resource number, ie. VCn (0-7)
+ *
+ * Set Load Port Arbitration Table bit requesting hardware to apply the Port
+ * Arbitration Table (previously loaded). When the Port Arbitration Table
+ * Status clears, hardware has latched the table into port arbitration logic.
+ */
+static void pci_vc_load_port_arb_table(struct pci_dev *dev, int pos, int res)
+{
+ int ctrl_pos, status_pos;
+ u32 ctrl;
+
+ ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF);
+ status_pos = pos + PCI_VC_RES_STATUS + (res * PCI_CAP_VC_PER_VC_SIZEOF);
+
+ pci_read_config_dword(dev, ctrl_pos, &ctrl);
+ pci_write_config_dword(dev, ctrl_pos,
+ ctrl | PCI_VC_RES_CTRL_LOAD_TABLE);
+
+ if (pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_TABLE))
+ return;
+
+ dev_err(&dev->dev, "VC%d port arbitration table failed to load\n", res);
+}
+
+/**
+ * pci_vc_enable - Enable virtual channel
+ * @dev: device
+ * @pos: starting position of VC capability (VC/VC9/MFVC)
+ * @res: VC res number, ie. VCn (0-7)
+ *
+ * A VC is enabled by setting the enable bit in matching resource control
+ * registers on both sides of a link. We therefore need to find the opposite
+ * end of the link. To keep this simple we enable from the downstream device.
+ * RC devices do not have an upstream device, nor, it seems, do VC9
+ * capabilities (the spec is unclear).  Once we find the upstream device,
+ * match the VC ID to get the correct resource, then disable and enable it
+ * on both ends.
+ */
+static void pci_vc_enable(struct pci_dev *dev, int pos, int res)
+{
+ int ctrl_pos, status_pos, id, pos2, evcc, i, ctrl_pos2, status_pos2;
+ u32 ctrl, header, cap1, ctrl2;
+ struct pci_dev *link = NULL;
+
+ /* Enable VCs from the downstream device */
+ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+
+ ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF);
+ status_pos = pos + PCI_VC_RES_STATUS + (res * PCI_CAP_VC_PER_VC_SIZEOF);
+
+ pci_read_config_dword(dev, ctrl_pos, &ctrl);
+ id = ctrl & PCI_VC_RES_CTRL_ID;
+
+ pci_read_config_dword(dev, pos, &header);
+
+ /* If there is no opposite end of the link, skip to enable */
+ if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_VC9 ||
+ pci_is_root_bus(dev->bus))
+ goto enable;
+
+ pos2 = pci_find_ext_capability(dev->bus->self, PCI_EXT_CAP_ID_VC);
+ if (!pos2)
+ goto enable;
+
+ pci_read_config_dword(dev->bus->self, pos2 + PCI_VC_PORT_CAP1, &cap1);
+ evcc = cap1 & PCI_VC_CAP1_EVCC;
+
+ /* VC0 is hardwired enabled, so we can start with 1 */
+ for (i = 1; i < evcc + 1; i++) {
+ ctrl_pos2 = pos2 + PCI_VC_RES_CTRL +
+ (i * PCI_CAP_VC_PER_VC_SIZEOF);
+ status_pos2 = pos2 + PCI_VC_RES_STATUS +
+ (i * PCI_CAP_VC_PER_VC_SIZEOF);
+ pci_read_config_dword(dev->bus->self, ctrl_pos2, &ctrl2);
+ if ((ctrl2 & PCI_VC_RES_CTRL_ID) == id) {
+ link = dev->bus->self;
+ break;
+ }
+ }
+
+ if (!link)
+ goto enable;
+
+ /* Disable if enabled */
+ if (ctrl2 & PCI_VC_RES_CTRL_ENABLE) {
+ ctrl2 &= ~PCI_VC_RES_CTRL_ENABLE;
+ pci_write_config_dword(link, ctrl_pos2, ctrl2);
+ }
+
+ /* Enable on both ends */
+ ctrl2 |= PCI_VC_RES_CTRL_ENABLE;
+ pci_write_config_dword(link, ctrl_pos2, ctrl2);
+enable:
+ ctrl |= PCI_VC_RES_CTRL_ENABLE;
+ pci_write_config_dword(dev, ctrl_pos, ctrl);
+
+ if (!pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_NEGO))
+ dev_err(&dev->dev, "VC%d negotiation stuck pending\n", id);
+
+ if (link && !pci_wait_for_pending(link, status_pos2,
+ PCI_VC_RES_STATUS_NEGO))
+ dev_err(&link->dev, "VC%d negotiation stuck pending\n", id);
+}
+
+/**
+ * pci_vc_do_save_buffer - Size, save, or restore VC state
+ * @dev: device
+ * @pos: starting position of VC capability (VC/VC9/MFVC)
+ * @save_state: buffer for save/restore
+ * @name: for error message
+ * @save: if @save_state is non-NULL, selects save (true) or restore (false)
+ *
+ * Walking Virtual Channel config space to size, save, or restore it
+ * is complicated, so we do it all from one function to reduce code and
+ * guarantee ordering matches in the buffer. When called with NULL
+ * @save_state, return the size of the necessary save buffer. When called
+ * with a non-NULL @save_state, @save determines whether we save to the
+ * buffer or restore from it.
+ */
+static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
+ struct pci_cap_saved_state *save_state,
+ bool save)
+{
+ u32 cap1;
+ char evcc, lpevcc, parb_size;
+ int i, len = 0;
+ u8 *buf = save_state ? (u8 *)save_state->cap.data : NULL;
+
+ /* Sanity check buffer size for save/restore */
+ if (buf && save_state->cap.size !=
+ pci_vc_do_save_buffer(dev, pos, NULL, save)) {
+ dev_err(&dev->dev,
+ "VC save buffer size does not match @0x%x\n", pos);
+ return -ENOMEM;
+ }
+
+ pci_read_config_dword(dev, pos + PCI_VC_PORT_CAP1, &cap1);
+ /* Extended VC Count (not counting VC0) */
+ evcc = cap1 & PCI_VC_CAP1_EVCC;
+ /* Low Priority Extended VC Count (not counting VC0) */
+ lpevcc = (cap1 & PCI_VC_CAP1_LPEVCC) >> 4;
+ /* Port Arbitration Table Entry Size (bits) */
+ parb_size = 1 << ((cap1 & PCI_VC_CAP1_ARB_SIZE) >> 10);
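+	/* e.g. a raw field value of 2 yields 1 << 2 = 4-bit entries */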
+
+ /*
+ * Port VC Control Register contains VC Arbitration Select, which
+ * cannot be modified when more than one LPVC is in operation. We
+ * therefore save/restore it first, as only VC0 should be enabled
+ * after device reset.
+ */
+ if (buf) {
+ if (save)
+ pci_read_config_word(dev, pos + PCI_VC_PORT_CTRL,
+ (u16 *)buf);
+ else
+ pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
+ *(u16 *)buf);
+ buf += 2;
+ }
+ len += 2;
+
+ /*
+ * If we have any Low Priority VCs and a VC Arbitration Table Offset
+ * in Port VC Capability Register 2 then save/restore it next.
+ */
+ if (lpevcc) {
+ u32 cap2;
+ int vcarb_offset;
+
+ pci_read_config_dword(dev, pos + PCI_VC_PORT_CAP2, &cap2);
+ vcarb_offset = ((cap2 & PCI_VC_CAP2_ARB_OFF) >> 24) * 16;
+
+ if (vcarb_offset) {
+ int size, vcarb_phases = 0;
+
+ if (cap2 & PCI_VC_CAP2_128_PHASE)
+ vcarb_phases = 128;
+ else if (cap2 & PCI_VC_CAP2_64_PHASE)
+ vcarb_phases = 64;
+ else if (cap2 & PCI_VC_CAP2_32_PHASE)
+ vcarb_phases = 32;
+
+ /* Fixed 4 bits per phase per lpevcc (plus VC0) */
+ size = ((lpevcc + 1) * vcarb_phases * 4) / 8;
+
+ if (size && buf) {
+ pci_vc_save_restore_dwords(dev,
+ pos + vcarb_offset,
+ (u32 *)buf,
+ size / 4, save);
+ /*
+ * On restore, we need to signal hardware to
+ * re-load the VC Arbitration Table.
+ */
+ if (!save)
+ pci_vc_load_arb_table(dev, pos);
+
+ buf += size;
+ }
+ len += size;
+ }
+ }
+
+ /*
+ * In addition to each VC Resource Control Register, we may have a
+ * Port Arbitration Table attached to each VC. The Port Arbitration
+ * Table Offset in each VC Resource Capability Register tells us if
+ * it exists. The entry size is global from the Port VC Capability
+ * Register1 above. The number of phases is determined per VC.
+ */
+ for (i = 0; i < evcc + 1; i++) {
+ u32 cap;
+ int parb_offset;
+
+ pci_read_config_dword(dev, pos + PCI_VC_RES_CAP +
+ (i * PCI_CAP_VC_PER_VC_SIZEOF), &cap);
+ parb_offset = ((cap & PCI_VC_RES_CAP_ARB_OFF) >> 24) * 16;
+ if (parb_offset) {
+ int size, parb_phases = 0;
+
+ if (cap & PCI_VC_RES_CAP_256_PHASE)
+ parb_phases = 256;
+ else if (cap & (PCI_VC_RES_CAP_128_PHASE |
+ PCI_VC_RES_CAP_128_PHASE_TB))
+ parb_phases = 128;
+ else if (cap & PCI_VC_RES_CAP_64_PHASE)
+ parb_phases = 64;
+ else if (cap & PCI_VC_RES_CAP_32_PHASE)
+ parb_phases = 32;
+
+ size = (parb_size * parb_phases) / 8;
+
+ if (size && buf) {
+ pci_vc_save_restore_dwords(dev,
+ pos + parb_offset,
+ (u32 *)buf,
+ size / 4, save);
+ buf += size;
+ }
+ len += size;
+ }
+
+ /* VC Resource Control Register */
+ if (buf) {
+ int ctrl_pos = pos + PCI_VC_RES_CTRL +
+ (i * PCI_CAP_VC_PER_VC_SIZEOF);
+ if (save)
+ pci_read_config_dword(dev, ctrl_pos,
+ (u32 *)buf);
+ else {
+ u32 tmp, ctrl = *(u32 *)buf;
+ /*
+ * For an FLR case, the VC config may remain.
+ * Preserve enable bit, restore the rest.
+ */
+ pci_read_config_dword(dev, ctrl_pos, &tmp);
+ tmp &= PCI_VC_RES_CTRL_ENABLE;
+ tmp |= ctrl & ~PCI_VC_RES_CTRL_ENABLE;
+ pci_write_config_dword(dev, ctrl_pos, tmp);
+ /* Load port arbitration table if used */
+ if (ctrl & PCI_VC_RES_CTRL_ARB_SELECT)
+ pci_vc_load_port_arb_table(dev, pos, i);
+ /* Re-enable if needed */
+ if ((ctrl ^ tmp) & PCI_VC_RES_CTRL_ENABLE)
+ pci_vc_enable(dev, pos, i);
+ }
+ buf += 4;
+ }
+ len += 4;
+ }
+
+ return buf ? 0 : len;
+}
+
+static struct {
+ u16 id;
+ const char *name;
+} vc_caps[] = { { PCI_EXT_CAP_ID_MFVC, "MFVC" },
+ { PCI_EXT_CAP_ID_VC, "VC" },
+ { PCI_EXT_CAP_ID_VC9, "VC9" } };
+
+/**
+ * pci_save_vc_state - Save VC state to pre-allocated save buffer
+ * @dev: device
+ *
+ * For each type of VC capability, VC/VC9/MFVC, find the capability and
+ * save it to the pre-allocated save buffer.
+ */
+int pci_save_vc_state(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vc_caps); i++) {
+ int pos, ret;
+ struct pci_cap_saved_state *save_state;
+
+ pos = pci_find_ext_capability(dev, vc_caps[i].id);
+ if (!pos)
+ continue;
+
+ save_state = pci_find_saved_ext_cap(dev, vc_caps[i].id);
+ if (!save_state) {
+ dev_err(&dev->dev, "%s buffer not found in %s\n",
+ vc_caps[i].name, __func__);
+ return -ENOMEM;
+ }
+
+ ret = pci_vc_do_save_buffer(dev, pos, save_state, true);
+ if (ret) {
+ dev_err(&dev->dev, "%s save unsuccessful %s\n",
+ vc_caps[i].name, __func__);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * pci_restore_vc_state - Restore VC state from save buffer
+ * @dev: device
+ *
+ * For each type of VC capability, VC/VC9/MFVC, find the capability and
+ * restore it from the previously saved buffer.
+ */
+void pci_restore_vc_state(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vc_caps); i++) {
+ int pos;
+ struct pci_cap_saved_state *save_state;
+
+ pos = pci_find_ext_capability(dev, vc_caps[i].id);
+ save_state = pci_find_saved_ext_cap(dev, vc_caps[i].id);
+ if (!save_state || !pos)
+ continue;
+
+ pci_vc_do_save_buffer(dev, pos, save_state, false);
+ }
+}
+
+/**
+ * pci_allocate_vc_save_buffers - Allocate save buffers for VC caps
+ * @dev: device
+ *
+ * For each type of VC capability, VC/VC9/MFVC, find the capability, size
+ * it, and allocate a buffer for save/restore.
+ */
+
+void pci_allocate_vc_save_buffers(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vc_caps); i++) {
+ int len, pos = pci_find_ext_capability(dev, vc_caps[i].id);
+
+ if (!pos)
+ continue;
+
+ len = pci_vc_do_save_buffer(dev, pos, NULL, false);
+ if (pci_add_ext_cap_save_buffer(dev, vc_caps[i].id, len))
+ dev_err(&dev->dev,
+ "unable to preallocate %s save buffer\n",
+ vc_caps[i].name);
+ }
+}
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
new file mode 100644
index 000000000..39b790703
--- /dev/null
+++ b/drivers/pci/vpd.c
@@ -0,0 +1,62 @@
+/*
+ * File: vpd.c
+ * Purpose: Provide PCI VPD support
+ *
+ * Copyright (C) 2010 Broadcom Corporation.
+ */
+
+#include <linux/pci.h>
+#include <linux/export.h>
+
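+/*
+ * VPD is a sequence of small and large resource data types (see the
+ * PCI Local Bus spec's VPD appendix): a tag byte with bit 7 set means
+ * a large tag with a 16-bit length following, clear means a small tag
+ * with a 3-bit length embedded in the byte.  The parsers below walk
+ * that structure.
+ */
+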
+int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt)
+{
+ int i;
+
+ for (i = off; i < len; ) {
+ u8 val = buf[i];
+
+ if (val & PCI_VPD_LRDT) {
+			/* Don't return success if the tag isn't complete */
+ if (i + PCI_VPD_LRDT_TAG_SIZE > len)
+ break;
+
+ if (val == rdt)
+ return i;
+
+ i += PCI_VPD_LRDT_TAG_SIZE +
+ pci_vpd_lrdt_size(&buf[i]);
+ } else {
+ u8 tag = val & ~PCI_VPD_SRDT_LEN_MASK;
+
+ if (tag == rdt)
+ return i;
+
+ if (tag == PCI_VPD_SRDT_END)
+ break;
+
+ i += PCI_VPD_SRDT_TAG_SIZE +
+ pci_vpd_srdt_size(&buf[i]);
+ }
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(pci_vpd_find_tag);
+
+int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
+ unsigned int len, const char *kw)
+{
+ int i;
+
+ for (i = off; i + PCI_VPD_INFO_FLD_HDR_SIZE <= off + len;) {
+ if (buf[i + 0] == kw[0] &&
+ buf[i + 1] == kw[1])
+ return i;
+
+ i += PCI_VPD_INFO_FLD_HDR_SIZE +
+ pci_vpd_info_field_size(&buf[i]);
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(pci_vpd_find_info_keyword);
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
new file mode 100644
index 000000000..7cfd2db02
--- /dev/null
+++ b/drivers/pci/xen-pcifront.c
@@ -0,0 +1,1180 @@
+/*
+ * Xen PCI Frontend.
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/grant_table.h>
+#include <xen/page.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <xen/interface/io/pciif.h>
+#include <asm/xen/pci.h>
+#include <linux/interrupt.h>
+#include <linux/atomic.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/time.h>
+#include <xen/platform_pci.h>
+
+#include <asm/xen/swiotlb-xen.h>
+#define INVALID_GRANT_REF (0)
+#define INVALID_EVTCHN (-1)
+
+struct pci_bus_entry {
+ struct list_head list;
+ struct pci_bus *bus;
+};
+
+#define _PDEVB_op_active (0)
+#define PDEVB_op_active (1 << (_PDEVB_op_active))
+
+struct pcifront_device {
+ struct xenbus_device *xdev;
+ struct list_head root_buses;
+
+ int evtchn;
+ int gnt_ref;
+
+ int irq;
+
+ /* Lock this when doing any operations in sh_info */
+ spinlock_t sh_info_lock;
+ struct xen_pci_sharedinfo *sh_info;
+ struct work_struct op_work;
+ unsigned long flags;
+
+};
+
+struct pcifront_sd {
+ int domain;
+ struct pcifront_device *pdev;
+};
+
+static inline struct pcifront_device *
+pcifront_get_pdev(struct pcifront_sd *sd)
+{
+ return sd->pdev;
+}
+
+static inline void pcifront_init_sd(struct pcifront_sd *sd,
+ unsigned int domain, unsigned int bus,
+ struct pcifront_device *pdev)
+{
+ sd->domain = domain;
+ sd->pdev = pdev;
+}
+
+static DEFINE_SPINLOCK(pcifront_dev_lock);
+static struct pcifront_device *pcifront_dev;
+
+static int verbose_request;
+module_param(verbose_request, int, 0644);
+
+static int errno_to_pcibios_err(int errno)
+{
+ switch (errno) {
+ case XEN_PCI_ERR_success:
+ return PCIBIOS_SUCCESSFUL;
+
+ case XEN_PCI_ERR_dev_not_found:
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ case XEN_PCI_ERR_invalid_offset:
+ case XEN_PCI_ERR_op_failed:
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ case XEN_PCI_ERR_not_implemented:
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+
+ case XEN_PCI_ERR_access_denied:
+ return PCIBIOS_SET_FAILED;
+ }
+ return errno;
+}
+
+static inline void schedule_pcifront_aer_op(struct pcifront_device *pdev)
+{
+ if (test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
+ && !test_and_set_bit(_PDEVB_op_active, &pdev->flags)) {
+ dev_dbg(&pdev->xdev->dev, "schedule aer frontend job\n");
+ schedule_work(&pdev->op_work);
+ }
+}
+
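+/*
+ * do_pci_op() implements a simple one-slot request/response protocol:
+ * copy the op into the shared info page, set _XEN_PCIF_active, kick the
+ * backend through the event channel, then poll until the backend clears
+ * the flag (or we time out).
+ */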
+static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
+{
+ int err = 0;
+ struct xen_pci_op *active_op = &pdev->sh_info->op;
+ unsigned long irq_flags;
+ evtchn_port_t port = pdev->evtchn;
+ unsigned irq = pdev->irq;
+ s64 ns, ns_timeout;
+ struct timeval tv;
+
+ spin_lock_irqsave(&pdev->sh_info_lock, irq_flags);
+
+ memcpy(active_op, op, sizeof(struct xen_pci_op));
+
+ /* Go */
+ wmb();
+ set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
+ notify_remote_via_evtchn(port);
+
+ /*
+	 * We set a poll timeout of 3 seconds but give up and return after
+	 * 2 seconds.  It is better to time out too late rather than too early
+ * (in the latter case we end up continually re-executing poll() with a
+ * timeout in the past). 1s difference gives plenty of slack for error.
+ */
+ do_gettimeofday(&tv);
+ ns_timeout = timeval_to_ns(&tv) + 2 * (s64)NSEC_PER_SEC;
+
+ xen_clear_irq_pending(irq);
+
+ while (test_bit(_XEN_PCIF_active,
+ (unsigned long *)&pdev->sh_info->flags)) {
+ xen_poll_irq_timeout(irq, jiffies + 3*HZ);
+ xen_clear_irq_pending(irq);
+ do_gettimeofday(&tv);
+ ns = timeval_to_ns(&tv);
+ if (ns > ns_timeout) {
+ dev_err(&pdev->xdev->dev,
+ "pciback not responding!!!\n");
+ clear_bit(_XEN_PCIF_active,
+ (unsigned long *)&pdev->sh_info->flags);
+ err = XEN_PCI_ERR_dev_not_found;
+ goto out;
+ }
+ }
+
+	/*
+	 * We might lose a backend service request since we reuse the same
+	 * event channel for pci_conf backend responses, so re-schedule the
+	 * AER pcifront service.
+	 */
+ if (test_bit(_XEN_PCIB_active,
+ (unsigned long *)&pdev->sh_info->flags)) {
+ dev_err(&pdev->xdev->dev,
+ "schedule aer pcifront service\n");
+ schedule_pcifront_aer_op(pdev);
+ }
+
+ memcpy(op, active_op, sizeof(struct xen_pci_op));
+
+ err = op->err;
+out:
+ spin_unlock_irqrestore(&pdev->sh_info_lock, irq_flags);
+ return err;
+}
+
+/* Access to this function is spinlocked in drivers/pci/access.c */
+static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ int err = 0;
+ struct xen_pci_op op = {
+ .cmd = XEN_PCI_OP_conf_read,
+ .domain = pci_domain_nr(bus),
+ .bus = bus->number,
+ .devfn = devfn,
+ .offset = where,
+ .size = size,
+ };
+ struct pcifront_sd *sd = bus->sysdata;
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+
+ if (verbose_request)
+ dev_info(&pdev->xdev->dev,
+ "read dev=%04x:%02x:%02x.%d - offset %x size %d\n",
+ pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), where, size);
+
+ err = do_pci_op(pdev, &op);
+
+ if (likely(!err)) {
+ if (verbose_request)
+ dev_info(&pdev->xdev->dev, "read got back value %x\n",
+ op.value);
+
+ *val = op.value;
+ } else if (err == -ENODEV) {
+ /* No device here, pretend that it just returned 0 */
+ err = 0;
+ *val = 0;
+ }
+
+ return errno_to_pcibios_err(err);
+}
+
+/* Access to this function is spinlocked in drivers/pci/access.c */
+static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct xen_pci_op op = {
+ .cmd = XEN_PCI_OP_conf_write,
+ .domain = pci_domain_nr(bus),
+ .bus = bus->number,
+ .devfn = devfn,
+ .offset = where,
+ .size = size,
+ .value = val,
+ };
+ struct pcifront_sd *sd = bus->sysdata;
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+
+ if (verbose_request)
+ dev_info(&pdev->xdev->dev,
+ "write dev=%04x:%02x:%02x.%d - "
+ "offset %x size %d val %x\n",
+ pci_domain_nr(bus), bus->number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
+
+ return errno_to_pcibios_err(do_pci_op(pdev, &op));
+}
+
+static struct pci_ops pcifront_bus_ops = {
+ .read = pcifront_bus_read,
+ .write = pcifront_bus_write,
+};
+
+#ifdef CONFIG_PCI_MSI
+static int pci_frontend_enable_msix(struct pci_dev *dev,
+ int vector[], int nvec)
+{
+ int err;
+ int i;
+ struct xen_pci_op op = {
+ .cmd = XEN_PCI_OP_enable_msix,
+ .domain = pci_domain_nr(dev->bus),
+ .bus = dev->bus->number,
+ .devfn = dev->devfn,
+ .value = nvec,
+ };
+ struct pcifront_sd *sd = dev->bus->sysdata;
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+ struct msi_desc *entry;
+
+ if (nvec > SH_INFO_MAX_VEC) {
+		dev_err(&dev->dev, "too many vectors for pci frontend: %x."
+			" Increase SH_INFO_MAX_VEC.\n", nvec);
+ return -EINVAL;
+ }
+
+ i = 0;
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ op.msix_entries[i].entry = entry->msi_attrib.entry_nr;
+ /* Vector is useless at this point. */
+ op.msix_entries[i].vector = -1;
+ i++;
+ }
+
+ err = do_pci_op(pdev, &op);
+
+ if (likely(!err)) {
+ if (likely(!op.value)) {
+ /* we get the result */
+ for (i = 0; i < nvec; i++) {
+ if (op.msix_entries[i].vector <= 0) {
+ dev_warn(&dev->dev, "MSI-X entry %d is invalid: %d!\n",
+ i, op.msix_entries[i].vector);
+ err = -EINVAL;
+ vector[i] = -1;
+ continue;
+ }
+ vector[i] = op.msix_entries[i].vector;
+ }
+ } else {
+ printk(KERN_DEBUG "enable msix get value %x\n",
+ op.value);
+ err = op.value;
+ }
+ } else {
+ dev_err(&dev->dev, "enable msix get err %x\n", err);
+ }
+ return err;
+}
+
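+/* Ask the backend to disable MSI-X for @dev; an error is only logged. */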
+static void pci_frontend_disable_msix(struct pci_dev *dev)
+{
+ int err;
+ struct xen_pci_op op = {
+ .cmd = XEN_PCI_OP_disable_msix,
+ .domain = pci_domain_nr(dev->bus),
+ .bus = dev->bus->number,
+ .devfn = dev->devfn,
+ };
+ struct pcifront_sd *sd = dev->bus->sysdata;
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+
+ err = do_pci_op(pdev, &op);
+
+ /* What should we do on error? */
+ if (err)
+ dev_err(&dev->dev, "pci_disable_msix got err %x\n", err);
+}
+
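+/*
+ * Forward an MSI enable request for @dev to the backend. On success
+ * op.value holds the vector assigned by the backend, which is
+ * returned in vector[0].
+ */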
+static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[])
+{
+ int err;
+ struct xen_pci_op op = {
+ .cmd = XEN_PCI_OP_enable_msi,
+ .domain = pci_domain_nr(dev->bus),
+ .bus = dev->bus->number,
+ .devfn = dev->devfn,
+ };
+ struct pcifront_sd *sd = dev->bus->sysdata;
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+
+ err = do_pci_op(pdev, &op);
+ if (likely(!err)) {
+ vector[0] = op.value;
+ if (op.value <= 0) {
+ dev_warn(&dev->dev, "MSI entry is invalid: %d!\n",
+ op.value);
+ err = -EINVAL;
+ vector[0] = -1;
+ }
+ } else {
+ dev_err(&dev->dev, "pci frontend enable msi failed for dev "
+ "%x:%x\n", op.bus, op.devfn);
+ err = -EINVAL;
+ }
+ return err;
+}
+
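+/* Ask the backend to disable MSI for @dev, tolerating a missing response. */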
+static void pci_frontend_disable_msi(struct pci_dev *dev)
+{
+ int err;
+ struct xen_pci_op op = {
+ .cmd = XEN_PCI_OP_disable_msi,
+ .domain = pci_domain_nr(dev->bus),
+ .bus = dev->bus->number,
+ .devfn = dev->devfn,
+ };
+ struct pcifront_sd *sd = dev->bus->sysdata;
+ struct pcifront_device *pdev = pcifront_get_pdev(sd);
+
+ err = do_pci_op(pdev, &op);
+ if (err == XEN_PCI_ERR_dev_not_found) {
+ /* XXX No response from backend, what shall we do? */
+ printk(KERN_DEBUG "get no response from backend for disable MSI\n");
+ return;
+ }
+ if (err)
+ /* How can pciback notify us of a failure? */
+ printk(KERN_DEBUG "got fake response from backend\n");
+}
+
+static struct xen_pci_frontend_ops pci_frontend_ops = {
+ .enable_msi = pci_frontend_enable_msi,
+ .disable_msi = pci_frontend_disable_msi,
+ .enable_msix = pci_frontend_enable_msix,
+ .disable_msix = pci_frontend_disable_msix,
+};
+
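+/*
+ * Publish or withdraw the MSI/MSI-X ops above; they are consumed by
+ * the Xen MSI setup code via the xen_pci_frontend pointer.
+ */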
+static void pci_frontend_registrar(int enable)
+{
+ if (enable)
+ xen_pci_frontend = &pci_frontend_ops;
+ else
+ xen_pci_frontend = NULL;
+}
+#else
+static inline void pci_frontend_registrar(int enable) { }
+#endif /* CONFIG_PCI_MSI */
+
+/* Claim resources for the PCI frontend as-is, backend won't allow changes */
+static int pcifront_claim_resource(struct pci_dev *dev, void *data)
+{
+ struct pcifront_device *pdev = data;
+ int i;
+ struct resource *r;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ r = &dev->resource[i];
+
+ if (!r->parent && r->start && r->flags) {
+ dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n",
+ pci_name(dev), i);
+ if (pci_claim_resource(dev, i)) {
+ dev_err(&pdev->xdev->dev, "Could not claim resource %s/%d! "
+ "Device offline. Try using e820_host=1 in the guest config.\n",
+ pci_name(dev), i);
+ }
+ }
+ }
+
+ return 0;
+}
+
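+/*
+ * Scan every devfn on bus @b and add any functions the backend has
+ * exported; devices that are already known are skipped.
+ */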
+static int pcifront_scan_bus(struct pcifront_device *pdev,
+ unsigned int domain, unsigned int bus,
+ struct pci_bus *b)
+{
+ struct pci_dev *d;
+ unsigned int devfn;
+
+ /*
+ * Scan the bus for functions and add any that are found. We omit
+ * handling of PCI bridge attachment because pciback prevents
+ * bridges from being exported.
+ */
+ for (devfn = 0; devfn < 0x100; devfn++) {
+ d = pci_get_slot(b, devfn);
+ if (d) {
+ /* Device is already known. */
+ pci_dev_put(d);
+ continue;
+ }
+
+ d = pci_scan_single_device(b, devfn);
+ if (d)
+ dev_info(&pdev->xdev->dev, "New device on "
+ "%04x:%02x:%02x.%d found.\n", domain, bus,
+ PCI_SLOT(devfn), PCI_FUNC(devfn));
+ }
+
+ return 0;
+}
+
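+/*
+ * Create the root bus @domain:@bus, scan it, claim the resources of
+ * its devices and register them with the driver core.
+ */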
+static int pcifront_scan_root(struct pcifront_device *pdev,
+ unsigned int domain, unsigned int bus)
+{
+ struct pci_bus *b;
+ struct pcifront_sd *sd = NULL;
+ struct pci_bus_entry *bus_entry = NULL;
+ int err = 0;
+
+#ifndef CONFIG_PCI_DOMAINS
+ if (domain != 0) {
+ dev_err(&pdev->xdev->dev,
+ "PCI Root in non-zero PCI Domain! domain=%d\n", domain);
+ dev_err(&pdev->xdev->dev,
+ "Please compile with CONFIG_PCI_DOMAINS\n");
+ err = -EINVAL;
+ goto err_out;
+ }
+#endif
+
+ dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
+ domain, bus);
+
+ bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
+ sd = kmalloc(sizeof(*sd), GFP_KERNEL);
+ if (!bus_entry || !sd) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ pcifront_init_sd(sd, domain, bus, pdev);
+
+ pci_lock_rescan_remove();
+
+ b = pci_scan_bus_parented(&pdev->xdev->dev, bus,
+ &pcifront_bus_ops, sd);
+ if (!b) {
+ dev_err(&pdev->xdev->dev,
+ "Error creating PCI Frontend Bus!\n");
+ err = -ENOMEM;
+ pci_unlock_rescan_remove();
+ goto err_out;
+ }
+
+ bus_entry->bus = b;
+
+ list_add(&bus_entry->list, &pdev->root_buses);
+
+ /* pci_scan_bus_parented skips devices which do not have a
+ * devfn == 0, so pcifront_scan_bus enumerates every devfn. */
+ err = pcifront_scan_bus(pdev, domain, bus, b);
+
+ /* Claim resources before going "live" with our devices */
+ pci_walk_bus(b, pcifront_claim_resource, pdev);
+
+ /* Create SysFS and notify udev of the devices. Aka: "going live" */
+ pci_bus_add_devices(b);
+
+ pci_unlock_rescan_remove();
+ return err;
+
+err_out:
+ kfree(bus_entry);
+ kfree(sd);
+
+ return err;
+}
+
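+/* Re-scan a known root bus for newly exported functions, creating the
+ * bus first if it does not exist yet. */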
+static int pcifront_rescan_root(struct pcifront_device *pdev,
+ unsigned int domain, unsigned int bus)
+{
+ int err;
+ struct pci_bus *b;
+
+#ifndef CONFIG_PCI_DOMAINS
+ if (domain != 0) {
+ dev_err(&pdev->xdev->dev,
+ "PCI Root in non-zero PCI Domain! domain=%d\n", domain);
+ dev_err(&pdev->xdev->dev,
+ "Please compile with CONFIG_PCI_DOMAINS\n");
+ return -EINVAL;
+ }
+#endif
+
+ dev_info(&pdev->xdev->dev, "Rescanning PCI Frontend Bus %04x:%02x\n",
+ domain, bus);
+
+ b = pci_find_bus(domain, bus);
+ if (!b)
+ /* If the bus is unknown, create it. */
+ return pcifront_scan_root(pdev, domain, bus);
+
+ err = pcifront_scan_bus(pdev, domain, bus, b);
+
+ /* Claim resources before going "live" with our devices */
+ pci_walk_bus(b, pcifront_claim_resource, pdev);
+
+ /* Create SysFS and notify udev of the devices. Aka: "going live" */
+ pci_bus_add_devices(b);
+
+ return err;
+}
+
+static void free_root_bus_devs(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ while (!list_empty(&bus->devices)) {
+ dev = container_of(bus->devices.next, struct pci_dev,
+ bus_list);
+ dev_dbg(&dev->dev, "removing device\n");
+ pci_stop_and_remove_bus_device(dev);
+ }
+}
+
+static void pcifront_free_roots(struct pcifront_device *pdev)
+{
+ struct pci_bus_entry *bus_entry, *t;
+
+ dev_dbg(&pdev->xdev->dev, "cleaning up root buses\n");
+
+ pci_lock_rescan_remove();
+ list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) {
+ list_del(&bus_entry->list);
+
+ free_root_bus_devs(bus_entry->bus);
+
+ kfree(bus_entry->bus->sysdata);
+
+ device_unregister(bus_entry->bus->bridge);
+ pci_remove_bus(bus_entry->bus);
+
+ kfree(bus_entry);
+ }
+ pci_unlock_rescan_remove();
+}
+
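+/*
+ * Dispatch an AER request from the backend to the error handler of
+ * the driver bound to the affected device. Returns
+ * PCI_ERS_RESULT_NONE when no handler can be called.
+ */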
+static pci_ers_result_t pcifront_common_process(int cmd,
+ struct pcifront_device *pdev,
+ pci_channel_state_t state)
+{
+ pci_ers_result_t result;
+ struct pci_driver *pdrv;
+ int bus = pdev->sh_info->aer_op.bus;
+ int devfn = pdev->sh_info->aer_op.devfn;
+ struct pci_dev *pcidev;
+
+ dev_dbg(&pdev->xdev->dev,
+ "pcifront AER process: cmd %x (bus:%x, devfn%x)",
+ cmd, bus, devfn);
+ result = PCI_ERS_RESULT_NONE;
+
+ pcidev = pci_get_bus_and_slot(bus, devfn);
+ if (!pcidev || !pcidev->driver) {
+ dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n");
+ pci_dev_put(pcidev);
+ return result;
+ }
+ pdrv = pcidev->driver;
+
+ /* pcidev and pdrv are known to be non-NULL at this point. */
+ if (pdrv->err_handler && pdrv->err_handler->error_detected) {
+ dev_dbg(&pcidev->dev, "trying to call AER service\n");
+ switch (cmd) {
+ case XEN_PCI_OP_aer_detected:
+ result = pdrv->err_handler->error_detected(pcidev, state);
+ break;
+ case XEN_PCI_OP_aer_mmio:
+ result = pdrv->err_handler->mmio_enabled(pcidev);
+ break;
+ case XEN_PCI_OP_aer_slotreset:
+ result = pdrv->err_handler->slot_reset(pcidev);
+ break;
+ case XEN_PCI_OP_aer_resume:
+ pdrv->err_handler->resume(pcidev);
+ break;
+ default:
+ dev_err(&pdev->xdev->dev,
+ "bad request in aer recovery operation!\n");
+ }
+ }
+ /* Drop the reference taken by pci_get_bus_and_slot(). */
+ pci_dev_put(pcidev);
+
+ return result;
+}
+
+static void pcifront_do_aer(struct work_struct *data)
+{
+ struct pcifront_device *pdev =
+ container_of(data, struct pcifront_device, op_work);
+ int cmd = pdev->sh_info->aer_op.cmd;
+ pci_channel_state_t state =
+ (pci_channel_state_t)pdev->sh_info->aer_op.err;
+
+ /*
+ * If a pci_conf op is in progress, we have to wait until it is
+ * done before servicing the aer op.
+ */
+ dev_dbg(&pdev->xdev->dev,
+ "pcifront service aer bus %x devfn %x\n",
+ pdev->sh_info->aer_op.bus, pdev->sh_info->aer_op.devfn);
+
+ pdev->sh_info->aer_op.err = pcifront_common_process(cmd, pdev, state);
+
+ /* Post the operation to the guest. */
+ wmb();
+ clear_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags);
+ notify_remote_via_evtchn(pdev->evtchn);
+
+ /* In case we lost an aer request in the short window above. */
+ smp_mb__before_atomic();
+ clear_bit(_PDEVB_op_active, &pdev->flags);
+ smp_mb__after_atomic();
+
+ schedule_pcifront_aer_op(pdev);
+}
+
+static irqreturn_t pcifront_handler_aer(int irq, void *dev)
+{
+ struct pcifront_device *pdev = dev;
+ schedule_pcifront_aer_op(pdev);
+ return IRQ_HANDLED;
+}
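+
+/* Record @pdev as the single system-wide PCI frontend and set up the
+ * Xen SWIOTLB late when no software IO TLB is active yet. */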
+static int pcifront_connect_and_init_dma(struct pcifront_device *pdev)
+{
+ int err = 0;
+
+ spin_lock(&pcifront_dev_lock);
+
+ if (!pcifront_dev) {
+ dev_info(&pdev->xdev->dev, "Installing PCI frontend\n");
+ pcifront_dev = pdev;
+ } else
+ err = -EEXIST;
+
+ spin_unlock(&pcifront_dev_lock);
+
+ if (!err && !swiotlb_nr_tbl()) {
+ err = pci_xen_swiotlb_init_late();
+ if (err)
+ dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n");
+ }
+ return err;
+}
+
+static void pcifront_disconnect(struct pcifront_device *pdev)
+{
+ spin_lock(&pcifront_dev_lock);
+
+ if (pdev == pcifront_dev) {
+ dev_info(&pdev->xdev->dev,
+ "Disconnecting PCI Frontend Buses\n");
+ pcifront_dev = NULL;
+ }
+
+ spin_unlock(&pcifront_dev_lock);
+}
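+
+/*
+ * Allocate a pcifront_device together with the shared info page used
+ * to pass pci_conf requests and AER notifications to and from the
+ * backend.
+ */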
+static struct pcifront_device *alloc_pdev(struct xenbus_device *xdev)
+{
+ struct pcifront_device *pdev;
+
+ pdev = kzalloc(sizeof(struct pcifront_device), GFP_KERNEL);
+ if (pdev == NULL)
+ goto out;
+
+ pdev->sh_info =
+ (struct xen_pci_sharedinfo *)__get_free_page(GFP_KERNEL);
+ if (pdev->sh_info == NULL) {
+ kfree(pdev);
+ pdev = NULL;
+ goto out;
+ }
+ pdev->sh_info->flags = 0;
+
+ /* Flag for registering PV AER handler */
+ set_bit(_XEN_PCIB_AERHANDLER, (void *)&pdev->sh_info->flags);
+
+ dev_set_drvdata(&xdev->dev, pdev);
+ pdev->xdev = xdev;
+
+ INIT_LIST_HEAD(&pdev->root_buses);
+
+ spin_lock_init(&pdev->sh_info_lock);
+
+ pdev->evtchn = INVALID_EVTCHN;
+ pdev->gnt_ref = INVALID_GRANT_REF;
+ pdev->irq = -1;
+
+ INIT_WORK(&pdev->op_work, pcifront_do_aer);
+
+ dev_dbg(&xdev->dev, "Allocated pdev @ 0x%p pdev->sh_info @ 0x%p\n",
+ pdev, pdev->sh_info);
+out:
+ return pdev;
+}
+
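+/* Undo alloc_pdev()/pcifront_publish_info(): remove the root buses,
+ * cancel the AER work and release the irq, event channel, grant
+ * reference and shared page. */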
+static void free_pdev(struct pcifront_device *pdev)
+{
+ dev_dbg(&pdev->xdev->dev, "freeing pdev @ 0x%p\n", pdev);
+
+ pcifront_free_roots(pdev);
+
+ cancel_work_sync(&pdev->op_work);
+
+ if (pdev->irq >= 0)
+ unbind_from_irqhandler(pdev->irq, pdev);
+
+ if (pdev->evtchn != INVALID_EVTCHN)
+ xenbus_free_evtchn(pdev->xdev, pdev->evtchn);
+
+ if (pdev->gnt_ref != INVALID_GRANT_REF)
+ gnttab_end_foreign_access(pdev->gnt_ref, 0 /* r/w page */,
+ (unsigned long)pdev->sh_info);
+ else
+ free_page((unsigned long)pdev->sh_info);
+
+ dev_set_drvdata(&pdev->xdev->dev, NULL);
+
+ kfree(pdev);
+}
+
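+/*
+ * Grant the shared page to the backend, allocate an event channel and
+ * publish both (plus the protocol magic) via a xenbus transaction,
+ * restarting the transaction on -EAGAIN.
+ */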
+static int pcifront_publish_info(struct pcifront_device *pdev)
+{
+ int err = 0;
+ struct xenbus_transaction trans;
+ grant_ref_t gref;
+
+ err = xenbus_grant_ring(pdev->xdev, pdev->sh_info, 1, &gref);
+ if (err < 0)
+ goto out;
+
+ pdev->gnt_ref = gref;
+
+ err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn);
+ if (err)
+ goto out;
+
+ err = bind_evtchn_to_irqhandler(pdev->evtchn, pcifront_handler_aer,
+ 0, "pcifront", pdev);
+
+ if (err < 0)
+ return err;
+
+ pdev->irq = err;
+
+do_publish:
+ err = xenbus_transaction_start(&trans);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error writing configuration for backend "
+ "(start transaction)");
+ goto out;
+ }
+
+ err = xenbus_printf(trans, pdev->xdev->nodename,
+ "pci-op-ref", "%u", pdev->gnt_ref);
+ if (!err)
+ err = xenbus_printf(trans, pdev->xdev->nodename,
+ "event-channel", "%u", pdev->evtchn);
+ if (!err)
+ err = xenbus_printf(trans, pdev->xdev->nodename,
+ "magic", XEN_PCI_MAGIC);
+
+ if (err) {
+ xenbus_transaction_end(trans, 1);
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error writing configuration for backend");
+ goto out;
+ } else {
+ err = xenbus_transaction_end(trans, 0);
+ if (err == -EAGAIN)
+ goto do_publish;
+ else if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error completing transaction "
+ "for backend");
+ goto out;
+ }
+ }
+
+ xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
+
+ dev_dbg(&pdev->xdev->dev, "publishing successful!\n");
+
+out:
+ return err;
+}
+
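+/* Scan every root bus advertised by the backend, then switch the
+ * frontend to XenbusStateConnected. */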
+static int pcifront_try_connect(struct pcifront_device *pdev)
+{
+ int err = -EFAULT;
+ int i, num_roots, len;
+ char str[64];
+ unsigned int domain, bus;
+
+ /* Only connect once */
+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+ XenbusStateInitialised)
+ goto out;
+
+ err = pcifront_connect_and_init_dma(pdev);
+ if (err && err != -EEXIST) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error setting up PCI Frontend");
+ goto out;
+ }
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
+ "root_num", "%d", &num_roots);
+ if (err == -ENOENT) {
+ xenbus_dev_error(pdev->xdev, err,
+ "No PCI Roots found, trying 0000:00");
+ err = pcifront_scan_root(pdev, 0, 0);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error scanning PCI root 0000:00");
+ goto out;
+ }
+ num_roots = 0;
+ } else if (err != 1) {
+ if (err == 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading number of PCI roots");
+ goto out;
+ }
+
+ for (i = 0; i < num_roots; i++) {
+ len = snprintf(str, sizeof(str), "root-%d", i);
+ if (unlikely(len >= (sizeof(str) - 1))) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
+ "%x:%x", &domain, &bus);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading PCI root %d", i);
+ goto out;
+ }
+
+ err = pcifront_scan_root(pdev, domain, bus);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error scanning PCI root %04x:%02x",
+ domain, bus);
+ goto out;
+ }
+ }
+
+ err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
+
+out:
+ return err;
+}
+
+static int pcifront_try_disconnect(struct pcifront_device *pdev)
+{
+ int err = 0;
+ enum xenbus_state prev_state;
+
+ prev_state = xenbus_read_driver_state(pdev->xdev->nodename);
+
+ if (prev_state >= XenbusStateClosing)
+ goto out;
+
+ if (prev_state == XenbusStateConnected) {
+ pcifront_free_roots(pdev);
+ pcifront_disconnect(pdev);
+ }
+
+ err = xenbus_switch_state(pdev->xdev, XenbusStateClosed);
+
+out:
+ return err;
+}
+
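+/* Handle XenbusStateReconfigured: re-scan the advertised root buses
+ * to pick up devices the backend has attached. */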
+static int pcifront_attach_devices(struct pcifront_device *pdev)
+{
+ int err = -EFAULT;
+ int i, num_roots, len;
+ unsigned int domain, bus;
+ char str[64];
+
+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+ XenbusStateReconfiguring)
+ goto out;
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
+ "root_num", "%d", &num_roots);
+ if (err == -ENOENT) {
+ xenbus_dev_error(pdev->xdev, err,
+ "No PCI Roots found, trying 0000:00");
+ err = pcifront_rescan_root(pdev, 0, 0);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error scanning PCI root 0000:00");
+ goto out;
+ }
+ num_roots = 0;
+ } else if (err != 1) {
+ if (err == 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading number of PCI roots");
+ goto out;
+ }
+
+ for (i = 0; i < num_roots; i++) {
+ len = snprintf(str, sizeof(str), "root-%d", i);
+ if (unlikely(len >= (sizeof(str) - 1))) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
+ "%x:%x", &domain, &bus);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading PCI root %d", i);
+ goto out;
+ }
+
+ err = pcifront_rescan_root(pdev, domain, bus);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error scanning PCI root %04x:%02x",
+ domain, bus);
+ goto out;
+ }
+ }
+
+ xenbus_switch_state(pdev->xdev, XenbusStateConnected);
+
+out:
+ return err;
+}
+
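+/*
+ * Walk the backend's device list and remove every device whose state
+ * is Closing, then enter XenbusStateReconfiguring so the backend can
+ * finish the detach.
+ */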
+static int pcifront_detach_devices(struct pcifront_device *pdev)
+{
+ int err = 0;
+ int i, num_devs;
+ unsigned int domain, bus, slot, func;
+ struct pci_dev *pci_dev;
+ char str[64];
+
+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+ XenbusStateConnected)
+ goto out;
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "num_devs", "%d",
+ &num_devs);
+ if (err != 1) {
+ if (err >= 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading number of PCI devices");
+ goto out;
+ }
+
+ /* Find devices being detached and remove them. */
+ for (i = 0; i < num_devs; i++) {
+ int l, state;
+ l = snprintf(str, sizeof(str), "state-%d", i);
+ if (unlikely(l >= (sizeof(str) - 1))) {
+ err = -ENOMEM;
+ goto out;
+ }
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d",
+ &state);
+ if (err != 1)
+ state = XenbusStateUnknown;
+
+ if (state != XenbusStateClosing)
+ continue;
+
+ /* Remove device. */
+ l = snprintf(str, sizeof(str), "vdev-%d", i);
+ if (unlikely(l >= (sizeof(str) - 1))) {
+ err = -ENOMEM;
+ goto out;
+ }
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
+ "%x:%x:%x.%x", &domain, &bus, &slot, &func);
+ if (err != 4) {
+ if (err >= 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading PCI device %d", i);
+ goto out;
+ }
+
+ pci_dev = pci_get_domain_bus_and_slot(domain, bus,
+ PCI_DEVFN(slot, func));
+ if (!pci_dev) {
+ dev_dbg(&pdev->xdev->dev,
+ "Cannot get PCI device %04x:%02x:%02x.%d\n",
+ domain, bus, slot, func);
+ continue;
+ }
+ pci_lock_rescan_remove();
+ pci_stop_and_remove_bus_device(pci_dev);
+ pci_dev_put(pci_dev);
+ pci_unlock_rescan_remove();
+
+ dev_dbg(&pdev->xdev->dev,
+ "PCI device %04x:%02x:%02x.%d removed.\n",
+ domain, bus, slot, func);
+ }
+
+ err = xenbus_switch_state(pdev->xdev, XenbusStateReconfiguring);
+
+out:
+ return err;
+}
+
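+/* Drive the frontend from backend state changes reported via xenbus. */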
+static void __init_refok pcifront_backend_changed(struct xenbus_device *xdev,
+ enum xenbus_state be_state)
+{
+ struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);
+
+ switch (be_state) {
+ case XenbusStateUnknown:
+ case XenbusStateInitialising:
+ case XenbusStateInitWait:
+ case XenbusStateInitialised:
+ break;
+
+ case XenbusStateConnected:
+ pcifront_try_connect(pdev);
+ break;
+
+ case XenbusStateClosed:
+ if (xdev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state -- fallthrough */
+ case XenbusStateClosing:
+ dev_warn(&xdev->dev, "backend going away!\n");
+ pcifront_try_disconnect(pdev);
+ break;
+
+ case XenbusStateReconfiguring:
+ pcifront_detach_devices(pdev);
+ break;
+
+ case XenbusStateReconfigured:
+ pcifront_attach_devices(pdev);
+ break;
+ }
+}
+
+static int pcifront_xenbus_probe(struct xenbus_device *xdev,
+ const struct xenbus_device_id *id)
+{
+ int err = 0;
+ struct pcifront_device *pdev = alloc_pdev(xdev);
+
+ if (pdev == NULL) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(xdev, err,
+ "Error allocating pcifront_device struct");
+ goto out;
+ }
+
+ err = pcifront_publish_info(pdev);
+ if (err)
+ free_pdev(pdev);
+
+out:
+ return err;
+}
+
+static int pcifront_xenbus_remove(struct xenbus_device *xdev)
+{
+ struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);
+ if (pdev)
+ free_pdev(pdev);
+
+ return 0;
+}
+
+static const struct xenbus_device_id xenpci_ids[] = {
+ {"pci"},
+ {""},
+};
+
+static struct xenbus_driver xenpci_driver = {
+ .name = "pcifront",
+ .ids = xenpci_ids,
+ .probe = pcifront_xenbus_probe,
+ .remove = pcifront_xenbus_remove,
+ .otherend_changed = pcifront_backend_changed,
+};
+
+static int __init pcifront_init(void)
+{
+ if (!xen_pv_domain() || xen_initial_domain())
+ return -ENODEV;
+
+ if (!xen_has_pv_devices())
+ return -ENODEV;
+
+ pci_frontend_registrar(1 /* enable */);
+
+ return xenbus_register_frontend(&xenpci_driver);
+}
+
+static void __exit pcifront_cleanup(void)
+{
+ xenbus_unregister_driver(&xenpci_driver);
+ pci_frontend_registrar(0 /* disable */);
+}
+module_init(pcifront_init);
+module_exit(pcifront_cleanup);
+
+MODULE_DESCRIPTION("Xen PCI passthrough frontend.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:pci");