author     André Fabian Silva Delgado <emulatorman@parabola.nu>   2015-09-08 01:01:14 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>   2015-09-08 01:01:14 -0300
commit     e5fd91f1ef340da553f7a79da9540c3db711c937 (patch)
tree       b11842027dc6641da63f4bcc524f8678263304a3 /arch/powerpc/platforms
parent     2a9b0348e685a63d97486f6749622b61e9e3292f (diff)
Linux-libre 4.2-gnu
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_gpt.c            |   2
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pci.c            |   2
-rw-r--r--  arch/powerpc/platforms/83xx/suspend.c                |   3
-rw-r--r--  arch/powerpc/platforms/85xx/Kconfig                  |   2
-rw-r--r--  arch/powerpc/platforms/85xx/corenet_generic.c        |   3
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c                    |  51
-rw-r--r--  arch/powerpc/platforms/85xx/twr_p102x.c              |   4
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype               |  11
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c               |   7
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c                  |   8
-rw-r--r--  arch/powerpc/platforms/cell/spufs/inode.c            |   2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c            |   5
-rw-r--r--  arch/powerpc/platforms/embedded6xx/hlwd-pic.c        |   2
-rw-r--r--  arch/powerpc/platforms/pasemi/Makefile               |   1
-rw-r--r--  arch/powerpc/platforms/pasemi/iommu.c                |   7
-rw-r--r--  arch/powerpc/platforms/pasemi/msi.c                  | 168
-rw-r--r--  arch/powerpc/platforms/powernv/Kconfig               |   7
-rw-r--r--  arch/powerpc/platforms/powernv/Makefile              |   5
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-powernv.c         |  62
-rw-r--r--  arch/powerpc/platforms/powernv/idle.c                | 293
-rw-r--r--  arch/powerpc/platforms/powernv/opal-async.c          |   3
-rw-r--r--  arch/powerpc/platforms/powernv/opal-dump.c           |  56
-rw-r--r--  arch/powerpc/platforms/powernv/opal-elog.c           |  40
-rw-r--r--  arch/powerpc/platforms/powernv/opal-hmi.c            |   3
-rw-r--r--  arch/powerpc/platforms/powernv/opal-irqchip.c        | 253
-rw-r--r--  arch/powerpc/platforms/powernv/opal-memory-errors.c  |   2
-rw-r--r--  arch/powerpc/platforms/powernv/opal-prd.c            | 448
-rw-r--r--  arch/powerpc/platforms/powernv/opal-sensor.c         |   3
-rw-r--r--  arch/powerpc/platforms/powernv/opal-sysparam.c       |  43
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S       |   2
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c                | 221
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c            | 791
-rw-r--r--  arch/powerpc/platforms/powernv/pci-p5ioc2.c          |  45
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c                 | 204
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h                 |  31
-rw-r--r--  arch/powerpc/platforms/powernv/powernv.h             |  11
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c               | 181
-rw-r--r--  arch/powerpc/platforms/ps3/time.c                    |   3
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c               |   3
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c         |   2
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c               | 179
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c                 |  16
42 files changed, 2295 insertions(+), 890 deletions(-)
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index c949ca055..63016621a 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -193,7 +193,7 @@ static struct irq_chip mpc52xx_gpt_irq_chip = {
void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc)
{
- struct mpc52xx_gpt_priv *gpt = irq_get_handler_data(virq);
+ struct mpc52xx_gpt_priv *gpt = irq_desc_get_handler_data(desc);
int sub_virq;
u32 status;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
index e2d401ad8..6eb3b2aba 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
@@ -12,7 +12,7 @@
#undef DEBUG
-#include <asm/pci.h>
+#include <linux/pci.h>
#include <asm/mpc52xx.h>
#include <asm/delay.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index c9adbfb65..fcbea4b51 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -445,5 +445,4 @@ static int pmc_init(void)
{
return platform_driver_register(&pmc_driver);
}
-
-module_init(pmc_init);
+device_initcall(pmc_init);
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 2fb4b2436..97915feff 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -282,7 +282,7 @@ config CORENET_GENERIC
For 64bit kernel, the following boards are supported:
T208x QDS/RDB, T4240 QDS/RDB and B4 QDS
The following boards are supported for both 32bit and 64bit kernel:
- P5020 DS, P5040 DS and T104xQDS/RDB
+ P5020 DS, P5040 DS, T102x QDS/RDB, T104x QDS/RDB
endif # FSL_SOC_BOOKE
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 9824d2cf7..bd839dc28 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -150,6 +150,9 @@ static const char * const boards[] __initconst = {
"fsl,B4860QDS",
"fsl,B4420QDS",
"fsl,B4220QDS",
+ "fsl,T1023RDB",
+ "fsl,T1024QDS",
+ "fsl,T1024RDB",
"fsl,T1040QDS",
"fsl,T1042QDS",
"fsl,T1040RDB",
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 8631ac5f0..b8b821697 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -345,6 +345,7 @@ void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
local_irq_disable();
if (secondary) {
+ __flush_disable_L1();
atomic_inc(&kexec_down_cpus);
/* loop forever */
while (1);
@@ -357,61 +358,11 @@ static void mpc85xx_smp_kexec_down(void *arg)
ppc_md.kexec_cpu_down(0,1);
}
-static void map_and_flush(unsigned long paddr)
-{
- struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
- unsigned long kaddr = (unsigned long)kmap_atomic(page);
-
- flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
- kunmap_atomic((void *)kaddr);
-}
-
-/**
- * Before we reset the other cores, we need to flush relevant cache
- * out to memory so we don't get anything corrupted, some of these flushes
- * are performed out of an overabundance of caution as interrupts are not
- * disabled yet and we can switch cores
- */
-static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image)
-{
- kimage_entry_t *ptr, entry;
- unsigned long paddr;
- int i;
-
- if (image->type == KEXEC_TYPE_DEFAULT) {
- /* normal kexec images are stored in temporary pages */
- for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
- ptr = (entry & IND_INDIRECTION) ?
- phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
- if (!(entry & IND_DESTINATION)) {
- map_and_flush(entry);
- }
- }
- /* flush out last IND_DONE page */
- map_and_flush(entry);
- } else {
- /* crash type kexec images are copied to the crash region */
- for (i = 0; i < image->nr_segments; i++) {
- struct kexec_segment *seg = &image->segment[i];
- for (paddr = seg->mem; paddr < seg->mem + seg->memsz;
- paddr += PAGE_SIZE) {
- map_and_flush(paddr);
- }
- }
- }
-
- /* also flush the kimage struct to be passed in as well */
- flush_dcache_range((unsigned long)image,
- (unsigned long)image + sizeof(*image));
-}
-
static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
int timeout = INT_MAX;
int i, num_cpus = num_present_cpus();
- mpc85xx_smp_flush_dcache_kexec(image);
-
if (image->type == KEXEC_TYPE_DEFAULT)
smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);
diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c
index 1eadb6d0d..30e002f46 100644
--- a/arch/powerpc/platforms/85xx/twr_p102x.c
+++ b/arch/powerpc/platforms/85xx/twr_p102x.c
@@ -79,7 +79,7 @@ static void __init twr_p1025_setup_arch(void)
mpc85xx_qe_init();
mpc85xx_qe_par_io_init();
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE)
+#if IS_ENABLED(CONFIG_UCC_GETH) || IS_ENABLED(CONFIG_SERIAL_QE)
if (machine_is(twr_p1025)) {
struct ccsr_guts __iomem *guts;
@@ -101,7 +101,7 @@ static void __init twr_p1025_setup_arch(void)
MPC85xx_PMUXCR_QE(12));
iounmap(guts);
-#if defined(CONFIG_SERIAL_QE)
+#if IS_ENABLED(CONFIG_SERIAL_QE)
/* On P1025TWR board, the UCC7 acted as UART port.
* However, The UCC7's CTS pin is low level in default,
* it will impact the transmission in full duplex
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 7264e9119..c140e94c7 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -405,6 +405,16 @@ config PPC_DOORBELL
endmenu
+config VDSO32
+ def_bool y
+ depends on PPC32 || CPU_BIG_ENDIAN
+ help
+ This symbol controls whether we build the 32-bit VDSO. We obviously
+ want to do that if we're building a 32-bit kernel. If we're building
+ a 64-bit kernel then we only want a 32-bit VDSO if we're building for
+ big endian. That is because the only little endian configuration we
+ support is ppc64le which is 64-bit only.
+
choice
prompt "Endianness selection"
default CPU_BIG_ENDIAN
@@ -421,6 +431,7 @@ config CPU_BIG_ENDIAN
config CPU_LITTLE_ENDIAN
bool "Build little endian kernel"
+ depends on PPC_BOOK3S_64
select PPC64_BOOT_WRAPPER
help
Build a little endian kernel.
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 623bd9614..fe51de4fc 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -22,6 +22,7 @@
#include <asm/machdep.h>
#include <asm/prom.h>
+#include "cell.h"
/*
* MSIC registers, specified as offsets from dcr_base
@@ -95,7 +96,7 @@ static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct axon_msic *msic = irq_get_handler_data(irq);
+ struct axon_msic *msic = irq_desc_get_handler_data(desc);
u32 write_offset, msi;
int idx;
int retry = 0;
@@ -406,8 +407,8 @@ static int axon_msi_probe(struct platform_device *device)
dev_set_drvdata(&device->dev, msic);
- ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
- ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
+ cell_pci_controller_ops.setup_msi_irqs = axon_msi_setup_msi_irqs;
+ cell_pci_controller_ops.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
axon_msi_debug_setup(dn, msic);
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 21b502398..14a582b21 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -466,6 +466,11 @@ static inline u32 cell_iommu_get_ioid(struct device_node *np)
return *ioid;
}
+static struct iommu_table_ops cell_iommu_ops = {
+ .set = tce_build_cell,
+ .clear = tce_free_cell
+};
+
static struct iommu_window * __init
cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
unsigned long offset, unsigned long size,
@@ -492,6 +497,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
window->table.it_offset =
(offset >> window->table.it_page_shift) + pte_offset;
window->table.it_size = size >> window->table.it_page_shift;
+ window->table.it_ops = &cell_iommu_ops;
iommu_init_table(&window->table, iommu->nid);
@@ -1201,8 +1207,6 @@ static int __init cell_iommu_init(void)
/* Setup various callbacks */
cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
ppc_md.dma_get_required_mask = cell_dma_get_required_mask;
- ppc_md.tce_build = tce_build_cell;
- ppc_md.tce_free = tce_free_cell;
if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
goto bail;
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 1ba6307be..11634fa7a 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -166,7 +166,7 @@ static void spufs_prune_dir(struct dentry *dir)
mutex_lock(&d_inode(dir)->i_mutex);
list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) {
spin_lock(&dentry->d_lock);
- if (!(d_unhashed(dentry)) && d_really_is_positive(dentry)) {
+ if (simple_positive(dentry)) {
dget_dlock(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 5c80a0f9d..998f632e7 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -64,6 +64,11 @@ static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;
/*
+ * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
+ */
+#define NORMAL_PRIO 120
+
+/*
* Frequency of the spu scheduler tick. By default we do one SPU scheduler
* tick for every 10 CPU scheduler ticks.
*/
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index c269caee5..9dd154d6f 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -124,7 +124,7 @@ static void hlwd_pic_irq_cascade(unsigned int cascade_virq,
struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct irq_domain *irq_domain = irq_get_handler_data(cascade_virq);
+ struct irq_domain *irq_domain = irq_desc_get_handler_data(desc);
unsigned int virq;
raw_spin_lock(&desc->lock);
diff --git a/arch/powerpc/platforms/pasemi/Makefile b/arch/powerpc/platforms/pasemi/Makefile
index 8e8d4cae5..60b4e0fd9 100644
--- a/arch/powerpc/platforms/pasemi/Makefile
+++ b/arch/powerpc/platforms/pasemi/Makefile
@@ -1,2 +1,3 @@
obj-y += setup.o pci.o time.o idle.o powersave.o iommu.o dma_lib.o misc.o
obj-$(CONFIG_PPC_PASEMI_MDIO) += gpio_mdio.o
+obj-$(CONFIG_PCI_MSI) += msi.o
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index b8f567b2e..c929644e7 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -134,6 +134,10 @@ static void iobmap_free(struct iommu_table *tbl, long index,
}
}
+static struct iommu_table_ops iommu_table_iobmap_ops = {
+ .set = iobmap_build,
+ .clear = iobmap_free
+};
static void iommu_table_iobmap_setup(void)
{
@@ -153,6 +157,7 @@ static void iommu_table_iobmap_setup(void)
* Should probably be 8 (64 bytes)
*/
iommu_table_iobmap.it_blocksize = 4;
+ iommu_table_iobmap.it_ops = &iommu_table_iobmap_ops;
iommu_init_table(&iommu_table_iobmap, 0);
pr_debug(" <- %s\n", __func__);
}
@@ -252,8 +257,6 @@ void __init iommu_init_early_pasemi(void)
pasemi_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pasemi;
pasemi_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pasemi;
- ppc_md.tce_build = iobmap_build;
- ppc_md.tce_free = iobmap_free;
set_pci_dma_ops(&dma_iommu_ops);
}
diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c
new file mode 100644
index 000000000..27f2b187a
--- /dev/null
+++ b/arch/powerpc/platforms/pasemi/msi.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2007, Olof Johansson, PA Semi
+ *
+ * Based on arch/powerpc/sysdev/mpic_u3msi.c:
+ *
+ * Copyright 2006, Segher Boessenkool, IBM Corporation.
+ * Copyright 2006-2007, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <asm/mpic.h>
+#include <asm/prom.h>
+#include <asm/hw_irq.h>
+#include <asm/ppc-pci.h>
+#include <asm/msi_bitmap.h>
+
+#include <sysdev/mpic.h>
+
+/* Allocate 16 interrupts per device, to give an alignment of 16,
+ * since that's the size of the grouping w.r.t. affinity. If someone
+ * needs more than 32 MSI's down the road we'll have to rethink this,
+ * but it should be OK for now.
+ */
+#define ALLOC_CHUNK 16
+
+#define PASEMI_MSI_ADDR 0xfc080000
+
+/* A bit ugly, can we get this from the pci_dev somehow? */
+static struct mpic *msi_mpic;
+
+
+static void mpic_pasemi_msi_mask_irq(struct irq_data *data)
+{
+ pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq);
+ pci_msi_mask_irq(data);
+ mpic_mask_irq(data);
+}
+
+static void mpic_pasemi_msi_unmask_irq(struct irq_data *data)
+{
+ pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq);
+ mpic_unmask_irq(data);
+ pci_msi_unmask_irq(data);
+}
+
+static struct irq_chip mpic_pasemi_msi_chip = {
+ .irq_shutdown = mpic_pasemi_msi_mask_irq,
+ .irq_mask = mpic_pasemi_msi_mask_irq,
+ .irq_unmask = mpic_pasemi_msi_unmask_irq,
+ .irq_eoi = mpic_end_irq,
+ .irq_set_type = mpic_set_irq_type,
+ .irq_set_affinity = mpic_set_affinity,
+ .name = "PASEMI-MSI",
+};
+
+static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
+{
+ struct msi_desc *entry;
+
+ pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
+
+ list_for_each_entry(entry, &pdev->msi_list, list) {
+ if (entry->irq == NO_IRQ)
+ continue;
+
+ irq_set_msi_desc(entry->irq, NULL);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
+ virq_to_hw(entry->irq), ALLOC_CHUNK);
+ irq_dispose_mapping(entry->irq);
+ }
+
+ return;
+}
+
+static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+ unsigned int virq;
+ struct msi_desc *entry;
+ struct msi_msg msg;
+ int hwirq;
+
+ if (type == PCI_CAP_ID_MSIX)
+ pr_debug("pasemi_msi: MSI-X untested, trying anyway\n");
+ pr_debug("pasemi_msi_setup_msi_irqs, pdev %p nvec %d type %d\n",
+ pdev, nvec, type);
+
+ msg.address_hi = 0;
+ msg.address_lo = PASEMI_MSI_ADDR;
+
+ list_for_each_entry(entry, &pdev->msi_list, list) {
+ /* Allocate 16 interrupts for now, since that's the grouping for
+ * affinity. This can be changed later if it turns out 32 is too
+ * few MSIs for someone, but restrictions will apply to how the
+ * sources can be changed independently.
+ */
+ hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap,
+ ALLOC_CHUNK);
+ if (hwirq < 0) {
+ pr_debug("pasemi_msi: failed allocating hwirq\n");
+ return hwirq;
+ }
+
+ virq = irq_create_mapping(msi_mpic->irqhost, hwirq);
+ if (virq == NO_IRQ) {
+ pr_debug("pasemi_msi: failed mapping hwirq 0x%x\n",
+ hwirq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq,
+ ALLOC_CHUNK);
+ return -ENOSPC;
+ }
+
+ /* Vector on MSI is really an offset, the hardware adds
+ * it to the value written at the magic address. So set
+ * it to 0 to remain sane.
+ */
+ mpic_set_vector(virq, 0);
+
+ irq_set_msi_desc(virq, entry);
+ irq_set_chip(virq, &mpic_pasemi_msi_chip);
+ irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
+
+ pr_debug("pasemi_msi: allocated virq 0x%x (hw 0x%x) " \
+ "addr 0x%x\n", virq, hwirq, msg.address_lo);
+
+ /* Likewise, the device writes [0...511] into the target
+ * register to generate MSI [512...1023]
+ */
+ msg.data = hwirq-0x200;
+ pci_write_msi_msg(virq, &msg);
+ }
+
+ return 0;
+}
+
+int mpic_pasemi_msi_init(struct mpic *mpic)
+{
+ int rc;
+ struct pci_controller *phb;
+
+ if (!mpic->irqhost->of_node ||
+ !of_device_is_compatible(mpic->irqhost->of_node,
+ "pasemi,pwrficient-openpic"))
+ return -ENODEV;
+
+ rc = mpic_msi_init_allocator(mpic);
+ if (rc) {
+ pr_debug("pasemi_msi: Error allocating bitmap!\n");
+ return rc;
+ }
+
+ pr_debug("pasemi_msi: Registering PA Semi MPIC MSI callbacks\n");
+
+ msi_mpic = mpic;
+ list_for_each_entry(phb, &hose_list, list_node) {
+ WARN_ON(phb->controller_ops.setup_msi_irqs);
+ phb->controller_ops.setup_msi_irqs = pasemi_msi_setup_msi_irqs;
+ phb->controller_ops.teardown_msi_irqs = pasemi_msi_teardown_msi_irqs;
+ }
+
+ return 0;
+}
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 4b044d8cb..604190cab 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -19,3 +19,10 @@ config PPC_POWERNV
select CPU_FREQ_GOV_CONSERVATIVE
select PPC_DOORBELL
default y
+
+config OPAL_PRD
+ tristate 'OPAL PRD driver'
+ depends on PPC_POWERNV
+ help
+ This enables the opal-prd driver, a facility to run processor
+ recovery diagnostics on OpenPower machines
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 33e44f372..1c8cdb625 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -1,7 +1,7 @@
-obj-y += setup.o opal-wrappers.o opal.o opal-async.o
+obj-y += setup.o opal-wrappers.o opal.o opal-async.o idle.o
obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
-obj-y += opal-msglog.o opal-hmi.o opal-power.o
+obj-y += opal-msglog.o opal-hmi.o opal-power.o opal-irqchip.o
obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o
obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
@@ -9,3 +9,4 @@ obj-$(CONFIG_EEH) += eeh-powernv.o
obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
obj-$(CONFIG_TRACEPOINTS) += opal-tracepoints.o
+obj-$(CONFIG_OPAL_PRD) += opal-prd.o
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index ce738ab3d..7cf0df859 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/of.h>
@@ -40,6 +41,7 @@
#include "pci.h"
static bool pnv_eeh_nb_init = false;
+static int eeh_event_irq = -EINVAL;
/**
* pnv_eeh_init - EEH platform dependent initialization
@@ -88,34 +90,22 @@ static int pnv_eeh_init(void)
return 0;
}
-static int pnv_eeh_event(struct notifier_block *nb,
- unsigned long events, void *change)
+static irqreturn_t pnv_eeh_event(int irq, void *data)
{
- uint64_t changed_evts = (uint64_t)change;
-
/*
- * We simply send special EEH event if EEH has
- * been enabled, or clear pending events in
- * case that we enable EEH soon
+ * We simply send a special EEH event if EEH has been
+ * enabled. We don't care about EEH events until we've
+ * finished processing the outstanding ones. Event processing
+ * gets unmasked in next_error() if EEH is enabled.
*/
- if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
- !(events & OPAL_EVENT_PCI_ERROR))
- return 0;
+ disable_irq_nosync(irq);
if (eeh_enabled())
eeh_send_failure_event(NULL);
- else
- opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
- return 0;
+ return IRQ_HANDLED;
}
-static struct notifier_block pnv_eeh_nb = {
- .notifier_call = pnv_eeh_event,
- .next = NULL,
- .priority = 0
-};
-
#ifdef CONFIG_DEBUG_FS
static ssize_t pnv_eeh_ei_write(struct file *filp,
const char __user *user_buf,
@@ -237,16 +227,28 @@ static int pnv_eeh_post_init(void)
/* Register OPAL event notifier */
if (!pnv_eeh_nb_init) {
- ret = opal_notifier_register(&pnv_eeh_nb);
- if (ret) {
- pr_warn("%s: Can't register OPAL event notifier (%d)\n",
- __func__, ret);
+ eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR));
+ if (eeh_event_irq < 0) {
+ pr_err("%s: Can't register OPAL event interrupt (%d)\n",
+ __func__, eeh_event_irq);
+ return eeh_event_irq;
+ }
+
+ ret = request_irq(eeh_event_irq, pnv_eeh_event,
+ IRQ_TYPE_LEVEL_HIGH, "opal-eeh", NULL);
+ if (ret < 0) {
+ irq_dispose_mapping(eeh_event_irq);
+ pr_err("%s: Can't request OPAL event interrupt (%d)\n",
+ __func__, eeh_event_irq);
return ret;
}
pnv_eeh_nb_init = true;
}
+ if (!eeh_enabled())
+ disable_irq(eeh_event_irq);
+
list_for_each_entry(hose, &hose_list, list_node) {
phb = hose->private_data;
@@ -979,7 +981,7 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option)
/**
* pnv_eeh_wait_state - Wait for PE state
* @pe: EEH PE
- * @max_wait: maximal period in microsecond
+ * @max_wait: maximal period in millisecond
*
* Wait for the state of associated PE. It might take some time
* to retrieve the PE's state.
@@ -1000,13 +1002,13 @@ static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
if (ret != EEH_STATE_UNAVAILABLE)
return ret;
- max_wait -= mwait;
if (max_wait <= 0) {
pr_warn("%s: Timeout getting PE#%x's state (%d)\n",
__func__, pe->addr, max_wait);
return EEH_STATE_NOT_SUPPORT;
}
+ max_wait -= mwait;
msleep(mwait);
}
@@ -1303,12 +1305,10 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
int state, ret = EEH_NEXT_ERR_NONE;
/*
- * While running here, it's safe to purge the event queue.
- * And we should keep the cached OPAL notifier event sychronized
- * between the kernel and firmware.
+ * While running here, it's safe to purge the event queue. The
+ * event should still be masked.
*/
eeh_remove_event(NULL, false);
- opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
list_for_each_entry(hose, &hose_list, list_node) {
/*
@@ -1477,6 +1477,10 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
break;
}
+ /* Unmask the event */
+ if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
+ enable_irq(eeh_event_irq);
+
return ret;
}
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
new file mode 100644
index 000000000..59d735d2e
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -0,0 +1,293 @@
+/*
+ * PowerNV cpuidle code
+ *
+ * Copyright 2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/device.h>
+#include <linux/cpu.h>
+
+#include <asm/firmware.h>
+#include <asm/machdep.h>
+#include <asm/opal.h>
+#include <asm/cputhreads.h>
+#include <asm/cpuidle.h>
+#include <asm/code-patching.h>
+#include <asm/smp.h>
+
+#include "powernv.h"
+#include "subcore.h"
+
+static u32 supported_cpuidle_states;
+
+int pnv_save_sprs_for_winkle(void)
+{
+ int cpu;
+ int rc;
+
+ /*
+ * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric accross
+ * all cpus at boot. Get these reg values of current cpu and use the
+ * same accross all cpus.
+ */
+ uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
+ uint64_t hid0_val = mfspr(SPRN_HID0);
+ uint64_t hid1_val = mfspr(SPRN_HID1);
+ uint64_t hid4_val = mfspr(SPRN_HID4);
+ uint64_t hid5_val = mfspr(SPRN_HID5);
+ uint64_t hmeer_val = mfspr(SPRN_HMEER);
+
+ for_each_possible_cpu(cpu) {
+ uint64_t pir = get_hard_smp_processor_id(cpu);
+ uint64_t hsprg0_val = (uint64_t)&paca[cpu];
+
+ /*
+ * HSPRG0 is used to store the cpu's pointer to paca. Hence last
+ * 3 bits are guaranteed to be 0. Program slw to restore HSPRG0
+ * with 63rd bit set, so that when a thread wakes up at 0x100 we
+ * can use this bit to distinguish between fastsleep and
+ * deep winkle.
+ */
+ hsprg0_val |= 1;
+
+ rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
+ if (rc != 0)
+ return rc;
+
+ rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
+ if (rc != 0)
+ return rc;
+
+ /* HIDs are per core registers */
+ if (cpu_thread_in_core(cpu) == 0) {
+
+ rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
+ if (rc != 0)
+ return rc;
+
+ rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
+ if (rc != 0)
+ return rc;
+
+ rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
+ if (rc != 0)
+ return rc;
+
+ rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
+ if (rc != 0)
+ return rc;
+
+ rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static void pnv_alloc_idle_core_states(void)
+{
+ int i, j;
+ int nr_cores = cpu_nr_cores();
+ u32 *core_idle_state;
+
+ /*
+ * core_idle_state - First 8 bits track the idle state of each thread
+ * of the core. The 8th bit is the lock bit. Initially all thread bits
+ * are set. They are cleared when the thread enters deep idle state
+ * like sleep and winkle. Initially the lock bit is cleared.
+ * The lock bit has 2 purposes
+ * a. While the first thread is restoring core state, it prevents
+ * other threads in the core from switching to process context.
+ * b. While the last thread in the core is saving the core state, it
+ * prevents a different thread from waking up.
+ */
+ for (i = 0; i < nr_cores; i++) {
+ int first_cpu = i * threads_per_core;
+ int node = cpu_to_node(first_cpu);
+
+ core_idle_state = kmalloc_node(sizeof(u32), GFP_KERNEL, node);
+ *core_idle_state = PNV_CORE_IDLE_THREAD_BITS;
+
+ for (j = 0; j < threads_per_core; j++) {
+ int cpu = first_cpu + j;
+
+ paca[cpu].core_idle_state_ptr = core_idle_state;
+ paca[cpu].thread_idle_state = PNV_THREAD_RUNNING;
+ paca[cpu].thread_mask = 1 << j;
+ }
+ }
+
+ update_subcore_sibling_mask();
+
+ if (supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED)
+ pnv_save_sprs_for_winkle();
+}
+
+u32 pnv_get_supported_cpuidle_states(void)
+{
+ return supported_cpuidle_states;
+}
+EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
+
+
+static void pnv_fastsleep_workaround_apply(void *info)
+
+{
+ int rc;
+ int *err = info;
+
+ rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
+ OPAL_CONFIG_IDLE_APPLY);
+ if (rc)
+ *err = 1;
+}
+
+/*
+ * Used to store fastsleep workaround state
+ * 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
+ * 1 - Workaround applied once, never undone.
+ */
+static u8 fastsleep_workaround_applyonce;
+
+static ssize_t show_fastsleep_workaround_applyonce(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", fastsleep_workaround_applyonce);
+}
+
+static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ cpumask_t primary_thread_mask;
+ int err;
+ u8 val;
+
+ if (kstrtou8(buf, 0, &val) || val != 1)
+ return -EINVAL;
+
+ if (fastsleep_workaround_applyonce == 1)
+ return count;
+
+ /*
+ * fastsleep_workaround_applyonce = 1 implies
+ * fastsleep workaround needs to be left in 'applied' state on all
+ * the cores. Do this by-
+ * 1. Patching out the call to 'undo' workaround in fastsleep exit path
+ * 2. Sending ipi to all the cores which have atleast one online thread
+ * 3. Patching out the call to 'apply' workaround in fastsleep entry
+ * path
+ * There is no need to send ipi to cores which have all threads
+ * offlined, as last thread of the core entering fastsleep or deeper
+ * state would have applied workaround.
+ */
+ err = patch_instruction(
+ (unsigned int *)pnv_fastsleep_workaround_at_exit,
+ PPC_INST_NOP);
+ if (err) {
+ pr_err("fastsleep_workaround_applyonce change failed while patching pnv_fastsleep_workaround_at_exit");
+ goto fail;
+ }
+
+ get_online_cpus();
+ primary_thread_mask = cpu_online_cores_map();
+ on_each_cpu_mask(&primary_thread_mask,
+ pnv_fastsleep_workaround_apply,
+ &err, 1);
+ put_online_cpus();
+ if (err) {
+ pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
+ goto fail;
+ }
+
+ err = patch_instruction(
+ (unsigned int *)pnv_fastsleep_workaround_at_entry,
+ PPC_INST_NOP);
+ if (err) {
+ pr_err("fastsleep_workaround_applyonce change failed while patching pnv_fastsleep_workaround_at_entry");
+ goto fail;
+ }
+
+ fastsleep_workaround_applyonce = 1;
+
+ return count;
+fail:
+ return -EIO;
+}
+
+static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
+ show_fastsleep_workaround_applyonce,
+ store_fastsleep_workaround_applyonce);
+
+static int __init pnv_init_idle_states(void)
+{
+ struct device_node *power_mgt;
+ int dt_idle_states;
+ u32 *flags;
+ int i;
+
+ supported_cpuidle_states = 0;
+
+ if (cpuidle_disable != IDLE_NO_OVERRIDE)
+ goto out;
+
+ if (!firmware_has_feature(FW_FEATURE_OPALv3))
+ goto out;
+
+ power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
+ if (!power_mgt) {
+ pr_warn("opal: PowerMgmt Node not found\n");
+ goto out;
+ }
+ dt_idle_states = of_property_count_u32_elems(power_mgt,
+ "ibm,cpu-idle-state-flags");
+ if (dt_idle_states < 0) {
+ pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+ goto out;
+ }
+
+ flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
+ if (of_property_read_u32_array(power_mgt,
+ "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
+ goto out_free;
+ }
+
+ for (i = 0; i < dt_idle_states; i++)
+ supported_cpuidle_states |= flags[i];
+
+ if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
+ patch_instruction(
+ (unsigned int *)pnv_fastsleep_workaround_at_entry,
+ PPC_INST_NOP);
+ patch_instruction(
+ (unsigned int *)pnv_fastsleep_workaround_at_exit,
+ PPC_INST_NOP);
+ } else {
+ /*
+ * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that
+ * workaround is needed to use fastsleep. Provide sysfs
+ * control to choose how this workaround has to be applied.
+ */
+ device_create_file(cpu_subsys.dev_root,
+ &dev_attr_fastsleep_workaround_applyonce);
+ }
+
+ pnv_alloc_idle_core_states();
+out_free:
+ kfree(flags);
+out:
+ return 0;
+}
+machine_subsys_initcall(powernv, pnv_init_idle_states);
diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c
index 693b6cdac..bdc8c0c71 100644
--- a/arch/powerpc/platforms/powernv/opal-async.c
+++ b/arch/powerpc/platforms/powernv/opal-async.c
@@ -151,7 +151,7 @@ static struct notifier_block opal_async_comp_nb = {
.priority = 0,
};
-static int __init opal_async_comp_init(void)
+int __init opal_async_comp_init(void)
{
struct device_node *opal_node;
const __be32 *async;
@@ -205,4 +205,3 @@ out_opal_node:
out:
return err;
}
-machine_subsys_initcall(powernv, opal_async_comp_init);
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index 5aa9c1ce4..2ee96431f 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -15,6 +15,7 @@
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <asm/opal.h>
@@ -60,7 +61,7 @@ static ssize_t dump_type_show(struct dump_obj *dump_obj,
struct dump_attribute *attr,
char *buf)
{
-
+
return sprintf(buf, "0x%x %s\n", dump_obj->type,
dump_type_to_string(dump_obj->type));
}
@@ -363,7 +364,7 @@ static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
return dump;
}
-static int process_dump(void)
+static irqreturn_t process_dump(int irq, void *data)
{
int rc;
uint32_t dump_id, dump_size, dump_type;
@@ -387,45 +388,13 @@ static int process_dump(void)
if (!dump)
return -1;
- return 0;
-}
-
-static void dump_work_fn(struct work_struct *work)
-{
- process_dump();
+ return IRQ_HANDLED;
}
-static DECLARE_WORK(dump_work, dump_work_fn);
-
-static void schedule_process_dump(void)
-{
- schedule_work(&dump_work);
-}
-
-/*
- * New dump available notification
- *
- * Once we get notification, we add sysfs entries for it.
- * We only fetch the dump on demand, and create sysfs asynchronously.
- */
-static int dump_event(struct notifier_block *nb,
- unsigned long events, void *change)
-{
- if (events & OPAL_EVENT_DUMP_AVAIL)
- schedule_process_dump();
-
- return 0;
-}
-
-static struct notifier_block dump_nb = {
- .notifier_call = dump_event,
- .next = NULL,
- .priority = 0
-};
-
void __init opal_platform_dump_init(void)
{
int rc;
+ int dump_irq;
/* ELOG not supported by firmware */
if (!opal_check_token(OPAL_DUMP_READ))
@@ -445,10 +414,19 @@ void __init opal_platform_dump_init(void)
return;
}
- rc = opal_notifier_register(&dump_nb);
+ dump_irq = opal_event_request(ilog2(OPAL_EVENT_DUMP_AVAIL));
+ if (!dump_irq) {
+ pr_err("%s: Can't register OPAL event irq (%d)\n",
+ __func__, dump_irq);
+ return;
+ }
+
+ rc = request_threaded_irq(dump_irq, NULL, process_dump,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "opal-dump", NULL);
if (rc) {
- pr_warn("%s: Can't register OPAL event notifier (%d)\n",
- __func__, rc);
+ pr_err("%s: Can't request OPAL event irq (%d)\n",
+ __func__, rc);
return;
}
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 38ce757e5..37f959bf3 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -10,6 +10,7 @@
*/
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
@@ -236,7 +237,7 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
return elog;
}
-static void elog_work_fn(struct work_struct *work)
+static irqreturn_t elog_event(int irq, void *data)
{
__be64 size;
__be64 id;
@@ -250,7 +251,7 @@ static void elog_work_fn(struct work_struct *work)
rc = opal_get_elog_size(&id, &size, &type);
if (rc != OPAL_SUCCESS) {
pr_err("ELOG: OPAL log info read failed\n");
- return;
+ return IRQ_HANDLED;
}
elog_size = be64_to_cpu(size);
@@ -269,31 +270,16 @@ static void elog_work_fn(struct work_struct *work)
* entries.
*/
if (kset_find_obj(elog_kset, name))
- return;
+ return IRQ_HANDLED;
create_elog_obj(log_id, elog_size, elog_type);
-}
-
-static DECLARE_WORK(elog_work, elog_work_fn);
-static int elog_event(struct notifier_block *nb,
- unsigned long events, void *change)
-{
- /* check for error log event */
- if (events & OPAL_EVENT_ERROR_LOG_AVAIL)
- schedule_work(&elog_work);
- return 0;
+ return IRQ_HANDLED;
}
-static struct notifier_block elog_nb = {
- .notifier_call = elog_event,
- .next = NULL,
- .priority = 0
-};
-
int __init opal_elog_init(void)
{
- int rc = 0;
+ int rc = 0, irq;
/* ELOG not supported by firmware */
if (!opal_check_token(OPAL_ELOG_READ))
@@ -305,10 +291,18 @@ int __init opal_elog_init(void)
return -1;
}
- rc = opal_notifier_register(&elog_nb);
+ irq = opal_event_request(ilog2(OPAL_EVENT_ERROR_LOG_AVAIL));
+ if (!irq) {
+ pr_err("%s: Can't register OPAL event irq (%d)\n",
+ __func__, irq);
+ return irq;
+ }
+
+ rc = request_threaded_irq(irq, NULL, elog_event,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "opal-elog", NULL);
if (rc) {
- pr_err("%s: Can't register OPAL event notifier (%d)\n",
- __func__, rc);
+ pr_err("%s: Can't request OPAL event irq (%d)\n",
+ __func__, rc);
return rc;
}
diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c
index b322bfb51..a8f49d380 100644
--- a/arch/powerpc/platforms/powernv/opal-hmi.c
+++ b/arch/powerpc/platforms/powernv/opal-hmi.c
@@ -170,7 +170,7 @@ static struct notifier_block opal_hmi_handler_nb = {
.priority = 0,
};
-static int __init opal_hmi_handler_init(void)
+int __init opal_hmi_handler_init(void)
{
int ret;
@@ -186,4 +186,3 @@ static int __init opal_hmi_handler_init(void)
}
return 0;
}
-machine_subsys_initcall(powernv, opal_hmi_handler_init);
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
new file mode 100644
index 000000000..e2e7d75f5
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -0,0 +1,253 @@
+/*
+ * This file implements an irqchip for OPAL events. Whenever there is
+ * an interrupt that is handled by OPAL we get passed a list of events
+ * that Linux needs to do something about. These basically look like
+ * interrupts to Linux so we implement an irqchip to handle them.
+ *
+ * Copyright Alistair Popple, IBM Corporation 2014.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/irq_work.h>
+
+#include <asm/machdep.h>
+#include <asm/opal.h>
+
+#include "powernv.h"
+
+/* Maximum number of events supported by OPAL firmware */
+#define MAX_NUM_EVENTS 64
+
+struct opal_event_irqchip {
+ struct irq_chip irqchip;
+ struct irq_domain *domain;
+ unsigned long mask;
+};
+static struct opal_event_irqchip opal_event_irqchip;
+
+static unsigned int opal_irq_count;
+static unsigned int *opal_irqs;
+
+static void opal_handle_irq_work(struct irq_work *work);
+static __be64 last_outstanding_events;
+static struct irq_work opal_event_irq_work = {
+ .func = opal_handle_irq_work,
+};
+
+static void opal_event_mask(struct irq_data *d)
+{
+ clear_bit(d->hwirq, &opal_event_irqchip.mask);
+}
+
+static void opal_event_unmask(struct irq_data *d)
+{
+ set_bit(d->hwirq, &opal_event_irqchip.mask);
+
+ opal_poll_events(&last_outstanding_events);
+ if (last_outstanding_events & opal_event_irqchip.mask)
+ /* Need to retrigger the interrupt */
+ irq_work_queue(&opal_event_irq_work);
+}
+
+static int opal_event_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ /*
+ * For now we only support level triggered events. The irq
+ * handler will be called continuously until the event has
+ * been cleared in OPAL.
+ */
+ if (flow_type != IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct opal_event_irqchip opal_event_irqchip = {
+ .irqchip = {
+ .name = "OPAL EVT",
+ .irq_mask = opal_event_mask,
+ .irq_unmask = opal_event_unmask,
+ .irq_set_type = opal_event_set_type,
+ },
+ .mask = 0,
+};
+
+static int opal_event_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, &opal_event_irqchip);
+ irq_set_chip_and_handler(irq, &opal_event_irqchip.irqchip,
+ handle_level_irq);
+
+ return 0;
+}
+
+void opal_handle_events(uint64_t events)
+{
+ int virq, hwirq = 0;
+ u64 mask = opal_event_irqchip.mask;
+
+ if (!in_irq() && (events & mask)) {
+ last_outstanding_events = events;
+ irq_work_queue(&opal_event_irq_work);
+ return;
+ }
+
+ while (events & mask) {
+ hwirq = fls64(events) - 1;
+ if (BIT_ULL(hwirq) & mask) {
+ virq = irq_find_mapping(opal_event_irqchip.domain,
+ hwirq);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+ events &= ~BIT_ULL(hwirq);
+ }
+}
+
+static irqreturn_t opal_interrupt(int irq, void *data)
+{
+ __be64 events;
+
+ opal_handle_interrupt(virq_to_hw(irq), &events);
+ opal_handle_events(be64_to_cpu(events));
+
+ return IRQ_HANDLED;
+}
+
+static void opal_handle_irq_work(struct irq_work *work)
+{
+ opal_handle_events(be64_to_cpu(last_outstanding_events));
+}
+
+static int opal_event_match(struct irq_domain *h, struct device_node *node)
+{
+ return h->of_node == node;
+}
+
+static int opal_event_xlate(struct irq_domain *h, struct device_node *np,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_flags)
+{
+ *out_hwirq = intspec[0];
+ *out_flags = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
+static const struct irq_domain_ops opal_event_domain_ops = {
+ .match = opal_event_match,
+ .map = opal_event_map,
+ .xlate = opal_event_xlate,
+};
+
+void opal_event_shutdown(void)
+{
+ unsigned int i;
+
+ /* First free interrupts, which will also mask them */
+ for (i = 0; i < opal_irq_count; i++) {
+ if (opal_irqs[i])
+ free_irq(opal_irqs[i], NULL);
+ opal_irqs[i] = 0;
+ }
+}
+
+int __init opal_event_init(void)
+{
+ struct device_node *dn, *opal_node;
+ const __be32 *irqs;
+ int i, irqlen, rc = 0;
+
+ opal_node = of_find_node_by_path("/ibm,opal");
+ if (!opal_node) {
+ pr_warn("opal: Node not found\n");
+ return -ENODEV;
+ }
+
+ /* If dn is NULL it means the domain won't be linked to a DT
+ * node so therefore irq_of_parse_and_map(...) wont work. But
+ * that shouldn't be problem because if we're running a
+ * version of skiboot that doesn't have the dn then the
+ * devices won't have the correct properties and will have to
+ * fall back to the legacy method (opal_event_request(...))
+ * anyway. */
+ dn = of_find_compatible_node(NULL, NULL, "ibm,opal-event");
+ opal_event_irqchip.domain = irq_domain_add_linear(dn, MAX_NUM_EVENTS,
+ &opal_event_domain_ops, &opal_event_irqchip);
+ of_node_put(dn);
+ if (!opal_event_irqchip.domain) {
+ pr_warn("opal: Unable to create irq domain\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Get interrupt property */
+ irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
+ opal_irq_count = irqs ? (irqlen / 4) : 0;
+ pr_debug("Found %d interrupts reserved for OPAL\n", opal_irq_count);
+
+ /* Install interrupt handlers */
+ opal_irqs = kcalloc(opal_irq_count, sizeof(*opal_irqs), GFP_KERNEL);
+ for (i = 0; irqs && i < opal_irq_count; i++, irqs++) {
+ unsigned int irq, virq;
+
+ /* Get hardware and virtual IRQ */
+ irq = be32_to_cpup(irqs);
+ virq = irq_create_mapping(NULL, irq);
+ if (virq == NO_IRQ) {
+ pr_warn("Failed to map irq 0x%x\n", irq);
+ continue;
+ }
+
+ /* Install interrupt handler */
+ rc = request_irq(virq, opal_interrupt, 0, "opal", NULL);
+ if (rc) {
+ irq_dispose_mapping(virq);
+ pr_warn("Error %d requesting irq %d (0x%x)\n",
+ rc, virq, irq);
+ continue;
+ }
+
+ /* Cache IRQ */
+ opal_irqs[i] = virq;
+ }
+
+out:
+ of_node_put(opal_node);
+ return rc;
+}
+machine_arch_initcall(powernv, opal_event_init);
+
+/**
+ * opal_event_request(unsigned int opal_event_nr) - Request an event
+ * @opal_event_nr: the opal event number to request
+ *
+ * This routine can be used to find the linux virq number which can
+ * then be passed to request_irq to assign a handler for a particular
+ * opal event. This should only be used by legacy devices which don't
+ * have proper device tree bindings. Most devices should use
+ * irq_of_parse_and_map() instead.
+ */
+int opal_event_request(unsigned int opal_event_nr)
+{
+ if (WARN_ON_ONCE(!opal_event_irqchip.domain))
+ return NO_IRQ;
+
+ return irq_create_mapping(opal_event_irqchip.domain, opal_event_nr);
+}
+EXPORT_SYMBOL(opal_event_request);
diff --git a/arch/powerpc/platforms/powernv/opal-memory-errors.c b/arch/powerpc/platforms/powernv/opal-memory-errors.c
index 43db2136d..00a29432b 100644
--- a/arch/powerpc/platforms/powernv/opal-memory-errors.c
+++ b/arch/powerpc/platforms/powernv/opal-memory-errors.c
@@ -144,4 +144,4 @@ static int __init opal_mem_err_init(void)
}
return 0;
}
-machine_subsys_initcall(powernv, opal_mem_err_init);
+machine_device_initcall(powernv, opal_mem_err_init);
diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
new file mode 100644
index 000000000..4ece8e40d
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-prd.c
@@ -0,0 +1,448 @@
+/*
+ * OPAL Runtime Diagnostics interface driver
+ * Supported on POWERNV platform
+ *
+ * Copyright IBM Corporation 2015
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "opal-prd: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/poll.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/opal-prd.h>
+#include <asm/opal.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+
+/**
+ * The msg member must be at the end of the struct, as it's followed by the
+ * message data.
+ */
+struct opal_prd_msg_queue_item {
+ struct list_head list;
+ struct opal_prd_msg_header msg;
+};
+
+static struct device_node *prd_node;
+static LIST_HEAD(opal_prd_msg_queue);
+static DEFINE_SPINLOCK(opal_prd_msg_queue_lock);
+static DECLARE_WAIT_QUEUE_HEAD(opal_prd_msg_wait);
+static atomic_t prd_usage;
+
+static bool opal_prd_range_is_valid(uint64_t addr, uint64_t size)
+{
+ struct device_node *parent, *node;
+ bool found;
+
+ if (addr + size < addr)
+ return false;
+
+ parent = of_find_node_by_path("/reserved-memory");
+ if (!parent)
+ return false;
+
+ found = false;
+
+ for_each_child_of_node(parent, node) {
+ uint64_t range_addr, range_size, range_end;
+ const __be32 *addrp;
+ const char *label;
+
+ addrp = of_get_address(node, 0, &range_size, NULL);
+
+ range_addr = of_read_number(addrp, 2);
+ range_end = range_addr + range_size;
+
+ label = of_get_property(node, "ibm,prd-label", NULL);
+
+ /* PRD ranges need a label */
+ if (!label)
+ continue;
+
+ if (range_end <= range_addr)
+ continue;
+
+ if (addr >= range_addr && addr + size <= range_end) {
+ found = true;
+ of_node_put(node);
+ break;
+ }
+ }
+
+ of_node_put(parent);
+ return found;
+}
+
+static int opal_prd_open(struct inode *inode, struct file *file)
+{
+ /*
+ * Prevent multiple (separate) processes from concurrent interactions
+ * with the FW PRD channel
+ */
+ if (atomic_xchg(&prd_usage, 1) == 1)
+ return -EBUSY;
+
+ return 0;
+}
+
+/*
+ * opal_prd_mmap - maps firmware-provided ranges into userspace
+ * @file: file structure for the device
+ * @vma: VMA to map the registers into
+ */
+
+static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ size_t addr, size;
+ pgprot_t page_prot;
+ int rc;
+
+ pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n",
+ vma->vm_start, vma->vm_end, vma->vm_pgoff,
+ vma->vm_flags);
+
+ addr = vma->vm_pgoff << PAGE_SHIFT;
+ size = vma->vm_end - vma->vm_start;
+
+ /* ensure we're mapping within one of the allowable ranges */
+ if (!opal_prd_range_is_valid(addr, size))
+ return -EINVAL;
+
+ page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
+ size, vma->vm_page_prot);
+
+ rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
+ page_prot);
+
+ return rc;
+}
+
+static bool opal_msg_queue_empty(void)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
+ ret = list_empty(&opal_prd_msg_queue);
+ spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);
+
+ return ret;
+}
+
+static unsigned int opal_prd_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ poll_wait(file, &opal_prd_msg_wait, wait);
+
+ if (!opal_msg_queue_empty())
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static ssize_t opal_prd_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct opal_prd_msg_queue_item *item;
+ unsigned long flags;
+ ssize_t size, err;
+ int rc;
+
+ /* we need at least a header's worth of data */
+ if (count < sizeof(item->msg))
+ return -EINVAL;
+
+ if (*ppos)
+ return -ESPIPE;
+
+ item = NULL;
+
+ for (;;) {
+
+ spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
+ if (!list_empty(&opal_prd_msg_queue)) {
+ item = list_first_entry(&opal_prd_msg_queue,
+ struct opal_prd_msg_queue_item, list);
+ list_del(&item->list);
+ }
+ spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);
+
+ if (item)
+ break;
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ rc = wait_event_interruptible(opal_prd_msg_wait,
+ !opal_msg_queue_empty());
+ if (rc)
+ return -EINTR;
+ }
+
+ size = be16_to_cpu(item->msg.size);
+ if (size > count) {
+ err = -EINVAL;
+ goto err_requeue;
+ }
+
+ rc = copy_to_user(buf, &item->msg, size);
+ if (rc) {
+ err = -EFAULT;
+ goto err_requeue;
+ }
+
+ kfree(item);
+
+ return size;
+
+err_requeue:
+ /* eep! re-queue at the head of the list */
+ spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
+ list_add(&item->list, &opal_prd_msg_queue);
+ spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);
+ return err;
+}
+
+static ssize_t opal_prd_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct opal_prd_msg_header hdr;
+ ssize_t size;
+ void *msg;
+ int rc;
+
+ size = sizeof(hdr);
+
+ if (count < size)
+ return -EINVAL;
+
+ /* grab the header */
+ rc = copy_from_user(&hdr, buf, sizeof(hdr));
+ if (rc)
+ return -EFAULT;
+
+ size = be16_to_cpu(hdr.size);
+
+ msg = kmalloc(size, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ rc = copy_from_user(msg, buf, size);
+ if (rc) {
+ size = -EFAULT;
+ goto out_free;
+ }
+
+ rc = opal_prd_msg(msg);
+ if (rc) {
+ pr_warn("write: opal_prd_msg returned %d\n", rc);
+ size = -EIO;
+ }
+
+out_free:
+ kfree(msg);
+
+ return size;
+}
+
+static int opal_prd_release(struct inode *inode, struct file *file)
+{
+ struct opal_prd_msg_header msg;
+
+ msg.size = cpu_to_be16(sizeof(msg));
+ msg.type = OPAL_PRD_MSG_TYPE_FINI;
+
+ opal_prd_msg((struct opal_prd_msg *)&msg);
+
+ atomic_xchg(&prd_usage, 0);
+
+ return 0;
+}
+
+static long opal_prd_ioctl(struct file *file, unsigned int cmd,
+ unsigned long param)
+{
+ struct opal_prd_info info;
+ struct opal_prd_scom scom;
+ int rc = 0;
+
+ switch (cmd) {
+ case OPAL_PRD_GET_INFO:
+ memset(&info, 0, sizeof(info));
+ info.version = OPAL_PRD_KERNEL_VERSION;
+ rc = copy_to_user((void __user *)param, &info, sizeof(info));
+ if (rc)
+ return -EFAULT;
+ break;
+
+ case OPAL_PRD_SCOM_READ:
+ rc = copy_from_user(&scom, (void __user *)param, sizeof(scom));
+ if (rc)
+ return -EFAULT;
+
+ scom.rc = opal_xscom_read(scom.chip, scom.addr,
+ (__be64 *)&scom.data);
+ scom.data = be64_to_cpu(scom.data);
+ pr_devel("ioctl SCOM_READ: chip %llx addr %016llx data %016llx rc %lld\n",
+ scom.chip, scom.addr, scom.data, scom.rc);
+
+ rc = copy_to_user((void __user *)param, &scom, sizeof(scom));
+ if (rc)
+ return -EFAULT;
+ break;
+
+ case OPAL_PRD_SCOM_WRITE:
+ rc = copy_from_user(&scom, (void __user *)param, sizeof(scom));
+ if (rc)
+ return -EFAULT;
+
+ scom.rc = opal_xscom_write(scom.chip, scom.addr, scom.data);
+ pr_devel("ioctl SCOM_WRITE: chip %llx addr %016llx data %016llx rc %lld\n",
+ scom.chip, scom.addr, scom.data, scom.rc);
+
+ rc = copy_to_user((void __user *)param, &scom, sizeof(scom));
+ if (rc)
+ return -EFAULT;
+ break;
+
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static const struct file_operations opal_prd_fops = {
+ .open = opal_prd_open,
+ .mmap = opal_prd_mmap,
+ .poll = opal_prd_poll,
+ .read = opal_prd_read,
+ .write = opal_prd_write,
+ .unlocked_ioctl = opal_prd_ioctl,
+ .release = opal_prd_release,
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice opal_prd_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "opal-prd",
+ .fops = &opal_prd_fops,
+};
+
+/* opal interface */
+static int opal_prd_msg_notifier(struct notifier_block *nb,
+ unsigned long msg_type, void *_msg)
+{
+ struct opal_prd_msg_queue_item *item;
+ struct opal_prd_msg_header *hdr;
+ struct opal_msg *msg = _msg;
+ int msg_size, item_size;
+ unsigned long flags;
+
+ if (msg_type != OPAL_MSG_PRD)
+ return 0;
+
+ /* Calculate total size of the message and item we need to store. The
+ * 'size' field in the header includes the header itself. */
+ hdr = (void *)msg->params;
+ msg_size = be16_to_cpu(hdr->size);
+ item_size = msg_size + sizeof(*item) - sizeof(item->msg);
+
+ item = kzalloc(item_size, GFP_ATOMIC);
+ if (!item)
+ return -ENOMEM;
+
+ memcpy(&item->msg, msg->params, msg_size);
+
+ spin_lock_irqsave(&opal_prd_msg_queue_lock, flags);
+ list_add_tail(&item->list, &opal_prd_msg_queue);
+ spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags);
+
+ wake_up_interruptible(&opal_prd_msg_wait);
+
+ return 0;
+}
+
+static struct notifier_block opal_prd_event_nb = {
+ .notifier_call = opal_prd_msg_notifier,
+ .next = NULL,
+ .priority = 0,
+};
+
+static int opal_prd_probe(struct platform_device *pdev)
+{
+ int rc;
+
+ if (!pdev || !pdev->dev.of_node)
+ return -ENODEV;
+
+ /* We should only have one prd driver instance per machine; ensure
+ * that we only get a valid probe on a single OF node.
+ */
+ if (prd_node)
+ return -EBUSY;
+
+ prd_node = pdev->dev.of_node;
+
+ rc = opal_message_notifier_register(OPAL_MSG_PRD, &opal_prd_event_nb);
+ if (rc) {
+ pr_err("Couldn't register event notifier\n");
+ return rc;
+ }
+
+ rc = misc_register(&opal_prd_dev);
+ if (rc) {
+ pr_err("failed to register miscdev\n");
+ opal_message_notifier_unregister(OPAL_MSG_PRD,
+ &opal_prd_event_nb);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int opal_prd_remove(struct platform_device *pdev)
+{
+ misc_deregister(&opal_prd_dev);
+ opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb);
+ return 0;
+}
+
+static const struct of_device_id opal_prd_match[] = {
+ { .compatible = "ibm,opal-prd" },
+ { },
+};
+
+static struct platform_driver opal_prd_driver = {
+ .driver = {
+ .name = "opal-prd",
+ .owner = THIS_MODULE,
+ .of_match_table = opal_prd_match,
+ },
+ .probe = opal_prd_probe,
+ .remove = opal_prd_remove,
+};
+
+module_platform_driver(opal_prd_driver);
+
+MODULE_DEVICE_TABLE(of, opal_prd_match);
+MODULE_DESCRIPTION("PowerNV OPAL runtime diagnostic driver");
+MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/platforms/powernv/opal-sensor.c b/arch/powerpc/platforms/powernv/opal-sensor.c
index 655250499..a06059df9 100644
--- a/arch/powerpc/platforms/powernv/opal-sensor.c
+++ b/arch/powerpc/platforms/powernv/opal-sensor.c
@@ -77,7 +77,7 @@ out:
}
EXPORT_SYMBOL_GPL(opal_get_sensor_data);
-static __init int opal_sensor_init(void)
+int __init opal_sensor_init(void)
{
struct platform_device *pdev;
struct device_node *sensor;
@@ -93,4 +93,3 @@ static __init int opal_sensor_init(void)
return PTR_ERR_OR_ZERO(pdev);
}
-machine_subsys_initcall(powernv, opal_sensor_init);
diff --git a/arch/powerpc/platforms/powernv/opal-sysparam.c b/arch/powerpc/platforms/powernv/opal-sysparam.c
index 9d1acf22a..afe66c576 100644
--- a/arch/powerpc/platforms/powernv/opal-sysparam.c
+++ b/arch/powerpc/platforms/powernv/opal-sysparam.c
@@ -55,8 +55,10 @@ static ssize_t opal_get_sys_param(u32 param_id, u32 length, void *buffer)
}
ret = opal_get_param(token, param_id, (u64)buffer, length);
- if (ret != OPAL_ASYNC_COMPLETION)
+ if (ret != OPAL_ASYNC_COMPLETION) {
+ ret = opal_error_code(ret);
goto out_token;
+ }
ret = opal_async_wait_response(token, &msg);
if (ret) {
@@ -65,7 +67,7 @@ static ssize_t opal_get_sys_param(u32 param_id, u32 length, void *buffer)
goto out_token;
}
- ret = be64_to_cpu(msg.params[1]);
+ ret = opal_error_code(be64_to_cpu(msg.params[1]));
out_token:
opal_async_release_token(token);
@@ -89,8 +91,10 @@ static int opal_set_sys_param(u32 param_id, u32 length, void *buffer)
ret = opal_set_param(token, param_id, (u64)buffer, length);
- if (ret != OPAL_ASYNC_COMPLETION)
+ if (ret != OPAL_ASYNC_COMPLETION) {
+ ret = opal_error_code(ret);
goto out_token;
+ }
ret = opal_async_wait_response(token, &msg);
if (ret) {
@@ -99,7 +103,7 @@ static int opal_set_sys_param(u32 param_id, u32 length, void *buffer)
goto out_token;
}
- ret = be64_to_cpu(msg.params[1]);
+ ret = opal_error_code(be64_to_cpu(msg.params[1]));
out_token:
opal_async_release_token(token);
@@ -162,10 +166,20 @@ void __init opal_sys_param_init(void)
goto out;
}
+ /* Some systems do not use sysparams; this is not an error */
+ sysparam = of_find_node_by_path("/ibm,opal/sysparams");
+ if (!sysparam)
+ goto out;
+
+ if (!of_device_is_compatible(sysparam, "ibm,opal-sysparams")) {
+ pr_err("SYSPARAM: Opal sysparam node not compatible\n");
+ goto out_node_put;
+ }
+
sysparam_kobj = kobject_create_and_add("sysparams", opal_kobj);
if (!sysparam_kobj) {
pr_err("SYSPARAM: Failed to create sysparam kobject\n");
- goto out;
+ goto out_node_put;
}
/* Allocate big enough buffer for any get/set transactions */
@@ -176,30 +190,19 @@ void __init opal_sys_param_init(void)
goto out_kobj_put;
}
- sysparam = of_find_node_by_path("/ibm,opal/sysparams");
- if (!sysparam) {
- pr_err("SYSPARAM: Opal sysparam node not found\n");
- goto out_param_buf;
- }
-
- if (!of_device_is_compatible(sysparam, "ibm,opal-sysparams")) {
- pr_err("SYSPARAM: Opal sysparam node not compatible\n");
- goto out_node_put;
- }
-
/* Number of parameters exposed through DT */
count = of_property_count_strings(sysparam, "param-name");
if (count < 0) {
pr_err("SYSPARAM: No string found of property param-name in "
"the node %s\n", sysparam->name);
- goto out_node_put;
+ goto out_param_buf;
}
id = kzalloc(sizeof(*id) * count, GFP_KERNEL);
if (!id) {
pr_err("SYSPARAM: Failed to allocate memory to read parameter "
"id\n");
- goto out_node_put;
+ goto out_param_buf;
}
size = kzalloc(sizeof(*size) * count, GFP_KERNEL);
@@ -293,12 +296,12 @@ out_free_size:
kfree(size);
out_free_id:
kfree(id);
-out_node_put:
- of_node_put(sysparam);
out_param_buf:
kfree(param_data_buf);
out_kobj_put:
kobject_put(sysparam_kobj);
+out_node_put:
+ of_node_put(sysparam);
out:
return;
}
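The label reshuffle in this hunk restores the usual unwind rule: resources are released in the reverse order they were acquired, so the device-node reference that is now taken first is dropped last. A generic sketch of the pattern, with purely illustrative names and sizes:

    /* Illustrative only; names and sizes here are not taken from the patch. */
    #include <linux/of.h>
    #include <linux/kobject.h>
    #include <linux/slab.h>

    static int __init example_init(void)
    {
            struct device_node *node;
            struct kobject *kobj;
            void *buf;
            int rc = -ENOMEM;

            node = of_find_node_by_path("/example");        /* acquired first  */
            if (!node)
                    return 0;
            kobj = kobject_create_and_add("example", NULL); /* acquired second */
            if (!kobj)
                    goto out_node_put;
            buf = kzalloc(64, GFP_KERNEL);                  /* acquired third  */
            if (!buf)
                    goto out_kobj_put;
            return 0;                    /* all three kept for the module's lifetime */

    out_kobj_put:
            kobject_put(kobj);                              /* released in reverse order */
    out_node_put:
            of_node_put(node);
            return rc;
    }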
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index a7ade94cd..d6a7b8252 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -283,6 +283,7 @@ OPAL_CALL(opal_sensor_read, OPAL_SENSOR_READ);
OPAL_CALL(opal_get_param, OPAL_GET_PARAM);
OPAL_CALL(opal_set_param, OPAL_SET_PARAM);
OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI);
+OPAL_CALL(opal_config_cpu_idle_state, OPAL_CONFIG_CPU_IDLE_STATE);
OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG);
OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION);
OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION);
@@ -295,3 +296,4 @@ OPAL_CALL(opal_i2c_request, OPAL_I2C_REQUEST);
OPAL_CALL(opal_flash_read, OPAL_FLASH_READ);
OPAL_CALL(opal_flash_write, OPAL_FLASH_WRITE);
OPAL_CALL(opal_flash_erase, OPAL_FLASH_ERASE);
+OPAL_CALL(opal_prd_msg, OPAL_PRD_MSG);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 2241565b0..f084afa0e 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -53,13 +53,7 @@ static int mc_recoverable_range_len;
struct device_node *opal_node;
static DEFINE_SPINLOCK(opal_write_lock);
-static unsigned int *opal_irqs;
-static unsigned int opal_irq_count;
-static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
-static DEFINE_SPINLOCK(opal_notifier_lock);
-static uint64_t last_notified_mask = 0x0ul;
-static atomic_t opal_notifier_hold = ATOMIC_INIT(0);
static uint32_t opal_heartbeat;
static void opal_reinit_cores(void)
@@ -225,82 +219,6 @@ static int __init opal_register_exception_handlers(void)
}
machine_early_initcall(powernv, opal_register_exception_handlers);
-int opal_notifier_register(struct notifier_block *nb)
-{
- if (!nb) {
- pr_warning("%s: Invalid argument (%p)\n",
- __func__, nb);
- return -EINVAL;
- }
-
- atomic_notifier_chain_register(&opal_notifier_head, nb);
- return 0;
-}
-EXPORT_SYMBOL_GPL(opal_notifier_register);
-
-int opal_notifier_unregister(struct notifier_block *nb)
-{
- if (!nb) {
- pr_warning("%s: Invalid argument (%p)\n",
- __func__, nb);
- return -EINVAL;
- }
-
- atomic_notifier_chain_unregister(&opal_notifier_head, nb);
- return 0;
-}
-EXPORT_SYMBOL_GPL(opal_notifier_unregister);
-
-static void opal_do_notifier(uint64_t events)
-{
- unsigned long flags;
- uint64_t changed_mask;
-
- if (atomic_read(&opal_notifier_hold))
- return;
-
- spin_lock_irqsave(&opal_notifier_lock, flags);
- changed_mask = last_notified_mask ^ events;
- last_notified_mask = events;
- spin_unlock_irqrestore(&opal_notifier_lock, flags);
-
- /*
- * We feed with the event bits and changed bits for
- * enough information to the callback.
- */
- atomic_notifier_call_chain(&opal_notifier_head,
- events, (void *)changed_mask);
-}
-
-void opal_notifier_update_evt(uint64_t evt_mask,
- uint64_t evt_val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&opal_notifier_lock, flags);
- last_notified_mask &= ~evt_mask;
- last_notified_mask |= evt_val;
- spin_unlock_irqrestore(&opal_notifier_lock, flags);
-}
-
-void opal_notifier_enable(void)
-{
- int64_t rc;
- __be64 evt = 0;
-
- atomic_set(&opal_notifier_hold, 0);
-
- /* Process pending events */
- rc = opal_poll_events(&evt);
- if (rc == OPAL_SUCCESS && evt)
- opal_do_notifier(be64_to_cpu(evt));
-}
-
-void opal_notifier_disable(void)
-{
- atomic_set(&opal_notifier_hold, 1);
-}
-
/*
* Opal message notifier based on message type. Allow subscribers to get
 * notified for specific message type.
@@ -317,6 +235,7 @@ int opal_message_notifier_register(enum opal_msg_type msg_type,
return atomic_notifier_chain_register(
&opal_msg_notifier_head[msg_type], nb);
}
+EXPORT_SYMBOL_GPL(opal_message_notifier_register);
int opal_message_notifier_unregister(enum opal_msg_type msg_type,
struct notifier_block *nb)
@@ -324,6 +243,7 @@ int opal_message_notifier_unregister(enum opal_msg_type msg_type,
return atomic_notifier_chain_unregister(
&opal_msg_notifier_head[msg_type], nb);
}
+EXPORT_SYMBOL_GPL(opal_message_notifier_unregister);
static void opal_message_do_notify(uint32_t msg_type, void *msg)
{
@@ -364,36 +284,36 @@ static void opal_handle_message(void)
opal_message_do_notify(type, (void *)&msg);
}
-static int opal_message_notify(struct notifier_block *nb,
- unsigned long events, void *change)
+static irqreturn_t opal_message_notify(int irq, void *data)
{
- if (events & OPAL_EVENT_MSG_PENDING)
- opal_handle_message();
- return 0;
+ opal_handle_message();
+ return IRQ_HANDLED;
}
-static struct notifier_block opal_message_nb = {
- .notifier_call = opal_message_notify,
- .next = NULL,
- .priority = 0,
-};
-
static int __init opal_message_init(void)
{
- int ret, i;
+ int ret, i, irq;
for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);
- ret = opal_notifier_register(&opal_message_nb);
+ irq = opal_event_request(ilog2(OPAL_EVENT_MSG_PENDING));
+ if (!irq) {
+ pr_err("%s: Can't register OPAL event irq (%d)\n",
+ __func__, irq);
+ return irq;
+ }
+
+ ret = request_irq(irq, opal_message_notify,
+ IRQ_TYPE_LEVEL_HIGH, "opal-msg", NULL);
if (ret) {
- pr_err("%s: Can't register OPAL event notifier (%d)\n",
+ pr_err("%s: Can't request OPAL event irq (%d)\n",
__func__, ret);
return ret;
}
+
return 0;
}
-machine_early_initcall(powernv, opal_message_init);
int opal_get_chars(uint32_t vtermno, char *buf, int count)
{
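With this change OPAL message delivery hangs off a dedicated event interrupt instead of the old global notifier chain. A hedged sketch of the same pattern opal_message_init() uses above, for a hypothetical consumer of OPAL_EVENT_EPOW (handler and names are illustrative):

    /* Sketch only; mirrors the opal_event_request()/request_irq() pairing above. */
    #include <linux/interrupt.h>
    #include <linux/log2.h>
    #include <asm/opal.h>

    static irqreturn_t example_epow_handler(int irq, void *data)
    {
            /* The opal-irqchip core has already latched the event for us. */
            return IRQ_HANDLED;
    }

    static int __init example_epow_init(void)
    {
            int virq = opal_event_request(ilog2(OPAL_EVENT_EPOW));

            if (!virq)
                    return -ENODEV;

            /* Same flags value as opal_message_init() above. */
            return request_irq(virq, example_epow_handler, IRQ_TYPE_LEVEL_HIGH,
                               "example-epow", NULL);
    }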
@@ -573,7 +493,7 @@ int opal_handle_hmi_exception(struct pt_regs *regs)
local_paca->hmi_event_available = 0;
rc = opal_poll_events(&evt);
if (rc == OPAL_SUCCESS && evt)
- opal_do_notifier(be64_to_cpu(evt));
+ opal_handle_events(be64_to_cpu(evt));
return 1;
}
@@ -610,17 +530,6 @@ out:
return !!recover_addr;
}
-static irqreturn_t opal_interrupt(int irq, void *data)
-{
- __be64 events;
-
- opal_handle_interrupt(virq_to_hw(irq), &events);
-
- opal_do_notifier(be64_to_cpu(events));
-
- return IRQ_HANDLED;
-}
-
static int opal_sysfs_init(void)
{
opal_kobj = kobject_create_and_add("opal", firmware_kobj);
@@ -693,21 +602,13 @@ static void __init opal_dump_region_init(void)
"rc = %d\n", rc);
}
-static void opal_flash_init(struct device_node *opal_node)
-{
- struct device_node *np;
-
- for_each_child_of_node(opal_node, np)
- if (of_device_is_compatible(np, "ibm,opal-flash"))
- of_platform_device_create(np, NULL, NULL);
-}
-
-static void opal_ipmi_init(struct device_node *opal_node)
+static void opal_pdev_init(struct device_node *opal_node,
+ const char *compatible)
{
struct device_node *np;
for_each_child_of_node(opal_node, np)
- if (of_device_is_compatible(np, "ibm,opal-ipmi"))
+ if (of_device_is_compatible(np, compatible))
of_platform_device_create(np, NULL, NULL);
}
@@ -719,52 +620,15 @@ static void opal_i2c_create_devs(void)
of_platform_device_create(np, NULL, NULL);
}
-static void __init opal_irq_init(struct device_node *dn)
-{
- const __be32 *irqs;
- int i, irqlen;
-
- /* Get interrupt property */
- irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
- opal_irq_count = irqs ? (irqlen / 4) : 0;
- pr_debug("Found %d interrupts reserved for OPAL\n", opal_irq_count);
- if (!opal_irq_count)
- return;
-
- /* Install interrupt handlers */
- opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
- for (i = 0; irqs && i < opal_irq_count; i++, irqs++) {
- unsigned int irq, virq;
- int rc;
-
- /* Get hardware and virtual IRQ */
- irq = be32_to_cpup(irqs);
- virq = irq_create_mapping(NULL, irq);
- if (virq == NO_IRQ) {
- pr_warn("Failed to map irq 0x%x\n", irq);
- continue;
- }
-
- /* Install interrupt handler */
- rc = request_irq(virq, opal_interrupt, 0, "opal", NULL);
- if (rc) {
- irq_dispose_mapping(virq);
- pr_warn("Error %d requesting irq %d (0x%x)\n",
- rc, virq, irq);
- continue;
- }
-
- /* Cache IRQ */
- opal_irqs[i] = virq;
- }
-}
-
static int kopald(void *unused)
{
+ __be64 events;
+
set_freezable();
do {
try_to_freeze();
- opal_poll_events(NULL);
+ opal_poll_events(&events);
+ opal_handle_events(be64_to_cpu(events));
msleep_interruptible(opal_heartbeat);
} while (!kthread_should_stop());
@@ -807,15 +671,24 @@ static int __init opal_init(void)
of_node_put(consoles);
}
+ /* Initialise OPAL messaging system */
+ opal_message_init();
+
+ /* Initialise OPAL asynchronous completion interface */
+ opal_async_comp_init();
+
+ /* Initialise OPAL sensor interface */
+ opal_sensor_init();
+
+	/* Initialise OPAL hypervisor maintenance interrupt handling */
+ opal_hmi_handler_init();
+
/* Create i2c platform devices */
opal_i2c_create_devs();
 	/* Setup a heartbeat thread if requested by OPAL */
opal_init_heartbeat();
- /* Find all OPAL interrupts and request them */
- opal_irq_init(opal_node);
-
/* Create "opal" kobject under /sys/firmware */
rc = opal_sysfs_init();
if (rc == 0) {
@@ -835,10 +708,10 @@ static int __init opal_init(void)
opal_msglog_init();
}
- /* Initialize OPAL IPMI backend */
- opal_ipmi_init(opal_node);
-
- opal_flash_init(opal_node);
+ /* Initialize platform devices: IPMI backend, PRD & flash interface */
+ opal_pdev_init(opal_node, "ibm,opal-ipmi");
+ opal_pdev_init(opal_node, "ibm,opal-flash");
+ opal_pdev_init(opal_node, "ibm,opal-prd");
return 0;
}
@@ -846,15 +719,9 @@ machine_subsys_initcall(powernv, opal_init);
void opal_shutdown(void)
{
- unsigned int i;
long rc = OPAL_BUSY;
- /* First free interrupts, which will also mask them */
- for (i = 0; i < opal_irq_count; i++) {
- if (opal_irqs[i])
- free_irq(opal_irqs[i], NULL);
- opal_irqs[i] = 0;
- }
+ opal_event_shutdown();
/*
	 * Then sync with OPAL which ensures anything that can
@@ -876,11 +743,14 @@ void opal_shutdown(void)
/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);
+EXPORT_SYMBOL_GPL(opal_xscom_read);
+EXPORT_SYMBOL_GPL(opal_xscom_write);
EXPORT_SYMBOL_GPL(opal_ipmi_send);
EXPORT_SYMBOL_GPL(opal_ipmi_recv);
EXPORT_SYMBOL_GPL(opal_flash_read);
EXPORT_SYMBOL_GPL(opal_flash_write);
EXPORT_SYMBOL_GPL(opal_flash_erase);
+EXPORT_SYMBOL_GPL(opal_prd_msg);
/* Convert a region of vmalloc memory to an opal sg list */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
@@ -954,6 +824,7 @@ int opal_error_code(int rc)
case OPAL_ASYNC_COMPLETION: return -EINPROGRESS;
case OPAL_BUSY_EVENT: return -EBUSY;
case OPAL_NO_MEM: return -ENOMEM;
+ case OPAL_PERMISSION: return -EPERM;
case OPAL_UNSUPPORTED: return -EIO;
case OPAL_HARDWARE: return -EIO;
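With OPAL_PERMISSION now translated to -EPERM, callers can funnel any raw OPAL status straight through opal_error_code() before returning to the rest of the kernel, as the opal-sysparam hunks earlier in this patch do. A minimal sketch (opal_frob() is a hypothetical wrapper, not a real OPAL call):

    /* Sketch; opal_frob() stands in for any OPAL call returning an OPAL_* status. */
    static int example_opal_call(void)
    {
            int64_t rc = opal_frob();

            if (rc != OPAL_SUCCESS)
                    return opal_error_code(rc);     /* e.g. OPAL_PERMISSION -> -EPERM */

            return 0;
    }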
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index f8bc950ef..85cbc96ef 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -23,6 +23,9 @@
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/memblock.h>
+#include <linux/iommu.h>
+#include <linux/rculist.h>
+#include <linux/sizes.h>
#include <asm/sections.h>
#include <asm/io.h>
@@ -38,8 +41,9 @@
#include <asm/debug.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>
+#include <asm/mmzone.h>
-#include <misc/cxl.h>
+#include <misc/cxl-base.h>
#include "powernv.h"
#include "pci.h"
@@ -47,6 +51,11 @@
/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
#define TCE32_TABLE_SIZE ((0x10000000 / 0x1000) * 8)
+#define POWERNV_IOMMU_DEFAULT_LEVELS 1
+#define POWERNV_IOMMU_MAX_LEVELS 5
+
+static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl);
+
static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
const char *fmt, ...)
{
@@ -1086,10 +1095,6 @@ static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
return;
}
- pe->tce32_table = kzalloc_node(sizeof(struct iommu_table),
- GFP_KERNEL, hose->node);
- pe->tce32_table->data = pe;
-
/* Associate it with all child devices */
pnv_ioda_setup_same_PE(bus, pe);
@@ -1283,36 +1288,27 @@ m64_failed:
return -EBUSY;
}
+static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
+ int num);
+static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
+
static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
{
- struct pci_bus *bus;
- struct pci_controller *hose;
- struct pnv_phb *phb;
struct iommu_table *tbl;
- unsigned long addr;
int64_t rc;
- bus = dev->bus;
- hose = pci_bus_to_host(bus);
- phb = hose->private_data;
- tbl = pe->tce32_table;
- addr = tbl->it_base;
-
- opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
- pe->pe_number << 1, 1, __pa(addr),
- 0, 0x1000);
-
- rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
- pe->pe_number,
- (pe->pe_number << 1) + 1,
- pe->tce_bypass_base,
- 0);
+ tbl = pe->table_group.tables[0];
+ rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
if (rc)
pe_warn(pe, "OPAL error %ld release DMA window\n", rc);
+ pnv_pci_ioda2_set_bypass(pe, false);
+ if (pe->table_group.group) {
+ iommu_group_put(pe->table_group.group);
+ BUG_ON(pe->table_group.group);
+ }
+ pnv_pci_ioda2_table_free_pages(tbl);
iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
- free_pages(addr, get_order(TCE32_TABLE_SIZE));
- pe->tce32_table = NULL;
}
static void pnv_ioda_release_vf_PE(struct pci_dev *pdev, u16 num_vfs)
@@ -1460,10 +1456,6 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
continue;
}
- pe->tce32_table = kzalloc_node(sizeof(struct iommu_table),
- GFP_KERNEL, hose->node);
- pe->tce32_table->data = pe;
-
/* Put PE to the list */
mutex_lock(&phb->ioda.pe_list_mutex);
list_add_tail(&pe->list, &phb->ioda.pe_list);
@@ -1598,12 +1590,19 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
pe = &phb->ioda.pe_array[pdn->pe_number];
WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
- set_iommu_table_base_and_group(&pdev->dev, pe->tce32_table);
+ set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
+ /*
+ * Note: iommu_add_device() will fail here as
+ * for physical PE: the device is already added by now;
+ * for virtual PE: sysfs entries are not ready yet and
+ * tce_iommu_bus_notifier will add the device to a group later.
+ */
}
-static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
- struct pci_dev *pdev, u64 dma_mask)
+static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
struct pci_dn *pdn = pci_get_pdn(pdev);
struct pnv_ioda_pe *pe;
uint64_t top;
@@ -1625,7 +1624,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
} else {
dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
set_dma_ops(&pdev->dev, &dma_iommu_ops);
- set_iommu_table_base(&pdev->dev, pe->tce32_table);
+ set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
}
*pdev->dev.dma_mask = dma_mask;
return 0;
@@ -1654,36 +1653,36 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pnv_phb *phb,
}
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
- struct pci_bus *bus,
- bool add_to_iommu_group)
+ struct pci_bus *bus)
{
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
- if (add_to_iommu_group)
- set_iommu_table_base_and_group(&dev->dev,
- pe->tce32_table);
- else
- set_iommu_table_base(&dev->dev, pe->tce32_table);
+ set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
+ iommu_add_device(&dev->dev);
- if (dev->subordinate)
- pnv_ioda_setup_bus_dma(pe, dev->subordinate,
- add_to_iommu_group);
+ if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
+ pnv_ioda_setup_bus_dma(pe, dev->subordinate);
}
}
-static void pnv_pci_ioda1_tce_invalidate(struct pnv_ioda_pe *pe,
- struct iommu_table *tbl,
- __be64 *startp, __be64 *endp, bool rm)
+static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
+ unsigned long index, unsigned long npages, bool rm)
{
+ struct iommu_table_group_link *tgl = list_first_entry_or_null(
+ &tbl->it_group_list, struct iommu_table_group_link,
+ next);
+ struct pnv_ioda_pe *pe = container_of(tgl->table_group,
+ struct pnv_ioda_pe, table_group);
__be64 __iomem *invalidate = rm ?
- (__be64 __iomem *)pe->tce_inval_reg_phys :
- (__be64 __iomem *)tbl->it_index;
+ (__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys :
+ pe->phb->ioda.tce_inval_reg;
unsigned long start, end, inc;
const unsigned shift = tbl->it_page_shift;
- start = __pa(startp);
- end = __pa(endp);
+ start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
+ end = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset +
+ npages - 1);
/* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
if (tbl->it_busno) {
@@ -1719,26 +1718,79 @@ static void pnv_pci_ioda1_tce_invalidate(struct pnv_ioda_pe *pe,
*/
}
-static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
- struct iommu_table *tbl,
- __be64 *startp, __be64 *endp, bool rm)
+static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
+ long npages, unsigned long uaddr,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
+ attrs);
+
+ if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
+ pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);
+
+ return ret;
+}
+
+#ifdef CONFIG_IOMMU_API
+static int pnv_ioda1_tce_xchg(struct iommu_table *tbl, long index,
+ unsigned long *hpa, enum dma_data_direction *direction)
+{
+ long ret = pnv_tce_xchg(tbl, index, hpa, direction);
+
+ if (!ret && (tbl->it_type &
+ (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE)))
+ pnv_pci_ioda1_tce_invalidate(tbl, index, 1, false);
+
+ return ret;
+}
+#endif
+
+static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
+ long npages)
+{
+ pnv_tce_free(tbl, index, npages);
+
+ if (tbl->it_type & TCE_PCI_SWINV_FREE)
+ pnv_pci_ioda1_tce_invalidate(tbl, index, npages, false);
+}
+
+static struct iommu_table_ops pnv_ioda1_iommu_ops = {
+ .set = pnv_ioda1_tce_build,
+#ifdef CONFIG_IOMMU_API
+ .exchange = pnv_ioda1_tce_xchg,
+#endif
+ .clear = pnv_ioda1_tce_free,
+ .get = pnv_tce_get,
+};
+
+static inline void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_ioda_pe *pe)
+{
+ /* 01xb - invalidate TCEs that match the specified PE# */
+ unsigned long val = (0x4ull << 60) | (pe->pe_number & 0xFF);
+ struct pnv_phb *phb = pe->phb;
+
+ if (!phb->ioda.tce_inval_reg)
+ return;
+
+ mb(); /* Ensure above stores are visible */
+ __raw_writeq(cpu_to_be64(val), phb->ioda.tce_inval_reg);
+}
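For reference, the whole-PE invalidation encoding above places the opcode in the top nibble and the PE number in the low byte, so PE# 5 would write 0x4000000000000005 to the kill register. A quick userspace check of that arithmetic:

    /* Arithmetic check only; mirrors the encoding used above. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            unsigned pe_number = 5;
            uint64_t val = (0x4ull << 60) | (pe_number & 0xFF);

            printf("%#018llx\n", (unsigned long long)val);  /* 0x4000000000000005 */
            return 0;
    }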
+
+static void pnv_pci_ioda2_do_tce_invalidate(unsigned pe_number, bool rm,
+ __be64 __iomem *invalidate, unsigned shift,
+ unsigned long index, unsigned long npages)
{
unsigned long start, end, inc;
- __be64 __iomem *invalidate = rm ?
- (__be64 __iomem *)pe->tce_inval_reg_phys :
- (__be64 __iomem *)tbl->it_index;
- const unsigned shift = tbl->it_page_shift;
/* We'll invalidate DMA address in PE scope */
start = 0x2ull << 60;
- start |= (pe->pe_number & 0xFF);
+ start |= (pe_number & 0xFF);
end = start;
/* Figure out the start, end and step */
- inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64));
- start |= (inc << shift);
- inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64));
- end |= (inc << shift);
+ start |= (index << shift);
+ end |= ((index + npages - 1) << shift);
inc = (0x1ull << shift);
mb();
@@ -1751,25 +1803,83 @@ static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
}
}
-void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
- __be64 *startp, __be64 *endp, bool rm)
+static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
+ unsigned long index, unsigned long npages, bool rm)
{
- struct pnv_ioda_pe *pe = tbl->data;
- struct pnv_phb *phb = pe->phb;
+ struct iommu_table_group_link *tgl;
- if (phb->type == PNV_PHB_IODA1)
- pnv_pci_ioda1_tce_invalidate(pe, tbl, startp, endp, rm);
- else
- pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp, rm);
+ list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
+ struct pnv_ioda_pe *pe = container_of(tgl->table_group,
+ struct pnv_ioda_pe, table_group);
+ __be64 __iomem *invalidate = rm ?
+ (__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys :
+ pe->phb->ioda.tce_inval_reg;
+
+ pnv_pci_ioda2_do_tce_invalidate(pe->pe_number, rm,
+ invalidate, tbl->it_page_shift,
+ index, npages);
+ }
+}
+
+static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
+ long npages, unsigned long uaddr,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
+ attrs);
+
+ if (!ret && (tbl->it_type & TCE_PCI_SWINV_CREATE))
+ pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
+
+ return ret;
+}
+
+#ifdef CONFIG_IOMMU_API
+static int pnv_ioda2_tce_xchg(struct iommu_table *tbl, long index,
+ unsigned long *hpa, enum dma_data_direction *direction)
+{
+ long ret = pnv_tce_xchg(tbl, index, hpa, direction);
+
+ if (!ret && (tbl->it_type &
+ (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE)))
+ pnv_pci_ioda2_tce_invalidate(tbl, index, 1, false);
+
+ return ret;
+}
+#endif
+
+static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
+ long npages)
+{
+ pnv_tce_free(tbl, index, npages);
+
+ if (tbl->it_type & TCE_PCI_SWINV_FREE)
+ pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
+}
+
+static void pnv_ioda2_table_free(struct iommu_table *tbl)
+{
+ pnv_pci_ioda2_table_free_pages(tbl);
+ iommu_free_table(tbl, "pnv");
}
+static struct iommu_table_ops pnv_ioda2_iommu_ops = {
+ .set = pnv_ioda2_tce_build,
+#ifdef CONFIG_IOMMU_API
+ .exchange = pnv_ioda2_tce_xchg,
+#endif
+ .clear = pnv_ioda2_tce_free,
+ .get = pnv_tce_get,
+ .free = pnv_ioda2_table_free,
+};
+
static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
struct pnv_ioda_pe *pe, unsigned int base,
unsigned int segs)
{
struct page *tce_mem = NULL;
- const __be64 *swinvp;
struct iommu_table *tbl;
unsigned int i;
int64_t rc;
@@ -1783,6 +1893,11 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
if (WARN_ON(pe->tce32_seg >= 0))
return;
+ tbl = pnv_pci_table_alloc(phb->hose->node);
+ iommu_register_group(&pe->table_group, phb->hose->global_number,
+ pe->pe_number);
+ pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group);
+
/* Grab a 32-bit TCE table */
pe->tce32_seg = base;
pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
@@ -1817,39 +1932,30 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
}
/* Setup linux iommu table */
- tbl = pe->tce32_table;
pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
base << 28, IOMMU_PAGE_SHIFT_4K);
/* OPAL variant of P7IOC SW invalidated TCEs */
- swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
- if (swinvp) {
- /* We need a couple more fields -- an address and a data
- * to or. Since the bus is only printed out on table free
- * errors, and on the first pass the data will be a relative
- * bus number, print that out instead.
- */
- pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
- tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
- 8);
+ if (phb->ioda.tce_inval_reg)
tbl->it_type |= (TCE_PCI_SWINV_CREATE |
TCE_PCI_SWINV_FREE |
TCE_PCI_SWINV_PAIR);
- }
+
+ tbl->it_ops = &pnv_ioda1_iommu_ops;
+ pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
+ pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
iommu_init_table(tbl, phb->hose->node);
if (pe->flags & PNV_IODA_PE_DEV) {
- iommu_register_group(tbl, phb->hose->global_number,
- pe->pe_number);
- set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
- } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) {
- iommu_register_group(tbl, phb->hose->global_number,
- pe->pe_number);
- pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
- } else if (pe->flags & PNV_IODA_PE_VF) {
- iommu_register_group(tbl, phb->hose->global_number,
- pe->pe_number);
- }
+ /*
+ * Setting table base here only for carrying iommu_group
+ * further down to let iommu_add_device() do the job.
+ * pnv_pci_ioda_dma_dev_setup will override it later anyway.
+ */
+ set_iommu_table_base(&pe->pdev->dev, tbl);
+ iommu_add_device(&pe->pdev->dev);
+ } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
+ pnv_ioda_setup_bus_dma(pe, pe->pbus);
return;
fail:
@@ -1858,11 +1964,53 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
pe->tce32_seg = -1;
if (tce_mem)
__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
+ if (tbl) {
+ pnv_pci_unlink_table_and_group(tbl, &pe->table_group);
+ iommu_free_table(tbl, "pnv");
+ }
}
-static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
+static long pnv_pci_ioda2_set_window(struct iommu_table_group *table_group,
+ int num, struct iommu_table *tbl)
+{
+ struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
+ table_group);
+ struct pnv_phb *phb = pe->phb;
+ int64_t rc;
+ const unsigned long size = tbl->it_indirect_levels ?
+ tbl->it_level_size : tbl->it_size;
+ const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
+ const __u64 win_size = tbl->it_size << tbl->it_page_shift;
+
+ pe_info(pe, "Setting up window#%d %llx..%llx pg=%x\n", num,
+ start_addr, start_addr + win_size - 1,
+ IOMMU_PAGE_SIZE(tbl));
+
+ /*
+ * Map TCE table through TVT. The TVE index is the PE number
+ * shifted by 1 bit for 32-bits DMA space.
+ */
+ rc = opal_pci_map_pe_dma_window(phb->opal_id,
+ pe->pe_number,
+ (pe->pe_number << 1) + num,
+ tbl->it_indirect_levels + 1,
+ __pa(tbl->it_base),
+ size << 3,
+ IOMMU_PAGE_SIZE(tbl));
+ if (rc) {
+ pe_err(pe, "Failed to configure TCE table, err %ld\n", rc);
+ return rc;
+ }
+
+ pnv_pci_link_table_and_group(phb->hose->node, num,
+ tbl, &pe->table_group);
+ pnv_pci_ioda2_tce_invalidate_entire(pe);
+
+ return 0;
+}
+
+static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
{
- struct pnv_ioda_pe *pe = tbl->data;
uint16_t window_id = (pe->pe_number << 1 ) + 1;
int64_t rc;
@@ -1882,17 +2030,6 @@ static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
window_id,
pe->tce_bypass_base,
0);
-
- /*
- * EEH needs the mapping between IOMMU table and group
- * of those VFIO/KVM pass-through devices. We can postpone
- * resetting DMA ops until the DMA mask is configured in
- * host side.
- */
- if (pe->pdev)
- set_iommu_table_base(&pe->pdev->dev, tbl);
- else
- pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
}
if (rc)
pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
@@ -1900,106 +2037,364 @@ static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
pe->tce_bypass_enabled = enable;
}
-static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
- struct pnv_ioda_pe *pe)
+static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ __u32 page_shift, __u64 window_size, __u32 levels,
+ struct iommu_table *tbl);
+
+static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group,
+ int num, __u32 page_shift, __u64 window_size, __u32 levels,
+ struct iommu_table **ptbl)
{
- /* TVE #1 is selected by PCI address bit 59 */
- pe->tce_bypass_base = 1ull << 59;
+ struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
+ table_group);
+ int nid = pe->phb->hose->node;
+ __u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start;
+ long ret;
+ struct iommu_table *tbl;
- /* Install set_bypass callback for VFIO */
- pe->tce32_table->set_bypass = pnv_pci_ioda2_set_bypass;
+ tbl = pnv_pci_table_alloc(nid);
+ if (!tbl)
+ return -ENOMEM;
- /* Enable bypass by default */
- pnv_pci_ioda2_set_bypass(pe->tce32_table, true);
+ ret = pnv_pci_ioda2_table_alloc_pages(nid,
+ bus_offset, page_shift, window_size,
+ levels, tbl);
+ if (ret) {
+ iommu_free_table(tbl, "pnv");
+ return ret;
+ }
+
+ tbl->it_ops = &pnv_ioda2_iommu_ops;
+ if (pe->phb->ioda.tce_inval_reg)
+ tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
+
+ *ptbl = tbl;
+
+ return 0;
}
-static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
- struct pnv_ioda_pe *pe)
+static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
+{
+ struct iommu_table *tbl = NULL;
+ long rc;
+
+ rc = pnv_pci_ioda2_create_table(&pe->table_group, 0,
+ IOMMU_PAGE_SHIFT_4K,
+ pe->table_group.tce32_size,
+ POWERNV_IOMMU_DEFAULT_LEVELS, &tbl);
+ if (rc) {
+ pe_err(pe, "Failed to create 32-bit TCE table, err %ld",
+ rc);
+ return rc;
+ }
+
+ iommu_init_table(tbl, pe->phb->hose->node);
+
+ rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
+ if (rc) {
+ pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n",
+ rc);
+ pnv_ioda2_table_free(tbl);
+ return rc;
+ }
+
+ if (!pnv_iommu_bypass_disabled)
+ pnv_pci_ioda2_set_bypass(pe, true);
+
+ /* OPAL variant of PHB3 invalidated TCEs */
+ if (pe->phb->ioda.tce_inval_reg)
+ tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
+
+ /*
+ * Setting table base here only for carrying iommu_group
+ * further down to let iommu_add_device() do the job.
+ * pnv_pci_ioda_dma_dev_setup will override it later anyway.
+ */
+ if (pe->flags & PNV_IODA_PE_DEV)
+ set_iommu_table_base(&pe->pdev->dev, tbl);
+
+ return 0;
+}
+
+#if defined(CONFIG_IOMMU_API) || defined(CONFIG_PCI_IOV)
+static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
+ int num)
+{
+ struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
+ table_group);
+ struct pnv_phb *phb = pe->phb;
+ long ret;
+
+ pe_info(pe, "Removing DMA window #%d\n", num);
+
+ ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
+ (pe->pe_number << 1) + num,
+ 0/* levels */, 0/* table address */,
+ 0/* table size */, 0/* page size */);
+ if (ret)
+ pe_warn(pe, "Unmapping failed, ret = %ld\n", ret);
+ else
+ pnv_pci_ioda2_tce_invalidate_entire(pe);
+
+ pnv_pci_unlink_table_and_group(table_group->tables[num], table_group);
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_IOMMU_API
+static unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
+ __u64 window_size, __u32 levels)
+{
+ unsigned long bytes = 0;
+ const unsigned window_shift = ilog2(window_size);
+ unsigned entries_shift = window_shift - page_shift;
+ unsigned table_shift = entries_shift + 3;
+ unsigned long tce_table_size = max(0x1000UL, 1UL << table_shift);
+ unsigned long direct_table_size;
+
+ if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS) ||
+ (window_size > memory_hotplug_max()) ||
+ !is_power_of_2(window_size))
+ return 0;
+
+ /* Calculate a direct table size from window_size and levels */
+ entries_shift = (entries_shift + levels - 1) / levels;
+ table_shift = entries_shift + 3;
+ table_shift = max_t(unsigned, table_shift, PAGE_SHIFT);
+ direct_table_size = 1UL << table_shift;
+
+ for ( ; levels; --levels) {
+ bytes += _ALIGN_UP(tce_table_size, direct_table_size);
+
+ tce_table_size /= direct_table_size;
+ tce_table_size <<= 3;
+ tce_table_size = _ALIGN_UP(tce_table_size, direct_table_size);
+ }
+
+ return bytes;
+}
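Worked example of the size estimate above: a 1GB window with 4K IOMMU pages split over two levels needs a 2MB leaf array plus one 64K intermediate page, assuming 64K kernel pages. A userspace model of the same arithmetic, with _ALIGN_UP open-coded:

    /* Userspace model of the calculation above; PAGE_SHIFT = 16 (64K pages) is an assumption. */
    #include <stdio.h>

    #define PAGE_SHIFT 16

    static unsigned long align_up(unsigned long v, unsigned long a)
    {
            return (v + a - 1) & ~(a - 1);
    }

    int main(void)
    {
            unsigned page_shift = 12, levels = 2;
            unsigned window_shift = 30;                             /* ilog2(1GB window) */
            unsigned entries_shift = window_shift - page_shift;     /* 18  */
            unsigned table_shift = entries_shift + 3;               /* 21  */
            unsigned long tce_table_size = 1UL << table_shift;      /* 2MB */
            unsigned long bytes = 0, direct;

            entries_shift = (entries_shift + levels - 1) / levels;  /* 9   */
            table_shift = entries_shift + 3;                        /* 12  */
            if (table_shift < PAGE_SHIFT)
                    table_shift = PAGE_SHIFT;                       /* 16  */
            direct = 1UL << table_shift;                            /* 64K */

            for (; levels; --levels) {
                    bytes += align_up(tce_table_size, direct);
                    tce_table_size = align_up((tce_table_size / direct) << 3, direct);
            }
            printf("%lu bytes\n", bytes);   /* 2162688 = 2MB + 64K */
            return 0;
    }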
+
+static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
+{
+ struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
+ table_group);
+ /* Store @tbl as pnv_pci_ioda2_unset_window() resets it */
+ struct iommu_table *tbl = pe->table_group.tables[0];
+
+ pnv_pci_ioda2_set_bypass(pe, false);
+ pnv_pci_ioda2_unset_window(&pe->table_group, 0);
+ pnv_ioda2_table_free(tbl);
+}
+
+static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
+{
+ struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
+ table_group);
+
+ pnv_pci_ioda2_setup_default_config(pe);
+}
+
+static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
+ .get_table_size = pnv_pci_ioda2_get_table_size,
+ .create_table = pnv_pci_ioda2_create_table,
+ .set_window = pnv_pci_ioda2_set_window,
+ .unset_window = pnv_pci_ioda2_unset_window,
+ .take_ownership = pnv_ioda2_take_ownership,
+ .release_ownership = pnv_ioda2_release_ownership,
+};
+#endif
+
+static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb)
{
- struct page *tce_mem = NULL;
- void *addr;
const __be64 *swinvp;
- struct iommu_table *tbl;
- unsigned int tce_table_size, end;
- int64_t rc;
- /* We shouldn't already have a 32-bit DMA associated */
- if (WARN_ON(pe->tce32_seg >= 0))
+ /* OPAL variant of PHB3 invalidated TCEs */
+ swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
+ if (!swinvp)
return;
- /* The PE will reserve all possible 32-bits space */
- pe->tce32_seg = 0;
- end = (1 << ilog2(phb->ioda.m32_pci_base));
- tce_table_size = (end / 0x1000) * 8;
- pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
- end);
+ phb->ioda.tce_inval_reg_phys = be64_to_cpup(swinvp);
+ phb->ioda.tce_inval_reg = ioremap(phb->ioda.tce_inval_reg_phys, 8);
+}
- /* Allocate TCE table */
- tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
- get_order(tce_table_size));
+static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
+ unsigned levels, unsigned long limit,
+ unsigned long *current_offset, unsigned long *total_allocated)
+{
+ struct page *tce_mem = NULL;
+ __be64 *addr, *tmp;
+ unsigned order = max_t(unsigned, shift, PAGE_SHIFT) - PAGE_SHIFT;
+ unsigned long allocated = 1UL << (order + PAGE_SHIFT);
+ unsigned entries = 1UL << (shift - 3);
+ long i;
+
+ tce_mem = alloc_pages_node(nid, GFP_KERNEL, order);
if (!tce_mem) {
- pe_err(pe, "Failed to allocate a 32-bit TCE memory\n");
- goto fail;
+ pr_err("Failed to allocate a TCE memory, order=%d\n", order);
+ return NULL;
}
addr = page_address(tce_mem);
- memset(addr, 0, tce_table_size);
+ memset(addr, 0, allocated);
+ *total_allocated += allocated;
+
+ --levels;
+ if (!levels) {
+ *current_offset += allocated;
+ return addr;
+ }
+
+ for (i = 0; i < entries; ++i) {
+ tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
+ levels, limit, current_offset, total_allocated);
+ if (!tmp)
+ break;
+
+ addr[i] = cpu_to_be64(__pa(tmp) |
+ TCE_PCI_READ | TCE_PCI_WRITE);
+
+ if (*current_offset >= limit)
+ break;
+ }
+
+ return addr;
+}
+
+static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
+ unsigned long size, unsigned level);
+
+static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
+ __u32 page_shift, __u64 window_size, __u32 levels,
+ struct iommu_table *tbl)
+{
+ void *addr;
+ unsigned long offset = 0, level_shift, total_allocated = 0;
+ const unsigned window_shift = ilog2(window_size);
+ unsigned entries_shift = window_shift - page_shift;
+ unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
+ const unsigned long tce_table_size = 1UL << table_shift;
+
+ if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
+ return -EINVAL;
+
+ if ((window_size > memory_hotplug_max()) || !is_power_of_2(window_size))
+ return -EINVAL;
+
+ /* Adjust direct table size from window_size and levels */
+ entries_shift = (entries_shift + levels - 1) / levels;
+ level_shift = entries_shift + 3;
+ level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
+
+ /* Allocate TCE table */
+ addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
+ levels, tce_table_size, &offset, &total_allocated);
+
+ /* addr==NULL means that the first level allocation failed */
+ if (!addr)
+ return -ENOMEM;
/*
- * Map TCE table through TVT. The TVE index is the PE number
- * shifted by 1 bit for 32-bits DMA space.
+ * First level was allocated but some lower level failed as
+ * we did not allocate as much as we wanted,
+ * release partially allocated table.
*/
- rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
- pe->pe_number << 1, 1, __pa(addr),
- tce_table_size, 0x1000);
- if (rc) {
- pe_err(pe, "Failed to configure 32-bit TCE table,"
- " err %ld\n", rc);
- goto fail;
+ if (offset < tce_table_size) {
+ pnv_pci_ioda2_table_do_free_pages(addr,
+ 1ULL << (level_shift - 3), levels - 1);
+ return -ENOMEM;
}
/* Setup linux iommu table */
- tbl = pe->tce32_table;
- pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0,
- IOMMU_PAGE_SHIFT_4K);
+ pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset,
+ page_shift);
+ tbl->it_level_size = 1ULL << (level_shift - 3);
+ tbl->it_indirect_levels = levels - 1;
+ tbl->it_allocated_size = total_allocated;
- /* OPAL variant of PHB3 invalidated TCEs */
- swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
- if (swinvp) {
- /* We need a couple more fields -- an address and a data
- * to or. Since the bus is only printed out on table free
- * errors, and on the first pass the data will be a relative
- * bus number, print that out instead.
- */
- pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
- tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
- 8);
- tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
+ pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
+ window_size, tce_table_size, bus_offset);
+
+ return 0;
+}
+
+static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
+ unsigned long size, unsigned level)
+{
+ const unsigned long addr_ul = (unsigned long) addr &
+ ~(TCE_PCI_READ | TCE_PCI_WRITE);
+
+ if (level) {
+ long i;
+ u64 *tmp = (u64 *) addr_ul;
+
+ for (i = 0; i < size; ++i) {
+ unsigned long hpa = be64_to_cpu(tmp[i]);
+
+ if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE)))
+ continue;
+
+ pnv_pci_ioda2_table_do_free_pages(__va(hpa), size,
+ level - 1);
+ }
}
- iommu_init_table(tbl, phb->hose->node);
- if (pe->flags & PNV_IODA_PE_DEV) {
- iommu_register_group(tbl, phb->hose->global_number,
- pe->pe_number);
- set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
- } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) {
- iommu_register_group(tbl, phb->hose->global_number,
- pe->pe_number);
- pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
- } else if (pe->flags & PNV_IODA_PE_VF) {
- iommu_register_group(tbl, phb->hose->global_number,
- pe->pe_number);
- }
-
- /* Also create a bypass window */
- if (!pnv_iommu_bypass_disabled)
- pnv_pci_ioda2_setup_bypass_pe(phb, pe);
+ free_pages(addr_ul, get_order(size << 3));
+}
- return;
-fail:
- if (pe->tce32_seg >= 0)
- pe->tce32_seg = -1;
- if (tce_mem)
- __free_pages(tce_mem, get_order(tce_table_size));
+static void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl)
+{
+ const unsigned long size = tbl->it_indirect_levels ?
+ tbl->it_level_size : tbl->it_size;
+
+ if (!tbl->it_size)
+ return;
+
+ pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size,
+ tbl->it_indirect_levels);
+}
+
+static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
+ struct pnv_ioda_pe *pe)
+{
+ int64_t rc;
+
+ /* We shouldn't already have a 32-bit DMA associated */
+ if (WARN_ON(pe->tce32_seg >= 0))
+ return;
+
+ /* TVE #1 is selected by PCI address bit 59 */
+ pe->tce_bypass_base = 1ull << 59;
+
+ iommu_register_group(&pe->table_group, phb->hose->global_number,
+ pe->pe_number);
+
+ /* The PE will reserve all possible 32-bits space */
+ pe->tce32_seg = 0;
+ pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
+ phb->ioda.m32_pci_base);
+
+ /* Setup linux iommu table */
+ pe->table_group.tce32_start = 0;
+ pe->table_group.tce32_size = phb->ioda.m32_pci_base;
+ pe->table_group.max_dynamic_windows_supported =
+ IOMMU_TABLE_GROUP_MAX_TABLES;
+ pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS;
+ pe->table_group.pgsizes = SZ_4K | SZ_64K | SZ_16M;
+#ifdef CONFIG_IOMMU_API
+ pe->table_group.ops = &pnv_pci_ioda2_ops;
+#endif
+
+ rc = pnv_pci_ioda2_setup_default_config(pe);
+ if (rc) {
+ if (pe->tce32_seg >= 0)
+ pe->tce32_seg = -1;
+ return;
+ }
+
+ if (pe->flags & PNV_IODA_PE_DEV)
+ iommu_add_device(&pe->pdev->dev);
+ else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
+ pnv_ioda_setup_bus_dma(pe, pe->pbus);
}
static void pnv_ioda_setup_dma(struct pnv_phb *phb)
@@ -2024,6 +2419,8 @@ static void pnv_ioda_setup_dma(struct pnv_phb *phb)
pr_info("PCI: %d PE# for a total weight of %d\n",
phb->ioda.dma_pe_count, phb->ioda.dma_weight);
+ pnv_pci_ioda_setup_opal_tce_kill(phb);
+
/* Walk our PE list and configure their DMA segments, hand them
* out one base segment plus any residual segments based on
* weight
@@ -2642,12 +3039,27 @@ static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
}
-static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
+static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
{
+ struct pnv_phb *phb = hose->private_data;
+
opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE,
OPAL_ASSERT_RESET);
}
+static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
+ .dma_dev_setup = pnv_pci_dma_dev_setup,
+#ifdef CONFIG_PCI_MSI
+ .setup_msi_irqs = pnv_setup_msi_irqs,
+ .teardown_msi_irqs = pnv_teardown_msi_irqs,
+#endif
+ .enable_device_hook = pnv_pci_enable_device_hook,
+ .window_alignment = pnv_pci_window_alignment,
+ .reset_secondary_bus = pnv_pci_reset_secondary_bus,
+ .dma_set_mask = pnv_pci_ioda_dma_set_mask,
+ .shutdown = pnv_pci_ioda_shutdown,
+};
+
static void __init pnv_pci_init_ioda_phb(struct device_node *np,
u64 hub_id, int ioda_type)
{
@@ -2791,12 +3203,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
/* Setup TCEs */
phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
- phb->dma_set_mask = pnv_pci_ioda_dma_set_mask;
phb->dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask;
- /* Setup shutdown function for kexec */
- phb->shutdown = pnv_pci_ioda_shutdown;
-
/* Setup MSI support */
pnv_pci_init_ioda_msis(phb);
@@ -2808,10 +3216,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
* the child P2P bridges) can form individual PE.
*/
ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
- pnv_pci_controller_ops.enable_device_hook = pnv_pci_enable_device_hook;
- pnv_pci_controller_ops.window_alignment = pnv_pci_window_alignment;
- pnv_pci_controller_ops.reset_secondary_bus = pnv_pci_reset_secondary_bus;
- hose->controller_ops = pnv_pci_controller_ops;
+ hose->controller_ops = pnv_pci_ioda_controller_ops;
#ifdef CONFIG_PCI_IOV
ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index 4729ca793..f2bdfea3b 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -83,18 +83,42 @@ static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb)
static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */
+static struct iommu_table_ops pnv_p5ioc2_iommu_ops = {
+ .set = pnv_tce_build,
+#ifdef CONFIG_IOMMU_API
+ .exchange = pnv_tce_xchg,
+#endif
+ .clear = pnv_tce_free,
+ .get = pnv_tce_get,
+};
+
static void pnv_pci_p5ioc2_dma_dev_setup(struct pnv_phb *phb,
struct pci_dev *pdev)
{
- if (phb->p5ioc2.iommu_table.it_map == NULL) {
- iommu_init_table(&phb->p5ioc2.iommu_table, phb->hose->node);
- iommu_register_group(&phb->p5ioc2.iommu_table,
+ struct iommu_table *tbl = phb->p5ioc2.table_group.tables[0];
+
+ if (!tbl->it_map) {
+ tbl->it_ops = &pnv_p5ioc2_iommu_ops;
+ iommu_init_table(tbl, phb->hose->node);
+ iommu_register_group(&phb->p5ioc2.table_group,
pci_domain_nr(phb->hose->bus), phb->opal_id);
+ INIT_LIST_HEAD_RCU(&tbl->it_group_list);
+ pnv_pci_link_table_and_group(phb->hose->node, 0,
+ tbl, &phb->p5ioc2.table_group);
}
- set_iommu_table_base_and_group(&pdev->dev, &phb->p5ioc2.iommu_table);
+ set_iommu_table_base(&pdev->dev, tbl);
+ iommu_add_device(&pdev->dev);
}
+static const struct pci_controller_ops pnv_pci_p5ioc2_controller_ops = {
+ .dma_dev_setup = pnv_pci_dma_dev_setup,
+#ifdef CONFIG_PCI_MSI
+ .setup_msi_irqs = pnv_setup_msi_irqs,
+ .teardown_msi_irqs = pnv_teardown_msi_irqs,
+#endif
+};
+
static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
void *tce_mem, u64 tce_size)
{
@@ -103,6 +127,8 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
u64 phb_id;
int64_t rc;
static int primary = 1;
+ struct iommu_table_group *table_group;
+ struct iommu_table *tbl;
pr_info(" Initializing p5ioc2 PHB %s\n", np->full_name);
@@ -133,7 +159,7 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
phb->hose->first_busno = 0;
phb->hose->last_busno = 0xff;
phb->hose->private_data = phb;
- phb->hose->controller_ops = pnv_pci_controller_ops;
+ phb->hose->controller_ops = pnv_pci_p5ioc2_controller_ops;
phb->hub_id = hub_id;
phb->opal_id = phb_id;
phb->type = PNV_PHB_P5IOC2;
@@ -172,6 +198,15 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
pnv_pci_setup_iommu_table(&phb->p5ioc2.iommu_table,
tce_mem, tce_size, 0,
IOMMU_PAGE_SHIFT_4K);
+ /*
+ * We do not allocate iommu_table as we do not support
+ * hotplug or SRIOV on P5IOC2 and therefore iommu_free_table()
+ * should not be called for phb->p5ioc2.table_group.tables[0] ever.
+ */
+ tbl = phb->p5ioc2.table_group.tables[0] = &phb->p5ioc2.iommu_table;
+ table_group = &phb->p5ioc2.table_group;
+ table_group->tce32_start = tbl->it_offset << tbl->it_page_shift;
+ table_group->tce32_size = tbl->it_size << tbl->it_page_shift;
}
void __init pnv_pci_init_p5ioc2_hub(struct device_node *np)
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index bca2aeb6e..765d8ed55 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -45,7 +45,7 @@
//#define cfg_dbg(fmt...) printk(fmt)
#ifdef CONFIG_PCI_MSI
-static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
@@ -94,7 +94,7 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
return 0;
}
-static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
+void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
@@ -572,80 +572,152 @@ struct pci_ops pnv_pci_ops = {
.write = pnv_pci_write_config,
};
-static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
- unsigned long uaddr, enum dma_data_direction direction,
- struct dma_attrs *attrs, bool rm)
+static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
{
- u64 proto_tce;
- __be64 *tcep, *tces;
- u64 rpn;
-
- proto_tce = TCE_PCI_READ; // Read allowed
+ __be64 *tmp = ((__be64 *)tbl->it_base);
+ int level = tbl->it_indirect_levels;
+ const long shift = ilog2(tbl->it_level_size);
+ unsigned long mask = (tbl->it_level_size - 1) << (level * shift);
+
+ while (level) {
+ int n = (idx & mask) >> (level * shift);
+ unsigned long tce = be64_to_cpu(tmp[n]);
+
+ tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
+ idx &= ~mask;
+ mask >>= shift;
+ --level;
+ }
- if (direction != DMA_TO_DEVICE)
- proto_tce |= TCE_PCI_WRITE;
+ return tmp + idx;
+}
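The walk above peels one selector per indirection level off the TCE index. A userspace model of the index arithmetic, assuming 512 entries per level and a single indirect level: index 1000 selects entry 1 in the top-level table and entry 488 in the leaf page.

    /* Model of the index decomposition only; no real TCE memory is touched. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long it_level_size = 512, idx = 1000;
            int level = 1;                          /* it_indirect_levels */
            const long shift = 9;                   /* ilog2(512) */
            unsigned long mask = (it_level_size - 1) << (level * shift);

            while (level) {
                    int n = (idx & mask) >> (level * shift);

                    printf("level %d: entry %d\n", level, n);       /* entry 1 */
                    idx &= ~mask;
                    mask >>= shift;
                    --level;
            }
            printf("leaf entry %lu\n", idx);        /* entry 488 */
            return 0;
    }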
- tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset;
- rpn = __pa(uaddr) >> tbl->it_page_shift;
+int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
+ unsigned long uaddr, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ u64 proto_tce = iommu_direction_to_tce_perm(direction);
+ u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
+ long i;
- while (npages--)
- *(tcep++) = cpu_to_be64(proto_tce |
- (rpn++ << tbl->it_page_shift));
+ for (i = 0; i < npages; i++) {
+ unsigned long newtce = proto_tce |
+ ((rpn + i) << tbl->it_page_shift);
+ unsigned long idx = index - tbl->it_offset + i;
- /* Some implementations won't cache invalid TCEs and thus may not
- * need that flush. We'll probably turn it_type into a bit mask
- * of flags if that becomes the case
- */
- if (tbl->it_type & TCE_PCI_SWINV_CREATE)
- pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm);
+ *(pnv_tce(tbl, idx)) = cpu_to_be64(newtce);
+ }
return 0;
}
-static int pnv_tce_build_vm(struct iommu_table *tbl, long index, long npages,
- unsigned long uaddr,
- enum dma_data_direction direction,
- struct dma_attrs *attrs)
+#ifdef CONFIG_IOMMU_API
+int pnv_tce_xchg(struct iommu_table *tbl, long index,
+ unsigned long *hpa, enum dma_data_direction *direction)
{
- return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs,
- false);
+ u64 proto_tce = iommu_direction_to_tce_perm(*direction);
+ unsigned long newtce = *hpa | proto_tce, oldtce;
+ unsigned long idx = index - tbl->it_offset;
+
+ BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
+
+ oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce));
+ *hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE);
+ *direction = iommu_tce_direction(oldtce);
+
+ return 0;
}
+#endif
-static void pnv_tce_free(struct iommu_table *tbl, long index, long npages,
- bool rm)
+void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
{
- __be64 *tcep, *tces;
+ long i;
- tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset;
+ for (i = 0; i < npages; i++) {
+ unsigned long idx = index - tbl->it_offset + i;
- while (npages--)
- *(tcep++) = cpu_to_be64(0);
+ *(pnv_tce(tbl, idx)) = cpu_to_be64(0);
+ }
+}
- if (tbl->it_type & TCE_PCI_SWINV_FREE)
- pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm);
+unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
+{
+ return *(pnv_tce(tbl, index - tbl->it_offset));
}
-static void pnv_tce_free_vm(struct iommu_table *tbl, long index, long npages)
+struct iommu_table *pnv_pci_table_alloc(int nid)
{
- pnv_tce_free(tbl, index, npages, false);
+ struct iommu_table *tbl;
+
+ tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
+ INIT_LIST_HEAD_RCU(&tbl->it_group_list);
+
+ return tbl;
}
-static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
+long pnv_pci_link_table_and_group(int node, int num,
+ struct iommu_table *tbl,
+ struct iommu_table_group *table_group)
{
- return ((u64 *)tbl->it_base)[index - tbl->it_offset];
+ struct iommu_table_group_link *tgl = NULL;
+
+ if (WARN_ON(!tbl || !table_group))
+ return -EINVAL;
+
+ tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
+ node);
+ if (!tgl)
+ return -ENOMEM;
+
+ tgl->table_group = table_group;
+ list_add_rcu(&tgl->next, &tbl->it_group_list);
+
+ table_group->tables[num] = tbl;
+
+ return 0;
}
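pnv_pci_link_table_and_group() is always paired with the unlink helper below around a window's lifetime; pci-ioda.c uses exactly this pairing when creating and releasing DMA windows. A hedged usage sketch, with error handling trimmed:

    /* Usage sketch only; function name is illustrative, helpers are the ones in this file. */
    static int example_window_lifetime(int nid, struct iommu_table_group *table_group)
    {
            struct iommu_table *tbl = pnv_pci_table_alloc(nid);
            long rc;

            if (!tbl)
                    return -ENOMEM;

            rc = pnv_pci_link_table_and_group(nid, 0, tbl, table_group);
            if (rc) {
                    iommu_free_table(tbl, "pnv");
                    return rc;
            }

            /* ... program and use window #0 ... */

            pnv_pci_unlink_table_and_group(tbl, table_group);
            iommu_free_table(tbl, "pnv");
            return 0;
    }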
-static int pnv_tce_build_rm(struct iommu_table *tbl, long index, long npages,
- unsigned long uaddr,
- enum dma_data_direction direction,
- struct dma_attrs *attrs)
+static void pnv_iommu_table_group_link_free(struct rcu_head *head)
{
- return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs, true);
+ struct iommu_table_group_link *tgl = container_of(head,
+ struct iommu_table_group_link, rcu);
+
+ kfree(tgl);
}
-static void pnv_tce_free_rm(struct iommu_table *tbl, long index, long npages)
+void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
+ struct iommu_table_group *table_group)
{
- pnv_tce_free(tbl, index, npages, true);
+ long i;
+ bool found;
+ struct iommu_table_group_link *tgl;
+
+ if (!tbl || !table_group)
+ return;
+
+ /* Remove link to a group from table's list of attached groups */
+ found = false;
+ list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
+ if (tgl->table_group == table_group) {
+ list_del_rcu(&tgl->next);
+ call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
+ found = true;
+ break;
+ }
+ }
+ if (WARN_ON(!found))
+ return;
+
+ /* Clean a pointer to iommu_table in iommu_table_group::tables[] */
+ found = false;
+ for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
+ if (table_group->tables[i] == tbl) {
+ table_group->tables[i] = NULL;
+ found = true;
+ break;
+ }
+ }
+ WARN_ON(!found);
}
void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
@@ -662,7 +734,7 @@ void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
tbl->it_type = TCE_PCI;
}
-static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
+void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
@@ -689,16 +761,6 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
phb->dma_dev_setup(phb, pdev);
}
-int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
-{
- struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- if (phb && phb->dma_set_mask)
- return phb->dma_set_mask(phb, pdev, dma_mask);
- return __dma_set_mask(&pdev->dev, dma_mask);
-}
-
u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
@@ -714,12 +776,9 @@ void pnv_pci_shutdown(void)
{
struct pci_controller *hose;
- list_for_each_entry(hose, &hose_list, list_node) {
- struct pnv_phb *phb = hose->private_data;
-
- if (phb && phb->shutdown)
- phb->shutdown(phb);
- }
+ list_for_each_entry(hose, &hose_list, list_node)
+ if (hose->controller_ops.shutdown)
+ hose->controller_ops.shutdown(hose);
}
/* Fixup wrong class code in p7ioc and p8 root complex */
@@ -762,22 +821,7 @@ void __init pnv_pci_init(void)
pci_devs_phb_init();
/* Configure IOMMU DMA hooks */
- ppc_md.tce_build = pnv_tce_build_vm;
- ppc_md.tce_free = pnv_tce_free_vm;
- ppc_md.tce_build_rm = pnv_tce_build_rm;
- ppc_md.tce_free_rm = pnv_tce_free_rm;
- ppc_md.tce_get = pnv_tce_get;
set_pci_dma_ops(&dma_iommu_ops);
-
- /* Configure MSIs */
-#ifdef CONFIG_PCI_MSI
- ppc_md.setup_msi_irqs = pnv_setup_msi_irqs;
- ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs;
-#endif
}
machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);
-
-struct pci_controller_ops pnv_pci_controller_ops = {
- .dma_dev_setup = pnv_pci_dma_dev_setup,
-};
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 070ee888f..8ef2d28ad 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -57,8 +57,7 @@ struct pnv_ioda_pe {
/* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
int tce32_seg;
int tce32_segcount;
- struct iommu_table *tce32_table;
- phys_addr_t tce_inval_reg_phys;
+ struct iommu_table_group table_group;
/* 64-bit TCE bypass region */
bool tce_bypass_enabled;
@@ -106,13 +105,10 @@ struct pnv_phb {
unsigned int hwirq, unsigned int virq,
unsigned int is_64, struct msi_msg *msg);
void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
- int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev,
- u64 dma_mask);
u64 (*dma_get_required_mask)(struct pnv_phb *phb,
struct pci_dev *pdev);
void (*fixup_phb)(struct pci_controller *hose);
u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
- void (*shutdown)(struct pnv_phb *phb);
int (*init_m64)(struct pnv_phb *phb);
void (*reserve_m64_pe)(struct pnv_phb *phb);
int (*pick_m64_pe)(struct pnv_phb *phb, struct pci_bus *bus, int all);
@@ -123,6 +119,7 @@ struct pnv_phb {
union {
struct {
struct iommu_table iommu_table;
+ struct iommu_table_group table_group;
} p5ioc2;
struct {
@@ -186,6 +183,12 @@ struct pnv_phb {
* boot for resource allocation purposes
*/
struct list_head pe_dma_list;
+
+ /* TCE cache invalidate registers (physical and
+ * remapped)
+ */
+ phys_addr_t tce_inval_reg_phys;
+ __be64 __iomem *tce_inval_reg;
} ioda;
};
@@ -200,6 +203,13 @@ struct pnv_phb {
};
extern struct pci_ops pnv_pci_ops;
+extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
+ unsigned long uaddr, enum dma_data_direction direction,
+ struct dma_attrs *attrs);
+extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
+extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
+ unsigned long *hpa, enum dma_data_direction *direction);
+extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
unsigned char *log_buff);
@@ -207,6 +217,13 @@ int pnv_pci_cfg_read(struct pci_dn *pdn,
int where, int size, u32 *val);
int pnv_pci_cfg_write(struct pci_dn *pdn,
int where, int size, u32 val);
+extern struct iommu_table *pnv_pci_table_alloc(int nid);
+
+extern long pnv_pci_link_table_and_group(int node, int num,
+ struct iommu_table *tbl,
+ struct iommu_table_group *table_group);
+extern void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
+ struct iommu_table_group *table_group);
extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
void *tce_mem, u64 tce_size,
u64 dma_offset, unsigned page_shift);
@@ -218,4 +235,8 @@ extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
+extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
+extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
+extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
+
#endif /* __POWERNV_PCI_H */
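
Editor's note: among the prototypes pci.h now exports, pnv_tce_xchg() takes pointers to a host address and a DMA direction and, per its declared signature, exchanges them with the current TCE entry, handing the previous mapping back through the same pointers; this is the primitive an external owner such as VFIO can use to take over a live table. A rough sketch of that exchange semantic on a plain array, assuming nothing about the real TCE encoding:

#include <stdio.h>

static unsigned long tce[8];

/* Swap entry @index with *hpa and return the previous value via *hpa. */
static void tce_xchg(long index, unsigned long *hpa)
{
	unsigned long old = tce[index];

	tce[index] = *hpa;
	*hpa = old;
}

int main(void)
{
	unsigned long addr = 0x1000;

	tce_xchg(3, &addr);			/* install 0x1000, addr becomes 0 */
	printf("previous entry: %#lx\n", addr);

	addr = 0x2000;
	tce_xchg(3, &addr);			/* replace, addr becomes 0x1000 */
	printf("previous entry: %#lx\n", addr);
	return 0;
}
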
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 826d2c9be..9269e30e4 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -12,29 +12,24 @@ struct pci_dev;
#ifdef CONFIG_PCI
extern void pnv_pci_init(void);
extern void pnv_pci_shutdown(void);
-extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask);
extern u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev);
#else
static inline void pnv_pci_init(void) { }
static inline void pnv_pci_shutdown(void) { }
-static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
-{
- return -ENODEV;
-}
-
static inline u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
{
return 0;
}
#endif
-extern struct pci_controller_ops pnv_pci_controller_ops;
-
extern u32 pnv_get_supported_cpuidle_states(void);
extern void pnv_lpc_init(void);
+extern void opal_handle_events(uint64_t events);
+extern void opal_event_shutdown(void);
+
bool cpu_core_split_required(void);
#endif /* _POWERNV_H */
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 16fdcb23f..53737e019 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -35,12 +35,8 @@
#include <asm/opal.h>
#include <asm/kexec.h>
#include <asm/smp.h>
-#include <asm/cputhreads.h>
-#include <asm/cpuidle.h>
-#include <asm/code-patching.h>
#include "powernv.h"
-#include "subcore.h"
static void __init pnv_setup_arch(void)
{
@@ -111,7 +107,7 @@ static void pnv_prepare_going_down(void)
* Disable all notifiers from OPAL, we can't
* service interrupts anymore anyway
*/
- opal_notifier_disable();
+ opal_event_shutdown();
/* Soft disable interrupts */
local_irq_disable();
@@ -169,13 +165,6 @@ static void pnv_progress(char *s, unsigned short hex)
{
}
-static int pnv_dma_set_mask(struct device *dev, u64 dma_mask)
-{
- if (dev_is_pci(dev))
- return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask);
- return __dma_set_mask(dev, dma_mask);
-}
-
static u64 pnv_dma_get_required_mask(struct device *dev)
{
if (dev_is_pci(dev))
@@ -277,173 +266,6 @@ static void __init pnv_setup_machdep_opal(void)
ppc_md.handle_hmi_exception = opal_handle_hmi_exception;
}
-static u32 supported_cpuidle_states;
-
-int pnv_save_sprs_for_winkle(void)
-{
- int cpu;
- int rc;
-
- /*
- * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric accross
- * all cpus at boot. Get these reg values of current cpu and use the
- * same accross all cpus.
- */
- uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
- uint64_t hid0_val = mfspr(SPRN_HID0);
- uint64_t hid1_val = mfspr(SPRN_HID1);
- uint64_t hid4_val = mfspr(SPRN_HID4);
- uint64_t hid5_val = mfspr(SPRN_HID5);
- uint64_t hmeer_val = mfspr(SPRN_HMEER);
-
- for_each_possible_cpu(cpu) {
- uint64_t pir = get_hard_smp_processor_id(cpu);
- uint64_t hsprg0_val = (uint64_t)&paca[cpu];
-
- /*
- * HSPRG0 is used to store the cpu's pointer to paca. Hence last
- * 3 bits are guaranteed to be 0. Program slw to restore HSPRG0
- * with 63rd bit set, so that when a thread wakes up at 0x100 we
- * can use this bit to distinguish between fastsleep and
- * deep winkle.
- */
- hsprg0_val |= 1;
-
- rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
- if (rc != 0)
- return rc;
-
- rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
- if (rc != 0)
- return rc;
-
- /* HIDs are per core registers */
- if (cpu_thread_in_core(cpu) == 0) {
-
- rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
- if (rc != 0)
- return rc;
-
- rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
- if (rc != 0)
- return rc;
-
- rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
- if (rc != 0)
- return rc;
-
- rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
- if (rc != 0)
- return rc;
-
- rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
- if (rc != 0)
- return rc;
- }
- }
-
- return 0;
-}
-
-static void pnv_alloc_idle_core_states(void)
-{
- int i, j;
- int nr_cores = cpu_nr_cores();
- u32 *core_idle_state;
-
- /*
- * core_idle_state - First 8 bits track the idle state of each thread
- * of the core. The 8th bit is the lock bit. Initially all thread bits
- * are set. They are cleared when the thread enters deep idle state
- * like sleep and winkle. Initially the lock bit is cleared.
- * The lock bit has 2 purposes
- * a. While the first thread is restoring core state, it prevents
- * other threads in the core from switching to process context.
- * b. While the last thread in the core is saving the core state, it
- * prevents a different thread from waking up.
- */
- for (i = 0; i < nr_cores; i++) {
- int first_cpu = i * threads_per_core;
- int node = cpu_to_node(first_cpu);
-
- core_idle_state = kmalloc_node(sizeof(u32), GFP_KERNEL, node);
- *core_idle_state = PNV_CORE_IDLE_THREAD_BITS;
-
- for (j = 0; j < threads_per_core; j++) {
- int cpu = first_cpu + j;
-
- paca[cpu].core_idle_state_ptr = core_idle_state;
- paca[cpu].thread_idle_state = PNV_THREAD_RUNNING;
- paca[cpu].thread_mask = 1 << j;
- }
- }
-
- update_subcore_sibling_mask();
-
- if (supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED)
- pnv_save_sprs_for_winkle();
-}
-
-u32 pnv_get_supported_cpuidle_states(void)
-{
- return supported_cpuidle_states;
-}
-EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
-
-static int __init pnv_init_idle_states(void)
-{
- struct device_node *power_mgt;
- int dt_idle_states;
- u32 *flags;
- int i;
-
- supported_cpuidle_states = 0;
-
- if (cpuidle_disable != IDLE_NO_OVERRIDE)
- goto out;
-
- if (!firmware_has_feature(FW_FEATURE_OPALv3))
- goto out;
-
- power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
- if (!power_mgt) {
- pr_warn("opal: PowerMgmt Node not found\n");
- goto out;
- }
- dt_idle_states = of_property_count_u32_elems(power_mgt,
- "ibm,cpu-idle-state-flags");
- if (dt_idle_states < 0) {
- pr_warn("cpuidle-powernv: no idle states found in the DT\n");
- goto out;
- }
-
- flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
- if (of_property_read_u32_array(power_mgt,
- "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
- pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
- goto out_free;
- }
-
- for (i = 0; i < dt_idle_states; i++)
- supported_cpuidle_states |= flags[i];
-
- if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
- patch_instruction(
- (unsigned int *)pnv_fastsleep_workaround_at_entry,
- PPC_INST_NOP);
- patch_instruction(
- (unsigned int *)pnv_fastsleep_workaround_at_exit,
- PPC_INST_NOP);
- }
- pnv_alloc_idle_core_states();
-out_free:
- kfree(flags);
-out:
- return 0;
-}
-
-subsys_initcall(pnv_init_idle_states);
-
static int __init pnv_probe(void)
{
unsigned long root = of_get_flat_dt_root();
@@ -492,7 +314,6 @@ define_machine(powernv) {
.machine_shutdown = pnv_shutdown,
.power_save = power7_idle,
.calibrate_decr = generic_calibrate_decr,
- .dma_set_mask = pnv_dma_set_mask,
.dma_get_required_mask = pnv_dma_get_required_mask,
#ifdef CONFIG_KEXEC
.kexec_cpu_down = pnv_kexec_cpu_down,
diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c
index ce73ce865..791c6142c 100644
--- a/arch/powerpc/platforms/ps3/time.c
+++ b/arch/powerpc/platforms/ps3/time.c
@@ -92,5 +92,4 @@ static int __init ps3_rtc_init(void)
return PTR_ERR_OR_ZERO(pdev);
}
-
-module_init(ps3_rtc_init);
+device_initcall(ps3_rtc_init);
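
Editor's note: the ps3 RTC init switches from module_init() to device_initcall(); for code that can only be built in, both expand to the same initcall level, so this is a cleanup rather than a behavioural change. As a loose user-space analogue of ordered init levels, the sketch below uses GCC/Clang constructor priorities, which is an assumption of the sketch and not the kernel's initcall machinery.

#include <stdio.h>

__attribute__((constructor(201)))	/* earlier stage, akin to subsys_initcall */
static void subsys_stage(void) { puts("subsys init"); }

__attribute__((constructor(206)))	/* later stage, akin to device_initcall */
static void device_stage(void) { puts("device init"); }

int main(void)
{
	puts("main");			/* runs after both init stages */
	return 0;
}
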
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 019d34aaf..47d9cebe7 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -421,11 +421,10 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
return -ENODEV;
dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
+ of_node_put(parent);
if (!dn)
return -EINVAL;
- of_node_put(parent);
-
rc = dlpar_attach_node(dn);
if (rc) {
dlpar_release_drc(drc_index);
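
Editor's note: the dlpar fix moves of_node_put(parent) ahead of the NULL check on dn, so the parent reference is released on the failure path as well as on success. A small user-space sketch of the same "drop the reference on every exit" rule; node_get, node_put and configure_connector are stand-ins, not kernel API.

#include <stdio.h>
#include <stdlib.h>

struct node { int refs; };

static void node_get(struct node *n) { n->refs++; }
static void node_put(struct node *n) { if (--n->refs == 0) printf("node freed\n"); }

static struct node *configure_connector(struct node *parent, int fail)
{
	(void)parent;
	return fail ? NULL : calloc(1, sizeof(struct node));
}

static int probe(struct node *parent, int fail)
{
	struct node *dn;

	node_get(parent);
	dn = configure_connector(parent, fail);
	node_put(parent);		/* dropped on success and failure alike */
	if (!dn)
		return -1;

	free(dn);
	return 0;
}

int main(void)
{
	struct node parent = { .refs = 1 };

	probe(&parent, 1);		/* failure path still balances the refcount */
	probe(&parent, 0);
	return 0;
}
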
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 2039397cc..1ba55d0bb 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -519,7 +519,7 @@ static int pseries_eeh_reset(struct eeh_pe *pe, int option)
/**
* pseries_eeh_wait_state - Wait for PE state
* @pe: EEH PE
- * @max_wait: maximal period in microsecond
+ * @max_wait: maximal period in milliseconds
*
* Wait for the state of associated PE. It might take some time
* to retrieve the PE's state.
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 61d5a17f4..10510dea1 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -36,6 +36,8 @@
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/of.h>
+#include <linux/iommu.h>
+#include <linux/rculist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -51,6 +53,73 @@
#include "pseries.h"
+static struct iommu_table_group *iommu_pseries_alloc_group(int node)
+{
+ struct iommu_table_group *table_group = NULL;
+ struct iommu_table *tbl = NULL;
+ struct iommu_table_group_link *tgl = NULL;
+
+ table_group = kzalloc_node(sizeof(struct iommu_table_group), GFP_KERNEL,
+ node);
+ if (!table_group)
+ goto fail_exit;
+
+ tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node);
+ if (!tbl)
+ goto fail_exit;
+
+ tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
+ node);
+ if (!tgl)
+ goto fail_exit;
+
+ INIT_LIST_HEAD_RCU(&tbl->it_group_list);
+ tgl->table_group = table_group;
+ list_add_rcu(&tgl->next, &tbl->it_group_list);
+
+ table_group->tables[0] = tbl;
+
+ return table_group;
+
+fail_exit:
+ kfree(tgl);
+ kfree(table_group);
+ kfree(tbl);
+
+ return NULL;
+}
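
Editor's note: iommu_pseries_alloc_group() unwinds through a single fail_exit label and relies on kfree(NULL) being a no-op, so whichever of the three allocations succeeded is freed without needing separate labels. The same shape in plain C, where free(NULL) is likewise defined to do nothing; the group/table/link types are illustrative only.

#include <stdlib.h>

struct group { struct table *tbl; };
struct table { struct link *lnk; };
struct link  { struct group *grp; };

static struct group *alloc_group(void)
{
	struct group *grp = calloc(1, sizeof(*grp));
	struct table *tbl = NULL;
	struct link *lnk = NULL;

	if (!grp)
		goto fail;
	tbl = calloc(1, sizeof(*tbl));
	if (!tbl)
		goto fail;
	lnk = calloc(1, sizeof(*lnk));
	if (!lnk)
		goto fail;

	lnk->grp = grp;
	tbl->lnk = lnk;
	grp->tbl = tbl;
	return grp;

fail:
	free(lnk);	/* safe even when still NULL */
	free(tbl);
	free(grp);
	return NULL;
}

int main(void)
{
	struct group *g = alloc_group();

	if (g) {
		free(g->tbl->lnk);
		free(g->tbl);
		free(g);
	}
	return 0;
}
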
+
+static void iommu_pseries_free_group(struct iommu_table_group *table_group,
+ const char *node_name)
+{
+ struct iommu_table *tbl;
+#ifdef CONFIG_IOMMU_API
+ struct iommu_table_group_link *tgl;
+#endif
+
+ if (!table_group)
+ return;
+
+ tbl = table_group->tables[0];
+#ifdef CONFIG_IOMMU_API
+ tgl = list_first_entry_or_null(&tbl->it_group_list,
+ struct iommu_table_group_link, next);
+
+ WARN_ON_ONCE(!tgl);
+ if (tgl) {
+ list_del_rcu(&tgl->next);
+ kfree(tgl);
+ }
+ if (table_group->group) {
+ iommu_group_put(table_group->group);
+ BUG_ON(table_group->group);
+ }
+#endif
+ iommu_free_table(tbl, node_name);
+
+ kfree(table_group);
+}
+
static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
__be64 *startp, __be64 *endp)
{
@@ -193,7 +262,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
int ret = 0;
unsigned long flags;
- if (npages == 1) {
+ if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction, attrs);
}
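
Editor's note: tce_buildmulti_pSeriesLP() now falls back to the single-TCE path both for one-page requests and when the MULTITCE firmware feature is absent, replacing the old boot-time selection of ppc_md hooks. A hedged sketch of that "batch when supported, otherwise loop" shape; have_multi, map_one and map_many are invented names standing in for the firmware feature test and the two H_PUT_TCE variants.

#include <stdbool.h>
#include <stdio.h>

static bool have_multi;	/* stand-in for firmware_has_feature(FW_FEATURE_MULTITCE) */

static void map_one(long index) { printf("map page %ld\n", index); }
static void map_many(long index, long npages) { printf("map %ld pages at %ld\n", npages, index); }

static void map(long index, long npages)
{
	if (npages == 1 || !have_multi) {	/* mirror the new fallback test */
		for (long i = 0; i < npages; i++)
			map_one(index + i);
		return;
	}
	map_many(index, npages);
}

int main(void)
{
	map(0, 1);
	map(0, 4);		/* loops, since have_multi is false */
	have_multi = true;
	map(8, 4);		/* batched */
	return 0;
}
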
@@ -285,6 +354,9 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
{
u64 rc;
+ if (!firmware_has_feature(FW_FEATURE_MULTITCE))
+ return tce_free_pSeriesLP(tbl, tcenum, npages);
+
rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
if (rc && printk_ratelimit()) {
@@ -460,7 +532,6 @@ static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}
-
#ifdef CONFIG_PCI
static void iommu_table_setparms(struct pci_controller *phb,
struct device_node *dn,
@@ -546,6 +617,12 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
tbl->it_size = size >> tbl->it_page_shift;
}
+struct iommu_table_ops iommu_table_pseries_ops = {
+ .set = tce_build_pSeries,
+ .clear = tce_free_pSeries,
+ .get = tce_get_pseries
+};
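
Editor's note: these per-table ops replace the global ppc_md.tce_build/tce_free/tce_get hooks removed later in this patch, so bare-metal and LPAR tables each carry their own callbacks via iommu_table::it_ops. A compact sketch of per-object ops dispatch follows; tce_table and tce_table_ops are illustrative, not the kernel structures.

#include <stdio.h>

struct tce_table;

struct tce_table_ops {
	int  (*set)(struct tce_table *tbl, long index);
	void (*clear)(struct tce_table *tbl, long index);
};

struct tce_table {
	const char *name;
	const struct tce_table_ops *ops;	/* like iommu_table::it_ops */
};

static int  direct_set(struct tce_table *t, long i)   { printf("%s: direct set %ld\n", t->name, i); return 0; }
static void direct_clear(struct tce_table *t, long i) { printf("%s: direct clear %ld\n", t->name, i); }
static int  hcall_set(struct tce_table *t, long i)    { printf("%s: hcall set %ld\n", t->name, i); return 0; }
static void hcall_clear(struct tce_table *t, long i)  { printf("%s: hcall clear %ld\n", t->name, i); }

static const struct tce_table_ops direct_ops = { .set = direct_set, .clear = direct_clear };
static const struct tce_table_ops hcall_ops  = { .set = hcall_set,  .clear = hcall_clear };

int main(void)
{
	struct tce_table bare = { "bare-metal", &direct_ops };
	struct tce_table lpar = { "lpar",       &hcall_ops };

	bare.ops->set(&bare, 1);
	lpar.ops->set(&lpar, 1);
	bare.ops->clear(&bare, 1);
	lpar.ops->clear(&lpar, 1);
	return 0;
}
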
+
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
struct device_node *dn;
@@ -610,12 +687,13 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
pci->phb->dma_window_size = 0x8000000ul;
pci->phb->dma_window_base_cur = 0x8000000ul;
- tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
- pci->phb->node);
+ pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
+ tbl = pci->table_group->tables[0];
iommu_table_setparms(pci->phb, dn, tbl);
- pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
- iommu_register_group(tbl, pci_domain_nr(bus), 0);
+ tbl->it_ops = &iommu_table_pseries_ops;
+ iommu_init_table(tbl, pci->phb->node);
+ iommu_register_group(pci->table_group, pci_domain_nr(bus), 0);
/* Divide the rest (1.75GB) among the children */
pci->phb->dma_window_size = 0x80000000ul;
@@ -625,6 +703,11 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}
+struct iommu_table_ops iommu_table_lpar_multi_ops = {
+ .set = tce_buildmulti_pSeriesLP,
+ .clear = tce_freemulti_pSeriesLP,
+ .get = tce_get_pSeriesLP
+};
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
@@ -653,15 +736,17 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
ppci = PCI_DN(pdn);
pr_debug(" parent is %s, iommu_table: 0x%p\n",
- pdn->full_name, ppci->iommu_table);
+ pdn->full_name, ppci->table_group);
- if (!ppci->iommu_table) {
- tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
- ppci->phb->node);
+ if (!ppci->table_group) {
+ ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
+ tbl = ppci->table_group->tables[0];
iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
- ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
- iommu_register_group(tbl, pci_domain_nr(bus), 0);
- pr_debug(" created table: %p\n", ppci->iommu_table);
+ tbl->it_ops = &iommu_table_lpar_multi_ops;
+ iommu_init_table(tbl, ppci->phb->node);
+ iommu_register_group(ppci->table_group,
+ pci_domain_nr(bus), 0);
+ pr_debug(" created table: %p\n", ppci->table_group);
}
}
@@ -683,13 +768,15 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
struct pci_controller *phb = PCI_DN(dn)->phb;
pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
- tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
- phb->node);
+ PCI_DN(dn)->table_group = iommu_pseries_alloc_group(phb->node);
+ tbl = PCI_DN(dn)->table_group->tables[0];
iommu_table_setparms(phb, dn, tbl);
- PCI_DN(dn)->iommu_table = iommu_init_table(tbl, phb->node);
- iommu_register_group(tbl, pci_domain_nr(phb->bus), 0);
- set_iommu_table_base_and_group(&dev->dev,
- PCI_DN(dn)->iommu_table);
+ tbl->it_ops = &iommu_table_pseries_ops;
+ iommu_init_table(tbl, phb->node);
+ iommu_register_group(PCI_DN(dn)->table_group,
+ pci_domain_nr(phb->bus), 0);
+ set_iommu_table_base(&dev->dev, tbl);
+ iommu_add_device(&dev->dev);
return;
}
@@ -697,13 +784,14 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
* an already allocated iommu table is found and use that.
*/
- while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL)
+ while (dn && PCI_DN(dn) && PCI_DN(dn)->table_group == NULL)
dn = dn->parent;
- if (dn && PCI_DN(dn))
- set_iommu_table_base_and_group(&dev->dev,
- PCI_DN(dn)->iommu_table);
- else
+ if (dn && PCI_DN(dn)) {
+ set_iommu_table_base(&dev->dev,
+ PCI_DN(dn)->table_group->tables[0]);
+ iommu_add_device(&dev->dev);
+ } else
printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
pci_name(dev));
}
@@ -1088,7 +1176,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
dn = pci_device_to_OF_node(dev);
pr_debug(" node is %s\n", dn->full_name);
- for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
+ for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
pdn = pdn->parent) {
dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
if (dma_window)
@@ -1104,18 +1192,21 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
pr_debug(" parent is %s\n", pdn->full_name);
pci = PCI_DN(pdn);
- if (!pci->iommu_table) {
- tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
- pci->phb->node);
+ if (!pci->table_group) {
+ pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
+ tbl = pci->table_group->tables[0];
iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
- pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
- iommu_register_group(tbl, pci_domain_nr(pci->phb->bus), 0);
- pr_debug(" created table: %p\n", pci->iommu_table);
+ tbl->it_ops = &iommu_table_lpar_multi_ops;
+ iommu_init_table(tbl, pci->phb->node);
+ iommu_register_group(pci->table_group,
+ pci_domain_nr(pci->phb->bus), 0);
+ pr_debug(" created table: %p\n", pci->table_group);
} else {
- pr_debug(" found DMA window, table: %p\n", pci->iommu_table);
+ pr_debug(" found DMA window, table: %p\n", pci->table_group);
}
- set_iommu_table_base_and_group(&dev->dev, pci->iommu_table);
+ set_iommu_table_base(&dev->dev, pci->table_group->tables[0]);
+ iommu_add_device(&dev->dev);
}
static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
@@ -1145,7 +1236,7 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
* search upwards in the tree until we either hit a dma-window
* property, OR find a parent with a table already allocated.
*/
- for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
+ for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
pdn = pdn->parent) {
dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
if (dma_window)
@@ -1189,7 +1280,7 @@ static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
dn = pci_device_to_OF_node(pdev);
/* search upwards for ibm,dma-window */
- for (; dn && PCI_DN(dn) && !PCI_DN(dn)->iommu_table;
+ for (; dn && PCI_DN(dn) && !PCI_DN(dn)->table_group;
dn = dn->parent)
if (of_get_property(dn, "ibm,dma-window", NULL))
break;
@@ -1269,8 +1360,9 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
* the device node.
*/
remove_ddw(np, false);
- if (pci && pci->iommu_table)
- iommu_free_table(pci->iommu_table, np->full_name);
+ if (pci && pci->table_group)
+ iommu_pseries_free_group(pci->table_group,
+ np->full_name);
spin_lock(&direct_window_list_lock);
list_for_each_entry(window, &direct_window_list, list) {
@@ -1300,22 +1392,11 @@ void iommu_init_early_pSeries(void)
return;
if (firmware_has_feature(FW_FEATURE_LPAR)) {
- if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
- ppc_md.tce_build = tce_buildmulti_pSeriesLP;
- ppc_md.tce_free = tce_freemulti_pSeriesLP;
- } else {
- ppc_md.tce_build = tce_build_pSeriesLP;
- ppc_md.tce_free = tce_free_pSeriesLP;
- }
- ppc_md.tce_get = tce_get_pSeriesLP;
pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
} else {
- ppc_md.tce_build = tce_build_pSeries;
- ppc_md.tce_free = tce_free_pSeries;
- ppc_md.tce_get = tce_get_pseries;
pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries;
pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries;
}
@@ -1333,8 +1414,6 @@ static int __init disable_multitce(char *str)
firmware_has_feature(FW_FEATURE_LPAR) &&
firmware_has_feature(FW_FEATURE_MULTITCE)) {
printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
- ppc_md.tce_build = tce_build_pSeriesLP;
- ppc_md.tce_free = tce_free_pSeriesLP;
powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
}
return 1;
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index c8d24f9a6..c22bb647c 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -18,6 +18,8 @@
#include <asm/ppc-pci.h>
#include <asm/machdep.h>
+#include "pseries.h"
+
static int query_token, change_token;
#define RTAS_QUERY_FN 0
@@ -505,6 +507,8 @@ static void rtas_msi_pci_irq_fixup(struct pci_dev *pdev)
static int rtas_msi_init(void)
{
+ struct pci_controller *phb;
+
query_token = rtas_token("ibm,query-interrupt-source-number");
change_token = rtas_token("ibm,change-msi");
@@ -516,9 +520,15 @@ static int rtas_msi_init(void)
pr_debug("rtas_msi: Registering RTAS MSI callbacks.\n");
- WARN_ON(ppc_md.setup_msi_irqs);
- ppc_md.setup_msi_irqs = rtas_setup_msi_irqs;
- ppc_md.teardown_msi_irqs = rtas_teardown_msi_irqs;
+ WARN_ON(pseries_pci_controller_ops.setup_msi_irqs);
+ pseries_pci_controller_ops.setup_msi_irqs = rtas_setup_msi_irqs;
+ pseries_pci_controller_ops.teardown_msi_irqs = rtas_teardown_msi_irqs;
+
+ list_for_each_entry(phb, &hose_list, list_node) {
+ WARN_ON(phb->controller_ops.setup_msi_irqs);
+ phb->controller_ops.setup_msi_irqs = rtas_setup_msi_irqs;
+ phb->controller_ops.teardown_msi_irqs = rtas_teardown_msi_irqs;
+ }
WARN_ON(ppc_md.pci_irq_fixup);
ppc_md.pci_irq_fixup = rtas_msi_pci_irq_fixup;