author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-12-15 14:52:16 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2015-12-15 14:52:16 -0300
commit     8d91c1e411f55d7ea91b1183a2e9f8088fb4d5be (patch)
tree       e9891aa6c295060d065adffd610c4f49ecf884f3 /arch/powerpc/platforms
parent     a71852147516bc1cb5b0b3cbd13639bfd4022dc8 (diff)
Linux-libre 4.3.2-gnu
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/512x/Kconfig              |   4
-rw-r--r--  arch/powerpc/platforms/512x/clock-commonclk.c    |   1
-rw-r--r--  arch/powerpc/platforms/512x/mpc5121_ads_cpld.c   |   8
-rw-r--r--  arch/powerpc/platforms/52xx/media5200.c          |   2
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_gpt.c        |   2
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pic.c        |   2
-rw-r--r--  arch/powerpc/platforms/82xx/pq2ads-pci-pic.c     |   2
-rw-r--r--  arch/powerpc/platforms/85xx/c293pcie.c           |   4
-rw-r--r--  arch/powerpc/platforms/85xx/common.c             |   2
-rw-r--r--  arch/powerpc/platforms/85xx/corenet_generic.c    |   2
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_cds.c        |   5
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_ds.c         |   2
-rw-r--r--  arch/powerpc/platforms/85xx/socrates_fpga_pic.c  |   3
-rw-r--r--  arch/powerpc/platforms/86xx/pic.c                |   2
-rw-r--r--  arch/powerpc/platforms/8xx/m8xx_setup.c          |   2
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig              |  15
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c           |   8
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c          |   6
-rw-r--r--  arch/powerpc/platforms/cell/ras.c                |   2
-rw-r--r--  arch/powerpc/platforms/cell/spider-pic.c         |   2
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c         |  55
-rw-r--r--  arch/powerpc/platforms/cell/spufs/lscsa_alloc.c  | 124
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c        |   5
-rw-r--r--  arch/powerpc/platforms/chrp/setup.c              |   2
-rw-r--r--  arch/powerpc/platforms/embedded6xx/flipper-pic.c |   3
-rw-r--r--  arch/powerpc/platforms/embedded6xx/hlwd-pic.c    |   3
-rw-r--r--  arch/powerpc/platforms/embedded6xx/mvme5100.c    |   2
-rw-r--r--  arch/powerpc/platforms/pasemi/msi.c              |   4
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c            |   3
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-powernv.c     |  12
-rw-r--r--  arch/powerpc/platforms/powernv/opal-hmi.c        | 177
-rw-r--r--  arch/powerpc/platforms/powernv/opal-irqchip.c    |   3
-rw-r--r--  arch/powerpc/platforms/powernv/opal-power.c      | 147
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S   |   4
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c            |  50
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c        | 147
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c             |  15
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h             |   7
-rw-r--r--  arch/powerpc/platforms/powernv/powernv.h         |   6
-rw-r--r--  arch/powerpc/platforms/powernv/rng.c             |   2
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c           |  16
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c             |  29
-rw-r--r--  arch/powerpc/platforms/powernv/subcore.c         |   4
-rw-r--r--  arch/powerpc/platforms/ps3/interrupt.c           |   3
-rw-r--r--  arch/powerpc/platforms/ps3/os-area.c             |   5
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c  |   3
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c           |   3
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c             |   6
-rw-r--r--  arch/powerpc/platforms/pseries/rng.c             |   2
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c           |  26
50 files changed, 533 insertions(+), 411 deletions(-)
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index 5aa3f4b53..48bf38d0d 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -7,8 +7,8 @@ config PPC_MPC512x
select PPC_PCI_CHOICE
select FSL_PCI if PCI
select ARCH_WANT_OPTIONAL_GPIOLIB
- select USB_EHCI_BIG_ENDIAN_MMIO
- select USB_EHCI_BIG_ENDIAN_DESC
+ select USB_EHCI_BIG_ENDIAN_MMIO if USB_EHCI_HCD
+ select USB_EHCI_BIG_ENDIAN_DESC if USB_EHCI_HCD
config MPC5121_ADS
bool "Freescale MPC5121E ADS"
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
index f691bcabd..c50ea76ba 100644
--- a/arch/powerpc/platforms/512x/clock-commonclk.c
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -12,6 +12,7 @@
*/
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/device.h>
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index ca3a062ed..0035d146d 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -104,9 +104,10 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp,
return irq_linear_revmap(cpld_pic_host, cpld_irq);
}
-static void
-cpld_pic_cascade(unsigned int irq, struct irq_desc *desc)
+static void cpld_pic_cascade(struct irq_desc *desc)
{
+ unsigned int irq;
+
irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status,
&cpld_regs->pci_mask);
if (irq != NO_IRQ) {
@@ -123,7 +124,8 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc)
}
static int
-cpld_pic_host_match(struct irq_domain *h, struct device_node *node)
+cpld_pic_host_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
{
return cpld_pic_node == node;
}
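
Note: the hunks above show two cross-tree API changes that recur throughout this patch. Irq flow handlers drop their "unsigned int irq" argument and receive only the struct irq_desc * (handlers that still need the Linux irq number, such as socrates_fpga_pic.c and cell/interrupt.c below, recover it with irq_desc_get_irq()), and irq_domain match callbacks gain an enum irq_domain_bus_token parameter. A minimal sketch of the new shapes, using a hypothetical "foo" controller rather than any driver touched here:

/* Sketch only: "foo" identifiers are hypothetical, not part of this patch. */
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static unsigned int foo_read_pending_irq(void);	/* hypothetical helper */

/* New flow-handler signature: the irq number argument is gone. */
static void foo_irq_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq = foo_read_pending_irq();

	if (cascade_irq != NO_IRQ)
		generic_handle_irq(cascade_irq);
	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);	/* eoi the parent as before */
}

/* New match signature: an (ignorable) bus_token argument is added. */
static int foo_host_match(struct irq_domain *h, struct device_node *node,
			  enum irq_domain_bus_token bus_token)
{
	return h->of_node == node;
}

The related irq_set_handler_locked(d, handler) helper likewise replaces __irq_set_handler_locked(d->irq, handler), as in mpc52xx_pic.c below.
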
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 32cae33c4..8fb95480f 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -80,7 +80,7 @@ static struct irq_chip media5200_irq_chip = {
.irq_mask_ack = media5200_irq_mask,
};
-void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
+static void media5200_irq_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
int sub_virq, val;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 63016621a..78ac19aef 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -191,7 +191,7 @@ static struct irq_chip mpc52xx_gpt_irq_chip = {
.irq_set_type = mpc52xx_gpt_irq_set_type,
};
-void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc)
+static void mpc52xx_gpt_irq_cascade(struct irq_desc *desc)
{
struct mpc52xx_gpt_priv *gpt = irq_desc_get_handler_data(desc);
int sub_virq;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 2944bc84b..4fe2074c8 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -196,7 +196,7 @@ static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type)
ctrl_reg |= (type << (22 - (l2irq * 2)));
out_be32(&intr->ctrl, ctrl_reg);
- __irq_set_handler_locked(d->irq, handler);
+ irq_set_handler_locked(d, handler);
return 0;
}
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 74861a7fb..60e89fc9c 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -78,7 +78,7 @@ static struct irq_chip pq2ads_pci_ic = {
.irq_disable = pq2ads_pci_mask_irq
};
-static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void pq2ads_pci_irq_demux(struct irq_desc *desc)
{
struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc);
u32 stat, mask, pend;
diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c
index 84476b646..61bc851e9 100644
--- a/arch/powerpc/platforms/85xx/c293pcie.c
+++ b/arch/powerpc/platforms/85xx/c293pcie.c
@@ -66,10 +66,6 @@ define_machine(c293_pcie) {
.probe = c293_pcie_probe,
.setup_arch = c293_pcie_setup_arch,
.init_IRQ = c293_pcie_pic_init,
-#ifdef CONFIG_PCI
- .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
- .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
-#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 7bfb9b184..23791de7b 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -49,7 +49,7 @@ int __init mpc85xx_common_publish_devices(void)
return of_platform_bus_probe(NULL, mpc85xx_common_ids, NULL);
}
#ifdef CONFIG_CPM2
-static void cpm2_cascade(unsigned int irq, struct irq_desc *desc)
+static void cpm2_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
int cascade_irq;
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index bd839dc28..b39557120 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -153,6 +153,8 @@ static const char * const boards[] __initconst = {
"fsl,T1023RDB",
"fsl,T1024QDS",
"fsl,T1024RDB",
+ "fsl,T1040D4RDB",
+ "fsl,T1042D4RDB",
"fsl,T1040QDS",
"fsl,T1042QDS",
"fsl,T1040RDB",
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index b0753e222..5ac70de3e 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -192,8 +192,7 @@ void mpc85xx_cds_fixup_bus(struct pci_bus *bus)
}
#ifdef CONFIG_PPC_I8259
-static void mpc85xx_8259_cascade_handler(unsigned int irq,
- struct irq_desc *desc)
+static void mpc85xx_8259_cascade_handler(struct irq_desc *desc)
{
unsigned int cascade_irq = i8259_irq();
@@ -202,7 +201,7 @@ static void mpc85xx_8259_cascade_handler(unsigned int irq,
generic_handle_irq(cascade_irq);
/* check for any interrupts from the shared IRQ line */
- handle_fasteoi_irq(irq, desc);
+ handle_fasteoi_irq(desc);
}
static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id)
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index ffdf02121..f858306db 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -46,7 +46,7 @@
#endif
#ifdef CONFIG_PPC_I8259
-static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void mpc85xx_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index 55a9682b9..b02d6a5bb 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -91,9 +91,10 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq)
(irq_hw_number_t)i);
}
-void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc)
+static void socrates_fpga_pic_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
unsigned int cascade_irq;
/*
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c
index d5b98c0f9..845defa1f 100644
--- a/arch/powerpc/platforms/86xx/pic.c
+++ b/arch/powerpc/platforms/86xx/pic.c
@@ -17,7 +17,7 @@
#include <asm/i8259.h>
#ifdef CONFIG_PPC_I8259
-static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void mpc86xx_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index d30377470..c289fc77b 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -214,7 +214,7 @@ void mpc8xx_restart(char *cmd)
panic("Restart failed\n");
}
-static void cpm_cascade(unsigned int irq, struct irq_desc *desc)
+static void cpm_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
int cascade_irq = cpm_get_irq();
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 2f23133ab..b0ac1773c 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -57,21 +57,6 @@ config SPU_FS
Units on machines implementing the Broadband Processor
Architecture.
-config SPU_FS_64K_LS
- bool "Use 64K pages to map SPE local store"
- # we depend on PPC_MM_SLICES for now rather than selecting
- # it because we depend on hugetlbfs hooks being present. We
- # will fix that when the generic code has been improved to
- # not require hijacking hugetlbfs hooks.
- depends on SPU_FS && PPC_MM_SLICES && !PPC_64K_PAGES
- default y
- select PPC_HAS_HASH_64K
- help
- This option causes SPE local stores to be mapped in process
- address spaces using 64K pages while the rest of the kernel
- uses 4K pages. This can improve performances of applications
- using multiple SPEs by lowering the TLB pressure on them.
-
config SPU_BASE
bool
default n
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index fe51de4fc..e0e68a1c0 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -93,7 +93,7 @@ static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
dcr_write(msic->dcr_host, dcr_n, val);
}
-static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
+static void axon_msi_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct axon_msic *msic = irq_desc_get_handler_data(desc);
@@ -213,7 +213,7 @@ static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
return -ENODEV;
}
- entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
+ entry = first_pci_msi_entry(dev);
for (; dn; dn = of_get_next_parent(dn)) {
if (entry->msi_attrib.is_64) {
@@ -269,7 +269,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (rc)
return rc;
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
virq = irq_create_direct_mapping(msic->irq_domain);
if (virq == NO_IRQ) {
dev_warn(&dev->dev,
@@ -292,7 +292,7 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");
- list_for_each_entry(entry, &dev->msi_list, list) {
+ for_each_pci_msi_entry(entry, dev) {
if (entry->irq == NO_IRQ)
continue;
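
Note: the axon_msi.c hunks also show the new PCI MSI descriptor helpers replacing open-coded walks of dev->msi_list; the same substitution appears again in pasemi/msi.c and powernv/pci.c below. A hedged sketch of the pattern, with hypothetical "foo" names:

/* Sketch only: demonstrates first_pci_msi_entry()/for_each_pci_msi_entry(). */
#include <linux/msi.h>
#include <linux/pci.h>

static bool foo_supports_32bit_msi(void);	/* hypothetical capability check */

static int foo_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct msi_desc *entry;

	/* Peek at the first descriptor, e.g. for address-width decisions. */
	if (!first_pci_msi_entry(pdev)->msi_attrib.is_64 &&
	    !foo_supports_32bit_msi())
		return -ENODEV;

	/* Walk every MSI/MSI-X descriptor attached to the device. */
	for_each_pci_msi_entry(entry, pdev) {
		/* allocate a virq, compose the message, bind it to entry->irq */
	}
	return 0;
}
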
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 3af8324c1..9f609fc8d 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -99,11 +99,12 @@ static void iic_ioexc_eoi(struct irq_data *d)
{
}
-static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
+static void iic_ioexc_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct cbe_iic_regs __iomem *node_iic =
(void __iomem *)irq_desc_get_handler_data(desc);
+ unsigned int irq = irq_desc_get_irq(desc);
unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
unsigned long bits, ack;
int cascade;
@@ -222,7 +223,8 @@ void iic_request_IPIs(void)
#endif /* CONFIG_SMP */
-static int iic_host_match(struct irq_domain *h, struct device_node *node)
+static int iic_host_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
{
return of_device_is_compatible(node,
"IBM,CBEA-Internal-Interrupt-Controller");
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index e865d7481..2d4f60c01 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -123,7 +123,7 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
area->nid = nid;
area->order = order;
- area->pages = alloc_pages_exact_node(area->nid,
+ area->pages = __alloc_pages_node(area->nid,
GFP_KERNEL|__GFP_THISNODE,
area->order);
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 1f72f4ab6..9d27de62d 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -199,7 +199,7 @@ static const struct irq_domain_ops spider_host_ops = {
.xlate = spider_host_xlate,
};
-static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc)
+static void spider_irq_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct spider_pic *pic = irq_desc_get_handler_data(desc);
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index d966bbe58..5038fd578 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -239,23 +239,6 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long address = (unsigned long)vmf->virtual_address;
unsigned long pfn, offset;
-#ifdef CONFIG_SPU_FS_64K_LS
- struct spu_state *csa = &ctx->csa;
- int psize;
-
- /* Check what page size we are using */
- psize = get_slice_psize(vma->vm_mm, address);
-
- /* Some sanity checking */
- BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));
-
- /* Wow, 64K, cool, we need to align the address though */
- if (csa->use_big_pages) {
- BUG_ON(vma->vm_start & 0xffff);
- address &= ~0xfffful;
- }
-#endif /* CONFIG_SPU_FS_64K_LS */
-
offset = vmf->pgoff << PAGE_SHIFT;
if (offset >= LS_SIZE)
return VM_FAULT_SIGBUS;
@@ -310,22 +293,6 @@ static const struct vm_operations_struct spufs_mem_mmap_vmops = {
static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
{
-#ifdef CONFIG_SPU_FS_64K_LS
- struct spu_context *ctx = file->private_data;
- struct spu_state *csa = &ctx->csa;
-
- /* Sanity check VMA alignment */
- if (csa->use_big_pages) {
- pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
- " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
- vma->vm_pgoff);
- if (vma->vm_start & 0xffff)
- return -EINVAL;
- if (vma->vm_pgoff & 0xf)
- return -EINVAL;
- }
-#endif /* CONFIG_SPU_FS_64K_LS */
-
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
@@ -336,25 +303,6 @@ static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-#ifdef CONFIG_SPU_FS_64K_LS
-static unsigned long spufs_get_unmapped_area(struct file *file,
- unsigned long addr, unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- struct spu_context *ctx = file->private_data;
- struct spu_state *csa = &ctx->csa;
-
- /* If not using big pages, fallback to normal MM g_u_a */
- if (!csa->use_big_pages)
- return current->mm->get_unmapped_area(file, addr, len,
- pgoff, flags);
-
- /* Else, try to obtain a 64K pages slice */
- return slice_get_unmapped_area(addr, len, flags,
- MMU_PAGE_64K, 1);
-}
-#endif /* CONFIG_SPU_FS_64K_LS */
-
static const struct file_operations spufs_mem_fops = {
.open = spufs_mem_open,
.release = spufs_mem_release,
@@ -362,9 +310,6 @@ static const struct file_operations spufs_mem_fops = {
.write = spufs_mem_write,
.llseek = generic_file_llseek,
.mmap = spufs_mem_mmap,
-#ifdef CONFIG_SPU_FS_64K_LS
- .get_unmapped_area = spufs_get_unmapped_area,
-#endif
};
static int spufs_ps_fault(struct vm_area_struct *vma,
diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
index 147069938..b847e9403 100644
--- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
+++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
@@ -31,7 +31,7 @@
#include "spufs.h"
-static int spu_alloc_lscsa_std(struct spu_state *csa)
+int spu_alloc_lscsa(struct spu_state *csa)
{
struct spu_lscsa *lscsa;
unsigned char *p;
@@ -48,7 +48,7 @@ static int spu_alloc_lscsa_std(struct spu_state *csa)
return 0;
}
-static void spu_free_lscsa_std(struct spu_state *csa)
+void spu_free_lscsa(struct spu_state *csa)
{
/* Clear reserved bit before vfree. */
unsigned char *p;
@@ -61,123 +61,3 @@ static void spu_free_lscsa_std(struct spu_state *csa)
vfree(csa->lscsa);
}
-
-#ifdef CONFIG_SPU_FS_64K_LS
-
-#define SPU_64K_PAGE_SHIFT 16
-#define SPU_64K_PAGE_ORDER (SPU_64K_PAGE_SHIFT - PAGE_SHIFT)
-#define SPU_64K_PAGE_COUNT (1ul << SPU_64K_PAGE_ORDER)
-
-int spu_alloc_lscsa(struct spu_state *csa)
-{
- struct page **pgarray;
- unsigned char *p;
- int i, j, n_4k;
-
- /* Check availability of 64K pages */
- if (!spu_64k_pages_available())
- goto fail;
-
- csa->use_big_pages = 1;
-
- pr_debug("spu_alloc_lscsa(csa=0x%p), trying to allocate 64K pages\n",
- csa);
-
- /* First try to allocate our 64K pages. We need 5 of them
- * with the current implementation. In the future, we should try
- * to separate the lscsa with the actual local store image, thus
- * allowing us to require only 4 64K pages per context
- */
- for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) {
- /* XXX This is likely to fail, we should use a special pool
- * similar to what hugetlbfs does.
- */
- csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL,
- SPU_64K_PAGE_ORDER);
- if (csa->lscsa_pages[i] == NULL)
- goto fail;
- }
-
- pr_debug(" success ! creating vmap...\n");
-
- /* Now we need to create a vmalloc mapping of these for the kernel
- * and SPU context switch code to use. Currently, we stick to a
- * normal kernel vmalloc mapping, which in our case will be 4K
- */
- n_4k = SPU_64K_PAGE_COUNT * SPU_LSCSA_NUM_BIG_PAGES;
- pgarray = kmalloc(sizeof(struct page *) * n_4k, GFP_KERNEL);
- if (pgarray == NULL)
- goto fail;
- for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
- for (j = 0; j < SPU_64K_PAGE_COUNT; j++)
- /* We assume all the struct page's are contiguous
- * which should be hopefully the case for an order 4
- * allocation..
- */
- pgarray[i * SPU_64K_PAGE_COUNT + j] =
- csa->lscsa_pages[i] + j;
- csa->lscsa = vmap(pgarray, n_4k, VM_USERMAP, PAGE_KERNEL);
- kfree(pgarray);
- if (csa->lscsa == NULL)
- goto fail;
-
- memset(csa->lscsa, 0, sizeof(struct spu_lscsa));
-
- /* Set LS pages reserved to allow for user-space mapping.
- *
- * XXX isn't that a bit obsolete ? I think we should just
- * make sure the page count is high enough. Anyway, won't harm
- * for now
- */
- for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
- SetPageReserved(vmalloc_to_page(p));
-
- pr_debug(" all good !\n");
-
- return 0;
-fail:
- pr_debug("spufs: failed to allocate lscsa 64K pages, falling back\n");
- spu_free_lscsa(csa);
- return spu_alloc_lscsa_std(csa);
-}
-
-void spu_free_lscsa(struct spu_state *csa)
-{
- unsigned char *p;
- int i;
-
- if (!csa->use_big_pages) {
- spu_free_lscsa_std(csa);
- return;
- }
- csa->use_big_pages = 0;
-
- if (csa->lscsa == NULL)
- goto free_pages;
-
- for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
- ClearPageReserved(vmalloc_to_page(p));
-
- vunmap(csa->lscsa);
- csa->lscsa = NULL;
-
- free_pages:
-
- for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
- if (csa->lscsa_pages[i])
- __free_pages(csa->lscsa_pages[i], SPU_64K_PAGE_ORDER);
-}
-
-#else /* CONFIG_SPU_FS_64K_LS */
-
-int spu_alloc_lscsa(struct spu_state *csa)
-{
- return spu_alloc_lscsa_std(csa);
-}
-
-void spu_free_lscsa(struct spu_state *csa)
-{
- spu_free_lscsa_std(csa);
-}
-
-#endif /* !defined(CONFIG_SPU_FS_64K_LS) */
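
Note: with CONFIG_SPU_FS_64K_LS removed, the plain vmalloc-backed allocator is all that remains of lscsa_alloc.c, and spufs no longer needs the get_unmapped_area or slice hooks dropped from file.c above. For orientation only, the surviving allocation path looks roughly like the following paraphrase (not a verbatim copy of the post-patch file):

/* Paraphrased sketch of the remaining vmalloc-backed LSCSA allocator. */
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>

int spu_alloc_lscsa(struct spu_state *csa)
{
	unsigned char *p;

	csa->lscsa = vzalloc(sizeof(struct spu_lscsa));
	if (!csa->lscsa)
		return -ENOMEM;

	/* Mark local-store pages reserved so user space can mmap them. */
	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		SetPageReserved(vmalloc_to_page(p));

	return 0;
}
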
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 998f632e7..5c80a0f9d 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -64,11 +64,6 @@ static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;
/*
- * Priority of a normal, non-rt, non-niced'd process (aka nice level 0).
- */
-#define NORMAL_PRIO 120
-
-/*
* Frequency of the spu scheduler tick. By default we do one SPU scheduler
* tick for every 10 CPU scheduler ticks.
*/
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 15ebc4e8a..987d1b8d6 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -363,7 +363,7 @@ void __init chrp_setup_arch(void)
if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0);
}
-static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void chrp_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index 4cde8e7da..b7866e014 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -108,7 +108,8 @@ static int flipper_pic_map(struct irq_domain *h, unsigned int virq,
return 0;
}
-static int flipper_pic_match(struct irq_domain *h, struct device_node *np)
+static int flipper_pic_match(struct irq_domain *h, struct device_node *np,
+ enum irq_domain_bus_token bus_token)
{
return 1;
}
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 9dd154d6f..9b7975706 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -120,8 +120,7 @@ static unsigned int __hlwd_pic_get_irq(struct irq_domain *h)
return irq_linear_revmap(h, irq);
}
-static void hlwd_pic_irq_cascade(unsigned int cascade_virq,
- struct irq_desc *desc)
+static void hlwd_pic_irq_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_domain *irq_domain = irq_desc_get_handler_data(desc);
diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c
index 161330317..8f65aa374 100644
--- a/arch/powerpc/platforms/embedded6xx/mvme5100.c
+++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c
@@ -42,7 +42,7 @@
static phys_addr_t pci_membase;
static u_char *restart;
-static void mvme5100_8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void mvme5100_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c
index ff1bb4b69..b304a9fe5 100644
--- a/arch/powerpc/platforms/pasemi/msi.c
+++ b/arch/powerpc/platforms/pasemi/msi.c
@@ -67,7 +67,7 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
if (entry->irq == NO_IRQ)
continue;
@@ -95,7 +95,7 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
msg.address_hi = 0;
msg.address_lo = PASEMI_MSI_ADDR;
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
/* Allocate 16 interrupts for now, since that's the grouping for
* affinity. This can be changed later if it turns out 32 is too
* few MSIs for someone, but restrictions will apply to how the
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 59cfc9d63..6f4f8b060 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -268,7 +268,8 @@ static struct irqaction gatwick_cascade_action = {
.name = "cascade",
};
-static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node)
+static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
{
/* We match all, we don't always have a node anyway */
return 1;
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 7cf0df859..3bb6acb76 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -1394,11 +1394,19 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
*/
if (pnv_eeh_get_pe(hose,
be64_to_cpu(frozen_pe_no), pe)) {
- /* Try best to clear it */
pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
- hose->global_number, frozen_pe_no);
+ hose->global_number, be64_to_cpu(frozen_pe_no));
pr_info("EEH: PHB location: %s\n",
eeh_pe_loc_get(phb_pe));
+
+ /* Dump PHB diag-data */
+ rc = opal_pci_get_phb_diag_data2(phb->opal_id,
+ phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
+ if (rc == OPAL_SUCCESS)
+ pnv_pci_dump_phb_diag_data(hose,
+ phb->diag.blob);
+
+ /* Try best to clear it */
opal_pci_eeh_freeze_clear(phb->opal_id,
frozen_pe_no,
OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c
index a8f49d380..d000f4e21 100644
--- a/arch/powerpc/platforms/powernv/opal-hmi.c
+++ b/arch/powerpc/platforms/powernv/opal-hmi.c
@@ -35,9 +35,134 @@ struct OpalHmiEvtNode {
struct list_head list;
struct OpalHMIEvent hmi_evt;
};
+
+struct xstop_reason {
+ uint32_t xstop_reason;
+ const char *unit_failed;
+ const char *description;
+};
+
static LIST_HEAD(opal_hmi_evt_list);
static DEFINE_SPINLOCK(opal_hmi_evt_lock);
+static void print_core_checkstop_reason(const char *level,
+ struct OpalHMIEvent *hmi_evt)
+{
+ int i;
+ static const struct xstop_reason xstop_reason[] = {
+ { CORE_CHECKSTOP_IFU_REGFILE, "IFU",
+ "RegFile core check stop" },
+ { CORE_CHECKSTOP_IFU_LOGIC, "IFU", "Logic core check stop" },
+ { CORE_CHECKSTOP_PC_DURING_RECOV, "PC",
+ "Core checkstop during recovery" },
+ { CORE_CHECKSTOP_ISU_REGFILE, "ISU",
+ "RegFile core check stop (mapper error)" },
+ { CORE_CHECKSTOP_ISU_LOGIC, "ISU", "Logic core check stop" },
+ { CORE_CHECKSTOP_FXU_LOGIC, "FXU", "Logic core check stop" },
+ { CORE_CHECKSTOP_VSU_LOGIC, "VSU", "Logic core check stop" },
+ { CORE_CHECKSTOP_PC_RECOV_IN_MAINT_MODE, "PC",
+ "Recovery in maintenance mode" },
+ { CORE_CHECKSTOP_LSU_REGFILE, "LSU",
+ "RegFile core check stop" },
+ { CORE_CHECKSTOP_PC_FWD_PROGRESS, "PC",
+ "Forward Progress Error" },
+ { CORE_CHECKSTOP_LSU_LOGIC, "LSU", "Logic core check stop" },
+ { CORE_CHECKSTOP_PC_LOGIC, "PC", "Logic core check stop" },
+ { CORE_CHECKSTOP_PC_HYP_RESOURCE, "PC",
+ "Hypervisor Resource error - core check stop" },
+ { CORE_CHECKSTOP_PC_HANG_RECOV_FAILED, "PC",
+ "Hang Recovery Failed (core check stop)" },
+ { CORE_CHECKSTOP_PC_AMBI_HANG_DETECTED, "PC",
+ "Ambiguous Hang Detected (unknown source)" },
+ { CORE_CHECKSTOP_PC_DEBUG_TRIG_ERR_INJ, "PC",
+ "Debug Trigger Error inject" },
+ { CORE_CHECKSTOP_PC_SPRD_HYP_ERR_INJ, "PC",
+ "Hypervisor check stop via SPRC/SPRD" },
+ };
+
+ /* Validity check */
+ if (!hmi_evt->u.xstop_error.xstop_reason) {
+ printk("%s Unknown Core check stop.\n", level);
+ return;
+ }
+
+ printk("%s CPU PIR: %08x\n", level,
+ be32_to_cpu(hmi_evt->u.xstop_error.u.pir));
+ for (i = 0; i < ARRAY_SIZE(xstop_reason); i++)
+ if (be32_to_cpu(hmi_evt->u.xstop_error.xstop_reason) &
+ xstop_reason[i].xstop_reason)
+ printk("%s [Unit: %-3s] %s\n", level,
+ xstop_reason[i].unit_failed,
+ xstop_reason[i].description);
+}
+
+static void print_nx_checkstop_reason(const char *level,
+ struct OpalHMIEvent *hmi_evt)
+{
+ int i;
+ static const struct xstop_reason xstop_reason[] = {
+ { NX_CHECKSTOP_SHM_INVAL_STATE_ERR, "DMA & Engine",
+ "SHM invalid state error" },
+ { NX_CHECKSTOP_DMA_INVAL_STATE_ERR_1, "DMA & Engine",
+ "DMA invalid state error bit 15" },
+ { NX_CHECKSTOP_DMA_INVAL_STATE_ERR_2, "DMA & Engine",
+ "DMA invalid state error bit 16" },
+ { NX_CHECKSTOP_DMA_CH0_INVAL_STATE_ERR, "DMA & Engine",
+ "Channel 0 invalid state error" },
+ { NX_CHECKSTOP_DMA_CH1_INVAL_STATE_ERR, "DMA & Engine",
+ "Channel 1 invalid state error" },
+ { NX_CHECKSTOP_DMA_CH2_INVAL_STATE_ERR, "DMA & Engine",
+ "Channel 2 invalid state error" },
+ { NX_CHECKSTOP_DMA_CH3_INVAL_STATE_ERR, "DMA & Engine",
+ "Channel 3 invalid state error" },
+ { NX_CHECKSTOP_DMA_CH4_INVAL_STATE_ERR, "DMA & Engine",
+ "Channel 4 invalid state error" },
+ { NX_CHECKSTOP_DMA_CH5_INVAL_STATE_ERR, "DMA & Engine",
+ "Channel 5 invalid state error" },
+ { NX_CHECKSTOP_DMA_CH6_INVAL_STATE_ERR, "DMA & Engine",
+ "Channel 6 invalid state error" },
+ { NX_CHECKSTOP_DMA_CH7_INVAL_STATE_ERR, "DMA & Engine",
+ "Channel 7 invalid state error" },
+ { NX_CHECKSTOP_DMA_CRB_UE, "DMA & Engine",
+ "UE error on CRB(CSB address, CCB)" },
+ { NX_CHECKSTOP_DMA_CRB_SUE, "DMA & Engine",
+ "SUE error on CRB(CSB address, CCB)" },
+ { NX_CHECKSTOP_PBI_ISN_UE, "PowerBus Interface",
+ "CRB Kill ISN received while holding ISN with UE error" },
+ };
+
+ /* Validity check */
+ if (!hmi_evt->u.xstop_error.xstop_reason) {
+ printk("%s Unknown NX check stop.\n", level);
+ return;
+ }
+
+ printk("%s NX checkstop on CHIP ID: %x\n", level,
+ be32_to_cpu(hmi_evt->u.xstop_error.u.chip_id));
+ for (i = 0; i < ARRAY_SIZE(xstop_reason); i++)
+ if (be32_to_cpu(hmi_evt->u.xstop_error.xstop_reason) &
+ xstop_reason[i].xstop_reason)
+ printk("%s [Unit: %-3s] %s\n", level,
+ xstop_reason[i].unit_failed,
+ xstop_reason[i].description);
+}
+
+static void print_checkstop_reason(const char *level,
+ struct OpalHMIEvent *hmi_evt)
+{
+ switch (hmi_evt->u.xstop_error.xstop_type) {
+ case CHECKSTOP_TYPE_CORE:
+ print_core_checkstop_reason(level, hmi_evt);
+ break;
+ case CHECKSTOP_TYPE_NX:
+ print_nx_checkstop_reason(level, hmi_evt);
+ break;
+ case CHECKSTOP_TYPE_UNKNOWN:
+ printk("%s Unknown Malfunction Alert.\n", level);
+ break;
+ }
+}
+
static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt)
{
const char *level, *sevstr, *error_info;
@@ -95,6 +220,13 @@ static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt)
(hmi_evt->type == OpalHMI_ERROR_TFMR_PARITY))
printk("%s TFMR: %016llx\n", level,
be64_to_cpu(hmi_evt->tfmr));
+
+ if (hmi_evt->version < OpalHMIEvt_V2)
+ return;
+
+ /* OpalHMIEvt_V2 and above provides reason for malfunction alert. */
+ if (hmi_evt->type == OpalHMI_ERROR_MALFUNC_ALERT)
+ print_checkstop_reason(level, hmi_evt);
}
static void hmi_event_handler(struct work_struct *work)
@@ -103,6 +235,8 @@ static void hmi_event_handler(struct work_struct *work)
struct OpalHMIEvent *hmi_evt;
struct OpalHmiEvtNode *msg_node;
uint8_t disposition;
+ struct opal_msg msg;
+ int unrecoverable = 0;
spin_lock_irqsave(&opal_hmi_evt_lock, flags);
while (!list_empty(&opal_hmi_evt_list)) {
@@ -118,14 +252,53 @@ static void hmi_event_handler(struct work_struct *work)
/*
* Check if HMI event has been recovered or not. If not
- * then we can't continue, invoke panic.
+ * then kernel can't continue, we need to panic.
+ * But before we do that, display all the HMI event
+ * available on the list and set unrecoverable flag to 1.
*/
if (disposition != OpalHMI_DISPOSITION_RECOVERED)
- panic("Unrecoverable HMI exception");
+ unrecoverable = 1;
spin_lock_irqsave(&opal_hmi_evt_lock, flags);
}
spin_unlock_irqrestore(&opal_hmi_evt_lock, flags);
+
+ if (unrecoverable) {
+ int ret;
+
+ /* Pull all HMI events from OPAL before we panic. */
+ while (opal_get_msg(__pa(&msg), sizeof(msg)) == OPAL_SUCCESS) {
+ u32 type;
+
+ type = be32_to_cpu(msg.msg_type);
+
+ /* skip if not HMI event */
+ if (type != OPAL_MSG_HMI_EVT)
+ continue;
+
+ /* HMI event info starts from param[0] */
+ hmi_evt = (struct OpalHMIEvent *)&msg.params[0];
+ print_hmi_event_info(hmi_evt);
+ }
+
+ /*
+ * Unrecoverable HMI exception. We need to inform BMC/OCC
+ * about this error so that it can collect relevant data
+ * for error analysis before rebooting.
+ */
+ ret = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR,
+ "Unrecoverable HMI exception");
+ if (ret == OPAL_UNSUPPORTED) {
+ pr_emerg("Reboot type %d not supported\n",
+ OPAL_REBOOT_PLATFORM_ERROR);
+ }
+
+ /*
+ * Fall through and panic if opal_cec_reboot2() returns
+ * OPAL_UNSUPPORTED.
+ */
+ panic("Unrecoverable HMI exception");
+ }
}
static DECLARE_WORK(hmi_event_work, hmi_event_handler);
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index e2e7d75f5..2c91ee780 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -134,7 +134,8 @@ static void opal_handle_irq_work(struct irq_work *work)
opal_handle_events(be64_to_cpu(last_outstanding_events));
}
-static int opal_event_match(struct irq_domain *h, struct device_node *node)
+static int opal_event_match(struct irq_domain *h, struct device_node *node,
+ enum irq_domain_bus_token bus_token)
{
return h->of_node == node;
}
diff --git a/arch/powerpc/platforms/powernv/opal-power.c b/arch/powerpc/platforms/powernv/opal-power.c
index ac46c2c24..58dc33082 100644
--- a/arch/powerpc/platforms/powernv/opal-power.c
+++ b/arch/powerpc/platforms/powernv/opal-power.c
@@ -9,9 +9,12 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) "opal-power: " fmt
+
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
+#include <linux/of.h>
#include <asm/opal.h>
#include <asm/machdep.h>
@@ -19,30 +22,116 @@
#define SOFT_OFF 0x00
#define SOFT_REBOOT 0x01
+/* Detect EPOW event */
+static bool detect_epow(void)
+{
+ u16 epow;
+ int i, rc;
+ __be16 epow_classes;
+ __be16 opal_epow_status[OPAL_SYSEPOW_MAX] = {0};
+
+ /*
+ * Check for EPOW event. Kernel sends supported EPOW classes info
+ * to OPAL. OPAL returns EPOW info along with classes present.
+ */
+ epow_classes = cpu_to_be16(OPAL_SYSEPOW_MAX);
+ rc = opal_get_epow_status(opal_epow_status, &epow_classes);
+ if (rc != OPAL_SUCCESS) {
+ pr_err("Failed to get EPOW event information\n");
+ return false;
+ }
+
+ /* Look for EPOW events present */
+ for (i = 0; i < be16_to_cpu(epow_classes); i++) {
+ epow = be16_to_cpu(opal_epow_status[i]);
+
+ /* Filter events which do not need shutdown. */
+ if (i == OPAL_SYSEPOW_POWER)
+ epow &= ~(OPAL_SYSPOWER_CHNG | OPAL_SYSPOWER_FAIL |
+ OPAL_SYSPOWER_INCL);
+ if (epow)
+ return true;
+ }
+
+ return false;
+}
+
+/* Check for existing EPOW, DPO events */
+static bool poweroff_pending(void)
+{
+ int rc;
+ __be64 opal_dpo_timeout;
+
+ /* Check for DPO event */
+ rc = opal_get_dpo_status(&opal_dpo_timeout);
+ if (rc == OPAL_SUCCESS) {
+ pr_info("Existing DPO event detected.\n");
+ return true;
+ }
+
+ /* Check for EPOW event */
+ if (detect_epow()) {
+ pr_info("Existing EPOW event detected.\n");
+ return true;
+ }
+
+ return false;
+}
+
+/* OPAL power-control events notifier */
static int opal_power_control_event(struct notifier_block *nb,
- unsigned long msg_type, void *msg)
+ unsigned long msg_type, void *msg)
{
- struct opal_msg *power_msg = msg;
uint64_t type;
- type = be64_to_cpu(power_msg->params[0]);
-
- switch (type) {
- case SOFT_REBOOT:
- pr_info("OPAL: reboot requested\n");
- orderly_reboot();
+ switch (msg_type) {
+ case OPAL_MSG_EPOW:
+ if (detect_epow()) {
+ pr_info("EPOW msg received. Powering off system\n");
+ orderly_poweroff(true);
+ }
break;
- case SOFT_OFF:
- pr_info("OPAL: poweroff requested\n");
+ case OPAL_MSG_DPO:
+ pr_info("DPO msg received. Powering off system\n");
orderly_poweroff(true);
break;
+ case OPAL_MSG_SHUTDOWN:
+ type = be64_to_cpu(((struct opal_msg *)msg)->params[0]);
+ switch (type) {
+ case SOFT_REBOOT:
+ pr_info("Reboot requested\n");
+ orderly_reboot();
+ break;
+ case SOFT_OFF:
+ pr_info("Poweroff requested\n");
+ orderly_poweroff(true);
+ break;
+ default:
+ pr_err("Unknown power-control type %llu\n", type);
+ }
+ break;
default:
- pr_err("OPAL: power control type unexpected %016llx\n", type);
+ pr_err("Unknown OPAL message type %lu\n", msg_type);
}
return 0;
}
+/* OPAL EPOW event notifier block */
+static struct notifier_block opal_epow_nb = {
+ .notifier_call = opal_power_control_event,
+ .next = NULL,
+ .priority = 0,
+};
+
+/* OPAL DPO event notifier block */
+static struct notifier_block opal_dpo_nb = {
+ .notifier_call = opal_power_control_event,
+ .next = NULL,
+ .priority = 0,
+};
+
+/* OPAL power-control event notifier block */
static struct notifier_block opal_power_control_nb = {
.notifier_call = opal_power_control_event,
.next = NULL,
@@ -51,16 +140,40 @@ static struct notifier_block opal_power_control_nb = {
static int __init opal_power_control_init(void)
{
- int ret;
+ int ret, supported = 0;
+ struct device_node *np;
+ /* Register OPAL power-control events notifier */
ret = opal_message_notifier_register(OPAL_MSG_SHUTDOWN,
- &opal_power_control_nb);
- if (ret) {
- pr_err("%s: Can't register OPAL event notifier (%d)\n",
- __func__, ret);
- return ret;
+ &opal_power_control_nb);
+ if (ret)
+ pr_err("Failed to register SHUTDOWN notifier, ret = %d\n", ret);
+
+ /* Determine OPAL EPOW, DPO support */
+ np = of_find_node_by_path("/ibm,opal/epow");
+ if (np) {
+ supported = of_device_is_compatible(np, "ibm,opal-v3-epow");
+ of_node_put(np);
}
+ if (!supported)
+ return 0;
+ pr_info("OPAL EPOW, DPO support detected.\n");
+
+ /* Register EPOW event notifier */
+ ret = opal_message_notifier_register(OPAL_MSG_EPOW, &opal_epow_nb);
+ if (ret)
+ pr_err("Failed to register EPOW notifier, ret = %d\n", ret);
+
+ /* Register DPO event notifier */
+ ret = opal_message_notifier_register(OPAL_MSG_DPO, &opal_dpo_nb);
+ if (ret)
+ pr_err("Failed to register DPO notifier, ret = %d\n", ret);
+
+ /* Check for any pending EPOW or DPO events. */
+ if (poweroff_pending())
+ orderly_poweroff(true);
+
return 0;
}
machine_subsys_initcall(powernv, opal_power_control_init);
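
Note: the rewritten opal-power.c fans a single callback out over three OPAL message classes (SHUTDOWN, EPOW, DPO). Because a notifier_block carries its own chain linkage, one block cannot sit on several notifier chains at once, which is why the patch declares three blocks that share the same notifier_call. A reduced sketch of the registration pattern, using hypothetical "foo" names:

/* Sketch of the OPAL message-notifier pattern; "foo" names are illustrative. */
#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/opal.h>

static int foo_opal_msg_event(struct notifier_block *nb,
			      unsigned long msg_type, void *msg)
{
	/* msg_type is the OPAL_MSG_* class; msg points at a struct opal_msg */
	return 0;
}

/* One notifier_block per message class, all sharing the same callback. */
static struct notifier_block foo_epow_nb = { .notifier_call = foo_opal_msg_event };
static struct notifier_block foo_dpo_nb  = { .notifier_call = foo_opal_msg_event };

static int __init foo_init(void)
{
	int ret;

	ret = opal_message_notifier_register(OPAL_MSG_EPOW, &foo_epow_nb);
	if (!ret)
		ret = opal_message_notifier_register(OPAL_MSG_DPO, &foo_dpo_nb);
	return ret;
}
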
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index d6a7b8252..b7a464fef 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -202,6 +202,7 @@ OPAL_CALL(opal_rtc_read, OPAL_RTC_READ);
OPAL_CALL(opal_rtc_write, OPAL_RTC_WRITE);
OPAL_CALL(opal_cec_power_down, OPAL_CEC_POWER_DOWN);
OPAL_CALL(opal_cec_reboot, OPAL_CEC_REBOOT);
+OPAL_CALL(opal_cec_reboot2, OPAL_CEC_REBOOT2);
OPAL_CALL(opal_read_nvram, OPAL_READ_NVRAM);
OPAL_CALL(opal_write_nvram, OPAL_WRITE_NVRAM);
OPAL_CALL(opal_handle_interrupt, OPAL_HANDLE_INTERRUPT);
@@ -249,6 +250,7 @@ OPAL_CALL(opal_pci_reinit, OPAL_PCI_REINIT);
OPAL_CALL(opal_pci_mask_pe_error, OPAL_PCI_MASK_PE_ERROR);
OPAL_CALL(opal_set_slot_led_status, OPAL_SET_SLOT_LED_STATUS);
OPAL_CALL(opal_get_epow_status, OPAL_GET_EPOW_STATUS);
+OPAL_CALL(opal_get_dpo_status, OPAL_GET_DPO_STATUS);
OPAL_CALL(opal_set_system_attention_led, OPAL_SET_SYSTEM_ATTENTION_LED);
OPAL_CALL(opal_pci_next_error, OPAL_PCI_NEXT_ERROR);
OPAL_CALL(opal_pci_poll, OPAL_PCI_POLL);
@@ -297,3 +299,5 @@ OPAL_CALL(opal_flash_read, OPAL_FLASH_READ);
OPAL_CALL(opal_flash_write, OPAL_FLASH_WRITE);
OPAL_CALL(opal_flash_erase, OPAL_FLASH_ERASE);
OPAL_CALL(opal_prd_msg, OPAL_PRD_MSG);
+OPAL_CALL(opal_leds_get_ind, OPAL_LEDS_GET_INDICATOR);
+OPAL_CALL(opal_leds_set_ind, OPAL_LEDS_SET_INDICATOR);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index f084afa0e..4296d55e8 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -441,6 +441,7 @@ static int opal_recover_mce(struct pt_regs *regs,
int opal_machine_check(struct pt_regs *regs)
{
struct machine_check_event evt;
+ int ret;
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
return 0;
@@ -455,6 +456,43 @@ int opal_machine_check(struct pt_regs *regs)
if (opal_recover_mce(regs, &evt))
return 1;
+
+ /*
+ * Unrecovered machine check, we are heading to panic path.
+ *
+ * We may have hit this MCE in very early stage of kernel
+ * initialization even before opal-prd has started running. If
+ * this is the case then this MCE error may go un-noticed or
+ * un-analyzed if we go down panic path. We need to inform
+ * BMC/OCC about this error so that they can collect relevant
+ * data for error analysis before rebooting.
+ * Use opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR) to do so.
+ * This function may not return on BMC based system.
+ */
+ ret = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR,
+ "Unrecoverable Machine Check exception");
+ if (ret == OPAL_UNSUPPORTED) {
+ pr_emerg("Reboot type %d not supported\n",
+ OPAL_REBOOT_PLATFORM_ERROR);
+ }
+
+ /*
+ * We reached here. There can be three possibilities:
+ * 1. We are running on a firmware level that do not support
+ * opal_cec_reboot2()
+ * 2. We are running on a firmware level that do not support
+ * OPAL_REBOOT_PLATFORM_ERROR reboot type.
+ * 3. We are running on FSP based system that does not need opal
+ * to trigger checkstop explicitly for error analysis. The FSP
+ * PRD component would have already got notified about this
+ * error through other channels.
+ *
+ * If hardware marked this as an unrecoverable MCE, we are
+ * going to panic anyway. Even if it didn't, it's not safe to
+ * continue at this point, so we should explicitly panic.
+ */
+
+ panic("PowerNV Unrecovered Machine Check");
return 0;
}
@@ -648,7 +686,7 @@ static void opal_init_heartbeat(void)
static int __init opal_init(void)
{
- struct device_node *np, *consoles;
+ struct device_node *np, *consoles, *leds;
int rc;
opal_node = of_find_node_by_path("/ibm,opal");
@@ -689,6 +727,13 @@ static int __init opal_init(void)
/* Setup a heatbeat thread if requested by OPAL */
opal_init_heartbeat();
+ /* Create leds platform devices */
+ leds = of_find_node_by_path("/ibm,opal/leds");
+ if (leds) {
+ of_platform_device_create(leds, "opal_leds", NULL);
+ of_node_put(leds);
+ }
+
/* Create "opal" kobject under /sys/firmware */
rc = opal_sysfs_init();
if (rc == 0) {
@@ -841,3 +886,6 @@ EXPORT_SYMBOL_GPL(opal_rtc_write);
EXPORT_SYMBOL_GPL(opal_tpo_read);
EXPORT_SYMBOL_GPL(opal_tpo_write);
EXPORT_SYMBOL_GPL(opal_i2c_request);
+/* Export these symbols for PowerNV LED class driver */
+EXPORT_SYMBOL_GPL(opal_leds_get_ind);
+EXPORT_SYMBOL_GPL(opal_leds_set_ind);
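
Note: opal.c now handles an unrecovered machine check the same way the reworked HMI path does: request a platform-error reboot so the BMC/OCC (or FSP) can capture diagnostic data, and fall back to panic() when the firmware call is unsupported or simply returns. A condensed sketch of that escalate-then-panic idiom (hypothetical wrapper name):

/* Condensed sketch of the escalate-then-panic idiom used for fatal errors. */
#include <linux/kernel.h>
#include <asm/opal.h>

static void __noreturn foo_platform_fatal(const char *reason)
{
	int ret;

	/* Give the service processor a chance to collect error data first. */
	ret = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR, (char *)reason);
	if (ret == OPAL_UNSUPPORTED)
		pr_emerg("Reboot type %d not supported\n",
			 OPAL_REBOOT_PLATFORM_ERROR);

	/* On firmware that cannot honour the request, the call returns. */
	panic("%s", reason);
}
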
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 8b64f89e6..414fd1a00 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -140,11 +140,9 @@ static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
return;
}
- if (test_and_set_bit(pe_no, phb->ioda.pe_alloc)) {
- pr_warn("%s: PE %d was assigned on PHB#%x\n",
- __func__, pe_no, phb->hose->global_number);
- return;
- }
+ if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
+ pr_debug("%s: PE %d was reserved on PHB#%x\n",
+ __func__, pe_no, phb->hose->global_number);
phb->ioda.pe_array[pe_no].phb = phb;
phb->ioda.pe_array[pe_no].pe_number = pe_no;
@@ -231,61 +229,60 @@ fail:
return -EIO;
}
-static void pnv_ioda2_reserve_m64_pe(struct pnv_phb *phb)
+static void pnv_ioda2_reserve_dev_m64_pe(struct pci_dev *pdev,
+ unsigned long *pe_bitmap)
{
- resource_size_t sgsz = phb->ioda.m64_segsize;
- struct pci_dev *pdev;
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
struct resource *r;
- int base, step, i;
-
- /*
- * Root bus always has full M64 range and root port has
- * M64 range used in reality. So we're checking root port
- * instead of root bus.
- */
- list_for_each_entry(pdev, &phb->hose->bus->devices, bus_list) {
- for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
- r = &pdev->resource[PCI_BRIDGE_RESOURCES + i];
- if (!r->parent ||
- !pnv_pci_is_mem_pref_64(r->flags))
- continue;
+ resource_size_t base, sgsz, start, end;
+ int segno, i;
+
+ base = phb->ioda.m64_base;
+ sgsz = phb->ioda.m64_segsize;
+ for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+ r = &pdev->resource[i];
+ if (!r->parent || !pnv_pci_is_mem_pref_64(r->flags))
+ continue;
- base = (r->start - phb->ioda.m64_base) / sgsz;
- for (step = 0; step < resource_size(r) / sgsz; step++)
- pnv_ioda_reserve_pe(phb, base + step);
+ start = _ALIGN_DOWN(r->start - base, sgsz);
+ end = _ALIGN_UP(r->end - base, sgsz);
+ for (segno = start / sgsz; segno < end / sgsz; segno++) {
+ if (pe_bitmap)
+ set_bit(segno, pe_bitmap);
+ else
+ pnv_ioda_reserve_pe(phb, segno);
}
}
}
-static int pnv_ioda2_pick_m64_pe(struct pnv_phb *phb,
- struct pci_bus *bus, int all)
+static void pnv_ioda2_reserve_m64_pe(struct pci_bus *bus,
+ unsigned long *pe_bitmap,
+ bool all)
{
- resource_size_t segsz = phb->ioda.m64_segsize;
struct pci_dev *pdev;
- struct resource *r;
+
+ list_for_each_entry(pdev, &bus->devices, bus_list) {
+ pnv_ioda2_reserve_dev_m64_pe(pdev, pe_bitmap);
+
+ if (all && pdev->subordinate)
+ pnv_ioda2_reserve_m64_pe(pdev->subordinate,
+ pe_bitmap, all);
+ }
+}
+
+static int pnv_ioda2_pick_m64_pe(struct pci_bus *bus, bool all)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ struct pnv_phb *phb = hose->private_data;
struct pnv_ioda_pe *master_pe, *pe;
unsigned long size, *pe_alloc;
- bool found;
- int start, i, j;
+ int i;
/* Root bus shouldn't use M64 */
if (pci_is_root_bus(bus))
return IODA_INVALID_PE;
- /* We support only one M64 window on each bus */
- found = false;
- pci_bus_for_each_resource(bus, r, i) {
- if (r && r->parent &&
- pnv_pci_is_mem_pref_64(r->flags)) {
- found = true;
- break;
- }
- }
-
- /* No M64 window found ? */
- if (!found)
- return IODA_INVALID_PE;
-
/* Allocate bitmap */
size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
pe_alloc = kzalloc(size, GFP_KERNEL);
@@ -295,35 +292,8 @@ static int pnv_ioda2_pick_m64_pe(struct pnv_phb *phb,
return IODA_INVALID_PE;
}
- /*
- * Figure out reserved PE numbers by the PE
- * the its child PEs.
- */
- start = (r->start - phb->ioda.m64_base) / segsz;
- for (i = 0; i < resource_size(r) / segsz; i++)
- set_bit(start + i, pe_alloc);
-
- if (all)
- goto done;
-
- /*
- * If the PE doesn't cover all subordinate buses,
- * we need subtract from reserved PEs for children.
- */
- list_for_each_entry(pdev, &bus->devices, bus_list) {
- if (!pdev->subordinate)
- continue;
-
- pci_bus_for_each_resource(pdev->subordinate, r, i) {
- if (!r || !r->parent ||
- !pnv_pci_is_mem_pref_64(r->flags))
- continue;
-
- start = (r->start - phb->ioda.m64_base) / segsz;
- for (j = 0; j < resource_size(r) / segsz ; j++)
- clear_bit(start + j, pe_alloc);
- }
- }
+ /* Figure out reserved PE numbers by the PE */
+ pnv_ioda2_reserve_m64_pe(bus, pe_alloc, all);
/*
* the current bus might not own M64 window and that's all
@@ -339,7 +309,6 @@ static int pnv_ioda2_pick_m64_pe(struct pnv_phb *phb,
* Figure out the master PE and put all slave PEs to master
* PE's list to form compound PE.
*/
-done:
master_pe = NULL;
i = -1;
while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe, i + 1)) <
@@ -653,7 +622,7 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
pdev = pe->pdev->bus->self;
#ifdef CONFIG_PCI_IOV
else if (pe->flags & PNV_IODA_PE_VF)
- pdev = pe->parent_dev->bus->self;
+ pdev = pe->parent_dev;
#endif /* CONFIG_PCI_IOV */
while (pdev) {
struct pci_dn *pdn = pci_get_pdn(pdev);
@@ -732,7 +701,7 @@ static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
parent = parent->bus->self;
}
- opal_pci_eeh_freeze_set(phb->opal_id, pe->pe_number,
+ opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
/* Disassociate PE in PELT */
@@ -946,8 +915,9 @@ static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
res2 = *res;
res->start += size * offset;
- dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (enabling %d VFs shifted by %d)\n",
- i, &res2, res, num_vfs, offset);
+ dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
+ i, &res2, res, (offset > 0) ? "En" : "Dis",
+ num_vfs, offset);
pci_update_resource(dev, i + PCI_IOV_RESOURCES);
}
return 0;
@@ -1050,7 +1020,7 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
* subordinate PCI devices and buses. The second type of PE is normally
* orgiriated by PCIe-to-PCI bridge or PLX switch downstream ports.
*/
-static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
+static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
{
struct pci_controller *hose = pci_bus_to_host(bus);
struct pnv_phb *phb = hose->private_data;
@@ -1059,7 +1029,7 @@ static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
/* Check if PE is determined by M64 */
if (phb->pick_m64_pe)
- pe_num = phb->pick_m64_pe(phb, bus, all);
+ pe_num = phb->pick_m64_pe(bus, all);
/* The PE number isn't pinned by M64 */
if (pe_num == IODA_INVALID_PE)
@@ -1117,12 +1087,12 @@ static void pnv_ioda_setup_PEs(struct pci_bus *bus)
{
struct pci_dev *dev;
- pnv_ioda_setup_bus_PE(bus, 0);
+ pnv_ioda_setup_bus_PE(bus, false);
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->subordinate) {
if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
- pnv_ioda_setup_bus_PE(dev->subordinate, 1);
+ pnv_ioda_setup_bus_PE(dev->subordinate, true);
else
pnv_ioda_setup_PEs(dev->subordinate);
}
@@ -1147,7 +1117,7 @@ static void pnv_pci_ioda_setup_PEs(void)
/* M64 layout might affect PE allocation */
if (phb->reserve_m64_pe)
- phb->reserve_m64_pe(phb);
+ phb->reserve_m64_pe(hose->bus, NULL, true);
pnv_ioda_setup_PEs(hose->bus);
}
@@ -1590,6 +1560,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
pe = &phb->ioda.pe_array[pdn->pe_number];
WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
+ set_dma_offset(&pdev->dev, pe->tce_bypass_base);
set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
/*
* Note: iommu_add_device() will fail here as
@@ -1620,19 +1591,18 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
if (bypass) {
dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
set_dma_ops(&pdev->dev, &dma_direct_ops);
- set_dma_offset(&pdev->dev, pe->tce_bypass_base);
} else {
dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
set_dma_ops(&pdev->dev, &dma_iommu_ops);
- set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
}
*pdev->dev.dma_mask = dma_mask;
return 0;
}
-static u64 pnv_pci_ioda_dma_get_required_mask(struct pnv_phb *phb,
- struct pci_dev *pdev)
+static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
struct pci_dn *pdn = pci_get_pdn(pdev);
struct pnv_ioda_pe *pe;
u64 end, mask;
@@ -1659,6 +1629,7 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
list_for_each_entry(dev, &bus->devices, bus_list) {
set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
+ set_dma_offset(&dev->dev, pe->tce_bypass_base);
iommu_add_device(&dev->dev);
if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
@@ -3071,6 +3042,7 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
.window_alignment = pnv_pci_window_alignment,
.reset_secondary_bus = pnv_pci_reset_secondary_bus,
.dma_set_mask = pnv_pci_ioda_dma_set_mask,
+ .dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask,
.shutdown = pnv_pci_ioda_shutdown,
};
@@ -3217,7 +3189,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
/* Setup TCEs */
phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
- phb->dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask;
/* Setup MSI support */
pnv_pci_init_ioda_msis(phb);
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index fd16f86e5..f2dd77234 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -61,7 +61,7 @@ int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
if (pdev->no_64bit_msi && !phb->msi32_support)
return -ENODEV;
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
pr_warn("%s: Supports only 64-bit MSIs\n",
pci_name(pdev));
@@ -104,7 +104,7 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev)
if (WARN_ON(!phb))
return;
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
if (entry->irq == NO_IRQ)
continue;
hwirq = virq_to_hw(entry->irq);
@@ -762,17 +762,6 @@ void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
phb->dma_dev_setup(phb, pdev);
}
-u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
-{
- struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- if (phb && phb->dma_get_required_mask)
- return phb->dma_get_required_mask(phb, pdev);
-
- return __dma_get_required_mask(&pdev->dev);
-}
-
void pnv_pci_shutdown(void)
{
struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 8ef2d28ad..c8ff50e90 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -105,13 +105,12 @@ struct pnv_phb {
unsigned int hwirq, unsigned int virq,
unsigned int is_64, struct msi_msg *msg);
void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
- u64 (*dma_get_required_mask)(struct pnv_phb *phb,
- struct pci_dev *pdev);
void (*fixup_phb)(struct pci_controller *hose);
u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
int (*init_m64)(struct pnv_phb *phb);
- void (*reserve_m64_pe)(struct pnv_phb *phb);
- int (*pick_m64_pe)(struct pnv_phb *phb, struct pci_bus *bus, int all);
+ void (*reserve_m64_pe)(struct pci_bus *bus,
+ unsigned long *pe_bitmap, bool all);
+ int (*pick_m64_pe)(struct pci_bus *bus, bool all);
int (*get_pe_state)(struct pnv_phb *phb, int pe_no);
void (*freeze_pe)(struct pnv_phb *phb, int pe_no);
int (*unfreeze_pe)(struct pnv_phb *phb, int pe_no, int opt);
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 9269e30e4..6dbc0a1da 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -12,15 +12,9 @@ struct pci_dev;
#ifdef CONFIG_PCI
extern void pnv_pci_init(void);
extern void pnv_pci_shutdown(void);
-extern u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev);
#else
static inline void pnv_pci_init(void) { }
static inline void pnv_pci_shutdown(void) { }
-
-static inline u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
-{
- return 0;
-}
#endif
extern u32 pnv_get_supported_cpuidle_states(void);
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
index 6eb808ff6..5dcbdea1a 100644
--- a/arch/powerpc/platforms/powernv/rng.c
+++ b/arch/powerpc/platforms/powernv/rng.c
@@ -128,7 +128,7 @@ static __init int rng_create(struct device_node *dn)
pr_info_once("Registering arch random hook.\n");
- ppc_md.get_random_long = powernv_get_random_long;
+ ppc_md.get_random_seed = powernv_get_random_long;
return 0;
}
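
The rng.c hunk renames the registered hook from ppc_md.get_random_long to ppc_md.get_random_seed, matching the arch_get_random_seed_long() side of the generic interface it serves. The sketch below models that hook pattern in userspace: a machine-description structure carries an optional callback, and the generic wrapper degrades gracefully when no platform registered one. All names ending in _model are invented for the illustration.

#include <stdio.h>
#include <stdbool.h>

struct machdep_model {
    bool (*get_random_seed)(unsigned long *v);   /* optional platform hook */
};

static struct machdep_model ppc_md_model;

/* Platform implementation; the real one reads the NX RNG via MMIO. */
static bool powernv_get_random_model(unsigned long *v)
{
    *v = 0x1234abcdUL;   /* stand-in value */
    return true;
}

/* Generic wrapper: report "no seed" when the platform registered nothing. */
static bool arch_get_random_seed_model(unsigned long *v)
{
    if (ppc_md_model.get_random_seed)
        return ppc_md_model.get_random_seed(v);
    return false;
}

int main(void)
{
    unsigned long v;

    printf("before registration: %s\n",
           arch_get_random_seed_model(&v) ? "seeded" : "no seed");

    ppc_md_model.get_random_seed = powernv_get_random_model;

    if (arch_get_random_seed_model(&v))
        printf("after registration: got 0x%lx\n", v);
    return 0;
}
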
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 53737e019..685b3cbe1 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -165,14 +165,6 @@ static void pnv_progress(char *s, unsigned short hex)
{
}
-static u64 pnv_dma_get_required_mask(struct device *dev)
-{
- if (dev_is_pci(dev))
- return pnv_pci_dma_get_required_mask(to_pci_dev(dev));
-
- return __dma_get_required_mask(dev);
-}
-
static void pnv_shutdown(void)
{
/* Let the PCI code clear up IODA tables */
@@ -243,6 +235,13 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
} else {
/* Primary waits for the secondaries to have reached OPAL */
pnv_kexec_wait_secondaries_down();
+
+ /*
+ * We might be running as little-endian - now that interrupts
+ * are disabled, reset the HILE bit to big-endian so we don't
+ * take interrupts in the wrong endian later
+ */
+ opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
}
}
#endif /* CONFIG_KEXEC */
@@ -314,7 +313,6 @@ define_machine(powernv) {
.machine_shutdown = pnv_shutdown,
.power_save = power7_idle,
.calibrate_decr = generic_calibrate_decr,
- .dma_get_required_mask = pnv_dma_get_required_mask,
#ifdef CONFIG_KEXEC
.kexec_cpu_down = pnv_kexec_cpu_down,
#endif
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 8f70ba681..ca264833e 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -171,7 +171,26 @@ static void pnv_smp_cpu_kill_self(void)
* so clear LPCR:PECE1. We keep PECE2 enabled.
*/
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
+
+ /*
+ * Hard-disable interrupts, and then clear irq_happened flags
+ * that we can safely ignore while off-line, since they
+ * are for things for which we do no processing when off-line
+ * (or in the case of HMI, all the processing we need to do
+ * is done in lower-level real-mode code).
+ */
+ hard_irq_disable();
+ local_paca->irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI);
+
while (!generic_check_cpu_restart(cpu)) {
+ /*
+ * Clear IPI flag, since we don't handle IPIs while
+ * offline, except for those when changing micro-threading
+ * mode, which are handled explicitly below, and those
+ * for coming online, which are handled via
+ * generic_check_cpu_restart() calls.
+ */
+ kvmppc_set_host_ipi(cpu, 0);
ppc64_runlatch_off();
@@ -196,20 +215,20 @@ static void pnv_smp_cpu_kill_self(void)
* having finished executing in a KVM guest, then srr1
* contains 0.
*/
- if ((srr1 & wmask) == SRR1_WAKEEE) {
+ if (((srr1 & wmask) == SRR1_WAKEEE) ||
+ (local_paca->irq_happened & PACA_IRQ_EE)) {
icp_native_flush_interrupt();
- local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
- smp_mb();
} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
- kvmppc_set_host_ipi(cpu, 0);
}
+ local_paca->irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL);
+ smp_mb();
if (cpu_core_split_required())
continue;
- if (!generic_check_cpu_restart(cpu))
+ if (srr1 && !generic_check_cpu_restart(cpu))
DBG("CPU%d Unexpected exit while offline !\n", cpu);
}
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
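
The smp.c hunks above hard-disable interrupts on the way offline and then prune local_paca->irq_happened: decrementer and HMI flags are dropped on entry, and external/doorbell flags are dropped after each wakeup has been acknowledged, so no stale soft-pending bit is replayed when the thread comes back online. The userspace model below walks through that bookkeeping; the flag values are illustrative only, not the kernel's actual PACA_IRQ_* encoding.

#include <stdio.h>

#define PACA_IRQ_HARD_DIS  0x01u   /* illustrative bit assignments */
#define PACA_IRQ_DBELL     0x02u
#define PACA_IRQ_EE        0x04u
#define PACA_IRQ_DEC       0x08u
#define PACA_IRQ_HMI       0x10u

static void show(const char *when, unsigned int happened)
{
    printf("%-26s 0x%02x\n", when, happened);
}

int main(void)
{
    /* Pretend several events were latched just before going offline. */
    unsigned int irq_happened = PACA_IRQ_HARD_DIS | PACA_IRQ_DEC |
                                PACA_IRQ_HMI | PACA_IRQ_EE;

    show("latched before offline:", irq_happened);

    /* Offline entry: drop events handled elsewhere or not at all. */
    irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI);
    show("after offline entry:", irq_happened);

    /* After each nap-loop wakeup: drop the EE/doorbell just acknowledged. */
    irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL);
    show("after wakeup handling:", irq_happened);

    return 0;
}
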
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index f60f80ada..503a73f59 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -190,7 +190,7 @@ static void unsplit_core(void)
hid0 = mfspr(SPRN_HID0);
hid0 &= ~HID0_POWER8_DYNLPARDIS;
- mtspr(SPRN_HID0, hid0);
+ update_power8_hid0(hid0);
update_hid_in_slw(hid0);
while (mfspr(SPRN_HID0) & mask)
@@ -227,7 +227,7 @@ static void split_core(int new_mode)
/* Write new mode */
hid0 = mfspr(SPRN_HID0);
hid0 |= HID0_POWER8_DYNLPARDIS | split_parms[i].value;
- mtspr(SPRN_HID0, hid0);
+ update_power8_hid0(hid0);
update_hid_in_slw(hid0);
/* Wait for it to happen */
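
Both subcore.c hunks replace a bare mtspr(SPRN_HID0, hid0) with update_power8_hid0(); on POWER8 a HID0 write is expected to be bracketed by synchronisation instructions, so centralising it in one helper keeps split_core() and unsplit_core() consistent. The toy userspace model below shows the shape of that refactor, with printf stand-ins for the real instructions and invented bit values.

#include <stdio.h>

/* printf stand-ins for the real sync / mtspr / isync instructions (assumption). */
static void sync_model(void)  { puts("sync"); }
static void isync_model(void) { puts("isync"); }
static void mtspr_hid0_model(unsigned long long v) { printf("mtspr HID0, 0x%llx\n", v); }

/* One helper owns the required ordering, so no caller can forget the barriers. */
static void update_power8_hid0_model(unsigned long long hid0)
{
    sync_model();
    mtspr_hid0_model(hid0);
    isync_model();
}

int main(void)
{
    unsigned long long dynlpardis = 1ULL << 37;   /* made-up bit position */

    update_power8_hid0_model(dynlpardis);   /* split_core()-style caller   */
    update_power8_hid0_model(0);            /* unsplit_core()-style caller */
    return 0;
}
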
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index a6c42f343..638c40609 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -678,7 +678,8 @@ static int ps3_host_map(struct irq_domain *h, unsigned int virq,
return 0;
}
-static int ps3_host_match(struct irq_domain *h, struct device_node *np)
+static int ps3_host_match(struct irq_domain *h, struct device_node *np,
+ enum irq_domain_bus_token bus_token)
{
/* Match all */
return 1;
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
index 097871398..3db53e8af 100644
--- a/arch/powerpc/platforms/ps3/os-area.c
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -194,11 +194,6 @@ static const struct os_area_db_id os_area_db_id_rtc_diff = {
.key = OS_AREA_DB_KEY_RTC_DIFF
};
-static const struct os_area_db_id os_area_db_id_video_mode = {
- .owner = OS_AREA_DB_OWNER_LINUX,
- .key = OS_AREA_DB_KEY_VIDEO_MODE
-};
-
#define SECONDS_FROM_1970_TO_2000 946684800LL
/**
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 0ced387e1..e9ff44cd5 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -92,13 +92,12 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
return NULL;
new_prop->name = kstrdup(prop->name, GFP_KERNEL);
- new_prop->value = kmalloc(prop->length, GFP_KERNEL);
+ new_prop->value = kmemdup(prop->value, prop->length, GFP_KERNEL);
if (!new_prop->name || !new_prop->value) {
dlpar_free_drconf_property(new_prop);
return NULL;
}
- memcpy(new_prop->value, prop->value, prop->length);
new_prop->length = prop->length;
/* Convert the property to cpu endian-ness */
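
The hotplug-memory.c hunk folds a kmalloc() plus memcpy() pair into a single kmemdup() call, which also avoids the brief window where new_prop->value pointed at uninitialised memory. A minimal userspace equivalent of that helper, using malloc() in place of kmalloc(GFP_KERNEL), looks like this:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* What kmemdup() does, modelled in userspace: allocate and copy in one step. */
static void *memdup_model(const void *src, size_t len)
{
    void *p = malloc(len);

    if (p)
        memcpy(p, src, len);
    return p;
}

int main(void)
{
    const unsigned char prop_value[] = { 0xde, 0xad, 0xbe, 0xef };
    unsigned char *copy = memdup_model(prop_value, sizeof(prop_value));

    if (!copy)
        return 1;
    printf("copy[0] = 0x%02x, length = %zu\n", copy[0], sizeof(prop_value));
    free(copy);
    return 0;
}
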
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 10510dea1..0946b98d7 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -1253,11 +1253,10 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
}
}
- /* fall back on iommu ops, restore table pointer with ops */
+ /* fall back on iommu ops */
if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
dev_info(dev, "Restoring 32-bit DMA via iommu\n");
set_dma_ops(dev, &dma_iommu_ops);
- pci_dma_dev_setup_pSeriesLP(pdev);
}
check_mask:
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index c22bb647c..272e9ec1a 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -118,7 +118,7 @@ static void rtas_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
if (entry->irq == NO_IRQ)
continue;
@@ -350,7 +350,7 @@ static int check_msix_entries(struct pci_dev *pdev)
* So we must reject such requests. */
expected = 0;
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
if (entry->msi_attrib.entry_nr != expected) {
pr_debug("rtas_msi: bad MSI-X entries.\n");
return -EINVAL;
@@ -462,7 +462,7 @@ again:
}
i = 0;
- list_for_each_entry(entry, &pdev->msi_list, list) {
+ for_each_pci_msi_entry(entry, pdev) {
hwirq = rtas_query_irq_number(pdn, i++);
if (hwirq < 0) {
pr_debug("rtas_msi: error (%d) getting hwirq\n", rc);
diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
index e09608770..31ca557af 100644
--- a/arch/powerpc/platforms/pseries/rng.c
+++ b/arch/powerpc/platforms/pseries/rng.c
@@ -38,7 +38,7 @@ static __init int rng_init(void)
pr_info("Registering arch random hook.\n");
- ppc_md.get_random_long = pseries_get_random_long;
+ ppc_md.get_random_seed = pseries_get_random_long;
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index e6e8b241d..9a83eb71b 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -111,7 +111,7 @@ static void __init fwnmi_init(void)
fwnmi_active = 1;
}
-static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc)
+static void pseries_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
@@ -254,24 +254,26 @@ static void __init pseries_discover_pic(void)
static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
struct of_reconfig_data *rd = data;
- struct device_node *np = rd->dn;
- struct pci_dn *pci = NULL;
+ struct device_node *parent, *np = rd->dn;
+ struct pci_dn *pdn;
int err = NOTIFY_OK;
switch (action) {
case OF_RECONFIG_ATTACH_NODE:
- pci = np->parent->data;
- if (pci) {
- update_dn_pci_info(np, pci->phb);
-
- /* Create EEH device for the OF node */
- eeh_dev_init(PCI_DN(np), pci->phb);
+ parent = of_get_parent(np);
+ pdn = parent ? PCI_DN(parent) : NULL;
+ if (pdn) {
+ /* Create pdn and EEH device */
+ update_dn_pci_info(np, pdn->phb);
+ eeh_dev_init(PCI_DN(np), pdn->phb);
}
+
+ of_node_put(parent);
break;
case OF_RECONFIG_DETACH_NODE:
- pci = PCI_DN(np);
- if (pci)
- list_del(&pci->list);
+ pdn = PCI_DN(np);
+ if (pdn)
+ list_del(&pdn->list);
break;
default:
err = NOTIFY_DONE;