author    André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-01-20 14:01:31 -0300
committer André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-01-20 14:01:31 -0300
commit    b4b7ff4b08e691656c9d77c758fc355833128ac0 (patch)
tree      82fcb00e6b918026dc9f2d1f05ed8eee83874cc0 /arch/powerpc/kernel
parent    35acfa0fc609f2a2cd95cef4a6a9c3a5c38f1778 (diff)

Linux-libre 4.4-gnupck-4.4-gnu
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/crash.c             |  6
-rw-r--r--  arch/powerpc/kernel/eeh.c               |  8
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c        | 13
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S    | 17
-rw-r--r--  arch/powerpc/kernel/head_64.S           | 43
-rw-r--r--  arch/powerpc/kernel/io-workarounds.c    |  2
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c  | 18
-rw-r--r--  arch/powerpc/kernel/misc_64.S           | 60
-rw-r--r--  arch/powerpc/kernel/nvram_64.c          | 15
-rw-r--r--  arch/powerpc/kernel/paca.c              |  9
-rw-r--r--  arch/powerpc/kernel/pci-common.c        |  1
-rw-r--r--  arch/powerpc/kernel/process.c           | 18
-rw-r--r--  arch/powerpc/kernel/prom.c              | 18
-rw-r--r--  arch/powerpc/kernel/prom_init.c         | 40
-rw-r--r--  arch/powerpc/kernel/setup_64.c          | 25
-rw-r--r--  arch/powerpc/kernel/signal_32.c         | 14
-rw-r--r--  arch/powerpc/kernel/signal_64.c         |  4
-rw-r--r--  arch/powerpc/kernel/vdso32/Makefile     |  2
-rw-r--r--  arch/powerpc/kernel/vdso32/datapage.S   | 12
-rw-r--r--  arch/powerpc/kernel/vdso64/Makefile     |  2
-rw-r--r--  arch/powerpc/kernel/vdso64/datapage.S   | 12
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S       |  6
22 files changed, 280 insertions(+), 65 deletions(-)
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 51dbace32..2bb252c01 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -221,8 +221,8 @@ void crash_kexec_secondary(struct pt_regs *regs)
#endif /* CONFIG_SMP */
/* wait for all the CPUs to hit real mode, but time out if they don't come in */
-#if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_64)
-static void crash_kexec_wait_realmode(int cpu)
+#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
+static void __maybe_unused crash_kexec_wait_realmode(int cpu)
{
unsigned int msecs;
int i;
@@ -244,7 +244,7 @@ static void crash_kexec_wait_realmode(int cpu)
}
#else
static inline void crash_kexec_wait_realmode(int cpu) {}
-#endif /* CONFIG_SMP && CONFIG_PPC_STD_MMU_64 */
+#endif /* CONFIG_SMP && CONFIG_PPC64 */
/*
* Register a function to be called on shutdown. Only use this if you
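For context: crash_kexec_wait_realmode(), whose #ifdef condition this hunk relaxes, is a bounded polling loop that watches a counter of CPUs that have reached real mode and gives up after a millisecond budget. A minimal user-space sketch of the same pattern, assuming a shared atomic counter (cpus_in_realmode and wait_for_cpus are hypothetical names, not the kernel's):

#include <stdatomic.h>
#include <unistd.h>

/* Sketch of the bounded-poll pattern; not the kernel implementation. */
static atomic_int cpus_in_realmode;

static int wait_for_cpus(int expected, int timeout_ms)
{
        int msecs = timeout_ms;

        while (atomic_load(&cpus_in_realmode) < expected && --msecs > 0)
                usleep(1000);           /* poll once per millisecond */

        return msecs > 0 ? 0 : -1;      /* -1 on timeout */
}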
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index e968533e3..40e4d4a27 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -351,7 +351,8 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
* worried about _PAGE_SPLITTING/collapse. Also we will not hit
* page table free, because of init_mm.
*/
- ptep = __find_linux_pte_or_hugepte(init_mm.pgd, token, &hugepage_shift);
+ ptep = __find_linux_pte_or_hugepte(init_mm.pgd, token,
+ NULL, &hugepage_shift);
if (!ptep)
return token;
WARN_ON(hugepage_shift);
@@ -630,7 +631,7 @@ int eeh_pci_enable(struct eeh_pe *pe, int function)
*/
switch (function) {
case EEH_OPT_THAW_MMIO:
- active_flag = EEH_STATE_MMIO_ACTIVE;
+ active_flag = EEH_STATE_MMIO_ACTIVE | EEH_STATE_MMIO_ENABLED;
break;
case EEH_OPT_THAW_DMA:
active_flag = EEH_STATE_DMA_ACTIVE;
@@ -1411,8 +1412,7 @@ void eeh_dev_release(struct pci_dev *pdev)
goto out;
/* Decrease PE's pass through count */
- atomic_dec(&edev->pe->pass_dev_cnt);
- WARN_ON(atomic_read(&edev->pe->pass_dev_cnt) < 0);
+ WARN_ON(atomic_dec_if_positive(&edev->pe->pass_dev_cnt) < 0);
eeh_pe_change_owner(edev->pe);
out:
mutex_unlock(&eeh_dev_mutex);
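The pass_dev_cnt hunk above is a race fix, not just a cleanup: a separate atomic_dec() followed by atomic_read() lets another CPU's update slip in between the two operations, whereas atomic_dec_if_positive() tests and decrements in a single atomic step. A standalone sketch of the primitive's semantics (the kernel's atomic_dec_if_positive() likewise returns the old value minus one):

#include <stdatomic.h>

/* Decrement *v only if it is positive; return old value minus one,
 * so a negative result means the counter was already at zero.
 * A sketch of the semantics, not the kernel's implementation. */
static int dec_if_positive(atomic_int *v)
{
        int old = atomic_load(v);

        while (old > 0 &&
               !atomic_compare_exchange_weak(v, &old, old - 1))
                ;       /* CAS failure refreshes 'old'; retry */

        return old - 1;
}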
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 89eb4bc34..8d14feb40 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -416,7 +416,10 @@ static void *eeh_rmv_device(void *data, void *userdata)
driver = eeh_pcid_get(dev);
if (driver) {
eeh_pcid_put(dev);
- if (driver->err_handler)
+ if (driver->err_handler &&
+ driver->err_handler->error_detected &&
+ driver->err_handler->slot_reset &&
+ driver->err_handler->resume)
return NULL;
}
@@ -655,9 +658,17 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
* to accomplish the reset. Each child gets a report of the
* status ... if any child can't handle the reset, then the entire
* slot is dlpar removed and added.
+ *
+ * When the PHB is fenced, we have to issue a reset to recover from
+ * the error. Override the result if necessary to allow partial
+ * hotplug for this case.
*/
pr_info("EEH: Notify device drivers to shutdown\n");
eeh_pe_dev_traverse(pe, eeh_report_error, &result);
+ if ((pe->type & EEH_PE_PHB) &&
+ result != PCI_ERS_RESULT_NONE &&
+ result != PCI_ERS_RESULT_NEED_RESET)
+ result = PCI_ERS_RESULT_NEED_RESET;
/* Get the current PCI slot state. This can take a long time,
* sometimes over 300 seconds for certain systems.
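The eeh_rmv_device() hunk tightens what counts as an EEH-aware driver: a non-NULL err_handler is no longer enough; the driver must implement the full recovery set that the reset sequence will invoke. The capability test, sketched standalone (struct trimmed down from pci_error_handlers):

#include <stdbool.h>
#include <stddef.h>

/* Trimmed stand-in for the callbacks EEH recovery relies on. */
struct err_handlers {
        void (*error_detected)(void);
        void (*slot_reset)(void);
        void (*resume)(void);
};

/* A device may stay bound across the reset only if its driver
 * implements every callback the recovery sequence needs. */
static bool driver_supports_recovery(const struct err_handlers *eh)
{
        return eh && eh->error_detected && eh->slot_reset && eh->resume;
}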
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index f3bd5e747..488e6314f 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -542,8 +542,8 @@ interrupt_base_book3e: /* fake trap */
EXCEPTION_STUB(0x320, ehpriv)
EXCEPTION_STUB(0x340, lrat_error)
- .globl interrupt_end_book3e
-interrupt_end_book3e:
+ .globl __end_interrupts
+__end_interrupts:
/* Critical Input Interrupt */
START_EXCEPTION(critical_input);
@@ -736,7 +736,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
beq+ 1f
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
- LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e)
+ LOAD_REG_IMMEDIATE(r15,__end_interrupts)
cmpld cr0,r10,r14
cmpld cr1,r10,r15
blt+ cr0,1f
@@ -800,7 +800,7 @@ kernel_dbg_exc:
beq+ 1f
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
- LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e)
+ LOAD_REG_IMMEDIATE(r15,__end_interrupts)
cmpld cr0,r10,r14
cmpld cr1,r10,r15
blt+ cr0,1f
@@ -1351,7 +1351,10 @@ skpinv: addi r6,r6,1 /* Increment */
* r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
*/
/* Now we branch the new virtual address mapped by this entry */
- LOAD_REG_IMMEDIATE(r6,2f)
+ bl 1f /* Find our address */
+1: mflr r6
+ addi r6,r6,(2f - 1b)
+ tovirt(r6,r6)
lis r7,MSR_KERNEL@h
ori r7,r7,MSR_KERNEL@l
mtspr SPRN_SRR0,r6
@@ -1583,9 +1586,11 @@ _GLOBAL(book3e_secondary_thread_init)
mflr r28
b 3b
+ .globl init_core_book3e
init_core_book3e:
/* Establish the interrupt vector base */
- LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
+ tovirt(r2,r2)
+ LOAD_REG_ADDR(r3, interrupt_base_book3e)
mtspr SPRN_IVPR,r3
sync
blr
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index d48125d0c..1b7795607 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -182,6 +182,8 @@ exception_marker:
#ifdef CONFIG_PPC_BOOK3E
_GLOBAL(fsl_secondary_thread_init)
+ mfspr r4,SPRN_BUCSR
+
/* Enable branch prediction */
lis r3,BUCSR_INIT@h
ori r3,r3,BUCSR_INIT@l
@@ -196,10 +198,24 @@ _GLOBAL(fsl_secondary_thread_init)
* number. There are two threads per core, so shift everything
* but the low bit right by two bits so that the cpu numbering is
* continuous.
+ *
+ * If the old value of BUCSR is non-zero, this thread has run
+ * before. Thus, we assume we are coming from kexec or a similar
+ * scenario, and PIR is already set to the correct value. This
+ * is a bit of a hack, but there are limited opportunities for
+ * getting information into the thread and the alternatives
+ * seemed like they'd be overkill. We can't tell just by looking
+ * at the old PIR value which state it's in, since the same value
+ * could be valid for one thread out of reset and for a different
+ * thread in Linux.
*/
+
mfspr r3, SPRN_PIR
+ cmpwi r4,0
+ bne 1f
rlwimi r3, r3, 30, 2, 30
mtspr SPRN_PIR, r3
+1:
#endif
_GLOBAL(generic_secondary_thread_init)
@@ -441,12 +457,22 @@ __after_prom_start:
/* process relocations for the final address of the kernel */
lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
sldi r25,r25,32
+#if defined(CONFIG_PPC_BOOK3E)
+ tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
+#if defined(CONFIG_PPC_BOOK3E)
+ tophys(r26,r26)
+#endif
cmplwi cr0,r7,1 /* flagged to stay where we are ? */
bne 1f
add r25,r25,r26
1: mr r3,r25
bl relocate
+#if defined(CONFIG_PPC_BOOK3E)
+ /* IVPR needs to be set after relocation. */
+ bl init_core_book3e
+#endif
#endif
/*
@@ -458,15 +484,15 @@ __after_prom_start:
*/
li r3,0 /* target addr */
#ifdef CONFIG_PPC_BOOK3E
- tovirt(r3,r3) /* on booke, we already run at PAGE_OFFSET */
+ tovirt(r3,r3) /* on booke, we already run at PAGE_OFFSET */
#endif
mr. r4,r26 /* In some cases the loader may */
+#if defined(CONFIG_PPC_BOOK3E)
+ tovirt(r4,r4)
+#endif
beq 9f /* have already put us at zero */
li r6,0x100 /* Start offset, the first 0x100 */
/* bytes were copied earlier. */
-#ifdef CONFIG_PPC_BOOK3E
- tovirt(r6,r6) /* on booke, we already run at PAGE_OFFSET */
-#endif
#ifdef CONFIG_RELOCATABLE
/*
@@ -474,12 +500,21 @@ __after_prom_start:
* variable __run_at_load, if it is set the kernel is treated as relocatable
* kernel, otherwise it will be moved to PHYSICAL_START
*/
+#if defined(CONFIG_PPC_BOOK3E)
+ tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
+#endif
lwz r7,__run_at_load-_stext(r26)
cmplwi cr0,r7,1
bne 3f
+#ifdef CONFIG_PPC_BOOK3E
+ LOAD_REG_ADDR(r5, __end_interrupts)
+ LOAD_REG_ADDR(r11, _stext)
+ sub r5,r5,r11
+#else
/* just copy interrupts */
LOAD_REG_IMMEDIATE(r5, __end_interrupts - _stext)
+#endif
b 5f
3:
#endif
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index 63d9cc4d7..5f8613ceb 100644
--- a/arch/powerpc/kernel/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -76,7 +76,7 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
* a page table free due to init_mm
*/
ptep = __find_linux_pte_or_hugepte(init_mm.pgd, vaddr,
- &hugepage_shift);
+ NULL, &hugepage_shift);
if (ptep == NULL)
paddr = 0;
else {
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 1a74446fd..0fbd75d18 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -30,6 +30,21 @@
#include <asm/smp.h>
#include <asm/hw_breakpoint.h>
+#ifdef CONFIG_PPC_BOOK3E
+int default_machine_kexec_prepare(struct kimage *image)
+{
+ int i;
+ /*
+ * Since we use the kernel fault handlers and paging code to
+ * handle the virtual mode, we must make sure no destination
+ * overlaps kernel static data or bss.
+ */
+ for (i = 0; i < image->nr_segments; i++)
+ if (image->segment[i].mem < __pa(_end))
+ return -ETXTBSY;
+ return 0;
+}
+#else
int default_machine_kexec_prepare(struct kimage *image)
{
int i;
@@ -95,6 +110,7 @@ int default_machine_kexec_prepare(struct kimage *image)
return 0;
}
+#endif /* !CONFIG_PPC_BOOK3E */
static void copy_segments(unsigned long ind)
{
@@ -365,6 +381,7 @@ void default_machine_kexec(struct kimage *image)
/* NOTREACHED */
}
+#ifndef CONFIG_PPC_BOOK3E
/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;
static unsigned long htab_size;
@@ -411,3 +428,4 @@ static int __init export_htab_values(void)
return 0;
}
late_initcall(export_htab_values);
+#endif /* !CONFIG_PPC_BOOK3E */
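The new Book3E prepare hook rejects any kexec segment that would land below __pa(_end), because this port runs the copy with the MMU on and must keep destinations clear of the running kernel's static image. The validation in isolation (hypothetical types, -1 standing in for -ETXTBSY):

/* Sketch of the Book3E segment check. */
struct seg {
        unsigned long mem;      /* destination physical address */
        unsigned long memsz;
};

static int check_segments(const struct seg *segs, int n,
                          unsigned long kernel_end_phys)
{
        int i;

        for (i = 0; i < n; i++)
                if (segs[i].mem < kernel_end_phys)
                        return -1;      /* overlaps kernel text/data/bss */

        return 0;
}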
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 6e4168cf4..db475d41b 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -26,6 +26,7 @@
#include <asm/thread_info.h>
#include <asm/kexec.h>
#include <asm/ptrace.h>
+#include <asm/mmu.h>
.text
@@ -484,6 +485,8 @@ _GLOBAL(kexec_wait)
mtsrr1 r11
rfid
#else
+ /* Create TLB entry in book3e_secondary_core_init */
+ li r4,0
ba 0x60
#endif
#endif
@@ -496,6 +499,51 @@ kexec_flag:
#ifdef CONFIG_KEXEC
+#ifdef CONFIG_PPC_BOOK3E
+/*
+ * BOOK3E has no real MMU mode, so we have to set up the initial TLB
+ * for a core to identity-map v:0 to p:0. The current implementation
+ * assumes that 1G is enough for kexec.
+ */
+kexec_create_tlb:
+ /*
+ * Invalidate all non-IPROT TLB entries to avoid any TLB conflict.
+ * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict.
+ */
+ PPC_TLBILX_ALL(0,R0)
+ sync
+ isync
+
+ mfspr r10,SPRN_TLB1CFG
+ andi. r10,r10,TLBnCFG_N_ENTRY /* Extract # entries */
+ subi r10,r10,1 /* Last entry: no conflict with kernel text */
+ lis r9,MAS0_TLBSEL(1)@h
+ rlwimi r9,r10,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r9) */
+
+/* Set up a temp identity mapping v:0 to p:0 and return to it. */
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
+#define M_IF_NEEDED MAS2_M
+#else
+#define M_IF_NEEDED 0
+#endif
+ mtspr SPRN_MAS0,r9
+
+ lis r9,(MAS1_VALID|MAS1_IPROT)@h
+ ori r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
+ mtspr SPRN_MAS1,r9
+
+ LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_NEEDED)
+ mtspr SPRN_MAS2,r9
+
+ LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
+ mtspr SPRN_MAS3,r9
+ li r9,0
+ mtspr SPRN_MAS7,r9
+
+ tlbwe
+ isync
+ blr
+#endif
/* kexec_smp_wait(void)
*
@@ -525,6 +573,10 @@ _GLOBAL(kexec_smp_wait)
* don't overwrite r3 here, it is live for kexec_wait above.
*/
real_mode: /* assume normal blr return */
+#ifdef CONFIG_PPC_BOOK3E
+ /* Create an identity mapping. */
+ b kexec_create_tlb
+#else
1: li r9,MSR_RI
li r10,MSR_DR|MSR_IR
mflr r11 /* return address to SRR0 */
@@ -536,7 +588,7 @@ real_mode: /* assume normal blr return */
mtspr SPRN_SRR1,r10
mtspr SPRN_SRR0,r11
rfid
-
+#endif
/*
* kexec_sequence(newstack, start, image, control, clear_all())
@@ -579,9 +631,13 @@ _GLOBAL(kexec_sequence)
lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
/* disable interrupts, we are overwriting kernel data next */
+#ifdef CONFIG_PPC_BOOK3E
+ wrteei 0
+#else
mfmsr r3
rlwinm r3,r3,0,17,15
mtmsrd r3,1
+#endif
/* copy dest pages, flush whole dest image */
mr r3,r29
@@ -603,6 +659,7 @@ _GLOBAL(kexec_sequence)
li r6,1
stw r6,kexec_flag-1b(5)
+#ifndef CONFIG_PPC_BOOK3E
/* clear out hardware hash page table and tlb */
#if !defined(_CALL_ELF) || _CALL_ELF != 2
ld r12,0(r27) /* deref function descriptor */
@@ -611,6 +668,7 @@ _GLOBAL(kexec_sequence)
#endif
mtctr r12
bctrl /* ppc_md.hpte_clear_all(void); */
+#endif /* !CONFIG_PPC_BOOK3E */
/*
* kexec image calling is:
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 98ba106a5..32e26526f 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -1065,7 +1065,7 @@ loff_t __init nvram_create_partition(const char *name, int sig,
/* Create our OS partition */
new_part = kmalloc(sizeof(*new_part), GFP_KERNEL);
if (!new_part) {
- pr_err("nvram_create_os_partition: kmalloc failed\n");
+ pr_err("%s: kmalloc failed\n", __func__);
return -ENOMEM;
}
@@ -1077,8 +1077,8 @@ loff_t __init nvram_create_partition(const char *name, int sig,
rc = nvram_write_header(new_part);
if (rc <= 0) {
- pr_err("nvram_create_os_partition: nvram_write_header "
- "failed (%d)\n", rc);
+ pr_err("%s: nvram_write_header failed (%d)\n", __func__, rc);
+ kfree(new_part);
return rc;
}
list_add_tail(&new_part->partition, &free_part->partition);
@@ -1090,8 +1090,8 @@ loff_t __init nvram_create_partition(const char *name, int sig,
free_part->header.checksum = nvram_checksum(&free_part->header);
rc = nvram_write_header(free_part);
if (rc <= 0) {
- pr_err("nvram_create_os_partition: nvram_write_header "
- "failed (%d)\n", rc);
+ pr_err("%s: nvram_write_header failed (%d)\n",
+ __func__, rc);
return rc;
}
} else {
@@ -1105,11 +1105,12 @@ loff_t __init nvram_create_partition(const char *name, int sig,
tmp_index += NVRAM_BLOCK_LEN) {
rc = ppc_md.nvram_write(nv_init_vals, NVRAM_BLOCK_LEN, &tmp_index);
if (rc <= 0) {
- pr_err("nvram_create_partition: nvram_write failed (%d)\n", rc);
+ pr_err("%s: nvram_write failed (%d)\n",
+ __func__, rc);
return rc;
}
}
-
+
return new_part->index + NVRAM_HEADER_LEN;
}
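Besides moving the messages to the %s/__func__ idiom, the first nvram hunk plugs a leak: new_part came from kmalloc() but was never freed when nvram_write_header() failed. The shape of the fixed error path, as a standalone sketch (write_header() is a hypothetical stub):

#include <stdio.h>
#include <stdlib.h>

static int write_header(void *part) { (void)part; return -1; }

static int create_partition(void)
{
        void *new_part = malloc(64);
        int rc;

        if (!new_part) {
                fprintf(stderr, "%s: malloc failed\n", __func__);
                return -1;
        }

        rc = write_header(new_part);
        if (rc < 0) {
                fprintf(stderr, "%s: write_header failed (%d)\n",
                        __func__, rc);
                free(new_part);         /* the fix: release on error */
                return rc;
        }

        free(new_part);
        return 0;
}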
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 5a23b69f8..01ea0edf0 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -204,14 +204,19 @@ static int __initdata paca_size;
void __init allocate_pacas(void)
{
- int cpu, limit;
+ u64 limit;
+ int cpu;
+ limit = ppc64_rma_size;
+
+#ifdef CONFIG_PPC_BOOK3S_64
/*
* We can't take SLB misses on the paca, and we want to access them
* in real mode, so allocate them within the RMA and also within
* the first segment.
*/
- limit = min(0x10000000ULL, ppc64_rma_size);
+ limit = min(0x10000000ULL, limit);
+#endif
paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 7587b2ae5..0f7a60f1e 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -100,6 +100,7 @@ void pcibios_free_controller(struct pci_controller *phb)
if (phb->is_dynamic)
kfree(phb);
}
+EXPORT_SYMBOL_GPL(pcibios_free_controller);
/*
* The function is used to return the minimal alignment
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 75b6676c1..646bf4d22 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -551,6 +551,24 @@ static void tm_reclaim_thread(struct thread_struct *thr,
msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
}
+ /*
+ * Use the current MSR TM suspended bit to track if we have
+ * checkpointed state outstanding.
+ * On signal delivery, we'd normally reclaim the checkpointed
+ * state to obtain the stack pointer (see: get_tm_stackpointer()).
+ * This will then directly return to userspace without going
+ * through __switch_to(). However, if the stack frame is bad,
+ * we need to exit this thread, which calls __switch_to(), which
+ * will again attempt to reclaim the already saved TM state.
+ * Hence we need to check that we've not already reclaimed
+ * this state.
+ * We do this using the current MSR, rather than tracking it in
+ * some specific thread_struct bit, as it has the additional
+ * benefit of checking for a potential TM Bad Thing exception.
+ */
+ if (!MSR_TM_SUSPENDED(mfmsr()))
+ return;
+
tm_reclaim(thr, thr->regs->msr, cause);
/* Having done the reclaim, we now have the checkpointed
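The guard added above makes the reclaim idempotent: the live MSR's TM-suspended bit, not a software flag, records whether checkpointed state is still outstanding, so a second caller (signal delivery followed by __switch_to() on a bad stack frame) becomes a no-op. The shape of the guard, sketched with a boolean standing in for MSR_TM_SUSPENDED(mfmsr()):

#include <stdbool.h>

/* 'suspended' is a hypothetical stand-in for the hardware bit. */
static bool suspended;

static void reclaim_once(void)
{
        if (!suspended)         /* already reclaimed: nothing to do */
                return;

        /* ... perform the one-time reclaim ... */
        suspended = false;      /* hardware clears TS on a real reclaim */
}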
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index bef76c503..7030b0359 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -783,17 +783,19 @@ void __init early_get_first_memblock_info(void *params, phys_addr_t *size)
int of_get_ibm_chip_id(struct device_node *np)
{
of_node_get(np);
- while(np) {
- struct device_node *old = np;
- const __be32 *prop;
+ while (np) {
+ u32 chip_id;
- prop = of_get_property(np, "ibm,chip-id", NULL);
- if (prop) {
+ /*
+ * Skiboot may produce memory nodes that contain more than one
+ * cell in chip-id; we only read the first one here.
+ */
+ if (!of_property_read_u32(np, "ibm,chip-id", &chip_id)) {
of_node_put(np);
- return be32_to_cpup(prop);
+ return chip_id;
}
- np = of_get_parent(np);
- of_node_put(old);
+
+ np = of_get_next_parent(np);
}
return -1;
}
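The rewritten walk relies on two helpers doing the bookkeeping the old loop spelled out by hand: of_property_read_u32() reads only the first cell even when skiboot emits a multi-cell ibm,chip-id, and of_get_next_parent() returns the parent while dropping the reference on the node it was handed. The generalized pattern, assuming the usual kernel OF API:

#include <linux/of.h>

/* Walk np and its ancestors for a u32 property. of_get_next_parent()
 * puts the node it is given, so no per-step of_node_put() is needed. */
static int find_u32_up(struct device_node *np, const char *prop)
{
        u32 val;

        of_node_get(np);
        while (np) {
                if (!of_property_read_u32(np, prop, &val)) {
                        of_node_put(np);
                        return val;
                }
                np = of_get_next_parent(np);
        }

        return -1;
}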
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 15099c416..92dea8df6 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1425,27 +1425,45 @@ static void __init prom_instantiate_sml(void)
{
phandle ibmvtpm_node;
ihandle ibmvtpm_inst;
- u32 entry = 0, size = 0;
+ u32 entry = 0, size = 0, succ = 0;
u64 base;
+ __be32 val;
prom_debug("prom_instantiate_sml: start...\n");
- ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/ibm,vtpm"));
+ ibmvtpm_node = call_prom("finddevice", 1, 1, ADDR("/vdevice/vtpm"));
prom_debug("ibmvtpm_node: %x\n", ibmvtpm_node);
if (!PHANDLE_VALID(ibmvtpm_node))
return;
- ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/ibm,vtpm"));
+ ibmvtpm_inst = call_prom("open", 1, 1, ADDR("/vdevice/vtpm"));
if (!IHANDLE_VALID(ibmvtpm_inst)) {
prom_printf("opening vtpm package failed (%x)\n", ibmvtpm_inst);
return;
}
- if (call_prom_ret("call-method", 2, 2, &size,
- ADDR("sml-get-handover-size"),
- ibmvtpm_inst) != 0 || size == 0) {
- prom_printf("SML get handover size failed\n");
- return;
+ if (prom_getprop(ibmvtpm_node, "ibm,sml-efi-reformat-supported",
+ &val, sizeof(val)) != PROM_ERROR) {
+ if (call_prom_ret("call-method", 2, 2, &succ,
+ ADDR("reformat-sml-to-efi-alignment"),
+ ibmvtpm_inst) != 0 || succ == 0) {
+ prom_printf("Reformat SML to EFI alignment failed\n");
+ return;
+ }
+
+ if (call_prom_ret("call-method", 2, 2, &size,
+ ADDR("sml-get-allocated-size"),
+ ibmvtpm_inst) != 0 || size == 0) {
+ prom_printf("SML get allocated size failed\n");
+ return;
+ }
+ } else {
+ if (call_prom_ret("call-method", 2, 2, &size,
+ ADDR("sml-get-handover-size"),
+ ibmvtpm_inst) != 0 || size == 0) {
+ prom_printf("SML get handover size failed\n");
+ return;
+ }
}
base = alloc_down(size, PAGE_SIZE, 0);
@@ -1454,6 +1472,8 @@ static void __init prom_instantiate_sml(void)
prom_printf("instantiating sml at 0x%x...", base);
+ memset((void *)base, 0, size);
+
if (call_prom_ret("call-method", 4, 2, &entry,
ADDR("sml-handover"),
ibmvtpm_inst, size, base) != 0 || entry == 0) {
@@ -1464,9 +1484,9 @@ static void __init prom_instantiate_sml(void)
reserve_mem(base, size);
- prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-base",
+ prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-base",
&base, sizeof(base));
- prom_setprop(ibmvtpm_node, "/ibm,vtpm", "linux,sml-size",
+ prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size",
&size, sizeof(size));
prom_debug("sml base = 0x%x\n", base);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index bdcbb716f..5c03a6a9b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -108,6 +108,14 @@ static void setup_tlb_core_data(void)
for_each_possible_cpu(cpu) {
int first = cpu_first_thread_sibling(cpu);
+ /*
+ * If we boot via kdump on a non-primary thread,
+ * make sure we point at the thread that actually
+ * set up this TLB.
+ */
+ if (cpu_first_thread_sibling(boot_cpuid) == first)
+ first = boot_cpuid;
+
paca[cpu].tcd_ptr = &paca[first].tcd;
/*
@@ -332,11 +340,26 @@ void early_setup_secondary(void)
#endif /* CONFIG_SMP */
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
+static bool use_spinloop(void)
+{
+ if (!IS_ENABLED(CONFIG_PPC_BOOK3E))
+ return true;
+
+ /*
+ * When book3e boots from kexec, the ePAPR spin table does
+ * not get used.
+ */
+ return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
+}
+
void smp_release_cpus(void)
{
unsigned long *ptr;
int i;
+ if (!use_spinloop())
+ return;
+
DBG(" -> smp_release_cpus()\n");
/* All secondary cpus are spinning on a common spinloop, release them
@@ -516,7 +539,7 @@ void __init setup_system(void)
* Freescale Book3e parts spin in a loop provided by firmware,
* so smp_release_cpus() does nothing for them
*/
-#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_FSL_BOOK3E)
+#if defined(CONFIG_SMP)
/* Release secondary cpus out of their spinloops at 0x60 now that
* we can map physical -> logical CPU ids
*/
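use_spinloop() pairs a compile-time gate with a runtime device-tree probe: IS_ENABLED() folds to a constant, so non-Book3E builds never touch the DT lookup at all. The same shape, sketched with a hypothetical config option and property name:

#include <linux/kconfig.h>
#include <linux/of.h>

/* CONFIG_MY_FEATURE and "vendor,booted-special" are hypothetical. */
static bool feature_active(void)
{
        if (!IS_ENABLED(CONFIG_MY_FEATURE))
                return false;   /* dead-code-eliminated when disabled */

        return of_property_read_bool(of_chosen, "vendor,booted-special");
}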
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 0dbee465a..ef7c24e84 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -875,6 +875,15 @@ static long restore_tm_user_regs(struct pt_regs *regs,
return 1;
#endif /* CONFIG_SPE */
+ /* Get the top half of the MSR from the user context */
+ if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
+ return 1;
+ msr_hi <<= 32;
+ /* If TM bits are set to the reserved value, it's an invalid context */
+ if (MSR_TM_RESV(msr_hi))
+ return 1;
+ /* Pull in the MSR TM bits from the user context */
+ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
/* Now, recheckpoint. This loads up all of the checkpointed (older)
* registers, including FP and V[S]Rs. After recheckpointing, the
* transactional versions should be loaded.
@@ -884,11 +893,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
current->thread.tm_texasr |= TEXASR_FS;
/* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&current->thread, msr);
- /* Get the top half of the MSR */
- if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
- return 1;
- /* Pull in MSR TM from user context */
- regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
/* This loads the speculative FP/VEC state, if used */
if (msr & MSR_FP) {
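The reordering in restore_tm_user_regs() is security-relevant: the TS (transaction state) bits from the user-supplied context are now validated with MSR_TM_RESV() and merged into regs->msr before tm_recheckpoint() runs, so a forged context can no longer reach the recheckpoint carrying the reserved encoding. The check in isolation, with hypothetical masks for the two-bit TS field:

#include <stdint.h>

#define TS_MASK (3ULL << 33)    /* hypothetical stand-in for MSR_TS_MASK */
#define TS_RESV TS_MASK         /* both bits set is the reserved encoding */

/* Reject the reserved TS value, then merge only the TS bits. */
static int merge_user_ts(uint64_t *live_msr, uint64_t user_msr)
{
        if ((user_msr & TS_MASK) == TS_RESV)
                return -1;      /* invalid context */

        *live_msr = (*live_msr & ~TS_MASK) | (user_msr & TS_MASK);
        return 0;
}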
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 20756dfb9..c676ecec0 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -438,6 +438,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
/* get MSR separately, transfer the LE bit if doing signal return */
err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+ /* Don't allow reserved mode. */
+ if (MSR_TM_RESV(msr))
+ return -EINVAL;
+
/* pull in MSR TM from user context */
regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
index 53e6c9b97..6abffb7a8 100644
--- a/arch/powerpc/kernel/vdso32/Makefile
+++ b/arch/powerpc/kernel/vdso32/Makefile
@@ -18,7 +18,7 @@ GCOV_PROFILE := n
ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ $(call cc-ldoption, -Wl$(comma)--hash-style=both)
asflags-y := -D__VDSO32__ -s
obj-y += vdso32_wrapper.o
diff --git a/arch/powerpc/kernel/vdso32/datapage.S b/arch/powerpc/kernel/vdso32/datapage.S
index dc21e891d..59cf5f452 100644
--- a/arch/powerpc/kernel/vdso32/datapage.S
+++ b/arch/powerpc/kernel/vdso32/datapage.S
@@ -16,6 +16,10 @@
#include <asm/vdso.h>
.text
+ .global __kernel_datapage_offset;
+__kernel_datapage_offset:
+ .long 0
+
V_FUNCTION_BEGIN(__get_datapage)
.cfi_startproc
/* We don't want that exposed or overridable as we want other objects
@@ -27,13 +31,11 @@ V_FUNCTION_BEGIN(__get_datapage)
mflr r0
.cfi_register lr,r0
- bcl 20,31,1f
- .global __kernel_datapage_offset;
-__kernel_datapage_offset:
- .long 0
-1:
+ bcl 20,31,data_page_branch
+data_page_branch:
mflr r3
mtlr r0
+ addi r3, r3, __kernel_datapage_offset-data_page_branch
lwz r0,0(r3)
add r3,r0,r3
blr
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index effca9404..8c8f2ae43 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -11,7 +11,7 @@ GCOV_PROFILE := n
ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
- $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ $(call cc-ldoption, -Wl$(comma)--hash-style=both)
asflags-y := -D__VDSO64__ -s
obj-y += vdso64_wrapper.o
diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S
index 79796de11..2f01c4a0d 100644
--- a/arch/powerpc/kernel/vdso64/datapage.S
+++ b/arch/powerpc/kernel/vdso64/datapage.S
@@ -16,6 +16,10 @@
#include <asm/vdso.h>
.text
+.global __kernel_datapage_offset;
+__kernel_datapage_offset:
+ .long 0
+
V_FUNCTION_BEGIN(__get_datapage)
.cfi_startproc
/* We don't want that exposed or overridable as we want other objects
@@ -27,13 +31,11 @@ V_FUNCTION_BEGIN(__get_datapage)
mflr r0
.cfi_register lr,r0
- bcl 20,31,1f
- .global __kernel_datapage_offset;
-__kernel_datapage_offset:
- .long 0
-1:
+ bcl 20,31,data_page_branch
+data_page_branch:
mflr r3
mtlr r0
+ addi r3, r3, __kernel_datapage_offset-data_page_branch
lwz r0,0(r3)
add r3,r0,r3
blr
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 1db685104..d41fd0af8 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -183,6 +183,12 @@ SECTIONS
*(.rela*)
}
#endif
+ /* .exit.data is discarded at runtime, not link time,
+ * to deal with references from .exit.text
+ */
+ .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
+ EXIT_DATA
+ }
/* freed after init ends here */
. = ALIGN(PAGE_SIZE);