Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile             |   8
-rw-r--r--  arch/powerpc/kernel/align.c              |   2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c        |  10
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c         |   2
-rw-r--r--  arch/powerpc/kernel/entry_64.S           |  89
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S     |  77
-rw-r--r--  arch/powerpc/kernel/fadump.c             |   4
-rw-r--r--  arch/powerpc/kernel/fpu.S                |  73
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S     |  42
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c      |   3
-rw-r--r--  arch/powerpc/kernel/idle_power7.S        |   7
-rw-r--r--  arch/powerpc/kernel/misc_32.S            |   2
-rw-r--r--  arch/powerpc/kernel/misc_64.S            |  28
-rw-r--r--  arch/powerpc/kernel/module_32.c          |   6
-rw-r--r--  arch/powerpc/kernel/module_64.c          |  14
-rw-r--r--  arch/powerpc/kernel/nvram_64.c           |  19
-rw-r--r--  arch/powerpc/kernel/pci_of_scan.c        |   3
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c          |   6
-rw-r--r--  arch/powerpc/kernel/process.c            | 563
-rw-r--r--  arch/powerpc/kernel/prom_init.c          |   1
-rw-r--r--  arch/powerpc/kernel/ptrace.c             |   1
-rw-r--r--  arch/powerpc/kernel/rtas.c               |  59
-rw-r--r--  arch/powerpc/kernel/signal_32.c          |  22
-rw-r--r--  arch/powerpc/kernel/signal_64.c          |  22
-rw-r--r--  arch/powerpc/kernel/stacktrace.c         |   7
-rw-r--r--  arch/powerpc/kernel/swsusp.c             |   4
-rw-r--r--  arch/powerpc/kernel/systbl_chk.c         |   2
-rw-r--r--  arch/powerpc/kernel/systbl_chk.sh        |   2
-rw-r--r--  arch/powerpc/kernel/time.c               |  36
-rw-r--r--  arch/powerpc/kernel/traps.c              |   7
-rw-r--r--  arch/powerpc/kernel/vdso.c               |   2
-rw-r--r--  arch/powerpc/kernel/vdso32/Makefile      |   1
-rw-r--r--  arch/powerpc/kernel/vdso32/datapage.S    |   2
-rw-r--r--  arch/powerpc/kernel/vdso64/Makefile      |   1
-rw-r--r--  arch/powerpc/kernel/vdso64/datapage.S    |   2
-rw-r--r--  arch/powerpc/kernel/vector.S             | 112
36 files changed, 483 insertions(+), 758 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ba336930d..794f22adf 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -136,12 +136,18 @@ endif
obj-$(CONFIG_EPAPR_PARAVIRT) += epapr_paravirt.o epapr_hcalls.o
obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o
-# Disable GCOV in odd or sensitive code
+# Disable GCOV & sanitizers in odd or sensitive code
GCOV_PROFILE_prom_init.o := n
+UBSAN_SANITIZE_prom_init.o := n
GCOV_PROFILE_ftrace.o := n
+UBSAN_SANITIZE_ftrace.o := n
GCOV_PROFILE_machine_kexec_64.o := n
+UBSAN_SANITIZE_machine_kexec_64.o := n
GCOV_PROFILE_machine_kexec_32.o := n
+UBSAN_SANITIZE_machine_kexec_32.o := n
GCOV_PROFILE_kprobes.o := n
+UBSAN_SANITIZE_kprobes.o := n
+UBSAN_SANITIZE_vdso.o := n
extra-$(CONFIG_PPC_FPU) += fpu.o
extra-$(CONFIG_ALTIVEC) += vector.o
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 86150fbb4..8e7cb8e2b 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -960,6 +960,7 @@ int fix_alignment(struct pt_regs *regs)
preempt_disable();
enable_kernel_fp();
cvt_df(&data.dd, (float *)&data.x32.low32);
+ disable_kernel_fp();
preempt_enable();
#else
return 0;
@@ -1000,6 +1001,7 @@ int fix_alignment(struct pt_regs *regs)
preempt_disable();
enable_kernel_fp();
cvt_fd((float *)&data.x32.low32, &data.dd);
+ disable_kernel_fp();
preempt_enable();
#else
return 0;
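
With this series, enable_kernel_fp() no longer leaves MSR_FP set after the kernel is done with it; every in-kernel FP region must pair it with disable_kernel_fp() inside a non-preemptible section, as both hunks above now do. A minimal sketch of the expected caller pattern (do_fp_work() is a hypothetical FP-using routine):

	preempt_disable();		/* FP state is per-CPU while in use */
	enable_kernel_fp();		/* flush user FP state, set MSR_FP */
	do_fp_work();			/* hypothetical kernel FP code */
	disable_kernel_fp();		/* clears MSR_FP (and MSR_VSX on VSX CPUs) */
	preempt_enable();
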
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 221d584d0..07cebc351 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -185,14 +185,16 @@ int main(void)
DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened));
- DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
+#ifdef CONFIG_PPC_BOOK3S
+ DEFINE(PACACONTEXTID, offsetof(struct paca_struct, mm_ctx_id));
#ifdef CONFIG_PPC_MM_SLICES
DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
- context.low_slices_psize));
+ mm_ctx_low_slices_psize));
DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct,
- context.high_slices_psize));
+ mm_ctx_high_slices_psize));
DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
#endif /* CONFIG_PPC_MM_SLICES */
+#endif
#ifdef CONFIG_PPC_BOOK3E
DEFINE(PACAPGD, offsetof(struct paca_struct, pgd));
@@ -222,7 +224,7 @@ int main(void)
#ifdef CONFIG_PPC_MM_SLICES
DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp));
#else
- DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
+ DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, mm_ctx_sllp));
#endif /* CONFIG_PPC_MM_SLICES */
DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 52c1e273f..650cfb31e 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -400,7 +400,7 @@ static void *eeh_rmv_device(void *data, void *userdata)
* support EEH. So we just care about PCI devices for
* simplicity here.
*/
- if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
+ if (!dev || (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))
return NULL;
/*
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index a94f155db..0d525ce37 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -223,7 +223,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
beq- 1f
ACCOUNT_CPU_USER_EXIT(r11, r12)
- HMT_MEDIUM_LOW_HAS_PPR
+
+BEGIN_FTR_SECTION
+ HMT_MEDIUM_LOW
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
1: ld r2,GPR2(r1)
ld r1,GPR1(r1)
@@ -312,7 +316,13 @@ syscall_exit_work:
subi r12,r12,TI_FLAGS
4: /* Anything else left to do? */
- SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */
+BEGIN_FTR_SECTION
+ lis r3,INIT_PPR@highest /* Set thread.ppr = 3 */
+ ld r10,PACACURRENT(r13)
+ sldi r3,r3,32 /* bits 11-13 are used for ppr */
+ std r3,TASKTHREADPPR(r10)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
beq ret_from_except_lite
@@ -452,43 +462,11 @@ _GLOBAL(_switch)
/* r3-r13 are caller saved -- Cort */
SAVE_8GPRS(14, r1)
SAVE_10GPRS(22, r1)
- mflr r20 /* Return to switch caller */
- mfmsr r22
- li r0, MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
- oris r0,r0,MSR_VSX@h /* Disable VSX */
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
- oris r0,r0,MSR_VEC@h /* Disable altivec */
- mfspr r24,SPRN_VRSAVE /* save vrsave register value */
- std r24,THREAD_VRSAVE(r3)
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
- and. r0,r0,r22
- beq+ 1f
- andc r22,r22,r0
- MTMSRD(r22)
- isync
-1: std r20,_NIP(r1)
+ std r0,_NIP(r1) /* Return to switch caller */
mfcr r23
std r23,_CCR(r1)
std r1,KSP(r3) /* Set old stack pointer */
-#ifdef CONFIG_PPC_BOOK3S_64
-BEGIN_FTR_SECTION
- /* Event based branch registers */
- mfspr r0, SPRN_BESCR
- std r0, THREAD_BESCR(r3)
- mfspr r0, SPRN_EBBHR
- std r0, THREAD_EBBHR(r3)
- mfspr r0, SPRN_EBBRR
- std r0, THREAD_EBBRR(r3)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-#endif
-
#ifdef CONFIG_SMP
/* We need a sync somewhere here to make sure that if the
* previous task gets rescheduled on another CPU, it sees all
@@ -576,47 +554,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
mr r1,r8 /* start using new stack pointer */
std r7,PACAKSAVE(r13)
-#ifdef CONFIG_PPC_BOOK3S_64
-BEGIN_FTR_SECTION
- /* Event based branch registers */
- ld r0, THREAD_BESCR(r4)
- mtspr SPRN_BESCR, r0
- ld r0, THREAD_EBBHR(r4)
- mtspr SPRN_EBBHR, r0
- ld r0, THREAD_EBBRR(r4)
- mtspr SPRN_EBBRR, r0
-
- ld r0,THREAD_TAR(r4)
- mtspr SPRN_TAR,r0
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-#endif
-
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
- ld r0,THREAD_VRSAVE(r4)
- mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_PPC64
-BEGIN_FTR_SECTION
- lwz r6,THREAD_DSCR_INHERIT(r4)
- ld r0,THREAD_DSCR(r4)
- cmpwi r6,0
- bne 1f
- ld r0,PACA_DSCR_DEFAULT(r13)
-1:
-BEGIN_FTR_SECTION_NESTED(70)
- mfspr r8, SPRN_FSCR
- rldimi r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG)
- mtspr SPRN_FSCR, r8
-END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
- cmpd r0,r25
- beq 2f
- mtspr SPRN_DSCR,r0
-2:
-END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
-#endif
-
ld r6,_CCR(r1)
mtcrf 0xFF,r6
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 0a0399c2a..7716cebf4 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -96,7 +96,6 @@ __start_interrupts:
.globl system_reset_pSeries;
system_reset_pSeries:
- HMT_MEDIUM_PPR_DISCARD
SET_SCRATCH0(r13)
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
@@ -164,7 +163,6 @@ machine_check_pSeries_1:
* some code path might still want to branch into the original
* vector
*/
- HMT_MEDIUM_PPR_DISCARD
SET_SCRATCH0(r13) /* save r13 */
#ifdef CONFIG_PPC_P7_NAP
BEGIN_FTR_SECTION
@@ -199,7 +197,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
. = 0x300
.globl data_access_pSeries
data_access_pSeries:
- HMT_MEDIUM_PPR_DISCARD
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
KVMTEST, 0x300)
@@ -207,7 +204,6 @@ data_access_pSeries:
. = 0x380
.globl data_access_slb_pSeries
data_access_slb_pSeries:
- HMT_MEDIUM_PPR_DISCARD
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXSLB)
EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
@@ -234,15 +230,14 @@ data_access_slb_pSeries:
bctr
#endif
- STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
+ STD_EXCEPTION_PSERIES(0x400, instruction_access)
. = 0x480
.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
- HMT_MEDIUM_PPR_DISCARD
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXSLB)
- EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x480)
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
#ifdef __DISABLED__
@@ -269,25 +264,24 @@ instruction_access_slb_pSeries:
.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
- HMT_MEDIUM_PPR_DISCARD
BEGIN_FTR_SECTION
_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
EXC_HV, SOFTEN_TEST_HV)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
FTR_SECTION_ELSE
_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
- EXC_STD, SOFTEN_TEST_HV_201)
+ EXC_STD, SOFTEN_TEST_PR)
KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
- KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
+ STD_EXCEPTION_PSERIES(0x600, alignment)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x600)
- STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
- KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
+ STD_EXCEPTION_PSERIES(0x700, program_check)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x700)
- STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
- KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
+ STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x800)
. = 0x900
.globl decrementer_pSeries
@@ -297,10 +291,10 @@ decrementer_pSeries:
STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
- KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xa00)
- STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
- KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
+ STD_EXCEPTION_PSERIES(0xb00, trap_0b)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xb00)
. = 0xc00
.globl system_call_pSeries
@@ -331,8 +325,8 @@ system_call_pSeries:
SYSCALL_PSERIES_3
KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
- STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
- KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
+ STD_EXCEPTION_PSERIES(0xd00, single_step)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xd00)
/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
* out of line to handle them
@@ -407,13 +401,12 @@ hv_facility_unavailable_trampoline:
KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
#endif /* CONFIG_CBE_RAS */
- STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
- KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
+ STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
+ KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
. = 0x1500
.global denorm_exception_hv
denorm_exception_hv:
- HMT_MEDIUM_PPR_DISCARD
mtspr SPRN_SPRG_HSCRATCH0,r13
EXCEPTION_PROLOG_0(PACA_EXGEN)
EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
@@ -435,8 +428,8 @@ denorm_exception_hv:
KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
#endif /* CONFIG_CBE_RAS */
- STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
- KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
+ STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x1700)
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
@@ -527,7 +520,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
machine_check_pSeries:
.globl machine_check_fwnmi
machine_check_fwnmi:
- HMT_MEDIUM_PPR_DISCARD
SET_SCRATCH0(r13) /* save r13 */
EXCEPTION_PROLOG_0(PACA_EXMC)
machine_check_pSeries_0:
@@ -536,9 +528,9 @@ machine_check_pSeries_0:
KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
- KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
- KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
- KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x400)
+ KVM_HANDLER(PACA_EXSLB, EXC_STD, 0x480)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x900)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
#ifdef CONFIG_PPC_DENORMALISATION
@@ -621,13 +613,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
/* moved from 0xf00 */
STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
- KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00)
STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
- KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf20)
STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
- KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf40)
STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
- KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
+ KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf60)
STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
@@ -711,7 +703,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
.globl system_reset_fwnmi
.align 7
system_reset_fwnmi:
- HMT_MEDIUM_PPR_DISCARD
SET_SCRATCH0(r13) /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
NOTEST, 0x100)
@@ -1556,29 +1547,19 @@ do_hash_page:
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
bne 77f /* then don't call hash_page now */
- /*
- * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
- * accessing a userspace segment (even from the kernel). We assume
- * kernel addresses always have the high bit set.
- */
- rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
- rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
- orc r0,r12,r0 /* MSR_PR | ~high_bit */
- rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
- ori r4,r4,1 /* add _PAGE_PRESENT */
- rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
/*
* r3 contains the faulting address
- * r4 contains the required access permissions
+ * r4 msr
* r5 contains the trap number
* r6 contains dsisr
*
* at return r3 = 0 for success, 1 for page fault, negative for error
*/
+ mr r4,r12
ld r6,_DSISR(r1)
- bl hash_page /* build HPTE if possible */
- cmpdi r3,0 /* see if hash_page succeeded */
+ bl __hash_page /* build HPTE if possible */
+ cmpdi r3,0 /* see if __hash_page succeeded */
/* Success */
beq fast_exc_return_irq /* Return from exception on success */
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 26d091a1a..3cb3b02a1 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -415,7 +415,7 @@ void crash_fadump(struct pt_regs *regs, const char *str)
else
ppc_save_regs(&fdh->regs);
- fdh->cpu_online_mask = *cpu_online_mask;
+ fdh->online_mask = *cpu_online_mask;
/* Call ibm,os-term rtas call to trigger firmware assisted dump */
rtas_os_term((char *)str);
@@ -646,7 +646,7 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
}
/* Lower 4 bytes of reg_value contains logical cpu id */
cpu = be64_to_cpu(reg_entry->reg_value) & FADUMP_CPU_ID_MASK;
- if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_online_mask)) {
+ if (fdh && !cpumask_test_cpu(cpu, &fdh->online_mask)) {
SKIP_TO_NEXT_CPU(reg_entry);
continue;
}
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 9ad236e5d..2117eaca3 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -73,30 +73,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
MTFSF_L(fr0)
REST_32FPVSRS(0, R4, R7)
- /* FP/VSX off again */
- MTMSRD(r6)
- SYNC
-
blr
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
/*
- * Enable use of the FPU, and VSX if possible, for the caller.
- */
-_GLOBAL(fp_enable)
- mfmsr r3
- ori r3,r3,MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
- oris r3,r3,MSR_VSX@h
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
- SYNC
- MTMSRD(r3)
- isync /* (not necessary for arch 2.02 and later) */
- blr
-
-/*
* Load state from memory into FP registers including FPSCR.
* Assumes the caller has enabled FP in the MSR.
*/
@@ -136,31 +116,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
SYNC
MTMSRD(r5) /* enable use of fpu now */
isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_fpu in switch_to.
- */
-#ifndef CONFIG_SMP
- LOAD_REG_ADDRBASE(r3, last_task_used_math)
- toreal(r3)
- PPC_LL r4,ADDROFF(last_task_used_math)(r3)
- PPC_LCMPI 0,r4,0
- beq 1f
- toreal(r4)
- addi r4,r4,THREAD /* want last_task_used_math->thread */
- addi r10,r4,THREAD_FPSTATE
- SAVE_32FPVSRS(0, R5, R10)
- mffs fr0
- stfd fr0,FPSTATE_FPSCR(r10)
- PPC_LL r5,PT_REGS(r4)
- toreal(r5)
- PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- li r10,MSR_FP|MSR_FE0|MSR_FE1
- andc r4,r4,r10 /* disable FP for previous task */
- PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
/* enable use of FP after return */
#ifdef CONFIG_PPC32
mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */
@@ -179,36 +134,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
lfd fr0,FPSTATE_FPSCR(r10)
MTFSF_L(fr0)
REST_32FPVSRS(0, R4, R10)
-#ifndef CONFIG_SMP
- subi r4,r5,THREAD
- fromreal(r4)
- PPC_STL r4,ADDROFF(last_task_used_math)(r3)
-#endif /* CONFIG_SMP */
/* restore registers and return */
/* we haven't used ctr or xer or lr */
blr
/*
- * giveup_fpu(tsk)
+ * __giveup_fpu(tsk)
* Disable FP for the task given as the argument,
* and save the floating-point registers in its thread_struct.
* Enables the FPU for use in the kernel on return.
*/
-_GLOBAL(giveup_fpu)
- mfmsr r5
- ori r5,r5,MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
- oris r5,r5,MSR_VSX@h
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
- SYNC_601
- ISYNC_601
- MTMSRD(r5) /* enable use of fpu now */
- SYNC_601
- isync
- PPC_LCMPI 0,r3,0
- beqlr- /* if no previous owner, done */
+_GLOBAL(__giveup_fpu)
addi r3,r3,THREAD /* want THREAD of task */
PPC_LL r6,THREAD_FPSAVEAREA(r3)
PPC_LL r5,PT_REGS(r3)
@@ -230,11 +166,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
andc r4,r4,r3 /* disable FP for previous task */
PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
-#ifndef CONFIG_SMP
- li r5,0
- LOAD_REG_ADDRBASE(r4,last_task_used_math)
- PPC_STL r5,ADDROFF(last_task_used_math)(r4)
-#endif /* CONFIG_SMP */
blr
/*
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index fffd1f96b..f705171b9 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -857,29 +857,6 @@ _GLOBAL(load_up_spe)
oris r5,r5,MSR_SPE@h
mtmsr r5 /* enable use of SPE now */
isync
-/*
- * For SMP, we don't do lazy SPE switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_spe in switch_to.
- */
-#ifndef CONFIG_SMP
- lis r3,last_task_used_spe@ha
- lwz r4,last_task_used_spe@l(r3)
- cmpi 0,r4,0
- beq 1f
- addi r4,r4,THREAD /* want THREAD of last_task_used_spe */
- SAVE_32EVRS(0,r10,r4,THREAD_EVR0)
- evxor evr10, evr10, evr10 /* clear out evr10 */
- evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */
- li r5,THREAD_ACC
- evstddx evr10, r4, r5 /* save off accumulator */
- lwz r5,PT_REGS(r4)
- lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- lis r10,MSR_SPE@h
- andc r4,r4,r10 /* disable SPE for previous task */
- stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* !CONFIG_SMP */
/* enable use of SPE after return */
oris r9,r9,MSR_SPE@h
mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */
@@ -889,10 +866,6 @@ _GLOBAL(load_up_spe)
evlddx evr4,r10,r5
evmra evr4,evr4
REST_32EVRS(0,r10,r5,THREAD_EVR0)
-#ifndef CONFIG_SMP
- subi r4,r5,THREAD
- stw r4,last_task_used_spe@l(r3)
-#endif /* !CONFIG_SMP */
blr
/*
@@ -1011,16 +984,10 @@ _GLOBAL(__setup_ehv_ivors)
#ifdef CONFIG_SPE
/*
- * extern void giveup_spe(struct task_struct *prev)
+ * extern void __giveup_spe(struct task_struct *prev)
*
*/
-_GLOBAL(giveup_spe)
- mfmsr r5
- oris r5,r5,MSR_SPE@h
- mtmsr r5 /* enable use of SPE now */
- isync
- cmpi 0,r3,0
- beqlr- /* if no previous owner, done */
+_GLOBAL(__giveup_spe)
addi r3,r3,THREAD /* want THREAD of task */
lwz r5,PT_REGS(r3)
cmpi 0,r5,0
@@ -1035,11 +1002,6 @@ _GLOBAL(giveup_spe)
andc r4,r4,r3 /* disable SPE for previous task */
stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
-#ifndef CONFIG_SMP
- li r5,0
- lis r4,last_task_used_spe@ha
- stw r5,last_task_used_spe@l(r4)
-#endif /* !CONFIG_SMP */
blr
#endif /* CONFIG_SPE */
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 05e804cde..aec9a1b1d 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -109,8 +109,9 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
* If the breakpoint is unregistered between a hw_breakpoint_handler()
* and the single_step_dabr_instruction(), then cleanup the breakpoint
* restoration variables to prevent dangling pointers.
+ * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
*/
- if (bp->ctx && bp->ctx->task)
+ if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L))
bp->ctx->task->thread.last_hit_ubp = NULL;
}
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 112ccf497..cf4fb5429 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -89,13 +89,6 @@ _GLOBAL(power7_powersave_common)
std r0,_LINK(r1)
std r0,_NIP(r1)
-#ifndef CONFIG_SMP
- /* Make sure FPU, VSX etc... are flushed as we may lose
- * state when going to nap mode
- */
- bl discard_lazy_cpu_state
-#endif /* CONFIG_SMP */
-
/* Hard disable interrupts */
mfmsr r9
rldicl r9,r9,48,1
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index ed3ab509f..be8edd67f 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -743,6 +743,8 @@ relocate_new_kernel:
/* Check for 47x cores */
mfspr r3,SPRN_PVR
srwi r3,r3,16
+ cmplwi cr0,r3,PVR_476FPE@h
+ beq setup_map_47x
cmplwi cr0,r3,PVR_476@h
beq setup_map_47x
cmplwi cr0,r3,PVR_476_ISS@h
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index db475d41b..f28754c49 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -701,31 +701,3 @@ _GLOBAL(kexec_sequence)
li r5,0
blr /* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC */
-
-#ifdef CONFIG_MODULES
-#if defined(_CALL_ELF) && _CALL_ELF == 2
-
-#ifdef CONFIG_MODVERSIONS
-.weak __crc_TOC.
-.section "___kcrctab+TOC.","a"
-.globl __kcrctab_TOC.
-__kcrctab_TOC.:
- .llong __crc_TOC.
-#endif
-
-/*
- * Export a fake .TOC. since both modpost and depmod will complain otherwise.
- * Both modpost and depmod strip the leading . so we do the same here.
- */
-.section "__ksymtab_strings","a"
-__kstrtab_TOC.:
- .asciz "TOC."
-
-.section "___ksymtab+TOC.","a"
-/* This symbol name is important: it's used by modpost to find exported syms */
-.globl __ksymtab_TOC.
-__ksymtab_TOC.:
- .llong 0 /* .value */
- .llong __kstrtab_TOC.
-#endif /* ELFv2 */
-#endif /* MODULES */
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index c94d2e018..2c01665eb 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -188,8 +188,8 @@ static uint32_t do_plt_call(void *location,
pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
/* Init, or core PLT? */
- if (location >= mod->module_core
- && location < mod->module_core + mod->core_size)
+ if (location >= mod->core_layout.base
+ && location < mod->core_layout.base + mod->core_layout.size)
entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
else
entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
@@ -296,7 +296,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
#ifdef CONFIG_DYNAMIC_FTRACE
module->arch.tramp =
- do_plt_call(module->module_core,
+ do_plt_call(module->core_layout.base,
(unsigned long)ftrace_caller,
sechdrs, module);
#endif
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 59663af93..08b7a40de 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -326,7 +326,10 @@ static void dedotify_versions(struct modversion_info *vers,
}
}
-/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */
+/*
+ * Undefined symbols which refer to .funcname, hack to funcname. Make .TOC.
+ * seem to be defined (value set later).
+ */
static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
{
unsigned int i;
@@ -334,8 +337,11 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
for (i = 1; i < numsyms; i++) {
if (syms[i].st_shndx == SHN_UNDEF) {
char *name = strtab + syms[i].st_name;
- if (name[0] == '.')
- memmove(name, name+1, strlen(name));
+ if (name[0] == '.') {
+ if (strcmp(name+1, "TOC.") == 0)
+ syms[i].st_shndx = SHN_ABS;
+ syms[i].st_name++;
+ }
}
}
}
@@ -351,7 +357,7 @@ static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);
for (i = 1; i < numsyms; i++) {
- if (syms[i].st_shndx == SHN_UNDEF
+ if (syms[i].st_shndx == SHN_ABS
&& strcmp(strtab + syms[i].st_name, "TOC.") == 0)
return &syms[i];
}
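
The replacement of memmove() with st_name++ is worth a note: rather than rewriting the string table in place, dedotify() now advances the symbol's name offset past the leading dot, so the same string-table bytes serve both spellings. A stand-alone sketch of the trick, with toy values:

	#include <stdio.h>

	int main(void)
	{
		char strtab[] = "\0.memcpy";	/* toy ELF string table */
		unsigned int st_name = 1;	/* offset of ".memcpy" */

		printf("before: %s\n", strtab + st_name);	/* .memcpy */
		st_name++;			/* dedotify: skip the '.' */
		printf("after:  %s\n", strtab + st_name);	/* memcpy */
		return 0;
	}
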
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 32e26526f..0cab9e8c3 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kmsg_dump.h>
+#include <linux/pagemap.h>
#include <linux/pstore.h>
#include <linux/zlib.h>
#include <asm/uaccess.h>
@@ -733,24 +734,10 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin)
{
- int size;
-
if (ppc_md.nvram_size == NULL)
return -ENODEV;
- size = ppc_md.nvram_size();
-
- switch (origin) {
- case 1:
- offset += file->f_pos;
- break;
- case 2:
- offset += size;
- break;
- }
- if (offset < 0)
- return -EINVAL;
- file->f_pos = offset;
- return file->f_pos;
+ return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE,
+ ppc_md.nvram_size());
}
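
generic_file_llseek_size() provides the same SEEK_SET/SEEK_CUR/SEEK_END arithmetic that the removed switch open-coded, plus bounds checking against the given maximum. For reference, the origin values map as before:

	/* origin 0 (SEEK_SET): pos = offset
	 * origin 1 (SEEK_CUR): pos = file->f_pos + offset
	 * origin 2 (SEEK_END): pos = ppc_md.nvram_size() + offset
	 * with negative results still rejected (-EINVAL). */
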
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 2e710c158..526ac6750 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -187,9 +187,6 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
pci_device_add(dev, bus);
- /* Setup MSI caps & disable MSI/MSI-X interrupts */
- pci_msi_setup_pci_dev(dev);
-
return dev;
}
EXPORT_SYMBOL(of_create_pci_dev);
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 202963ee0..41e1607e8 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -19,13 +19,11 @@ EXPORT_SYMBOL(_mcount);
#endif
#ifdef CONFIG_PPC_FPU
-EXPORT_SYMBOL(giveup_fpu);
EXPORT_SYMBOL(load_fp_state);
EXPORT_SYMBOL(store_fp_state);
#endif
#ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL(giveup_altivec);
EXPORT_SYMBOL(load_vr_state);
EXPORT_SYMBOL(store_vr_state);
#endif
@@ -34,10 +32,6 @@ EXPORT_SYMBOL(store_vr_state);
EXPORT_SYMBOL_GPL(__giveup_vsx);
#endif
-#ifdef CONFIG_SPE
-EXPORT_SYMBOL(giveup_spe);
-#endif
-
#ifdef CONFIG_EPAPR_PARAVIRT
EXPORT_SYMBOL(epapr_hypercall_start);
#endif
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ef2ad2d68..3c5736e52 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -67,15 +67,8 @@
extern unsigned long _get_SP(void);
-#ifndef CONFIG_SMP
-struct task_struct *last_task_used_math = NULL;
-struct task_struct *last_task_used_altivec = NULL;
-struct task_struct *last_task_used_vsx = NULL;
-struct task_struct *last_task_used_spe = NULL;
-#endif
-
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-void giveup_fpu_maybe_transactional(struct task_struct *tsk)
+static void check_if_tm_restore_required(struct task_struct *tsk)
{
/*
* If we are saving the current thread's registers, and the
@@ -89,34 +82,67 @@ void giveup_fpu_maybe_transactional(struct task_struct *tsk)
tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
set_thread_flag(TIF_RESTORE_TM);
}
+}
+#else
+static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
+bool strict_msr_control;
+EXPORT_SYMBOL(strict_msr_control);
+
+static int __init enable_strict_msr_control(char *str)
+{
+ strict_msr_control = true;
+ pr_info("Enabling strict facility control\n");
- giveup_fpu(tsk);
+ return 0;
}
+early_param("ppc_strict_facility_enable", enable_strict_msr_control);
-void giveup_altivec_maybe_transactional(struct task_struct *tsk)
+void msr_check_and_set(unsigned long bits)
{
- /*
- * If we are saving the current thread's registers, and the
- * thread is in a transactional state, set the TIF_RESTORE_TM
- * bit so that we know to restore the registers before
- * returning to userspace.
- */
- if (tsk == current && tsk->thread.regs &&
- MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
- !test_thread_flag(TIF_RESTORE_TM)) {
- tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
- set_thread_flag(TIF_RESTORE_TM);
- }
+ unsigned long oldmsr = mfmsr();
+ unsigned long newmsr;
+
+ newmsr = oldmsr | bits;
+
+#ifdef CONFIG_VSX
+ if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
+ newmsr |= MSR_VSX;
+#endif
- giveup_altivec(tsk);
+ if (oldmsr != newmsr)
+ mtmsr_isync(newmsr);
}
-#else
-#define giveup_fpu_maybe_transactional(tsk) giveup_fpu(tsk)
-#define giveup_altivec_maybe_transactional(tsk) giveup_altivec(tsk)
-#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+void __msr_check_and_clear(unsigned long bits)
+{
+ unsigned long oldmsr = mfmsr();
+ unsigned long newmsr;
+
+ newmsr = oldmsr & ~bits;
+
+#ifdef CONFIG_VSX
+ if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
+ newmsr &= ~MSR_VSX;
+#endif
+
+ if (oldmsr != newmsr)
+ mtmsr_isync(newmsr);
+}
+EXPORT_SYMBOL(__msr_check_and_clear);
#ifdef CONFIG_PPC_FPU
+void giveup_fpu(struct task_struct *tsk)
+{
+ check_if_tm_restore_required(tsk);
+
+ msr_check_and_set(MSR_FP);
+ __giveup_fpu(tsk);
+ msr_check_and_clear(MSR_FP);
+}
+EXPORT_SYMBOL(giveup_fpu);
+
/*
* Make sure the floating-point register state in the
* the thread_struct is up to date for task tsk.
@@ -134,52 +160,56 @@ void flush_fp_to_thread(struct task_struct *tsk)
*/
preempt_disable();
if (tsk->thread.regs->msr & MSR_FP) {
-#ifdef CONFIG_SMP
/*
* This should only ever be called for current or
* for a stopped child process. Since we save away
- * the FP register state on context switch on SMP,
+ * the FP register state on context switch,
* there is something wrong if a stopped child appears
* to still have its FP state in the CPU registers.
*/
BUG_ON(tsk != current);
-#endif
- giveup_fpu_maybe_transactional(tsk);
+ giveup_fpu(tsk);
}
preempt_enable();
}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
-#endif /* CONFIG_PPC_FPU */
void enable_kernel_fp(void)
{
WARN_ON(preemptible());
-#ifdef CONFIG_SMP
- if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
- giveup_fpu_maybe_transactional(current);
- else
- giveup_fpu(NULL); /* just enables FP for kernel */
-#else
- giveup_fpu_maybe_transactional(last_task_used_math);
-#endif /* CONFIG_SMP */
+ msr_check_and_set(MSR_FP);
+
+ if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
+ check_if_tm_restore_required(current);
+ __giveup_fpu(current);
+ }
}
EXPORT_SYMBOL(enable_kernel_fp);
+#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_ALTIVEC
+void giveup_altivec(struct task_struct *tsk)
+{
+ check_if_tm_restore_required(tsk);
+
+ msr_check_and_set(MSR_VEC);
+ __giveup_altivec(tsk);
+ msr_check_and_clear(MSR_VEC);
+}
+EXPORT_SYMBOL(giveup_altivec);
+
void enable_kernel_altivec(void)
{
WARN_ON(preemptible());
-#ifdef CONFIG_SMP
- if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
- giveup_altivec_maybe_transactional(current);
- else
- giveup_altivec_notask();
-#else
- giveup_altivec_maybe_transactional(last_task_used_altivec);
-#endif /* CONFIG_SMP */
+ msr_check_and_set(MSR_VEC);
+
+ if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
+ check_if_tm_restore_required(current);
+ __giveup_altivec(current);
+ }
}
EXPORT_SYMBOL(enable_kernel_altivec);
@@ -192,10 +222,8 @@ void flush_altivec_to_thread(struct task_struct *tsk)
if (tsk->thread.regs) {
preempt_disable();
if (tsk->thread.regs->msr & MSR_VEC) {
-#ifdef CONFIG_SMP
BUG_ON(tsk != current);
-#endif
- giveup_altivec_maybe_transactional(tsk);
+ giveup_altivec(tsk);
}
preempt_enable();
}
@@ -204,37 +232,43 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
-void enable_kernel_vsx(void)
+void giveup_vsx(struct task_struct *tsk)
{
- WARN_ON(preemptible());
+ check_if_tm_restore_required(tsk);
-#ifdef CONFIG_SMP
- if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
- giveup_vsx(current);
- else
- giveup_vsx(NULL); /* just enable vsx for kernel - force */
-#else
- giveup_vsx(last_task_used_vsx);
-#endif /* CONFIG_SMP */
+ msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+ if (tsk->thread.regs->msr & MSR_FP)
+ __giveup_fpu(tsk);
+ if (tsk->thread.regs->msr & MSR_VEC)
+ __giveup_altivec(tsk);
+ __giveup_vsx(tsk);
+ msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
-EXPORT_SYMBOL(enable_kernel_vsx);
+EXPORT_SYMBOL(giveup_vsx);
-void giveup_vsx(struct task_struct *tsk)
+void enable_kernel_vsx(void)
{
- giveup_fpu_maybe_transactional(tsk);
- giveup_altivec_maybe_transactional(tsk);
- __giveup_vsx(tsk);
+ WARN_ON(preemptible());
+
+ msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+
+ if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
+ check_if_tm_restore_required(current);
+ if (current->thread.regs->msr & MSR_FP)
+ __giveup_fpu(current);
+ if (current->thread.regs->msr & MSR_VEC)
+ __giveup_altivec(current);
+ __giveup_vsx(current);
+ }
}
-EXPORT_SYMBOL(giveup_vsx);
+EXPORT_SYMBOL(enable_kernel_vsx);
void flush_vsx_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
preempt_disable();
if (tsk->thread.regs->msr & MSR_VSX) {
-#ifdef CONFIG_SMP
BUG_ON(tsk != current);
-#endif
giveup_vsx(tsk);
}
preempt_enable();
@@ -244,19 +278,26 @@ EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
+void giveup_spe(struct task_struct *tsk)
+{
+ check_if_tm_restore_required(tsk);
+
+ msr_check_and_set(MSR_SPE);
+ __giveup_spe(tsk);
+ msr_check_and_clear(MSR_SPE);
+}
+EXPORT_SYMBOL(giveup_spe);
void enable_kernel_spe(void)
{
WARN_ON(preemptible());
-#ifdef CONFIG_SMP
- if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
- giveup_spe(current);
- else
- giveup_spe(NULL); /* just enable SPE for kernel - force */
-#else
- giveup_spe(last_task_used_spe);
-#endif /* __SMP __ */
+ msr_check_and_set(MSR_SPE);
+
+ if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
+ check_if_tm_restore_required(current);
+ __giveup_spe(current);
+ }
}
EXPORT_SYMBOL(enable_kernel_spe);
@@ -265,9 +306,7 @@ void flush_spe_to_thread(struct task_struct *tsk)
if (tsk->thread.regs) {
preempt_disable();
if (tsk->thread.regs->msr & MSR_SPE) {
-#ifdef CONFIG_SMP
BUG_ON(tsk != current);
-#endif
tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
giveup_spe(tsk);
}
@@ -276,31 +315,81 @@ void flush_spe_to_thread(struct task_struct *tsk)
}
#endif /* CONFIG_SPE */
-#ifndef CONFIG_SMP
-/*
- * If we are doing lazy switching of CPU state (FP, altivec or SPE),
- * and the current task has some state, discard it.
- */
-void discard_lazy_cpu_state(void)
+static unsigned long msr_all_available;
+
+static int __init init_msr_all_available(void)
{
- preempt_disable();
- if (last_task_used_math == current)
- last_task_used_math = NULL;
+#ifdef CONFIG_PPC_FPU
+ msr_all_available |= MSR_FP;
+#endif
#ifdef CONFIG_ALTIVEC
- if (last_task_used_altivec == current)
- last_task_used_altivec = NULL;
-#endif /* CONFIG_ALTIVEC */
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ msr_all_available |= MSR_VEC;
+#endif
#ifdef CONFIG_VSX
- if (last_task_used_vsx == current)
- last_task_used_vsx = NULL;
-#endif /* CONFIG_VSX */
+ if (cpu_has_feature(CPU_FTR_VSX))
+ msr_all_available |= MSR_VSX;
+#endif
#ifdef CONFIG_SPE
- if (last_task_used_spe == current)
- last_task_used_spe = NULL;
+ if (cpu_has_feature(CPU_FTR_SPE))
+ msr_all_available |= MSR_SPE;
#endif
- preempt_enable();
+
+ return 0;
}
-#endif /* CONFIG_SMP */
+early_initcall(init_msr_all_available);
+
+void giveup_all(struct task_struct *tsk)
+{
+ unsigned long usermsr;
+
+ if (!tsk->thread.regs)
+ return;
+
+ usermsr = tsk->thread.regs->msr;
+
+ if ((usermsr & msr_all_available) == 0)
+ return;
+
+ msr_check_and_set(msr_all_available);
+
+#ifdef CONFIG_PPC_FPU
+ if (usermsr & MSR_FP)
+ __giveup_fpu(tsk);
+#endif
+#ifdef CONFIG_ALTIVEC
+ if (usermsr & MSR_VEC)
+ __giveup_altivec(tsk);
+#endif
+#ifdef CONFIG_VSX
+ if (usermsr & MSR_VSX)
+ __giveup_vsx(tsk);
+#endif
+#ifdef CONFIG_SPE
+ if (usermsr & MSR_SPE)
+ __giveup_spe(tsk);
+#endif
+
+ msr_check_and_clear(msr_all_available);
+}
+EXPORT_SYMBOL(giveup_all);
+
+void flush_all_to_thread(struct task_struct *tsk)
+{
+ if (tsk->thread.regs) {
+ preempt_disable();
+ BUG_ON(tsk != current);
+ giveup_all(tsk);
+
+#ifdef CONFIG_SPE
+ if (tsk->thread.regs->msr & MSR_SPE)
+ tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
+#endif
+
+ preempt_enable();
+ }
+}
+EXPORT_SYMBOL(flush_all_to_thread);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
@@ -569,24 +658,6 @@ static void tm_reclaim_thread(struct thread_struct *thr,
if (!MSR_TM_SUSPENDED(mfmsr()))
return;
- /*
- * Use the current MSR TM suspended bit to track if we have
- * checkpointed state outstanding.
- * On signal delivery, we'd normally reclaim the checkpointed
- * state to obtain stack pointer (see:get_tm_stackpointer()).
- * This will then directly return to userspace without going
- * through __switch_to(). However, if the stack frame is bad,
- * we need to exit this thread which calls __switch_to() which
- * will again attempt to reclaim the already saved tm state.
- * Hence we need to check that we've not already reclaimed
- * this state.
- * We do this using the current MSR, rather tracking it in
- * some specific thread_struct bit, as it has the additional
- * benifit of checking for a potential TM bad thing exception.
- */
- if (!MSR_TM_SUSPENDED(mfmsr()))
- return;
-
tm_reclaim(thr, thr->regs->msr, cause);
/* Having done the reclaim, we now have the checkpointed
@@ -762,13 +833,15 @@ void restore_tm_state(struct pt_regs *regs)
msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
if (msr_diff & MSR_FP) {
- fp_enable();
+ msr_check_and_set(MSR_FP);
load_fp_state(&current->thread.fp_state);
+ msr_check_and_clear(MSR_FP);
regs->msr |= current->thread.fpexc_mode;
}
if (msr_diff & MSR_VEC) {
- vec_enable();
+ msr_check_and_set(MSR_VEC);
load_vr_state(&current->thread.vr_state);
+ msr_check_and_clear(MSR_VEC);
}
regs->msr |= msr_diff;
}
@@ -778,112 +851,87 @@ void restore_tm_state(struct pt_regs *regs)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-struct task_struct *__switch_to(struct task_struct *prev,
- struct task_struct *new)
+static inline void save_sprs(struct thread_struct *t)
{
- struct thread_struct *new_thread, *old_thread;
- struct task_struct *last;
-#ifdef CONFIG_PPC_BOOK3S_64
- struct ppc64_tlb_batch *batch;
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ t->vrsave = mfspr(SPRN_VRSAVE);
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ if (cpu_has_feature(CPU_FTR_DSCR))
+ t->dscr = mfspr(SPRN_DSCR);
- WARN_ON(!irqs_disabled());
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ t->bescr = mfspr(SPRN_BESCR);
+ t->ebbhr = mfspr(SPRN_EBBHR);
+ t->ebbrr = mfspr(SPRN_EBBRR);
- /* Back up the TAR and DSCR across context switches.
- * Note that the TAR is not available for use in the kernel. (To
- * provide this, the TAR should be backed up/restored on exception
- * entry/exit instead, and be in pt_regs. FIXME, this should be in
- * pt_regs anyway (for debug).)
- * Save the TAR and DSCR here before we do treclaim/trecheckpoint as
- * these will change them.
- */
- save_early_sprs(&prev->thread);
+ t->fscr = mfspr(SPRN_FSCR);
- __switch_to_tm(prev);
+ /*
+ * Note that the TAR is not available for use in the kernel.
+ * (To provide this, the TAR should be backed up/restored on
+ * exception entry/exit instead, and be in pt_regs. FIXME,
+ * this should be in pt_regs anyway (for debug).)
+ */
+ t->tar = mfspr(SPRN_TAR);
+ }
+#endif
+}
-#ifdef CONFIG_SMP
- /* avoid complexity of lazy save/restore of fpu
- * by just saving it every time we switch out if
- * this task used the fpu during the last quantum.
- *
- * If it tries to use the fpu again, it'll trap and
- * reload its fp regs. So we don't have to do a restore
- * every switch, just a save.
- * -- Cort
- */
- if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
- giveup_fpu(prev);
+static inline void restore_sprs(struct thread_struct *old_thread,
+ struct thread_struct *new_thread)
+{
#ifdef CONFIG_ALTIVEC
- /*
- * If the previous thread used altivec in the last quantum
- * (thus changing altivec regs) then save them.
- * We used to check the VRSAVE register but not all apps
- * set it, so we don't rely on it now (and in fact we need
- * to save & restore VSCR even if VRSAVE == 0). -- paulus
- *
- * On SMP we always save/restore altivec regs just to avoid the
- * complexity of changing processors.
- * -- Cort
- */
- if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
- giveup_altivec(prev);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
- if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
- /* VMX and FPU registers are already save here */
- __giveup_vsx(prev);
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
- /*
- * If the previous thread used spe in the last quantum
- * (thus changing spe regs) then save them.
- *
- * On SMP we always save/restore spe regs just to avoid the
- * complexity of changing processors.
- */
- if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
- giveup_spe(prev);
-#endif /* CONFIG_SPE */
+ if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+ old_thread->vrsave != new_thread->vrsave)
+ mtspr(SPRN_VRSAVE, new_thread->vrsave);
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ if (cpu_has_feature(CPU_FTR_DSCR)) {
+ u64 dscr = get_paca()->dscr_default;
+ u64 fscr = old_thread->fscr & ~FSCR_DSCR;
-#else /* CONFIG_SMP */
-#ifdef CONFIG_ALTIVEC
- /* Avoid the trap. On smp this this never happens since
- * we don't set last_task_used_altivec -- Cort
- */
- if (new->thread.regs && last_task_used_altivec == new)
- new->thread.regs->msr |= MSR_VEC;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
- if (new->thread.regs && last_task_used_vsx == new)
- new->thread.regs->msr |= MSR_VSX;
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
- /* Avoid the trap. On smp this this never happens since
- * we don't set last_task_used_spe
- */
- if (new->thread.regs && last_task_used_spe == new)
- new->thread.regs->msr |= MSR_SPE;
-#endif /* CONFIG_SPE */
+ if (new_thread->dscr_inherit) {
+ dscr = new_thread->dscr;
+ fscr |= FSCR_DSCR;
+ }
-#endif /* CONFIG_SMP */
+ if (old_thread->dscr != dscr)
+ mtspr(SPRN_DSCR, dscr);
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- switch_booke_debug_regs(&new->thread.debug);
-#else
-/*
- * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
- * schedule DABR
- */
-#ifndef CONFIG_HAVE_HW_BREAKPOINT
- if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
- __set_breakpoint(&new->thread.hw_brk);
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+ if (old_thread->fscr != fscr)
+ mtspr(SPRN_FSCR, fscr);
+ }
+
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ if (old_thread->bescr != new_thread->bescr)
+ mtspr(SPRN_BESCR, new_thread->bescr);
+ if (old_thread->ebbhr != new_thread->ebbhr)
+ mtspr(SPRN_EBBHR, new_thread->ebbhr);
+ if (old_thread->ebbrr != new_thread->ebbrr)
+ mtspr(SPRN_EBBRR, new_thread->ebbrr);
+
+ if (old_thread->tar != new_thread->tar)
+ mtspr(SPRN_TAR, new_thread->tar);
+ }
#endif
+}
+struct task_struct *__switch_to(struct task_struct *prev,
+ struct task_struct *new)
+{
+ struct thread_struct *new_thread, *old_thread;
+ struct task_struct *last;
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct ppc64_tlb_batch *batch;
+#endif
new_thread = &new->thread;
old_thread = &current->thread;
+ WARN_ON(!irqs_disabled());
+
#ifdef CONFIG_PPC64
/*
* Collect processor utilization data per process
@@ -908,6 +956,30 @@ struct task_struct *__switch_to(struct task_struct *prev,
}
#endif /* CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+ switch_booke_debug_regs(&new->thread.debug);
+#else
+/*
+ * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
+ * schedule DABR
+ */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
+ if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
+ __set_breakpoint(&new->thread.hw_brk);
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+#endif
+
+ /*
+ * We need to save SPRs before treclaim/trecheckpoint as these will
+ * change a number of them.
+ */
+ save_sprs(&prev->thread);
+
+ __switch_to_tm(prev);
+
+ /* Save FPU, Altivec, VSX and SPE state */
+ giveup_all(prev);
+
/*
* We can't take a PMU exception inside _switch() since there is a
* window where the kernel stack SLB and the kernel stack are out
@@ -917,6 +989,15 @@ struct task_struct *__switch_to(struct task_struct *prev,
tm_recheckpoint_new_task(new);
+ /*
+ * Call restore_sprs() before calling _switch(). If we move it after
+ * _switch() then we miss out on calling it for new tasks. The reason
+ * for this is we manually create a stack frame for new tasks that
+ * directly returns through ret_from_fork() or
+ * ret_from_kernel_thread(). See copy_thread() for details.
+ */
+ restore_sprs(old_thread, new_thread);
+
last = _switch(old_thread, new_thread);
#ifdef CONFIG_PPC_BOOK3S_64
@@ -970,10 +1051,12 @@ static void show_instructions(struct pt_regs *regs)
printk("\n");
}
-static struct regbit {
+struct regbit {
unsigned long bit;
const char *name;
-} msr_bits[] = {
+};
+
+static struct regbit msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
{MSR_SF, "SF"},
{MSR_HV, "HV"},
@@ -1003,16 +1086,49 @@ static struct regbit {
{0, NULL}
};
-static void printbits(unsigned long val, struct regbit *bits)
+static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
{
- const char *sep = "";
+ const char *s = "";
- printk("<");
for (; bits->bit; ++bits)
if (val & bits->bit) {
- printk("%s%s", sep, bits->name);
- sep = ",";
+ printk("%s%s", s, bits->name);
+ s = sep;
}
+}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static struct regbit msr_tm_bits[] = {
+ {MSR_TS_T, "T"},
+ {MSR_TS_S, "S"},
+ {MSR_TM, "E"},
+ {0, NULL}
+};
+
+static void print_tm_bits(unsigned long val)
+{
+/*
* This only prints something if at least one of the TM bits is set.
+ * Inside the TM[], the output means:
+ * E: Enabled (bit 32)
+ * S: Suspended (bit 33)
+ * T: Transactional (bit 34)
+ */
+ if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
+ printk(",TM[");
+ print_bits(val, msr_tm_bits, "");
+ printk("]");
+ }
+}
+#else
+static void print_tm_bits(unsigned long val) {}
+#endif
+
+static void print_msr_bits(unsigned long val)
+{
+ printk("<");
+ print_bits(val, msr_bits, ",");
+ print_tm_bits(val);
printk(">");
}
@@ -1037,7 +1153,7 @@ void show_regs(struct pt_regs * regs)
printk("REGS: %p TRAP: %04lx %s (%s)\n",
regs, regs->trap, print_tainted(), init_utsname()->release);
printk("MSR: "REG" ", regs->msr);
- printbits(regs->msr, msr_bits);
+ print_msr_bits(regs->msr);
printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
trap = TRAP(regs);
if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
@@ -1079,13 +1195,10 @@ void show_regs(struct pt_regs * regs)
void exit_thread(void)
{
- discard_lazy_cpu_state();
}
void flush_thread(void)
{
- discard_lazy_cpu_state();
-
#ifdef CONFIG_HAVE_HW_BREAKPOINT
flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
@@ -1104,10 +1217,7 @@ release_thread(struct task_struct *t)
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- flush_fp_to_thread(src);
- flush_altivec_to_thread(src);
- flush_vsx_to_thread(src);
- flush_spe_to_thread(src);
+ flush_all_to_thread(src);
/*
* Flush TM state out so we can copy it. __switch_to_tm() does this
* flush but it removes the checkpointed state from the current CPU and
@@ -1230,7 +1340,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_DSCR)) {
p->thread.dscr_inherit = current->thread.dscr_inherit;
- p->thread.dscr = current->thread.dscr;
+ p->thread.dscr = mfspr(SPRN_DSCR);
}
if (cpu_has_feature(CPU_FTR_HAS_PPR))
p->thread.ppr = INIT_PPR;
@@ -1323,7 +1433,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
regs->msr = MSR_USER32;
}
#endif
- discard_lazy_cpu_state();
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
#endif
@@ -1659,9 +1768,9 @@ static inline unsigned long brk_rnd(void)
/* 8MB for 32bit, 1GB for 64bit */
if (is_32bit_task())
- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+ rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
else
- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+ rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
return rnd << PAGE_SHIFT;
}
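
Pieced together from the hunks above, the new __switch_to() handling of the outgoing thread's state runs in this order (a condensed sketch, eliding the TLB-batch and breakpoint code):

	save_sprs(&prev->thread);	/* snapshot VRSAVE/DSCR/FSCR/EBB/TAR */
	__switch_to_tm(prev);		/* treclaim may change those SPRs */
	giveup_all(prev);		/* flush FP/VMX/VSX/SPE to thread_struct */
	tm_recheckpoint_new_task(new);
	restore_sprs(old_thread, new_thread);	/* before _switch(), so tasks
						 * entering via ret_from_fork()
						 * also get their SPRs */
	last = _switch(old_thread, new_thread);
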
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 92dea8df6..da5192590 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -389,6 +389,7 @@ static void __init prom_printf(const char *format, ...)
break;
}
}
+ va_end(args);
}
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 737c0d0b5..30a03c03f 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -60,6 +60,7 @@ struct pt_regs_offset {
#define STR(s) #s /* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define GPR_OFFSET_NAME(num) \
+ {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
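
With the extra initializer, each GPR is reachable under both the "rN" and "gprN" spellings, which matters for kprobe event argument parsing. A sketch using the existing lookup helper in this file:

	/* both calls now return offsetof(struct pt_regs, gpr[1]) */
	int off_short = regs_query_register_offset("r1");
	int off_long  = regs_query_register_offset("gpr1");
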
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 5a753fae8..28736ff27 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -44,6 +44,9 @@
#include <asm/mmu.h>
#include <asm/topology.h>
+/* This is here deliberately so it's only used in this file */
+void enter_rtas(unsigned long);
+
struct rtas_t rtas = {
.lock = __ARCH_SPIN_LOCK_UNLOCKED
};
@@ -93,21 +96,13 @@ static void unlock_rtas(unsigned long flags)
*/
static void call_rtas_display_status(unsigned char c)
{
- struct rtas_args *args = &rtas.args;
unsigned long s;
if (!rtas.base)
return;
- s = lock_rtas();
-
- args->token = cpu_to_be32(10);
- args->nargs = cpu_to_be32(1);
- args->nret = cpu_to_be32(1);
- args->rets = &(args->args[1]);
- args->args[0] = cpu_to_be32(c);
-
- enter_rtas(__pa(args));
+ s = lock_rtas();
+ rtas_call_unlocked(&rtas.args, 10, 1, 1, NULL, c);
unlock_rtas(s);
}
@@ -418,6 +413,36 @@ static char *__fetch_rtas_last_error(char *altbuf)
#define get_errorlog_buffer() NULL
#endif
+
+static void
+va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
+ va_list list)
+{
+ int i;
+
+ args->token = cpu_to_be32(token);
+ args->nargs = cpu_to_be32(nargs);
+ args->nret = cpu_to_be32(nret);
+ args->rets = &(args->args[nargs]);
+
+ for (i = 0; i < nargs; ++i)
+ args->args[i] = cpu_to_be32(va_arg(list, __u32));
+
+ for (i = 0; i < nret; ++i)
+ args->rets[i] = 0;
+
+ enter_rtas(__pa(args));
+}
+
+void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
+{
+ va_list list;
+
+ va_start(list, nret);
+ va_rtas_call_unlocked(args, token, nargs, nret, list);
+ va_end(list);
+}
+
int rtas_call(int token, int nargs, int nret, int *outputs, ...)
{
va_list list;
@@ -431,22 +456,14 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
return -1;
s = lock_rtas();
+
+ /* We use the global rtas args buffer */
rtas_args = &rtas.args;
- rtas_args->token = cpu_to_be32(token);
- rtas_args->nargs = cpu_to_be32(nargs);
- rtas_args->nret = cpu_to_be32(nret);
- rtas_args->rets = &(rtas_args->args[nargs]);
va_start(list, outputs);
- for (i = 0; i < nargs; ++i)
- rtas_args->args[i] = cpu_to_be32(va_arg(list, __u32));
+ va_rtas_call_unlocked(rtas_args, token, nargs, nret, list);
va_end(list);
- for (i = 0; i < nret; ++i)
- rtas_args->rets[i] = 0;
-
- enter_rtas(__pa(rtas_args));
-
/* A -1 return code indicates that the last command couldn't
be completed due to a hardware error. */
if (be32_to_cpu(rtas_args->rets[0]) == -1)
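
The new rtas_call_unlocked() entry point is for callers that manage their own struct rtas_args buffer (or already hold the RTAS lock, as call_rtas_display_status() above does). A sketch of a caller with a private buffer; token and input_arg are placeholders for values obtained via rtas_token() and the caller's protocol:

	static struct rtas_args args;		/* caller-owned buffer */

	rtas_call_unlocked(&args, token, 1, 1, input_arg);
	if (be32_to_cpu(args.rets[0]) != 0)
		pr_warn("rtas call failed\n");	/* rets[] filled in by RTAS */
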
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index ef7c24e84..b6aa378af 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -458,7 +458,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
* contains valid data
*/
if (current->thread.used_vsr && ctx_has_vsx_region) {
- __giveup_vsx(current);
+ flush_vsx_to_thread(current);
if (copy_vsx_to_user(&frame->mc_vsregs, current))
return 1;
msr |= MSR_VSX;
@@ -606,7 +606,7 @@ static int save_tm_user_regs(struct pt_regs *regs,
* contains valid data
*/
if (current->thread.used_vsr) {
- __giveup_vsx(current);
+ flush_vsx_to_thread(current);
if (copy_vsx_to_user(&frame->mc_vsregs, current))
return 1;
if (msr & MSR_VSX) {
@@ -687,15 +687,6 @@ static long restore_user_regs(struct pt_regs *regs,
if (sig)
regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
- /*
- * Do this before updating the thread state in
- * current->thread.fpr/vr/evr. That way, if we get preempted
- * and another task grabs the FPU/Altivec/SPE, it won't be
- * tempted to save the current CPU state into the thread_struct
- * and corrupt what we are writing there.
- */
- discard_lazy_cpu_state();
-
#ifdef CONFIG_ALTIVEC
/*
* Force the process to reload the altivec registers from
@@ -798,15 +789,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
/* Restore the previous little-endian mode */
regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
- /*
- * Do this before updating the thread state in
- * current->thread.fpr/vr/evr. That way, if we get preempted
- * and another task grabs the FPU/Altivec/SPE, it won't be
- * tempted to save the current CPU state into the thread_struct
- * and corrupt what we are writing there.
- */
- discard_lazy_cpu_state();
-
#ifdef CONFIG_ALTIVEC
regs->msr &= ~MSR_VEC;
if (msr & MSR_VEC) {
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c676ecec0..25520794a 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -147,7 +147,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
* VMX data.
*/
if (current->thread.used_vsr && ctx_has_vsx_region) {
- __giveup_vsx(current);
+ flush_vsx_to_thread(current);
v_regs += ELF_NVRREG;
err |= copy_vsx_to_user(v_regs, current);
/* set MSR_VSX in the MSR value in the frame to
@@ -270,7 +270,7 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
* VMX data.
*/
if (current->thread.used_vsr) {
- __giveup_vsx(current);
+ flush_vsx_to_thread(current);
v_regs += ELF_NVRREG;
tm_v_regs += ELF_NVRREG;
@@ -350,15 +350,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
err |= __get_user(set->sig[0], &sc->oldmask);
/*
- * Do this before updating the thread state in
- * current->thread.fpr/vr. That way, if we get preempted
- * and another task grabs the FPU/Altivec, it won't be
- * tempted to save the current CPU state into the thread_struct
- * and corrupt what we are writing there.
- */
- discard_lazy_cpu_state();
-
- /*
* Force reload of FP/VEC.
* This has to be done before copying stuff into current->thread.fpr/vr
* for the reasons explained in the previous comment.
@@ -469,15 +460,6 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
/*
- * Do this before updating the thread state in
- * current->thread.fpr/vr. That way, if we get preempted
- * and another task grabs the FPU/Altivec, it won't be
- * tempted to save the current CPU state into the thread_struct
- * and corrupt what we are writing there.
- */
- discard_lazy_cpu_state();
-
- /*
* Force reload of FP/VEC.
* This has to be done before copying stuff into current->thread.fpr/vr
* for the reasons explained in the previous comment.
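
The discard_lazy_cpu_state() calls dropped from all four restore paths only made sense under lazy FPU/VMX switching, where per-CPU last_task_used_* pointers recorded whose state was live. Roughly what the removed helper did on non-SMP builds (a sketch of the old lazy-switching code; variable names assumed):

    static void discard_lazy_cpu_state(void)
    {
            preempt_disable();
            if (last_task_used_math == current)
                    last_task_used_math = NULL;
    #ifdef CONFIG_ALTIVEC
            if (last_task_used_altivec == current)
                    last_task_used_altivec = NULL;
    #endif
            preempt_enable();
    }

With the lazy scheme gone there is no such pointer left to clear, so the call sites can simply be deleted.
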
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index ea43a347a..4f24606af 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -61,3 +61,10 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
save_context_stack(trace, tsk->thread.ksp, tsk, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void
+save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+ save_context_stack(trace, regs->gpr[1], current, 0);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_regs);
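
The new save_stack_trace_regs() entry point lets code that already holds a struct pt_regs (exception or profiling paths) walk the interrupted stack: on powerpc, GPR1 is the stack pointer, which is why regs->gpr[1] seeds the walk. A hypothetical caller, using the generic <linux/stacktrace.h> API of this era:

    static unsigned long entries[32];

    static void dump_from_regs(struct pt_regs *regs)
    {
            struct stack_trace trace = {
                    .entries     = entries,
                    .max_entries = ARRAY_SIZE(entries),
                    .skip        = 0,
            };

            save_stack_trace_regs(regs, &trace);
            print_stack_trace(&trace, 0);   /* second arg: indent spaces */
    }
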
diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c
index eae33e10b..6669b1752 100644
--- a/arch/powerpc/kernel/swsusp.c
+++ b/arch/powerpc/kernel/swsusp.c
@@ -20,9 +20,7 @@ void save_processor_state(void)
* flush out all the special registers so we don't need
* to save them in the snapshot
*/
- flush_fp_to_thread(current);
- flush_altivec_to_thread(current);
- flush_spe_to_thread(current);
+ flush_all_to_thread(current);
#ifdef CONFIG_PPC64
hard_irq_disable();
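
flush_all_to_thread() collapses the three per-facility flushes into a single call. A minimal sketch, assuming the helper simply bundles the calls it replaces here (the in-tree version in process.c may instead save all state in one pass):

    void flush_all_to_thread(struct task_struct *tsk)
    {
            flush_fp_to_thread(tsk);
            flush_altivec_to_thread(tsk);
            flush_spe_to_thread(tsk);
    }
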
diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c
index 2384129f5..55323a620 100644
--- a/arch/powerpc/kernel/systbl_chk.c
+++ b/arch/powerpc/kernel/systbl_chk.c
@@ -57,4 +57,4 @@
START_TABLE
#include <asm/systbl.h>
-END_TABLE __NR_syscalls
+END_TABLE NR_syscalls
diff --git a/arch/powerpc/kernel/systbl_chk.sh b/arch/powerpc/kernel/systbl_chk.sh
index 19415e767..31b6e7c35 100644
--- a/arch/powerpc/kernel/systbl_chk.sh
+++ b/arch/powerpc/kernel/systbl_chk.sh
@@ -16,7 +16,7 @@ awk 'BEGIN { num = -1; } # Ignore the beginning of the file
/^START_TABLE/ { num = 0; next; }
/^END_TABLE/ {
if (num != $2) {
- printf "__NR_syscalls (%s) is not one more than the last syscall (%s)\n",
+ printf "NR_syscalls (%s) is not one more than the last syscall (%s)\n",
$2, num - 1;
exit(1);
}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 1be1092c7..81b0900a3 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -1002,38 +1002,6 @@ static int month_days[12] = {
31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
-/*
- * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
- */
-void GregorianDay(struct rtc_time * tm)
-{
- int leapsToDate;
- int lastYear;
- int day;
- int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
-
- lastYear = tm->tm_year - 1;
-
- /*
- * Number of leap corrections to apply up to end of last year
- */
- leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
-
- /*
- * This year is a leap year if it is divisible by 4 except when it is
- * divisible by 100 unless it is divisible by 400
- *
- * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
- */
- day = tm->tm_mon > 2 && leapyear(tm->tm_year);
-
- day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
- tm->tm_mday;
-
- tm->tm_wday = day % 7;
-}
-EXPORT_SYMBOL_GPL(GregorianDay);
-
void to_tm(int tim, struct rtc_time * tm)
{
register int i;
@@ -1064,9 +1032,9 @@ void to_tm(int tim, struct rtc_time * tm)
tm->tm_mday = day + 1;
/*
- * Determine the day of week
+ * No-one uses the day of the week.
*/
- GregorianDay(tm);
+ tm->tm_wday = -1;
}
EXPORT_SYMBOL(to_tm);
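
With GregorianDay() gone, to_tm() deliberately reports an invalid weekday. Should a caller ever need tm_wday again, Sakamoto's method is a compact, well-known replacement (0 = Sunday; shown for reference only, not part of this patch):

    static int day_of_week(int year, int mon, int mday)
    {
            static const int t[] = {0, 3, 2, 5, 0, 3, 5, 1, 4, 6, 2, 4};

            if (mon < 3)
                    year -= 1;
            return (year + year/4 - year/100 + year/400 + t[mon-1] + mday) % 7;
    }
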
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 37de90f8a..b6becc795 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1313,13 +1313,6 @@ void nonrecoverable_exception(struct pt_regs *regs)
die("nonrecoverable exception", regs, SIGKILL);
}
-void trace_syscall(struct pt_regs *regs)
-{
- printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
- current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
- regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
-}
-
void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index b457bfa28..def1b8b5e 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -671,7 +671,7 @@ static void __init vdso_setup_syscall_map(void)
extern unsigned long sys_ni_syscall;
- for (i = 0; i < __NR_syscalls; i++) {
+ for (i = 0; i < NR_syscalls; i++) {
#ifdef CONFIG_PPC64
if (sys_call_table[i*2] != sys_ni_syscall)
vdso_data->syscall_map_64[i >> 5] |=
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
index 6abffb7a8..cbabd143a 100644
--- a/arch/powerpc/kernel/vdso32/Makefile
+++ b/arch/powerpc/kernel/vdso32/Makefile
@@ -15,6 +15,7 @@ targets := $(obj-vdso32) vdso32.so vdso32.so.dbg
obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
diff --git a/arch/powerpc/kernel/vdso32/datapage.S b/arch/powerpc/kernel/vdso32/datapage.S
index 59cf5f452..3745113fc 100644
--- a/arch/powerpc/kernel/vdso32/datapage.S
+++ b/arch/powerpc/kernel/vdso32/datapage.S
@@ -61,7 +61,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
addi r3,r3,CFG_SYSCALL_MAP32
cmpli cr0,r4,0
beqlr
- li r0,__NR_syscalls
+ li r0,NR_syscalls
stw r0,0(r4)
crclr cr0*4+so
blr
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index 8c8f2ae43..c710802b8 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -8,6 +8,7 @@ targets := $(obj-vdso64) vdso64.so vdso64.so.dbg
obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S
index 2f01c4a0d..184a6ba7f 100644
--- a/arch/powerpc/kernel/vdso64/datapage.S
+++ b/arch/powerpc/kernel/vdso64/datapage.S
@@ -62,7 +62,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
cmpli cr0,r4,0
crclr cr0*4+so
beqlr
- li r0,__NR_syscalls
+ li r0,NR_syscalls
stw r0,0(r4)
blr
.cfi_endproc
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index f5c80d567..162d0f714 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -29,24 +29,10 @@ _GLOBAL(do_load_up_transact_altivec)
addi r10,r3,THREAD_TRANSACT_VRSTATE
REST_32VRS(0,r4,r10)
- /* Disable VEC again. */
- MTMSRD(r6)
- isync
-
blr
#endif
/*
- * Enable use of VMX/Altivec for the caller.
- */
-_GLOBAL(vec_enable)
- mfmsr r3
- oris r3,r3,MSR_VEC@h
- MTMSRD(r3)
- isync
- blr
-
-/*
* Load state from memory into VMX registers including VSCR.
* Assumes the caller has enabled VMX in the MSR.
*/
@@ -84,39 +70,6 @@ _GLOBAL(load_up_altivec)
MTMSRD(r5) /* enable use of AltiVec now */
isync
-/*
- * For SMP, we don't do lazy VMX switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_altivec in switch_to.
- * VRSAVE isn't dealt with here, that is done in the normal context
- * switch code. Note that we could rely on vrsave value to eventually
- * avoid saving all of the VREGs here...
- */
-#ifndef CONFIG_SMP
- LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
- toreal(r3)
- PPC_LL r4,ADDROFF(last_task_used_altivec)(r3)
- PPC_LCMPI 0,r4,0
- beq 1f
-
- /* Save VMX state to last_task_used_altivec's THREAD struct */
- toreal(r4)
- addi r4,r4,THREAD
- addi r6,r4,THREAD_VRSTATE
- SAVE_32VRS(0,r5,r6)
- mfvscr v0
- li r10,VRSTATE_VSCR
- stvx v0,r10,r6
- /* Disable VMX for last_task_used_altivec */
- PPC_LL r5,PT_REGS(r4)
- toreal(r5)
- PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- lis r10,MSR_VEC@h
- andc r4,r4,r10
- PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-
/* Hack: if we get an altivec unavailable trap with VRSAVE
* set to all zeros, we assume this is a broken application
* that fails to set it properly, and thus we switch it to
@@ -145,39 +98,15 @@ _GLOBAL(load_up_altivec)
lvx v0,r10,r6
mtvscr v0
REST_32VRS(0,r4,r6)
-#ifndef CONFIG_SMP
- /* Update last_task_used_altivec to 'current' */
- subi r4,r5,THREAD /* Back to 'current' */
- fromreal(r4)
- PPC_STL r4,ADDROFF(last_task_used_altivec)(r3)
-#endif /* CONFIG_SMP */
/* restore registers and return */
blr
-_GLOBAL(giveup_altivec_notask)
- mfmsr r3
- andis. r4,r3,MSR_VEC@h
- bnelr /* Already enabled? */
- oris r3,r3,MSR_VEC@h
- SYNC
- MTMSRD(r3) /* enable use of VMX now */
- isync
- blr
-
/*
- * giveup_altivec(tsk)
+ * __giveup_altivec(tsk)
* Disable VMX for the task given as the argument,
* and save the vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
*/
-_GLOBAL(giveup_altivec)
- mfmsr r5
- oris r5,r5,MSR_VEC@h
- SYNC
- MTMSRD(r5) /* enable use of VMX now */
- isync
- PPC_LCMPI 0,r3,0
- beqlr /* if no previous owner, done */
+_GLOBAL(__giveup_altivec)
addi r3,r3,THREAD /* want THREAD of task */
PPC_LL r7,THREAD_VRSAVEAREA(r3)
PPC_LL r5,PT_REGS(r3)
@@ -203,11 +132,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
andc r4,r4,r3 /* disable FP for previous task */
PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
-#ifndef CONFIG_SMP
- li r5,0
- LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
- PPC_STL r5,ADDROFF(last_task_used_altivec)(r4)
-#endif /* CONFIG_SMP */
blr
#ifdef CONFIG_VSX
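
The renamed __giveup_altivec no longer enables MSR_VEC for itself (and no longer tolerates a NULL task), so turning the facility on moves to a C-side wrapper. Something like the wrapper this series adds in process.c; msr_check_and_set()/msr_check_and_clear() are names assumed from the same series, so treat this as a sketch:

    void giveup_altivec(struct task_struct *tsk)
    {
            msr_check_and_set(MSR_VEC);     /* make VMX usable in the kernel */
            __giveup_altivec(tsk);          /* save VRs, clear MSR_VEC in tsk */
            msr_check_and_clear(MSR_VEC);
    }
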
@@ -230,20 +154,6 @@ _GLOBAL(load_up_vsx)
andis. r5,r12,MSR_VEC@h
beql+ load_up_altivec /* skip if already loaded */
-#ifndef CONFIG_SMP
- ld r3,last_task_used_vsx@got(r2)
- ld r4,0(r3)
- cmpdi 0,r4,0
- beq 1f
- /* Disable VSX for last_task_used_vsx */
- addi r4,r4,THREAD
- ld r5,PT_REGS(r4)
- ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- lis r6,MSR_VSX@h
- andc r6,r4,r6
- std r6,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
ld r4,PACACURRENT(r13)
addi r4,r4,THREAD /* Get THREAD */
li r6,1
@@ -251,27 +161,14 @@ _GLOBAL(load_up_vsx)
/* enable use of VSX after return */
oris r12,r12,MSR_VSX@h
std r12,_MSR(r1)
-#ifndef CONFIG_SMP
- /* Update last_task_used_vsx to 'current' */
- ld r4,PACACURRENT(r13)
- std r4,0(r3)
-#endif /* CONFIG_SMP */
b fast_exception_return
/*
* __giveup_vsx(tsk)
* Disable VSX for the task given as the argument.
* Does NOT save vsx registers.
- * Enables the VSX for use in the kernel on return.
*/
_GLOBAL(__giveup_vsx)
- mfmsr r5
- oris r5,r5,MSR_VSX@h
- mtmsrd r5 /* enable use of VSX now */
- isync
-
- cmpdi 0,r3,0
- beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
ld r5,PT_REGS(r3)
cmpdi 0,r5,0
@@ -281,11 +178,6 @@ _GLOBAL(__giveup_vsx)
andc r4,r4,r3 /* disable VSX for previous task */
std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
-#ifndef CONFIG_SMP
- li r5,0
- ld r4,last_task_used_vsx@got(r2)
- std r5,0(r4)
-#endif /* CONFIG_SMP */
blr
#endif /* CONFIG_VSX */