| author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-10-20 00:10:27 -0300 |
|---|---|---|
| committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-10-20 00:10:27 -0300 |
| commit | d0b2f91bede3bd5e3d24dd6803e56eee959c1797 (patch) | |
| tree | 7fee4ab0509879c373c4f2cbd5b8a5be5b4041ee /arch/powerpc/lib | |
| parent | e914f8eb445e8f74b00303c19c2ffceaedd16a05 (diff) | |
Linux-libre 4.8.2-gnupck-4.8.2-gnu
Diffstat (limited to 'arch/powerpc/lib')
| -rw-r--r-- | arch/powerpc/lib/alloc.c | 2 |
| -rw-r--r-- | arch/powerpc/lib/checksum_64.S | 12 |
| -rw-r--r-- | arch/powerpc/lib/feature-fixups.c | 70 |
| -rw-r--r-- | arch/powerpc/lib/locks.c | 16 |
| -rw-r--r-- | arch/powerpc/lib/ppc_ksyms.c | 4 |
| -rw-r--r-- | arch/powerpc/lib/rheap.c | 2 |
| -rw-r--r-- | arch/powerpc/lib/string.S | 44 |
| -rw-r--r-- | arch/powerpc/lib/vmx-helper.c | 1 |
8 files changed, 80 insertions, 71 deletions
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
index 60b0b3fc8..a58abe4af 100644
--- a/arch/powerpc/lib/alloc.c
+++ b/arch/powerpc/lib/alloc.c
@@ -6,7 +6,7 @@
 #include <asm/setup.h>
 
-void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
+void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
 {
 	void *p;
 
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 8e6e51016..fdec6e613 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -74,9 +74,9 @@ _GLOBAL(__csum_partial)
 	ld	r11,24(r3)
 
 	/*
-	 * On POWER6 and POWER7 back to back addes take 2 cycles because of
-	 * the XER dependency. This means the fastest this loop can go is
-	 * 16 cycles per iteration. The scheduling of the loop below has
+	 * On POWER6 and POWER7 back to back adde instructions take 2 cycles
+	 * because of the XER dependency. This means the fastest this loop can
+	 * go is 16 cycles per iteration. The scheduling of the loop below has
 	 * been shown to hit this on both POWER6 and POWER7.
 	 */
 	.align 5
@@ -275,9 +275,9 @@ source;	ld	r10,16(r3)
 source;	ld	r11,24(r3)
 
 	/*
-	 * On POWER6 and POWER7 back to back addes take 2 cycles because of
-	 * the XER dependency. This means the fastest this loop can go is
-	 * 16 cycles per iteration. The scheduling of the loop below has
+	 * On POWER6 and POWER7 back to back adde instructions take 2 cycles
+	 * because of the XER dependency. This means the fastest this loop can
+	 * go is 16 cycles per iteration. The scheduling of the loop below has
 	 * been shown to hit this on both POWER6 and POWER7.
 	 */
 	.align 5
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 7ce3870d7..043415f0b 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -13,6 +13,7 @@
  */
 
 #include <linux/types.h>
+#include <linux/jump_label.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/init.h>
@@ -20,7 +21,8 @@
 #include <asm/code-patching.h>
 #include <asm/page.h>
 #include <asm/sections.h>
-
+#include <asm/setup.h>
+#include <asm/firmware.h>
 
 struct fixup_entry {
 	unsigned long	mask;
@@ -130,7 +132,7 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 	}
 }
 
-void do_final_fixups(void)
+static void do_final_fixups(void)
 {
 #if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
 	int *src, *dest;
@@ -151,6 +153,70 @@ void do_final_fixups(void)
 #endif
 }
 
+static unsigned long __initdata saved_cpu_features;
+static unsigned int __initdata saved_mmu_features;
+#ifdef CONFIG_PPC64
+static unsigned long __initdata saved_firmware_features;
+#endif
+
+void __init apply_feature_fixups(void)
+{
+	struct cpu_spec *spec = PTRRELOC(*PTRRELOC(&cur_cpu_spec));
+
+	*PTRRELOC(&saved_cpu_features) = spec->cpu_features;
+	*PTRRELOC(&saved_mmu_features) = spec->mmu_features;
+
+	/*
+	 * Apply the CPU-specific and firmware specific fixups to kernel text
+	 * (nop out sections not relevant to this CPU or this firmware).
+	 */
+	do_feature_fixups(spec->cpu_features,
+			  PTRRELOC(&__start___ftr_fixup),
+			  PTRRELOC(&__stop___ftr_fixup));
+
+	do_feature_fixups(spec->mmu_features,
+			  PTRRELOC(&__start___mmu_ftr_fixup),
+			  PTRRELOC(&__stop___mmu_ftr_fixup));
+
+	do_lwsync_fixups(spec->cpu_features,
+			 PTRRELOC(&__start___lwsync_fixup),
+			 PTRRELOC(&__stop___lwsync_fixup));
+
+#ifdef CONFIG_PPC64
+	saved_firmware_features = powerpc_firmware_features;
+	do_feature_fixups(powerpc_firmware_features,
+			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
+#endif
+	do_final_fixups();
+}
+
+void __init setup_feature_keys(void)
+{
+	/*
+	 * Initialise jump label. This causes all the cpu/mmu_has_feature()
+	 * checks to take on their correct polarity based on the current set of
+	 * CPU/MMU features.
+	 */
+	jump_label_init();
+	cpu_feature_keys_init();
+	mmu_feature_keys_init();
+}
+
+static int __init check_features(void)
+{
+	WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
+	     "CPU features changed after feature patching!\n");
+	WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
+	     "MMU features changed after feature patching!\n");
+#ifdef CONFIG_PPC64
+	WARN(saved_firmware_features != powerpc_firmware_features,
+	     "Firmware features changed after feature patching!\n");
+#endif
+
+	return 0;
+}
+late_initcall(check_features);
+
 #ifdef CONFIG_FTR_FIXUP_SELFTEST
 
 #define check(x)	\
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index f7deebdf3..b7b1237d4 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -68,19 +68,3 @@ void __rw_yield(arch_rwlock_t *rw)
 		get_hard_smp_processor_id(holder_cpu), yield_count);
 }
 #endif
-
-void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-	smp_mb();
-
-	while (lock->slock) {
-		HMT_low();
-		if (SHARED_PROCESSOR)
-			__spin_yield(lock);
-	}
-	HMT_medium();
-
-	smp_mb();
-}
-
-EXPORT_SYMBOL(arch_spin_unlock_wait);
diff --git a/arch/powerpc/lib/ppc_ksyms.c b/arch/powerpc/lib/ppc_ksyms.c
index c422812f7..ae69d846a 100644
--- a/arch/powerpc/lib/ppc_ksyms.c
+++ b/arch/powerpc/lib/ppc_ksyms.c
@@ -9,11 +9,7 @@ EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memcmp);
 EXPORT_SYMBOL(memchr);
 
-EXPORT_SYMBOL(strcpy);
 EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcmp);
 EXPORT_SYMBOL(strncmp);
 
 #ifndef CONFIG_GENERIC_CSUM
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
index 69abf844c..94058c21a 100644
--- a/arch/powerpc/lib/rheap.c
+++ b/arch/powerpc/lib/rheap.c
@@ -325,7 +325,7 @@ void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks,
 }
 EXPORT_SYMBOL_GPL(rh_init);
 
-/* Attach a free memory region, coalesces regions if adjuscent */
+/* Attach a free memory region, coalesces regions if adjacent */
 int rh_attach_region(rh_info_t * info, unsigned long start, int size)
 {
 	rh_block_t *blk;
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index c80fb49ce..beabc68d9 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -16,15 +16,6 @@ PPC_LONG_ALIGN
 	.text
 
-_GLOBAL(strcpy)
-	addi	r5,r3,-1
-	addi	r4,r4,-1
-1:	lbzu	r0,1(r4)
-	cmpwi	0,r0,0
-	stbu	r0,1(r5)
-	bne	1b
-	blr
-
 /* This clears out any unused part of the destination buffer,
    just as the libc version does.  -- paulus */
 _GLOBAL(strncpy)
@@ -33,6 +24,7 @@ _GLOBAL(strncpy)
 	mtctr	r5
 	addi	r6,r3,-1
 	addi	r4,r4,-1
+	.balign 16
 1:	lbzu	r0,1(r4)
 	cmpwi	0,r0,0
 	stbu	r0,1(r6)
@@ -45,36 +37,13 @@
 	bdnz	2b
 	blr
 
-_GLOBAL(strcat)
-	addi	r5,r3,-1
-	addi	r4,r4,-1
-1:	lbzu	r0,1(r5)
-	cmpwi	0,r0,0
-	bne	1b
-	addi	r5,r5,-1
-1:	lbzu	r0,1(r4)
-	cmpwi	0,r0,0
-	stbu	r0,1(r5)
-	bne	1b
-	blr
-
-_GLOBAL(strcmp)
-	addi	r5,r3,-1
-	addi	r4,r4,-1
-1:	lbzu	r3,1(r5)
-	cmpwi	1,r3,0
-	lbzu	r0,1(r4)
-	subf.	r3,r0,r3
-	beqlr	1
-	beq	1b
-	blr
-
 _GLOBAL(strncmp)
 	PPC_LCMPI 0,r5,0
 	beq-	2f
 	mtctr	r5
 	addi	r5,r3,-1
 	addi	r4,r4,-1
+	.balign 16
 1:	lbzu	r3,1(r5)
 	cmpwi	1,r3,0
 	lbzu	r0,1(r4)
@@ -85,14 +54,6 @@
 2:	li	r3,0
 	blr
 
-_GLOBAL(strlen)
-	addi	r4,r3,-1
-1:	lbzu	r0,1(r4)
-	cmpwi	0,r0,0
-	bne	1b
-	subf	r3,r3,r4
-	blr
-
 #ifdef CONFIG_PPC32
 _GLOBAL(memcmp)
 	PPC_LCMPI 0,r5,0
@@ -114,6 +75,7 @@ _GLOBAL(memchr)
 	beq-	2f
 	mtctr	r5
 	addi	r3,r3,-1
+	.balign 16
 1:	lbzu	r0,1(r3)
 	cmpw	0,r0,r4
 	bdnzf	2,1b
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index b27e030fc..bf925cdca 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -21,6 +21,7 @@
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
 #include <asm/switch_to.h>
+#include <asm/asm-prototypes.h>
 
 int enter_vmx_usercopy(void)
 {
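The feature-fixups change above follows a snapshot-and-verify pattern: apply_feature_fixups() records the CPU, MMU and firmware feature masks at the moment the kernel text is patched, and the check_features() late_initcall warns if any of those masks change afterwards, since already-patched code would then no longer match the flags. The stand-alone C program below is only a minimal sketch of that pattern; the global masks and helper names are hypothetical stand-ins, not the kernel's cur_cpu_spec or powerpc_firmware_features machinery.

```c
#include <stdio.h>

/* Hypothetical feature words standing in for the kernel's globals. */
static unsigned long cpu_features = 0x3UL;
static unsigned long firmware_features = 0x1UL;

/* Snapshots taken at "patch time", mirroring the saved_*_features idea. */
static unsigned long saved_cpu_features;
static unsigned long saved_firmware_features;

static void apply_feature_fixups_sketch(void)
{
	/* Record the masks the code was patched against. */
	saved_cpu_features = cpu_features;
	saved_firmware_features = firmware_features;
	/* ... binary patching based on these masks would happen here ... */
}

static void check_features_sketch(void)
{
	/* Warn if a mask changed after patching, as check_features() does. */
	if (saved_cpu_features != cpu_features)
		fprintf(stderr, "CPU features changed after feature patching!\n");
	if (saved_firmware_features != firmware_features)
		fprintf(stderr, "Firmware features changed after feature patching!\n");
}

int main(void)
{
	apply_feature_fixups_sketch();

	/* Simulate the bug the check is meant to catch: a bit flips later. */
	cpu_features |= 0x4UL;

	check_features_sketch();
	return 0;
}
```

Snapshotting at patch time keeps the verification cheap and independent of when later init code runs, which is why deferring the check to a late initcall is sufficient.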