| author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-03-25 03:53:42 -0300 |
|---|---|---|
| committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-03-25 03:53:42 -0300 |
| commit | 03dd4cb26d967f9588437b0fc9cc0e8353322bb7 (patch) | |
| tree | fa581f6dc1c0596391690d1f67eceef3af8246dc /arch/s390/kernel | |
| parent | d4e493caf788ef44982e131ff9c786546904d934 (diff) | |
Linux-libre 4.5-gnu
Diffstat (limited to 'arch/s390/kernel')
38 files changed, 815 insertions, 712 deletions
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index dc167a23b..2f5586ab8 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -34,8 +34,10 @@ CFLAGS_sysinfo.o += -w # CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE) ifneq ($(CC_FLAGS_MARCH),-march=z900) -CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH) -CFLAGS_sclp.o += -march=z900 +CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH) +CFLAGS_sclp.o += -march=z900 +AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH) +AFLAGS_head.o += -march=z900 endif GCOV_PROFILE_sclp.o := n @@ -50,7 +52,7 @@ extra-y += head.o head64.o vmlinux.lds obj-$(CONFIG_MODULES) += s390_ksyms.o module.o obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_SCHED_BOOK) += topology.o +obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o obj-$(CONFIG_HIBERNATION) += suspend.o swsusp.o obj-$(CONFIG_AUDIT) += audit.o compat-obj-$(CONFIG_AUDIT) += compat_audit.o diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index dc6c9c604..53bbc9e8b 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -80,6 +80,8 @@ int main(void) OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift); OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base); OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time); + OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr); + OFFSET(__VDSO_NODE_ID, vdso_per_cpu_data, node_id); BLANK(); /* constants used by the vdso */ DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME); @@ -97,96 +99,96 @@ int main(void) OFFSET(__TIMER_IDLE_EXIT, s390_idle_data, timer_idle_exit); BLANK(); /* hardware defined lowcore locations 0x000 - 0x1ff */ - OFFSET(__LC_EXT_PARAMS, _lowcore, ext_params); - OFFSET(__LC_EXT_CPU_ADDR, _lowcore, ext_cpu_addr); - OFFSET(__LC_EXT_INT_CODE, _lowcore, ext_int_code); - OFFSET(__LC_SVC_ILC, _lowcore, svc_ilc); - OFFSET(__LC_SVC_INT_CODE, _lowcore, svc_code); - OFFSET(__LC_PGM_ILC, _lowcore, pgm_ilc); - OFFSET(__LC_PGM_INT_CODE, _lowcore, pgm_code); - OFFSET(__LC_DATA_EXC_CODE, _lowcore, data_exc_code); - OFFSET(__LC_MON_CLASS_NR, _lowcore, mon_class_num); - OFFSET(__LC_PER_CODE, _lowcore, per_code); - OFFSET(__LC_PER_ATMID, _lowcore, per_atmid); - OFFSET(__LC_PER_ADDRESS, _lowcore, per_address); - OFFSET(__LC_EXC_ACCESS_ID, _lowcore, exc_access_id); - OFFSET(__LC_PER_ACCESS_ID, _lowcore, per_access_id); - OFFSET(__LC_OP_ACCESS_ID, _lowcore, op_access_id); - OFFSET(__LC_AR_MODE_ID, _lowcore, ar_mode_id); - OFFSET(__LC_TRANS_EXC_CODE, _lowcore, trans_exc_code); - OFFSET(__LC_MON_CODE, _lowcore, monitor_code); - OFFSET(__LC_SUBCHANNEL_ID, _lowcore, subchannel_id); - OFFSET(__LC_SUBCHANNEL_NR, _lowcore, subchannel_nr); - OFFSET(__LC_IO_INT_PARM, _lowcore, io_int_parm); - OFFSET(__LC_IO_INT_WORD, _lowcore, io_int_word); - OFFSET(__LC_STFL_FAC_LIST, _lowcore, stfl_fac_list); - OFFSET(__LC_MCCK_CODE, _lowcore, mcck_interruption_code); - OFFSET(__LC_MCCK_FAIL_STOR_ADDR, _lowcore, failing_storage_address); - OFFSET(__LC_LAST_BREAK, _lowcore, breaking_event_addr); - OFFSET(__LC_RST_OLD_PSW, _lowcore, restart_old_psw); - OFFSET(__LC_EXT_OLD_PSW, _lowcore, external_old_psw); - OFFSET(__LC_SVC_OLD_PSW, _lowcore, svc_old_psw); - OFFSET(__LC_PGM_OLD_PSW, _lowcore, program_old_psw); - OFFSET(__LC_MCK_OLD_PSW, _lowcore, mcck_old_psw); - OFFSET(__LC_IO_OLD_PSW, _lowcore, io_old_psw); - OFFSET(__LC_RST_NEW_PSW, _lowcore, restart_psw); - OFFSET(__LC_EXT_NEW_PSW, _lowcore, external_new_psw); - OFFSET(__LC_SVC_NEW_PSW, _lowcore, svc_new_psw); - OFFSET(__LC_PGM_NEW_PSW, _lowcore, program_new_psw); - OFFSET(__LC_MCK_NEW_PSW, _lowcore, 
mcck_new_psw); - OFFSET(__LC_IO_NEW_PSW, _lowcore, io_new_psw); + OFFSET(__LC_EXT_PARAMS, lowcore, ext_params); + OFFSET(__LC_EXT_CPU_ADDR, lowcore, ext_cpu_addr); + OFFSET(__LC_EXT_INT_CODE, lowcore, ext_int_code); + OFFSET(__LC_SVC_ILC, lowcore, svc_ilc); + OFFSET(__LC_SVC_INT_CODE, lowcore, svc_code); + OFFSET(__LC_PGM_ILC, lowcore, pgm_ilc); + OFFSET(__LC_PGM_INT_CODE, lowcore, pgm_code); + OFFSET(__LC_DATA_EXC_CODE, lowcore, data_exc_code); + OFFSET(__LC_MON_CLASS_NR, lowcore, mon_class_num); + OFFSET(__LC_PER_CODE, lowcore, per_code); + OFFSET(__LC_PER_ATMID, lowcore, per_atmid); + OFFSET(__LC_PER_ADDRESS, lowcore, per_address); + OFFSET(__LC_EXC_ACCESS_ID, lowcore, exc_access_id); + OFFSET(__LC_PER_ACCESS_ID, lowcore, per_access_id); + OFFSET(__LC_OP_ACCESS_ID, lowcore, op_access_id); + OFFSET(__LC_AR_MODE_ID, lowcore, ar_mode_id); + OFFSET(__LC_TRANS_EXC_CODE, lowcore, trans_exc_code); + OFFSET(__LC_MON_CODE, lowcore, monitor_code); + OFFSET(__LC_SUBCHANNEL_ID, lowcore, subchannel_id); + OFFSET(__LC_SUBCHANNEL_NR, lowcore, subchannel_nr); + OFFSET(__LC_IO_INT_PARM, lowcore, io_int_parm); + OFFSET(__LC_IO_INT_WORD, lowcore, io_int_word); + OFFSET(__LC_STFL_FAC_LIST, lowcore, stfl_fac_list); + OFFSET(__LC_STFLE_FAC_LIST, lowcore, stfle_fac_list); + OFFSET(__LC_MCCK_CODE, lowcore, mcck_interruption_code); + OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address); + OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr); + OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw); + OFFSET(__LC_EXT_OLD_PSW, lowcore, external_old_psw); + OFFSET(__LC_SVC_OLD_PSW, lowcore, svc_old_psw); + OFFSET(__LC_PGM_OLD_PSW, lowcore, program_old_psw); + OFFSET(__LC_MCK_OLD_PSW, lowcore, mcck_old_psw); + OFFSET(__LC_IO_OLD_PSW, lowcore, io_old_psw); + OFFSET(__LC_RST_NEW_PSW, lowcore, restart_psw); + OFFSET(__LC_EXT_NEW_PSW, lowcore, external_new_psw); + OFFSET(__LC_SVC_NEW_PSW, lowcore, svc_new_psw); + OFFSET(__LC_PGM_NEW_PSW, lowcore, program_new_psw); + OFFSET(__LC_MCK_NEW_PSW, lowcore, mcck_new_psw); + OFFSET(__LC_IO_NEW_PSW, lowcore, io_new_psw); /* software defined lowcore locations 0x200 - 0xdff*/ - OFFSET(__LC_SAVE_AREA_SYNC, _lowcore, save_area_sync); - OFFSET(__LC_SAVE_AREA_ASYNC, _lowcore, save_area_async); - OFFSET(__LC_SAVE_AREA_RESTART, _lowcore, save_area_restart); - OFFSET(__LC_CPU_FLAGS, _lowcore, cpu_flags); - OFFSET(__LC_RETURN_PSW, _lowcore, return_psw); - OFFSET(__LC_RETURN_MCCK_PSW, _lowcore, return_mcck_psw); - OFFSET(__LC_SYNC_ENTER_TIMER, _lowcore, sync_enter_timer); - OFFSET(__LC_ASYNC_ENTER_TIMER, _lowcore, async_enter_timer); - OFFSET(__LC_MCCK_ENTER_TIMER, _lowcore, mcck_enter_timer); - OFFSET(__LC_EXIT_TIMER, _lowcore, exit_timer); - OFFSET(__LC_USER_TIMER, _lowcore, user_timer); - OFFSET(__LC_SYSTEM_TIMER, _lowcore, system_timer); - OFFSET(__LC_STEAL_TIMER, _lowcore, steal_timer); - OFFSET(__LC_LAST_UPDATE_TIMER, _lowcore, last_update_timer); - OFFSET(__LC_LAST_UPDATE_CLOCK, _lowcore, last_update_clock); - OFFSET(__LC_INT_CLOCK, _lowcore, int_clock); - OFFSET(__LC_MCCK_CLOCK, _lowcore, mcck_clock); - OFFSET(__LC_CURRENT, _lowcore, current_task); - OFFSET(__LC_THREAD_INFO, _lowcore, thread_info); - OFFSET(__LC_KERNEL_STACK, _lowcore, kernel_stack); - OFFSET(__LC_ASYNC_STACK, _lowcore, async_stack); - OFFSET(__LC_PANIC_STACK, _lowcore, panic_stack); - OFFSET(__LC_RESTART_STACK, _lowcore, restart_stack); - OFFSET(__LC_RESTART_FN, _lowcore, restart_fn); - OFFSET(__LC_RESTART_DATA, _lowcore, restart_data); - OFFSET(__LC_RESTART_SOURCE, _lowcore, restart_source); - 
OFFSET(__LC_USER_ASCE, _lowcore, user_asce); - OFFSET(__LC_LPP, _lowcore, lpp); - OFFSET(__LC_CURRENT_PID, _lowcore, current_pid); - OFFSET(__LC_PERCPU_OFFSET, _lowcore, percpu_offset); - OFFSET(__LC_VDSO_PER_CPU, _lowcore, vdso_per_cpu_data); - OFFSET(__LC_MACHINE_FLAGS, _lowcore, machine_flags); - OFFSET(__LC_GMAP, _lowcore, gmap); - OFFSET(__LC_PASTE, _lowcore, paste); + OFFSET(__LC_SAVE_AREA_SYNC, lowcore, save_area_sync); + OFFSET(__LC_SAVE_AREA_ASYNC, lowcore, save_area_async); + OFFSET(__LC_SAVE_AREA_RESTART, lowcore, save_area_restart); + OFFSET(__LC_CPU_FLAGS, lowcore, cpu_flags); + OFFSET(__LC_RETURN_PSW, lowcore, return_psw); + OFFSET(__LC_RETURN_MCCK_PSW, lowcore, return_mcck_psw); + OFFSET(__LC_SYNC_ENTER_TIMER, lowcore, sync_enter_timer); + OFFSET(__LC_ASYNC_ENTER_TIMER, lowcore, async_enter_timer); + OFFSET(__LC_MCCK_ENTER_TIMER, lowcore, mcck_enter_timer); + OFFSET(__LC_EXIT_TIMER, lowcore, exit_timer); + OFFSET(__LC_USER_TIMER, lowcore, user_timer); + OFFSET(__LC_SYSTEM_TIMER, lowcore, system_timer); + OFFSET(__LC_STEAL_TIMER, lowcore, steal_timer); + OFFSET(__LC_LAST_UPDATE_TIMER, lowcore, last_update_timer); + OFFSET(__LC_LAST_UPDATE_CLOCK, lowcore, last_update_clock); + OFFSET(__LC_INT_CLOCK, lowcore, int_clock); + OFFSET(__LC_MCCK_CLOCK, lowcore, mcck_clock); + OFFSET(__LC_CURRENT, lowcore, current_task); + OFFSET(__LC_THREAD_INFO, lowcore, thread_info); + OFFSET(__LC_KERNEL_STACK, lowcore, kernel_stack); + OFFSET(__LC_ASYNC_STACK, lowcore, async_stack); + OFFSET(__LC_PANIC_STACK, lowcore, panic_stack); + OFFSET(__LC_RESTART_STACK, lowcore, restart_stack); + OFFSET(__LC_RESTART_FN, lowcore, restart_fn); + OFFSET(__LC_RESTART_DATA, lowcore, restart_data); + OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source); + OFFSET(__LC_USER_ASCE, lowcore, user_asce); + OFFSET(__LC_LPP, lowcore, lpp); + OFFSET(__LC_CURRENT_PID, lowcore, current_pid); + OFFSET(__LC_PERCPU_OFFSET, lowcore, percpu_offset); + OFFSET(__LC_VDSO_PER_CPU, lowcore, vdso_per_cpu_data); + OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags); + OFFSET(__LC_GMAP, lowcore, gmap); + OFFSET(__LC_PASTE, lowcore, paste); /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ - OFFSET(__LC_DUMP_REIPL, _lowcore, ipib); + OFFSET(__LC_DUMP_REIPL, lowcore, ipib); /* hardware defined lowcore locations 0x1000 - 0x18ff */ - OFFSET(__LC_VX_SAVE_AREA_ADDR, _lowcore, vector_save_area_addr); - OFFSET(__LC_EXT_PARAMS2, _lowcore, ext_params2); - OFFSET(SAVE_AREA_BASE, _lowcore, floating_pt_save_area); - OFFSET(__LC_FPREGS_SAVE_AREA, _lowcore, floating_pt_save_area); - OFFSET(__LC_GPREGS_SAVE_AREA, _lowcore, gpregs_save_area); - OFFSET(__LC_PSW_SAVE_AREA, _lowcore, psw_save_area); - OFFSET(__LC_PREFIX_SAVE_AREA, _lowcore, prefixreg_save_area); - OFFSET(__LC_FP_CREG_SAVE_AREA, _lowcore, fpt_creg_save_area); - OFFSET(__LC_TOD_PROGREG_SAVE_AREA, _lowcore, tod_progreg_save_area); - OFFSET(__LC_CPU_TIMER_SAVE_AREA, _lowcore, cpu_timer_save_area); - OFFSET(__LC_CLOCK_COMP_SAVE_AREA, _lowcore, clock_comp_save_area); - OFFSET(__LC_AREGS_SAVE_AREA, _lowcore, access_regs_save_area); - OFFSET(__LC_CREGS_SAVE_AREA, _lowcore, cregs_save_area); - OFFSET(__LC_PGM_TDB, _lowcore, pgm_tdb); + OFFSET(__LC_VX_SAVE_AREA_ADDR, lowcore, vector_save_area_addr); + OFFSET(__LC_EXT_PARAMS2, lowcore, ext_params2); + OFFSET(__LC_FPREGS_SAVE_AREA, lowcore, floating_pt_save_area); + OFFSET(__LC_GPREGS_SAVE_AREA, lowcore, gpregs_save_area); + OFFSET(__LC_PSW_SAVE_AREA, lowcore, psw_save_area); + OFFSET(__LC_PREFIX_SAVE_AREA, lowcore, 
prefixreg_save_area); + OFFSET(__LC_FP_CREG_SAVE_AREA, lowcore, fpt_creg_save_area); + OFFSET(__LC_TOD_PROGREG_SAVE_AREA, lowcore, tod_progreg_save_area); + OFFSET(__LC_CPU_TIMER_SAVE_AREA, lowcore, cpu_timer_save_area); + OFFSET(__LC_CLOCK_COMP_SAVE_AREA, lowcore, clock_comp_save_area); + OFFSET(__LC_AREGS_SAVE_AREA, lowcore, access_regs_save_area); + OFFSET(__LC_CREGS_SAVE_AREA, lowcore, cregs_save_area); + OFFSET(__LC_PGM_TDB, lowcore, pgm_tdb); BLANK(); /* gmap/sie offsets */ OFFSET(__GMAP_ASCE, gmap, asce); diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index fac4eedde..ae2cda5ee 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c @@ -177,3 +177,4 @@ COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags); +COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags); diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index 171e09bb8..3986c9f62 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -13,6 +13,7 @@ #include <linux/slab.h> #include <linux/bootmem.h> #include <linux/elf.h> +#include <asm/asm-offsets.h> #include <linux/memblock.h> #include <asm/os_info.h> #include <asm/elf.h> @@ -32,7 +33,82 @@ static struct memblock_type oldmem_type = { .regions = &oldmem_region, }; -struct dump_save_areas dump_save_areas; +struct save_area { + struct list_head list; + u64 psw[2]; + u64 ctrs[16]; + u64 gprs[16]; + u32 acrs[16]; + u64 fprs[16]; + u32 fpc; + u32 prefix; + u64 todpreg; + u64 timer; + u64 todcmp; + u64 vxrs_low[16]; + __vector128 vxrs_high[16]; +}; + +static LIST_HEAD(dump_save_areas); + +/* + * Allocate a save area + */ +struct save_area * __init save_area_alloc(bool is_boot_cpu) +{ + struct save_area *sa; + + sa = (void *) memblock_alloc(sizeof(*sa), 8); + if (is_boot_cpu) + list_add(&sa->list, &dump_save_areas); + else + list_add_tail(&sa->list, &dump_save_areas); + return sa; +} + +/* + * Return the address of the save area for the boot CPU + */ +struct save_area * __init save_area_boot_cpu(void) +{ + if (list_empty(&dump_save_areas)) + return NULL; + return list_first_entry(&dump_save_areas, struct save_area, list); +} + +/* + * Copy CPU registers into the save area + */ +void __init save_area_add_regs(struct save_area *sa, void *regs) +{ + struct lowcore *lc; + + lc = (struct lowcore *)(regs - __LC_FPREGS_SAVE_AREA); + memcpy(&sa->psw, &lc->psw_save_area, sizeof(sa->psw)); + memcpy(&sa->ctrs, &lc->cregs_save_area, sizeof(sa->ctrs)); + memcpy(&sa->gprs, &lc->gpregs_save_area, sizeof(sa->gprs)); + memcpy(&sa->acrs, &lc->access_regs_save_area, sizeof(sa->acrs)); + memcpy(&sa->fprs, &lc->floating_pt_save_area, sizeof(sa->fprs)); + memcpy(&sa->fpc, &lc->fpt_creg_save_area, sizeof(sa->fpc)); + memcpy(&sa->prefix, &lc->prefixreg_save_area, sizeof(sa->prefix)); + memcpy(&sa->todpreg, &lc->tod_progreg_save_area, sizeof(sa->todpreg)); + memcpy(&sa->timer, &lc->cpu_timer_save_area, sizeof(sa->timer)); + memcpy(&sa->todcmp, &lc->clock_comp_save_area, sizeof(sa->todcmp)); +} + +/* + * Copy vector registers into the save area + */ 
+void __init save_area_add_vxrs(struct save_area *sa, __vector128 *vxrs) +{ + int i; + + /* Copy lower halves of vector registers 0-15 */ + for (i = 0; i < 16; i++) + memcpy(&sa->vxrs_low[i], &vxrs[i].u[2], 8); + /* Copy vector registers 16-31 */ + memcpy(sa->vxrs_high, vxrs + 16, 16 * sizeof(__vector128)); +} /* * Return physical address for virtual address @@ -51,79 +127,85 @@ static inline void *load_real_addr(void *addr) } /* - * Copy real to virtual or real memory - */ -static int copy_from_realmem(void *dest, void *src, size_t count) -{ - unsigned long size; - - if (!count) - return 0; - if (!is_vmalloc_or_module_addr(dest)) - return memcpy_real(dest, src, count); - do { - size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK)); - if (memcpy_real(load_real_addr(dest), src, size)) - return -EFAULT; - count -= size; - dest += size; - src += size; - } while (count); - return 0; -} - -/* - * Pointer to ELF header in new kernel + * Copy memory of the old, dumped system to a kernel space virtual address */ -static void *elfcorehdr_newmem; - -/* - * Copy one page from zfcpdump "oldmem" - * - * For pages below HSA size memory from the HSA is copied. Otherwise - * real memory copy is used. - */ -static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize, - unsigned long src, int userbuf) +int copy_oldmem_kernel(void *dst, void *src, size_t count) { + unsigned long from, len; + void *ra; int rc; - if (src < sclp.hsa_size) { - rc = memcpy_hsa(buf, src, csize, userbuf); - } else { - if (userbuf) - rc = copy_to_user_real((void __force __user *) buf, - (void *) src, csize); - else - rc = memcpy_real(buf, (void *) src, csize); + while (count) { + from = __pa(src); + if (!OLDMEM_BASE && from < sclp.hsa_size) { + /* Copy from zfcpdump HSA area */ + len = min(count, sclp.hsa_size - from); + rc = memcpy_hsa_kernel(dst, from, len); + if (rc) + return rc; + } else { + /* Check for swapped kdump oldmem areas */ + if (OLDMEM_BASE && from - OLDMEM_BASE < OLDMEM_SIZE) { + from -= OLDMEM_BASE; + len = min(count, OLDMEM_SIZE - from); + } else if (OLDMEM_BASE && from < OLDMEM_SIZE) { + len = min(count, OLDMEM_SIZE - from); + from += OLDMEM_BASE; + } else { + len = count; + } + if (is_vmalloc_or_module_addr(dst)) { + ra = load_real_addr(dst); + len = min(PAGE_SIZE - offset_in_page(ra), len); + } else { + ra = dst; + } + if (memcpy_real(ra, (void *) from, len)) + return -EFAULT; + } + dst += len; + src += len; + count -= len; } - return rc ? rc : csize; + return 0; } /* - * Copy one page from kdump "oldmem" - * - * For the kdump reserved memory this functions performs a swap operation: - * - [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] is mapped to [0 - OLDMEM_SIZE]. - * - [0 - OLDMEM_SIZE] is mapped to [OLDMEM_BASE - OLDMEM_BASE + OLDMEM_SIZE] + * Copy memory of the old, dumped system to a user space virtual address */ -static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize, - unsigned long src, int userbuf) - +int copy_oldmem_user(void __user *dst, void *src, size_t count) { + unsigned long from, len; int rc; - if (src < OLDMEM_SIZE) - src += OLDMEM_BASE; - else if (src > OLDMEM_BASE && - src < OLDMEM_BASE + OLDMEM_SIZE) - src -= OLDMEM_BASE; - if (userbuf) - rc = copy_to_user_real((void __force __user *) buf, - (void *) src, csize); - else - rc = copy_from_realmem(buf, (void *) src, csize); - return (rc == 0) ? 
rc : csize; + while (count) { + from = __pa(src); + if (!OLDMEM_BASE && from < sclp.hsa_size) { + /* Copy from zfcpdump HSA area */ + len = min(count, sclp.hsa_size - from); + rc = memcpy_hsa_user(dst, from, len); + if (rc) + return rc; + } else { + /* Check for swapped kdump oldmem areas */ + if (OLDMEM_BASE && from - OLDMEM_BASE < OLDMEM_SIZE) { + from -= OLDMEM_BASE; + len = min(count, OLDMEM_SIZE - from); + } else if (OLDMEM_BASE && from < OLDMEM_SIZE) { + len = min(count, OLDMEM_SIZE - from); + from += OLDMEM_BASE; + } else { + len = count; + } + rc = copy_to_user_real(dst, (void *) from, count); + if (rc) + return rc; + } + dst += len; + src += len; + count -= len; + } + return 0; } /* @@ -132,15 +214,17 @@ static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize, ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, unsigned long offset, int userbuf) { - unsigned long src; + void *src; + int rc; if (!csize) return 0; - src = (pfn << PAGE_SHIFT) + offset; - if (OLDMEM_BASE) - return copy_oldmem_page_kdump(buf, csize, src, userbuf); + src = (void *) (pfn << PAGE_SHIFT) + offset; + if (userbuf) + rc = copy_oldmem_user((void __force __user *) buf, src, csize); else - return copy_oldmem_page_zfcpdump(buf, csize, src, userbuf); + rc = copy_oldmem_kernel((void *) buf, src, csize); + return rc; } /* @@ -209,33 +293,6 @@ int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from, } /* - * Copy memory from old kernel - */ -int copy_from_oldmem(void *dest, void *src, size_t count) -{ - unsigned long copied = 0; - int rc; - - if (OLDMEM_BASE) { - if ((unsigned long) src < OLDMEM_SIZE) { - copied = min(count, OLDMEM_SIZE - (unsigned long) src); - rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied); - if (rc) - return rc; - } - } else { - unsigned long hsa_end = sclp.hsa_size; - if ((unsigned long) src < hsa_end) { - copied = min(count, hsa_end - (unsigned long) src); - rc = memcpy_hsa(dest, (unsigned long) src, copied, 0); - if (rc) - return rc; - } - } - return copy_from_realmem(dest + copied, src + copied, count - copied); -} - -/* * Alloc memory and panic in case of ENOMEM */ static void *kzalloc_panic(int len) @@ -251,8 +308,8 @@ static void *kzalloc_panic(int len) /* * Initialize ELF note */ -static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len, - const char *name) +static void *nt_init_name(void *buf, Elf64_Word type, void *desc, int d_len, + const char *name) { Elf64_Nhdr *note; u64 len; @@ -272,136 +329,42 @@ static void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len, return PTR_ADD(buf, len); } -/* - * Initialize prstatus note - */ -static void *nt_prstatus(void *ptr, struct save_area *sa) +static inline void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len) { - struct elf_prstatus nt_prstatus; - static int cpu_nr = 1; - - memset(&nt_prstatus, 0, sizeof(nt_prstatus)); - memcpy(&nt_prstatus.pr_reg.gprs, sa->gp_regs, sizeof(sa->gp_regs)); - memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw)); - memcpy(&nt_prstatus.pr_reg.acrs, sa->acc_regs, sizeof(sa->acc_regs)); - nt_prstatus.pr_pid = cpu_nr; - cpu_nr++; - - return nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus), - "CORE"); + return nt_init_name(buf, type, desc, d_len, KEXEC_CORE_NOTE_NAME); } /* - * Initialize fpregset (floating point) note + * Fill ELF notes for one CPU with save area registers */ -static void *nt_fpregset(void *ptr, struct save_area *sa) +static void *fill_cpu_elf_notes(void *ptr, int cpu, struct save_area *sa) { + struct 
elf_prstatus nt_prstatus; elf_fpregset_t nt_fpregset; + /* Prepare prstatus note */ + memset(&nt_prstatus, 0, sizeof(nt_prstatus)); + memcpy(&nt_prstatus.pr_reg.gprs, sa->gprs, sizeof(sa->gprs)); + memcpy(&nt_prstatus.pr_reg.psw, sa->psw, sizeof(sa->psw)); + memcpy(&nt_prstatus.pr_reg.acrs, sa->acrs, sizeof(sa->acrs)); + nt_prstatus.pr_pid = cpu; + /* Prepare fpregset (floating point) note */ memset(&nt_fpregset, 0, sizeof(nt_fpregset)); - memcpy(&nt_fpregset.fpc, &sa->fp_ctrl_reg, sizeof(sa->fp_ctrl_reg)); - memcpy(&nt_fpregset.fprs, &sa->fp_regs, sizeof(sa->fp_regs)); - - return nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset), - "CORE"); -} - -/* - * Initialize timer note - */ -static void *nt_s390_timer(void *ptr, struct save_area *sa) -{ - return nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer), - KEXEC_CORE_NOTE_NAME); -} - -/* - * Initialize TOD clock comparator note - */ -static void *nt_s390_tod_cmp(void *ptr, struct save_area *sa) -{ - return nt_init(ptr, NT_S390_TODCMP, &sa->clk_cmp, - sizeof(sa->clk_cmp), KEXEC_CORE_NOTE_NAME); -} - -/* - * Initialize TOD programmable register note - */ -static void *nt_s390_tod_preg(void *ptr, struct save_area *sa) -{ - return nt_init(ptr, NT_S390_TODPREG, &sa->tod_reg, - sizeof(sa->tod_reg), KEXEC_CORE_NOTE_NAME); -} - -/* - * Initialize control register note - */ -static void *nt_s390_ctrs(void *ptr, struct save_area *sa) -{ - return nt_init(ptr, NT_S390_CTRS, &sa->ctrl_regs, - sizeof(sa->ctrl_regs), KEXEC_CORE_NOTE_NAME); -} - -/* - * Initialize prefix register note - */ -static void *nt_s390_prefix(void *ptr, struct save_area *sa) -{ - return nt_init(ptr, NT_S390_PREFIX, &sa->pref_reg, - sizeof(sa->pref_reg), KEXEC_CORE_NOTE_NAME); -} - -/* - * Initialize vxrs high note (full 128 bit VX registers 16-31) - */ -static void *nt_s390_vx_high(void *ptr, __vector128 *vx_regs) -{ - return nt_init(ptr, NT_S390_VXRS_HIGH, &vx_regs[16], - 16 * sizeof(__vector128), KEXEC_CORE_NOTE_NAME); -} - -/* - * Initialize vxrs low note (lower halves of VX registers 0-15) - */ -static void *nt_s390_vx_low(void *ptr, __vector128 *vx_regs) -{ - Elf64_Nhdr *note; - u64 len; - int i; - - note = (Elf64_Nhdr *)ptr; - note->n_namesz = strlen(KEXEC_CORE_NOTE_NAME) + 1; - note->n_descsz = 16 * 8; - note->n_type = NT_S390_VXRS_LOW; - len = sizeof(Elf64_Nhdr); - - memcpy(ptr + len, KEXEC_CORE_NOTE_NAME, note->n_namesz); - len = roundup(len + note->n_namesz, 4); - - ptr += len; - /* Copy lower halves of SIMD registers 0-15 */ - for (i = 0; i < 16; i++) { - memcpy(ptr, &vx_regs[i].u[2], 8); - ptr += 8; - } - return ptr; -} - -/* - * Fill ELF notes for one CPU with save area registers - */ -void *fill_cpu_elf_notes(void *ptr, struct save_area *sa, __vector128 *vx_regs) -{ - ptr = nt_prstatus(ptr, sa); - ptr = nt_fpregset(ptr, sa); - ptr = nt_s390_timer(ptr, sa); - ptr = nt_s390_tod_cmp(ptr, sa); - ptr = nt_s390_tod_preg(ptr, sa); - ptr = nt_s390_ctrs(ptr, sa); - ptr = nt_s390_prefix(ptr, sa); - if (MACHINE_HAS_VX && vx_regs) { - ptr = nt_s390_vx_low(ptr, vx_regs); - ptr = nt_s390_vx_high(ptr, vx_regs); + memcpy(&nt_fpregset.fpc, &sa->fpc, sizeof(sa->fpc)); + memcpy(&nt_fpregset.fprs, &sa->fprs, sizeof(sa->fprs)); + /* Create ELF notes for the CPU */ + ptr = nt_init(ptr, NT_PRSTATUS, &nt_prstatus, sizeof(nt_prstatus)); + ptr = nt_init(ptr, NT_PRFPREG, &nt_fpregset, sizeof(nt_fpregset)); + ptr = nt_init(ptr, NT_S390_TIMER, &sa->timer, sizeof(sa->timer)); + ptr = nt_init(ptr, NT_S390_TODCMP, &sa->todcmp, sizeof(sa->todcmp)); + ptr = nt_init(ptr, 
NT_S390_TODPREG, &sa->todpreg, sizeof(sa->todpreg)); + ptr = nt_init(ptr, NT_S390_CTRS, &sa->ctrs, sizeof(sa->ctrs)); + ptr = nt_init(ptr, NT_S390_PREFIX, &sa->prefix, sizeof(sa->prefix)); + if (MACHINE_HAS_VX) { + ptr = nt_init(ptr, NT_S390_VXRS_HIGH, + &sa->vxrs_high, sizeof(sa->vxrs_high)); + ptr = nt_init(ptr, NT_S390_VXRS_LOW, + &sa->vxrs_low, sizeof(sa->vxrs_low)); } return ptr; } @@ -416,8 +379,7 @@ static void *nt_prpsinfo(void *ptr) memset(&prpsinfo, 0, sizeof(prpsinfo)); prpsinfo.pr_sname = 'R'; strcpy(prpsinfo.pr_fname, "vmlinux"); - return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo), - KEXEC_CORE_NOTE_NAME); + return nt_init(ptr, NT_PRPSINFO, &prpsinfo, sizeof(prpsinfo)); } /* @@ -429,17 +391,18 @@ static void *get_vmcoreinfo_old(unsigned long *size) Elf64_Nhdr note; void *addr; - if (copy_from_oldmem(&addr, &S390_lowcore.vmcore_info, sizeof(addr))) + if (copy_oldmem_kernel(&addr, &S390_lowcore.vmcore_info, sizeof(addr))) return NULL; memset(nt_name, 0, sizeof(nt_name)); - if (copy_from_oldmem(&note, addr, sizeof(note))) + if (copy_oldmem_kernel(&note, addr, sizeof(note))) return NULL; - if (copy_from_oldmem(nt_name, addr + sizeof(note), sizeof(nt_name) - 1)) + if (copy_oldmem_kernel(nt_name, addr + sizeof(note), + sizeof(nt_name) - 1)) return NULL; if (strcmp(nt_name, "VMCOREINFO") != 0) return NULL; vmcoreinfo = kzalloc_panic(note.n_descsz); - if (copy_from_oldmem(vmcoreinfo, addr + 24, note.n_descsz)) + if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) return NULL; *size = note.n_descsz; return vmcoreinfo; @@ -458,7 +421,7 @@ static void *nt_vmcoreinfo(void *ptr) vmcoreinfo = get_vmcoreinfo_old(&size); if (!vmcoreinfo) return ptr; - return nt_init(ptr, 0, vmcoreinfo, size, "VMCOREINFO"); + return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO"); } /* @@ -487,13 +450,12 @@ static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt) */ static int get_cpu_cnt(void) { - int i, cpus = 0; + struct save_area *sa; + int cpus = 0; - for (i = 0; i < dump_save_areas.count; i++) { - if (dump_save_areas.areas[i]->sa.pref_reg == 0) - continue; - cpus++; - } + list_for_each_entry(sa, &dump_save_areas, list) + if (sa->prefix != 0) + cpus++; return cpus; } @@ -538,18 +500,16 @@ static void loads_init(Elf64_Phdr *phdr, u64 loads_offset) */ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset) { - struct save_area_ext *sa_ext; + struct save_area *sa; void *ptr_start = ptr; - int i; + int cpu; ptr = nt_prpsinfo(ptr); - for (i = 0; i < dump_save_areas.count; i++) { - sa_ext = dump_save_areas.areas[i]; - if (sa_ext->sa.pref_reg == 0) - continue; - ptr = fill_cpu_elf_notes(ptr, &sa_ext->sa, sa_ext->vx_regs); - } + cpu = 1; + list_for_each_entry(sa, &dump_save_areas, list) + if (sa->prefix != 0) + ptr = fill_cpu_elf_notes(ptr, cpu++, sa); ptr = nt_vmcoreinfo(ptr); memset(phdr, 0, sizeof(*phdr)); phdr->p_type = PT_NOTE; @@ -573,9 +533,6 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size) /* If we are not in kdump or zfcpdump mode return */ if (!OLDMEM_BASE && ipl_info.type != IPL_TYPE_FCP_DUMP) return 0; - /* If elfcorehdr= has been passed via cmdline, we use that one */ - if (elfcorehdr_addr != ELFCORE_ADDR_MAX) - return 0; /* If we cannot get HSA size for zfcpdump return error */ if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp.hsa_size) return -ENODEV; @@ -606,7 +563,6 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size) hdr_off = PTR_DIFF(ptr, hdr); loads_init(phdr_loads, hdr_off); *addr = (unsigned long long)
hdr; - elfcorehdr_newmem = hdr; *size = (unsigned long long) hdr_off; BUG_ON(elfcorehdr_size > alloc_size); return 0; @@ -617,8 +573,6 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size) */ void elfcorehdr_free(unsigned long long addr) { - if (!elfcorehdr_newmem) - return; kfree((void *)(unsigned long)addr); } @@ -629,7 +583,6 @@ ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos) { void *src = (void *)(unsigned long)*ppos; - src = elfcorehdr_newmem ? src : src - OLDMEM_BASE; memcpy(buf, src, count); *ppos += count; return count; @@ -641,15 +594,8 @@ ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos) ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos) { void *src = (void *)(unsigned long)*ppos; - int rc; - if (elfcorehdr_newmem) { - memcpy(buf, src, count); - } else { - rc = copy_from_oldmem(buf, src, count); - if (rc) - return rc; - } + memcpy(buf, src, count); *ppos += count; return count; } diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 6fca0e464..c890a5589 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -1470,7 +1470,7 @@ debug_dflt_header_fn(debug_info_t * id, struct debug_view *view, except_str = "*"; else except_str = "-"; - caller = ((unsigned long) entry->caller) & PSW_ADDR_INSN; + caller = (unsigned long) entry->caller; rc += sprintf(out_buf, "%02i %011lld:%06lu %1u %1s %02i %p ", area, (long long)time_spec.tv_sec, time_spec.tv_nsec / 1000, level, except_str, diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 6e7296160..62973efd2 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -2022,7 +2022,7 @@ void show_code(struct pt_regs *regs) *ptr++ = '\t'; ptr += print_insn(ptr, code + start, addr); start += opsize; - printk(buffer); + printk("%s", buffer); ptr = buffer; ptr += sprintf(ptr, "\n "); hops++; @@ -2049,7 +2049,7 @@ void print_fn_code(unsigned char *code, unsigned long len) ptr += print_insn(ptr, code, (unsigned long) code); *ptr++ = '\n'; *ptr++ = 0; - printk(buffer); + printk("%s", buffer); code += opsize; len -= opsize; } diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index dc8e20473..02bd02ff6 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -34,22 +34,21 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high) unsigned long addr; while (1) { - sp = sp & PSW_ADDR_INSN; if (sp < low || sp > high - sizeof(*sf)) return sp; sf = (struct stack_frame *) sp; - addr = sf->gprs[8] & PSW_ADDR_INSN; + addr = sf->gprs[8]; printk("([<%016lx>] %pSR)\n", addr, (void *)addr); /* Follow the backchain. */ while (1) { low = sp; - sp = sf->back_chain & PSW_ADDR_INSN; + sp = sf->back_chain; if (!sp) break; if (sp <= low || sp > high - sizeof(*sf)) return sp; sf = (struct stack_frame *) sp; - addr = sf->gprs[8] & PSW_ADDR_INSN; + addr = sf->gprs[8]; printk(" [<%016lx>] %pSR\n", addr, (void *)addr); } /* Zero backchain detected, check for interrupt frame. 
*/ @@ -57,7 +56,7 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high) if (sp <= low || sp > high - sizeof(*regs)) return sp; regs = (struct pt_regs *) sp; - addr = regs->psw.addr & PSW_ADDR_INSN; + addr = regs->psw.addr; printk(" [<%016lx>] %pSR\n", addr, (void *)addr); low = sp; sp = regs->gprs[15]; diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 3c31609df..c55576bba 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -252,14 +252,14 @@ static void early_pgm_check_handler(void) unsigned long addr; addr = S390_lowcore.program_old_psw.addr; - fixup = search_exception_tables(addr & PSW_ADDR_INSN); + fixup = search_exception_tables(addr); if (!fixup) disabled_wait(0); /* Disable low address protection before storing into lowcore. */ __ctl_store(cr0, 0, 0); cr0_new = cr0 & ~(1UL << 28); __ctl_load(cr0_new, 0, 0); - S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE; + S390_lowcore.program_old_psw.addr = extable_fixup(fixup); __ctl_load(cr0, 0, 0); } @@ -268,9 +268,9 @@ static noinline __init void setup_lowcore_early(void) psw_t psw; psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA; - psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler; + psw.addr = (unsigned long) s390_base_ext_handler; S390_lowcore.external_new_psw = psw; - psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; + psw.addr = (unsigned long) s390_base_pgm_handler; S390_lowcore.program_new_psw = psw; s390_base_pgm_handler_fn = early_pgm_check_handler; } @@ -335,6 +335,14 @@ static __init void detect_machine_facilities(void) } } +static inline void save_vector_registers(void) +{ +#ifdef CONFIG_CRASH_DUMP + if (test_facility(129)) + save_vx_regs(boot_cpu_vector_save_area); +#endif +} + static int __init disable_vector_extension(char *str) { S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX; @@ -451,6 +459,7 @@ void __init startup_init(void) detect_diag9c(); detect_diag44(); detect_machine_facilities(); + save_vector_registers(); setup_topology(); sclp_early_detect(); lockdep_on(); diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 857b6526d..cd5a19138 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -764,6 +764,7 @@ ENTRY(psw_idle) .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15) .Lpsw_idle_stcctm: #endif + oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT STCK __CLOCK_IDLE_ENTER(%r2) stpt __TIMER_IDLE_ENTER(%r2) .Lpsw_idle_lpsw: @@ -1146,6 +1147,7 @@ cleanup_critical: .quad .Lio_done - 4 .Lcleanup_idle: + ni __LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT # copy interrupt clock & cpu timer mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index e0eaf1113..0f7bfeba6 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -203,7 +203,7 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) goto out; if (unlikely(atomic_read(&current->tracing_graph_pause))) goto out; - ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE; + ip -= MCOUNT_INSN_SIZE; trace.func = ip; trace.depth = current->curr_ret_stack + 1; /* Only trace if the calling function expects to.
*/ diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 301ee9c70..fcaefb041 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -25,6 +25,7 @@ #include <linux/linkage.h> #include <asm/asm-offsets.h> #include <asm/thread_info.h> +#include <asm/facility.h> #include <asm/page.h> #include <asm/ptrace.h> @@ -300,27 +301,27 @@ ENTRY(startup_kdump) xc 0x200(256),0x200 # partially clear lowcore xc 0x300(256),0x300 xc 0xe00(256),0xe00 + xc 0xf00(256),0xf00 lctlg %c0,%c15,0x200(%r0) # initialize control registers stck __LC_LAST_UPDATE_CLOCK spt 6f-.LPG0(%r13) mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13) - xc __LC_STFL_FAC_LIST(8),__LC_STFL_FAC_LIST - # check capabilities against MARCH_{G5,Z900,Z990,Z9_109,Z10} - .insn s,0xb2b10000,0 # store facilities @ __LC_STFL_FAC_LIST - tm __LC_STFL_FAC_LIST,0x01 # stfle available ? + stfl 0(%r0) # store facilities @ __LC_STFL_FAC_LIST + mvc __LC_STFLE_FAC_LIST(4),__LC_STFL_FAC_LIST + tm __LC_STFLE_FAC_LIST,0x01 # stfle available ? jz 0f - la %r0,1 - .insn s,0xb2b00000,__LC_STFL_FAC_LIST # store facility list extended + lghi %r0,FACILITIES_ALS_DWORDS-1 + .insn s,0xb2b00000,__LC_STFLE_FAC_LIST # store facility list extended # verify if all required facilities are supported by the machine -0: la %r1,__LC_STFL_FAC_LIST +0: la %r1,__LC_STFLE_FAC_LIST la %r2,3f+8-.LPG0(%r13) - l %r3,0(%r2) -1: l %r0,0(%r1) - n %r0,4(%r2) - cl %r0,4(%r2) + lhi %r3,FACILITIES_ALS_DWORDS +1: lg %r0,0(%r1) + ng %r0,0(%r2) + clg %r0,0(%r2) jne 2f - la %r1,4(%r1) - la %r2,4(%r2) + la %r1,8(%r1) + la %r2,8(%r2) ahi %r3,-1 jnz 1b j 4f @@ -340,24 +341,10 @@ ENTRY(startup_kdump) 3: .long 0x000a0000,0x8badcccc # List of facilities that are required. If not all facilities are present -# the kernel will crash. Format is number of facility words with bits set, -# followed by the facility words. +# the kernel will crash. + + .quad FACILITIES_ALS -#if defined(CONFIG_MARCH_Z13) - .long 2, 0xc100eff2, 0xf46cc800 -#elif defined(CONFIG_MARCH_ZEC12) - .long 2, 0xc100eff2, 0xf46cc800 -#elif defined(CONFIG_MARCH_Z196) - .long 2, 0xc100eff2, 0xf46c0000 -#elif defined(CONFIG_MARCH_Z10) - .long 2, 0xc100eff2, 0xf0680000 -#elif defined(CONFIG_MARCH_Z9_109) - .long 1, 0xc100efc2 -#elif defined(CONFIG_MARCH_Z990) - .long 1, 0xc0002000 -#elif defined(CONFIG_MARCH_Z900) - .long 1, 0xc0000000 -#endif 4: /* Continue with startup code in head64.S */ jg startup_continue diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 58b719fa8..03c2b469c 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -16,7 +16,7 @@ __HEAD ENTRY(startup_continue) - tm __LC_STFL_FAC_LIST+6,0x80 # LPP available ? + tm __LC_STFLE_FAC_LIST+5,0x80 # LPP available ? 
jz 0f xc __LC_LPP+1(7,0),__LC_LPP+1 # clear lpp and current_pid mvi __LC_LPP,0x80 # and set LPP_MAGIC diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index b1f0a90f9..f20abdb56 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -2039,21 +2039,15 @@ static void do_reset_calls(void) reset->fn(); } -u32 dump_prefix_page; - -void s390_reset_system(void (*fn_pre)(void), - void (*fn_post)(void *), void *data) +void s390_reset_system(void) { - struct _lowcore *lc; + struct lowcore *lc; - lc = (struct _lowcore *)(unsigned long) store_prefix(); + lc = (struct lowcore *)(unsigned long) store_prefix(); /* Stack for interrupt/machine check handler */ lc->panic_stack = S390_lowcore.panic_stack; - /* Save prefix page address for dump case */ - dump_prefix_page = (u32)(unsigned long) lc; - /* Disable prefixing */ set_prefix(0); @@ -2063,12 +2057,12 @@ void s390_reset_system(void (*fn_pre)(void), /* Set new machine check handler */ S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT; S390_lowcore.mcck_new_psw.addr = - PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; + (unsigned long) s390_base_mcck_handler; /* Set new program check handler */ S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT; S390_lowcore.program_new_psw.addr = - PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; + (unsigned long) s390_base_pgm_handler; /* * Clear subchannel ID and number to signal new kernel that no CCW or @@ -2077,14 +2071,5 @@ void s390_reset_system(void (*fn_pre)(void), S390_lowcore.subchannel_id = 0; S390_lowcore.subchannel_nr = 0; - /* Store status at absolute zero */ - store_status(); - - /* Call function before reset */ - if (fn_pre) - fn_pre(); do_reset_calls(); - /* Call function after reset */ - if (fn_post) - fn_post(data); } diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 389db56a2..250f59725 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -226,7 +226,7 @@ static void enable_singlestep(struct kprobe_ctlblk *kcb, __ctl_load(per_kprobe, 9, 11); regs->psw.mask |= PSW_MASK_PER; regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); - regs->psw.addr = ip | PSW_ADDR_AMODE; + regs->psw.addr = ip; } NOKPROBE_SYMBOL(enable_singlestep); @@ -238,7 +238,7 @@ static void disable_singlestep(struct kprobe_ctlblk *kcb, __ctl_load(kcb->kprobe_saved_ctl, 9, 11); regs->psw.mask &= ~PSW_MASK_PER; regs->psw.mask |= kcb->kprobe_saved_imask; - regs->psw.addr = ip | PSW_ADDR_AMODE; + regs->psw.addr = ip; } NOKPROBE_SYMBOL(disable_singlestep); @@ -310,7 +310,7 @@ static int kprobe_handler(struct pt_regs *regs) */ preempt_disable(); kcb = get_kprobe_ctlblk(); - p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2)); + p = get_kprobe((void *)(regs->psw.addr - 2)); if (p) { if (kprobe_running()) { @@ -460,7 +460,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) break; } - regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE; + regs->psw.addr = orig_ret_address; pop_kprobe(get_kprobe_ctlblk()); kretprobe_hash_unlock(current, &flags); @@ -490,7 +490,7 @@ NOKPROBE_SYMBOL(trampoline_probe_handler); static void resume_execution(struct kprobe *p, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - unsigned long ip = regs->psw.addr & PSW_ADDR_INSN; + unsigned long ip = regs->psw.addr; int fixup = probe_get_fixup_type(p->ainsn.insn); /* Check if the kprobes location is an enabled ftrace caller */ @@ -605,9 +605,9 @@ static int kprobe_trap_handler(struct pt_regs 
*regs, int trapnr) * In case the user-specified fault handler returned * zero, try to fix up. */ - entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); + entry = search_exception_tables(regs->psw.addr); if (entry) { - regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE; + regs->psw.addr = extable_fixup(entry); return 1; } @@ -683,7 +683,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); /* setup return addr to the jprobe handler routine */ - regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE; + regs->psw.addr = (unsigned long) jp->entry; regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); /* r15 is the stack pointer */ diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index fb0901ec4..2f1b7217c 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -35,46 +35,6 @@ extern const unsigned long long relocate_kernel_len; #ifdef CONFIG_CRASH_DUMP /* - * Create ELF notes for one CPU - */ -static void add_elf_notes(int cpu) -{ - struct save_area *sa = (void *) 4608 + store_prefix(); - void *ptr; - - memcpy((void *) (4608UL + sa->pref_reg), sa, sizeof(*sa)); - ptr = (u64 *) per_cpu_ptr(crash_notes, cpu); - ptr = fill_cpu_elf_notes(ptr, sa, NULL); - memset(ptr, 0, sizeof(struct elf_note)); -} - -/* - * Initialize CPU ELF notes - */ -static void setup_regs(void) -{ - unsigned long sa = S390_lowcore.prefixreg_save_area + SAVE_AREA_BASE; - struct _lowcore *lc; - int cpu, this_cpu; - - /* Get lowcore pointer from store status of this CPU (absolute zero) */ - lc = (struct _lowcore *)(unsigned long)S390_lowcore.prefixreg_save_area; - this_cpu = smp_find_processor_id(stap()); - add_elf_notes(this_cpu); - for_each_online_cpu(cpu) { - if (cpu == this_cpu) - continue; - if (smp_store_status(cpu)) - continue; - add_elf_notes(cpu); - } - if (MACHINE_HAS_VX) - save_vx_regs_safe((void *) lc->vector_save_area_addr); - /* Copy dump CPU store status info to absolute zero */ - memcpy((void *) SAVE_AREA_BASE, (void *) sa, sizeof(struct save_area)); -} - -/* * PM notifier callback for kdump */ static int machine_kdump_pm_cb(struct notifier_block *nb, unsigned long action, @@ -105,14 +65,66 @@ static int __init machine_kdump_pm_init(void) arch_initcall(machine_kdump_pm_init); /* - * Start kdump: We expect here that a store status has been done on our CPU + * Reset the system, copy boot CPU registers to absolute zero, + * and jump to the kdump image */ static void __do_machine_kdump(void *image) { - int (*start_kdump)(int) = (void *)((struct kimage *) image)->start; + int (*start_kdump)(int); + unsigned long prefix; + + /* store_status() saved the prefix register to lowcore */ + prefix = (unsigned long) S390_lowcore.prefixreg_save_area; + + /* Now do the reset */ + s390_reset_system(); + + /* + * Copy dump CPU store status info to absolute zero. + * This need to be done *after* s390_reset_system set the + * prefix register of this CPU to zero + */ + memcpy((void *) __LC_FPREGS_SAVE_AREA, + (void *)(prefix + __LC_FPREGS_SAVE_AREA), 512); __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA); + start_kdump = (void *)((struct kimage *) image)->start; start_kdump(1); + + /* Die if start_kdump returns */ + disabled_wait((unsigned long) __builtin_return_address(0)); +} + +/* + * Start kdump: create a LGR log entry, store status of all CPUs and + * branch to __do_machine_kdump. 
+ */ +static noinline void __machine_kdump(void *image) +{ + int this_cpu, cpu; + + lgr_info_log(); + /* Get status of the other CPUs */ + this_cpu = smp_find_processor_id(stap()); + for_each_online_cpu(cpu) { + if (cpu == this_cpu) + continue; + if (smp_store_status(cpu)) + continue; + } + /* Store status of the boot CPU */ + if (MACHINE_HAS_VX) + save_vx_regs((void *) &S390_lowcore.vector_save_area); + /* + * To create a good backchain for this CPU in the dump store_status + * is passed the address of a function. The address is saved into + * the PSW save area of the boot CPU and the function is invoked as + * a tail call of store_status. The backchain in the dump will look + * like this: + * restart_int_handler -> __machine_kexec -> __do_machine_kdump + * The call to store_status() will not return. + */ + store_status(__do_machine_kdump, image); } #endif @@ -235,10 +247,14 @@ static void __do_machine_kexec(void *data) relocate_kernel_t data_mover; struct kimage *image = data; + s390_reset_system(); data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page); /* Call the moving routine */ (*data_mover)(&image->head, image->start); + + /* Die if kexec returns */ + disabled_wait((unsigned long) __builtin_return_address(0)); } /* @@ -251,14 +267,10 @@ static void __machine_kexec(void *data) tracing_off(); debug_locks_off(); #ifdef CONFIG_CRASH_DUMP - if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH) { - - lgr_info_log(); - s390_reset_system(setup_regs, __do_machine_kdump, data); - } else + if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH) + __machine_kdump(data); #endif - s390_reset_system(NULL, __do_machine_kexec, data); - disabled_wait((unsigned long) __builtin_return_address(0)); + __do_machine_kexec(data); } /* diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 0c1a67931..7873e1714 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -159,11 +159,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, /* Increase core size by size of got & plt and set start offsets for got and plt. */ - me->core_size = ALIGN(me->core_size, 4); - me->arch.got_offset = me->core_size; - me->core_size += me->arch.got_size; - me->arch.plt_offset = me->core_size; - me->core_size += me->arch.plt_size; + me->core_layout.size = ALIGN(me->core_layout.size, 4); + me->arch.got_offset = me->core_layout.size; + me->core_layout.size += me->arch.got_size; + me->arch.plt_offset = me->core_layout.size; + me->core_layout.size += me->arch.plt_size; return 0; } @@ -279,7 +279,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, if (info->got_initialized == 0) { Elf_Addr *gotent; - gotent = me->module_core + me->arch.got_offset + + gotent = me->core_layout.base + me->arch.got_offset + info->got_offset; *gotent = val; info->got_initialized = 1; @@ -302,7 +302,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, rc = apply_rela_bits(loc, val, 0, 64, 0); else if (r_type == R_390_GOTENT || r_type == R_390_GOTPLTENT) { - val += (Elf_Addr) me->module_core - loc; + val += (Elf_Addr) me->core_layout.base - loc; rc = apply_rela_bits(loc, val, 1, 32, 1); } break; @@ -315,7 +315,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. 
*/ if (info->plt_initialized == 0) { unsigned int *ip; - ip = me->module_core + me->arch.plt_offset + + ip = me->core_layout.base + me->arch.plt_offset + info->plt_offset; ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ ip[1] = 0x100a0004; @@ -334,7 +334,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, val - loc + 0xffffUL < 0x1ffffeUL) || (r_type == R_390_PLT32DBL && val - loc + 0xffffffffULL < 0x1fffffffeULL))) - val = (Elf_Addr) me->module_core + + val = (Elf_Addr) me->core_layout.base + me->arch.plt_offset + info->plt_offset; val += rela->r_addend - loc; @@ -356,7 +356,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, case R_390_GOTOFF32: /* 32 bit offset to GOT. */ case R_390_GOTOFF64: /* 64 bit offset to GOT. */ val = val + rela->r_addend - - ((Elf_Addr) me->module_core + me->arch.got_offset); + ((Elf_Addr) me->core_layout.base + me->arch.got_offset); if (r_type == R_390_GOTOFF16) rc = apply_rela_bits(loc, val, 0, 16, 0); else if (r_type == R_390_GOTOFF32) @@ -366,7 +366,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, break; case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ - val = (Elf_Addr) me->module_core + me->arch.got_offset + + val = (Elf_Addr) me->core_layout.base + me->arch.got_offset + rela->r_addend - loc; if (r_type == R_390_GOTPC) rc = apply_rela_bits(loc, val, 1, 32, 0); diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c index d112fc66f..87f05e475 100644 --- a/arch/s390/kernel/os_info.c +++ b/arch/s390/kernel/os_info.c @@ -89,7 +89,7 @@ static void os_info_old_alloc(int nr, int align) goto fail; } buf_align = PTR_ALIGN(buf, align); - if (copy_from_oldmem(buf_align, (void *) addr, size)) { + if (copy_oldmem_kernel(buf_align, (void *) addr, size)) { msg = "copy failed"; goto fail_free; } @@ -122,14 +122,15 @@ static void os_info_old_init(void) return; if (!OLDMEM_BASE) goto fail; - if (copy_from_oldmem(&addr, &S390_lowcore.os_info, sizeof(addr))) + if (copy_oldmem_kernel(&addr, &S390_lowcore.os_info, sizeof(addr))) goto fail; if (addr == 0 || addr % PAGE_SIZE) goto fail; os_info_old = kzalloc(sizeof(*os_info_old), GFP_KERNEL); if (!os_info_old) goto fail; - if (copy_from_oldmem(os_info_old, (void *) addr, sizeof(*os_info_old))) + if (copy_oldmem_kernel(os_info_old, (void *) addr, + sizeof(*os_info_old))) goto fail_free; if (os_info_old->magic != OS_INFO_MAGIC) goto fail_free; diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index 61595c1f0..0943b11a2 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c @@ -74,7 +74,7 @@ static unsigned long guest_is_user_mode(struct pt_regs *regs) static unsigned long instruction_pointer_guest(struct pt_regs *regs) { - return sie_block(regs)->gpsw.addr & PSW_ADDR_INSN; + return sie_block(regs)->gpsw.addr; } unsigned long perf_instruction_pointer(struct pt_regs *regs) @@ -231,29 +231,27 @@ static unsigned long __store_trace(struct perf_callchain_entry *entry, struct pt_regs *regs; while (1) { - sp = sp & PSW_ADDR_INSN; if (sp < low || sp > high - sizeof(*sf)) return sp; sf = (struct stack_frame *) sp; - perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN); + perf_callchain_store(entry, sf->gprs[8]); /* Follow the backchain. 
 */
 	while (1) {
 		low = sp;
-		sp = sf->back_chain & PSW_ADDR_INSN;
+		sp = sf->back_chain;
 		if (!sp)
 			break;
 		if (sp <= low || sp > high - sizeof(*sf))
 			return sp;
 		sf = (struct stack_frame *) sp;
-		perf_callchain_store(entry,
-				     sf->gprs[8] & PSW_ADDR_INSN);
+		perf_callchain_store(entry, sf->gprs[8]);
 	}
 	/* Zero backchain detected, check for interrupt frame. */
 	sp = (unsigned long) (sf + 1);
 	if (sp <= low || sp > high - sizeof(*regs))
 		return sp;
 	regs = (struct pt_regs *) sp;
-	perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
+	perf_callchain_store(entry, sf->gprs[8]);
 	low = sp;
 	sp = regs->gprs[15];
 }
@@ -262,12 +260,13 @@ static unsigned long __store_trace(struct perf_callchain_entry *entry,
 void perf_callchain_kernel(struct perf_callchain_entry *entry,
 			   struct pt_regs *regs)
 {
-	unsigned long head;
+	unsigned long head, frame_size;
 	struct stack_frame *head_sf;
 	if (user_mode(regs))
 		return;
+	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
 	head = regs->gprs[15];
 	head_sf = (struct stack_frame *) head;
@@ -275,8 +274,9 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
 		return;
 	head = head_sf->back_chain;
-	head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE,
-			     S390_lowcore.async_stack);
+	head = __store_trace(entry, head,
+			     S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+			     S390_lowcore.async_stack + frame_size);
 	__store_trace(entry, head, S390_lowcore.thread_info,
 		      S390_lowcore.thread_info + THREAD_SIZE);
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 114ee8b96..2bba7df4a 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -56,10 +56,10 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 		return 0;
 	low = task_stack_page(tsk);
 	high = (struct stack_frame *) task_pt_regs(tsk);
-	sf = (struct stack_frame *) (tsk->thread.ksp & PSW_ADDR_INSN);
+	sf = (struct stack_frame *) tsk->thread.ksp;
 	if (sf <= low || sf > high)
 		return 0;
-	sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
+	sf = (struct stack_frame *) sf->back_chain;
 	if (sf <= low || sf > high)
 		return 0;
 	return sf->gprs[8];
@@ -154,7 +154,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
 		memset(&frame->childregs, 0, sizeof(struct pt_regs));
 		frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
 				PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
-		frame->childregs.psw.addr = PSW_ADDR_AMODE |
+		frame->childregs.psw.addr =
 				(unsigned long) kernel_thread_starter;
 		frame->childregs.gprs[9] = new_stackp; /* function */
 		frame->childregs.gprs[10] = arg;
@@ -220,14 +220,14 @@ unsigned long get_wchan(struct task_struct *p)
 		return 0;
 	low = task_stack_page(p);
 	high = (struct stack_frame *) task_pt_regs(p);
-	sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN);
+	sf = (struct stack_frame *) p->thread.ksp;
 	if (sf <= low || sf > high)
 		return 0;
 	for (count = 0; count < 16; count++) {
-		sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
+		sf = (struct stack_frame *) sf->back_chain;
 		if (sf <= low || sf > high)
 			return 0;
-		return_address = sf->gprs[8] & PSW_ADDR_INSN;
+		return_address = sf->gprs[8];
 		if (!in_sched_functions(return_address))
 			return return_address;
 	}
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 7ce00e7a7..647128d5b 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -61,6 +61,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
 		"edat", "etf3eh", "highgprs", "te", "vx"
 	};
+	static const char * const int_hwcap_str[] = {
+		"sie"
+	};
 	unsigned long n = (unsigned long) v - 1;
 	int i;
@@ -75,6 +78,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
 			if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
 				seq_printf(m, "%s ", hwcap_str[i]);
+		for (i = 0; i < ARRAY_SIZE(int_hwcap_str); i++)
+			if (int_hwcap_str[i] && (int_hwcap & (1UL << i)))
+				seq_printf(m, "%s ", int_hwcap_str[i]);
 		seq_puts(m, "\n");
 		show_cacheinfo(m);
 	}
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 01c37b36c..49b1c13bf 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -84,7 +84,7 @@ void update_cr_regs(struct task_struct *task)
 		if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
 			new.control |= PER_EVENT_IFETCH;
 		new.start = 0;
-		new.end = PSW_ADDR_INSN;
+		new.end = -1UL;
 	}
 	/* Take care of the PER enablement bit in the PSW. */
@@ -148,7 +148,7 @@ static inline unsigned long __peek_user_per(struct task_struct *child,
 	else if (addr == (addr_t) &dummy->cr11)
 		/* End address of the active per set. */
 		return test_thread_flag(TIF_SINGLE_STEP) ?
-			PSW_ADDR_INSN : child->thread.per_user.end;
+			-1UL : child->thread.per_user.end;
 	else if (addr == (addr_t) &dummy->bits)
 		/* Single-step bit. */
 		return test_thread_flag(TIF_SINGLE_STEP) ?
@@ -495,8 +495,6 @@ long arch_ptrace(struct task_struct *child, long request,
 		}
 		return 0;
 	default:
-		/* Removing high order bit from addr (only for 31 bit). */
-		addr &= PSW_ADDR_INSN;
 		return ptrace_request(child, request, addr, data);
 	}
 }
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 52aab0bd8..89ea8c213 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -9,60 +9,66 @@
 #include <asm/sigp.h>
 #
-# store_status
+# Issue "store status" for the current CPU to its prefix page
+# and call passed function afterwards
 #
-# Prerequisites to run this function:
-# - Prefix register is set to zero
-# - Original prefix register is stored in "dump_prefix_page"
-# - Lowcore protection is off
+# r2 = Function to be called after store status
+# r3 = Parameter for function
 #
 ENTRY(store_status)
 	/* Save register one and load save area base */
 	stg	%r1,__LC_SAVE_AREA_RESTART
-	lghi	%r1,SAVE_AREA_BASE
 	/* General purpose registers */
-	stmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	lg	%r2,__LC_SAVE_AREA_RESTART
-	stg	%r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1)
+	lghi	%r1,__LC_GPREGS_SAVE_AREA
+	stmg	%r0,%r15,0(%r1)
+	mvc	8(8,%r1),__LC_SAVE_AREA_RESTART
 	/* Control registers */
-	stctg	%c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	lghi	%r1,__LC_CREGS_SAVE_AREA
+	stctg	%c0,%c15,0(%r1)
 	/* Access registers */
-	stam	%a0,%a15,__LC_AREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	lghi	%r1,__LC_AREGS_SAVE_AREA
+	stam	%a0,%a15,0(%r1)
 	/* Floating point registers */
-	std	%f0, 0x00 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f1, 0x08 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f2, 0x10 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f3, 0x18 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f4, 0x20 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f5, 0x28 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f6, 0x30 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f7, 0x38 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f8, 0x40 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f9, 0x48 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f10,0x50 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f11,0x58 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f12,0x60 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f13,0x68 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f14,0x70 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	std	%f15,0x78 + __LC_FPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	lghi	%r1,__LC_FPREGS_SAVE_AREA
+	std	%f0, 0x00(%r1)
+	std	%f1, 0x08(%r1)
+	std	%f2, 0x10(%r1)
+	std	%f3, 0x18(%r1)
+	std	%f4, 0x20(%r1)
+	std	%f5, 0x28(%r1)
+	std	%f6, 0x30(%r1)
+	std	%f7, 0x38(%r1)
+	std	%f8, 0x40(%r1)
+	std	%f9, 0x48(%r1)
+	std	%f10,0x50(%r1)
+	std	%f11,0x58(%r1)
+	std	%f12,0x60(%r1)
+	std	%f13,0x68(%r1)
+	std	%f14,0x70(%r1)
+	std	%f15,0x78(%r1)
 	/* Floating point control register */
-	stfpc	__LC_FP_CREG_SAVE_AREA-SAVE_AREA_BASE(%r1)
+	lghi	%r1,__LC_FP_CREG_SAVE_AREA
+	stfpc	0(%r1)
 	/* CPU timer */
-	stpt	__LC_CPU_TIMER_SAVE_AREA-SAVE_AREA_BASE(%r1)
-	/* Saved prefix register */
-	larl	%r2,dump_prefix_page
-	mvc	__LC_PREFIX_SAVE_AREA-SAVE_AREA_BASE(4,%r1),0(%r2)
+	lghi	%r1,__LC_CPU_TIMER_SAVE_AREA
+	stpt	0(%r1)
+	/* Store prefix register */
+	lghi	%r1,__LC_PREFIX_SAVE_AREA
+	stpx	0(%r1)
 	/* Clock comparator - seven bytes */
-	larl	%r2,.Lclkcmp
-	stckc	0(%r2)
-	mvc	__LC_CLOCK_COMP_SAVE_AREA-SAVE_AREA_BASE + 1(7,%r1),1(%r2)
+	lghi	%r1,__LC_CLOCK_COMP_SAVE_AREA
+	larl	%r4,.Lclkcmp
+	stckc	0(%r4)
+	mvc	1(7,%r1),1(%r4)
 	/* Program status word */
-	epsw	%r2,%r3
-	st	%r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 0(%r1)
-	st	%r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
-	larl	%r2,store_status
-	stg	%r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
-	br	%r14
+	lghi	%r1,__LC_PSW_SAVE_AREA
+	epsw	%r4,%r5
+	st	%r4,0(%r1)
+	st	%r5,4(%r1)
+	stg	%r2,8(%r1)
+	lgr	%r1,%r2
+	lgr	%r2,%r3
+	br	%r1
 	.section .bss
 	.align	8
@@ -77,9 +83,11 @@ ENTRY(store_status)
 ENTRY(do_reipl_asm)
 	basr	%r13,0
 .Lpg0:	lpswe	.Lnewpsw-.Lpg0(%r13)
-.Lpg1:	brasl	%r14,store_status
+.Lpg1:	lgr	%r3,%r2
+	larl	%r2,.Lstatus
+	brasl	%r14,store_status
-	lctlg	%c6,%c6,.Lall-.Lpg0(%r13)
+.Lstatus: lctlg	%c6,%c6,.Lall-.Lpg0(%r13)
 	lgr	%r1,%r2
 	mvc	__LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
 	stsch	.Lschib-.Lpg0(%r13)
diff --git a/arch/s390/kernel/sclp.c b/arch/s390/kernel/sclp.c
index 9fe7781a4..d88db40bd 100644
--- a/arch/s390/kernel/sclp.c
+++ b/arch/s390/kernel/sclp.c
@@ -9,7 +9,11 @@
 #include <asm/processor.h>
 #include <asm/sclp.h>
+#define EVTYP_VT220MSG_MASK	0x00000040
+#define EVTYP_MSG_MASK		0x40000000
+
 static char _sclp_work_area[4096] __aligned(PAGE_SIZE);
+static bool have_vt220, have_linemode;
 static void _sclp_wait_int(void)
 {
@@ -68,7 +72,7 @@ static int _sclp_setup(int disable)
 		0x00, 0x1c,
 		0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 		0x00, 0x00, 0x00, 0x04,
-		0x80, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00,
+		0x80, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40,
 		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
 	};
 	unsigned int *masks;
@@ -82,13 +86,13 @@ static int _sclp_setup(int disable)
 	rc = _sclp_servc(0x00780005, _sclp_work_area);
 	if (rc)
 		return rc;
-	if ((masks[0] & masks[3]) != masks[0] ||
-	    (masks[1] & masks[2]) != masks[1])
-		return -EIO;
+	have_vt220 = masks[2] & EVTYP_VT220MSG_MASK;
+	have_linemode = masks[2] & EVTYP_MSG_MASK;
 	return 0;
 }
-static int _sclp_print(const char *str)
+/* Output multi-line text using SCLP Message interface. */
+static void _sclp_print_lm(const char *str)
 {
 	static unsigned char write_head[] = {
 		/* sccb header */
@@ -143,18 +147,49 @@ static int _sclp_print(const char *str)
 	} while (ch != 0);
 	/* SCLP write data */
-	return _sclp_servc(0x00760005, _sclp_work_area);
+	_sclp_servc(0x00760005, _sclp_work_area);
 }
-int _sclp_print_early(const char *str)
+/* Output multi-line text (plus a newline) using SCLP VT220
+ * interface.
+ */
+static void _sclp_print_vt220(const char *str)
 {
-	int rc;
+	static unsigned char const write_head[] = {
+		/* sccb header */
+		0x00, 0x0e,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		/* evbuf header */
+		0x00, 0x06,
+		0x1a, 0x00, 0x00, 0x00,
+	};
+	size_t len = strlen(str);
-	rc = _sclp_setup(0);
-	if (rc)
-		return rc;
-	rc = _sclp_print(str);
-	if (rc)
-		return rc;
-	return _sclp_setup(1);
+	if (sizeof(write_head) + len >= sizeof(_sclp_work_area))
+		len = sizeof(_sclp_work_area) - sizeof(write_head) - 1;
+
+	memcpy(_sclp_work_area, write_head, sizeof(write_head));
+	memcpy(_sclp_work_area + sizeof(write_head), str, len);
+	_sclp_work_area[sizeof(write_head) + len] = '\n';
+
+	/* Update length fields in evbuf and sccb headers */
+	*(unsigned short *)(_sclp_work_area + 8) += len + 1;
+	*(unsigned short *)(_sclp_work_area + 0) += len + 1;
+
+	/* SCLP write data */
+	(void)_sclp_servc(0x00760005, _sclp_work_area);
+}
+
+/* Output one or more lines of text on the SCLP console (VT220 and /
+ * or line-mode). All lines get terminated; no need for a trailing LF.
+ */
+void _sclp_print_early(const char *str)
+{
+	if (_sclp_setup(0) != 0)
+		return;
+	if (have_linemode)
+		_sclp_print_lm(str);
+	if (have_vt220)
+		_sclp_print_vt220(str);
+	_sclp_setup(1);
 }
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index c837bcacf..9220db5c9 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -80,6 +80,8 @@ EXPORT_SYMBOL(console_irq);
 unsigned long elf_hwcap __read_mostly = 0;
 char elf_platform[ELF_PLATFORM_SIZE];
+unsigned long int_hwcap = 0;
+
 int __initdata memory_end_set;
 unsigned long __initdata memory_end;
 unsigned long __initdata max_physmem_end;
@@ -97,7 +99,7 @@ unsigned long MODULES_VADDR;
 unsigned long MODULES_END;
 /* An array with a pointer to the lowcore of every CPU. */
-struct _lowcore *lowcore_ptr[NR_CPUS];
+struct lowcore *lowcore_ptr[NR_CPUS];
 EXPORT_SYMBOL(lowcore_ptr);
 /*
@@ -291,33 +293,29 @@ void *restart_stack __attribute__((__section__(".data")));
 static void __init setup_lowcore(void)
 {
-	struct _lowcore *lc;
+	struct lowcore *lc;
 	/*
 	 * Setup lowcore for boot cpu
 	 */
-	BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
+	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
 	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
 	lc->restart_psw.mask = PSW_KERNEL_BITS;
-	lc->restart_psw.addr =
-		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
+	lc->restart_psw.addr = (unsigned long) restart_int_handler;
 	lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK;
-	lc->external_new_psw.addr =
-		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
+	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
 	lc->svc_new_psw.mask = PSW_KERNEL_BITS |
 		PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
-	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
+	lc->svc_new_psw.addr = (unsigned long) system_call;
 	lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK;
-	lc->program_new_psw.addr =
-		PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
+	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
 	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
-	lc->mcck_new_psw.addr =
-		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
+	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
 	lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK;
-	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
+	lc->io_new_psw.addr = (unsigned long) io_int_handler;
 	lc->clock_comparator = -1ULL;
 	lc->kernel_stack = ((unsigned long) &init_thread_union)
 		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
@@ -661,15 +659,6 @@ static void __init reserve_kernel(void)
 #endif
 }
-static void __init reserve_elfcorehdr(void)
-{
-#ifdef CONFIG_CRASH_DUMP
-	if (is_kdump_kernel())
-		memblock_reserve(elfcorehdr_addr - OLDMEM_BASE,
-				 PAGE_ALIGN(elfcorehdr_size));
-#endif
-}
-
 static void __init setup_memory(void)
 {
 	struct memblock_region *reg;
@@ -793,6 +782,13 @@ static int __init setup_hwcaps(void)
 		strcpy(elf_platform, "z13");
 		break;
 	}
+
+	/*
+	 * Virtualization support HWCAP_INT_SIE is bit 0.
+	 */
+	if (sclp.has_sief2)
+		int_hwcap |= HWCAP_INT_SIE;
+
 	return 0;
 }
 arch_initcall(setup_hwcaps);
@@ -841,6 +837,11 @@ void __init setup_arch(char **cmdline_p)
 	init_mm.brk = (unsigned long) &_end;
 	parse_early_param();
+#ifdef CONFIG_CRASH_DUMP
+	/* Deactivate elfcorehdr= kernel parameter */
+	elfcorehdr_addr = ELFCORE_ADDR_MAX;
+#endif
+
 	os_info_init();
 	setup_ipl();
@@ -849,7 +850,6 @@ void __init setup_arch(char **cmdline_p)
 	reserve_oldmem();
 	reserve_kernel();
 	reserve_initrd();
-	reserve_elfcorehdr();
 	memblock_allow_resize();
 	/* Get information about *all* installed memory */
@@ -870,11 +870,13 @@ void __init setup_arch(char **cmdline_p)
 	check_initrd();
 	reserve_crashkernel();
+#ifdef CONFIG_CRASH_DUMP
 	/*
 	 * Be aware that smp_save_dump_cpus() triggers a system reset.
 	 * Therefore CPU and device initialization should be done afterwards.
 	 */
 	smp_save_dump_cpus();
+#endif
 	setup_resources();
 	setup_vmcoreinfo();
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 028cc46cb..d82562cf0 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -331,13 +331,13 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 	/* Set up to return from userspace.  If provided, use a stub
 	   already in userspace.  */
 	if (ka->sa.sa_flags & SA_RESTORER) {
-		restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE;
+		restorer = (unsigned long) ka->sa.sa_restorer;
 	} else {
 		/* Signal frame without vector registers are short ! */
 		__u16 __user *svc = (void __user *) frame + frame_size - 2;
 		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
 			return -EFAULT;
-		restorer = (unsigned long) svc | PSW_ADDR_AMODE;
+		restorer = (unsigned long) svc;
 	}
 	/* Set up registers for signal handler */
@@ -347,7 +347,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
 	regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
 		(PSW_USER_BITS & PSW_MASK_ASC) |
 		(regs->psw.mask & ~PSW_MASK_ASC);
-	regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
+	regs->psw.addr = (unsigned long) ka->sa.sa_handler;
 	regs->gprs[2] = sig;
 	regs->gprs[3] = (unsigned long) &frame->sc;
@@ -394,13 +394,12 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 	/* Set up to return from userspace.  If provided, use a stub
 	   already in userspace.  */
 	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
-		restorer = (unsigned long)
-			ksig->ka.sa.sa_restorer | PSW_ADDR_AMODE;
+		restorer = (unsigned long) ksig->ka.sa.sa_restorer;
 	} else {
 		__u16 __user *svc = &frame->svc_insn;
 		if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc))
 			return -EFAULT;
-		restorer = (unsigned long) svc | PSW_ADDR_AMODE;
+		restorer = (unsigned long) svc;
 	}
 	/* Create siginfo on the signal stack */
@@ -426,7 +425,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
 	regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
 		(PSW_USER_BITS & PSW_MASK_ASC) |
 		(regs->psw.mask & ~PSW_MASK_ASC);
-	regs->psw.addr = (unsigned long) ksig->ka.sa.sa_handler | PSW_ADDR_AMODE;
+	regs->psw.addr = (unsigned long) ksig->ka.sa.sa_handler;
 	regs->gprs[2] = ksig->sig;
 	regs->gprs[3] = (unsigned long) &frame->info;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 9062df575..3c65a8eae 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -64,8 +64,9 @@ enum {
 static DEFINE_PER_CPU(struct cpu *, cpu_device);
 struct pcpu {
-	struct _lowcore *lowcore;	/* lowcore page(s) for the cpu */
+	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
 	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
+	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
 	signed char state;		/* physical cpu state */
 	signed char polarization;	/* physical polarization */
 	u16 address;			/* physical cpu address */
@@ -80,6 +81,10 @@ EXPORT_SYMBOL(smp_cpu_mt_shift);
 unsigned int smp_cpu_mtid;
 EXPORT_SYMBOL(smp_cpu_mtid);
+#ifdef CONFIG_CRASH_DUMP
+__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
+#endif
+
 static unsigned int smp_max_threads __initdata = -1U;
 static int __init early_nosmt(char *s)
 {
@@ -105,8 +110,7 @@ DEFINE_MUTEX(smp_cpu_state_mutex);
 /*
  * Signal processor helper functions.
  */
-static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm,
-				    u32 *status)
+static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
 {
 	int cc;
@@ -171,6 +175,7 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
 	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
 		return;
 	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
+	pcpu->ec_clk = get_tod_clock_fast();
 	pcpu_sigp_retry(pcpu, order, 0);
 }
@@ -180,10 +185,10 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 {
 	unsigned long async_stack, panic_stack;
-	struct _lowcore *lc;
+	struct lowcore *lc;
 	if (pcpu != &pcpu_devices[0]) {
-		pcpu->lowcore =	(struct _lowcore *)
+		pcpu->lowcore =	(struct lowcore *)
 			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
 		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
 		panic_stack = __get_free_page(GFP_KERNEL);
@@ -235,7 +240,7 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
 static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 {
-	struct _lowcore *lc = pcpu->lowcore;
+	struct lowcore *lc = pcpu->lowcore;
 	if (MACHINE_HAS_TLB_LC)
 		cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
@@ -255,7 +260,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
 {
-	struct _lowcore *lc = pcpu->lowcore;
+	struct lowcore *lc = pcpu->lowcore;
 	struct thread_info *ti = task_thread_info(tsk);
 	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
@@ -271,7 +276,7 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
 static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
 {
-	struct _lowcore *lc = pcpu->lowcore;
+	struct lowcore *lc = pcpu->lowcore;
 	lc->restart_stack = lc->kernel_stack;
 	lc->restart_fn = (unsigned long) func;
@@ -286,7 +291,7 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
 static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
 			  void *data, unsigned long stack)
 {
-	struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
+	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
 	unsigned long source_cpu = stap();
 	__load_psw_mask(PSW_KERNEL_BITS);
@@ -538,53 +543,24 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
 #ifdef CONFIG_CRASH_DUMP
-static void __init __smp_store_cpu_state(struct save_area_ext *sa_ext,
-					 u16 address, int is_boot_cpu)
-{
-	void *lc = (void *)(unsigned long) store_prefix();
-	unsigned long vx_sa;
-
-	if (is_boot_cpu) {
-		/* Copy the registers of the boot CPU. */
-		copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
-				 SAVE_AREA_BASE - PAGE_SIZE, 0);
-		if (MACHINE_HAS_VX)
-			save_vx_regs_safe(sa_ext->vx_regs);
-		return;
-	}
-	/* Get the registers of a non-boot cpu. */
-	__pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
-	memcpy_real(&sa_ext->sa, lc + SAVE_AREA_BASE, sizeof(sa_ext->sa));
-	if (!MACHINE_HAS_VX)
-		return;
-	/* Get the VX registers */
-	vx_sa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-	if (!vx_sa)
-		panic("could not allocate memory for VX save area\n");
-	__pcpu_sigp_relax(address, SIGP_STORE_ADDITIONAL_STATUS, vx_sa, NULL);
-	memcpy(sa_ext->vx_regs, (void *) vx_sa, sizeof(sa_ext->vx_regs));
-	memblock_free(vx_sa, PAGE_SIZE);
-}
-
 int smp_store_status(int cpu)
 {
-	unsigned long vx_sa;
-	struct pcpu *pcpu;
+	struct pcpu *pcpu = pcpu_devices + cpu;
+	unsigned long pa;
-	pcpu = pcpu_devices + cpu;
-	if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
-			      0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
+	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
+	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
+			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
 		return -EIO;
 	if (!MACHINE_HAS_VX)
 		return 0;
-	vx_sa = __pa(pcpu->lowcore->vector_save_area_addr);
-	__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
-			  vx_sa, NULL);
+	pa = __pa(pcpu->lowcore->vector_save_area_addr);
+	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
+			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
+		return -EIO;
 	return 0;
 }
-#endif /* CONFIG_CRASH_DUMP */
-
 /*
  * Collect CPU state of the previous, crashed system.
  * There are four cases:
@@ -593,7 +569,7 @@ int smp_store_status(int cpu)
  *    The state for all CPUs except the boot CPU needs to be collected
  *    with sigp stop-and-store-status. The boot CPU state is located in
  *    the absolute lowcore of the memory stored in the HSA. The zcore code
- *    will allocate the save area and copy the boot CPU state from the HSA.
+ *    will copy the boot CPU state from the HSA.
  * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
  *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
  *    The state for all CPUs except the boot CPU needs to be collected
@@ -608,55 +584,76 @@ int smp_store_status(int cpu)
  *    stored the registers of the boot CPU in the memory of the old system.
  * 4) kdump and the old kernel stored the CPU state
  *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
- *    The state of all CPUs is stored in ELF sections in the memory of the
- *    old system. The ELF sections are picked up by the crash_dump code
- *    via elfcorehdr_addr.
+ *    This case does not exist for s390 anymore, setup_arch explicitly
+ *    deactivates the elfcorehdr= kernel parameter
 */
+static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
+				     bool is_boot_cpu, unsigned long page)
+{
+	__vector128 *vxrs = (__vector128 *) page;
+
+	if (is_boot_cpu)
+		vxrs = boot_cpu_vector_save_area;
+	else
+		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
+	save_area_add_vxrs(sa, vxrs);
+}
+
+static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
+				     bool is_boot_cpu, unsigned long page)
+{
+	void *regs = (void *) page;
+
+	if (is_boot_cpu)
+		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
+	else
+		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
+	save_area_add_regs(sa, regs);
+}
+
 void __init smp_save_dump_cpus(void)
 {
-#ifdef CONFIG_CRASH_DUMP
-	int addr, cpu, boot_cpu_addr, max_cpu_addr;
-	struct save_area_ext *sa_ext;
+	int addr, boot_cpu_addr, max_cpu_addr;
+	struct save_area *sa;
+	unsigned long page;
 	bool is_boot_cpu;
-	if (is_kdump_kernel())
-		/* Previous system stored the CPU states. Nothing to do. */
-		return;
 	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
 		/* No previous system present, normal boot. */
 		return;
+	/* Allocate a page as dumping area for the store status sigps */
+	page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
 	/* Set multi-threading state to the previous system. */
 	pcpu_set_smt(sclp.mtid_prev);
-	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
-	for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++) {
-		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0, NULL) ==
-		    SIGP_CC_NOT_OPERATIONAL)
-			continue;
-		cpu += 1;
-	}
-	dump_save_areas.areas = (void *)memblock_alloc(sizeof(void *) * cpu, 8);
-	dump_save_areas.count = cpu;
 	boot_cpu_addr = stap();
-	for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++) {
-		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0, NULL) ==
+	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
+	for (addr = 0; addr <= max_cpu_addr; addr++) {
+		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
 		    SIGP_CC_NOT_OPERATIONAL)
 			continue;
-		sa_ext = (void *) memblock_alloc(sizeof(*sa_ext), 8);
-		dump_save_areas.areas[cpu] = sa_ext;
-		if (!sa_ext)
-			panic("could not allocate memory for save area\n");
 		is_boot_cpu = (addr == boot_cpu_addr);
-		cpu += 1;
-		if (is_boot_cpu && !OLDMEM_BASE)
-			/* Skip boot CPU for standard zfcp dump. */
-			continue;
-		/* Get state for this CPU. */
-		__smp_store_cpu_state(sa_ext, addr, is_boot_cpu);
+		/* Allocate save area */
+		sa = save_area_alloc(is_boot_cpu);
+		if (!sa)
+			panic("could not allocate memory for save area\n");
+		if (MACHINE_HAS_VX)
+			/* Get the vector registers */
+			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
+		/*
+		 * For a zfcp dump OLDMEM_BASE == NULL and the registers
+		 * of the boot CPU are stored in the HSA. To retrieve
+		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
+		 */
+		if (!is_boot_cpu || OLDMEM_BASE)
+			/* Get the CPU registers */
+			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
 	}
+	memblock_free(page, PAGE_SIZE);
 	diag308_reset();
 	pcpu_set_smt(0);
-#endif /* CONFIG_CRASH_DUMP */
 }
+#endif /* CONFIG_CRASH_DUMP */
 void smp_cpu_set_polarization(int cpu, int val)
 {
@@ -680,7 +677,7 @@ static struct sclp_core_info *smp_get_core_info(void)
 	for (address = 0;
 	     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
 	     address += (1U << smp_cpu_mt_shift)) {
-		if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
+		if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
 		    SIGP_CC_NOT_OPERATIONAL)
 			continue;
 		info->core[info->configured].core_id =
@@ -924,7 +921,7 @@ void __init smp_prepare_boot_cpu(void)
 	pcpu->state = CPU_STATE_CONFIGURED;
 	pcpu->address = stap();
-	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
+	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
 	set_cpu_present(0, true);
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 1785cd822..8f64ebd63 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -21,12 +21,11 @@ static unsigned long save_context_stack(struct stack_trace *trace,
 	unsigned long addr;
 	while(1) {
-		sp &= PSW_ADDR_INSN;
 		if (sp < low || sp > high)
 			return sp;
 		sf = (struct stack_frame *)sp;
 		while(1) {
-			addr = sf->gprs[8] & PSW_ADDR_INSN;
+			addr = sf->gprs[8];
 			if (!trace->skip)
 				trace->entries[trace->nr_entries++] = addr;
 			else
@@ -34,7 +33,7 @@ static unsigned long save_context_stack(struct stack_trace *trace,
 			if (trace->nr_entries >= trace->max_entries)
 				return sp;
 			low = sp;
-			sp = sf->back_chain & PSW_ADDR_INSN;
+			sp = sf->back_chain;
 			if (!sp)
 				break;
 			if (sp <= low || sp > high - sizeof(*sf))
@@ -46,7 +45,7 @@ static unsigned long save_context_stack(struct stack_trace *trace,
 		if (sp <= low || sp > high - sizeof(*regs))
 			return sp;
 		regs = (struct pt_regs *)sp;
-		addr = regs->psw.addr & PSW_ADDR_INSN;
+		addr = regs->psw.addr;
 		if (savesched || !in_sched_functions(addr)) {
 			if (!trace->skip)
 				trace->entries[trace->nr_entries++] = addr;
@@ -60,33 +59,43 @@ static unsigned long save_context_stack(struct stack_trace *trace,
 	}
 }
-void save_stack_trace(struct stack_trace *trace)
+static void __save_stack_trace(struct stack_trace *trace, unsigned long sp)
 {
-	register unsigned long sp asm ("15");
-	unsigned long orig_sp, new_sp;
+	unsigned long new_sp, frame_size;
-	orig_sp = sp & PSW_ADDR_INSN;
-	new_sp = save_context_stack(trace, orig_sp,
-				    S390_lowcore.panic_stack - PAGE_SIZE,
-				    S390_lowcore.panic_stack, 1);
-	if (new_sp != orig_sp)
-		return;
+	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+	new_sp = save_context_stack(trace, sp,
			S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
			S390_lowcore.panic_stack + frame_size, 1);
 	new_sp = save_context_stack(trace, new_sp,
-				    S390_lowcore.async_stack - ASYNC_SIZE,
-				    S390_lowcore.async_stack, 1);
-	if (new_sp != orig_sp)
-		return;
+			S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+			S390_lowcore.async_stack + frame_size, 1);
 	save_context_stack(trace, new_sp,
 			   S390_lowcore.thread_info,
 			   S390_lowcore.thread_info + THREAD_SIZE, 1);
 }
+
+void save_stack_trace(struct stack_trace *trace)
+{
+	register unsigned long r15 asm ("15");
+	unsigned long sp;
+
+	sp = r15;
+	__save_stack_trace(trace, sp);
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
 EXPORT_SYMBOL_GPL(save_stack_trace);
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
 	unsigned long sp, low, high;
-	sp = tsk->thread.ksp & PSW_ADDR_INSN;
+	sp = tsk->thread.ksp;
+	if (tsk == current) {
+		/* Get current stack pointer. */
+		asm volatile("la %0,0(15)" : "=a" (sp));
+	}
 	low = (unsigned long) task_stack_page(tsk);
 	high = (unsigned long) task_pt_regs(tsk);
 	save_context_stack(trace, sp, low, high, 0);
@@ -94,3 +103,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+	unsigned long sp;
+
+	sp = kernel_stack_pointer(regs);
+	__save_stack_trace(trace, sp);
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_regs);
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 5378c3ea1..293d8b98f 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -383,3 +383,4 @@ SYSCALL(sys_recvfrom,compat_sys_recvfrom)
 SYSCALL(sys_recvmsg,compat_sys_recvmsg)
 SYSCALL(sys_shutdown,sys_shutdown)
 SYSCALL(sys_mlock2,compat_sys_mlock2)
+SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 99babea02..f7dba3887 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -111,8 +111,7 @@ static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
 static void stsi_15_1_x(struct seq_file *m, struct sysinfo_15_1_x *info)
 {
-	static int max_mnest;
-	int i, rc;
+	int i;
 	seq_putc(m, '\n');
 	if (!MACHINE_HAS_TOPOLOGY)
@@ -123,7 +122,7 @@ static void stsi_15_1_x(struct seq_file *m, struct sysinfo_15_1_x *info)
 	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
 		seq_printf(m, " %d", info->mag[i]);
 	seq_putc(m, '\n');
-#ifdef CONFIG_SCHED_MC
+#ifdef CONFIG_SCHED_TOPOLOGY
 	store_topology(info);
 	seq_printf(m, "CPU Topology SW:      ");
 	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
@@ -145,6 +144,10 @@ static void stsi_1_2_2(struct seq_file *m, struct sysinfo_1_2_2 *info)
 	seq_printf(m, "CPUs Configured:      %d\n", info->cpus_configured);
 	seq_printf(m, "CPUs Standby:         %d\n", info->cpus_standby);
 	seq_printf(m, "CPUs Reserved:        %d\n", info->cpus_reserved);
+	if (info->mt_installed) {
+		seq_printf(m, "CPUs G-MTID:          %d\n", info->mt_gtid);
+		seq_printf(m, "CPUs S-MTID:          %d\n", info->mt_stid);
+	}
 	/*
 	 * Sigh 2. According to the specification the alternate
 	 * capability field is a 32 bit floating point number
@@ -194,13 +197,10 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
 	seq_printf(m, "LPAR CPUs Reserved:   %d\n", info->cpus_reserved);
 	seq_printf(m, "LPAR CPUs Dedicated:  %d\n", info->cpus_dedicated);
 	seq_printf(m, "LPAR CPUs Shared:     %d\n", info->cpus_shared);
-	if (info->mt_installed & 0x80) {
-		seq_printf(m, "LPAR CPUs G-MTID:     %d\n",
-			   info->mt_general & 0x1f);
-		seq_printf(m, "LPAR CPUs S-MTID:     %d\n",
-			   info->mt_installed & 0x1f);
-		seq_printf(m, "LPAR CPUs PS-MTID:    %d\n",
-			   info->mt_psmtid & 0x1f);
+	if (info->mt_installed) {
+		seq_printf(m, "LPAR CPUs G-MTID:     %d\n", info->mt_gtid);
+		seq_printf(m, "LPAR CPUs S-MTID:     %d\n", info->mt_stid);
+		seq_printf(m, "LPAR CPUs PS-MTID:    %d\n", info->mt_psmtid);
 	}
 }
diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c
index 21a5df995..dde7654f5 100644
--- a/arch/s390/kernel/trace.c
+++ b/arch/s390/kernel/trace.c
@@ -18,6 +18,9 @@ void trace_s390_diagnose_norecursion(int diag_nr)
 	unsigned long flags;
 	unsigned int *depth;
+	/* Avoid lockdep recursion. */
+	if (IS_ENABLED(CONFIG_LOCKDEP))
+		return;
 	local_irq_save(flags);
 	depth = this_cpu_ptr(&diagnose_trace_depth);
 	if (*depth == 0) {
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 1b18118bb..017eb03da 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -32,8 +32,7 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
 		address = *(unsigned long *)(current->thread.trap_tdb + 24);
 	else
 		address = regs->psw.addr;
-	return (void __user *)
-		((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
+	return (void __user *) (address - (regs->int_code >> 16));
 }
 static inline void report_user_fault(struct pt_regs *regs, int signr)
@@ -46,7 +45,7 @@ static inline void report_user_fault(struct pt_regs *regs, int signr)
 		return;
 	printk("User process fault: interruption code %04x ilc:%d ",
 	       regs->int_code & 0xffff, regs->int_code >> 17);
-	print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
+	print_vma_addr("in ", regs->psw.addr);
 	printk("\n");
 	show_regs(regs);
 }
@@ -69,13 +68,13 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
 		report_user_fault(regs, si_signo);
 	} else {
 		const struct exception_table_entry *fixup;
-		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
+		fixup = search_exception_tables(regs->psw.addr);
 		if (fixup)
-			regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
+			regs->psw.addr = extable_fixup(fixup);
 		else {
 			enum bug_trap_type btt;
-			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
+			btt = report_bug(regs->psw.addr, regs);
 			if (btt == BUG_TRAP_TYPE_WARN)
 				return;
 			die(regs, str);
@@ -260,11 +259,8 @@ void vector_exception(struct pt_regs *regs)
 void data_exception(struct pt_regs *regs)
 {
-	__u16 __user *location;
 	int signal = 0;
-	location = get_trap_ip(regs);
-
 	save_fpu_regs();
 	if (current->thread.fpu.fpc & FPC_DXC_MASK)
 		signal = SIGFPE;
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 59eddb0e1..94495cac8 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -80,7 +80,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
 /*
  * Setup vdso data page.
  */
-static void vdso_init_data(struct vdso_data *vd)
+static void __init vdso_init_data(struct vdso_data *vd)
 {
 	vd->ectg_available = test_facility(31);
 }
@@ -90,9 +90,10 @@ static void vdso_init_data(struct vdso_data *vd)
 */
 #define SEGMENT_ORDER	2
-int vdso_alloc_per_cpu(struct _lowcore *lowcore)
+int vdso_alloc_per_cpu(struct lowcore *lowcore)
 {
 	unsigned long segment_table, page_table, page_frame;
+	struct vdso_per_cpu_data *vd;
 	u32 *psal, *aste;
 	int i;
@@ -107,6 +108,12 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
 	if (!segment_table || !page_table || !page_frame)
 		goto out;
+	/* Initialize per-cpu vdso data page */
+	vd = (struct vdso_per_cpu_data *) page_frame;
+	vd->cpu_nr = lowcore->cpu_nr;
+	vd->node_id = cpu_to_node(vd->cpu_nr);
+
+	/* Set up access register mode page table */
 	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
 		    PAGE_SIZE << SEGMENT_ORDER);
 	clear_table((unsigned long *) page_table, _PAGE_INVALID,
@@ -138,7 +145,7 @@ out:
 	return -ENOMEM;
 }
-void vdso_free_per_cpu(struct _lowcore *lowcore)
+void vdso_free_per_cpu(struct lowcore *lowcore)
 {
 	unsigned long segment_table, page_table, page_frame;
 	u32 *psal, *aste;
@@ -163,7 +170,7 @@ static void vdso_init_cr5(void)
 	if (!vdso_enabled)
 		return;
-	cr5 = offsetof(struct _lowcore, paste);
+	cr5 = offsetof(struct lowcore, paste);
 	__ctl_load(cr5, 5, 5);
 }
@@ -299,8 +306,6 @@ static int __init vdso_init(void)
 	get_page(virt_to_page(vdso_data));
-	smp_mb();
-
 	return 0;
 }
 early_initcall(vdso_init);
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index ee8a18e50..f9c459586 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -1,6 +1,6 @@
 # List of files in the vdso, has to be asm only for now
-obj-vdso32 = gettimeofday.o clock_getres.o clock_gettime.o note.o
+obj-vdso32 = gettimeofday.o clock_getres.o clock_gettime.o note.o getcpu.o
 # Build rules
diff --git a/arch/s390/kernel/vdso32/getcpu.S b/arch/s390/kernel/vdso32/getcpu.S
new file mode 100644
index 000000000..c1ed0b720
--- /dev/null
+++ b/arch/s390/kernel/vdso32/getcpu.S
@@ -0,0 +1,43 @@
+/*
+ * Userland implementation of getcpu() for 32 bits processes in a
+ * s390 kernel for use in the vDSO
+ *
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+#include <asm/vdso.h>
+#include <asm/asm-offsets.h>
+
+	.text
+	.align 4
+	.globl __kernel_getcpu
+	.type  __kernel_getcpu,@function
+__kernel_getcpu:
+	.cfi_startproc
+	ear	%r1,%a4
+	lhi	%r4,1
+	sll	%r4,24
+	sar	%a4,%r4
+	la	%r4,0
+	epsw	%r0,0
+	sacf	512
+	l	%r5,__VDSO_CPU_NR(%r4)
+	l	%r4,__VDSO_NODE_ID(%r4)
+	tml	%r0,0x4000
+	jo	1f
+	tml	%r0,0x8000
+	jno	0f
+	sacf	256
+	j	1f
+0:	sacf	0
+1:	sar	%a4,%r1
+	ltr	%r2,%r2
+	jz	2f
+	st	%r5,0(%r2)
+2:	ltr	%r3,%r3
+	jz	3f
+	st	%r4,0(%r3)
+3:	lhi	%r2,0
+	br	%r14
+	.cfi_endproc
+	.size	__kernel_getcpu,.-__kernel_getcpu
diff --git a/arch/s390/kernel/vdso32/vdso32.lds.S b/arch/s390/kernel/vdso32/vdso32.lds.S
index a8c379fa1..8f048c2d6 100644
--- a/arch/s390/kernel/vdso32/vdso32.lds.S
+++ b/arch/s390/kernel/vdso32/vdso32.lds.S
@@ -132,6 +132,7 @@ VERSION
 		__kernel_gettimeofday;
 		__kernel_clock_gettime;
 		__kernel_clock_getres;
+		__kernel_getcpu;
 	local: *;
 	};
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index c4b03f9ed..058659c1b 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -1,6 +1,6 @@
 # List of files in the vdso, has to be asm only for now
-obj-vdso64 = gettimeofday.o clock_getres.o clock_gettime.o note.o
+obj-vdso64 = gettimeofday.o clock_getres.o clock_gettime.o note.o getcpu.o
 # Build rules
diff --git a/arch/s390/kernel/vdso64/getcpu.S b/arch/s390/kernel/vdso64/getcpu.S
new file mode 100644
index 000000000..4cbe98291
--- /dev/null
+++ b/arch/s390/kernel/vdso64/getcpu.S
@@ -0,0 +1,42 @@
+/*
+ * Userland implementation of getcpu() for 64 bits processes in a
+ * s390 kernel for use in the vDSO
+ *
+ * Copyright IBM Corp. 2016
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+#include <asm/vdso.h>
+#include <asm/asm-offsets.h>
+
+	.text
+	.align 4
+	.globl __kernel_getcpu
+	.type  __kernel_getcpu,@function
+__kernel_getcpu:
+	.cfi_startproc
+	ear	%r1,%a4
+	llilh	%r4,0x0100
+	sar	%a4,%r4
+	la	%r4,0
+	epsw	%r0,0
+	sacf	512
+	l	%r5,__VDSO_CPU_NR(%r4)
+	l	%r4,__VDSO_NODE_ID(%r4)
+	tml	%r0,0x4000
+	jo	1f
+	tml	%r0,0x8000
+	jno	0f
+	sacf	256
+	j	1f
+0:	sacf	0
+1:	sar	%a4,%r1
+	ltgr	%r2,%r2
+	jz	2f
+	st	%r5,0(%r2)
+2:	ltgr	%r3,%r3
+	jz	3f
+	st	%r4,0(%r3)
+3:	lghi	%r2,0
+	br	%r14
+	.cfi_endproc
+	.size	__kernel_getcpu,.-__kernel_getcpu
diff --git a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso64/vdso64.lds.S
index 9f5979d10..f35455d49 100644
--- a/arch/s390/kernel/vdso64/vdso64.lds.S
+++ b/arch/s390/kernel/vdso64/vdso64.lds.S
@@ -132,6 +132,7 @@ VERSION
 		__kernel_gettimeofday;
 		__kernel_clock_gettime;
 		__kernel_clock_getres;
+		__kernel_getcpu;
 	local: *;
 	};
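
Note: the vdso32/vdso64 getcpu.S additions above expose a __kernel_getcpu entry fed from the new __VDSO_CPU_NR and __VDSO_NODE_ID per-cpu fields. A minimal userspace sketch that exercises the same information through the generic getcpu() syscall path is shown below; whether a given C library binds sched_getcpu() to the new vDSO symbol is not part of this commit and is assumed to be handled elsewhere.

	/* Hedged sketch: query current CPU and NUMA node via getcpu(2). */
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	int main(void)
	{
		unsigned int cpu = 0, node = 0;

		/* SYS_getcpu is the plain syscall; the vDSO fast path, if any,
		 * is selected by the C library, not by this program. */
		if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
			printf("running on cpu %u, node %u\n", cpu, node);
		else
			perror("getcpu");
		return 0;
	}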
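
Note: the syscalls.S hunk above wires up sys_copy_file_range as system call 375 on s390. The following userspace sketch invokes it by raw syscall number; the fallback define of 375 is taken from the table comment above, and newer unistd headers may already provide __NR_copy_file_range.

	/* Hedged sketch: copy one file to another with copy_file_range. */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef __NR_copy_file_range
	#define __NR_copy_file_range 375	/* s390 number from the syscall table above */
	#endif

	int main(int argc, char **argv)
	{
		int in, out;
		long ret;

		if (argc != 3)
			return 1;
		in = open(argv[1], O_RDONLY);
		out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
		if (in < 0 || out < 0)
			return 1;
		do {
			/* in-kernel copy, offsets advance automatically */
			ret = syscall(__NR_copy_file_range, in, NULL, out, NULL,
				      1024 * 1024, 0);
		} while (ret > 0);
		if (ret < 0)
			perror("copy_file_range");
		close(in);
		close(out);
		return ret < 0;
	}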
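
Note: the processor.c and setup.c hunks above add an int_hwcap word and report "sie" in the /proc/cpuinfo features line when sclp.has_sief2 is set. A small sketch to check for the flag from userspace; the "features" field name is assumed from the existing s390 /proc/cpuinfo layout, not from this diff.

	/* Hedged sketch: look for the "sie" feature in /proc/cpuinfo. */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[1024];
		FILE *f = fopen("/proc/cpuinfo", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		while (fgets(line, sizeof(line), f)) {
			/* the features line lists space-separated hwcap names */
			if (strncmp(line, "features", 8) == 0) {
				printf("sie reported: %s\n",
				       strstr(line, " sie") ? "yes" : "no");
				break;
			}
		}
		fclose(f);
		return 0;
	}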