author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-01-20 14:01:31 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-01-20 14:01:31 -0300
commit     b4b7ff4b08e691656c9d77c758fc355833128ac0 (patch)
tree       82fcb00e6b918026dc9f2d1f05ed8eee83874cc0 /arch/s390/kernel
parent     35acfa0fc609f2a2cd95cef4a6a9c3a5c38f1778 (diff)

Linux-libre 4.4-gnupck-4.4-gnu
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile          |   2
-rw-r--r--  arch/s390/kernel/asm-offsets.c     | 290
-rw-r--r--  arch/s390/kernel/compat_signal.c   |   7
-rw-r--r--  arch/s390/kernel/compat_wrapper.c  |   1
-rw-r--r--  arch/s390/kernel/cpcmd.c           |   2
-rw-r--r--  arch/s390/kernel/crash_dump.c      |  16
-rw-r--r--  arch/s390/kernel/diag.c            | 134
-rw-r--r--  arch/s390/kernel/dis.c             |  17
-rw-r--r--  arch/s390/kernel/early.c           |  15
-rw-r--r--  arch/s390/kernel/entry.S           | 230
-rw-r--r--  arch/s390/kernel/entry.h           |   3
-rw-r--r--  arch/s390/kernel/head.S            |  95
-rw-r--r--  arch/s390/kernel/head64.S          |   7
-rw-r--r--  arch/s390/kernel/ipl.c             |  74
-rw-r--r--  arch/s390/kernel/irq.c             |   1
-rw-r--r--  arch/s390/kernel/nmi.c             | 120
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c    |  35
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c    |  10
-rw-r--r--  arch/s390/kernel/process.c         |  43
-rw-r--r--  arch/s390/kernel/processor.c       |   5
-rw-r--r--  arch/s390/kernel/ptrace.c          |  44
-rw-r--r--  arch/s390/kernel/runtime_instr.c   |  64
-rw-r--r--  arch/s390/kernel/s390_ksyms.c      |   3
-rw-r--r--  arch/s390/kernel/sclp.c            |   2
-rw-r--r--  arch/s390/kernel/setup.c           |   3
-rw-r--r--  arch/s390/kernel/signal.c          |   7
-rw-r--r--  arch/s390/kernel/smp.c             |  10
-rw-r--r--  arch/s390/kernel/syscalls.S        |   1
-rw-r--r--  arch/s390/kernel/time.c            |  31
-rw-r--r--  arch/s390/kernel/topology.c        |  28
-rw-r--r--  arch/s390/kernel/trace.c           |  29
-rw-r--r--  arch/s390/kernel/traps.c           |  41
-rw-r--r--  arch/s390/kernel/vdso.c            |   2
33 files changed, 704 insertions, 668 deletions
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index b756c6348..dc167a23b 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -66,6 +66,8 @@ obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o
+obj-$(CONFIG_TRACEPOINTS) += trace.o
+
# vdso
obj-y += vdso64/
obj-$(CONFIG_COMPAT) += vdso32/
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 3aeeb1b56..9cd248f63 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -23,59 +23,64 @@
int main(void)
{
- DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack));
- DEFINE(__TASK_thread, offsetof(struct task_struct, thread));
- DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
+ /* task struct offsets */
+ OFFSET(__TASK_thread_info, task_struct, stack);
+ OFFSET(__TASK_thread, task_struct, thread);
+ OFFSET(__TASK_pid, task_struct, pid);
BLANK();
- DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
- DEFINE(__THREAD_FPU_fpc, offsetof(struct thread_struct, fpu.fpc));
- DEFINE(__THREAD_FPU_flags, offsetof(struct thread_struct, fpu.flags));
- DEFINE(__THREAD_FPU_regs, offsetof(struct thread_struct, fpu.regs));
- DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
- DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
- DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
- DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
+ /* thread struct offsets */
+ OFFSET(__THREAD_ksp, thread_struct, ksp);
+ OFFSET(__THREAD_FPU_fpc, thread_struct, fpu.fpc);
+ OFFSET(__THREAD_FPU_regs, thread_struct, fpu.regs);
+ OFFSET(__THREAD_per_cause, thread_struct, per_event.cause);
+ OFFSET(__THREAD_per_address, thread_struct, per_event.address);
+ OFFSET(__THREAD_per_paid, thread_struct, per_event.paid);
+ OFFSET(__THREAD_trap_tdb, thread_struct, trap_tdb);
BLANK();
- DEFINE(__TI_task, offsetof(struct thread_info, task));
- DEFINE(__TI_flags, offsetof(struct thread_info, flags));
- DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table));
- DEFINE(__TI_cpu, offsetof(struct thread_info, cpu));
- DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
- DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer));
- DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer));
- DEFINE(__TI_last_break, offsetof(struct thread_info, last_break));
+ /* thread info offsets */
+ OFFSET(__TI_task, thread_info, task);
+ OFFSET(__TI_flags, thread_info, flags);
+ OFFSET(__TI_sysc_table, thread_info, sys_call_table);
+ OFFSET(__TI_cpu, thread_info, cpu);
+ OFFSET(__TI_precount, thread_info, preempt_count);
+ OFFSET(__TI_user_timer, thread_info, user_timer);
+ OFFSET(__TI_system_timer, thread_info, system_timer);
+ OFFSET(__TI_last_break, thread_info, last_break);
BLANK();
- DEFINE(__PT_ARGS, offsetof(struct pt_regs, args));
- DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
- DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs));
- DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2));
- DEFINE(__PT_INT_CODE, offsetof(struct pt_regs, int_code));
- DEFINE(__PT_INT_PARM, offsetof(struct pt_regs, int_parm));
- DEFINE(__PT_INT_PARM_LONG, offsetof(struct pt_regs, int_parm_long));
- DEFINE(__PT_FLAGS, offsetof(struct pt_regs, flags));
+ /* pt_regs offsets */
+ OFFSET(__PT_ARGS, pt_regs, args);
+ OFFSET(__PT_PSW, pt_regs, psw);
+ OFFSET(__PT_GPRS, pt_regs, gprs);
+ OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2);
+ OFFSET(__PT_INT_CODE, pt_regs, int_code);
+ OFFSET(__PT_INT_PARM, pt_regs, int_parm);
+ OFFSET(__PT_INT_PARM_LONG, pt_regs, int_parm_long);
+ OFFSET(__PT_FLAGS, pt_regs, flags);
DEFINE(__PT_SIZE, sizeof(struct pt_regs));
BLANK();
- DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
- DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs));
- DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1));
+ /* stack_frame offsets */
+ OFFSET(__SF_BACKCHAIN, stack_frame, back_chain);
+ OFFSET(__SF_GPRS, stack_frame, gprs);
+ OFFSET(__SF_EMPTY, stack_frame, empty1);
BLANK();
/* timeval/timezone offsets for use by vdso */
- DEFINE(__VDSO_UPD_COUNT, offsetof(struct vdso_data, tb_update_count));
- DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp));
- DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec));
- DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec));
- DEFINE(__VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec));
- DEFINE(__VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec));
- DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec));
- DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
- DEFINE(__VDSO_WTOM_CRS_SEC, offsetof(struct vdso_data, wtom_coarse_sec));
- DEFINE(__VDSO_WTOM_CRS_NSEC, offsetof(struct vdso_data, wtom_coarse_nsec));
- DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
- DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
- DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult));
- DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift));
- DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base));
- DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time));
+ OFFSET(__VDSO_UPD_COUNT, vdso_data, tb_update_count);
+ OFFSET(__VDSO_XTIME_STAMP, vdso_data, xtime_tod_stamp);
+ OFFSET(__VDSO_XTIME_SEC, vdso_data, xtime_clock_sec);
+ OFFSET(__VDSO_XTIME_NSEC, vdso_data, xtime_clock_nsec);
+ OFFSET(__VDSO_XTIME_CRS_SEC, vdso_data, xtime_coarse_sec);
+ OFFSET(__VDSO_XTIME_CRS_NSEC, vdso_data, xtime_coarse_nsec);
+ OFFSET(__VDSO_WTOM_SEC, vdso_data, wtom_clock_sec);
+ OFFSET(__VDSO_WTOM_NSEC, vdso_data, wtom_clock_nsec);
+ OFFSET(__VDSO_WTOM_CRS_SEC, vdso_data, wtom_coarse_sec);
+ OFFSET(__VDSO_WTOM_CRS_NSEC, vdso_data, wtom_coarse_nsec);
+ OFFSET(__VDSO_TIMEZONE, vdso_data, tz_minuteswest);
+ OFFSET(__VDSO_ECTG_OK, vdso_data, ectg_available);
+ OFFSET(__VDSO_TK_MULT, vdso_data, tk_mult);
+ OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
+ OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
+ OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
+ BLANK();
/* constants used by the vdso */
DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
@@ -86,102 +91,105 @@ int main(void)
DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
BLANK();
/* idle data offsets */
- DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter));
- DEFINE(__CLOCK_IDLE_EXIT, offsetof(struct s390_idle_data, clock_idle_exit));
- DEFINE(__TIMER_IDLE_ENTER, offsetof(struct s390_idle_data, timer_idle_enter));
- DEFINE(__TIMER_IDLE_EXIT, offsetof(struct s390_idle_data, timer_idle_exit));
- /* lowcore offsets */
- DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
- DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr));
- DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code));
- DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc));
- DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code));
- DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc));
- DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code));
- DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code));
- DEFINE(__LC_MON_CLASS_NR, offsetof(struct _lowcore, mon_class_num));
- DEFINE(__LC_PER_CODE, offsetof(struct _lowcore, per_code));
- DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_atmid));
- DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address));
- DEFINE(__LC_EXC_ACCESS_ID, offsetof(struct _lowcore, exc_access_id));
- DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id));
- DEFINE(__LC_OP_ACCESS_ID, offsetof(struct _lowcore, op_access_id));
- DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_mode_id));
- DEFINE(__LC_MON_CODE, offsetof(struct _lowcore, monitor_code));
- DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id));
- DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
- DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm));
- DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word));
- DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list));
- DEFINE(__LC_MCCK_CODE, offsetof(struct _lowcore, mcck_interruption_code));
- DEFINE(__LC_MCCK_EXT_DAM_CODE, offsetof(struct _lowcore, external_damage_code));
- DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw));
- DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw));
- DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw));
- DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw));
- DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw));
- DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw));
- DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw));
- DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw));
- DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw));
- DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
- DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
- DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
+ OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter);
+ OFFSET(__CLOCK_IDLE_EXIT, s390_idle_data, clock_idle_exit);
+ OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter);
+ OFFSET(__TIMER_IDLE_EXIT, s390_idle_data, timer_idle_exit);
BLANK();
- DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync));
- DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async));
- DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart));
- DEFINE(__LC_CPU_FLAGS, offsetof(struct _lowcore, cpu_flags));
- DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw));
- DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
- DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
- DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer));
- DEFINE(__LC_MCCK_ENTER_TIMER, offsetof(struct _lowcore, mcck_enter_timer));
- DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer));
- DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer));
- DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer));
- DEFINE(__LC_STEAL_TIMER, offsetof(struct _lowcore, steal_timer));
- DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer));
- DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock));
- DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task));
- DEFINE(__LC_CURRENT_PID, offsetof(struct _lowcore, current_pid));
- DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info));
- DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack));
- DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack));
- DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
- DEFINE(__LC_RESTART_STACK, offsetof(struct _lowcore, restart_stack));
- DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn));
- DEFINE(__LC_RESTART_DATA, offsetof(struct _lowcore, restart_data));
- DEFINE(__LC_RESTART_SOURCE, offsetof(struct _lowcore, restart_source));
- DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
- DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
- DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
- DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
- DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
- DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
+ /* hardware defined lowcore locations 0x000 - 0x1ff */
+ OFFSET(__LC_EXT_PARAMS, _lowcore, ext_params);
+ OFFSET(__LC_EXT_CPU_ADDR, _lowcore, ext_cpu_addr);
+ OFFSET(__LC_EXT_INT_CODE, _lowcore, ext_int_code);
+ OFFSET(__LC_SVC_ILC, _lowcore, svc_ilc);
+ OFFSET(__LC_SVC_INT_CODE, _lowcore, svc_code);
+ OFFSET(__LC_PGM_ILC, _lowcore, pgm_ilc);
+ OFFSET(__LC_PGM_INT_CODE, _lowcore, pgm_code);
+ OFFSET(__LC_DATA_EXC_CODE, _lowcore, data_exc_code);
+ OFFSET(__LC_MON_CLASS_NR, _lowcore, mon_class_num);
+ OFFSET(__LC_PER_CODE, _lowcore, per_code);
+ OFFSET(__LC_PER_ATMID, _lowcore, per_atmid);
+ OFFSET(__LC_PER_ADDRESS, _lowcore, per_address);
+ OFFSET(__LC_EXC_ACCESS_ID, _lowcore, exc_access_id);
+ OFFSET(__LC_PER_ACCESS_ID, _lowcore, per_access_id);
+ OFFSET(__LC_OP_ACCESS_ID, _lowcore, op_access_id);
+ OFFSET(__LC_AR_MODE_ID, _lowcore, ar_mode_id);
+ OFFSET(__LC_TRANS_EXC_CODE, _lowcore, trans_exc_code);
+ OFFSET(__LC_MON_CODE, _lowcore, monitor_code);
+ OFFSET(__LC_SUBCHANNEL_ID, _lowcore, subchannel_id);
+ OFFSET(__LC_SUBCHANNEL_NR, _lowcore, subchannel_nr);
+ OFFSET(__LC_IO_INT_PARM, _lowcore, io_int_parm);
+ OFFSET(__LC_IO_INT_WORD, _lowcore, io_int_word);
+ OFFSET(__LC_STFL_FAC_LIST, _lowcore, stfl_fac_list);
+ OFFSET(__LC_MCCK_CODE, _lowcore, mcck_interruption_code);
+ OFFSET(__LC_MCCK_FAIL_STOR_ADDR, _lowcore, failing_storage_address);
+ OFFSET(__LC_LAST_BREAK, _lowcore, breaking_event_addr);
+ OFFSET(__LC_RST_OLD_PSW, _lowcore, restart_old_psw);
+ OFFSET(__LC_EXT_OLD_PSW, _lowcore, external_old_psw);
+ OFFSET(__LC_SVC_OLD_PSW, _lowcore, svc_old_psw);
+ OFFSET(__LC_PGM_OLD_PSW, _lowcore, program_old_psw);
+ OFFSET(__LC_MCK_OLD_PSW, _lowcore, mcck_old_psw);
+ OFFSET(__LC_IO_OLD_PSW, _lowcore, io_old_psw);
+ OFFSET(__LC_RST_NEW_PSW, _lowcore, restart_psw);
+ OFFSET(__LC_EXT_NEW_PSW, _lowcore, external_new_psw);
+ OFFSET(__LC_SVC_NEW_PSW, _lowcore, svc_new_psw);
+ OFFSET(__LC_PGM_NEW_PSW, _lowcore, program_new_psw);
+ OFFSET(__LC_MCK_NEW_PSW, _lowcore, mcck_new_psw);
+ OFFSET(__LC_IO_NEW_PSW, _lowcore, io_new_psw);
+ /* software defined lowcore locations 0x200 - 0xdff */
+ OFFSET(__LC_SAVE_AREA_SYNC, _lowcore, save_area_sync);
+ OFFSET(__LC_SAVE_AREA_ASYNC, _lowcore, save_area_async);
+ OFFSET(__LC_SAVE_AREA_RESTART, _lowcore, save_area_restart);
+ OFFSET(__LC_CPU_FLAGS, _lowcore, cpu_flags);
+ OFFSET(__LC_RETURN_PSW, _lowcore, return_psw);
+ OFFSET(__LC_RETURN_MCCK_PSW, _lowcore, return_mcck_psw);
+ OFFSET(__LC_SYNC_ENTER_TIMER, _lowcore, sync_enter_timer);
+ OFFSET(__LC_ASYNC_ENTER_TIMER, _lowcore, async_enter_timer);
+ OFFSET(__LC_MCCK_ENTER_TIMER, _lowcore, mcck_enter_timer);
+ OFFSET(__LC_EXIT_TIMER, _lowcore, exit_timer);
+ OFFSET(__LC_USER_TIMER, _lowcore, user_timer);
+ OFFSET(__LC_SYSTEM_TIMER, _lowcore, system_timer);
+ OFFSET(__LC_STEAL_TIMER, _lowcore, steal_timer);
+ OFFSET(__LC_LAST_UPDATE_TIMER, _lowcore, last_update_timer);
+ OFFSET(__LC_LAST_UPDATE_CLOCK, _lowcore, last_update_clock);
+ OFFSET(__LC_INT_CLOCK, _lowcore, int_clock);
+ OFFSET(__LC_MCCK_CLOCK, _lowcore, mcck_clock);
+ OFFSET(__LC_CURRENT, _lowcore, current_task);
+ OFFSET(__LC_THREAD_INFO, _lowcore, thread_info);
+ OFFSET(__LC_KERNEL_STACK, _lowcore, kernel_stack);
+ OFFSET(__LC_ASYNC_STACK, _lowcore, async_stack);
+ OFFSET(__LC_PANIC_STACK, _lowcore, panic_stack);
+ OFFSET(__LC_RESTART_STACK, _lowcore, restart_stack);
+ OFFSET(__LC_RESTART_FN, _lowcore, restart_fn);
+ OFFSET(__LC_RESTART_DATA, _lowcore, restart_data);
+ OFFSET(__LC_RESTART_SOURCE, _lowcore, restart_source);
+ OFFSET(__LC_USER_ASCE, _lowcore, user_asce);
+ OFFSET(__LC_LPP, _lowcore, lpp);
+ OFFSET(__LC_CURRENT_PID, _lowcore, current_pid);
+ OFFSET(__LC_PERCPU_OFFSET, _lowcore, percpu_offset);
+ OFFSET(__LC_VDSO_PER_CPU, _lowcore, vdso_per_cpu_data);
+ OFFSET(__LC_MACHINE_FLAGS, _lowcore, machine_flags);
+ OFFSET(__LC_GMAP, _lowcore, gmap);
+ OFFSET(__LC_PASTE, _lowcore, paste);
+ /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
+ OFFSET(__LC_DUMP_REIPL, _lowcore, ipib);
+ /* hardware defined lowcore locations 0x1000 - 0x18ff */
+ OFFSET(__LC_VX_SAVE_AREA_ADDR, _lowcore, vector_save_area_addr);
+ OFFSET(__LC_EXT_PARAMS2, _lowcore, ext_params2);
+ OFFSET(SAVE_AREA_BASE, _lowcore, floating_pt_save_area);
+ OFFSET(__LC_FPREGS_SAVE_AREA, _lowcore, floating_pt_save_area);
+ OFFSET(__LC_GPREGS_SAVE_AREA, _lowcore, gpregs_save_area);
+ OFFSET(__LC_PSW_SAVE_AREA, _lowcore, psw_save_area);
+ OFFSET(__LC_PREFIX_SAVE_AREA, _lowcore, prefixreg_save_area);
+ OFFSET(__LC_FP_CREG_SAVE_AREA, _lowcore, fpt_creg_save_area);
+ OFFSET(__LC_CPU_TIMER_SAVE_AREA, _lowcore, cpu_timer_save_area);
+ OFFSET(__LC_CLOCK_COMP_SAVE_AREA, _lowcore, clock_comp_save_area);
+ OFFSET(__LC_AREGS_SAVE_AREA, _lowcore, access_regs_save_area);
+ OFFSET(__LC_CREGS_SAVE_AREA, _lowcore, cregs_save_area);
+ OFFSET(__LC_PGM_TDB, _lowcore, pgm_tdb);
BLANK();
- DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
- DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area));
- DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area));
- DEFINE(__LC_PREFIX_SAVE_AREA, offsetof(struct _lowcore, prefixreg_save_area));
- DEFINE(__LC_AREGS_SAVE_AREA, offsetof(struct _lowcore, access_regs_save_area));
- DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area));
- DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area));
- DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area));
- DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
- DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
- DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr));
- DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
- DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
- DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
- DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
- DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
- DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset));
- DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
- DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
- DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
- DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
- DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
- DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
+ /* gmap/sie offsets */
+ OFFSET(__GMAP_ASCE, gmap, asce);
+ OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c);
+ OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20);
return 0;
}
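
The bulk of this file's change swaps the verbose DEFINE(..., offsetof(...))
pattern for the shorter OFFSET() helper. A minimal sketch of the relevant
macros, assuming the classic include/linux/kbuild.h definitions (worth
verifying against the tree this patch targets):

	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	#define BLANK() \
		asm volatile("\n->" : : )

	#define OFFSET(sym, str, mem) \
		DEFINE(sym, offsetof(struct str, mem))

Kbuild scans the generated assembly to produce asm-offsets.h, so
OFFSET(__TASK_pid, task_struct, pid) emits exactly the same constant as
DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); the conversion is a
readability cleanup, plus the new lowcore entries called out in the added
comments.
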
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index e0f9d270b..66c94417c 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -249,7 +249,7 @@ static int save_sigregs_ext32(struct pt_regs *regs,
return -EFAULT;
/* Save vector registers to signal stack */
- if (is_vx_task(current)) {
+ if (MACHINE_HAS_VX) {
for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1);
if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
@@ -277,7 +277,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
*(__u32 *)&regs->gprs[i] = gprs_high[i];
/* Restore vector registers from signal stack */
- if (is_vx_task(current)) {
+ if (MACHINE_HAS_VX) {
if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
sizeof(sregs_ext->vxrs_low)) ||
__copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
@@ -470,8 +470,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
*/
uc_flags = UC_GPRS_HIGH;
if (MACHINE_HAS_VX) {
- if (is_vx_task(current))
- uc_flags |= UC_VXRS;
+ uc_flags |= UC_VXRS;
} else
frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) +
sizeof(frame->uc.uc_mcontext_ext.vxrs_high);
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index 09f194052..fac4eedde 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -176,3 +176,4 @@ COMPAT_SYSCALL_WRAP4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
+COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
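
The added wrapper exposes the new mlock2() system call to 31-bit compat
tasks. A hand-expanded sketch of roughly what COMPAT_SYSCALL_WRAP3()
generates here (simplified; the real expansion goes through the
__MAP()/__SC_COMPAT_CAST() helpers defined earlier in this file): the
compat entry point takes 32-bit-safe types, so the upper halves of the
64-bit registers, which are undefined in 31-bit mode, are cleanly
truncated before the native syscall runs.

	asmlinkage long compat_sys_mlock2(compat_ulong_t start,
					  compat_size_t len, int flags)
	{
		/* compat types force clean truncation of 31-bit values */
		return sys_mlock2(start, len, flags);
	}
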
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index 199ec92ef..7f768914f 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -14,6 +14,7 @@
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
+#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <asm/io.h>
@@ -70,6 +71,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
memcpy(cpcmd_buf, cmd, cmdlen);
ASCEBC(cpcmd_buf, cmdlen);
+ diag_stat_inc(DIAG_STAT_X008);
if (response) {
memset(response, 0, rlen);
response_len = rlen;
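
This one-line hunk shows the calling convention introduced by the new
diagnose statistics (see the diag.c changes below): bump the per-CPU
counter right before issuing the diagnose instruction. A hypothetical
caller, modeled on the hunk above, purely for illustration:

	static void my_console_function(void)
	{
		diag_stat_inc(DIAG_STAT_X008);	/* account diagnose 0x008 */
		/* ... issue the diag 0x008 instruction here ... */
	}

The counters surface in /sys/kernel/debug/diag_stat (assuming debugfs is
mounted in the usual place), one row per diagnose code and one column per
online CPU.
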
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 0c6c01eb3..171e09bb8 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -32,16 +32,6 @@ static struct memblock_type oldmem_type = {
.regions = &oldmem_region,
};
-#define for_each_dump_mem_range(i, nid, p_start, p_end, p_nid) \
- for (i = 0, __next_mem_range(&i, nid, MEMBLOCK_NONE, \
- &memblock.physmem, \
- &oldmem_type, p_start, \
- p_end, p_nid); \
- i != (u64)ULLONG_MAX; \
- __next_mem_range(&i, nid, MEMBLOCK_NONE, &memblock.physmem,\
- &oldmem_type, \
- p_start, p_end, p_nid))
-
struct dump_save_areas dump_save_areas;
/*
@@ -515,7 +505,8 @@ static int get_mem_chunk_cnt(void)
int cnt = 0;
u64 idx;
- for_each_dump_mem_range(idx, NUMA_NO_NODE, NULL, NULL, NULL)
+ for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE,
+ MEMBLOCK_NONE, NULL, NULL, NULL)
cnt++;
return cnt;
}
@@ -528,7 +519,8 @@ static void loads_init(Elf64_Phdr *phdr, u64 loads_offset)
phys_addr_t start, end;
u64 idx;
- for_each_dump_mem_range(idx, NUMA_NO_NODE, &start, &end, NULL) {
+ for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE,
+ MEMBLOCK_NONE, &start, &end, NULL) {
phdr->p_filesz = end - start;
phdr->p_type = PT_LOAD;
phdr->p_offset = start;
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 2f69243bf..48b37b835 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -6,12 +6,137 @@
*/
#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
#include <asm/diag.h>
+#include <asm/trace/diag.h>
+
+struct diag_stat {
+ unsigned int counter[NR_DIAG_STAT];
+};
+
+static DEFINE_PER_CPU(struct diag_stat, diag_stat);
+
+struct diag_desc {
+ int code;
+ char *name;
+};
+
+static const struct diag_desc diag_map[NR_DIAG_STAT] = {
+ [DIAG_STAT_X008] = { .code = 0x008, .name = "Console Function" },
+ [DIAG_STAT_X00C] = { .code = 0x00c, .name = "Pseudo Timer" },
+ [DIAG_STAT_X010] = { .code = 0x010, .name = "Release Pages" },
+ [DIAG_STAT_X014] = { .code = 0x014, .name = "Spool File Services" },
+ [DIAG_STAT_X044] = { .code = 0x044, .name = "Voluntary Timeslice End" },
+ [DIAG_STAT_X064] = { .code = 0x064, .name = "NSS Manipulation" },
+ [DIAG_STAT_X09C] = { .code = 0x09c, .name = "Relinquish Timeslice" },
+ [DIAG_STAT_X0DC] = { .code = 0x0dc, .name = "Appldata Control" },
+ [DIAG_STAT_X204] = { .code = 0x204, .name = "Logical-CPU Utilization" },
+ [DIAG_STAT_X210] = { .code = 0x210, .name = "Device Information" },
+ [DIAG_STAT_X224] = { .code = 0x224, .name = "EBCDIC-Name Table" },
+ [DIAG_STAT_X250] = { .code = 0x250, .name = "Block I/O" },
+ [DIAG_STAT_X258] = { .code = 0x258, .name = "Page-Reference Services" },
+ [DIAG_STAT_X288] = { .code = 0x288, .name = "Time Bomb" },
+ [DIAG_STAT_X2C4] = { .code = 0x2c4, .name = "FTP Services" },
+ [DIAG_STAT_X2FC] = { .code = 0x2fc, .name = "Guest Performance Data" },
+ [DIAG_STAT_X304] = { .code = 0x304, .name = "Partition-Resource Service" },
+ [DIAG_STAT_X308] = { .code = 0x308, .name = "List-Directed IPL" },
+ [DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
+};
+
+static int show_diag_stat(struct seq_file *m, void *v)
+{
+ struct diag_stat *stat;
+ unsigned long n = (unsigned long) v - 1;
+ int cpu, prec, tmp;
+
+ get_online_cpus();
+ if (n == 0) {
+ seq_puts(m, "         ");
+
+ for_each_online_cpu(cpu) {
+ prec = 10;
+ for (tmp = 10; cpu >= tmp; tmp *= 10)
+ prec--;
+ seq_printf(m, "%*s%d", prec, "CPU", cpu);
+ }
+ seq_putc(m, '\n');
+ } else if (n <= NR_DIAG_STAT) {
+ seq_printf(m, "diag %03x:", diag_map[n-1].code);
+ for_each_online_cpu(cpu) {
+ stat = &per_cpu(diag_stat, cpu);
+ seq_printf(m, " %10u", stat->counter[n-1]);
+ }
+ seq_printf(m, " %s\n", diag_map[n-1].name);
+ }
+ put_online_cpus();
+ return 0;
+}
+
+static void *show_diag_stat_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos <= nr_cpu_ids ? (void *)((unsigned long) *pos + 1) : NULL;
+}
+
+static void *show_diag_stat_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return show_diag_stat_start(m, pos);
+}
+
+static void show_diag_stat_stop(struct seq_file *m, void *v)
+{
+}
+
+static const struct seq_operations show_diag_stat_sops = {
+ .start = show_diag_stat_start,
+ .next = show_diag_stat_next,
+ .stop = show_diag_stat_stop,
+ .show = show_diag_stat,
+};
+
+static int show_diag_stat_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &show_diag_stat_sops);
+}
+
+static const struct file_operations show_diag_stat_fops = {
+ .open = show_diag_stat_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+
+static int __init show_diag_stat_init(void)
+{
+ debugfs_create_file("diag_stat", 0400, NULL, NULL,
+ &show_diag_stat_fops);
+ return 0;
+}
+
+device_initcall(show_diag_stat_init);
+
+void diag_stat_inc(enum diag_stat_enum nr)
+{
+ this_cpu_inc(diag_stat.counter[nr]);
+ trace_s390_diagnose(diag_map[nr].code);
+}
+EXPORT_SYMBOL(diag_stat_inc);
+
+void diag_stat_inc_norecursion(enum diag_stat_enum nr)
+{
+ this_cpu_inc(diag_stat.counter[nr]);
+ trace_s390_diagnose_norecursion(diag_map[nr].code);
+}
+EXPORT_SYMBOL(diag_stat_inc_norecursion);
/*
* Diagnose 14: Input spool file manipulation
*/
-int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
+static inline int __diag14(unsigned long rx, unsigned long ry1,
+ unsigned long subcode)
{
register unsigned long _ry1 asm("2") = ry1;
register unsigned long _ry2 asm("3") = subcode;
@@ -29,6 +154,12 @@ int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
return rc;
}
+
+int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
+{
+ diag_stat_inc(DIAG_STAT_X014);
+ return __diag14(rx, ry1, subcode);
+}
EXPORT_SYMBOL(diag14);
/*
@@ -48,6 +179,7 @@ int diag210(struct diag210 *addr)
spin_lock_irqsave(&diag210_lock, flags);
diag210_tmp = *addr;
+ diag_stat_inc(DIAG_STAT_X210);
asm volatile(
" lhi %0,-1\n"
" sam31\n"
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 8140d10c6..6e7296160 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1920,16 +1920,23 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
}
if (separator)
ptr += sprintf(ptr, "%c", separator);
+ /*
+ * Use four '%' characters below because of the
+ * following two conversions:
+ *
+ * 1) sprintf: %%%%r -> %%r
+ * 2) printk : %%r -> %r
+ */
if (operand->flags & OPERAND_GPR)
- ptr += sprintf(ptr, "%%r%i", value);
+ ptr += sprintf(ptr, "%%%%r%i", value);
else if (operand->flags & OPERAND_FPR)
- ptr += sprintf(ptr, "%%f%i", value);
+ ptr += sprintf(ptr, "%%%%f%i", value);
else if (operand->flags & OPERAND_AR)
- ptr += sprintf(ptr, "%%a%i", value);
+ ptr += sprintf(ptr, "%%%%a%i", value);
else if (operand->flags & OPERAND_CR)
- ptr += sprintf(ptr, "%%c%i", value);
+ ptr += sprintf(ptr, "%%%%c%i", value);
else if (operand->flags & OPERAND_VR)
- ptr += sprintf(ptr, "%%v%i", value);
+ ptr += sprintf(ptr, "%%%%v%i", value);
else if (operand->flags & OPERAND_PCREL)
ptr += sprintf(ptr, "%lx", (signed int) value
+ addr);
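
A minimal illustration of the double conversion the new comment
describes: each formatting pass consumes one level of '%' escaping, so
four '%' characters in the source survive as a single one on the
console. A standalone sketch:

	static void demo_percent_escaping(void)
	{
		char buf[16];

		sprintf(buf, "%%%%r%i", 1);	/* buf now holds "%%r1" */
		printk(buf);			/* printk emits "%r1" */
	}
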
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 549a73a4b..3c31609df 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -17,6 +17,7 @@
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
+#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
@@ -286,6 +287,7 @@ static __init void detect_diag9c(void)
int rc;
cpu_address = stap();
+ diag_stat_inc(DIAG_STAT_X09C);
asm volatile(
" diag %2,0,0x9c\n"
"0: la %0,0\n"
@@ -300,6 +302,7 @@ static __init void detect_diag44(void)
{
int rc;
+ diag_stat_inc(DIAG_STAT_X044);
asm volatile(
" diag 0,0,0x44\n"
"0: la %0,0\n"
@@ -326,9 +329,19 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
if (test_facility(51))
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
- if (test_facility(129))
+ if (test_facility(129)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
+ __ctl_set_bit(0, 17);
+ }
+}
+
+static int __init disable_vector_extension(char *str)
+{
+ S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
+ __ctl_clear_bit(0, 17);
+ return 1;
}
+early_param("novx", disable_vector_extension);
static int __init cad_setup(char *str)
{
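
The magic numbers in the vector-extension hunks line up as follows:
architecturally the vector-enablement control is CR0 bit 46, counted from
the most-significant bit, while __ctl_set_bit() shifts from the
least-significant bit, hence 63 - 46 = 17. A sketch (the CR0_VX_* names
are made up for illustration):

	#define CR0_VX_ARCH_BIT		46	/* MSB-relative numbering */
	#define CR0_VX_LSB_SHIFT	(63 - CR0_VX_ARCH_BIT)	/* == 17 */

	static void enable_vector_control(void)
	{
		__ctl_set_bit(0, CR0_VX_LSB_SHIFT); /* as in the hunk above */
	}

With "novx" on the command line the handler clears both the machine flag
and the control bit again, so the kernel behaves as if facility 129 were
absent.
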
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 582fe44ab..857b6526d 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -20,8 +20,9 @@
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
-#include <asm/fpu-internal.h>
#include <asm/vx-insn.h>
+#include <asm/setup.h>
+#include <asm/nmi.h>
__PT_R0 = __PT_GPRS
__PT_R1 = __PT_GPRS + 8
@@ -139,6 +140,28 @@ _PIF_WORK = (_PIF_PER_TRAP)
#endif
.endm
+ /*
+ * The TSTMSK macro generates a test-under-mask instruction by
+ * calculating the memory offset for the specified mask value.
+ * Mask value can be any constant. The macro shifts the mask
+ * value to calculate the memory offset for the test-under-mask
+ * instruction.
+ */
+ .macro TSTMSK addr, mask, size=8, bytepos=0
+ .if (\bytepos < \size) && (\mask >> 8)
+ .if (\mask & 0xff)
+ .error "Mask exceeds byte boundary"
+ .endif
+ TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
+ .exitm
+ .endif
+ .ifeq \mask
+ .error "Mask must not be zero"
+ .endif
+ off = \size - \bytepos - 1
+ tm off+\addr, \mask
+ .endm
+
.section .kprobes.text, "ax"
/*
@@ -164,8 +187,11 @@ ENTRY(__switch_to)
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
lg %r15,__THREAD_ksp(%r1) # load kernel stack of next
lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
- mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
+ mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
+ TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
+ bzr %r14
+ .insn s,0xb2800000,__LC_LPP # set program parameter
br %r14
.L__critical_start:
@@ -180,8 +206,8 @@ ENTRY(sie64a)
stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
stg %r2,__SF_EMPTY(%r15) # save control block pointer
stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
- xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
- tm __LC_CPU_FLAGS+7,_CIF_FPU # load guest fp/vx registers ?
+ xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
+ TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
jno .Lsie_load_guest_gprs
brasl %r14,load_fpu_regs # load guest fp/vx regs
.Lsie_load_guest_gprs:
@@ -195,16 +221,9 @@ ENTRY(sie64a)
oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
tm __SIE_PROG20+3(%r14),3 # last exit...
jnz .Lsie_skip
- tm __LC_CPU_FLAGS+7,_CIF_FPU
+ TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lsie_skip # exit if fp/vx regs changed
- tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP
- jz .Lsie_enter
- .insn s,0xb2800000,__LC_CURRENT_PID # set guest id to pid
-.Lsie_enter:
sie 0(%r14)
- tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP
- jz .Lsie_skip
- .insn s,0xb2800000,__SF_EMPTY+16(%r15)# set host id
.Lsie_skip:
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
@@ -221,11 +240,11 @@ sie_exit:
lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
stmg %r0,%r13,0(%r14) # save guest gprs 0-13
lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
- lg %r2,__SF_EMPTY+24(%r15) # return exit reason code
+ lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
br %r14
.Lsie_fault:
lghi %r14,-EFAULT
- stg %r14,__SF_EMPTY+24(%r15) # set exit reason code
+ stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
j sie_exit
EX_TABLE(.Lrewind_pad,.Lsie_fault)
@@ -271,7 +290,7 @@ ENTRY(system_call)
stg %r2,__PT_ORIG_GPR2(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
lgf %r9,0(%r8,%r10) # get system call add.
- tm __TI_flags+7(%r12),_TIF_TRACE
+ TSTMSK __TI_flags(%r12),_TIF_TRACE
jnz .Lsysc_tracesys
basr %r14,%r9 # call sys_xxxx
stg %r2,__PT_R2(%r11) # store return value
@@ -279,11 +298,11 @@ ENTRY(system_call)
.Lsysc_return:
LOCKDEP_SYS_EXIT
.Lsysc_tif:
- tm __PT_FLAGS+7(%r11),_PIF_WORK
+ TSTMSK __PT_FLAGS(%r11),_PIF_WORK
jnz .Lsysc_work
- tm __TI_flags+7(%r12),_TIF_WORK
+ TSTMSK __TI_flags(%r12),_TIF_WORK
jnz .Lsysc_work # check for work
- tm __LC_CPU_FLAGS+7,_CIF_WORK
+ TSTMSK __LC_CPU_FLAGS,_CIF_WORK
jnz .Lsysc_work
.Lsysc_restore:
lg %r14,__LC_VDSO_PER_CPU
@@ -299,23 +318,23 @@ ENTRY(system_call)
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
- tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
+ TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
jo .Lsysc_mcck_pending
- tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
+ TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
jo .Lsysc_reschedule
#ifdef CONFIG_UPROBES
- tm __TI_flags+7(%r12),_TIF_UPROBE
+ TSTMSK __TI_flags(%r12),_TIF_UPROBE
jo .Lsysc_uprobe_notify
#endif
- tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
+ TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP
jo .Lsysc_singlestep
- tm __TI_flags+7(%r12),_TIF_SIGPENDING
+ TSTMSK __TI_flags(%r12),_TIF_SIGPENDING
jo .Lsysc_sigpending
- tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
+ TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
jo .Lsysc_notify_resume
- tm __LC_CPU_FLAGS+7,_CIF_FPU
+ TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lsysc_vxrs
- tm __LC_CPU_FLAGS+7,_CIF_ASCE
+ TSTMSK __LC_CPU_FLAGS,_CIF_ASCE
jo .Lsysc_uaccess
j .Lsysc_return # beware of critical section cleanup
@@ -354,7 +373,7 @@ ENTRY(system_call)
.Lsysc_sigpending:
lgr %r2,%r11 # pass pointer to pt_regs
brasl %r14,do_signal
- tm __PT_FLAGS+7(%r11),_PIF_SYSCALL
+ TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
jno .Lsysc_return
lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
lg %r10,__TI_sysc_table(%r12) # address of system call table
@@ -414,7 +433,7 @@ ENTRY(system_call)
basr %r14,%r9 # call sys_xxx
stg %r2,__PT_R2(%r11) # store return value
.Lsysc_tracenogo:
- tm __TI_flags+7(%r12),_TIF_TRACE
+ TSTMSK __TI_flags(%r12),_TIF_TRACE
jz .Lsysc_return
lgr %r2,%r11 # pass pointer to pt_regs
larl %r14,.Lsysc_return
@@ -544,6 +563,8 @@ ENTRY(io_int_handler)
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+ TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
+ jo .Lio_restore
TRACE_IRQS_OFF
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
@@ -554,7 +575,7 @@ ENTRY(io_int_handler)
lghi %r3,THIN_INTERRUPT
.Lio_call:
brasl %r14,do_IRQ
- tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR
+ TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
jz .Lio_return
tpi 0
jz .Lio_return
@@ -564,9 +585,9 @@ ENTRY(io_int_handler)
LOCKDEP_SYS_EXIT
TRACE_IRQS_ON
.Lio_tif:
- tm __TI_flags+7(%r12),_TIF_WORK
+ TSTMSK __TI_flags(%r12),_TIF_WORK
jnz .Lio_work # there is work to do (signals etc.)
- tm __LC_CPU_FLAGS+7,_CIF_WORK
+ TSTMSK __LC_CPU_FLAGS,_CIF_WORK
jnz .Lio_work
.Lio_restore:
lg %r14,__LC_VDSO_PER_CPU
@@ -594,7 +615,7 @@ ENTRY(io_int_handler)
# check for preemptive scheduling
icm %r0,15,__TI_precount(%r12)
jnz .Lio_restore # preemption is disabled
- tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
+ TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
jno .Lio_restore
# switch to kernel stack
lg %r1,__PT_R15(%r11)
@@ -626,17 +647,17 @@ ENTRY(io_int_handler)
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
- tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
+ TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
jo .Lio_mcck_pending
- tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
+ TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED
jo .Lio_reschedule
- tm __TI_flags+7(%r12),_TIF_SIGPENDING
+ TSTMSK __TI_flags(%r12),_TIF_SIGPENDING
jo .Lio_sigpending
- tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
+ TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME
jo .Lio_notify_resume
- tm __LC_CPU_FLAGS+7,_CIF_FPU
+ TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lio_vxrs
- tm __LC_CPU_FLAGS+7,_CIF_ASCE
+ TSTMSK __LC_CPU_FLAGS,_CIF_ASCE
jo .Lio_uaccess
j .Lio_return # beware of critical section cleanup
@@ -719,6 +740,8 @@ ENTRY(ext_int_handler)
mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
mvc __PT_INT_PARM_LONG(8,%r11),0(%r1)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
+ TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ
+ jo .Lio_restore
TRACE_IRQS_OFF
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
@@ -748,27 +771,22 @@ ENTRY(psw_idle)
br %r14
.Lpsw_idle_end:
-/* Store floating-point controls and floating-point or vector extension
- * registers instead. A critical section cleanup assures that the registers
- * are stored even if interrupted for some other work. The register %r2
- * designates a struct fpu to store register contents. If the specified
- * structure does not contain a register save area, the register store is
- * omitted (see also comments in arch_dup_task_struct()).
- *
- * The CIF_FPU flag is set in any case. The CIF_FPU triggers a lazy restore
- * of the register contents at system call or io return.
+/*
+ * Store floating-point controls and floating-point or vector register
+ * depending whether the vector facility is available. A critical section
+ * cleanup assures that the registers are stored even if interrupted for
+ * some other work. The CIF_FPU flag is set to trigger a lazy restore
+ * of the register contents at return from io or a system call.
*/
ENTRY(save_fpu_regs)
lg %r2,__LC_CURRENT
aghi %r2,__TASK_thread
- tm __LC_CPU_FLAGS+7,_CIF_FPU
+ TSTMSK __LC_CPU_FLAGS,_CIF_FPU
bor %r14
stfpc __THREAD_FPU_fpc(%r2)
.Lsave_fpu_regs_fpc_end:
lg %r3,__THREAD_FPU_regs(%r2)
- ltgr %r3,%r3
- jz .Lsave_fpu_regs_done # no save area -> set CIF_FPU
- tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX
+ TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
jz .Lsave_fpu_regs_fp # no -> store FP regs
.Lsave_fpu_regs_vx_low:
VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
@@ -797,41 +815,30 @@ ENTRY(save_fpu_regs)
br %r14
.Lsave_fpu_regs_end:
-/* Load floating-point controls and floating-point or vector extension
- * registers. A critical section cleanup assures that the register contents
- * are loaded even if interrupted for some other work. Depending on the saved
- * FP/VX state, the vector-enablement control, CR0.46, is either set or cleared.
+/*
+ * Load floating-point controls and floating-point or vector registers.
+ * A critical section cleanup assures that the register contents are
+ * loaded even if interrupted for some other work.
*
* There are special calling conventions to fit into sysc and io return work:
* %r15: <kernel stack>
* The function requires:
- * %r4 and __SF_EMPTY+32(%r15)
+ * %r4
*/
load_fpu_regs:
lg %r4,__LC_CURRENT
aghi %r4,__TASK_thread
- tm __LC_CPU_FLAGS+7,_CIF_FPU
+ TSTMSK __LC_CPU_FLAGS,_CIF_FPU
bnor %r14
lfpc __THREAD_FPU_fpc(%r4)
- stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0
- tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ?
+ TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
- jz .Lload_fpu_regs_fp_ctl # -> no VX, load FP regs
-.Lload_fpu_regs_vx_ctl:
- tm __SF_EMPTY+32+5(%r15),2 # test VX control
- jo .Lload_fpu_regs_vx
- oi __SF_EMPTY+32+5(%r15),2 # set VX control
- lctlg %c0,%c0,__SF_EMPTY+32(%r15)
+ jz .Lload_fpu_regs_fp # -> no VX, load FP regs
.Lload_fpu_regs_vx:
VLM %v0,%v15,0,%r4
.Lload_fpu_regs_vx_high:
VLM %v16,%v31,256,%r4
j .Lload_fpu_regs_done
-.Lload_fpu_regs_fp_ctl:
- tm __SF_EMPTY+32+5(%r15),2 # test VX control
- jz .Lload_fpu_regs_fp
- ni __SF_EMPTY+32+5(%r15),253 # clear VX control
- lctlg %c0,%c0,__SF_EMPTY+32(%r15)
.Lload_fpu_regs_fp:
ld 0,0(%r4)
ld 1,8(%r4)
@@ -854,16 +861,6 @@ load_fpu_regs:
br %r14
.Lload_fpu_regs_end:
-/* Test and set the vector enablement control in CR0.46 */
-ENTRY(__ctl_set_vx)
- stctg %c0,%c0,__SF_EMPTY(%r15)
- tm __SF_EMPTY+5(%r15),2
- bor %r14
- oi __SF_EMPTY+5(%r15),2
- lctlg %c0,%c0,__SF_EMPTY(%r15)
- br %r14
-.L__ctl_set_vx_end:
-
.L__critical_end:
/*
@@ -878,11 +875,11 @@ ENTRY(mcck_int_handler)
lg %r12,__LC_THREAD_INFO
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_MCK_OLD_PSW
- tm __LC_MCCK_CODE,0x80 # system damage?
+ TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
jo .Lmcck_panic # yes -> rest of mcck code invalid
lghi %r14,__LC_CPU_TIMER_SAVE_AREA
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
- tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
+ TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
jo 3f
la %r14,__LC_SYNC_ENTER_TIMER
clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER
@@ -896,7 +893,7 @@ ENTRY(mcck_int_handler)
la %r14,__LC_LAST_UPDATE_TIMER
2: spt 0(%r14)
mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
-3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
+3: TSTMSK __LC_MCCK_CODE,(MCCK_CODE_PSW_MWP_VALID|MCCK_CODE_PSW_IA_VALID)
jno .Lmcck_panic # no -> skip cleanup critical
SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
@@ -916,7 +913,7 @@ ENTRY(mcck_int_handler)
la %r11,STACK_FRAME_OVERHEAD(%r1)
lgr %r15,%r1
ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
- tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
+ TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING
jno .Lmcck_return
TRACE_IRQS_OFF
brasl %r14,s390_handle_mcck
@@ -941,7 +938,10 @@ ENTRY(mcck_int_handler)
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
- stg %r15,__LC_SAVE_AREA_RESTART
+ TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
+ jz 0f
+ .insn s,0xb2800000,__LC_LPP
+0: stg %r15,__LC_SAVE_AREA_RESTART
lg %r15,__LC_RESTART_STACK
aghi %r15,-__PT_SIZE # create pt_regs on stack
xc 0(__PT_SIZE,%r15),0(%r15)
@@ -1019,10 +1019,6 @@ cleanup_critical:
jl 0f
clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
jl .Lcleanup_load_fpu_regs
- clg %r9,BASED(.Lcleanup_table+112) # __ctl_set_vx
- jl 0f
- clg %r9,BASED(.Lcleanup_table+120) # .L__ctl_set_vx_end
- jl .Lcleanup___ctl_set_vx
0: br %r14
.align 8
@@ -1041,8 +1037,6 @@ cleanup_critical:
.quad .Lsave_fpu_regs_end
.quad load_fpu_regs
.quad .Lload_fpu_regs_end
- .quad __ctl_set_vx
- .quad .L__ctl_set_vx_end
#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_table_sie:
@@ -1051,10 +1045,7 @@ cleanup_critical:
.Lcleanup_sie:
lg %r9,__SF_EMPTY(%r15) # get control block pointer
- tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP
- jz 0f
- .insn s,0xb2800000,__SF_EMPTY+16(%r15)# set host id
-0: ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
+ ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
br %r14
@@ -1206,7 +1197,7 @@ cleanup_critical:
.quad .Lpsw_idle_lpsw
.Lcleanup_save_fpu_regs:
- tm __LC_CPU_FLAGS+7,_CIF_FPU
+ TSTMSK __LC_CPU_FLAGS,_CIF_FPU
bor %r14
clg %r9,BASED(.Lcleanup_save_fpu_regs_done)
jhe 5f
@@ -1224,9 +1215,7 @@ cleanup_critical:
stfpc __THREAD_FPU_fpc(%r2)
1: # Load register save area and check if VX is active
lg %r3,__THREAD_FPU_regs(%r2)
- ltgr %r3,%r3
- jz 5f # no save area -> set CIF_FPU
- tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX
+ TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
jz 4f # no VX -> store FP regs
2: # Store vector registers (V0-V15)
VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3)
@@ -1266,43 +1255,27 @@ cleanup_critical:
.quad .Lsave_fpu_regs_done
.Lcleanup_load_fpu_regs:
- tm __LC_CPU_FLAGS+7,_CIF_FPU
+ TSTMSK __LC_CPU_FLAGS,_CIF_FPU
bnor %r14
clg %r9,BASED(.Lcleanup_load_fpu_regs_done)
jhe 1f
clg %r9,BASED(.Lcleanup_load_fpu_regs_fp)
jhe 2f
- clg %r9,BASED(.Lcleanup_load_fpu_regs_fp_ctl)
- jhe 3f
clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_high)
- jhe 4f
+ jhe 3f
clg %r9,BASED(.Lcleanup_load_fpu_regs_vx)
- jhe 5f
- clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl)
- jhe 6f
+ jhe 4f
lg %r4,__LC_CURRENT
aghi %r4,__TASK_thread
lfpc __THREAD_FPU_fpc(%r4)
- tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ?
+ TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
- jz 3f # -> no VX, load FP regs
-6: # Set VX-enablement control
- stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0
- tm __SF_EMPTY+32+5(%r15),2 # test VX control
- jo 5f
- oi __SF_EMPTY+32+5(%r15),2 # set VX control
- lctlg %c0,%c0,__SF_EMPTY+32(%r15)
-5: # Load V0 ..V15 registers
+ jz 2f # -> no VX, load FP regs
+4: # Load V0 ..V15 registers
VLM %v0,%v15,0,%r4
-4: # Load V16..V31 registers
+3: # Load V16..V31 registers
VLM %v16,%v31,256,%r4
j 1f
-3: # Clear VX-enablement control for FP
- stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0
- tm __SF_EMPTY+32+5(%r15),2 # test VX control
- jz 2f
- ni __SF_EMPTY+32+5(%r15),253 # clear VX control
- lctlg %c0,%c0,__SF_EMPTY+32(%r15)
2: # Load floating-point registers
ld 0,0(%r4)
ld 1,8(%r4)
@@ -1324,28 +1297,15 @@ cleanup_critical:
ni __LC_CPU_FLAGS+7,255-_CIF_FPU
lg %r9,48(%r11) # return from load_fpu_regs
br %r14
-.Lcleanup_load_fpu_regs_vx_ctl:
- .quad .Lload_fpu_regs_vx_ctl
.Lcleanup_load_fpu_regs_vx:
.quad .Lload_fpu_regs_vx
.Lcleanup_load_fpu_regs_vx_high:
.quad .Lload_fpu_regs_vx_high
-.Lcleanup_load_fpu_regs_fp_ctl:
- .quad .Lload_fpu_regs_fp_ctl
.Lcleanup_load_fpu_regs_fp:
.quad .Lload_fpu_regs_fp
.Lcleanup_load_fpu_regs_done:
.quad .Lload_fpu_regs_done
-.Lcleanup___ctl_set_vx:
- stctg %c0,%c0,__SF_EMPTY(%r15)
- tm __SF_EMPTY+5(%r15),2
- bor %r14
- oi __SF_EMPTY+5(%r15),2
- lctlg %c0,%c0,__SF_EMPTY(%r15)
- lg %r9,48(%r11) # return from __ctl_set_vx
- br %r14
-
/*
* Integer constants
*/
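
The recursive TSTMSK assembler macro added above is the workhorse of this
file's cleanup: it turns hand-computed forms such as
"tm __LC_CPU_FLAGS+7,_CIF_FPU" into symbolic tests against the full mask
constant. A C model of the offset computation it performs, a sketch for
illustration only:

	#include <assert.h>

	static void tstmsk_model(unsigned long mask, unsigned int size,
				 unsigned int *byte_off,
				 unsigned char *byte_mask)
	{
		unsigned int bytepos = 0;

		assert(mask != 0);		/* "Mask must not be zero" */
		while (mask >> 8) {
			/* "Mask exceeds byte boundary" otherwise */
			assert((mask & 0xff) == 0);
			mask >>= 8;
			bytepos++;
		}
		*byte_off = size - bytepos - 1;	/* big-endian field */
		*byte_mask = mask;
	}

For an 8-byte field and a mask in the low byte this yields offset 7, so
TSTMSK __LC_CPU_FLAGS,_CIF_FPU assembles to exactly the
"tm __LC_CPU_FLAGS+7,_CIF_FPU" it replaces.
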
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 834df047d..b7019ab74 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -16,13 +16,10 @@ void io_int_handler(void);
void mcck_int_handler(void);
void restart_int_handler(void);
void restart_call_handler(void);
-void psw_idle(struct s390_idle_data *, unsigned long);
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
-int alloc_vector_registers(struct task_struct *tsk);
-
void do_protection_exception(struct pt_regs *regs);
void do_dat_exception(struct pt_regs *regs);
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 1255c6c53..301ee9c70 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -26,6 +26,7 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
+#include <asm/ptrace.h>
#define ARCH_OFFSET 4
@@ -59,19 +60,6 @@ __HEAD
.long 0x020006e0,0x20000050
.org 0x200
-#
-# subroutine to set architecture mode
-#
-.Lsetmode:
- mvi __LC_AR_MODE_ID,1 # set esame flag
- slr %r0,%r0 # set cpuid to zero
- lhi %r1,2 # mode 2 = esame (dump)
- sigp %r1,%r0,0x12 # switch to esame mode
- bras %r13,0f
- .fill 16,4,0x0
-0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
- sam31 # switch to 31 bit addressing mode
- br %r14
#
# subroutine to wait for end I/O
@@ -159,7 +147,14 @@ __HEAD
.long 0x02200050,0x00000000
iplstart:
- bas %r14,.Lsetmode # Immediately switch to 64 bit mode
+ mvi __LC_AR_MODE_ID,1 # set esame flag
+ slr %r0,%r0 # set cpuid to zero
+ lhi %r1,2 # mode 2 = esame (dump)
+ sigp %r1,%r0,0x12 # switch to esame mode
+ bras %r13,0f
+ .fill 16,4,0x0
+0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
+ sam31 # switch to 31 bit addressing mode
lh %r1,0xb8 # test if subchannel number
bct %r1,.Lnoload # is valid
l %r1,0xb8 # load ipl subchannel number
@@ -269,71 +264,6 @@ iplstart:
.Lcpuid:.fill 8,1,0
#
-# SALIPL loader support. Based on a patch by Rob van der Heij.
-# This entry point is called directly from the SALIPL loader and
-# doesn't need a builtin ipl record.
-#
- .org 0x800
-ENTRY(start)
- stm %r0,%r15,0x07b0 # store registers
- bas %r14,.Lsetmode # Immediately switch to 64 bit mode
- basr %r12,%r0
-.base:
- l %r11,.parm
- l %r8,.cmd # pointer to command buffer
-
- ltr %r9,%r9 # do we have SALIPL parameters?
- bp .sk8x8
-
- mvc 0(64,%r8),0x00b0 # copy saved registers
- xc 64(240-64,%r8),0(%r8) # remainder of buffer
- tr 0(64,%r8),.lowcase
- b .gotr
-.sk8x8:
- mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
-.gotr:
- slr %r0,%r0
- st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
- st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
- j startup # continue with startup
-.cmd: .long COMMAND_LINE # address of command line buffer
-.parm: .long PARMAREA
-.lowcase:
- .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
- .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
- .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17
- .byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
- .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27
- .byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
- .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37
- .byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
- .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47
- .byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
- .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57
- .byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
- .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67
- .byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
- .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77
- .byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
-
- .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87
- .byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
- .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97
- .byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
- .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7
- .byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
- .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7
- .byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
- .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg
- .byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi
- .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop
- .byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr
- .byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx
- .byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz
- .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
- .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
-
-#
# startup-code at 0x10000, running in absolute addressing mode
# this is called either by the ipl loader or directly by PSW restart
# or linload or SALIPL
@@ -364,7 +294,7 @@ ENTRY(startup_kdump)
bras %r13,0f
.fill 16,4,0x0
0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
- sam31 # switch to 31 bit addressing mode
+ sam64 # switch to 64 bit addressing mode
basr %r13,0 # get base
.LPG0:
xc 0x200(256),0x200 # partially clear lowcore
@@ -395,7 +325,7 @@ ENTRY(startup_kdump)
jnz 1b
j 4f
2: l %r15,.Lstack-.LPG0(%r13)
- ahi %r15,-96
+ ahi %r15,-STACK_FRAME_OVERHEAD
la %r2,.Lals_string-.LPG0(%r13)
l %r3,.Lsclp_print-.LPG0(%r13)
basr %r14,%r3
@@ -429,8 +359,7 @@ ENTRY(startup_kdump)
.long 1, 0xc0000000
#endif
4:
- /* Continue with 64bit startup code in head64.S */
- sam64 # switch to 64 bit mode
+ /* Continue with startup code in head64.S */
jg startup_continue
.align 8
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index d7c005075..58b719fa8 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -16,7 +16,12 @@
__HEAD
ENTRY(startup_continue)
- larl %r1,sched_clock_base_cc
+ tm __LC_STFL_FAC_LIST+6,0x80 # LPP available ?
+ jz 0f
+ xc __LC_LPP+1(7,0),__LC_LPP+1 # clear lpp and current_pid
+ mvi __LC_LPP,0x80 # and set LPP_MAGIC
+ .insn s,0xb2800000,__LC_LPP # load program parameter
+0: larl %r1,sched_clock_base_cc
mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK
larl %r13,.LPG1 # get base
lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
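
As far as the instructions reveal it, the LPP doubleword at __LC_LPP is
laid out as below after this hunk; the union is made up for illustration
(the real lowcore simply has adjacent lpp/current_pid fields):

	/* mvi sets byte 0 to LPP_MAGIC, xc clears bytes 1..7;
	 * __switch_to later keeps the pid part current via
	 * __LC_CURRENT_PID */
	union lpp_doubleword {
		u64 val;		/* fed to "lpp" (0xb280) as is */
		struct {
			u8  magic;	/* 0x80, marks the field valid */
			u8  unused[3];
			u32 current_pid; /* pid of the running task */
		};
	};

This is what makes the LPP handling in __switch_to and
restart_int_handler cheap: the program parameter is reloaded with a
single "lpp" instruction straight from lowcore instead of being rebuilt.
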
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 52fbef91d..b1f0a90f9 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -17,6 +17,7 @@
#include <linux/gfp.h>
#include <linux/crash_dump.h>
#include <linux/debug_locks.h>
+#include <asm/diag.h>
#include <asm/ipl.h>
#include <asm/smp.h>
#include <asm/setup.h>
@@ -120,6 +121,7 @@ static char *dump_type_str(enum dump_type type)
* Must be in data section since the bss section
* is not cleared when these are accessed.
*/
+static u8 ipl_ssid __attribute__((__section__(".data"))) = 0;
static u16 ipl_devno __attribute__((__section__(".data"))) = 0;
u32 ipl_flags __attribute__((__section__(".data"))) = 0;
@@ -165,7 +167,7 @@ static struct ipl_parameter_block *dump_block_ccw;
static struct sclp_ipl_info sclp_ipl_info;
-int diag308(unsigned long subcode, void *addr)
+static inline int __diag308(unsigned long subcode, void *addr)
{
register unsigned long _addr asm("0") = (unsigned long) addr;
register unsigned long _rc asm("1") = 0;
@@ -178,6 +180,12 @@ int diag308(unsigned long subcode, void *addr)
: "d" (subcode) : "cc", "memory");
return _rc;
}
+
+int diag308(unsigned long subcode, void *addr)
+{
+ diag_stat_inc(DIAG_STAT_X308);
+ return __diag308(subcode, addr);
+}
EXPORT_SYMBOL_GPL(diag308);
/* SYSFS */
@@ -190,6 +198,33 @@ static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
return snprintf(page, PAGE_SIZE, _format, ##args); \
}
+#define IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk) \
+static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ unsigned long long ssid, devno; \
+ \
+ if (sscanf(buf, "0.%llx.%llx\n", &ssid, &devno) != 2) \
+ return -EINVAL; \
+ \
+ if (ssid > __MAX_SSID || devno > __MAX_SUBCHANNEL) \
+ return -EINVAL; \
+ \
+ _ipl_blk.ssid = ssid; \
+ _ipl_blk.devno = devno; \
+ return len; \
+}
+
+#define DEFINE_IPL_CCW_ATTR_RW(_prefix, _name, _ipl_blk) \
+IPL_ATTR_SHOW_FN(_prefix, _name, "0.%x.%04x\n", \
+ _ipl_blk.ssid, _ipl_blk.devno); \
+IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk); \
+static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
+ __ATTR(_name, (S_IRUGO | S_IWUSR), \
+ sys_##_prefix##_##_name##_show, \
+ sys_##_prefix##_##_name##_store) \
+
#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
@@ -388,7 +423,7 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
switch (ipl_info.type) {
case IPL_TYPE_CCW:
- return sprintf(page, "0.0.%04x\n", ipl_devno);
+ return sprintf(page, "0.%x.%04x\n", ipl_ssid, ipl_devno);
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno);
@@ -680,21 +715,14 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
+ size_t scpdata_len = count;
size_t padding;
- size_t scpdata_len;
-
- if (off < 0)
- return -EINVAL;
- if (off >= DIAG308_SCPDATA_SIZE)
- return -ENOSPC;
- if (count > DIAG308_SCPDATA_SIZE - off)
- count = DIAG308_SCPDATA_SIZE - off;
-
- memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count);
- scpdata_len = off + count;
+ if (off)
+ return -EINVAL;
+ memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf, count);
if (scpdata_len % 8) {
padding = 8 - (scpdata_len % 8);
memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len,
@@ -710,7 +738,7 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
}
static struct bin_attribute sys_reipl_fcp_scp_data_attr =
__BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read,
- reipl_fcp_scpdata_write, PAGE_SIZE);
+ reipl_fcp_scpdata_write, DIAG308_SCPDATA_SIZE);
static struct bin_attribute *reipl_fcp_bin_attrs[] = {
&sys_reipl_fcp_scp_data_attr,
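
Raising the bin_attribute size from PAGE_SIZE to DIAG308_SCPDATA_SIZE is what permits the simplification above: sysfs itself clips accesses to attr->size, so the handler no longer needs its own upper-bound and clamping logic and can simply refuse writes that do not start at offset zero (a partial write at off != 0 would break the scp_data length bookkeeping, hence the -EINVAL).
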
@@ -807,9 +835,7 @@ static struct attribute_group reipl_fcp_attr_group = {
};
/* CCW reipl device attributes */
-
-DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
- reipl_block_ccw->ipl_info.ccw.devno);
+DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ipl_info.ccw);
/* NSS wrapper */
static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
@@ -1049,8 +1075,8 @@ static void __reipl_run(void *unused)
switch (reipl_method) {
case REIPL_METHOD_CCW_CIO:
+ devid.ssid = reipl_block_ccw->ipl_info.ccw.ssid;
devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
- devid.ssid = 0;
reipl_ccw_dev(&devid);
break;
case REIPL_METHOD_CCW_VM:
@@ -1185,6 +1211,7 @@ static int __init reipl_ccw_init(void)
reipl_block_ccw_init(reipl_block_ccw);
if (ipl_info.type == IPL_TYPE_CCW) {
+ reipl_block_ccw->ipl_info.ccw.ssid = ipl_ssid;
reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
reipl_block_ccw_fill_parms(reipl_block_ccw);
}
@@ -1329,9 +1356,7 @@ static struct attribute_group dump_fcp_attr_group = {
};
/* CCW dump device attributes */
-
-DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
- dump_block_ccw->ipl_info.ccw.devno);
+DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ipl_info.ccw);
static struct attribute *dump_ccw_attrs[] = {
&sys_dump_ccw_device_attr.attr,
@@ -1411,8 +1436,8 @@ static void __dump_run(void *unused)
switch (dump_method) {
case DUMP_METHOD_CCW_CIO:
+ devid.ssid = dump_block_ccw->ipl_info.ccw.ssid;
devid.devno = dump_block_ccw->ipl_info.ccw.devno;
- devid.ssid = 0;
reipl_ccw_dev(&devid);
break;
case DUMP_METHOD_CCW_VM:
@@ -1932,14 +1957,14 @@ void __init setup_ipl(void)
ipl_info.type = get_ipl_type();
switch (ipl_info.type) {
case IPL_TYPE_CCW:
+ ipl_info.data.ccw.dev_id.ssid = ipl_ssid;
ipl_info.data.ccw.dev_id.devno = ipl_devno;
- ipl_info.data.ccw.dev_id.ssid = 0;
break;
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
+ ipl_info.data.fcp.dev_id.ssid = 0;
ipl_info.data.fcp.dev_id.devno =
IPL_PARMBLOCK_START->ipl_info.fcp.devno;
- ipl_info.data.fcp.dev_id.ssid = 0;
ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
break;
@@ -1971,6 +1996,7 @@ void __init ipl_save_parameters(void)
if (cio_get_iplinfo(&iplinfo))
return;
+ ipl_ssid = iplinfo.ssid;
ipl_devno = iplinfo.devno;
ipl_flags |= IPL_DEVNO_VALID;
if (!iplinfo.is_qdio)
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index e9d9addfa..f41d5208a 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -69,7 +69,6 @@ static const struct irq_class irqclass_sub_desc[] = {
{.irq = IRQEXT_IUC, .name = "IUC", .desc = "[EXT] IUCV"},
{.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
{.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
- {.irq = IRQEXT_CMR, .name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
{.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
{.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
{.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 0ae6f8e74..07302ce37 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -21,19 +21,20 @@
#include <asm/nmi.h>
#include <asm/crw.h>
#include <asm/switch_to.h>
-#include <asm/fpu-internal.h>
#include <asm/ctl_reg.h>
struct mcck_struct {
- int kill_task;
- int channel_report;
- int warning;
- unsigned long long mcck_code;
+ unsigned int kill_task : 1;
+ unsigned int channel_report : 1;
+ unsigned int warning : 1;
+ unsigned int etr_queue : 1;
+ unsigned int stp_queue : 1;
+ unsigned long mcck_code;
};
static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
-static void s390_handle_damage(char *msg)
+static void s390_handle_damage(void)
{
smp_send_stop();
disabled_wait((unsigned long) __builtin_return_address(0));
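
The nmi.c rework below passes the machine-check interruption code around by value as a union mci instead of through a struct mci pointer, so the raw code and its condition bits are two views of the same 8 bytes. Rough shape of the union (the authoritative definition is in asm/nmi.h; only the first few bits are sketched here):

union mci {
	unsigned long val;	/* raw machine-check interruption code */
	struct {
		u64 sd :  1;	/* system damage */
		u64 pd :  1;	/* instruction-processing damage */
		u64 sr :  1;	/* system recovery */
		u64    : 61;	/* remaining bits elided in this sketch */
	};
};
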
@@ -81,10 +82,14 @@ void s390_handle_mcck(void)
if (xchg(&mchchk_wng_posted, 1) == 0)
kill_cad_pid(SIGPWR, 1);
}
+ if (mcck.etr_queue)
+ etr_queue_work();
+ if (mcck.stp_queue)
+ stp_queue_work();
if (mcck.kill_task) {
local_irq_enable();
printk(KERN_EMERG "mcck: Terminating task because of machine "
- "malfunction (code 0x%016llx).\n", mcck.mcck_code);
+ "malfunction (code 0x%016lx).\n", mcck.mcck_code);
printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
current->comm, current->pid);
do_exit(SIGSEGV);
@@ -96,7 +101,7 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck);
* returns 0 if all registers could be validated
* returns 1 otherwise
*/
-static int notrace s390_revalidate_registers(struct mci *mci)
+static int notrace s390_validate_registers(union mci mci)
{
int kill_task;
u64 zero;
@@ -105,14 +110,14 @@ static int notrace s390_revalidate_registers(struct mci *mci)
kill_task = 0;
zero = 0;
- if (!mci->gr) {
+ if (!mci.gr) {
/*
* General purpose registers couldn't be restored and have
* unknown contents. Process needs to be terminated.
*/
kill_task = 1;
}
- if (!mci->fp) {
+ if (!mci.fp) {
/*
* Floating point registers can't be restored and
* therefore the process needs to be terminated.
@@ -121,7 +126,7 @@ static int notrace s390_revalidate_registers(struct mci *mci)
}
fpt_save_area = &S390_lowcore.floating_pt_save_area;
fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
- if (!mci->fc) {
+ if (!mci.fc) {
/*
* Floating point control register can't be restored.
* Task will be terminated.
@@ -132,7 +137,7 @@ static int notrace s390_revalidate_registers(struct mci *mci)
asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
if (!MACHINE_HAS_VX) {
- /* Revalidate floating point registers */
+ /* Validate floating point registers */
asm volatile(
" ld 0,0(%0)\n"
" ld 1,8(%0)\n"
@@ -152,10 +157,10 @@ static int notrace s390_revalidate_registers(struct mci *mci)
" ld 15,120(%0)\n"
: : "a" (fpt_save_area));
} else {
- /* Revalidate vector registers */
+ /* Validate vector registers */
union ctlreg0 cr0;
- if (!mci->vr) {
+ if (!mci.vr) {
/*
* Vector registers can't be restored and therefore
* the process needs to be terminated.
@@ -173,38 +178,38 @@ static int notrace s390_revalidate_registers(struct mci *mci)
&S390_lowcore.vector_save_area) : "1");
__ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
}
- /* Revalidate access registers */
+ /* Validate access registers */
asm volatile(
" lam 0,15,0(%0)"
: : "a" (&S390_lowcore.access_regs_save_area));
- if (!mci->ar) {
+ if (!mci.ar) {
/*
* Access registers have unknown contents.
* Terminating task.
*/
kill_task = 1;
}
- /* Revalidate control registers */
- if (!mci->cr) {
+ /* Validate control registers */
+ if (!mci.cr) {
/*
* Control registers have unknown contents.
* Can't recover and therefore stopping machine.
*/
- s390_handle_damage("invalid control registers.");
+ s390_handle_damage();
} else {
asm volatile(
" lctlg 0,15,0(%0)"
: : "a" (&S390_lowcore.cregs_save_area));
}
/*
- * We don't even try to revalidate the TOD register, since we simply
+ * We don't even try to validate the TOD register, since we simply
* can't write something sensible into that register.
*/
/*
- * See if we can revalidate the TOD programmable register with its
+ * See if we can validate the TOD programmable register with its
* old contents (should be zero) otherwise set it to zero.
*/
- if (!mci->pr)
+ if (!mci.pr)
asm volatile(
" sr 0,0\n"
" sckpf"
@@ -215,17 +220,17 @@ static int notrace s390_revalidate_registers(struct mci *mci)
" sckpf"
: : "a" (&S390_lowcore.tod_progreg_save_area)
: "0", "cc");
- /* Revalidate clock comparator register */
+ /* Validate clock comparator register */
set_clock_comparator(S390_lowcore.clock_comparator);
/* Check if old PSW is valid */
- if (!mci->wp)
+ if (!mci.wp)
/*
* Can't tell if we come from user or kernel mode
* -> stopping machine.
*/
- s390_handle_damage("old psw invalid.");
+ s390_handle_damage();
- if (!mci->ms || !mci->pm || !mci->ia)
+ if (!mci.ms || !mci.pm || !mci.ia)
kill_task = 1;
return kill_task;
@@ -249,21 +254,21 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
static unsigned long long last_ipd;
struct mcck_struct *mcck;
unsigned long long tmp;
- struct mci *mci;
+ union mci mci;
int umode;
nmi_enter();
inc_irq_stat(NMI_NMI);
- mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
+ mci.val = S390_lowcore.mcck_interruption_code;
mcck = this_cpu_ptr(&cpu_mcck);
umode = user_mode(regs);
- if (mci->sd) {
+ if (mci.sd) {
/* System damage -> stopping machine */
- s390_handle_damage("received system damage machine check.");
+ s390_handle_damage();
}
- if (mci->pd) {
- if (mci->b) {
+ if (mci.pd) {
+ if (mci.b) {
/* Processing backup -> verify if we can survive this */
u64 z_mcic, o_mcic, t_mcic;
z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
@@ -271,12 +276,11 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
1ULL<<16);
- t_mcic = *(u64 *)mci;
+ t_mcic = mci.val;
if (((t_mcic & z_mcic) != 0) ||
((t_mcic & o_mcic) != o_mcic)) {
- s390_handle_damage("processing backup machine "
- "check with damage.");
+ s390_handle_damage();
}
/*
@@ -291,64 +295,62 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
ipd_count = 1;
last_ipd = tmp;
if (ipd_count == MAX_IPD_COUNT)
- s390_handle_damage("too many ipd retries.");
+ s390_handle_damage();
spin_unlock(&ipd_lock);
} else {
/* Processing damage -> stopping machine */
- s390_handle_damage("received instruction processing "
- "damage machine check.");
+ s390_handle_damage();
}
}
- if (s390_revalidate_registers(mci)) {
+ if (s390_validate_registers(mci)) {
if (umode) {
/*
* Couldn't restore all register contents while in
* user mode -> mark task for termination.
*/
mcck->kill_task = 1;
- mcck->mcck_code = *(unsigned long long *) mci;
+ mcck->mcck_code = mci.val;
set_cpu_flag(CIF_MCCK_PENDING);
} else {
/*
* Couldn't restore all register contents while in
* kernel mode -> stopping machine.
*/
- s390_handle_damage("unable to revalidate registers.");
+ s390_handle_damage();
}
}
- if (mci->cd) {
+ if (mci.cd) {
/* Timing facility damage */
- s390_handle_damage("TOD clock damaged");
+ s390_handle_damage();
}
- if (mci->ed && mci->ec) {
+ if (mci.ed && mci.ec) {
/* External damage */
if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC))
- etr_sync_check();
+ mcck->etr_queue |= etr_sync_check();
if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
- etr_switch_to_local();
+ mcck->etr_queue |= etr_switch_to_local();
if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
- stp_sync_check();
+ mcck->stp_queue |= stp_sync_check();
if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
- stp_island_check();
+ mcck->stp_queue |= stp_island_check();
+ if (mcck->etr_queue || mcck->stp_queue)
+ set_cpu_flag(CIF_MCCK_PENDING);
}
- if (mci->se)
+ if (mci.se)
/* Storage error uncorrected */
- s390_handle_damage("received storage error uncorrected "
- "machine check.");
- if (mci->ke)
+ s390_handle_damage();
+ if (mci.ke)
/* Storage key-error uncorrected */
- s390_handle_damage("received storage key-error uncorrected "
- "machine check.");
- if (mci->ds && mci->fa)
+ s390_handle_damage();
+ if (mci.ds && mci.fa)
/* Storage degradation */
- s390_handle_damage("received storage degradation machine "
- "check.");
- if (mci->cp) {
+ s390_handle_damage();
+ if (mci.cp) {
/* Channel report word pending */
mcck->channel_report = 1;
set_cpu_flag(CIF_MCCK_PENDING);
}
- if (mci->w) {
+ if (mci.w) {
/* Warning pending */
mcck->warning = 1;
set_cpu_flag(CIF_MCCK_PENDING);
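
The z_mcic/o_mcic survivability test a few hunks up is easier to read as two predicates: no must-be-zero bit may be set, and every must-be-one bit must be set. Spelled out (equivalent to the condition in the hunk, with the negation removed):

/* survivable iff no must-be-zero bit is set and all must-be-one
 * bits are set in the raw interruption code */
int survivable = ((t_mcic & z_mcic) == 0) &&
		 ((t_mcic & o_mcic) == o_mcic);

if (!survivable)
	s390_handle_damage();
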
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index a9563409c..929c147e0 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -72,6 +72,7 @@ struct cpu_hw_events {
atomic_t ctr_set[CPUMF_CTR_SET_MAX];
u64 state, tx_state;
unsigned int flags;
+ unsigned int txn_flags;
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.ctr_set = {
@@ -82,6 +83,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
},
.state = 0,
.flags = 0,
+ .txn_flags = 0,
};
static int get_counter_set(u64 event)
@@ -538,7 +540,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
* For group events transaction, the authorization check is
* done in cpumf_pmu_commit_txn().
*/
- if (!(cpuhw->flags & PERF_EVENT_TXN))
+ if (!(cpuhw->txn_flags & PERF_PMU_TXN_ADD))
if (validate_ctr_auth(&event->hw))
return -ENOENT;
@@ -576,13 +578,22 @@ static void cpumf_pmu_del(struct perf_event *event, int flags)
/*
* Start group events scheduling transaction.
* Set flags to perform a single test at commit time.
+ *
+ * We only support PERF_PMU_TXN_ADD transactions. Save the
+ * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
+ * transactions.
*/
-static void cpumf_pmu_start_txn(struct pmu *pmu)
+static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */
+
+ cpuhw->txn_flags = txn_flags;
+ if (txn_flags & ~PERF_PMU_TXN_ADD)
+ return;
+
perf_pmu_disable(pmu);
- cpuhw->flags |= PERF_EVENT_TXN;
cpuhw->tx_state = cpuhw->state;
}
@@ -593,11 +604,18 @@ static void cpumf_pmu_start_txn(struct pmu *pmu)
*/
static void cpumf_pmu_cancel_txn(struct pmu *pmu)
{
+ unsigned int txn_flags;
struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
+
+ txn_flags = cpuhw->txn_flags;
+ cpuhw->txn_flags = 0;
+ if (txn_flags & ~PERF_PMU_TXN_ADD)
+ return;
+
WARN_ON(cpuhw->tx_state != cpuhw->state);
- cpuhw->flags &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
}
@@ -611,13 +629,20 @@ static int cpumf_pmu_commit_txn(struct pmu *pmu)
struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
u64 state;
+ WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
+
+ if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
+ cpuhw->txn_flags = 0;
+ return 0;
+ }
+
/* check if the updated state can be scheduled */
state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
state >>= CPUMF_LCCTL_ENABLE_SHIFT;
if ((state & cpuhw->info.auth_ctl) != state)
return -ENOENT;
- cpuhw->flags &= ~PERF_EVENT_TXN;
+ cpuhw->txn_flags = 0;
perf_pmu_enable(pmu);
return 0;
}
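
Together the three hooks implement the perf core's group-scheduling transaction: start_txn() snapshots the counter state for an ADD transaction, add() skips the per-event authorization check while such a transaction is in flight, and commit_txn() authorizes the whole group in one step, with cancel_txn() restoring the snapshot on failure. The core drives them roughly like this (approximate control flow, not quoted from kernel/events/core.c):

pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
/* ... pmu->add() runs for the group leader and each sibling ... */
if (pmu->commit_txn(pmu))
	pmu->cancel_txn(pmu);	/* group does not fit: roll back */
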
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index b973972f6..3d8da1e74 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1019,11 +1019,13 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr)
break;
}
- /* The host-program-parameter (hpp) contains the pid of
- * the CPU thread as set by sie64a() in entry.S.
- * If non-zero assume a guest sample.
+ /*
+ * A non-zero guest program parameter indicates a guest
+ * sample.
+ * Note that some early samples might be misaccounted to
+ * the host.
*/
- if (sfr->basic.hpp)
+ if (sfr->basic.gpp)
sde_regs->in_guest = 1;
overflow = 0;
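
This is the consumer end of the LPP plumbing elsewhere in this patch: head64.S tags the host program parameter with LPP_MAGIC at boot, smp.c keeps lpp and current_pid up to date per CPU, and while SIE runs a guest the sampled program parameter reflects the guest instead, presumably loaded by the entry.S changes in this same commit. A non-zero gpp in the basic-sampling entry therefore marks a guest sample; only the early window before the guest state is in place can be misaccounted to the host, as the new comment notes.
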
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index f2dac9f07..114ee8b96 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -23,6 +23,7 @@
#include <linux/kprobes.h>
#include <linux/random.h>
#include <linux/module.h>
+#include <linux/init_task.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/vtimer.h>
@@ -36,6 +37,9 @@
asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
+/* FPU save area for the init task */
+__vector128 init_task_fpu_regs[__NUM_VXRS] __init_task_data;
+
/*
* Return saved PC of a blocked thread. Used in kernel/sched.
* resume in entry.S does not create a new stack frame, it
@@ -87,31 +91,29 @@ void arch_release_task_struct(struct task_struct *tsk)
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
+ size_t fpu_regs_size;
+
*dst = *src;
- /* Set up a new floating-point register save area */
- dst->thread.fpu.fpc = 0;
- dst->thread.fpu.flags = 0; /* Always start with VX disabled */
- dst->thread.fpu.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
- GFP_KERNEL|__GFP_REPEAT);
- if (!dst->thread.fpu.fprs)
+ /*
+ * If the vector extension is available, it is enabled for all tasks,
+ * and, thus, the FPU register save area must be allocated accordingly.
+ */
+ fpu_regs_size = MACHINE_HAS_VX ? sizeof(__vector128) * __NUM_VXRS
+ : sizeof(freg_t) * __NUM_FPRS;
+ dst->thread.fpu.regs = kzalloc(fpu_regs_size, GFP_KERNEL|__GFP_REPEAT);
+ if (!dst->thread.fpu.regs)
return -ENOMEM;
/*
* Save the floating-point or vector register state of the current
- * task. The state is not saved for early kernel threads, for example,
- * the init_task, which do not have an allocated save area.
- * The CIF_FPU flag is set in any case to lazy clear or restore a saved
- * state when switching to a different task or returning to user space.
+ * task and set the CIF_FPU flag to lazy restore the FPU register
+ * state when returning to user space.
*/
save_fpu_regs();
dst->thread.fpu.fpc = current->thread.fpu.fpc;
- if (is_vx_task(current))
- convert_vx_to_fp(dst->thread.fpu.fprs,
- current->thread.fpu.vxrs);
- else
- memcpy(dst->thread.fpu.fprs, current->thread.fpu.fprs,
- sizeof(freg_t) * __NUM_FPRS);
+ memcpy(dst->thread.fpu.regs, current->thread.fpu.regs, fpu_regs_size);
+
return 0;
}
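
The sizing logic above, worked out with the usual s390 type sizes (an assumption of this note, not spelled out in the hunk: sizeof(__vector128) == 16 and sizeof(freg_t) == 8):

fpu_regs_size = MACHINE_HAS_VX
	? sizeof(__vector128) * __NUM_VXRS	/* 16 * 32 = 512 bytes */
	: sizeof(freg_t) * __NUM_FPRS;		/*  8 * 16 = 128 bytes */
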
@@ -169,7 +171,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
/* Don't copy runtime instrumentation info */
p->thread.ri_cb = NULL;
- p->thread.ri_signum = 0;
frame->childregs.psw.mask &= ~PSW_MASK_RI;
/* Set a new TLS ? */
@@ -199,7 +200,7 @@ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
save_fpu_regs();
fpregs->fpc = current->thread.fpu.fpc;
fpregs->pad = 0;
- if (is_vx_task(current))
+ if (MACHINE_HAS_VX)
convert_vx_to_fp((freg_t *)&fpregs->fprs,
current->thread.fpu.vxrs);
else
@@ -242,11 +243,7 @@ unsigned long arch_align_stack(unsigned long sp)
static inline unsigned long brk_rnd(void)
{
- /* 8MB for 32bit, 1GB for 64bit */
- if (is_32bit_task())
- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
- else
- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+ return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index e6e077ae3..7ce00e7a7 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -11,6 +11,7 @@
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/cpu.h>
+#include <asm/diag.h>
#include <asm/elf.h>
#include <asm/lowcore.h>
#include <asm/param.h>
@@ -20,8 +21,10 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
void notrace cpu_relax(void)
{
- if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
+ if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) {
+ diag_stat_inc(DIAG_STAT_X044);
asm volatile("diag 0,0,0x44");
+ }
barrier();
}
EXPORT_SYMBOL(cpu_relax);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index d319f36f7..01c37b36c 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -239,7 +239,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
* or the child->thread.fpu.vxrs array
*/
offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
- if (is_vx_task(child))
+ if (MACHINE_HAS_VX)
tmp = *(addr_t *)
((addr_t) child->thread.fpu.vxrs + 2*offset);
else
@@ -383,7 +383,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
* or the child->thread.fpu.vxrs array
*/
offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
- if (is_vx_task(child))
+ if (MACHINE_HAS_VX)
*(addr_t *)((addr_t)
child->thread.fpu.vxrs + 2*offset) = data;
else
@@ -617,7 +617,7 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
* or the child->thread.fpu.vxrs array
*/
offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
- if (is_vx_task(child))
+ if (MACHINE_HAS_VX)
tmp = *(__u32 *)
((addr_t) child->thread.fpu.vxrs + 2*offset);
else
@@ -742,7 +742,7 @@ static int __poke_user_compat(struct task_struct *child,
* or the child->thread.fpu.vxrs array
*/
offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
- if (is_vx_task(child))
+ if (MACHINE_HAS_VX)
*(__u32 *)((addr_t)
child->thread.fpu.vxrs + 2*offset) = tmp;
else
@@ -981,7 +981,7 @@ static int s390_fpregs_set(struct task_struct *target,
if (rc)
return rc;
- if (is_vx_task(target))
+ if (MACHINE_HAS_VX)
convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
else
memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
@@ -1047,13 +1047,10 @@ static int s390_vxrs_low_get(struct task_struct *target,
if (!MACHINE_HAS_VX)
return -ENODEV;
- if (is_vx_task(target)) {
- if (target == current)
- save_fpu_regs();
- for (i = 0; i < __NUM_VXRS_LOW; i++)
- vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
- } else
- memset(vxrs, 0, sizeof(vxrs));
+ if (target == current)
+ save_fpu_regs();
+ for (i = 0; i < __NUM_VXRS_LOW; i++)
+ vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}
@@ -1067,11 +1064,7 @@ static int s390_vxrs_low_set(struct task_struct *target,
if (!MACHINE_HAS_VX)
return -ENODEV;
- if (!is_vx_task(target)) {
- rc = alloc_vector_registers(target);
- if (rc)
- return rc;
- } else if (target == current)
+ if (target == current)
save_fpu_regs();
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
@@ -1091,13 +1084,10 @@ static int s390_vxrs_high_get(struct task_struct *target,
if (!MACHINE_HAS_VX)
return -ENODEV;
- if (is_vx_task(target)) {
- if (target == current)
- save_fpu_regs();
- memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
- sizeof(vxrs));
- } else
- memset(vxrs, 0, sizeof(vxrs));
+ if (target == current)
+ save_fpu_regs();
+ memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs));
+
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
}
@@ -1110,11 +1100,7 @@ static int s390_vxrs_high_set(struct task_struct *target,
if (!MACHINE_HAS_VX)
return -ENODEV;
- if (!is_vx_task(target)) {
- rc = alloc_vector_registers(target);
- if (rc)
- return rc;
- } else if (target == current)
+ if (target == current)
save_fpu_regs();
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index 26b4ae96f..fffa0e546 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -18,11 +18,6 @@
/* empty control block to disable RI by loading it */
struct runtime_instr_cb runtime_instr_empty_cb;
-static int runtime_instr_avail(void)
-{
- return test_facility(64);
-}
-
static void disable_runtime_instr(void)
{
struct pt_regs *regs = task_pt_regs(current);
@@ -40,7 +35,6 @@ static void disable_runtime_instr(void)
static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
{
cb->buf_limit = 0xfff;
- cb->int_requested = 1;
cb->pstate = 1;
cb->pstate_set_buf = 1;
cb->pstate_sample = 1;
@@ -57,46 +51,14 @@ void exit_thread_runtime_instr(void)
return;
disable_runtime_instr();
kfree(task->thread.ri_cb);
- task->thread.ri_signum = 0;
task->thread.ri_cb = NULL;
}
-static void runtime_instr_int_handler(struct ext_code ext_code,
- unsigned int param32, unsigned long param64)
-{
- struct siginfo info;
-
- if (!(param32 & CPU_MF_INT_RI_MASK))
- return;
-
- inc_irq_stat(IRQEXT_CMR);
-
- if (!current->thread.ri_cb)
- return;
- if (current->thread.ri_signum < SIGRTMIN ||
- current->thread.ri_signum > SIGRTMAX) {
- WARN_ON_ONCE(1);
- return;
- }
-
- memset(&info, 0, sizeof(info));
- info.si_signo = current->thread.ri_signum;
- info.si_code = SI_QUEUE;
- if (param32 & CPU_MF_INT_RI_BUF_FULL)
- info.si_int = ENOBUFS;
- else if (param32 & CPU_MF_INT_RI_HALTED)
- info.si_int = ECANCELED;
- else
- return; /* unknown reason */
-
- send_sig_info(current->thread.ri_signum, &info, current);
-}
-
-SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum)
+SYSCALL_DEFINE1(s390_runtime_instr, int, command)
{
struct runtime_instr_cb *cb;
- if (!runtime_instr_avail())
+ if (!test_facility(64))
return -EOPNOTSUPP;
if (command == S390_RUNTIME_INSTR_STOP) {
@@ -106,8 +68,7 @@ SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum)
return 0;
}
- if (command != S390_RUNTIME_INSTR_START ||
- (signum < SIGRTMIN || signum > SIGRTMAX))
+ if (command != S390_RUNTIME_INSTR_START)
return -EINVAL;
if (!current->thread.ri_cb) {
@@ -120,7 +81,6 @@ SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum)
}
init_runtime_instr_cb(cb);
- current->thread.ri_signum = signum;
/* now load the control block to make it available */
preempt_disable();
@@ -129,21 +89,3 @@ SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum)
preempt_enable();
return 0;
}
-
-static int __init runtime_instr_init(void)
-{
- int rc;
-
- if (!runtime_instr_avail())
- return 0;
-
- irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
- rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
- runtime_instr_int_handler);
- if (rc)
- irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
- else
- pr_info("Runtime instrumentation facility initialized\n");
- return rc;
-}
-device_initcall(runtime_instr_init);
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 5090d3dad..e67453b73 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -1,6 +1,6 @@
#include <linux/module.h>
#include <linux/kvm_host.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/api.h>
#include <asm/ftrace.h>
#ifdef CONFIG_FUNCTION_TRACER
@@ -10,7 +10,6 @@ EXPORT_SYMBOL(_mcount);
EXPORT_SYMBOL(sie64a);
EXPORT_SYMBOL(sie_exit);
EXPORT_SYMBOL(save_fpu_regs);
-EXPORT_SYMBOL(__ctl_set_vx);
#endif
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
diff --git a/arch/s390/kernel/sclp.c b/arch/s390/kernel/sclp.c
index fa0bdff1d..9fe7781a4 100644
--- a/arch/s390/kernel/sclp.c
+++ b/arch/s390/kernel/sclp.c
@@ -21,7 +21,7 @@ static void _sclp_wait_int(void)
__ctl_load(cr0_new, 0, 0);
psw_ext_save = S390_lowcore.external_new_psw;
- psw_mask = __extract_psw() & (PSW_MASK_EA | PSW_MASK_BA);
+ psw_mask = __extract_psw();
S390_lowcore.external_new_psw.mask = psw_mask;
psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT;
S390_lowcore.ext_int_code = 0;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index ce0cbd6ba..c837bcacf 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -764,9 +764,6 @@ static int __init setup_hwcaps(void)
get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
switch (cpu_id.machine) {
- case 0x9672:
- strcpy(elf_platform, "g5");
- break;
case 0x2064:
case 0x2066:
default: /* Use "z900" as default for 64 bit kernels. */
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 9549af102..028cc46cb 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -179,7 +179,7 @@ static int save_sigregs_ext(struct pt_regs *regs,
int i;
/* Save vector registers to signal stack */
- if (is_vx_task(current)) {
+ if (MACHINE_HAS_VX) {
for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1);
if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
@@ -199,7 +199,7 @@ static int restore_sigregs_ext(struct pt_regs *regs,
int i;
/* Restore vector registers from signal stack */
- if (is_vx_task(current)) {
+ if (MACHINE_HAS_VX) {
if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
sizeof(sregs_ext->vxrs_low)) ||
__copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
@@ -381,8 +381,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
uc_flags = 0;
if (MACHINE_HAS_VX) {
frame_size += sizeof(_sigregs_ext);
- if (is_vx_task(current))
- uc_flags |= UC_VXRS;
+ uc_flags |= UC_VXRS;
}
frame = get_sigframe(&ksig->ka, regs, frame_size);
if (frame == (void __user *) -1UL)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index c6355e6f3..9062df575 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -33,6 +33,7 @@
#include <linux/crash_dump.h>
#include <linux/memblock.h>
#include <asm/asm-offsets.h>
+#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
@@ -261,6 +262,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->thread_info = (unsigned long) task_thread_info(tsk);
lc->current_task = (unsigned long) tsk;
+ lc->lpp = LPP_MAGIC;
+ lc->current_pid = tsk->pid;
lc->user_timer = ti->user_timer;
lc->system_timer = ti->system_timer;
lc->steal_timer = 0;
@@ -375,11 +378,14 @@ int smp_vcpu_scheduled(int cpu)
void smp_yield_cpu(int cpu)
{
- if (MACHINE_HAS_DIAG9C)
+ if (MACHINE_HAS_DIAG9C) {
+ diag_stat_inc_norecursion(DIAG_STAT_X09C);
asm volatile("diag %0,0,0x9c"
: : "d" (pcpu_devices[cpu].address));
- else if (MACHINE_HAS_DIAG44)
+ } else if (MACHINE_HAS_DIAG44) {
+ diag_stat_inc_norecursion(DIAG_STAT_X044);
asm volatile("diag 0,0,0x44");
+ }
}
/*
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 8c56929c8..5378c3ea1 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -382,3 +382,4 @@ SYSCALL(sys_sendmsg,compat_sys_sendmsg) /* 370 */
SYSCALL(sys_recvfrom,compat_sys_recvfrom)
SYSCALL(sys_recvmsg,compat_sys_recvmsg)
SYSCALL(sys_shutdown,sys_shutdown)
+SYSCALL(sys_mlock2,compat_sys_mlock2)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 017c3a9bf..99f84ac31 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -542,16 +542,17 @@ arch_initcall(etr_init);
* Switch to local machine check. This is called when the last usable
* ETR port goes inactive. After switch to local the clock is not in sync.
*/
-void etr_switch_to_local(void)
+int etr_switch_to_local(void)
{
if (!etr_eacr.sl)
- return;
+ return 0;
disable_sync_clock(NULL);
if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) {
etr_eacr.es = etr_eacr.sl = 0;
etr_setr(&etr_eacr);
- queue_work(time_sync_wq, &etr_work);
+ return 1;
}
+ return 0;
}
/*
@@ -560,16 +561,22 @@ void etr_switch_to_local(void)
* After an ETR sync check the clock is not in sync. The machine check
* is broadcast to all cpus at the same time.
*/
-void etr_sync_check(void)
+int etr_sync_check(void)
{
if (!etr_eacr.es)
- return;
+ return 0;
disable_sync_clock(NULL);
if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) {
etr_eacr.es = 0;
etr_setr(&etr_eacr);
- queue_work(time_sync_wq, &etr_work);
+ return 1;
}
+ return 0;
+}
+
+void etr_queue_work(void)
+{
+ queue_work(time_sync_wq, &etr_work);
}
/*
@@ -1504,10 +1511,10 @@ static void stp_timing_alert(struct stp_irq_parm *intparm)
* After an STP sync check the clock is not in sync. The machine check
* is broadcast to all cpus at the same time.
*/
-void stp_sync_check(void)
+int stp_sync_check(void)
{
disable_sync_clock(NULL);
- queue_work(time_sync_wq, &stp_work);
+ return 1;
}
/*
@@ -1516,12 +1523,16 @@ void stp_sync_check(void)
* have matching CTN ids and have a valid stratum-1 configuration
* but the configurations do not match.
*/
-void stp_island_check(void)
+int stp_island_check(void)
{
disable_sync_clock(NULL);
- queue_work(time_sync_wq, &stp_work);
+ return 1;
}
+void stp_queue_work(void)
+{
+ queue_work(time_sync_wq, &stp_work);
+}
static int stp_sync_clock(void *data)
{
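
Together with the nmi.c hunks earlier in this patch, queueing the time-sync work is now split into two phases, since queue_work() must not be called from machine-check context. Both halves, quoted from this diff for orientation:

/* NMI (machine-check) context: only record that work is needed */
mcck->stp_queue |= stp_sync_check();
set_cpu_flag(CIF_MCCK_PENDING);

/* later, from s390_handle_mcck() in a normal context */
if (mcck.stp_queue)
	stp_queue_work();	/* safe to queue_work() here */
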
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index bf05e7fc3..40b8102fd 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -84,6 +84,7 @@ static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
struct mask_info *socket,
int one_socket_per_cpu)
{
+ struct cpu_topology_s390 *topo;
unsigned int core;
for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) {
@@ -95,15 +96,16 @@ static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
if (lcpu < 0)
continue;
for (i = 0; i <= smp_cpu_mtid; i++) {
- per_cpu(cpu_topology, lcpu + i).book_id = book->id;
- per_cpu(cpu_topology, lcpu + i).core_id = rcore;
- per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i;
+ topo = &per_cpu(cpu_topology, lcpu + i);
+ topo->book_id = book->id;
+ topo->core_id = rcore;
+ topo->thread_id = lcpu + i;
cpumask_set_cpu(lcpu + i, &book->mask);
cpumask_set_cpu(lcpu + i, &socket->mask);
if (one_socket_per_cpu)
- per_cpu(cpu_topology, lcpu + i).socket_id = rcore;
+ topo->socket_id = rcore;
else
- per_cpu(cpu_topology, lcpu + i).socket_id = socket->id;
+ topo->socket_id = socket->id;
smp_cpu_set_polarization(lcpu + i, tl_core->pp);
}
if (one_socket_per_cpu)
@@ -247,17 +249,19 @@ int topology_set_cpu_management(int fc)
static void update_cpu_masks(void)
{
+ struct cpu_topology_s390 *topo;
int cpu;
for_each_possible_cpu(cpu) {
- per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu);
- per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu);
- per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu);
+ topo = &per_cpu(cpu_topology, cpu);
+ topo->thread_mask = cpu_thread_map(cpu);
+ topo->core_mask = cpu_group_map(&socket_info, cpu);
+ topo->book_mask = cpu_group_map(&book_info, cpu);
if (!MACHINE_HAS_TOPOLOGY) {
- per_cpu(cpu_topology, cpu).thread_id = cpu;
- per_cpu(cpu_topology, cpu).core_id = cpu;
- per_cpu(cpu_topology, cpu).socket_id = cpu;
- per_cpu(cpu_topology, cpu).book_id = cpu;
+ topo->thread_id = cpu;
+ topo->core_id = cpu;
+ topo->socket_id = cpu;
+ topo->book_id = cpu;
}
}
numa_update_cpu_topology();
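
The topology rewrite is behavior-neutral: it hoists the repeated per_cpu() lookup into a local pointer, shortening the assignments. In miniature:

struct cpu_topology_s390 *topo = &per_cpu(cpu_topology, cpu);

topo->thread_id = cpu;	/* was: per_cpu(cpu_topology, cpu).thread_id = cpu; */
topo->core_id = cpu;	/* one lookup instead of one per field */
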
diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c
new file mode 100644
index 000000000..21a5df995
--- /dev/null
+++ b/arch/s390/kernel/trace.c
@@ -0,0 +1,29 @@
+/*
+ * Tracepoint definitions for s390
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/percpu.h>
+#define CREATE_TRACE_POINTS
+#include <asm/trace/diag.h>
+
+EXPORT_TRACEPOINT_SYMBOL(s390_diagnose);
+
+static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth);
+
+void trace_s390_diagnose_norecursion(int diag_nr)
+{
+ unsigned long flags;
+ unsigned int *depth;
+
+ local_irq_save(flags);
+ depth = this_cpu_ptr(&diagnose_trace_depth);
+ if (*depth == 0) {
+ (*depth)++;
+ trace_s390_diagnose(diag_nr);
+ (*depth)--;
+ }
+ local_irq_restore(flags);
+}
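
trace_s390_diagnose_norecursion() exists for call sites that can themselves be reached from inside the tracing machinery, such as smp_yield_cpu() above: the per-CPU depth counter silently drops a nested tracepoint instead of recursing. Its counted caller in diag.c plausibly mirrors diag_stat_inc() (assumed sketch, not quoted from this patch):

void diag_stat_inc_norecursion(enum diag_stat_enum nr)
{
	this_cpu_inc(diag_stat.counter[nr]);
	trace_s390_diagnose_norecursion(diag_map[nr].code);
}
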
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 9861613fb..1b18118bb 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -19,7 +19,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/api.h>
#include "entry.h"
int show_unhandled_signals = 1;
@@ -224,29 +224,6 @@ NOKPROBE_SYMBOL(illegal_op);
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
"specification exception");
-int alloc_vector_registers(struct task_struct *tsk)
-{
- __vector128 *vxrs;
- freg_t *fprs;
-
- /* Allocate vector register save area. */
- vxrs = kzalloc(sizeof(__vector128) * __NUM_VXRS,
- GFP_KERNEL|__GFP_REPEAT);
- if (!vxrs)
- return -ENOMEM;
- preempt_disable();
- if (tsk == current)
- save_fpu_regs();
- /* Copy the 16 floating point registers */
- convert_fp_to_vx(vxrs, tsk->thread.fpu.fprs);
- fprs = tsk->thread.fpu.fprs;
- tsk->thread.fpu.vxrs = vxrs;
- tsk->thread.fpu.flags |= FPU_USE_VX;
- kfree(fprs);
- preempt_enable();
- return 0;
-}
-
void vector_exception(struct pt_regs *regs)
{
int si_code, vic;
@@ -281,13 +258,6 @@ void vector_exception(struct pt_regs *regs)
do_trap(regs, SIGFPE, si_code, "vector exception");
}
-static int __init disable_vector_extension(char *str)
-{
- S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
- return 1;
-}
-__setup("novx", disable_vector_extension);
-
void data_exception(struct pt_regs *regs)
{
__u16 __user *location;
@@ -296,15 +266,6 @@ void data_exception(struct pt_regs *regs)
location = get_trap_ip(regs);
save_fpu_regs();
- /* Check for vector register enablement */
- if (MACHINE_HAS_VX && !is_vx_task(current) &&
- (current->thread.fpu.fpc & FPC_DXC_MASK) == 0xfe00) {
- alloc_vector_registers(current);
- /* Vector data exception is suppressing, rewind psw. */
- regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
- clear_pt_regs_flag(regs, PIF_PER_TRAP);
- return;
- }
if (current->thread.fpu.fpc & FPC_DXC_MASK)
signal = SIGFPE;
else
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 0d58269ff..59eddb0e1 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -299,7 +299,7 @@ static int __init vdso_init(void)
get_page(virt_to_page(vdso_data));
- smp_wmb();
+ smp_mb();
return 0;
}
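
The strengthened barrier is the whole point of this final hunk: smp_wmb() orders the preceding stores only against later stores, while smp_mb() also orders them against subsequent loads, which is presumably the window this change closes during vdso setup.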