Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/asm-offsets.c    |  15
-rw-r--r--  arch/s390/kernel/base.S           |  21
-rw-r--r--  arch/s390/kernel/compat_wrapper.c |   2
-rw-r--r--  arch/s390/kernel/crash_dump.c     |  38
-rw-r--r--  arch/s390/kernel/debug.c          |  11
-rw-r--r--  arch/s390/kernel/entry.S          |  15
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c   |   2
-rw-r--r--  arch/s390/kernel/setup.c          |  28
-rw-r--r--  arch/s390/kernel/smp.c            | 159
-rw-r--r--  arch/s390/kernel/suspend.c        |   2
-rw-r--r--  arch/s390/kernel/time.c           |   6
-rw-r--r--  arch/s390/kernel/traps.c          |   4
12 files changed, 159 insertions(+), 144 deletions(-)
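
For orientation before the hunks: a recurring pattern in this diff is replacing sclp_get_*() accessor calls with direct reads of a single, early-initialized SCLP info structure (for example sclp_get_hsa_size() becomes sclp.hsa_size, sclp_has_vt220() becomes sclp.has_vt220). The following is only an illustrative sketch of that pattern; the field names are taken from the hunks below, while the real declaration lives in arch/s390/include/asm/sclp.h and may differ in layout and contain more members.

	/* Sketch only: callers read fields of one shared info structure
	 * instead of calling per-item accessor functions. */
	struct sclp_info {
		unsigned char has_linemode : 1;
		unsigned char has_vt220 : 1;
		unsigned char has_core_type : 1;
		unsigned char mtid;
		unsigned char mtid_cp;
		unsigned char mtid_prev;
		unsigned int max_cores;
		unsigned long hsa_size;
	};
	extern struct sclp_info sclp;

	/* old style: if (src < sclp_get_hsa_size()) ... */
	/* new style: if (src < sclp.hsa_size) ...      */
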
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index c7d1b9d09..a2da259d9 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -23,15 +23,15 @@ int main(void)
 {
-	DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
-	DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
-	DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
-	BLANK();
+	DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack));
+	DEFINE(__TASK_thread, offsetof(struct task_struct, thread));
 	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
 	BLANK();
-	DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause));
-	DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address));
-	DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
+	DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
+	DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
+	DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
+	DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
+	DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
 	BLANK();
 	DEFINE(__TI_task, offsetof(struct thread_info, task));
 	DEFINE(__TI_flags, offsetof(struct thread_info, flags));
@@ -176,7 +176,6 @@ int main(void)
 	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
 	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
 	DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
-	DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
 	DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
 	DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
 	DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index daed3fde4..326f717df 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -78,15 +78,20 @@ s390_base_pgm_handler_fn:
 #
 # Calls diag 308 subcode 1 and continues execution
 #
-# The following conditions must be ensured before calling this function:
-# * Prefix register = 0
-# * Lowcore protection is disabled
-#
 ENTRY(diag308_reset)
 	larl	%r4,.Lctlregs		# Save control registers
 	stctg	%c0,%c15,0(%r4)
+	lg	%r2,0(%r4)		# Disable lowcore protection
+	nilh	%r2,0xefff
+	larl	%r4,.Lctlreg0
+	stg	%r2,0(%r4)
+	lctlg	%c0,%c0,0(%r4)
 	larl	%r4,.Lfpctl		# Floating point control register
 	stfpc	0(%r4)
+	larl	%r4,.Lprefix		# Save prefix register
+	stpx	0(%r4)
+	larl	%r4,.Lprefix_zero	# Set prefix register to 0
+	spx	0(%r4)
 	larl	%r4,.Lcontinue_psw	# Save PSW flags
 	epsw	%r2,%r3
 	stm	%r2,%r3,0(%r4)
@@ -106,6 +111,8 @@ ENTRY(diag308_reset)
 	lctlg	%c0,%c15,0(%r4)
 	larl	%r4,.Lfpctl		# Restore floating point ctl register
 	lfpc	0(%r4)
+	larl	%r4,.Lprefix		# Restore prefix register
+	spx	0(%r4)
 	larl	%r4,.Lcontinue_psw	# Restore PSW flags
 	lpswe	0(%r4)
.Lcontinue:
@@ -122,10 +129,16 @@ ENTRY(diag308_reset)
 	.section .bss
 	.align 8
+.Lctlreg0:
+	.quad	0
 .Lctlregs:
 	.rept	16
 	.quad	0
 	.endr
 .Lfpctl:
 	.long	0
+.Lprefix:
+	.long	0
+.Lprefix_zero:
+	.long	0
 .previous
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index d7fa2f0f1..f8498dde6 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -202,7 +202,7 @@ COMPAT_SYSCALL_WRAP1(epoll_create1, int, flags);
 COMPAT_SYSCALL_WRAP2(tkill, int, pid, int, sig);
 COMPAT_SYSCALL_WRAP3(tgkill, int, tgid, int, pid, int, sig);
 COMPAT_SYSCALL_WRAP5(perf_event_open, struct perf_event_attr __user *, attr_uptr, pid_t, pid, int, cpu, int, group_fd, unsigned long, flags);
-COMPAT_SYSCALL_WRAP5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, int, tls_val);
+COMPAT_SYSCALL_WRAP5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls);
 COMPAT_SYSCALL_WRAP2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags);
 COMPAT_SYSCALL_WRAP4(prlimit64, pid_t, pid, unsigned int, resource, const struct rlimit64 __user *, new_rlim, struct rlimit64 __user *, old_rlim);
 COMPAT_SYSCALL_WRAP5(name_to_handle_at, int, dfd, const char __user *, name, struct file_handle __user *, handle, int __user *, mnt_id, int, flag);
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index 49b74454d..0c6c01eb3 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -33,42 +33,18 @@ static struct memblock_type oldmem_type = {
 };

 #define for_each_dump_mem_range(i, nid, p_start, p_end, p_nid)		\
-	for (i = 0, __next_mem_range(&i, nid, &memblock.physmem,	\
+	for (i = 0, __next_mem_range(&i, nid, MEMBLOCK_NONE,		\
+				     &memblock.physmem,			\
 				     &oldmem_type, p_start,		\
 				     p_end, p_nid);			\
 	     i != (u64)ULLONG_MAX;					\
-	     __next_mem_range(&i, nid, &memblock.physmem,		\
+	     __next_mem_range(&i, nid, MEMBLOCK_NONE, &memblock.physmem,\
 			      &oldmem_type,				\
 			      p_start, p_end, p_nid))

 struct dump_save_areas dump_save_areas;

 /*
- * Allocate and add a save area for a CPU
- */
-struct save_area_ext *dump_save_area_create(int cpu)
-{
-	struct save_area_ext **save_areas, *save_area;
-
-	save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
-	if (!save_area)
-		return NULL;
-	if (cpu + 1 > dump_save_areas.count) {
-		dump_save_areas.count = cpu + 1;
-		save_areas = krealloc(dump_save_areas.areas,
-				      dump_save_areas.count * sizeof(void *),
-				      GFP_KERNEL | __GFP_ZERO);
-		if (!save_areas) {
-			kfree(save_area);
-			return NULL;
-		}
-		dump_save_areas.areas = save_areas;
-	}
-	dump_save_areas.areas[cpu] = save_area;
-	return save_area;
-}
-
-/*
  * Return physical address for virtual address
  */
 static inline void *load_real_addr(void *addr)
@@ -122,7 +98,7 @@ static ssize_t copy_oldmem_page_zfcpdump(char *buf, size_t csize,
 {
 	int rc;

-	if (src < sclp_get_hsa_size()) {
+	if (src < sclp.hsa_size) {
 		rc = memcpy_hsa(buf, src, csize, userbuf);
 	} else {
 		if (userbuf)
@@ -215,7 +191,7 @@ static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
 					   unsigned long pfn,
 					   unsigned long size, pgprot_t prot)
 {
-	unsigned long hsa_end = sclp_get_hsa_size();
+	unsigned long hsa_end = sclp.hsa_size;
 	unsigned long size_hsa;

 	if (pfn < hsa_end >> PAGE_SHIFT) {
@@ -258,7 +234,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
 			return rc;
 		}
 	} else {
-		unsigned long hsa_end = sclp_get_hsa_size();
+		unsigned long hsa_end = sclp.hsa_size;
 		if ((unsigned long) src < hsa_end) {
 			copied = min(count, hsa_end - (unsigned long) src);
 			rc = memcpy_hsa(dest, (unsigned long) src, copied, 0);
@@ -609,7 +585,7 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 	if (elfcorehdr_addr != ELFCORE_ADDR_MAX)
 		return 0;
 	/* If we cannot get HSA size for zfcpdump return error */
-	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp_get_hsa_size())
+	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !sclp.hsa_size)
 		return -ENODEV;

 	/* For kdump, exclude previous crashkernel memory */
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index c1f21aca7..6fca0e464 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1457,23 +1457,24 @@ int debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
 			 int area, debug_entry_t * entry, char *out_buf)
 {
-	struct timespec time_spec;
+	struct timespec64 time_spec;
 	char *except_str;
 	unsigned long caller;
 	int rc = 0;
 	unsigned int level;

 	level = entry->id.fields.level;
-	stck_to_timespec(entry->id.stck, &time_spec);
+	stck_to_timespec64(entry->id.stck, &time_spec);

 	if (entry->id.fields.exception)
 		except_str = "*";
 	else
 		except_str = "-";
 	caller = ((unsigned long) entry->caller) & PSW_ADDR_INSN;
-	rc += sprintf(out_buf, "%02i %011lu:%06lu %1u %1s %02i %p ",
-		      area, time_spec.tv_sec, time_spec.tv_nsec / 1000, level,
-		      except_str, entry->id.fields.cpuid, (void *) caller);
+	rc += sprintf(out_buf, "%02i %011lld:%06lu %1u %1s %02i %p ",
+		      area, (long long)time_spec.tv_sec,
+		      time_spec.tv_nsec / 1000, level, except_str,
+		      entry->id.fields.cpuid, (void *)caller);
 	return rc;
 }
 EXPORT_SYMBOL(debug_dflt_header_fn);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 99b44acbf..84062e7a7 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -178,17 +178,21 @@ _PIF_WORK = (_PIF_PER_TRAP)
  */
 ENTRY(__switch_to)
 	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
-	stg	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
-	lg	%r4,__THREAD_info(%r2)		# get thread_info of prev
-	lg	%r5,__THREAD_info(%r3)		# get thread_info of next
+	lgr	%r1,%r2
+	aghi	%r1,__TASK_thread		# thread_struct of prev task
+	lg	%r4,__TASK_thread_info(%r2)	# get thread_info of prev
+	lg	%r5,__TASK_thread_info(%r3)	# get thread_info of next
+	stg	%r15,__THREAD_ksp(%r1)		# store kernel stack of prev
+	lgr	%r1,%r3
+	aghi	%r1,__TASK_thread		# thread_struct of next task
 	lgr	%r15,%r5
 	aghi	%r15,STACK_INIT			# end of kernel stack of next
 	stg	%r3,__LC_CURRENT		# store task struct of next
 	stg	%r5,__LC_THREAD_INFO		# store thread info of next
 	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
+	lg	%r15,__THREAD_ksp(%r1)		# load kernel stack of next
 	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
 	mvc	__LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
-	lg	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
 	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	br	%r14
@@ -417,6 +421,7 @@ ENTRY(pgm_check_handler)
 	LAST_BREAK %r14
 	lg	%r15,__LC_KERNEL_STACK
 	lg	%r14,__TI_task(%r12)
+	aghi	%r14,__TASK_thread	# pointer to thread_struct
 	lghi	%r13,__LC_PGM_TDB
 	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
 	jz	2f
@@ -1005,7 +1010,7 @@ ENTRY(sie64a)
.Lsie_gmap:
 	lg	%r14,__SF_EMPTY(%r15)	# get control block pointer
 	oi	__SIE_PROG0C+3(%r14),1	# we are going into SIE now
-	tm	__SIE_PROG20+3(%r14),1	# last exit...
+	tm	__SIE_PROG20+3(%r14),3	# last exit...
 	jnz	.Lsie_done
 	LPP	__SF_EMPTY(%r15)	# set guest id
 	sie	0(%r14)
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index e6a1578fc..afe05bfb7 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1572,7 +1572,7 @@ static int param_set_sfb_size(const char *val, const struct kernel_param *kp)
 }

 #define param_check_sfb_size(name, p) __param_check(name, p, void)
-static struct kernel_param_ops param_ops_sfb_size = {
+static const struct kernel_param_ops param_ops_sfb_size = {
 	.set = param_set_sfb_size,
 	.get = param_get_sfb_size,
 };
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 7262fe438..ca070d260 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -128,9 +128,9 @@ __setup("condev=", condev_setup);
 static void __init set_preferred_console(void)
 {
 	if (MACHINE_IS_KVM) {
-		if (sclp_has_vt220())
+		if (sclp.has_vt220)
 			add_preferred_console("ttyS", 1, NULL);
-		else if (sclp_has_linemode())
+		else if (sclp.has_linemode)
 			add_preferred_console("ttyS", 0, NULL);
 		else
 			add_preferred_console("hvc", 0, NULL);
@@ -510,8 +510,8 @@ static void reserve_memory_end(void)
 {
 #ifdef CONFIG_CRASH_DUMP
 	if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
-	    !OLDMEM_BASE && sclp_get_hsa_size()) {
-		memory_end = sclp_get_hsa_size();
+	    !OLDMEM_BASE && sclp.hsa_size) {
+		memory_end = sclp.hsa_size;
 		memory_end &= PAGE_MASK;
 		memory_end_set = 1;
 	}
@@ -576,7 +576,7 @@ static void __init reserve_crashkernel(void)
 		crash_base = low;
 	} else {
 		/* Find suitable area in free memory */
-		low = max_t(unsigned long, crash_size, sclp_get_hsa_size());
+		low = max_t(unsigned long, crash_size, sclp.hsa_size);
 		high = crash_base ? crash_base + crash_size : ULONG_MAX;

 		if (crash_base && crash_base < low) {
@@ -640,19 +640,24 @@ static void __init check_initrd(void)
 }

 /*
- * Reserve all kernel text
+ * Reserve memory used for lowcore/command line/kernel image.
  */
 static void __init reserve_kernel(void)
 {
-	unsigned long start_pfn;
-	start_pfn = PFN_UP(__pa(&_end));
+	unsigned long start_pfn = PFN_UP(__pa(&_end));

+#ifdef CONFIG_DMA_API_DEBUG
 	/*
-	 * Reserve memory used for lowcore/command line/kernel image.
+	 * DMA_API_DEBUG code stumbles over addresses from the
+	 * range [_ehead, _stext]. Mark the memory as reserved
+	 * so it is not used for CONFIG_DMA_API_DEBUG=y.
 	 */
+	memblock_reserve(0, PFN_PHYS(start_pfn));
+#else
 	memblock_reserve(0, (unsigned long)_ehead);
 	memblock_reserve((unsigned long)_stext,
 			 PFN_PHYS(start_pfn) - (unsigned long)_stext);
+#endif
 }
@@ -863,6 +868,11 @@ void __init setup_arch(char **cmdline_p)
 	check_initrd();
 	reserve_crashkernel();
+	/*
+	 * Be aware that smp_save_dump_cpus() triggers a system reset.
+	 * Therefore CPU and device initialization should be done afterwards.
+	 */
+	smp_save_dump_cpus();

 	setup_resources();
 	setup_vmcoreinfo();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index efd2c1968..6f54c175f 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -31,6 +31,7 @@
 #include <linux/cpu.h>
 #include <linux/slab.h>
 #include <linux/crash_dump.h>
+#include <linux/memblock.h>
 #include <asm/asm-offsets.h>
 #include <asm/switch_to.h>
 #include <asm/facility.h>
@@ -69,7 +70,7 @@ struct pcpu {
 	u16 address;			/* physical cpu address */
 };

-static u8 boot_cpu_type;
+static u8 boot_core_type;
 static struct pcpu pcpu_devices[NR_CPUS];

 unsigned int smp_cpu_mt_shift;
@@ -531,15 +532,12 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);

 #ifdef CONFIG_CRASH_DUMP

-static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
+static void __smp_store_cpu_state(struct save_area_ext *sa_ext, u16 address,
+				  int is_boot_cpu)
 {
-	void *lc = pcpu_devices[0].lowcore;
-	struct save_area_ext *sa_ext;
+	void *lc = (void *)(unsigned long) store_prefix();
 	unsigned long vx_sa;

-	sa_ext = dump_save_area_create(cpu);
-	if (!sa_ext)
-		panic("could not allocate memory for save area\n");
 	if (is_boot_cpu) {
 		/* Copy the registers of the boot CPU. */
 		copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
@@ -554,14 +552,33 @@ static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
 	if (!MACHINE_HAS_VX)
 		return;
 	/* Get the VX registers */
-	vx_sa = __get_free_page(GFP_KERNEL);
+	vx_sa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 	if (!vx_sa)
 		panic("could not allocate memory for VX save area\n");
 	__pcpu_sigp_relax(address, SIGP_STORE_ADDITIONAL_STATUS, vx_sa, NULL);
 	memcpy(sa_ext->vx_regs, (void *) vx_sa, sizeof(sa_ext->vx_regs));
-	free_page(vx_sa);
+	memblock_free(vx_sa, PAGE_SIZE);
 }

+int smp_store_status(int cpu)
+{
+	unsigned long vx_sa;
+	struct pcpu *pcpu;
+
+	pcpu = pcpu_devices + cpu;
+	if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
+			      0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
+		return -EIO;
+	if (!MACHINE_HAS_VX)
+		return 0;
+	vx_sa = __pa(pcpu->lowcore->vector_save_area_addr);
+	__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
+			  vx_sa, NULL);
+	return 0;
+}
+
+#endif /* CONFIG_CRASH_DUMP */
+
 /*
  * Collect CPU state of the previous, crashed system.
  * There are four cases:
@@ -589,10 +606,12 @@ static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
  * old system. The ELF sections are picked up by the crash_dump code
  * via elfcorehdr_addr.
  */
-static void __init smp_store_cpu_states(struct sclp_cpu_info *info)
+void __init smp_save_dump_cpus(void)
 {
-	unsigned int cpu, address, i, j;
-	int is_boot_cpu;
+#ifdef CONFIG_CRASH_DUMP
+	int addr, cpu, boot_cpu_addr, max_cpu_addr;
+	struct save_area_ext *sa_ext;
+	bool is_boot_cpu;

 	if (is_kdump_kernel())
 		/* Previous system stored the CPU states. Nothing to do. */
 		return;
@@ -601,43 +620,37 @@ static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
 		/* No previous system present, normal boot. */
 		return;
 	/* Set multi-threading state to the previous system. */
-	pcpu_set_smt(sclp_get_mtid_prev());
-	/* Collect CPU states. */
-	cpu = 0;
-	for (i = 0; i < info->configured; i++) {
-		/* Skip CPUs with different CPU type. */
-		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
+	pcpu_set_smt(sclp.mtid_prev);
+	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
+	for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++) {
+		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0, NULL) ==
+		    SIGP_CC_NOT_OPERATIONAL)
 			continue;
-		for (j = 0; j <= smp_cpu_mtid; j++, cpu++) {
-			address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j;
-			is_boot_cpu = (address == pcpu_devices[0].address);
-			if (is_boot_cpu && !OLDMEM_BASE)
-				/* Skip boot CPU for standard zfcp dump. */
-				continue;
-			/* Get state for this CPu. */
-			__smp_store_cpu_state(cpu, address, is_boot_cpu);
-		}
+		cpu += 1;
 	}
-}
-
-int smp_store_status(int cpu)
-{
-	unsigned long vx_sa;
-	struct pcpu *pcpu;
-
-	pcpu = pcpu_devices + cpu;
-	if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
-			      0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
-		return -EIO;
-	if (!MACHINE_HAS_VX)
-		return 0;
-	vx_sa = __pa(pcpu->lowcore->vector_save_area_addr);
-	__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
-			  vx_sa, NULL);
-	return 0;
-}
-
+	dump_save_areas.areas = (void *)memblock_alloc(sizeof(void *) * cpu, 8);
+	dump_save_areas.count = cpu;
+	boot_cpu_addr = stap();
+	for (cpu = 0, addr = 0; addr <= max_cpu_addr; addr++) {
+		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0, NULL) ==
+		    SIGP_CC_NOT_OPERATIONAL)
+			continue;
+		sa_ext = (void *) memblock_alloc(sizeof(*sa_ext), 8);
+		dump_save_areas.areas[cpu] = sa_ext;
+		if (!sa_ext)
+			panic("could not allocate memory for save area\n");
+		is_boot_cpu = (addr == boot_cpu_addr);
+		cpu += 1;
+		if (is_boot_cpu && !OLDMEM_BASE)
+			/* Skip boot CPU for standard zfcp dump. */
+			continue;
+		/* Get state for this CPU. */
+		__smp_store_cpu_state(sa_ext, addr, is_boot_cpu);
+	}
+	diag308_reset();
+	pcpu_set_smt(0);
 #endif /* CONFIG_CRASH_DUMP */
+}

 void smp_cpu_set_polarization(int cpu, int val)
 {
@@ -649,21 +662,22 @@ int smp_cpu_get_polarization(int cpu)
 	return pcpu_devices[cpu].polarization;
 }

-static struct sclp_cpu_info *smp_get_cpu_info(void)
+static struct sclp_core_info *smp_get_core_info(void)
 {
 	static int use_sigp_detection;
-	struct sclp_cpu_info *info;
+	struct sclp_core_info *info;
 	int address;

 	info = kzalloc(sizeof(*info), GFP_KERNEL);
-	if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
+	if (info && (use_sigp_detection || sclp_get_core_info(info))) {
 		use_sigp_detection = 1;
-		for (address = 0; address <= MAX_CPU_ADDRESS;
+		for (address = 0;
+		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
 		     address += (1U << smp_cpu_mt_shift)) {
 			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
 			    SIGP_CC_NOT_OPERATIONAL)
 				continue;
-			info->cpu[info->configured].core_id =
+			info->core[info->configured].core_id =
 				address >> smp_cpu_mt_shift;
 			info->configured++;
 		}
@@ -674,7 +688,7 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)

 static int smp_add_present_cpu(int cpu);

-static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
+static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
 {
 	struct pcpu *pcpu;
 	cpumask_t avail;
@@ -685,9 +699,9 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
 	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
 	cpu = cpumask_first(&avail);
 	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
-		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
+		if (sclp.has_core_type && info->core[i].type != boot_core_type)
 			continue;
-		address = info->cpu[i].core_id << smp_cpu_mt_shift;
+		address = info->core[i].core_id << smp_cpu_mt_shift;
 		for (j = 0; j <= smp_cpu_mtid; j++) {
 			if (pcpu_find_address(cpu_present_mask, address + j))
 				continue;
@@ -713,41 +727,37 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
 static void __init smp_detect_cpus(void)
 {
 	unsigned int cpu, mtid, c_cpus, s_cpus;
-	struct sclp_cpu_info *info;
+	struct sclp_core_info *info;
 	u16 address;

 	/* Get CPU information */
-	info = smp_get_cpu_info();
+	info = smp_get_core_info();
 	if (!info)
 		panic("smp_detect_cpus failed to allocate memory\n");

 	/* Find boot CPU type */
-	if (info->has_cpu_type) {
+	if (sclp.has_core_type) {
 		address = stap();
 		for (cpu = 0; cpu < info->combined; cpu++)
-			if (info->cpu[cpu].core_id == address) {
+			if (info->core[cpu].core_id == address) {
 				/* The boot cpu dictates the cpu type. */
-				boot_cpu_type = info->cpu[cpu].type;
+				boot_core_type = info->core[cpu].type;
 				break;
 			}
 		if (cpu >= info->combined)
 			panic("Could not find boot CPU type");
 	}

-#ifdef CONFIG_CRASH_DUMP
-	/* Collect CPU state of previous system */
-	smp_store_cpu_states(info);
-#endif
-
 	/* Set multi-threading state for the current system */
-	mtid = sclp_get_mtid(boot_cpu_type);
+	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
 	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
 	pcpu_set_smt(mtid);

 	/* Print number of CPUs */
 	c_cpus = s_cpus = 0;
 	for (cpu = 0; cpu < info->combined; cpu++) {
-		if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
+		if (sclp.has_core_type &&
+		    info->core[cpu].type != boot_core_type)
 			continue;
 		if (cpu < info->configured)
 			c_cpus += smp_cpu_mtid + 1;
@@ -880,12 +890,13 @@ void __noreturn cpu_die(void)

 void __init smp_fill_possible_mask(void)
 {
-	unsigned int possible, sclp, cpu;
+	unsigned int possible, sclp_max, cpu;

-	sclp = min(smp_max_threads, sclp_get_mtid_max() + 1);
-	sclp = sclp_get_max_cpu()*sclp ?: nr_cpu_ids;
+	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
+	sclp_max = min(smp_max_threads, sclp_max);
+	sclp_max = sclp.max_cores * sclp_max ?: nr_cpu_ids;
 	possible = setup_possible_cpus ?: nr_cpu_ids;
-	possible = min(possible, sclp);
+	possible = min(possible, sclp_max);
 	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
 		set_cpu_possible(cpu, true);
 }
@@ -976,7 +987,7 @@ static ssize_t cpu_configure_store(struct device *dev,
 	case 0:
 		if (pcpu->state != CPU_STATE_CONFIGURED)
 			break;
-		rc = sclp_cpu_deconfigure(pcpu->address >> smp_cpu_mt_shift);
+		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
 		if (rc)
 			break;
 		for (i = 0; i <= smp_cpu_mtid; i++) {
@@ -991,7 +1002,7 @@ static ssize_t cpu_configure_store(struct device *dev,
 	case 1:
 		if (pcpu->state != CPU_STATE_STANDBY)
 			break;
-		rc = sclp_cpu_configure(pcpu->address >> smp_cpu_mt_shift);
+		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
 		if (rc)
 			break;
 		for (i = 0; i <= smp_cpu_mtid; i++) {
@@ -1106,10 +1117,10 @@ out:

 int __ref smp_rescan_cpus(void)
 {
-	struct sclp_cpu_info *info;
+	struct sclp_core_info *info;
 	int nr;

-	info = smp_get_cpu_info();
+	info = smp_get_core_info();
 	if (!info)
 		return -ENOMEM;
 	get_online_cpus();
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index d3236c9e2..39e2f41b6 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -9,10 +9,10 @@
 #include <linux/pfn.h>
 #include <linux/suspend.h>
 #include <linux/mm.h>
+#include <linux/pci.h>
 #include <asm/ctl_reg.h>
 #include <asm/ipl.h>
 #include <asm/cio.h>
-#include <asm/pci.h>
 #include <asm/sections.h>
 #include "entry.h"
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 170ddd201..9e733d965 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -76,7 +76,7 @@ unsigned long long monotonic_clock(void)
 }
 EXPORT_SYMBOL(monotonic_clock);

-void tod_to_timeval(__u64 todval, struct timespec *xt)
+void tod_to_timeval(__u64 todval, struct timespec64 *xt)
 {
 	unsigned long long sec;

@@ -181,12 +181,12 @@ static void timing_alert_interrupt(struct ext_code ext_code,
 static void etr_reset(void);
 static void stp_reset(void);

-void read_persistent_clock(struct timespec *ts)
+void read_persistent_clock64(struct timespec64 *ts)
 {
 	tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts);
 }

-void read_boot_clock(struct timespec *ts)
+void read_boot_clock64(struct timespec64 *ts)
 {
 	tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, ts);
 }
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 4d96c9f53..7bea81d8a 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -259,7 +259,7 @@ void vector_exception(struct pt_regs *regs)
 	}

 	/* get vector interrupt code from fpc */
-	asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+	asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
 	vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
 	switch (vic) {
 	case 1: /* invalid vector operation */
@@ -297,7 +297,7 @@ void data_exception(struct pt_regs *regs)

 	location = get_trap_ip(regs);

-	asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+	asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
 	/* Check for vector register enablement */
 	if (MACHINE_HAS_VX && !current->thread.vxrs &&
 	    (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
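
Note on the debug.c and time.c hunks: they move the TOD-to-timespec conversion over to struct timespec64 so that tv_sec stays 64-bit on all configurations. The following is a minimal, illustrative sketch of such a conversion, assuming bit 51 of the s390 TOD clock corresponds to one microsecond; it mirrors the role of tod_to_timeval() in the hunk above but is not claimed to be the kernel's exact implementation.

	#include <linux/types.h>
	#include <linux/time64.h>
	#include <asm/div64.h>

	/* Sketch only: convert a TOD clock value (bit 51 = 1 microsecond)
	 * into a struct timespec64. */
	static void tod_to_timespec64_sketch(u64 todval, struct timespec64 *xt)
	{
		u64 sec;

		sec = todval >> 12;		/* TOD units -> microseconds */
		do_div(sec, USEC_PER_SEC);	/* microseconds -> seconds */
		xt->tv_sec = sec;
		todval -= (sec * USEC_PER_SEC) << 12;	/* remainder, TOD units */
		xt->tv_nsec = (todval * 1000) >> 12;	/* -> nanoseconds */
	}

With a 64-bit tv_sec, the %011lu format used by debug_dflt_header_fn() no longer matches on 31-bit builds, which is why the debug.c hunk switches to %011lld and casts tv_sec to long long.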