author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-01-20 14:01:31 -0300 |
---|---|---|
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-01-20 14:01:31 -0300 |
commit | b4b7ff4b08e691656c9d77c758fc355833128ac0 (patch) | |
tree | 82fcb00e6b918026dc9f2d1f05ed8eee83874cc0 /arch/s390 | |
parent | 35acfa0fc609f2a2cd95cef4a6a9c3a5c38f1778 (diff) | |
Linux-libre 4.4-gnu (pck-4.4-gnu)
Diffstat (limited to 'arch/s390')
88 files changed, 1477 insertions, 1257 deletions
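
One pattern recurs throughout the diff below: each diagnose instruction is split into a raw `__diagXXX()` inline and a thin wrapper that first calls the new `diag_stat_inc()`, which is declared next to the `DIAG_STAT_X*` enum in the asm/diag.h hunk. The sketch that follows is only illustrative: it assumes `diag_stat_inc()` is backed by a simple per-CPU counter array indexed by that enum; the actual backing implementation is not shown in this excerpt, while the caller-side wrapper is taken from the hypfs_diag.c hunk.

```c
/*
 * Illustrative sketch only -- assumes diag_stat_inc() is backed by a
 * per-CPU counter array indexed by the DIAG_STAT_* enum from asm/diag.h.
 * The real backing implementation is not shown in this excerpt.
 */
#include <linux/percpu.h>
#include <asm/diag.h>

struct diag_stat {
	unsigned int counter[NR_DIAG_STAT];
};

static DEFINE_PER_CPU(struct diag_stat, diag_stat);

void diag_stat_inc(enum diag_stat_enum nr)
{
	/* bump the per-CPU counter for this diagnose code */
	this_cpu_inc(diag_stat.counter[nr]);
}

/* Caller-side pattern, as used throughout the patch (hypfs_diag.c hunk): */
static int diag204(unsigned long subcode, unsigned long size, void *addr)
{
	diag_stat_inc(DIAG_STAT_X204);	/* count the diagnose 0x204 call */
	return __diag204(subcode, size, addr);
}
```
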
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 1d57000b1..3a55f493c 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -101,6 +101,7 @@ config S390 select ARCH_SAVE_PAGE_KEYS if HIBERNATION select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_NUMA_BALANCING + select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF select ARCH_WANTS_PROT_NUMA_PROT_NONE select ARCH_WANT_IPC_PARSE_VERSION @@ -118,6 +119,7 @@ config S390 select HAVE_ARCH_EARLY_PFN_TO_NID select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_SECCOMP_FILTER + select HAVE_ARCH_SOFT_DIRTY select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES @@ -582,6 +584,7 @@ menuconfig PCI bool "PCI support" select HAVE_DMA_ATTRS select PCI_MSI + select IOMMU_SUPPORT help Enable PCI support. diff --git a/arch/s390/crypto/sha.h b/arch/s390/crypto/sha.h index f4e9dc716..10f200790 100644 --- a/arch/s390/crypto/sha.h +++ b/arch/s390/crypto/sha.h @@ -19,7 +19,7 @@ #include <crypto/sha.h> /* must be big enough for the largest SHA variant */ -#define SHA_MAX_STATE_SIZE 16 +#define SHA_MAX_STATE_SIZE (SHA512_DIGEST_SIZE / 4) #define SHA_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE struct s390_sha_ctx { diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c index 5eeffeefa..045035796 100644 --- a/arch/s390/hypfs/hypfs_diag.c +++ b/arch/s390/hypfs/hypfs_diag.c @@ -15,6 +15,7 @@ #include <linux/string.h> #include <linux/vmalloc.h> #include <linux/mm.h> +#include <asm/diag.h> #include <asm/ebcdic.h> #include "hypfs.h" @@ -336,7 +337,7 @@ static inline __u64 phys_cpu__ctidx(enum diag204_format type, void *hdr) /* Diagnose 204 functions */ -static int diag204(unsigned long subcode, unsigned long size, void *addr) +static inline int __diag204(unsigned long subcode, unsigned long size, void *addr) { register unsigned long _subcode asm("0") = subcode; register unsigned long _size asm("1") = size; @@ -351,6 +352,12 @@ static int diag204(unsigned long subcode, unsigned long size, void *addr) return _size; } +static int diag204(unsigned long subcode, unsigned long size, void *addr) +{ + diag_stat_inc(DIAG_STAT_X204); + return __diag204(subcode, size, addr); +} + /* * For the old diag subcode 4 with simple data format we have to use real * memory. 
If we use subcode 6 or 7 with extended data format, we can (and @@ -505,6 +512,7 @@ static int diag224(void *ptr) { int rc = -EOPNOTSUPP; + diag_stat_inc(DIAG_STAT_X224); asm volatile( " diag %1,%2,0x224\n" "0: lhi %0,0x0\n" diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c index 24c747a0f..0f1927cbb 100644 --- a/arch/s390/hypfs/hypfs_diag0c.c +++ b/arch/s390/hypfs/hypfs_diag0c.c @@ -8,6 +8,7 @@ #include <linux/slab.h> #include <linux/cpu.h> +#include <asm/diag.h> #include <asm/hypfs.h> #include "hypfs.h" @@ -18,6 +19,7 @@ */ static void diag0c(struct hypfs_diag0c_entry *entry) { + diag_stat_inc(DIAG_STAT_X00C); asm volatile ( " sam31\n" " diag %0,%0,0x0c\n" diff --git a/arch/s390/hypfs/hypfs_sprp.c b/arch/s390/hypfs/hypfs_sprp.c index dd42a26d0..c9e5c72f7 100644 --- a/arch/s390/hypfs/hypfs_sprp.c +++ b/arch/s390/hypfs/hypfs_sprp.c @@ -13,6 +13,7 @@ #include <linux/types.h> #include <linux/uaccess.h> #include <asm/compat.h> +#include <asm/diag.h> #include <asm/sclp.h> #include "hypfs.h" @@ -22,7 +23,7 @@ #define DIAG304_CMD_MAX 2 -static unsigned long hypfs_sprp_diag304(void *data, unsigned long cmd) +static inline unsigned long __hypfs_sprp_diag304(void *data, unsigned long cmd) { register unsigned long _data asm("2") = (unsigned long) data; register unsigned long _rc asm("3"); @@ -34,6 +35,12 @@ static unsigned long hypfs_sprp_diag304(void *data, unsigned long cmd) return _rc; } +static unsigned long hypfs_sprp_diag304(void *data, unsigned long cmd) +{ + diag_stat_inc(DIAG_STAT_X304); + return __hypfs_sprp_diag304(data, cmd); +} + static void hypfs_sprp_free(const void *data) { free_page((unsigned long) data); diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c index afbe07907..44feac38c 100644 --- a/arch/s390/hypfs/hypfs_vm.c +++ b/arch/s390/hypfs/hypfs_vm.c @@ -9,6 +9,7 @@ #include <linux/errno.h> #include <linux/string.h> #include <linux/vmalloc.h> +#include <asm/diag.h> #include <asm/ebcdic.h> #include <asm/timex.h> #include "hypfs.h" @@ -66,6 +67,7 @@ static int diag2fc(int size, char* query, void *addr) memset(parm_list.aci_grp, 0x40, NAME_LEN); rc = -1; + diag_stat_inc(DIAG_STAT_X2FC); asm volatile( " diag %0,%1,0x2fc\n" "0:\n" diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h index 16887c5fd..a6263d4e8 100644 --- a/arch/s390/include/asm/appldata.h +++ b/arch/s390/include/asm/appldata.h @@ -7,6 +7,7 @@ #ifndef _ASM_S390_APPLDATA_H #define _ASM_S390_APPLDATA_H +#include <asm/diag.h> #include <asm/io.h> #define APPLDATA_START_INTERVAL_REC 0x80 @@ -53,6 +54,7 @@ static inline int appldata_asm(struct appldata_product_id *id, parm_list.buffer_length = length; parm_list.product_id_addr = (unsigned long) id; parm_list.buffer_addr = virt_to_phys(buffer); + diag_stat_inc(DIAG_STAT_X0DC); asm volatile( " diag %1,%0,0xdc" : "=d" (ry) diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 117fa5c92..911064aa5 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -36,7 +36,6 @@ \ typecheck(atomic_t *, ptr); \ asm volatile( \ - __barrier \ op_string " %0,%2,%1\n" \ __barrier \ : "=d" (old_val), "+Q" ((ptr)->counter) \ @@ -180,7 +179,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u) \ typecheck(atomic64_t *, ptr); \ asm volatile( \ - __barrier \ op_string " %0,%2,%1\n" \ __barrier \ : "=d" (old_val), "+Q" ((ptr)->counter) \ diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index d48fe0162..d68e11e0d 100644 --- 
a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -22,10 +22,10 @@ #define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0) -#define rmb() mb() -#define wmb() mb() -#define dma_rmb() rmb() -#define dma_wmb() wmb() +#define rmb() barrier() +#define wmb() barrier() +#define dma_rmb() mb() +#define dma_wmb() mb() #define smp_mb() mb() #define smp_rmb() rmb() #define smp_wmb() wmb() diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 9b68e98a7..8043f10da 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h @@ -11,30 +11,25 @@ * big-endian system because, unlike little endian, the number of each * bit depends on the word size. * - * The bitop functions are defined to work on unsigned longs, so for an - * s390x system the bits end up numbered: + * The bitop functions are defined to work on unsigned longs, so the bits + * end up numbered: * |63..............0|127............64|191...........128|255...........192| - * and on s390: - * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224| * * There are a few little-endian macros used mostly for filesystem - * bitmaps, these work on similar bit arrays layouts, but - * byte-oriented: + * bitmaps, these work on similar bit array layouts, but byte-oriented: * |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56| * - * The main difference is that bit 3-5 (64b) or 3-4 (32b) in the bit - * number field needs to be reversed compared to the big-endian bit - * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b). + * The main difference is that bit 3-5 in the bit number field needs to be + * reversed compared to the big-endian bit fields. This can be achieved by + * XOR with 0x38. * - * We also have special functions which work with an MSB0 encoding: - * on an s390x system the bits are numbered: + * We also have special functions which work with an MSB0 encoding. + * The bits are numbered: * |0..............63|64............127|128...........191|192...........255| - * and on s390: - * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255| * - * The main difference is that bit 0-63 (64b) or 0-31 (32b) in the bit - * number field needs to be reversed compared to the LSB0 encoded bit - * fields. This can be achieved by XOR with 0x3f (64b) or 0x1f (32b). + * The main difference is that bit 0-63 in the bit number field needs to be + * reversed compared to the LSB0 encoded bit fields. This can be achieved by + * XOR with 0x3f. * */ @@ -64,7 +59,6 @@ \ typecheck(unsigned long *, (__addr)); \ asm volatile( \ - __barrier \ __op_string " %0,%2,%1\n" \ __barrier \ : "=d" (__old), "+Q" (*(__addr)) \ @@ -276,12 +270,32 @@ static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr) return (*addr >> (nr & 7)) & 1; } +static inline int test_and_set_bit_lock(unsigned long nr, + volatile unsigned long *ptr) +{ + if (test_bit(nr, ptr)) + return 1; + return test_and_set_bit(nr, ptr); +} + +static inline void clear_bit_unlock(unsigned long nr, + volatile unsigned long *ptr) +{ + smp_mb__before_atomic(); + clear_bit(nr, ptr); +} + +static inline void __clear_bit_unlock(unsigned long nr, + volatile unsigned long *ptr) +{ + smp_mb(); + __clear_bit(nr, ptr); +} + /* * Functions which use MSB0 bit numbering. 
- * On an s390x system the bits are numbered: + * The bits are numbered: * |0..............63|64............127|128...........191|192...........255| - * and on s390: - * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255| */ unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size); unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size, @@ -446,7 +460,6 @@ static inline int fls(int word) #include <asm-generic/bitops/ffz.h> #include <asm-generic/bitops/find.h> #include <asm-generic/bitops/hweight.h> -#include <asm-generic/bitops/lock.h> #include <asm-generic/bitops/sched.h> #include <asm-generic/bitops/le.h> #include <asm-generic/bitops/ext2-atomic-setbit.h> diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index 096339207..d1e7b0a0f 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h @@ -5,6 +5,7 @@ #define _ASM_S390_CIO_H_ #include <linux/spinlock.h> +#include <linux/bitops.h> #include <asm/types.h> #define LPM_ANYPATH 0xff @@ -296,12 +297,22 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1, return 0; } +/** + * pathmask_to_pos() - find the position of the left-most bit in a pathmask + * @mask: pathmask with at least one bit set + */ +static inline u8 pathmask_to_pos(u8 mask) +{ + return 8 - ffs(mask); +} + void channel_subsystem_reinit(void); extern void css_schedule_reprobe(void); extern void reipl_ccw_dev(struct ccw_dev_id *id); struct cio_iplinfo { + u8 ssid; u16 devno; int is_qdio; }; diff --git a/arch/s390/include/asm/cmb.h b/arch/s390/include/asm/cmb.h index 806eac12e..ed2630c23 100644 --- a/arch/s390/include/asm/cmb.h +++ b/arch/s390/include/asm/cmb.h @@ -6,6 +6,7 @@ struct ccw_device; extern int enable_cmf(struct ccw_device *cdev); extern int disable_cmf(struct ccw_device *cdev); +extern int __disable_cmf(struct ccw_device *cdev); extern u64 cmf_read(struct ccw_device *cdev, int index); extern int cmf_readall(struct ccw_device *cdev, struct cmbdata *data); diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h index 411464f4c..24ea6948e 100644 --- a/arch/s390/include/asm/cmpxchg.h +++ b/arch/s390/include/asm/cmpxchg.h @@ -32,7 +32,7 @@ __old; \ }) -#define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \ +#define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \ ({ \ register __typeof__(*(p1)) __old1 asm("2") = (o1); \ register __typeof__(*(p2)) __old2 asm("3") = (o2); \ @@ -40,7 +40,7 @@ register __typeof__(*(p2)) __new2 asm("5") = (n2); \ int cc; \ asm volatile( \ - insn " %[old],%[new],%[ptr]\n" \ + " cdsg %[old],%[new],%[ptr]\n" \ " ipm %[cc]\n" \ " srl %[cc],28" \ : [cc] "=d" (cc), [old] "+d" (__old1), "+d" (__old2) \ @@ -50,30 +50,6 @@ !cc; \ }) -#define __cmpxchg_double_4(p1, p2, o1, o2, n1, n2) \ - __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cds") - -#define __cmpxchg_double_8(p1, p2, o1, o2, n1, n2) \ - __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, "cdsg") - -extern void __cmpxchg_double_called_with_bad_pointer(void); - -#define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \ -({ \ - int __ret; \ - switch (sizeof(*(p1))) { \ - case 4: \ - __ret = __cmpxchg_double_4(p1, p2, o1, o2, n1, n2); \ - break; \ - case 8: \ - __ret = __cmpxchg_double_8(p1, p2, o1, o2, n1, n2); \ - break; \ - default: \ - __cmpxchg_double_called_with_bad_pointer(); \ - } \ - __ret; \ -}) - #define cmpxchg_double(p1, p2, o1, o2, n1, n2) \ ({ \ __typeof__(p1) __p1 = (p1); \ @@ -81,7 +57,7 @@ extern void __cmpxchg_double_called_with_bad_pointer(void); 
BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long)); \ BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long)); \ VM_BUG_ON((unsigned long)((__p1) + 1) != (unsigned long)(__p2));\ - __cmpxchg_double_8(__p1, __p2, o1, o2, n1, n2); \ + __cmpxchg_double(__p1, __p2, o1, o2, n1, n2); \ }) #define system_has_cmpxchg_double() 1 diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h index 5243a8679..9dd04b9e9 100644 --- a/arch/s390/include/asm/cpu_mf.h +++ b/arch/s390/include/asm/cpu_mf.h @@ -22,15 +22,10 @@ #define CPU_MF_INT_SF_LSDA (1 << 22) /* loss of sample data alert */ #define CPU_MF_INT_CF_CACA (1 << 7) /* counter auth. change alert */ #define CPU_MF_INT_CF_LCDA (1 << 6) /* loss of counter data alert */ -#define CPU_MF_INT_RI_HALTED (1 << 5) /* run-time instr. halted */ -#define CPU_MF_INT_RI_BUF_FULL (1 << 4) /* run-time instr. program - buffer full */ - #define CPU_MF_INT_CF_MASK (CPU_MF_INT_CF_CACA|CPU_MF_INT_CF_LCDA) #define CPU_MF_INT_SF_MASK (CPU_MF_INT_SF_IAE|CPU_MF_INT_SF_ISE| \ CPU_MF_INT_SF_PRA|CPU_MF_INT_SF_SACA| \ CPU_MF_INT_SF_LSDA) -#define CPU_MF_INT_RI_MASK (CPU_MF_INT_RI_HALTED|CPU_MF_INT_RI_BUF_FULL) /* CPU measurement facility support */ static inline int cpum_cf_avail(void) diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index 17a373576..d7697ab80 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h @@ -46,8 +46,6 @@ static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit) __ctl_load(reg, cr, cr); } -void __ctl_set_vx(void); - void smp_ctl_set_bit(int cr, int bit); void smp_ctl_clear_bit(int cr, int bit); diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h index 7e91c5807..5fac921c1 100644 --- a/arch/s390/include/asm/diag.h +++ b/arch/s390/include/asm/diag.h @@ -8,6 +8,34 @@ #ifndef _ASM_S390_DIAG_H #define _ASM_S390_DIAG_H +#include <linux/percpu.h> + +enum diag_stat_enum { + DIAG_STAT_X008, + DIAG_STAT_X00C, + DIAG_STAT_X010, + DIAG_STAT_X014, + DIAG_STAT_X044, + DIAG_STAT_X064, + DIAG_STAT_X09C, + DIAG_STAT_X0DC, + DIAG_STAT_X204, + DIAG_STAT_X210, + DIAG_STAT_X224, + DIAG_STAT_X250, + DIAG_STAT_X258, + DIAG_STAT_X288, + DIAG_STAT_X2C4, + DIAG_STAT_X2FC, + DIAG_STAT_X304, + DIAG_STAT_X308, + DIAG_STAT_X500, + NR_DIAG_STAT +}; + +void diag_stat_inc(enum diag_stat_enum nr); +void diag_stat_inc_norecursion(enum diag_stat_enum nr); + /* * Diagnose 10: Release page range */ @@ -18,6 +46,7 @@ static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn) start_addr = start_pfn << PAGE_SHIFT; end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT; + diag_stat_inc(DIAG_STAT_X010); asm volatile( "0: diag %0,%1,0x10\n" "1:\n" diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 3ad48f22d..bab6739a1 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -206,9 +206,16 @@ do { \ } while (0) #endif /* CONFIG_COMPAT */ -extern unsigned long mmap_rnd_mask; - -#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask) +/* + * Cache aliasing on the latest machines calls for a mapping granularity + * of 512KB. For 64-bit processes use a 512KB alignment and a randomization + * of up to 1GB. For 31-bit processes the virtual address space is limited, + * use no alignment and limit the randomization to 8MB. + */ +#define BRK_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ffffUL) +#define MMAP_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ff80UL) +#define MMAP_ALIGN_MASK (is_32bit_task() ? 
0 : 0x7fUL) +#define STACK_RND_MASK MMAP_RND_MASK #define ARCH_DLINFO \ do { \ diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h index f7e5c3668..105f90e63 100644 --- a/arch/s390/include/asm/etr.h +++ b/arch/s390/include/asm/etr.h @@ -211,8 +211,9 @@ static inline int etr_ptff(void *ptff_block, unsigned int func) #define ETR_PTFF_SGS 0x43 /* set gross steering rate */ /* Functions needed by the machine check handler */ -void etr_switch_to_local(void); -void etr_sync_check(void); +int etr_switch_to_local(void); +int etr_sync_check(void); +void etr_queue_work(void); /* notifier for syncs */ extern struct atomic_notifier_head s390_epoch_delta_notifier; @@ -253,7 +254,8 @@ struct stp_sstpi { } __attribute__ ((packed)); /* Functions needed by the machine check handler */ -void stp_sync_check(void); -void stp_island_check(void); +int stp_sync_check(void); +int stp_island_check(void); +void stp_queue_work(void); #endif /* __S390_ETR_H */ diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h new file mode 100644 index 000000000..5e04f3cbd --- /dev/null +++ b/arch/s390/include/asm/fpu/api.h @@ -0,0 +1,30 @@ +/* + * In-kernel FPU support functions + * + * Copyright IBM Corp. 2015 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + */ + +#ifndef _ASM_S390_FPU_API_H +#define _ASM_S390_FPU_API_H + +void save_fpu_regs(void); + +static inline int test_fp_ctl(u32 fpc) +{ + u32 orig_fpc; + int rc; + + asm volatile( + " efpc %1\n" + " sfpc %2\n" + "0: sfpc %1\n" + " la %0,0\n" + "1:\n" + EX_TABLE(0b,1b) + : "=d" (rc), "=d" (orig_fpc) + : "d" (fpc), "0" (-EINVAL)); + return rc; +} + +#endif /* _ASM_S390_FPU_API_H */ diff --git a/arch/s390/include/asm/fpu-internal.h b/arch/s390/include/asm/fpu/internal.h index 55dc2c0fb..2559b16da 100644 --- a/arch/s390/include/asm/fpu-internal.h +++ b/arch/s390/include/asm/fpu/internal.h @@ -1,5 +1,5 @@ /* - * General floating pointer and vector register helpers + * FPU state and register content conversion primitives * * Copyright IBM Corp. 
2015 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> @@ -8,50 +8,9 @@ #ifndef _ASM_S390_FPU_INTERNAL_H #define _ASM_S390_FPU_INTERNAL_H -#define FPU_USE_VX 1 /* Vector extension is active */ - -#ifndef __ASSEMBLY__ - -#include <linux/errno.h> #include <linux/string.h> -#include <asm/linkage.h> #include <asm/ctl_reg.h> -#include <asm/sigcontext.h> - -struct fpu { - __u32 fpc; /* Floating-point control */ - __u32 flags; - union { - void *regs; - freg_t *fprs; /* Floating-point register save area */ - __vector128 *vxrs; /* Vector register save area */ - }; -}; - -void save_fpu_regs(void); - -#define is_vx_fpu(fpu) (!!((fpu)->flags & FPU_USE_VX)) -#define is_vx_task(tsk) (!!((tsk)->thread.fpu.flags & FPU_USE_VX)) - -/* VX array structure for address operand constraints in inline assemblies */ -struct vx_array { __vector128 _[__NUM_VXRS]; }; - -static inline int test_fp_ctl(u32 fpc) -{ - u32 orig_fpc; - int rc; - - asm volatile( - " efpc %1\n" - " sfpc %2\n" - "0: sfpc %1\n" - " la %0,0\n" - "1:\n" - EX_TABLE(0b,1b) - : "=d" (rc), "=d" (orig_fpc) - : "d" (fpc), "0" (-EINVAL)); - return rc; -} +#include <asm/fpu/types.h> static inline void save_vx_regs_safe(__vector128 *vxrs) { @@ -89,7 +48,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs) static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu) { fpregs->pad = 0; - if (is_vx_fpu(fpu)) + if (MACHINE_HAS_VX) convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs); else memcpy((freg_t *)&fpregs->fprs, fpu->fprs, @@ -98,13 +57,11 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu) static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu) { - if (is_vx_fpu(fpu)) + if (MACHINE_HAS_VX) convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs); else memcpy(fpu->fprs, (freg_t *)&fpregs->fprs, sizeof(fpregs->fprs)); } -#endif - #endif /* _ASM_S390_FPU_INTERNAL_H */ diff --git a/arch/s390/include/asm/fpu/types.h b/arch/s390/include/asm/fpu/types.h new file mode 100644 index 000000000..14a8b0c14 --- /dev/null +++ b/arch/s390/include/asm/fpu/types.h @@ -0,0 +1,25 @@ +/* + * FPU data structures + * + * Copyright IBM Corp. 
2015 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + */ + +#ifndef _ASM_S390_FPU_TYPES_H +#define _ASM_S390_FPU_TYPES_H + +#include <asm/sigcontext.h> + +struct fpu { + __u32 fpc; /* Floating-point control */ + union { + void *regs; + freg_t *fprs; /* Floating-point register save area */ + __vector128 *vxrs; /* Vector register save area */ + }; +}; + +/* VX array structure for address operand constraints in inline assemblies */ +struct vx_array { __vector128 _[__NUM_VXRS]; }; + +#endif /* _ASM_S390_FPU_TYPES_H */ diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h index 113cd963d..51ff96d9f 100644 --- a/arch/s390/include/asm/idle.h +++ b/arch/s390/include/asm/idle.h @@ -24,4 +24,6 @@ struct s390_idle_data { extern struct device_attribute dev_attr_idle_count; extern struct device_attribute dev_attr_idle_time_us; +void psw_idle(struct s390_idle_data *, unsigned long); + #endif /* _S390_IDLE_H */ diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index 39ae6a359..86634e71b 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h @@ -64,7 +64,8 @@ struct ipl_block_fcp { struct ipl_block_ccw { u8 reserved1[84]; - u8 reserved2[2]; + u16 reserved2 : 13; + u8 ssid : 3; u16 devno; u8 vm_flags; u8 reserved3[3]; diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h index ff95d15a2..f97b055de 100644 --- a/arch/s390/include/asm/irq.h +++ b/arch/s390/include/asm/irq.h @@ -47,7 +47,6 @@ enum interruption_class { IRQEXT_IUC, IRQEXT_CMS, IRQEXT_CMC, - IRQEXT_CMR, IRQEXT_FTP, IRQIO_CIO, IRQIO_QAI, @@ -96,6 +95,19 @@ enum irq_subclass { IRQ_SUBCLASS_SERVICE_SIGNAL = 9, }; +#define CR0_IRQ_SUBCLASS_MASK \ + ((1UL << (63 - 30)) /* Warning Track */ | \ + (1UL << (63 - 48)) /* Malfunction Alert */ | \ + (1UL << (63 - 49)) /* Emergency Signal */ | \ + (1UL << (63 - 50)) /* External Call */ | \ + (1UL << (63 - 52)) /* Clock Comparator */ | \ + (1UL << (63 - 53)) /* CPU Timer */ | \ + (1UL << (63 - 54)) /* Service Signal */ | \ + (1UL << (63 - 57)) /* Interrupt Key */ | \ + (1UL << (63 - 58)) /* Measurement Alert */ | \ + (1UL << (63 - 59)) /* Timing Alert */ | \ + (1UL << (63 - 62))) /* IUCV */ + void irq_subclass_register(enum irq_subclass subclass); void irq_subclass_unregister(enum irq_subclass subclass); diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 8ced42609..efaac2c3b 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -22,7 +22,7 @@ #include <linux/kvm.h> #include <asm/debug.h> #include <asm/cpu.h> -#include <asm/fpu-internal.h> +#include <asm/fpu/api.h> #include <asm/isc.h> #define KVM_MAX_VCPUS 64 @@ -644,5 +644,7 @@ static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslot static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {} static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {} +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} #endif diff --git a/arch/s390/include/asm/kvm_para.h b/arch/s390/include/asm/kvm_para.h index e0f842308..41393052a 100644 --- a/arch/s390/include/asm/kvm_para.h +++ b/arch/s390/include/asm/kvm_para.h @@ -27,10 +27,9 @@ #define __S390_KVM_PARA_H #include <uapi/asm/kvm_para.h> +#include <asm/diag.h> - - -static inline long kvm_hypercall0(unsigned long nr) +static inline long __kvm_hypercall0(unsigned long nr) { register unsigned long __nr asm("1") = 
nr; register long __rc asm("2"); @@ -40,7 +39,13 @@ static inline long kvm_hypercall0(unsigned long nr) return __rc; } -static inline long kvm_hypercall1(unsigned long nr, unsigned long p1) +static inline long kvm_hypercall0(unsigned long nr) +{ + diag_stat_inc(DIAG_STAT_X500); + return __kvm_hypercall0(nr); +} + +static inline long __kvm_hypercall1(unsigned long nr, unsigned long p1) { register unsigned long __nr asm("1") = nr; register unsigned long __p1 asm("2") = p1; @@ -51,7 +56,13 @@ static inline long kvm_hypercall1(unsigned long nr, unsigned long p1) return __rc; } -static inline long kvm_hypercall2(unsigned long nr, unsigned long p1, +static inline long kvm_hypercall1(unsigned long nr, unsigned long p1) +{ + diag_stat_inc(DIAG_STAT_X500); + return __kvm_hypercall1(nr, p1); +} + +static inline long __kvm_hypercall2(unsigned long nr, unsigned long p1, unsigned long p2) { register unsigned long __nr asm("1") = nr; @@ -65,7 +76,14 @@ static inline long kvm_hypercall2(unsigned long nr, unsigned long p1, return __rc; } -static inline long kvm_hypercall3(unsigned long nr, unsigned long p1, +static inline long kvm_hypercall2(unsigned long nr, unsigned long p1, + unsigned long p2) +{ + diag_stat_inc(DIAG_STAT_X500); + return __kvm_hypercall2(nr, p1, p2); +} + +static inline long __kvm_hypercall3(unsigned long nr, unsigned long p1, unsigned long p2, unsigned long p3) { register unsigned long __nr asm("1") = nr; @@ -80,8 +98,14 @@ static inline long kvm_hypercall3(unsigned long nr, unsigned long p1, return __rc; } +static inline long kvm_hypercall3(unsigned long nr, unsigned long p1, + unsigned long p2, unsigned long p3) +{ + diag_stat_inc(DIAG_STAT_X500); + return __kvm_hypercall3(nr, p1, p2, p3); +} -static inline long kvm_hypercall4(unsigned long nr, unsigned long p1, +static inline long __kvm_hypercall4(unsigned long nr, unsigned long p1, unsigned long p2, unsigned long p3, unsigned long p4) { @@ -98,7 +122,15 @@ static inline long kvm_hypercall4(unsigned long nr, unsigned long p1, return __rc; } -static inline long kvm_hypercall5(unsigned long nr, unsigned long p1, +static inline long kvm_hypercall4(unsigned long nr, unsigned long p1, + unsigned long p2, unsigned long p3, + unsigned long p4) +{ + diag_stat_inc(DIAG_STAT_X500); + return __kvm_hypercall4(nr, p1, p2, p3, p4); +} + +static inline long __kvm_hypercall5(unsigned long nr, unsigned long p1, unsigned long p2, unsigned long p3, unsigned long p4, unsigned long p5) { @@ -116,7 +148,15 @@ static inline long kvm_hypercall5(unsigned long nr, unsigned long p1, return __rc; } -static inline long kvm_hypercall6(unsigned long nr, unsigned long p1, +static inline long kvm_hypercall5(unsigned long nr, unsigned long p1, + unsigned long p2, unsigned long p3, + unsigned long p4, unsigned long p5) +{ + diag_stat_inc(DIAG_STAT_X500); + return __kvm_hypercall5(nr, p1, p2, p3, p4, p5); +} + +static inline long __kvm_hypercall6(unsigned long nr, unsigned long p1, unsigned long p2, unsigned long p3, unsigned long p4, unsigned long p5, unsigned long p6) @@ -137,6 +177,15 @@ static inline long kvm_hypercall6(unsigned long nr, unsigned long p1, return __rc; } +static inline long kvm_hypercall6(unsigned long nr, unsigned long p1, + unsigned long p2, unsigned long p3, + unsigned long p4, unsigned long p5, + unsigned long p6) +{ + diag_stat_inc(DIAG_STAT_X500); + return __kvm_hypercall6(nr, p1, p2, p3, p4, p5, p6); +} + /* kvm on s390 is always paravirtualization enabled */ static inline int kvm_para_available(void) { diff --git 
a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 663f23e37..afe1cfebf 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -67,7 +67,7 @@ struct _lowcore { __u8 pad_0x00c4[0x00c8-0x00c4]; /* 0x00c4 */ __u32 stfl_fac_list; /* 0x00c8 */ __u8 pad_0x00cc[0x00e8-0x00cc]; /* 0x00cc */ - __u32 mcck_interruption_code[2]; /* 0x00e8 */ + __u64 mcck_interruption_code; /* 0x00e8 */ __u8 pad_0x00f0[0x00f4-0x00f0]; /* 0x00f0 */ __u32 external_damage_code; /* 0x00f4 */ __u64 failing_storage_address; /* 0x00f8 */ @@ -132,7 +132,14 @@ struct _lowcore { /* Address space pointer. */ __u64 kernel_asce; /* 0x0358 */ __u64 user_asce; /* 0x0360 */ - __u64 current_pid; /* 0x0368 */ + + /* + * The lpp and current_pid fields form a + * 64-bit value that is set as program + * parameter with the LPP instruction. + */ + __u32 lpp; /* 0x0368 */ + __u32 current_pid; /* 0x036c */ /* SMP info area */ __u32 cpu_nr; /* 0x0370 */ diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h index 3027a5a72..b75fd9103 100644 --- a/arch/s390/include/asm/nmi.h +++ b/arch/s390/include/asm/nmi.h @@ -11,51 +11,62 @@ #ifndef _ASM_S390_NMI_H #define _ASM_S390_NMI_H +#include <linux/const.h> #include <linux/types.h> -struct mci { - __u32 sd : 1; /* 00 system damage */ - __u32 pd : 1; /* 01 instruction-processing damage */ - __u32 sr : 1; /* 02 system recovery */ - __u32 : 1; /* 03 */ - __u32 cd : 1; /* 04 timing-facility damage */ - __u32 ed : 1; /* 05 external damage */ - __u32 : 1; /* 06 */ - __u32 dg : 1; /* 07 degradation */ - __u32 w : 1; /* 08 warning pending */ - __u32 cp : 1; /* 09 channel-report pending */ - __u32 sp : 1; /* 10 service-processor damage */ - __u32 ck : 1; /* 11 channel-subsystem damage */ - __u32 : 2; /* 12-13 */ - __u32 b : 1; /* 14 backed up */ - __u32 : 1; /* 15 */ - __u32 se : 1; /* 16 storage error uncorrected */ - __u32 sc : 1; /* 17 storage error corrected */ - __u32 ke : 1; /* 18 storage-key error uncorrected */ - __u32 ds : 1; /* 19 storage degradation */ - __u32 wp : 1; /* 20 psw mwp validity */ - __u32 ms : 1; /* 21 psw mask and key validity */ - __u32 pm : 1; /* 22 psw program mask and cc validity */ - __u32 ia : 1; /* 23 psw instruction address validity */ - __u32 fa : 1; /* 24 failing storage address validity */ - __u32 vr : 1; /* 25 vector register validity */ - __u32 ec : 1; /* 26 external damage code validity */ - __u32 fp : 1; /* 27 floating point register validity */ - __u32 gr : 1; /* 28 general register validity */ - __u32 cr : 1; /* 29 control register validity */ - __u32 : 1; /* 30 */ - __u32 st : 1; /* 31 storage logical validity */ - __u32 ie : 1; /* 32 indirect storage error */ - __u32 ar : 1; /* 33 access register validity */ - __u32 da : 1; /* 34 delayed access exception */ - __u32 : 7; /* 35-41 */ - __u32 pr : 1; /* 42 tod programmable register validity */ - __u32 fc : 1; /* 43 fp control register validity */ - __u32 ap : 1; /* 44 ancillary report */ - __u32 : 1; /* 45 */ - __u32 ct : 1; /* 46 cpu timer validity */ - __u32 cc : 1; /* 47 clock comparator validity */ - __u32 : 16; /* 47-63 */ +#define MCCK_CODE_SYSTEM_DAMAGE _BITUL(63) +#define MCCK_CODE_CPU_TIMER_VALID _BITUL(63 - 46) +#define MCCK_CODE_PSW_MWP_VALID _BITUL(63 - 20) +#define MCCK_CODE_PSW_IA_VALID _BITUL(63 - 23) + +#ifndef __ASSEMBLY__ + +union mci { + unsigned long val; + struct { + u64 sd : 1; /* 00 system damage */ + u64 pd : 1; /* 01 instruction-processing damage */ + u64 sr : 1; /* 02 system recovery */ + u64 : 1; /* 03 */ + u64 cd : 1; /* 04 
timing-facility damage */ + u64 ed : 1; /* 05 external damage */ + u64 : 1; /* 06 */ + u64 dg : 1; /* 07 degradation */ + u64 w : 1; /* 08 warning pending */ + u64 cp : 1; /* 09 channel-report pending */ + u64 sp : 1; /* 10 service-processor damage */ + u64 ck : 1; /* 11 channel-subsystem damage */ + u64 : 2; /* 12-13 */ + u64 b : 1; /* 14 backed up */ + u64 : 1; /* 15 */ + u64 se : 1; /* 16 storage error uncorrected */ + u64 sc : 1; /* 17 storage error corrected */ + u64 ke : 1; /* 18 storage-key error uncorrected */ + u64 ds : 1; /* 19 storage degradation */ + u64 wp : 1; /* 20 psw mwp validity */ + u64 ms : 1; /* 21 psw mask and key validity */ + u64 pm : 1; /* 22 psw program mask and cc validity */ + u64 ia : 1; /* 23 psw instruction address validity */ + u64 fa : 1; /* 24 failing storage address validity */ + u64 vr : 1; /* 25 vector register validity */ + u64 ec : 1; /* 26 external damage code validity */ + u64 fp : 1; /* 27 floating point register validity */ + u64 gr : 1; /* 28 general register validity */ + u64 cr : 1; /* 29 control register validity */ + u64 : 1; /* 30 */ + u64 st : 1; /* 31 storage logical validity */ + u64 ie : 1; /* 32 indirect storage error */ + u64 ar : 1; /* 33 access register validity */ + u64 da : 1; /* 34 delayed access exception */ + u64 : 7; /* 35-41 */ + u64 pr : 1; /* 42 tod programmable register validity */ + u64 fc : 1; /* 43 fp control register validity */ + u64 ap : 1; /* 44 ancillary report */ + u64 : 1; /* 45 */ + u64 ct : 1; /* 46 cpu timer validity */ + u64 cc : 1; /* 47 clock comparator validity */ + u64 : 16; /* 47-63 */ + }; }; struct pt_regs; @@ -63,4 +74,5 @@ struct pt_regs; extern void s390_handle_mcck(void); extern void s390_do_machine_check(struct pt_regs *regs); +#endif /* __ASSEMBLY__ */ #endif /* _ASM_S390_NMI_H */ diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 34d960353..c873e682b 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -62,6 +62,8 @@ struct zpci_bar_struct { u8 size; /* order 2 exponent */ }; +struct s390_domain; + /* Private data per function */ struct zpci_dev { struct pci_dev *pdev; @@ -118,6 +120,8 @@ struct zpci_dev { struct dentry *debugfs_dev; struct dentry *debugfs_perf; + + struct s390_domain *s390_domain; /* s390 IOMMU domain data */ }; static inline bool zdev_enabled(struct zpci_dev *zdev) diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h index 30b4c179c..1aac41e83 100644 --- a/arch/s390/include/asm/pci_dma.h +++ b/arch/s390/include/asm/pci_dma.h @@ -192,5 +192,10 @@ static inline unsigned long *get_st_pto(unsigned long entry) /* Prototypes */ int zpci_dma_init_device(struct zpci_dev *); void zpci_dma_exit_device(struct zpci_dev *); +void dma_free_seg_table(unsigned long); +unsigned long *dma_alloc_cpu_table(void); +void dma_cleanup_tables(unsigned long *); +unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr); +void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags); #endif diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index bdb2f5112..024f85f94 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -193,9 +193,15 @@ static inline int is_module_addr(void *addr) #define _PAGE_UNUSED 0x080 /* SW bit for pgste usage state */ #define __HAVE_ARCH_PTE_SPECIAL +#ifdef CONFIG_MEM_SOFT_DIRTY +#define _PAGE_SOFT_DIRTY 0x002 /* SW pte soft dirty bit */ +#else +#define _PAGE_SOFT_DIRTY 0x000 +#endif + /* Set of bits not changed in 
pte_modify */ #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \ - _PAGE_YOUNG) + _PAGE_YOUNG | _PAGE_SOFT_DIRTY) /* * handle_pte_fault uses pte_present and pte_none to find out the pte type @@ -285,6 +291,12 @@ static inline int is_module_addr(void *addr) #define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */ #define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */ +#ifdef CONFIG_MEM_SOFT_DIRTY +#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */ +#else +#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */ +#endif + /* * Segment table entry encoding (R = read-only, I = invalid, y = young bit): * dy..R...I...wr @@ -589,6 +601,43 @@ static inline int pmd_protnone(pmd_t pmd) } #endif +static inline int pte_soft_dirty(pte_t pte) +{ + return pte_val(pte) & _PAGE_SOFT_DIRTY; +} +#define pte_swp_soft_dirty pte_soft_dirty + +static inline pte_t pte_mksoft_dirty(pte_t pte) +{ + pte_val(pte) |= _PAGE_SOFT_DIRTY; + return pte; +} +#define pte_swp_mksoft_dirty pte_mksoft_dirty + +static inline pte_t pte_clear_soft_dirty(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_SOFT_DIRTY; + return pte; +} +#define pte_swp_clear_soft_dirty pte_clear_soft_dirty + +static inline int pmd_soft_dirty(pmd_t pmd) +{ + return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY; +} + +static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) +{ + pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY; + return pmd; +} + +static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd) +{ + pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY; + return pmd; +} + static inline pgste_t pgste_get_lock(pte_t *ptep) { unsigned long new = 0; @@ -889,7 +938,7 @@ static inline pte_t pte_mkclean(pte_t pte) static inline pte_t pte_mkdirty(pte_t pte) { - pte_val(pte) |= _PAGE_DIRTY; + pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY; if (pte_val(pte) & _PAGE_WRITE) pte_val(pte) &= ~_PAGE_PROTECT; return pte; @@ -1218,8 +1267,10 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma, pte_t entry, int dirty) { pgste_t pgste; + pte_t oldpte; - if (pte_same(*ptep, entry)) + oldpte = *ptep; + if (pte_same(oldpte, entry)) return 0; if (mm_has_pgste(vma->vm_mm)) { pgste = pgste_get_lock(ptep); @@ -1229,7 +1280,8 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma, ptep_flush_direct(vma->vm_mm, address, ptep); if (mm_has_pgste(vma->vm_mm)) { - pgste_set_key(ptep, pgste, entry, vma->vm_mm); + if (pte_val(oldpte) & _PAGE_INVALID) + pgste_set_key(ptep, pgste, entry, vma->vm_mm); pgste = pgste_set_pte(ptep, pgste, entry); pgste_set_unlock(ptep, pgste); } else @@ -1340,7 +1392,8 @@ static inline pmd_t pmd_mkclean(pmd_t pmd) static inline pmd_t pmd_mkdirty(pmd_t pmd) { if (pmd_large(pmd)) { - pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY; + pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | + _SEGMENT_ENTRY_SOFT_DIRTY; if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; } @@ -1371,7 +1424,8 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) if (pmd_large(pmd)) { pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE | _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG | - _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT; + _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT | + _SEGMENT_ENTRY_SOFT_DIRTY; pmd_val(pmd) |= massage_pgprot_pmd(newprot); if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)) pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 085fb0d3c..b16c3d0a1 100644 --- a/arch/s390/include/asm/processor.h +++ 
b/arch/s390/include/asm/processor.h @@ -11,15 +11,19 @@ #ifndef __ASM_S390_PROCESSOR_H #define __ASM_S390_PROCESSOR_H +#include <linux/const.h> + #define CIF_MCCK_PENDING 0 /* machine check handling is pending */ #define CIF_ASCE 1 /* user asce needs fixup / uaccess */ #define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */ -#define CIF_FPU 3 /* restore vector registers */ +#define CIF_FPU 3 /* restore FPU registers */ +#define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */ -#define _CIF_MCCK_PENDING (1<<CIF_MCCK_PENDING) -#define _CIF_ASCE (1<<CIF_ASCE) -#define _CIF_NOHZ_DELAY (1<<CIF_NOHZ_DELAY) -#define _CIF_FPU (1<<CIF_FPU) +#define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING) +#define _CIF_ASCE _BITUL(CIF_ASCE) +#define _CIF_NOHZ_DELAY _BITUL(CIF_NOHZ_DELAY) +#define _CIF_FPU _BITUL(CIF_FPU) +#define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ) #ifndef __ASSEMBLY__ @@ -30,21 +34,22 @@ #include <asm/ptrace.h> #include <asm/setup.h> #include <asm/runtime_instr.h> -#include <asm/fpu-internal.h> +#include <asm/fpu/types.h> +#include <asm/fpu/internal.h> static inline void set_cpu_flag(int flag) { - S390_lowcore.cpu_flags |= (1U << flag); + S390_lowcore.cpu_flags |= (1UL << flag); } static inline void clear_cpu_flag(int flag) { - S390_lowcore.cpu_flags &= ~(1U << flag); + S390_lowcore.cpu_flags &= ~(1UL << flag); } static inline int test_cpu_flag(int flag) { - return !!(S390_lowcore.cpu_flags & (1U << flag)); + return !!(S390_lowcore.cpu_flags & (1UL << flag)); } #define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY) @@ -102,7 +107,6 @@ struct thread_struct { struct list_head list; /* cpu runtime instrumentation */ struct runtime_instr_cb *ri_cb; - int ri_signum; unsigned char trap_tdb[256]; /* Transaction abort diagnose block */ }; @@ -139,8 +143,10 @@ struct stack_frame { #define ARCH_MIN_TASKALIGN 8 +extern __vector128 init_task_fpu_regs[__NUM_VXRS]; #define INIT_THREAD { \ .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \ + .fpu.regs = (void *)&init_task_fpu_regs, \ } /* @@ -217,7 +223,7 @@ static inline void __load_psw(psw_t psw) * Set PSW mask to specified value, while leaving the * PSW addr pointing to the next instruction. */ -static inline void __load_psw_mask (unsigned long mask) +static inline void __load_psw_mask(unsigned long mask) { unsigned long addr; psw_t psw; @@ -243,6 +249,16 @@ static inline unsigned long __extract_psw(void) return (((unsigned long) reg1) << 32) | ((unsigned long) reg2); } +static inline void local_mcck_enable(void) +{ + __load_psw_mask(__extract_psw() | PSW_MASK_MCHECK); +} + +static inline void local_mcck_disable(void) +{ + __load_psw_mask(__extract_psw() & ~PSW_MASK_MCHECK); +} + /* * Rewind PSW instruction address by specified number of bytes. 
*/ @@ -266,65 +282,14 @@ void enabled_wait(void); */ static inline void __noreturn disabled_wait(unsigned long code) { - unsigned long ctl_buf; - psw_t dw_psw; - - dw_psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA; - dw_psw.addr = code; - /* - * Store status and then load disabled wait psw, - * the processor is dead afterwards - */ - asm volatile( - " stctg 0,0,0(%2)\n" - " ni 4(%2),0xef\n" /* switch off protection */ - " lctlg 0,0,0(%2)\n" - " lghi 1,0x1000\n" - " stpt 0x328(1)\n" /* store timer */ - " stckc 0x330(1)\n" /* store clock comparator */ - " stpx 0x318(1)\n" /* store prefix register */ - " stam 0,15,0x340(1)\n"/* store access registers */ - " stfpc 0x31c(1)\n" /* store fpu control */ - " std 0,0x200(1)\n" /* store f0 */ - " std 1,0x208(1)\n" /* store f1 */ - " std 2,0x210(1)\n" /* store f2 */ - " std 3,0x218(1)\n" /* store f3 */ - " std 4,0x220(1)\n" /* store f4 */ - " std 5,0x228(1)\n" /* store f5 */ - " std 6,0x230(1)\n" /* store f6 */ - " std 7,0x238(1)\n" /* store f7 */ - " std 8,0x240(1)\n" /* store f8 */ - " std 9,0x248(1)\n" /* store f9 */ - " std 10,0x250(1)\n" /* store f10 */ - " std 11,0x258(1)\n" /* store f11 */ - " std 12,0x260(1)\n" /* store f12 */ - " std 13,0x268(1)\n" /* store f13 */ - " std 14,0x270(1)\n" /* store f14 */ - " std 15,0x278(1)\n" /* store f15 */ - " stmg 0,15,0x280(1)\n"/* store general registers */ - " stctg 0,15,0x380(1)\n"/* store control registers */ - " oi 0x384(1),0x10\n"/* fake protection bit */ - " lpswe 0(%1)" - : "=m" (ctl_buf) - : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1"); - while (1); -} + psw_t psw; -/* - * Use to set psw mask except for the first byte which - * won't be changed by this function. - */ -static inline void -__set_psw_mask(unsigned long mask) -{ - __load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8))); + psw.mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA; + psw.addr = code; + __load_psw(psw); + while (1); } -#define local_mcck_enable() \ - __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK) -#define local_mcck_disable() \ - __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT) - /* * Basic Machine Check/Program Check Handler. 
*/ diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index 6feda2599..37cbc5094 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -6,13 +6,14 @@ #ifndef _S390_PTRACE_H #define _S390_PTRACE_H +#include <linux/const.h> #include <uapi/asm/ptrace.h> #define PIF_SYSCALL 0 /* inside a system call */ #define PIF_PER_TRAP 1 /* deliver sigtrap on return to user */ -#define _PIF_SYSCALL (1<<PIF_SYSCALL) -#define _PIF_PER_TRAP (1<<PIF_PER_TRAP) +#define _PIF_SYSCALL _BITUL(PIF_SYSCALL) +#define _PIF_PER_TRAP _BITUL(PIF_PER_TRAP) #ifndef __ASSEMBLY__ @@ -128,17 +129,17 @@ struct per_struct_kernel { static inline void set_pt_regs_flag(struct pt_regs *regs, int flag) { - regs->flags |= (1U << flag); + regs->flags |= (1UL << flag); } static inline void clear_pt_regs_flag(struct pt_regs *regs, int flag) { - regs->flags &= ~(1U << flag); + regs->flags &= ~(1UL << flag); } static inline int test_pt_regs_flag(struct pt_regs *regs, int flag) { - return !!(regs->flags & (1U << flag)); + return !!(regs->flags & (1UL << flag)); } /* diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index b8ffc1bd0..23537661d 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -5,11 +5,38 @@ #ifndef _ASM_S390_SETUP_H #define _ASM_S390_SETUP_H +#include <linux/const.h> #include <uapi/asm/setup.h> #define PARMAREA 0x10400 +/* + * Machine features detected in head.S + */ + +#define MACHINE_FLAG_VM _BITUL(0) +#define MACHINE_FLAG_IEEE _BITUL(1) +#define MACHINE_FLAG_CSP _BITUL(2) +#define MACHINE_FLAG_MVPG _BITUL(3) +#define MACHINE_FLAG_DIAG44 _BITUL(4) +#define MACHINE_FLAG_IDTE _BITUL(5) +#define MACHINE_FLAG_DIAG9C _BITUL(6) +#define MACHINE_FLAG_KVM _BITUL(8) +#define MACHINE_FLAG_ESOP _BITUL(9) +#define MACHINE_FLAG_EDAT1 _BITUL(10) +#define MACHINE_FLAG_EDAT2 _BITUL(11) +#define MACHINE_FLAG_LPAR _BITUL(12) +#define MACHINE_FLAG_LPP _BITUL(13) +#define MACHINE_FLAG_TOPOLOGY _BITUL(14) +#define MACHINE_FLAG_TE _BITUL(15) +#define MACHINE_FLAG_TLB_LC _BITUL(17) +#define MACHINE_FLAG_VX _BITUL(18) +#define MACHINE_FLAG_CAD _BITUL(19) + +#define LPP_MAGIC _BITUL(31) +#define LPP_PFAULT_PID_MASK _AC(0xffffffff, UL) + #ifndef __ASSEMBLY__ #include <asm/lowcore.h> @@ -28,29 +55,6 @@ extern unsigned long max_physmem_end; extern void detect_memory_memblock(void); -/* - * Machine features detected in head.S - */ - -#define MACHINE_FLAG_VM (1UL << 0) -#define MACHINE_FLAG_IEEE (1UL << 1) -#define MACHINE_FLAG_CSP (1UL << 2) -#define MACHINE_FLAG_MVPG (1UL << 3) -#define MACHINE_FLAG_DIAG44 (1UL << 4) -#define MACHINE_FLAG_IDTE (1UL << 5) -#define MACHINE_FLAG_DIAG9C (1UL << 6) -#define MACHINE_FLAG_KVM (1UL << 8) -#define MACHINE_FLAG_ESOP (1UL << 9) -#define MACHINE_FLAG_EDAT1 (1UL << 10) -#define MACHINE_FLAG_EDAT2 (1UL << 11) -#define MACHINE_FLAG_LPAR (1UL << 12) -#define MACHINE_FLAG_LPP (1UL << 13) -#define MACHINE_FLAG_TOPOLOGY (1UL << 14) -#define MACHINE_FLAG_TE (1UL << 15) -#define MACHINE_FLAG_TLB_LC (1UL << 17) -#define MACHINE_FLAG_VX (1UL << 18) -#define MACHINE_FLAG_CAD (1UL << 19) - #define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM) #define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM) #define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR) diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index 0e37cd041..63ebf37d3 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -87,7 +87,6 @@ static 
inline void arch_spin_unlock(arch_spinlock_t *lp) { typecheck(unsigned int, lp->lock); asm volatile( - __ASM_BARRIER "st %1,%0\n" : "+Q" (lp->lock) : "d" (0) @@ -169,7 +168,6 @@ static inline int arch_write_trylock_once(arch_rwlock_t *rw) \ typecheck(unsigned int *, ptr); \ asm volatile( \ - "bcr 14,0\n" \ op_string " %0,%2,%1\n" \ : "=d" (old_val), "+Q" (*ptr) \ : "d" (op_val) \ @@ -243,7 +241,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) rw->owner = 0; asm volatile( - __ASM_BARRIER "st %1,%0\n" : "+Q" (rw->lock) : "d" (0) diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h index dcadfde32..12d45f0cf 100644 --- a/arch/s390/include/asm/switch_to.h +++ b/arch/s390/include/asm/switch_to.h @@ -8,7 +8,7 @@ #define __ASM_SWITCH_TO_H #include <linux/thread_info.h> -#include <asm/fpu-internal.h> +#include <asm/fpu/api.h> #include <asm/ptrace.h> extern struct task_struct *__switch_to(void *, void *); diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 4c27ec764..692b9247c 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -7,6 +7,8 @@ #ifndef _ASM_THREAD_INFO_H #define _ASM_THREAD_INFO_H +#include <linux/const.h> + /* * Size of kernel stack for each process */ @@ -83,16 +85,16 @@ void arch_release_task_struct(struct task_struct *tsk); #define TIF_BLOCK_STEP 20 /* This task is block stepped */ #define TIF_UPROBE_SINGLESTEP 21 /* This task is uprobe single stepped */ -#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) -#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) -#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) -#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) -#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) -#define _TIF_SECCOMP (1<<TIF_SECCOMP) -#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) -#define _TIF_UPROBE (1<<TIF_UPROBE) -#define _TIF_31BIT (1<<TIF_31BIT) -#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) +#define _TIF_NOTIFY_RESUME _BITUL(TIF_NOTIFY_RESUME) +#define _TIF_SIGPENDING _BITUL(TIF_SIGPENDING) +#define _TIF_NEED_RESCHED _BITUL(TIF_NEED_RESCHED) +#define _TIF_SYSCALL_TRACE _BITUL(TIF_SYSCALL_TRACE) +#define _TIF_SYSCALL_AUDIT _BITUL(TIF_SYSCALL_AUDIT) +#define _TIF_SECCOMP _BITUL(TIF_SECCOMP) +#define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT) +#define _TIF_UPROBE _BITUL(TIF_UPROBE) +#define _TIF_31BIT _BITUL(TIF_31BIT) +#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP) #define is_32bit_task() (test_thread_flag(TIF_31BIT)) diff --git a/arch/s390/include/asm/trace/diag.h b/arch/s390/include/asm/trace/diag.h new file mode 100644 index 000000000..cc6cfe788 --- /dev/null +++ b/arch/s390/include/asm/trace/diag.h @@ -0,0 +1,43 @@ +/* + * Tracepoint header for s390 diagnose calls + * + * Copyright IBM Corp. 
2015 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM s390 + +#if !defined(_TRACE_S390_DIAG_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_S390_DIAG_H + +#include <linux/tracepoint.h> + +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE + +#define TRACE_INCLUDE_PATH asm/trace +#define TRACE_INCLUDE_FILE diag + +TRACE_EVENT(s390_diagnose, + TP_PROTO(unsigned short nr), + TP_ARGS(nr), + TP_STRUCT__entry( + __field(unsigned short, nr) + ), + TP_fast_assign( + __entry->nr = nr; + ), + TP_printk("nr=0x%x", __entry->nr) +); + +#ifdef CONFIG_TRACEPOINTS +void trace_s390_diagnose_norecursion(int diag_nr); +#else +static inline void trace_s390_diagnose_norecursion(int diag_nr) { } +#endif + +#endif /* _TRACE_S390_DIAG_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index a848adba1..34ec20247 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h @@ -192,14 +192,14 @@ #define __NR_set_tid_address 252 #define __NR_fadvise64 253 #define __NR_timer_create 254 -#define __NR_timer_settime (__NR_timer_create+1) -#define __NR_timer_gettime (__NR_timer_create+2) -#define __NR_timer_getoverrun (__NR_timer_create+3) -#define __NR_timer_delete (__NR_timer_create+4) -#define __NR_clock_settime (__NR_timer_create+5) -#define __NR_clock_gettime (__NR_timer_create+6) -#define __NR_clock_getres (__NR_timer_create+7) -#define __NR_clock_nanosleep (__NR_timer_create+8) +#define __NR_timer_settime 255 +#define __NR_timer_gettime 256 +#define __NR_timer_getoverrun 257 +#define __NR_timer_delete 258 +#define __NR_clock_settime 259 +#define __NR_clock_gettime 260 +#define __NR_clock_getres 261 +#define __NR_clock_nanosleep 262 /* Number 263 is reserved for vserver */ #define __NR_statfs64 265 #define __NR_fstatfs64 266 @@ -309,7 +309,8 @@ #define __NR_recvfrom 371 #define __NR_recvmsg 372 #define __NR_shutdown 373 -#define NR_syscalls 374 +#define __NR_mlock2 374 +#define NR_syscalls 375 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index b756c6348..dc167a23b 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -66,6 +66,8 @@ obj-$(CONFIG_UPROBES) += uprobes.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o +obj-$(CONFIG_TRACEPOINTS) += trace.o + # vdso obj-y += vdso64/ obj-$(CONFIG_COMPAT) += vdso32/ diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 3aeeb1b56..9cd248f63 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -23,59 +23,64 @@ int main(void) { - DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack)); - DEFINE(__TASK_thread, offsetof(struct task_struct, thread)); - DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); + /* task struct offsets */ + OFFSET(__TASK_thread_info, task_struct, stack); + OFFSET(__TASK_thread, task_struct, thread); + OFFSET(__TASK_pid, task_struct, pid); BLANK(); - DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp)); - DEFINE(__THREAD_FPU_fpc, offsetof(struct thread_struct, fpu.fpc)); - DEFINE(__THREAD_FPU_flags, offsetof(struct thread_struct, fpu.flags)); - DEFINE(__THREAD_FPU_regs, offsetof(struct thread_struct, fpu.regs)); - DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, 
per_event.cause)); - DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address)); - DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid)); - DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb)); + /* thread struct offsets */ + OFFSET(__THREAD_ksp, thread_struct, ksp); + OFFSET(__THREAD_FPU_fpc, thread_struct, fpu.fpc); + OFFSET(__THREAD_FPU_regs, thread_struct, fpu.regs); + OFFSET(__THREAD_per_cause, thread_struct, per_event.cause); + OFFSET(__THREAD_per_address, thread_struct, per_event.address); + OFFSET(__THREAD_per_paid, thread_struct, per_event.paid); + OFFSET(__THREAD_trap_tdb, thread_struct, trap_tdb); BLANK(); - DEFINE(__TI_task, offsetof(struct thread_info, task)); - DEFINE(__TI_flags, offsetof(struct thread_info, flags)); - DEFINE(__TI_sysc_table, offsetof(struct thread_info, sys_call_table)); - DEFINE(__TI_cpu, offsetof(struct thread_info, cpu)); - DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count)); - DEFINE(__TI_user_timer, offsetof(struct thread_info, user_timer)); - DEFINE(__TI_system_timer, offsetof(struct thread_info, system_timer)); - DEFINE(__TI_last_break, offsetof(struct thread_info, last_break)); + /* thread info offsets */ + OFFSET(__TI_task, thread_info, task); + OFFSET(__TI_flags, thread_info, flags); + OFFSET(__TI_sysc_table, thread_info, sys_call_table); + OFFSET(__TI_cpu, thread_info, cpu); + OFFSET(__TI_precount, thread_info, preempt_count); + OFFSET(__TI_user_timer, thread_info, user_timer); + OFFSET(__TI_system_timer, thread_info, system_timer); + OFFSET(__TI_last_break, thread_info, last_break); BLANK(); - DEFINE(__PT_ARGS, offsetof(struct pt_regs, args)); - DEFINE(__PT_PSW, offsetof(struct pt_regs, psw)); - DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs)); - DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2)); - DEFINE(__PT_INT_CODE, offsetof(struct pt_regs, int_code)); - DEFINE(__PT_INT_PARM, offsetof(struct pt_regs, int_parm)); - DEFINE(__PT_INT_PARM_LONG, offsetof(struct pt_regs, int_parm_long)); - DEFINE(__PT_FLAGS, offsetof(struct pt_regs, flags)); + /* pt_regs offsets */ + OFFSET(__PT_ARGS, pt_regs, args); + OFFSET(__PT_PSW, pt_regs, psw); + OFFSET(__PT_GPRS, pt_regs, gprs); + OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2); + OFFSET(__PT_INT_CODE, pt_regs, int_code); + OFFSET(__PT_INT_PARM, pt_regs, int_parm); + OFFSET(__PT_INT_PARM_LONG, pt_regs, int_parm_long); + OFFSET(__PT_FLAGS, pt_regs, flags); DEFINE(__PT_SIZE, sizeof(struct pt_regs)); BLANK(); - DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain)); - DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs)); - DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1)); + /* stack_frame offsets */ + OFFSET(__SF_BACKCHAIN, stack_frame, back_chain); + OFFSET(__SF_GPRS, stack_frame, gprs); + OFFSET(__SF_EMPTY, stack_frame, empty1); BLANK(); /* timeval/timezone offsets for use by vdso */ - DEFINE(__VDSO_UPD_COUNT, offsetof(struct vdso_data, tb_update_count)); - DEFINE(__VDSO_XTIME_STAMP, offsetof(struct vdso_data, xtime_tod_stamp)); - DEFINE(__VDSO_XTIME_SEC, offsetof(struct vdso_data, xtime_clock_sec)); - DEFINE(__VDSO_XTIME_NSEC, offsetof(struct vdso_data, xtime_clock_nsec)); - DEFINE(__VDSO_XTIME_CRS_SEC, offsetof(struct vdso_data, xtime_coarse_sec)); - DEFINE(__VDSO_XTIME_CRS_NSEC, offsetof(struct vdso_data, xtime_coarse_nsec)); - DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec)); - DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); - DEFINE(__VDSO_WTOM_CRS_SEC, offsetof(struct 
vdso_data, wtom_coarse_sec)); - DEFINE(__VDSO_WTOM_CRS_NSEC, offsetof(struct vdso_data, wtom_coarse_nsec)); - DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); - DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); - DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult)); - DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift)); - DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base)); - DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time)); + OFFSET(__VDSO_UPD_COUNT, vdso_data, tb_update_count); + OFFSET(__VDSO_XTIME_STAMP, vdso_data, xtime_tod_stamp); + OFFSET(__VDSO_XTIME_SEC, vdso_data, xtime_clock_sec); + OFFSET(__VDSO_XTIME_NSEC, vdso_data, xtime_clock_nsec); + OFFSET(__VDSO_XTIME_CRS_SEC, vdso_data, xtime_coarse_sec); + OFFSET(__VDSO_XTIME_CRS_NSEC, vdso_data, xtime_coarse_nsec); + OFFSET(__VDSO_WTOM_SEC, vdso_data, wtom_clock_sec); + OFFSET(__VDSO_WTOM_NSEC, vdso_data, wtom_clock_nsec); + OFFSET(__VDSO_WTOM_CRS_SEC, vdso_data, wtom_coarse_sec); + OFFSET(__VDSO_WTOM_CRS_NSEC, vdso_data, wtom_coarse_nsec); + OFFSET(__VDSO_TIMEZONE, vdso_data, tz_minuteswest); + OFFSET(__VDSO_ECTG_OK, vdso_data, ectg_available); + OFFSET(__VDSO_TK_MULT, vdso_data, tk_mult); + OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift); + OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base); + OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time); + BLANK(); /* constants used by the vdso */ DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME); DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC); @@ -86,102 +91,105 @@ int main(void) DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC); BLANK(); /* idle data offsets */ - DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter)); - DEFINE(__CLOCK_IDLE_EXIT, offsetof(struct s390_idle_data, clock_idle_exit)); - DEFINE(__TIMER_IDLE_ENTER, offsetof(struct s390_idle_data, timer_idle_enter)); - DEFINE(__TIMER_IDLE_EXIT, offsetof(struct s390_idle_data, timer_idle_exit)); - /* lowcore offsets */ - DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params)); - DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr)); - DEFINE(__LC_EXT_INT_CODE, offsetof(struct _lowcore, ext_int_code)); - DEFINE(__LC_SVC_ILC, offsetof(struct _lowcore, svc_ilc)); - DEFINE(__LC_SVC_INT_CODE, offsetof(struct _lowcore, svc_code)); - DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc)); - DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code)); - DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code)); - DEFINE(__LC_MON_CLASS_NR, offsetof(struct _lowcore, mon_class_num)); - DEFINE(__LC_PER_CODE, offsetof(struct _lowcore, per_code)); - DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_atmid)); - DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address)); - DEFINE(__LC_EXC_ACCESS_ID, offsetof(struct _lowcore, exc_access_id)); - DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id)); - DEFINE(__LC_OP_ACCESS_ID, offsetof(struct _lowcore, op_access_id)); - DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_mode_id)); - DEFINE(__LC_MON_CODE, offsetof(struct _lowcore, monitor_code)); - DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id)); - DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr)); - DEFINE(__LC_IO_INT_PARM, offsetof(struct _lowcore, io_int_parm)); - DEFINE(__LC_IO_INT_WORD, offsetof(struct _lowcore, io_int_word)); - DEFINE(__LC_STFL_FAC_LIST, offsetof(struct _lowcore, stfl_fac_list)); - DEFINE(__LC_MCCK_CODE, 
offsetof(struct _lowcore, mcck_interruption_code)); - DEFINE(__LC_MCCK_EXT_DAM_CODE, offsetof(struct _lowcore, external_damage_code)); - DEFINE(__LC_RST_OLD_PSW, offsetof(struct _lowcore, restart_old_psw)); - DEFINE(__LC_EXT_OLD_PSW, offsetof(struct _lowcore, external_old_psw)); - DEFINE(__LC_SVC_OLD_PSW, offsetof(struct _lowcore, svc_old_psw)); - DEFINE(__LC_PGM_OLD_PSW, offsetof(struct _lowcore, program_old_psw)); - DEFINE(__LC_MCK_OLD_PSW, offsetof(struct _lowcore, mcck_old_psw)); - DEFINE(__LC_IO_OLD_PSW, offsetof(struct _lowcore, io_old_psw)); - DEFINE(__LC_RST_NEW_PSW, offsetof(struct _lowcore, restart_psw)); - DEFINE(__LC_EXT_NEW_PSW, offsetof(struct _lowcore, external_new_psw)); - DEFINE(__LC_SVC_NEW_PSW, offsetof(struct _lowcore, svc_new_psw)); - DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw)); - DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw)); - DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw)); + OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter); + OFFSET(__CLOCK_IDLE_EXIT, s390_idle_data, clock_idle_exit); + OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter); + OFFSET(__TIMER_IDLE_EXIT, s390_idle_data, timer_idle_exit); BLANK(); - DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync)); - DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async)); - DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart)); - DEFINE(__LC_CPU_FLAGS, offsetof(struct _lowcore, cpu_flags)); - DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw)); - DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw)); - DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer)); - DEFINE(__LC_ASYNC_ENTER_TIMER, offsetof(struct _lowcore, async_enter_timer)); - DEFINE(__LC_MCCK_ENTER_TIMER, offsetof(struct _lowcore, mcck_enter_timer)); - DEFINE(__LC_EXIT_TIMER, offsetof(struct _lowcore, exit_timer)); - DEFINE(__LC_USER_TIMER, offsetof(struct _lowcore, user_timer)); - DEFINE(__LC_SYSTEM_TIMER, offsetof(struct _lowcore, system_timer)); - DEFINE(__LC_STEAL_TIMER, offsetof(struct _lowcore, steal_timer)); - DEFINE(__LC_LAST_UPDATE_TIMER, offsetof(struct _lowcore, last_update_timer)); - DEFINE(__LC_LAST_UPDATE_CLOCK, offsetof(struct _lowcore, last_update_clock)); - DEFINE(__LC_CURRENT, offsetof(struct _lowcore, current_task)); - DEFINE(__LC_CURRENT_PID, offsetof(struct _lowcore, current_pid)); - DEFINE(__LC_THREAD_INFO, offsetof(struct _lowcore, thread_info)); - DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack)); - DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack)); - DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack)); - DEFINE(__LC_RESTART_STACK, offsetof(struct _lowcore, restart_stack)); - DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn)); - DEFINE(__LC_RESTART_DATA, offsetof(struct _lowcore, restart_data)); - DEFINE(__LC_RESTART_SOURCE, offsetof(struct _lowcore, restart_source)); - DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce)); - DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce)); - DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); - DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); - DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); - DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib)); + /* hardware defined lowcore locations 0x000 - 0x1ff */ + OFFSET(__LC_EXT_PARAMS, _lowcore, ext_params); + OFFSET(__LC_EXT_CPU_ADDR, 
_lowcore, ext_cpu_addr); + OFFSET(__LC_EXT_INT_CODE, _lowcore, ext_int_code); + OFFSET(__LC_SVC_ILC, _lowcore, svc_ilc); + OFFSET(__LC_SVC_INT_CODE, _lowcore, svc_code); + OFFSET(__LC_PGM_ILC, _lowcore, pgm_ilc); + OFFSET(__LC_PGM_INT_CODE, _lowcore, pgm_code); + OFFSET(__LC_DATA_EXC_CODE, _lowcore, data_exc_code); + OFFSET(__LC_MON_CLASS_NR, _lowcore, mon_class_num); + OFFSET(__LC_PER_CODE, _lowcore, per_code); + OFFSET(__LC_PER_ATMID, _lowcore, per_atmid); + OFFSET(__LC_PER_ADDRESS, _lowcore, per_address); + OFFSET(__LC_EXC_ACCESS_ID, _lowcore, exc_access_id); + OFFSET(__LC_PER_ACCESS_ID, _lowcore, per_access_id); + OFFSET(__LC_OP_ACCESS_ID, _lowcore, op_access_id); + OFFSET(__LC_AR_MODE_ID, _lowcore, ar_mode_id); + OFFSET(__LC_TRANS_EXC_CODE, _lowcore, trans_exc_code); + OFFSET(__LC_MON_CODE, _lowcore, monitor_code); + OFFSET(__LC_SUBCHANNEL_ID, _lowcore, subchannel_id); + OFFSET(__LC_SUBCHANNEL_NR, _lowcore, subchannel_nr); + OFFSET(__LC_IO_INT_PARM, _lowcore, io_int_parm); + OFFSET(__LC_IO_INT_WORD, _lowcore, io_int_word); + OFFSET(__LC_STFL_FAC_LIST, _lowcore, stfl_fac_list); + OFFSET(__LC_MCCK_CODE, _lowcore, mcck_interruption_code); + OFFSET(__LC_MCCK_FAIL_STOR_ADDR, _lowcore, failing_storage_address); + OFFSET(__LC_LAST_BREAK, _lowcore, breaking_event_addr); + OFFSET(__LC_RST_OLD_PSW, _lowcore, restart_old_psw); + OFFSET(__LC_EXT_OLD_PSW, _lowcore, external_old_psw); + OFFSET(__LC_SVC_OLD_PSW, _lowcore, svc_old_psw); + OFFSET(__LC_PGM_OLD_PSW, _lowcore, program_old_psw); + OFFSET(__LC_MCK_OLD_PSW, _lowcore, mcck_old_psw); + OFFSET(__LC_IO_OLD_PSW, _lowcore, io_old_psw); + OFFSET(__LC_RST_NEW_PSW, _lowcore, restart_psw); + OFFSET(__LC_EXT_NEW_PSW, _lowcore, external_new_psw); + OFFSET(__LC_SVC_NEW_PSW, _lowcore, svc_new_psw); + OFFSET(__LC_PGM_NEW_PSW, _lowcore, program_new_psw); + OFFSET(__LC_MCK_NEW_PSW, _lowcore, mcck_new_psw); + OFFSET(__LC_IO_NEW_PSW, _lowcore, io_new_psw); + /* software defined lowcore locations 0x200 - 0xdff*/ + OFFSET(__LC_SAVE_AREA_SYNC, _lowcore, save_area_sync); + OFFSET(__LC_SAVE_AREA_ASYNC, _lowcore, save_area_async); + OFFSET(__LC_SAVE_AREA_RESTART, _lowcore, save_area_restart); + OFFSET(__LC_CPU_FLAGS, _lowcore, cpu_flags); + OFFSET(__LC_RETURN_PSW, _lowcore, return_psw); + OFFSET(__LC_RETURN_MCCK_PSW, _lowcore, return_mcck_psw); + OFFSET(__LC_SYNC_ENTER_TIMER, _lowcore, sync_enter_timer); + OFFSET(__LC_ASYNC_ENTER_TIMER, _lowcore, async_enter_timer); + OFFSET(__LC_MCCK_ENTER_TIMER, _lowcore, mcck_enter_timer); + OFFSET(__LC_EXIT_TIMER, _lowcore, exit_timer); + OFFSET(__LC_USER_TIMER, _lowcore, user_timer); + OFFSET(__LC_SYSTEM_TIMER, _lowcore, system_timer); + OFFSET(__LC_STEAL_TIMER, _lowcore, steal_timer); + OFFSET(__LC_LAST_UPDATE_TIMER, _lowcore, last_update_timer); + OFFSET(__LC_LAST_UPDATE_CLOCK, _lowcore, last_update_clock); + OFFSET(__LC_INT_CLOCK, _lowcore, int_clock); + OFFSET(__LC_MCCK_CLOCK, _lowcore, mcck_clock); + OFFSET(__LC_CURRENT, _lowcore, current_task); + OFFSET(__LC_THREAD_INFO, _lowcore, thread_info); + OFFSET(__LC_KERNEL_STACK, _lowcore, kernel_stack); + OFFSET(__LC_ASYNC_STACK, _lowcore, async_stack); + OFFSET(__LC_PANIC_STACK, _lowcore, panic_stack); + OFFSET(__LC_RESTART_STACK, _lowcore, restart_stack); + OFFSET(__LC_RESTART_FN, _lowcore, restart_fn); + OFFSET(__LC_RESTART_DATA, _lowcore, restart_data); + OFFSET(__LC_RESTART_SOURCE, _lowcore, restart_source); + OFFSET(__LC_USER_ASCE, _lowcore, user_asce); + OFFSET(__LC_LPP, _lowcore, lpp); + OFFSET(__LC_CURRENT_PID, _lowcore, current_pid); + OFFSET(__LC_PERCPU_OFFSET, 
_lowcore, percpu_offset); + OFFSET(__LC_VDSO_PER_CPU, _lowcore, vdso_per_cpu_data); + OFFSET(__LC_MACHINE_FLAGS, _lowcore, machine_flags); + OFFSET(__LC_GMAP, _lowcore, gmap); + OFFSET(__LC_PASTE, _lowcore, paste); + /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */ + OFFSET(__LC_DUMP_REIPL, _lowcore, ipib); + /* hardware defined lowcore locations 0x1000 - 0x18ff */ + OFFSET(__LC_VX_SAVE_AREA_ADDR, _lowcore, vector_save_area_addr); + OFFSET(__LC_EXT_PARAMS2, _lowcore, ext_params2); + OFFSET(SAVE_AREA_BASE, _lowcore, floating_pt_save_area); + OFFSET(__LC_FPREGS_SAVE_AREA, _lowcore, floating_pt_save_area); + OFFSET(__LC_GPREGS_SAVE_AREA, _lowcore, gpregs_save_area); + OFFSET(__LC_PSW_SAVE_AREA, _lowcore, psw_save_area); + OFFSET(__LC_PREFIX_SAVE_AREA, _lowcore, prefixreg_save_area); + OFFSET(__LC_FP_CREG_SAVE_AREA, _lowcore, fpt_creg_save_area); + OFFSET(__LC_CPU_TIMER_SAVE_AREA, _lowcore, cpu_timer_save_area); + OFFSET(__LC_CLOCK_COMP_SAVE_AREA, _lowcore, clock_comp_save_area); + OFFSET(__LC_AREGS_SAVE_AREA, _lowcore, access_regs_save_area); + OFFSET(__LC_CREGS_SAVE_AREA, _lowcore, cregs_save_area); + OFFSET(__LC_PGM_TDB, _lowcore, pgm_tdb); BLANK(); - DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area)); - DEFINE(__LC_CLOCK_COMP_SAVE_AREA, offsetof(struct _lowcore, clock_comp_save_area)); - DEFINE(__LC_PSW_SAVE_AREA, offsetof(struct _lowcore, psw_save_area)); - DEFINE(__LC_PREFIX_SAVE_AREA, offsetof(struct _lowcore, prefixreg_save_area)); - DEFINE(__LC_AREGS_SAVE_AREA, offsetof(struct _lowcore, access_regs_save_area)); - DEFINE(__LC_FPREGS_SAVE_AREA, offsetof(struct _lowcore, floating_pt_save_area)); - DEFINE(__LC_GPREGS_SAVE_AREA, offsetof(struct _lowcore, gpregs_save_area)); - DEFINE(__LC_CREGS_SAVE_AREA, offsetof(struct _lowcore, cregs_save_area)); - DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code)); - DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address)); - DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr)); - DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2)); - DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area)); - DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); - DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area)); - DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); - DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset)); - DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); - DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); - DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb)); - DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); - DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); - DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20)); + /* gmap/sie offsets */ + OFFSET(__GMAP_ASCE, gmap, asce); + OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c); + OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20); return 0; } diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index e0f9d270b..66c94417c 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -249,7 +249,7 @@ static int save_sigregs_ext32(struct pt_regs *regs, return -EFAULT; /* Save vector registers to signal stack */ - if (is_vx_task(current)) { + if (MACHINE_HAS_VX) { for (i = 0; i < __NUM_VXRS_LOW; i++) vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1); if 
(__copy_to_user(&sregs_ext->vxrs_low, vxrs, @@ -277,7 +277,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs, *(__u32 *)&regs->gprs[i] = gprs_high[i]; /* Restore vector registers from signal stack */ - if (is_vx_task(current)) { + if (MACHINE_HAS_VX) { if (__copy_from_user(vxrs, &sregs_ext->vxrs_low, sizeof(sregs_ext->vxrs_low)) || __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW, @@ -470,8 +470,7 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set, */ uc_flags = UC_GPRS_HIGH; if (MACHINE_HAS_VX) { - if (is_vx_task(current)) - uc_flags |= UC_VXRS; + uc_flags |= UC_VXRS; } else frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) + sizeof(frame->uc.uc_mcontext_ext.vxrs_high); diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index 09f194052..fac4eedde 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c @@ -176,3 +176,4 @@ COMPAT_SYSCALL_WRAP4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); +COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags); diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c index 199ec92ef..7f768914f 100644 --- a/arch/s390/kernel/cpcmd.c +++ b/arch/s390/kernel/cpcmd.c @@ -14,6 +14,7 @@ #include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/string.h> +#include <asm/diag.h> #include <asm/ebcdic.h> #include <asm/cpcmd.h> #include <asm/io.h> @@ -70,6 +71,7 @@ int __cpcmd(const char *cmd, char *response, int rlen, int *response_code) memcpy(cpcmd_buf, cmd, cmdlen); ASCEBC(cpcmd_buf, cmdlen); + diag_stat_inc(DIAG_STAT_X008); if (response) { memset(response, 0, rlen); response_len = rlen; diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index 0c6c01eb3..171e09bb8 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -32,16 +32,6 @@ static struct memblock_type oldmem_type = { .regions = &oldmem_region, }; -#define for_each_dump_mem_range(i, nid, p_start, p_end, p_nid) \ - for (i = 0, __next_mem_range(&i, nid, MEMBLOCK_NONE, \ - &memblock.physmem, \ - &oldmem_type, p_start, \ - p_end, p_nid); \ - i != (u64)ULLONG_MAX; \ - __next_mem_range(&i, nid, MEMBLOCK_NONE, &memblock.physmem,\ - &oldmem_type, \ - p_start, p_end, p_nid)) - struct dump_save_areas dump_save_areas; /* @@ -515,7 +505,8 @@ static int get_mem_chunk_cnt(void) int cnt = 0; u64 idx; - for_each_dump_mem_range(idx, NUMA_NO_NODE, NULL, NULL, NULL) + for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE, + MEMBLOCK_NONE, NULL, NULL, NULL) cnt++; return cnt; } @@ -528,7 +519,8 @@ static void loads_init(Elf64_Phdr *phdr, u64 loads_offset) phys_addr_t start, end; u64 idx; - for_each_dump_mem_range(idx, NUMA_NO_NODE, &start, &end, NULL) { + for_each_mem_range(idx, &memblock.physmem, &oldmem_type, NUMA_NO_NODE, + MEMBLOCK_NONE, &start, &end, NULL) { phdr->p_filesz = end - start; phdr->p_type = PT_LOAD; phdr->p_offset = start; diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c index 2f69243bf..48b37b835 100644 --- a/arch/s390/kernel/diag.c +++ b/arch/s390/kernel/diag.c @@ -6,12 +6,137 @@ */ #include <linux/module.h> +#include 
<linux/cpu.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> #include <asm/diag.h> +#include <asm/trace/diag.h> + +struct diag_stat { + unsigned int counter[NR_DIAG_STAT]; +}; + +static DEFINE_PER_CPU(struct diag_stat, diag_stat); + +struct diag_desc { + int code; + char *name; +}; + +static const struct diag_desc diag_map[NR_DIAG_STAT] = { + [DIAG_STAT_X008] = { .code = 0x008, .name = "Console Function" }, + [DIAG_STAT_X00C] = { .code = 0x00c, .name = "Pseudo Timer" }, + [DIAG_STAT_X010] = { .code = 0x010, .name = "Release Pages" }, + [DIAG_STAT_X014] = { .code = 0x014, .name = "Spool File Services" }, + [DIAG_STAT_X044] = { .code = 0x044, .name = "Voluntary Timeslice End" }, + [DIAG_STAT_X064] = { .code = 0x064, .name = "NSS Manipulation" }, + [DIAG_STAT_X09C] = { .code = 0x09c, .name = "Relinquish Timeslice" }, + [DIAG_STAT_X0DC] = { .code = 0x0dc, .name = "Appldata Control" }, + [DIAG_STAT_X204] = { .code = 0x204, .name = "Logical-CPU Utilization" }, + [DIAG_STAT_X210] = { .code = 0x210, .name = "Device Information" }, + [DIAG_STAT_X224] = { .code = 0x224, .name = "EBCDIC-Name Table" }, + [DIAG_STAT_X250] = { .code = 0x250, .name = "Block I/O" }, + [DIAG_STAT_X258] = { .code = 0x258, .name = "Page-Reference Services" }, + [DIAG_STAT_X288] = { .code = 0x288, .name = "Time Bomb" }, + [DIAG_STAT_X2C4] = { .code = 0x2c4, .name = "FTP Services" }, + [DIAG_STAT_X2FC] = { .code = 0x2fc, .name = "Guest Performance Data" }, + [DIAG_STAT_X304] = { .code = 0x304, .name = "Partition-Resource Service" }, + [DIAG_STAT_X308] = { .code = 0x308, .name = "List-Directed IPL" }, + [DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" }, +}; + +static int show_diag_stat(struct seq_file *m, void *v) +{ + struct diag_stat *stat; + unsigned long n = (unsigned long) v - 1; + int cpu, prec, tmp; + + get_online_cpus(); + if (n == 0) { + seq_puts(m, " "); + + for_each_online_cpu(cpu) { + prec = 10; + for (tmp = 10; cpu >= tmp; tmp *= 10) + prec--; + seq_printf(m, "%*s%d", prec, "CPU", cpu); + } + seq_putc(m, '\n'); + } else if (n <= NR_DIAG_STAT) { + seq_printf(m, "diag %03x:", diag_map[n-1].code); + for_each_online_cpu(cpu) { + stat = &per_cpu(diag_stat, cpu); + seq_printf(m, " %10u", stat->counter[n-1]); + } + seq_printf(m, " %s\n", diag_map[n-1].name); + } + put_online_cpus(); + return 0; +} + +static void *show_diag_stat_start(struct seq_file *m, loff_t *pos) +{ + return *pos <= nr_cpu_ids ? 
(void *)((unsigned long) *pos + 1) : NULL; +} + +static void *show_diag_stat_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return show_diag_stat_start(m, pos); +} + +static void show_diag_stat_stop(struct seq_file *m, void *v) +{ +} + +static const struct seq_operations show_diag_stat_sops = { + .start = show_diag_stat_start, + .next = show_diag_stat_next, + .stop = show_diag_stat_stop, + .show = show_diag_stat, +}; + +static int show_diag_stat_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &show_diag_stat_sops); +} + +static const struct file_operations show_diag_stat_fops = { + .open = show_diag_stat_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + + +static int __init show_diag_stat_init(void) +{ + debugfs_create_file("diag_stat", 0400, NULL, NULL, + &show_diag_stat_fops); + return 0; +} + +device_initcall(show_diag_stat_init); + +void diag_stat_inc(enum diag_stat_enum nr) +{ + this_cpu_inc(diag_stat.counter[nr]); + trace_s390_diagnose(diag_map[nr].code); +} +EXPORT_SYMBOL(diag_stat_inc); + +void diag_stat_inc_norecursion(enum diag_stat_enum nr) +{ + this_cpu_inc(diag_stat.counter[nr]); + trace_s390_diagnose_norecursion(diag_map[nr].code); +} +EXPORT_SYMBOL(diag_stat_inc_norecursion); /* * Diagnose 14: Input spool file manipulation */ -int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) +static inline int __diag14(unsigned long rx, unsigned long ry1, + unsigned long subcode) { register unsigned long _ry1 asm("2") = ry1; register unsigned long _ry2 asm("3") = subcode; @@ -29,6 +154,12 @@ int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) return rc; } + +int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) +{ + diag_stat_inc(DIAG_STAT_X014); + return __diag14(rx, ry1, subcode); +} EXPORT_SYMBOL(diag14); /* @@ -48,6 +179,7 @@ int diag210(struct diag210 *addr) spin_lock_irqsave(&diag210_lock, flags); diag210_tmp = *addr; + diag_stat_inc(DIAG_STAT_X210); asm volatile( " lhi %0,-1\n" " sam31\n" diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 8140d10c6..6e7296160 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c @@ -1920,16 +1920,23 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr) } if (separator) ptr += sprintf(ptr, "%c", separator); + /* + * Use four '%' characters below because of the + * following two conversions: + * + * 1) sprintf: %%%%r -> %%r + * 2) printk : %%r -> %r + */ if (operand->flags & OPERAND_GPR) - ptr += sprintf(ptr, "%%r%i", value); + ptr += sprintf(ptr, "%%%%r%i", value); else if (operand->flags & OPERAND_FPR) - ptr += sprintf(ptr, "%%f%i", value); + ptr += sprintf(ptr, "%%%%f%i", value); else if (operand->flags & OPERAND_AR) - ptr += sprintf(ptr, "%%a%i", value); + ptr += sprintf(ptr, "%%%%a%i", value); else if (operand->flags & OPERAND_CR) - ptr += sprintf(ptr, "%%c%i", value); + ptr += sprintf(ptr, "%%%%c%i", value); else if (operand->flags & OPERAND_VR) - ptr += sprintf(ptr, "%%v%i", value); + ptr += sprintf(ptr, "%%%%v%i", value); else if (operand->flags & OPERAND_PCREL) ptr += sprintf(ptr, "%lx", (signed int) value + addr); diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 549a73a4b..3c31609df 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -17,6 +17,7 @@ #include <linux/pfn.h> #include <linux/uaccess.h> #include <linux/kernel.h> +#include <asm/diag.h> #include <asm/ebcdic.h> #include <asm/ipl.h> #include <asm/lowcore.h> @@ -286,6 
+287,7 @@ static __init void detect_diag9c(void) int rc; cpu_address = stap(); + diag_stat_inc(DIAG_STAT_X09C); asm volatile( " diag %2,0,0x9c\n" "0: la %0,0\n" @@ -300,6 +302,7 @@ static __init void detect_diag44(void) { int rc; + diag_stat_inc(DIAG_STAT_X044); asm volatile( " diag 0,0,0x44\n" "0: la %0,0\n" @@ -326,9 +329,19 @@ static __init void detect_machine_facilities(void) S390_lowcore.machine_flags |= MACHINE_FLAG_TE; if (test_facility(51)) S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; - if (test_facility(129)) + if (test_facility(129)) { S390_lowcore.machine_flags |= MACHINE_FLAG_VX; + __ctl_set_bit(0, 17); + } +} + +static int __init disable_vector_extension(char *str) +{ + S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX; + __ctl_clear_bit(0, 17); + return 1; } +early_param("novx", disable_vector_extension); static int __init cad_setup(char *str) { diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 582fe44ab..857b6526d 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -20,8 +20,9 @@ #include <asm/page.h> #include <asm/sigp.h> #include <asm/irq.h> -#include <asm/fpu-internal.h> #include <asm/vx-insn.h> +#include <asm/setup.h> +#include <asm/nmi.h> __PT_R0 = __PT_GPRS __PT_R1 = __PT_GPRS + 8 @@ -139,6 +140,28 @@ _PIF_WORK = (_PIF_PER_TRAP) #endif .endm + /* + * The TSTMSK macro generates a test-under-mask instruction by + * calculating the memory offset for the specified mask value. + * Mask value can be any constant. The macro shifts the mask + * value to calculate the memory offset for the test-under-mask + * instruction. + */ + .macro TSTMSK addr, mask, size=8, bytepos=0 + .if (\bytepos < \size) && (\mask >> 8) + .if (\mask & 0xff) + .error "Mask exceeds byte boundary" + .endif + TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)" + .exitm + .endif + .ifeq \mask + .error "Mask must not be zero" + .endif + off = \size - \bytepos - 1 + tm off+\addr, \mask + .endm + .section .kprobes.text, "ax" /* @@ -164,8 +187,11 @@ ENTRY(__switch_to) stg %r15,__LC_KERNEL_STACK # store end of kernel stack lg %r15,__THREAD_ksp(%r1) # load kernel stack of next lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 - mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next + mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP + bzr %r14 + .insn s,0xb2800000,__LC_LPP # set program parameter br %r14 .L__critical_start: @@ -180,8 +206,8 @@ ENTRY(sie64a) stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers stg %r2,__SF_EMPTY(%r15) # save control block pointer stg %r3,__SF_EMPTY+8(%r15) # save guest register save area - xc __SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason - tm __LC_CPU_FLAGS+7,_CIF_FPU # load guest fp/vx registers ? + xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0 + TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ? jno .Lsie_load_guest_gprs brasl %r14,load_fpu_regs # load guest fp/vx regs .Lsie_load_guest_gprs: @@ -195,16 +221,9 @@ ENTRY(sie64a) oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now tm __SIE_PROG20+3(%r14),3 # last exit... 
jnz .Lsie_skip - tm __LC_CPU_FLAGS+7,_CIF_FPU + TSTMSK __LC_CPU_FLAGS,_CIF_FPU jo .Lsie_skip # exit if fp/vx regs changed - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP - jz .Lsie_enter - .insn s,0xb2800000,__LC_CURRENT_PID # set guest id to pid -.Lsie_enter: sie 0(%r14) - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP - jz .Lsie_skip - .insn s,0xb2800000,__SF_EMPTY+16(%r15)# set host id .Lsie_skip: ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce @@ -221,11 +240,11 @@ sie_exit: lg %r14,__SF_EMPTY+8(%r15) # load guest register save area stmg %r0,%r13,0(%r14) # save guest gprs 0-13 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers - lg %r2,__SF_EMPTY+24(%r15) # return exit reason code + lg %r2,__SF_EMPTY+16(%r15) # return exit reason code br %r14 .Lsie_fault: lghi %r14,-EFAULT - stg %r14,__SF_EMPTY+24(%r15) # set exit reason code + stg %r14,__SF_EMPTY+16(%r15) # set exit reason code j sie_exit EX_TABLE(.Lrewind_pad,.Lsie_fault) @@ -271,7 +290,7 @@ ENTRY(system_call) stg %r2,__PT_ORIG_GPR2(%r11) stg %r7,STACK_FRAME_OVERHEAD(%r15) lgf %r9,0(%r8,%r10) # get system call add. - tm __TI_flags+7(%r12),_TIF_TRACE + TSTMSK __TI_flags(%r12),_TIF_TRACE jnz .Lsysc_tracesys basr %r14,%r9 # call sys_xxxx stg %r2,__PT_R2(%r11) # store return value @@ -279,11 +298,11 @@ ENTRY(system_call) .Lsysc_return: LOCKDEP_SYS_EXIT .Lsysc_tif: - tm __PT_FLAGS+7(%r11),_PIF_WORK + TSTMSK __PT_FLAGS(%r11),_PIF_WORK jnz .Lsysc_work - tm __TI_flags+7(%r12),_TIF_WORK + TSTMSK __TI_flags(%r12),_TIF_WORK jnz .Lsysc_work # check for work - tm __LC_CPU_FLAGS+7,_CIF_WORK + TSTMSK __LC_CPU_FLAGS,_CIF_WORK jnz .Lsysc_work .Lsysc_restore: lg %r14,__LC_VDSO_PER_CPU @@ -299,23 +318,23 @@ ENTRY(system_call) # One of the work bits is on. Find out which one. 
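(Illustration, inferred from the TSTMSK macro definition above and not part of the patch itself: assuming the 8-byte flag fields used throughout this file, the macro shifts the mask right one byte at a time until it fits into a single byte and then emits one test-under-mask instruction at byte offset size - bytepos - 1. The conversions in the hunks below are therefore equivalent to the open-coded forms they replace, for example:
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING	# resolves to: tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR	# resolves to: tm __LC_MACHINE_FLAGS+6,0x10
)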
# .Lsysc_work: - tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING + TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING jo .Lsysc_mcck_pending - tm __TI_flags+7(%r12),_TIF_NEED_RESCHED + TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED jo .Lsysc_reschedule #ifdef CONFIG_UPROBES - tm __TI_flags+7(%r12),_TIF_UPROBE + TSTMSK __TI_flags(%r12),_TIF_UPROBE jo .Lsysc_uprobe_notify #endif - tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP + TSTMSK __PT_FLAGS(%r11),_PIF_PER_TRAP jo .Lsysc_singlestep - tm __TI_flags+7(%r12),_TIF_SIGPENDING + TSTMSK __TI_flags(%r12),_TIF_SIGPENDING jo .Lsysc_sigpending - tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME + TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME jo .Lsysc_notify_resume - tm __LC_CPU_FLAGS+7,_CIF_FPU + TSTMSK __LC_CPU_FLAGS,_CIF_FPU jo .Lsysc_vxrs - tm __LC_CPU_FLAGS+7,_CIF_ASCE + TSTMSK __LC_CPU_FLAGS,_CIF_ASCE jo .Lsysc_uaccess j .Lsysc_return # beware of critical section cleanup @@ -354,7 +373,7 @@ ENTRY(system_call) .Lsysc_sigpending: lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,do_signal - tm __PT_FLAGS+7(%r11),_PIF_SYSCALL + TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL jno .Lsysc_return lmg %r2,%r7,__PT_R2(%r11) # load svc arguments lg %r10,__TI_sysc_table(%r12) # address of system call table @@ -414,7 +433,7 @@ ENTRY(system_call) basr %r14,%r9 # call sys_xxx stg %r2,__PT_R2(%r11) # store return value .Lsysc_tracenogo: - tm __TI_flags+7(%r12),_TIF_TRACE + TSTMSK __TI_flags(%r12),_TIF_TRACE jz .Lsysc_return lgr %r2,%r11 # pass pointer to pt_regs larl %r14,.Lsysc_return @@ -544,6 +563,8 @@ ENTRY(io_int_handler) stmg %r8,%r9,__PT_PSW(%r11) mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) + TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ + jo .Lio_restore TRACE_IRQS_OFF xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) .Lio_loop: @@ -554,7 +575,7 @@ ENTRY(io_int_handler) lghi %r3,THIN_INTERRUPT .Lio_call: brasl %r14,do_IRQ - tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR jz .Lio_return tpi 0 jz .Lio_return @@ -564,9 +585,9 @@ ENTRY(io_int_handler) LOCKDEP_SYS_EXIT TRACE_IRQS_ON .Lio_tif: - tm __TI_flags+7(%r12),_TIF_WORK + TSTMSK __TI_flags(%r12),_TIF_WORK jnz .Lio_work # there is work to do (signals etc.) - tm __LC_CPU_FLAGS+7,_CIF_WORK + TSTMSK __LC_CPU_FLAGS,_CIF_WORK jnz .Lio_work .Lio_restore: lg %r14,__LC_VDSO_PER_CPU @@ -594,7 +615,7 @@ ENTRY(io_int_handler) # check for preemptive scheduling icm %r0,15,__TI_precount(%r12) jnz .Lio_restore # preemption is disabled - tm __TI_flags+7(%r12),_TIF_NEED_RESCHED + TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED jno .Lio_restore # switch to kernel stack lg %r1,__PT_R15(%r11) @@ -626,17 +647,17 @@ ENTRY(io_int_handler) # One of the work bits is on. Find out which one. 
# .Lio_work_tif: - tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING + TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING jo .Lio_mcck_pending - tm __TI_flags+7(%r12),_TIF_NEED_RESCHED + TSTMSK __TI_flags(%r12),_TIF_NEED_RESCHED jo .Lio_reschedule - tm __TI_flags+7(%r12),_TIF_SIGPENDING + TSTMSK __TI_flags(%r12),_TIF_SIGPENDING jo .Lio_sigpending - tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME + TSTMSK __TI_flags(%r12),_TIF_NOTIFY_RESUME jo .Lio_notify_resume - tm __LC_CPU_FLAGS+7,_CIF_FPU + TSTMSK __LC_CPU_FLAGS,_CIF_FPU jo .Lio_vxrs - tm __LC_CPU_FLAGS+7,_CIF_ASCE + TSTMSK __LC_CPU_FLAGS,_CIF_ASCE jo .Lio_uaccess j .Lio_return # beware of critical section cleanup @@ -719,6 +740,8 @@ ENTRY(ext_int_handler) mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS mvc __PT_INT_PARM_LONG(8,%r11),0(%r1) xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) + TSTMSK __LC_CPU_FLAGS,_CIF_IGNORE_IRQ + jo .Lio_restore TRACE_IRQS_OFF xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs @@ -748,27 +771,22 @@ ENTRY(psw_idle) br %r14 .Lpsw_idle_end: -/* Store floating-point controls and floating-point or vector extension - * registers instead. A critical section cleanup assures that the registers - * are stored even if interrupted for some other work. The register %r2 - * designates a struct fpu to store register contents. If the specified - * structure does not contain a register save area, the register store is - * omitted (see also comments in arch_dup_task_struct()). - * - * The CIF_FPU flag is set in any case. The CIF_FPU triggers a lazy restore - * of the register contents at system call or io return. +/* + * Store floating-point controls and floating-point or vector register + * depending whether the vector facility is available. A critical section + * cleanup assures that the registers are stored even if interrupted for + * some other work. The CIF_FPU flag is set to trigger a lazy restore + * of the register contents at return from io or a system call. */ ENTRY(save_fpu_regs) lg %r2,__LC_CURRENT aghi %r2,__TASK_thread - tm __LC_CPU_FLAGS+7,_CIF_FPU + TSTMSK __LC_CPU_FLAGS,_CIF_FPU bor %r14 stfpc __THREAD_FPU_fpc(%r2) .Lsave_fpu_regs_fpc_end: lg %r3,__THREAD_FPU_regs(%r2) - ltgr %r3,%r3 - jz .Lsave_fpu_regs_done # no save area -> set CIF_FPU - tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX jz .Lsave_fpu_regs_fp # no -> store FP regs .Lsave_fpu_regs_vx_low: VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) @@ -797,41 +815,30 @@ ENTRY(save_fpu_regs) br %r14 .Lsave_fpu_regs_end: -/* Load floating-point controls and floating-point or vector extension - * registers. A critical section cleanup assures that the register contents - * are loaded even if interrupted for some other work. Depending on the saved - * FP/VX state, the vector-enablement control, CR0.46, is either set or cleared. +/* + * Load floating-point controls and floating-point or vector registers. + * A critical section cleanup assures that the register contents are + * loaded even if interrupted for some other work. * * There are special calling conventions to fit into sysc and io return work: * %r15: <kernel stack> * The function requires: - * %r4 and __SF_EMPTY+32(%r15) + * %r4 */ load_fpu_regs: lg %r4,__LC_CURRENT aghi %r4,__TASK_thread - tm __LC_CPU_FLAGS+7,_CIF_FPU + TSTMSK __LC_CPU_FLAGS,_CIF_FPU bnor %r14 lfpc __THREAD_FPU_fpc(%r4) - stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0 - tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ? 
+ TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area - jz .Lload_fpu_regs_fp_ctl # -> no VX, load FP regs -.Lload_fpu_regs_vx_ctl: - tm __SF_EMPTY+32+5(%r15),2 # test VX control - jo .Lload_fpu_regs_vx - oi __SF_EMPTY+32+5(%r15),2 # set VX control - lctlg %c0,%c0,__SF_EMPTY+32(%r15) + jz .Lload_fpu_regs_fp # -> no VX, load FP regs .Lload_fpu_regs_vx: VLM %v0,%v15,0,%r4 .Lload_fpu_regs_vx_high: VLM %v16,%v31,256,%r4 j .Lload_fpu_regs_done -.Lload_fpu_regs_fp_ctl: - tm __SF_EMPTY+32+5(%r15),2 # test VX control - jz .Lload_fpu_regs_fp - ni __SF_EMPTY+32+5(%r15),253 # clear VX control - lctlg %c0,%c0,__SF_EMPTY+32(%r15) .Lload_fpu_regs_fp: ld 0,0(%r4) ld 1,8(%r4) @@ -854,16 +861,6 @@ load_fpu_regs: br %r14 .Lload_fpu_regs_end: -/* Test and set the vector enablement control in CR0.46 */ -ENTRY(__ctl_set_vx) - stctg %c0,%c0,__SF_EMPTY(%r15) - tm __SF_EMPTY+5(%r15),2 - bor %r14 - oi __SF_EMPTY+5(%r15),2 - lctlg %c0,%c0,__SF_EMPTY(%r15) - br %r14 -.L__ctl_set_vx_end: - .L__critical_end: /* @@ -878,11 +875,11 @@ ENTRY(mcck_int_handler) lg %r12,__LC_THREAD_INFO larl %r13,cleanup_critical lmg %r8,%r9,__LC_MCK_OLD_PSW - tm __LC_MCCK_CODE,0x80 # system damage? + TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE jo .Lmcck_panic # yes -> rest of mcck code invalid lghi %r14,__LC_CPU_TIMER_SAVE_AREA mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) - tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? + TSTMSK __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID jo 3f la %r14,__LC_SYNC_ENTER_TIMER clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER @@ -896,7 +893,7 @@ ENTRY(mcck_int_handler) la %r14,__LC_LAST_UPDATE_TIMER 2: spt 0(%r14) mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) -3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? +3: TSTMSK __LC_MCCK_CODE,(MCCK_CODE_PSW_MWP_VALID|MCCK_CODE_PSW_IA_VALID) jno .Lmcck_panic # no -> skip cleanup critical SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER .Lmcck_skip: @@ -916,7 +913,7 @@ ENTRY(mcck_int_handler) la %r11,STACK_FRAME_OVERHEAD(%r1) lgr %r15,%r1 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off - tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING + TSTMSK __LC_CPU_FLAGS,_CIF_MCCK_PENDING jno .Lmcck_return TRACE_IRQS_OFF brasl %r14,s390_handle_mcck @@ -941,7 +938,10 @@ ENTRY(mcck_int_handler) # PSW restart interrupt handler # ENTRY(restart_int_handler) - stg %r15,__LC_SAVE_AREA_RESTART + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP + jz 0f + .insn s,0xb2800000,__LC_LPP +0: stg %r15,__LC_SAVE_AREA_RESTART lg %r15,__LC_RESTART_STACK aghi %r15,-__PT_SIZE # create pt_regs on stack xc 0(__PT_SIZE,%r15),0(%r15) @@ -1019,10 +1019,6 @@ cleanup_critical: jl 0f clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end jl .Lcleanup_load_fpu_regs - clg %r9,BASED(.Lcleanup_table+112) # __ctl_set_vx - jl 0f - clg %r9,BASED(.Lcleanup_table+120) # .L__ctl_set_vx_end - jl .Lcleanup___ctl_set_vx 0: br %r14 .align 8 @@ -1041,8 +1037,6 @@ cleanup_critical: .quad .Lsave_fpu_regs_end .quad load_fpu_regs .quad .Lload_fpu_regs_end - .quad __ctl_set_vx - .quad .L__ctl_set_vx_end #if IS_ENABLED(CONFIG_KVM) .Lcleanup_table_sie: @@ -1051,10 +1045,7 @@ cleanup_critical: .Lcleanup_sie: lg %r9,__SF_EMPTY(%r15) # get control block pointer - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_LPP - jz 0f - .insn s,0xb2800000,__SF_EMPTY+16(%r15)# set host id -0: ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE + ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE lctlg %c1,%c1,__LC_USER_ASCE # load primary asce larl %r9,sie_exit # skip forward to sie_exit br %r14 @@ -1206,7 +1197,7 @@ 
cleanup_critical: .quad .Lpsw_idle_lpsw .Lcleanup_save_fpu_regs: - tm __LC_CPU_FLAGS+7,_CIF_FPU + TSTMSK __LC_CPU_FLAGS,_CIF_FPU bor %r14 clg %r9,BASED(.Lcleanup_save_fpu_regs_done) jhe 5f @@ -1224,9 +1215,7 @@ cleanup_critical: stfpc __THREAD_FPU_fpc(%r2) 1: # Load register save area and check if VX is active lg %r3,__THREAD_FPU_regs(%r2) - ltgr %r3,%r3 - jz 5f # no save area -> set CIF_FPU - tm __THREAD_FPU_flags+3(%r2),FPU_USE_VX + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX jz 4f # no VX -> store FP regs 2: # Store vector registers (V0-V15) VSTM %v0,%v15,0,%r3 # vstm 0,15,0(3) @@ -1266,43 +1255,27 @@ cleanup_critical: .quad .Lsave_fpu_regs_done .Lcleanup_load_fpu_regs: - tm __LC_CPU_FLAGS+7,_CIF_FPU + TSTMSK __LC_CPU_FLAGS,_CIF_FPU bnor %r14 clg %r9,BASED(.Lcleanup_load_fpu_regs_done) jhe 1f clg %r9,BASED(.Lcleanup_load_fpu_regs_fp) jhe 2f - clg %r9,BASED(.Lcleanup_load_fpu_regs_fp_ctl) - jhe 3f clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_high) - jhe 4f + jhe 3f clg %r9,BASED(.Lcleanup_load_fpu_regs_vx) - jhe 5f - clg %r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl) - jhe 6f + jhe 4f lg %r4,__LC_CURRENT aghi %r4,__TASK_thread lfpc __THREAD_FPU_fpc(%r4) - tm __THREAD_FPU_flags+3(%r4),FPU_USE_VX # VX-enabled task ? + TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area - jz 3f # -> no VX, load FP regs -6: # Set VX-enablement control - stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0 - tm __SF_EMPTY+32+5(%r15),2 # test VX control - jo 5f - oi __SF_EMPTY+32+5(%r15),2 # set VX control - lctlg %c0,%c0,__SF_EMPTY+32(%r15) -5: # Load V0 ..V15 registers + jz 2f # -> no VX, load FP regs +4: # Load V0 ..V15 registers VLM %v0,%v15,0,%r4 -4: # Load V16..V31 registers +3: # Load V16..V31 registers VLM %v16,%v31,256,%r4 j 1f -3: # Clear VX-enablement control for FP - stctg %c0,%c0,__SF_EMPTY+32(%r15) # store CR0 - tm __SF_EMPTY+32+5(%r15),2 # test VX control - jz 2f - ni __SF_EMPTY+32+5(%r15),253 # clear VX control - lctlg %c0,%c0,__SF_EMPTY+32(%r15) 2: # Load floating-point registers ld 0,0(%r4) ld 1,8(%r4) @@ -1324,28 +1297,15 @@ cleanup_critical: ni __LC_CPU_FLAGS+7,255-_CIF_FPU lg %r9,48(%r11) # return from load_fpu_regs br %r14 -.Lcleanup_load_fpu_regs_vx_ctl: - .quad .Lload_fpu_regs_vx_ctl .Lcleanup_load_fpu_regs_vx: .quad .Lload_fpu_regs_vx .Lcleanup_load_fpu_regs_vx_high: .quad .Lload_fpu_regs_vx_high -.Lcleanup_load_fpu_regs_fp_ctl: - .quad .Lload_fpu_regs_fp_ctl .Lcleanup_load_fpu_regs_fp: .quad .Lload_fpu_regs_fp .Lcleanup_load_fpu_regs_done: .quad .Lload_fpu_regs_done -.Lcleanup___ctl_set_vx: - stctg %c0,%c0,__SF_EMPTY(%r15) - tm __SF_EMPTY+5(%r15),2 - bor %r14 - oi __SF_EMPTY+5(%r15),2 - lctlg %c0,%c0,__SF_EMPTY(%r15) - lg %r9,48(%r11) # return from __ctl_set_vx - br %r14 - /* * Integer constants */ diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 834df047d..b7019ab74 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -16,13 +16,10 @@ void io_int_handler(void); void mcck_int_handler(void); void restart_int_handler(void); void restart_call_handler(void); -void psw_idle(struct s390_idle_data *, unsigned long); asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); asmlinkage void do_syscall_trace_exit(struct pt_regs *regs); -int alloc_vector_registers(struct task_struct *tsk); - void do_protection_exception(struct pt_regs *regs); void do_dat_exception(struct pt_regs *regs); diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 1255c6c53..301ee9c70 100644 --- a/arch/s390/kernel/head.S +++ 
b/arch/s390/kernel/head.S @@ -26,6 +26,7 @@ #include <asm/asm-offsets.h> #include <asm/thread_info.h> #include <asm/page.h> +#include <asm/ptrace.h> #define ARCH_OFFSET 4 @@ -59,19 +60,6 @@ __HEAD .long 0x020006e0,0x20000050 .org 0x200 -# -# subroutine to set architecture mode -# -.Lsetmode: - mvi __LC_AR_MODE_ID,1 # set esame flag - slr %r0,%r0 # set cpuid to zero - lhi %r1,2 # mode 2 = esame (dump) - sigp %r1,%r0,0x12 # switch to esame mode - bras %r13,0f - .fill 16,4,0x0 -0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs - sam31 # switch to 31 bit addressing mode - br %r14 # # subroutine to wait for end I/O @@ -159,7 +147,14 @@ __HEAD .long 0x02200050,0x00000000 iplstart: - bas %r14,.Lsetmode # Immediately switch to 64 bit mode + mvi __LC_AR_MODE_ID,1 # set esame flag + slr %r0,%r0 # set cpuid to zero + lhi %r1,2 # mode 2 = esame (dump) + sigp %r1,%r0,0x12 # switch to esame mode + bras %r13,0f + .fill 16,4,0x0 +0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs + sam31 # switch to 31 bit addressing mode lh %r1,0xb8 # test if subchannel number bct %r1,.Lnoload # is valid l %r1,0xb8 # load ipl subchannel number @@ -269,71 +264,6 @@ iplstart: .Lcpuid:.fill 8,1,0 # -# SALIPL loader support. Based on a patch by Rob van der Heij. -# This entry point is called directly from the SALIPL loader and -# doesn't need a builtin ipl record. -# - .org 0x800 -ENTRY(start) - stm %r0,%r15,0x07b0 # store registers - bas %r14,.Lsetmode # Immediately switch to 64 bit mode - basr %r12,%r0 -.base: - l %r11,.parm - l %r8,.cmd # pointer to command buffer - - ltr %r9,%r9 # do we have SALIPL parameters? - bp .sk8x8 - - mvc 0(64,%r8),0x00b0 # copy saved registers - xc 64(240-64,%r8),0(%r8) # remainder of buffer - tr 0(64,%r8),.lowcase - b .gotr -.sk8x8: - mvc 0(240,%r8),0(%r9) # copy iplparms into buffer -.gotr: - slr %r0,%r0 - st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11) - st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11) - j startup # continue with startup -.cmd: .long COMMAND_LINE # address of command line buffer -.parm: .long PARMAREA -.lowcase: - .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07 - .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f - .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17 - .byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f - .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27 - .byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f - .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37 - .byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f - .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47 - .byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f - .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57 - .byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f - .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67 - .byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f - .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77 - .byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f - - .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87 - .byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f - .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97 - .byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f - .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 - .byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf - .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7 - .byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf - .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg - .byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi - .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop - .byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr - .byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx - .byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef 
# yz - .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7 - .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff - -# # startup-code at 0x10000, running in absolute addressing mode # this is called either by the ipl loader or directly by PSW restart # or linload or SALIPL @@ -364,7 +294,7 @@ ENTRY(startup_kdump) bras %r13,0f .fill 16,4,0x0 0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs - sam31 # switch to 31 bit addressing mode + sam64 # switch to 64 bit addressing mode basr %r13,0 # get base .LPG0: xc 0x200(256),0x200 # partially clear lowcore @@ -395,7 +325,7 @@ ENTRY(startup_kdump) jnz 1b j 4f 2: l %r15,.Lstack-.LPG0(%r13) - ahi %r15,-96 + ahi %r15,-STACK_FRAME_OVERHEAD la %r2,.Lals_string-.LPG0(%r13) l %r3,.Lsclp_print-.LPG0(%r13) basr %r14,%r3 @@ -429,8 +359,7 @@ ENTRY(startup_kdump) .long 1, 0xc0000000 #endif 4: - /* Continue with 64bit startup code in head64.S */ - sam64 # switch to 64 bit mode + /* Continue with startup code in head64.S */ jg startup_continue .align 8 diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index d7c005075..58b719fa8 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -16,7 +16,12 @@ __HEAD ENTRY(startup_continue) - larl %r1,sched_clock_base_cc + tm __LC_STFL_FAC_LIST+6,0x80 # LPP available ? + jz 0f + xc __LC_LPP+1(7,0),__LC_LPP+1 # clear lpp and current_pid + mvi __LC_LPP,0x80 # and set LPP_MAGIC + .insn s,0xb2800000,__LC_LPP # load program parameter +0: larl %r1,sched_clock_base_cc mvc 0(8,%r1),__LC_LAST_UPDATE_CLOCK larl %r13,.LPG1 # get base lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 52fbef91d..b1f0a90f9 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -17,6 +17,7 @@ #include <linux/gfp.h> #include <linux/crash_dump.h> #include <linux/debug_locks.h> +#include <asm/diag.h> #include <asm/ipl.h> #include <asm/smp.h> #include <asm/setup.h> @@ -120,6 +121,7 @@ static char *dump_type_str(enum dump_type type) * Must be in data section since the bss section * is not cleared when these are accessed. 
*/ +static u8 ipl_ssid __attribute__((__section__(".data"))) = 0; static u16 ipl_devno __attribute__((__section__(".data"))) = 0; u32 ipl_flags __attribute__((__section__(".data"))) = 0; @@ -165,7 +167,7 @@ static struct ipl_parameter_block *dump_block_ccw; static struct sclp_ipl_info sclp_ipl_info; -int diag308(unsigned long subcode, void *addr) +static inline int __diag308(unsigned long subcode, void *addr) { register unsigned long _addr asm("0") = (unsigned long) addr; register unsigned long _rc asm("1") = 0; @@ -178,6 +180,12 @@ int diag308(unsigned long subcode, void *addr) : "d" (subcode) : "cc", "memory"); return _rc; } + +int diag308(unsigned long subcode, void *addr) +{ + diag_stat_inc(DIAG_STAT_X308); + return __diag308(subcode, addr); +} EXPORT_SYMBOL_GPL(diag308); /* SYSFS */ @@ -190,6 +198,33 @@ static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \ return snprintf(page, PAGE_SIZE, _format, ##args); \ } +#define IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk) \ +static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ + struct kobj_attribute *attr, \ + const char *buf, size_t len) \ +{ \ + unsigned long long ssid, devno; \ + \ + if (sscanf(buf, "0.%llx.%llx\n", &ssid, &devno) != 2) \ + return -EINVAL; \ + \ + if (ssid > __MAX_SSID || devno > __MAX_SUBCHANNEL) \ + return -EINVAL; \ + \ + _ipl_blk.ssid = ssid; \ + _ipl_blk.devno = devno; \ + return len; \ +} + +#define DEFINE_IPL_CCW_ATTR_RW(_prefix, _name, _ipl_blk) \ +IPL_ATTR_SHOW_FN(_prefix, _name, "0.%x.%04x\n", \ + _ipl_blk.ssid, _ipl_blk.devno); \ +IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk); \ +static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ + __ATTR(_name, (S_IRUGO | S_IWUSR), \ + sys_##_prefix##_##_name##_show, \ + sys_##_prefix##_##_name##_store) \ + #define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \ IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ @@ -388,7 +423,7 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj, switch (ipl_info.type) { case IPL_TYPE_CCW: - return sprintf(page, "0.0.%04x\n", ipl_devno); + return sprintf(page, "0.%x.%04x\n", ipl_ssid, ipl_devno); case IPL_TYPE_FCP: case IPL_TYPE_FCP_DUMP: return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno); @@ -680,21 +715,14 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { + size_t scpdata_len = count; size_t padding; - size_t scpdata_len; - - if (off < 0) - return -EINVAL; - if (off >= DIAG308_SCPDATA_SIZE) - return -ENOSPC; - if (count > DIAG308_SCPDATA_SIZE - off) - count = DIAG308_SCPDATA_SIZE - off; - - memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count); - scpdata_len = off + count; + if (off) + return -EINVAL; + memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf, count); if (scpdata_len % 8) { padding = 8 - (scpdata_len % 8); memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len, @@ -710,7 +738,7 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj, } static struct bin_attribute sys_reipl_fcp_scp_data_attr = __BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read, - reipl_fcp_scpdata_write, PAGE_SIZE); + reipl_fcp_scpdata_write, DIAG308_SCPDATA_SIZE); static struct bin_attribute *reipl_fcp_bin_attrs[] = { &sys_reipl_fcp_scp_data_attr, @@ -807,9 +835,7 @@ static struct attribute_group reipl_fcp_attr_group = { }; /* CCW reipl device attributes */ - 
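(Usage note, inferred from the IPL_ATTR_CCW_STORE_FN/DEFINE_IPL_CCW_ATTR_RW helpers above rather than stated in the patch: the CCW re-IPL and dump device attributes now accept a full bus ID of the form 0.<ssid>.<devno> instead of a bare device number, so, assuming the usual sysfs location, writing for example "0.1.4711" to /sys/firmware/reipl/ccw/device selects subchannel set 1 and device number 0x4711, and the attribute reads back in the same 0.%x.%04x format.)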
-DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", - reipl_block_ccw->ipl_info.ccw.devno); +DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ipl_info.ccw); /* NSS wrapper */ static ssize_t reipl_nss_loadparm_show(struct kobject *kobj, @@ -1049,8 +1075,8 @@ static void __reipl_run(void *unused) switch (reipl_method) { case REIPL_METHOD_CCW_CIO: + devid.ssid = reipl_block_ccw->ipl_info.ccw.ssid; devid.devno = reipl_block_ccw->ipl_info.ccw.devno; - devid.ssid = 0; reipl_ccw_dev(&devid); break; case REIPL_METHOD_CCW_VM: @@ -1185,6 +1211,7 @@ static int __init reipl_ccw_init(void) reipl_block_ccw_init(reipl_block_ccw); if (ipl_info.type == IPL_TYPE_CCW) { + reipl_block_ccw->ipl_info.ccw.ssid = ipl_ssid; reipl_block_ccw->ipl_info.ccw.devno = ipl_devno; reipl_block_ccw_fill_parms(reipl_block_ccw); } @@ -1329,9 +1356,7 @@ static struct attribute_group dump_fcp_attr_group = { }; /* CCW dump device attributes */ - -DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", - dump_block_ccw->ipl_info.ccw.devno); +DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ipl_info.ccw); static struct attribute *dump_ccw_attrs[] = { &sys_dump_ccw_device_attr.attr, @@ -1411,8 +1436,8 @@ static void __dump_run(void *unused) switch (dump_method) { case DUMP_METHOD_CCW_CIO: + devid.ssid = dump_block_ccw->ipl_info.ccw.ssid; devid.devno = dump_block_ccw->ipl_info.ccw.devno; - devid.ssid = 0; reipl_ccw_dev(&devid); break; case DUMP_METHOD_CCW_VM: @@ -1932,14 +1957,14 @@ void __init setup_ipl(void) ipl_info.type = get_ipl_type(); switch (ipl_info.type) { case IPL_TYPE_CCW: + ipl_info.data.ccw.dev_id.ssid = ipl_ssid; ipl_info.data.ccw.dev_id.devno = ipl_devno; - ipl_info.data.ccw.dev_id.ssid = 0; break; case IPL_TYPE_FCP: case IPL_TYPE_FCP_DUMP: + ipl_info.data.fcp.dev_id.ssid = 0; ipl_info.data.fcp.dev_id.devno = IPL_PARMBLOCK_START->ipl_info.fcp.devno; - ipl_info.data.fcp.dev_id.ssid = 0; ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn; ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun; break; @@ -1971,6 +1996,7 @@ void __init ipl_save_parameters(void) if (cio_get_iplinfo(&iplinfo)) return; + ipl_ssid = iplinfo.ssid; ipl_devno = iplinfo.devno; ipl_flags |= IPL_DEVNO_VALID; if (!iplinfo.is_qdio) diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index e9d9addfa..f41d5208a 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -69,7 +69,6 @@ static const struct irq_class irqclass_sub_desc[] = { {.irq = IRQEXT_IUC, .name = "IUC", .desc = "[EXT] IUCV"}, {.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"}, {.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"}, - {.irq = IRQEXT_CMR, .name = "CMR", .desc = "[EXT] CPU-Measurement: RI"}, {.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"}, {.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"}, {.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"}, diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 0ae6f8e74..07302ce37 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -21,19 +21,20 @@ #include <asm/nmi.h> #include <asm/crw.h> #include <asm/switch_to.h> -#include <asm/fpu-internal.h> #include <asm/ctl_reg.h> struct mcck_struct { - int kill_task; - int channel_report; - int warning; - unsigned long long mcck_code; + unsigned int kill_task : 1; + unsigned int channel_report : 1; + unsigned int warning : 1; + unsigned int etr_queue : 1; + unsigned 
int stp_queue : 1; + unsigned long mcck_code; }; static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck); -static void s390_handle_damage(char *msg) +static void s390_handle_damage(void) { smp_send_stop(); disabled_wait((unsigned long) __builtin_return_address(0)); @@ -81,10 +82,14 @@ void s390_handle_mcck(void) if (xchg(&mchchk_wng_posted, 1) == 0) kill_cad_pid(SIGPWR, 1); } + if (mcck.etr_queue) + etr_queue_work(); + if (mcck.stp_queue) + stp_queue_work(); if (mcck.kill_task) { local_irq_enable(); printk(KERN_EMERG "mcck: Terminating task because of machine " - "malfunction (code 0x%016llx).\n", mcck.mcck_code); + "malfunction (code 0x%016lx).\n", mcck.mcck_code); printk(KERN_EMERG "mcck: task: %s, pid: %d.\n", current->comm, current->pid); do_exit(SIGSEGV); @@ -96,7 +101,7 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck); * returns 0 if all registers could be validated * returns 1 otherwise */ -static int notrace s390_revalidate_registers(struct mci *mci) +static int notrace s390_validate_registers(union mci mci) { int kill_task; u64 zero; @@ -105,14 +110,14 @@ static int notrace s390_revalidate_registers(struct mci *mci) kill_task = 0; zero = 0; - if (!mci->gr) { + if (!mci.gr) { /* * General purpose registers couldn't be restored and have * unknown contents. Process needs to be terminated. */ kill_task = 1; } - if (!mci->fp) { + if (!mci.fp) { /* * Floating point registers can't be restored and * therefore the process needs to be terminated. @@ -121,7 +126,7 @@ static int notrace s390_revalidate_registers(struct mci *mci) } fpt_save_area = &S390_lowcore.floating_pt_save_area; fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area; - if (!mci->fc) { + if (!mci.fc) { /* * Floating point control register can't be restored. * Task will be terminated. @@ -132,7 +137,7 @@ static int notrace s390_revalidate_registers(struct mci *mci) asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area)); if (!MACHINE_HAS_VX) { - /* Revalidate floating point registers */ + /* Validate floating point registers */ asm volatile( " ld 0,0(%0)\n" " ld 1,8(%0)\n" @@ -152,10 +157,10 @@ static int notrace s390_revalidate_registers(struct mci *mci) " ld 15,120(%0)\n" : : "a" (fpt_save_area)); } else { - /* Revalidate vector registers */ + /* Validate vector registers */ union ctlreg0 cr0; - if (!mci->vr) { + if (!mci.vr) { /* * Vector registers can't be restored and therefore * the process needs to be terminated. @@ -173,38 +178,38 @@ static int notrace s390_revalidate_registers(struct mci *mci) &S390_lowcore.vector_save_area) : "1"); __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0); } - /* Revalidate access registers */ + /* Validate access registers */ asm volatile( " lam 0,15,0(%0)" : : "a" (&S390_lowcore.access_regs_save_area)); - if (!mci->ar) { + if (!mci.ar) { /* * Access registers have unknown contents. * Terminating task. */ kill_task = 1; } - /* Revalidate control registers */ - if (!mci->cr) { + /* Validate control registers */ + if (!mci.cr) { /* * Control registers have unknown contents. * Can't recover and therefore stopping machine. */ - s390_handle_damage("invalid control registers."); + s390_handle_damage(); } else { asm volatile( " lctlg 0,15,0(%0)" : : "a" (&S390_lowcore.cregs_save_area)); } /* - * We don't even try to revalidate the TOD register, since we simply + * We don't even try to validate the TOD register, since we simply * can't write something sensible into that register. 
*/ /* - * See if we can revalidate the TOD programmable register with its + * See if we can validate the TOD programmable register with its * old contents (should be zero) otherwise set it to zero. */ - if (!mci->pr) + if (!mci.pr) asm volatile( " sr 0,0\n" " sckpf" @@ -215,17 +220,17 @@ static int notrace s390_revalidate_registers(struct mci *mci) " sckpf" : : "a" (&S390_lowcore.tod_progreg_save_area) : "0", "cc"); - /* Revalidate clock comparator register */ + /* Validate clock comparator register */ set_clock_comparator(S390_lowcore.clock_comparator); /* Check if old PSW is valid */ - if (!mci->wp) + if (!mci.wp) /* * Can't tell if we come from user or kernel mode * -> stopping machine. */ - s390_handle_damage("old psw invalid."); + s390_handle_damage(); - if (!mci->ms || !mci->pm || !mci->ia) + if (!mci.ms || !mci.pm || !mci.ia) kill_task = 1; return kill_task; @@ -249,21 +254,21 @@ void notrace s390_do_machine_check(struct pt_regs *regs) static unsigned long long last_ipd; struct mcck_struct *mcck; unsigned long long tmp; - struct mci *mci; + union mci mci; int umode; nmi_enter(); inc_irq_stat(NMI_NMI); - mci = (struct mci *) &S390_lowcore.mcck_interruption_code; + mci.val = S390_lowcore.mcck_interruption_code; mcck = this_cpu_ptr(&cpu_mcck); umode = user_mode(regs); - if (mci->sd) { + if (mci.sd) { /* System damage -> stopping machine */ - s390_handle_damage("received system damage machine check."); + s390_handle_damage(); } - if (mci->pd) { - if (mci->b) { + if (mci.pd) { + if (mci.b) { /* Processing backup -> verify if we can survive this */ u64 z_mcic, o_mcic, t_mcic; z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29); @@ -271,12 +276,11 @@ void notrace s390_do_machine_check(struct pt_regs *regs) 1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 | 1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16); - t_mcic = *(u64 *)mci; + t_mcic = mci.val; if (((t_mcic & z_mcic) != 0) || ((t_mcic & o_mcic) != o_mcic)) { - s390_handle_damage("processing backup machine " - "check with damage."); + s390_handle_damage(); } /* @@ -291,64 +295,62 @@ void notrace s390_do_machine_check(struct pt_regs *regs) ipd_count = 1; last_ipd = tmp; if (ipd_count == MAX_IPD_COUNT) - s390_handle_damage("too many ipd retries."); + s390_handle_damage(); spin_unlock(&ipd_lock); } else { /* Processing damage -> stopping machine */ - s390_handle_damage("received instruction processing " - "damage machine check."); + s390_handle_damage(); } } - if (s390_revalidate_registers(mci)) { + if (s390_validate_registers(mci)) { if (umode) { /* * Couldn't restore all register contents while in * user mode -> mark task for termination. */ mcck->kill_task = 1; - mcck->mcck_code = *(unsigned long long *) mci; + mcck->mcck_code = mci.val; set_cpu_flag(CIF_MCCK_PENDING); } else { /* * Couldn't restore all register contents while in * kernel mode -> stopping machine. 
*/ - s390_handle_damage("unable to revalidate registers."); + s390_handle_damage(); } } - if (mci->cd) { + if (mci.cd) { /* Timing facility damage */ - s390_handle_damage("TOD clock damaged"); + s390_handle_damage(); } - if (mci->ed && mci->ec) { + if (mci.ed && mci.ec) { /* External damage */ if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC)) - etr_sync_check(); + mcck->etr_queue |= etr_sync_check(); if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH)) - etr_switch_to_local(); + mcck->etr_queue |= etr_switch_to_local(); if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC)) - stp_sync_check(); + mcck->stp_queue |= stp_sync_check(); if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND)) - stp_island_check(); + mcck->stp_queue |= stp_island_check(); + if (mcck->etr_queue || mcck->stp_queue) + set_cpu_flag(CIF_MCCK_PENDING); } - if (mci->se) + if (mci.se) /* Storage error uncorrected */ - s390_handle_damage("received storage error uncorrected " - "machine check."); - if (mci->ke) + s390_handle_damage(); + if (mci.ke) /* Storage key-error uncorrected */ - s390_handle_damage("received storage key-error uncorrected " - "machine check."); - if (mci->ds && mci->fa) + s390_handle_damage(); + if (mci.ds && mci.fa) /* Storage degradation */ - s390_handle_damage("received storage degradation machine " - "check."); - if (mci->cp) { + s390_handle_damage(); + if (mci.cp) { /* Channel report word pending */ mcck->channel_report = 1; set_cpu_flag(CIF_MCCK_PENDING); } - if (mci->w) { + if (mci.w) { /* Warning pending */ mcck->warning = 1; set_cpu_flag(CIF_MCCK_PENDING); diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index a9563409c..929c147e0 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -72,6 +72,7 @@ struct cpu_hw_events { atomic_t ctr_set[CPUMF_CTR_SET_MAX]; u64 state, tx_state; unsigned int flags; + unsigned int txn_flags; }; static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .ctr_set = { @@ -82,6 +83,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { }, .state = 0, .flags = 0, + .txn_flags = 0, }; static int get_counter_set(u64 event) @@ -538,7 +540,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags) * For group events transaction, the authorization check is * done in cpumf_pmu_commit_txn(). */ - if (!(cpuhw->flags & PERF_EVENT_TXN)) + if (!(cpuhw->txn_flags & PERF_PMU_TXN_ADD)) if (validate_ctr_auth(&event->hw)) return -ENOENT; @@ -576,13 +578,22 @@ static void cpumf_pmu_del(struct perf_event *event, int flags) /* * Start group events scheduling transaction. * Set flags to perform a single test at commit time. + * + * We only support PERF_PMU_TXN_ADD transactions. Save the + * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD + * transactions. 
*/ -static void cpumf_pmu_start_txn(struct pmu *pmu) +static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags) { struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */ + + cpuhw->txn_flags = txn_flags; + if (txn_flags & ~PERF_PMU_TXN_ADD) + return; + perf_pmu_disable(pmu); - cpuhw->flags |= PERF_EVENT_TXN; cpuhw->tx_state = cpuhw->state; } @@ -593,11 +604,18 @@ static void cpumf_pmu_start_txn(struct pmu *pmu) */ static void cpumf_pmu_cancel_txn(struct pmu *pmu) { + unsigned int txn_flags; struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); + WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ + + txn_flags = cpuhw->txn_flags; + cpuhw->txn_flags = 0; + if (txn_flags & ~PERF_PMU_TXN_ADD) + return; + WARN_ON(cpuhw->tx_state != cpuhw->state); - cpuhw->flags &= ~PERF_EVENT_TXN; perf_pmu_enable(pmu); } @@ -611,13 +629,20 @@ static int cpumf_pmu_commit_txn(struct pmu *pmu) struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events); u64 state; + WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */ + + if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) { + cpuhw->txn_flags = 0; + return 0; + } + /* check if the updated state can be scheduled */ state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1); state >>= CPUMF_LCCTL_ENABLE_SHIFT; if ((state & cpuhw->info.auth_ctl) != state) return -ENOENT; - cpuhw->flags &= ~PERF_EVENT_TXN; + cpuhw->txn_flags = 0; perf_pmu_enable(pmu); return 0; } diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index b973972f6..3d8da1e74 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1019,11 +1019,13 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr) break; } - /* The host-program-parameter (hpp) contains the pid of - * the CPU thread as set by sie64a() in entry.S. - * If non-zero assume a guest sample. + /* + * A non-zero guest program parameter indicates a guest + * sample. + * Note that some early samples might be misaccounted to + * the host. */ - if (sfr->basic.hpp) + if (sfr->basic.gpp) sde_regs->in_guest = 1; overflow = 0; diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index f2dac9f07..114ee8b96 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -23,6 +23,7 @@ #include <linux/kprobes.h> #include <linux/random.h> #include <linux/module.h> +#include <linux/init_task.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/vtimer.h> @@ -36,6 +37,9 @@ asmlinkage void ret_from_fork(void) asm ("ret_from_fork"); +/* FPU save area for the init task */ +__vector128 init_task_fpu_regs[__NUM_VXRS] __init_task_data; + /* * Return saved PC of a blocked thread. used in kernel/sched. * resume in entry.S does not create a new stack frame, it @@ -87,31 +91,29 @@ void arch_release_task_struct(struct task_struct *tsk) int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { + size_t fpu_regs_size; + *dst = *src; - /* Set up a new floating-point register save area */ - dst->thread.fpu.fpc = 0; - dst->thread.fpu.flags = 0; /* Always start with VX disabled */ - dst->thread.fpu.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS, - GFP_KERNEL|__GFP_REPEAT); - if (!dst->thread.fpu.fprs) + /* + * If the vector extension is available, it is enabled for all tasks, + * and, thus, the FPU register save area must be allocated accordingly. + */ + fpu_regs_size = MACHINE_HAS_VX ? 
sizeof(__vector128) * __NUM_VXRS + : sizeof(freg_t) * __NUM_FPRS; + dst->thread.fpu.regs = kzalloc(fpu_regs_size, GFP_KERNEL|__GFP_REPEAT); + if (!dst->thread.fpu.regs) return -ENOMEM; /* * Save the floating-point or vector register state of the current - * task. The state is not saved for early kernel threads, for example, - * the init_task, which do not have an allocated save area. - * The CIF_FPU flag is set in any case to lazy clear or restore a saved - * state when switching to a different task or returning to user space. + * task and set the CIF_FPU flag to lazy restore the FPU register + * state when returning to user space. */ save_fpu_regs(); dst->thread.fpu.fpc = current->thread.fpu.fpc; - if (is_vx_task(current)) - convert_vx_to_fp(dst->thread.fpu.fprs, - current->thread.fpu.vxrs); - else - memcpy(dst->thread.fpu.fprs, current->thread.fpu.fprs, - sizeof(freg_t) * __NUM_FPRS); + memcpy(dst->thread.fpu.regs, current->thread.fpu.regs, fpu_regs_size); + return 0; } @@ -169,7 +171,6 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, /* Don't copy runtime instrumentation info */ p->thread.ri_cb = NULL; - p->thread.ri_signum = 0; frame->childregs.psw.mask &= ~PSW_MASK_RI; /* Set a new TLS ? */ @@ -199,7 +200,7 @@ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs) save_fpu_regs(); fpregs->fpc = current->thread.fpu.fpc; fpregs->pad = 0; - if (is_vx_task(current)) + if (MACHINE_HAS_VX) convert_vx_to_fp((freg_t *)&fpregs->fprs, current->thread.fpu.vxrs); else @@ -242,11 +243,7 @@ unsigned long arch_align_stack(unsigned long sp) static inline unsigned long brk_rnd(void) { - /* 8MB for 32bit, 1GB for 64bit */ - if (is_32bit_task()) - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT; - else - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT; + return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT; } unsigned long arch_randomize_brk(struct mm_struct *mm) diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index e6e077ae3..7ce00e7a7 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -11,6 +11,7 @@ #include <linux/seq_file.h> #include <linux/delay.h> #include <linux/cpu.h> +#include <asm/diag.h> #include <asm/elf.h> #include <asm/lowcore.h> #include <asm/param.h> @@ -20,8 +21,10 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id); void notrace cpu_relax(void) { - if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) + if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) { + diag_stat_inc(DIAG_STAT_X044); asm volatile("diag 0,0,0x44"); + } barrier(); } EXPORT_SYMBOL(cpu_relax); diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index d319f36f7..01c37b36c 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -239,7 +239,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) * or the child->thread.fpu.vxrs array */ offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; - if (is_vx_task(child)) + if (MACHINE_HAS_VX) tmp = *(addr_t *) ((addr_t) child->thread.fpu.vxrs + 2*offset); else @@ -383,7 +383,7 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) * or the child->thread.fpu.vxrs array */ offset = addr - (addr_t) &dummy->regs.fp_regs.fprs; - if (is_vx_task(child)) + if (MACHINE_HAS_VX) *(addr_t *)((addr_t) child->thread.fpu.vxrs + 2*offset) = data; else @@ -617,7 +617,7 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr) * or the child->thread.fpu.vxrs array */ offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; - if (is_vx_task(child)) + 
if (MACHINE_HAS_VX) tmp = *(__u32 *) ((addr_t) child->thread.fpu.vxrs + 2*offset); else @@ -742,7 +742,7 @@ static int __poke_user_compat(struct task_struct *child, * or the child->thread.fpu.vxrs array */ offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs; - if (is_vx_task(child)) + if (MACHINE_HAS_VX) *(__u32 *)((addr_t) child->thread.fpu.vxrs + 2*offset) = tmp; else @@ -981,7 +981,7 @@ static int s390_fpregs_set(struct task_struct *target, if (rc) return rc; - if (is_vx_task(target)) + if (MACHINE_HAS_VX) convert_fp_to_vx(target->thread.fpu.vxrs, fprs); else memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs)); @@ -1047,13 +1047,10 @@ static int s390_vxrs_low_get(struct task_struct *target, if (!MACHINE_HAS_VX) return -ENODEV; - if (is_vx_task(target)) { - if (target == current) - save_fpu_regs(); - for (i = 0; i < __NUM_VXRS_LOW; i++) - vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); - } else - memset(vxrs, 0, sizeof(vxrs)); + if (target == current) + save_fpu_regs(); + for (i = 0; i < __NUM_VXRS_LOW; i++) + vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1); return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); } @@ -1067,11 +1064,7 @@ static int s390_vxrs_low_set(struct task_struct *target, if (!MACHINE_HAS_VX) return -ENODEV; - if (!is_vx_task(target)) { - rc = alloc_vector_registers(target); - if (rc) - return rc; - } else if (target == current) + if (target == current) save_fpu_regs(); rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); @@ -1091,13 +1084,10 @@ static int s390_vxrs_high_get(struct task_struct *target, if (!MACHINE_HAS_VX) return -ENODEV; - if (is_vx_task(target)) { - if (target == current) - save_fpu_regs(); - memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, - sizeof(vxrs)); - } else - memset(vxrs, 0, sizeof(vxrs)); + if (target == current) + save_fpu_regs(); + memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW, sizeof(vxrs)); + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1); } @@ -1110,11 +1100,7 @@ static int s390_vxrs_high_set(struct task_struct *target, if (!MACHINE_HAS_VX) return -ENODEV; - if (!is_vx_task(target)) { - rc = alloc_vector_registers(target); - if (rc) - return rc; - } else if (target == current) + if (target == current) save_fpu_regs(); rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c index 26b4ae96f..fffa0e546 100644 --- a/arch/s390/kernel/runtime_instr.c +++ b/arch/s390/kernel/runtime_instr.c @@ -18,11 +18,6 @@ /* empty control block to disable RI by loading it */ struct runtime_instr_cb runtime_instr_empty_cb; -static int runtime_instr_avail(void) -{ - return test_facility(64); -} - static void disable_runtime_instr(void) { struct pt_regs *regs = task_pt_regs(current); @@ -40,7 +35,6 @@ static void disable_runtime_instr(void) static void init_runtime_instr_cb(struct runtime_instr_cb *cb) { cb->buf_limit = 0xfff; - cb->int_requested = 1; cb->pstate = 1; cb->pstate_set_buf = 1; cb->pstate_sample = 1; @@ -57,46 +51,14 @@ void exit_thread_runtime_instr(void) return; disable_runtime_instr(); kfree(task->thread.ri_cb); - task->thread.ri_signum = 0; task->thread.ri_cb = NULL; } -static void runtime_instr_int_handler(struct ext_code ext_code, - unsigned int param32, unsigned long param64) -{ - struct siginfo info; - - if (!(param32 & CPU_MF_INT_RI_MASK)) - return; - - inc_irq_stat(IRQEXT_CMR); - - if (!current->thread.ri_cb) - return; - if (current->thread.ri_signum < SIGRTMIN || - 
current->thread.ri_signum > SIGRTMAX) { - WARN_ON_ONCE(1); - return; - } - - memset(&info, 0, sizeof(info)); - info.si_signo = current->thread.ri_signum; - info.si_code = SI_QUEUE; - if (param32 & CPU_MF_INT_RI_BUF_FULL) - info.si_int = ENOBUFS; - else if (param32 & CPU_MF_INT_RI_HALTED) - info.si_int = ECANCELED; - else - return; /* unknown reason */ - - send_sig_info(current->thread.ri_signum, &info, current); -} - -SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum) +SYSCALL_DEFINE1(s390_runtime_instr, int, command) { struct runtime_instr_cb *cb; - if (!runtime_instr_avail()) + if (!test_facility(64)) return -EOPNOTSUPP; if (command == S390_RUNTIME_INSTR_STOP) { @@ -106,8 +68,7 @@ SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum) return 0; } - if (command != S390_RUNTIME_INSTR_START || - (signum < SIGRTMIN || signum > SIGRTMAX)) + if (command != S390_RUNTIME_INSTR_START) return -EINVAL; if (!current->thread.ri_cb) { @@ -120,7 +81,6 @@ SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum) } init_runtime_instr_cb(cb); - current->thread.ri_signum = signum; /* now load the control block to make it available */ preempt_disable(); @@ -129,21 +89,3 @@ SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum) preempt_enable(); return 0; } - -static int __init runtime_instr_init(void) -{ - int rc; - - if (!runtime_instr_avail()) - return 0; - - irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT); - rc = register_external_irq(EXT_IRQ_MEASURE_ALERT, - runtime_instr_int_handler); - if (rc) - irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT); - else - pr_info("Runtime instrumentation facility initialized\n"); - return rc; -} -device_initcall(runtime_instr_init); diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 5090d3dad..e67453b73 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c @@ -1,6 +1,6 @@ #include <linux/module.h> #include <linux/kvm_host.h> -#include <asm/fpu-internal.h> +#include <asm/fpu/api.h> #include <asm/ftrace.h> #ifdef CONFIG_FUNCTION_TRACER @@ -10,7 +10,6 @@ EXPORT_SYMBOL(_mcount); EXPORT_SYMBOL(sie64a); EXPORT_SYMBOL(sie_exit); EXPORT_SYMBOL(save_fpu_regs); -EXPORT_SYMBOL(__ctl_set_vx); #endif EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); diff --git a/arch/s390/kernel/sclp.c b/arch/s390/kernel/sclp.c index fa0bdff1d..9fe7781a4 100644 --- a/arch/s390/kernel/sclp.c +++ b/arch/s390/kernel/sclp.c @@ -21,7 +21,7 @@ static void _sclp_wait_int(void) __ctl_load(cr0_new, 0, 0); psw_ext_save = S390_lowcore.external_new_psw; - psw_mask = __extract_psw() & (PSW_MASK_EA | PSW_MASK_BA); + psw_mask = __extract_psw(); S390_lowcore.external_new_psw.mask = psw_mask; psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT; S390_lowcore.ext_int_code = 0; diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index ce0cbd6ba..c837bcacf 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -764,9 +764,6 @@ static int __init setup_hwcaps(void) get_cpu_id(&cpu_id); add_device_randomness(&cpu_id, sizeof(cpu_id)); switch (cpu_id.machine) { - case 0x9672: - strcpy(elf_platform, "g5"); - break; case 0x2064: case 0x2066: default: /* Use "z900" as default for 64 bit kernels. 
*/ diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 9549af102..028cc46cb 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -179,7 +179,7 @@ static int save_sigregs_ext(struct pt_regs *regs, int i; /* Save vector registers to signal stack */ - if (is_vx_task(current)) { + if (MACHINE_HAS_VX) { for (i = 0; i < __NUM_VXRS_LOW; i++) vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1); if (__copy_to_user(&sregs_ext->vxrs_low, vxrs, @@ -199,7 +199,7 @@ static int restore_sigregs_ext(struct pt_regs *regs, int i; /* Restore vector registers from signal stack */ - if (is_vx_task(current)) { + if (MACHINE_HAS_VX) { if (__copy_from_user(vxrs, &sregs_ext->vxrs_low, sizeof(sregs_ext->vxrs_low)) || __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW, @@ -381,8 +381,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, uc_flags = 0; if (MACHINE_HAS_VX) { frame_size += sizeof(_sigregs_ext); - if (is_vx_task(current)) - uc_flags |= UC_VXRS; + uc_flags |= UC_VXRS; } frame = get_sigframe(&ksig->ka, regs, frame_size); if (frame == (void __user *) -1UL) diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index c6355e6f3..9062df575 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -33,6 +33,7 @@ #include <linux/crash_dump.h> #include <linux/memblock.h> #include <asm/asm-offsets.h> +#include <asm/diag.h> #include <asm/switch_to.h> #include <asm/facility.h> #include <asm/ipl.h> @@ -261,6 +262,8 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk) + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); lc->thread_info = (unsigned long) task_thread_info(tsk); lc->current_task = (unsigned long) tsk; + lc->lpp = LPP_MAGIC; + lc->current_pid = tsk->pid; lc->user_timer = ti->user_timer; lc->system_timer = ti->system_timer; lc->steal_timer = 0; @@ -375,11 +378,14 @@ int smp_vcpu_scheduled(int cpu) void smp_yield_cpu(int cpu) { - if (MACHINE_HAS_DIAG9C) + if (MACHINE_HAS_DIAG9C) { + diag_stat_inc_norecursion(DIAG_STAT_X09C); asm volatile("diag %0,0,0x9c" : : "d" (pcpu_devices[cpu].address)); - else if (MACHINE_HAS_DIAG44) + } else if (MACHINE_HAS_DIAG44) { + diag_stat_inc_norecursion(DIAG_STAT_X044); asm volatile("diag 0,0,0x44"); + } } /* diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 8c56929c8..5378c3ea1 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -382,3 +382,4 @@ SYSCALL(sys_sendmsg,compat_sys_sendmsg) /* 370 */ SYSCALL(sys_recvfrom,compat_sys_recvfrom) SYSCALL(sys_recvmsg,compat_sys_recvmsg) SYSCALL(sys_shutdown,sys_shutdown) +SYSCALL(sys_mlock2,compat_sys_mlock2) diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 017c3a9bf..99f84ac31 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -542,16 +542,17 @@ arch_initcall(etr_init); * Switch to local machine check. This is called when the last usable * ETR port goes inactive. After switch to local the clock is not in sync. */ -void etr_switch_to_local(void) +int etr_switch_to_local(void) { if (!etr_eacr.sl) - return; + return 0; disable_sync_clock(NULL); if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) { etr_eacr.es = etr_eacr.sl = 0; etr_setr(&etr_eacr); - queue_work(time_sync_wq, &etr_work); + return 1; } + return 0; } /* @@ -560,16 +561,22 @@ void etr_switch_to_local(void) * After a ETR sync check the clock is not in sync. The machine check * is broadcasted to all cpus at the same time. 
*/ -void etr_sync_check(void) +int etr_sync_check(void) { if (!etr_eacr.es) - return; + return 0; disable_sync_clock(NULL); if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) { etr_eacr.es = 0; etr_setr(&etr_eacr); - queue_work(time_sync_wq, &etr_work); + return 1; } + return 0; +} + +void etr_queue_work(void) +{ + queue_work(time_sync_wq, &etr_work); } /* @@ -1504,10 +1511,10 @@ static void stp_timing_alert(struct stp_irq_parm *intparm) * After a STP sync check the clock is not in sync. The machine check * is broadcasted to all cpus at the same time. */ -void stp_sync_check(void) +int stp_sync_check(void) { disable_sync_clock(NULL); - queue_work(time_sync_wq, &stp_work); + return 1; } /* @@ -1516,12 +1523,16 @@ void stp_sync_check(void) * have matching CTN ids and have a valid stratum-1 configuration * but the configurations do not match. */ -void stp_island_check(void) +int stp_island_check(void) { disable_sync_clock(NULL); - queue_work(time_sync_wq, &stp_work); + return 1; } +void stp_queue_work(void) +{ + queue_work(time_sync_wq, &stp_work); +} static int stp_sync_clock(void *data) { diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index bf05e7fc3..40b8102fd 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -84,6 +84,7 @@ static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core, struct mask_info *socket, int one_socket_per_cpu) { + struct cpu_topology_s390 *topo; unsigned int core; for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) { @@ -95,15 +96,16 @@ static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core, if (lcpu < 0) continue; for (i = 0; i <= smp_cpu_mtid; i++) { - per_cpu(cpu_topology, lcpu + i).book_id = book->id; - per_cpu(cpu_topology, lcpu + i).core_id = rcore; - per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i; + topo = &per_cpu(cpu_topology, lcpu + i); + topo->book_id = book->id; + topo->core_id = rcore; + topo->thread_id = lcpu + i; cpumask_set_cpu(lcpu + i, &book->mask); cpumask_set_cpu(lcpu + i, &socket->mask); if (one_socket_per_cpu) - per_cpu(cpu_topology, lcpu + i).socket_id = rcore; + topo->socket_id = rcore; else - per_cpu(cpu_topology, lcpu + i).socket_id = socket->id; + topo->socket_id = socket->id; smp_cpu_set_polarization(lcpu + i, tl_core->pp); } if (one_socket_per_cpu) @@ -247,17 +249,19 @@ int topology_set_cpu_management(int fc) static void update_cpu_masks(void) { + struct cpu_topology_s390 *topo; int cpu; for_each_possible_cpu(cpu) { - per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu); - per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu); - per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu); + topo = &per_cpu(cpu_topology, cpu); + topo->thread_mask = cpu_thread_map(cpu); + topo->core_mask = cpu_group_map(&socket_info, cpu); + topo->book_mask = cpu_group_map(&book_info, cpu); if (!MACHINE_HAS_TOPOLOGY) { - per_cpu(cpu_topology, cpu).thread_id = cpu; - per_cpu(cpu_topology, cpu).core_id = cpu; - per_cpu(cpu_topology, cpu).socket_id = cpu; - per_cpu(cpu_topology, cpu).book_id = cpu; + topo->thread_id = cpu; + topo->core_id = cpu; + topo->socket_id = cpu; + topo->book_id = cpu; } } numa_update_cpu_topology(); diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c new file mode 100644 index 000000000..21a5df995 --- /dev/null +++ b/arch/s390/kernel/trace.c @@ -0,0 +1,29 @@ +/* + * Tracepoint definitions for s390 + * + * Copyright IBM Corp. 
2015 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#include <linux/percpu.h> +#define CREATE_TRACE_POINTS +#include <asm/trace/diag.h> + +EXPORT_TRACEPOINT_SYMBOL(s390_diagnose); + +static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth); + +void trace_s390_diagnose_norecursion(int diag_nr) +{ + unsigned long flags; + unsigned int *depth; + + local_irq_save(flags); + depth = this_cpu_ptr(&diagnose_trace_depth); + if (*depth == 0) { + (*depth)++; + trace_s390_diagnose(diag_nr); + (*depth)--; + } + local_irq_restore(flags); +} diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 9861613fb..1b18118bb 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -19,7 +19,7 @@ #include <linux/sched.h> #include <linux/mm.h> #include <linux/slab.h> -#include <asm/fpu-internal.h> +#include <asm/fpu/api.h> #include "entry.h" int show_unhandled_signals = 1; @@ -224,29 +224,6 @@ NOKPROBE_SYMBOL(illegal_op); DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, "specification exception"); -int alloc_vector_registers(struct task_struct *tsk) -{ - __vector128 *vxrs; - freg_t *fprs; - - /* Allocate vector register save area. */ - vxrs = kzalloc(sizeof(__vector128) * __NUM_VXRS, - GFP_KERNEL|__GFP_REPEAT); - if (!vxrs) - return -ENOMEM; - preempt_disable(); - if (tsk == current) - save_fpu_regs(); - /* Copy the 16 floating point registers */ - convert_fp_to_vx(vxrs, tsk->thread.fpu.fprs); - fprs = tsk->thread.fpu.fprs; - tsk->thread.fpu.vxrs = vxrs; - tsk->thread.fpu.flags |= FPU_USE_VX; - kfree(fprs); - preempt_enable(); - return 0; -} - void vector_exception(struct pt_regs *regs) { int si_code, vic; @@ -281,13 +258,6 @@ void vector_exception(struct pt_regs *regs) do_trap(regs, SIGFPE, si_code, "vector exception"); } -static int __init disable_vector_extension(char *str) -{ - S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX; - return 1; -} -__setup("novx", disable_vector_extension); - void data_exception(struct pt_regs *regs) { __u16 __user *location; @@ -296,15 +266,6 @@ void data_exception(struct pt_regs *regs) location = get_trap_ip(regs); save_fpu_regs(); - /* Check for vector register enablement */ - if (MACHINE_HAS_VX && !is_vx_task(current) && - (current->thread.fpu.fpc & FPC_DXC_MASK) == 0xfe00) { - alloc_vector_registers(current); - /* Vector data exception is suppressing, rewind psw. 
*/ - regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); - clear_pt_regs_flag(regs, PIF_PER_TRAP); - return; - } if (current->thread.fpu.fpc & FPC_DXC_MASK) signal = SIGFPE; else diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index 0d58269ff..59eddb0e1 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c @@ -299,7 +299,7 @@ static int __init vdso_init(void) get_page(virt_to_page(vdso_data)); - smp_wmb(); + smp_mb(); return 0; } diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 7365e8a46..b4a5aa110 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -336,28 +336,28 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu) return -EOPNOTSUPP; } -static const intercept_handler_t intercept_funcs[] = { - [0x00 >> 2] = handle_noop, - [0x04 >> 2] = handle_instruction, - [0x08 >> 2] = handle_prog, - [0x10 >> 2] = handle_noop, - [0x14 >> 2] = handle_external_interrupt, - [0x18 >> 2] = handle_noop, - [0x1C >> 2] = kvm_s390_handle_wait, - [0x20 >> 2] = handle_validity, - [0x28 >> 2] = handle_stop, - [0x38 >> 2] = handle_partial_execution, -}; - int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu) { - intercept_handler_t func; - u8 code = vcpu->arch.sie_block->icptcode; - - if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs)) + switch (vcpu->arch.sie_block->icptcode) { + case 0x00: + case 0x10: + case 0x18: + return handle_noop(vcpu); + case 0x04: + return handle_instruction(vcpu); + case 0x08: + return handle_prog(vcpu); + case 0x14: + return handle_external_interrupt(vcpu); + case 0x1c: + return kvm_s390_handle_wait(vcpu); + case 0x20: + return handle_validity(vcpu); + case 0x28: + return handle_stop(vcpu); + case 0x38: + return handle_partial_execution(vcpu); + default: return -EOPNOTSUPP; - func = intercept_funcs[code >> 2]; - if (func) - return func(vcpu); - return -EOPNOTSUPP; + } } diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 08de785c9..6a75352f4 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -51,11 +51,9 @@ static int psw_mchk_disabled(struct kvm_vcpu *vcpu) static int psw_interrupts_disabled(struct kvm_vcpu *vcpu) { - if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) || - (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) || - (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT)) - return 0; - return 1; + return psw_extint_disabled(vcpu) && + psw_ioint_disabled(vcpu) && + psw_mchk_disabled(vcpu); } static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) @@ -71,13 +69,8 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu) static int ckc_irq_pending(struct kvm_vcpu *vcpu) { - preempt_disable(); - if (!(vcpu->arch.sie_block->ckc < - get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) { - preempt_enable(); + if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm)) return 0; - } - preempt_enable(); return ckc_interrupts_enabled(vcpu); } @@ -109,14 +102,10 @@ static inline u8 int_word_to_isc(u32 int_word) return (int_word & 0x38000000) >> 27; } -static inline unsigned long pending_floating_irqs(struct kvm_vcpu *vcpu) +static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu) { - return vcpu->kvm->arch.float_int.pending_irqs; -} - -static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.local_int.pending_irqs; + return vcpu->kvm->arch.float_int.pending_irqs | + vcpu->arch.local_int.pending_irqs; } static unsigned long disable_iscs(struct kvm_vcpu *vcpu, @@ -135,8 +124,7 @@ static 
unsigned long deliverable_irqs(struct kvm_vcpu *vcpu) { unsigned long active_mask; - active_mask = pending_local_irqs(vcpu); - active_mask |= pending_floating_irqs(vcpu); + active_mask = pending_irqs(vcpu); if (!active_mask) return 0; @@ -204,7 +192,7 @@ static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) { - if (!(pending_floating_irqs(vcpu) & IRQ_PEND_IO_MASK)) + if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK)) return; else if (psw_ioint_disabled(vcpu)) __set_cpuflag(vcpu, CPUSTAT_IO_INT); @@ -214,7 +202,7 @@ static void set_intercept_indicators_io(struct kvm_vcpu *vcpu) static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu) { - if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK)) + if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK)) return; if (psw_extint_disabled(vcpu)) __set_cpuflag(vcpu, CPUSTAT_EXT_INT); @@ -224,7 +212,7 @@ static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu) static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu) { - if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK)) + if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK)) return; if (psw_mchk_disabled(vcpu)) vcpu->arch.sie_block->ictl |= ICTL_LPSW; @@ -815,23 +803,21 @@ int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu) int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop) { - int rc; + if (deliverable_irqs(vcpu)) + return 1; - rc = !!deliverable_irqs(vcpu); - - if (!rc && kvm_cpu_has_pending_timer(vcpu)) - rc = 1; + if (kvm_cpu_has_pending_timer(vcpu)) + return 1; /* external call pending and deliverable */ - if (!rc && kvm_s390_ext_call_pending(vcpu) && + if (kvm_s390_ext_call_pending(vcpu) && !psw_extint_disabled(vcpu) && (vcpu->arch.sie_block->gcr[0] & 0x2000ul)) - rc = 1; - - if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu)) - rc = 1; + return 1; - return rc; + if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu)) + return 1; + return 0; } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) @@ -846,7 +832,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) vcpu->stat.exit_wait_state++; /* fast path */ - if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu)) + if (kvm_arch_vcpu_runnable(vcpu)) return 0; if (psw_interrupts_disabled(vcpu)) { @@ -860,9 +846,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) goto no_timer; } - preempt_disable(); - now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; - preempt_enable(); + now = kvm_s390_get_tod_clock_fast(vcpu->kvm); sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); /* underflow */ @@ -901,9 +885,7 @@ enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) u64 now, sltime; vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); - preempt_disable(); - now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; - preempt_enable(); + now = kvm_s390_get_tod_clock_fast(vcpu->kvm); sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); /* @@ -981,39 +963,30 @@ static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, irq->u.pgm.code, 0); - li->irq.pgm = irq->u.pgm; + if (irq->u.pgm.code == PGM_PER) { + li->irq.pgm.code |= PGM_PER; + /* only modify PER related information */ + li->irq.pgm.per_address = irq->u.pgm.per_address; + li->irq.pgm.per_code = irq->u.pgm.per_code; + li->irq.pgm.per_atmid = irq->u.pgm.per_atmid; + li->irq.pgm.per_access_id = irq->u.pgm.per_access_id; + } else if (!(irq->u.pgm.code & PGM_PER)) { + li->irq.pgm.code = 
(li->irq.pgm.code & PGM_PER) | + irq->u.pgm.code; + /* only modify non-PER information */ + li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code; + li->irq.pgm.mon_code = irq->u.pgm.mon_code; + li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code; + li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr; + li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id; + li->irq.pgm.op_access_id = irq->u.pgm.op_access_id; + } else { + li->irq.pgm = irq->u.pgm; + } set_bit(IRQ_PEND_PROG, &li->pending_irqs); return 0; } -int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) -{ - struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - struct kvm_s390_irq irq; - - spin_lock(&li->lock); - irq.u.pgm.code = code; - __inject_prog(vcpu, &irq); - BUG_ON(waitqueue_active(li->wq)); - spin_unlock(&li->lock); - return 0; -} - -int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, - struct kvm_s390_pgm_info *pgm_info) -{ - struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; - struct kvm_s390_irq irq; - int rc; - - spin_lock(&li->lock); - irq.u.pgm = *pgm_info; - rc = __inject_prog(vcpu, &irq); - BUG_ON(waitqueue_active(li->wq)); - spin_unlock(&li->lock); - return rc; -} - static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) { struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; @@ -1393,12 +1366,9 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type) static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) { - struct kvm_s390_float_interrupt *fi; u64 type = READ_ONCE(inti->type); int rc; - fi = &kvm->arch.float_int; - switch (type) { case KVM_S390_MCHK: rc = __inject_float_mchk(kvm, inti); diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 1290af834..846589281 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -518,35 +518,20 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) if (gtod_high != 0) return -EINVAL; - VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high); + VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high); return 0; } static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) { - struct kvm_vcpu *cur_vcpu; - unsigned int vcpu_idx; - u64 host_tod, gtod; - int r; + u64 gtod; if (copy_from_user(>od, (void __user *)attr->addr, sizeof(gtod))) return -EFAULT; - r = store_tod_clock(&host_tod); - if (r) - return r; - - mutex_lock(&kvm->lock); - preempt_disable(); - kvm->arch.epoch = gtod - host_tod; - kvm_s390_vcpu_block_all(kvm); - kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) - cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch; - kvm_s390_vcpu_unblock_all(kvm); - preempt_enable(); - mutex_unlock(&kvm->lock); - VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod); + kvm_s390_set_tod_clock(kvm, gtod); + VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod); return 0; } @@ -578,26 +563,19 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) if (copy_to_user((void __user *)attr->addr, >od_high, sizeof(gtod_high))) return -EFAULT; - VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high); + VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high); return 0; } static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) { - u64 host_tod, gtod; - int r; + u64 gtod; - r = store_tod_clock(&host_tod); - if (r) - return r; - - preempt_disable(); - gtod = host_tod + kvm->arch.epoch; - preempt_enable(); + gtod = kvm_s390_get_tod_clock_fast(kvm); if (copy_to_user((void __user *)attr->addr, 
>od, sizeof(gtod))) return -EFAULT; - VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod); + VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod); return 0; } @@ -1298,7 +1276,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) static inline void save_fpu_to(struct fpu *dst) { dst->fpc = current->thread.fpu.fpc; - dst->flags = current->thread.fpu.flags; dst->regs = current->thread.fpu.regs; } @@ -1309,7 +1286,6 @@ static inline void save_fpu_to(struct fpu *dst) static inline void load_fpu_from(struct fpu *from) { current->thread.fpu.fpc = from->fpc; - current->thread.fpu.flags = from->flags; current->thread.fpu.regs = from->regs; } @@ -1321,15 +1297,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (test_kvm_facility(vcpu->kvm, 129)) { current->thread.fpu.fpc = vcpu->run->s.regs.fpc; - current->thread.fpu.flags = FPU_USE_VX; /* * Use the register save area in the SIE-control block * for register restore and save in kvm_arch_vcpu_put() */ current->thread.fpu.vxrs = (__vector128 *)&vcpu->run->s.regs.vrs; - /* Always enable the vector extension for KVM */ - __ctl_set_vx(); } else load_fpu_from(&vcpu->arch.guest_fpregs); @@ -1922,6 +1895,22 @@ retry: return 0; } +void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod) +{ + struct kvm_vcpu *vcpu; + int i; + + mutex_lock(&kvm->lock); + preempt_disable(); + kvm->arch.epoch = tod - get_tod_clock(); + kvm_s390_vcpu_block_all(kvm); + kvm_for_each_vcpu(i, vcpu, kvm) + vcpu->arch.sie_block->epoch = kvm->arch.epoch; + kvm_s390_vcpu_unblock_all(kvm); + preempt_enable(); + mutex_unlock(&kvm->lock); +} + /** * kvm_arch_fault_in_page - fault-in guest page if necessary * @vcpu: The corresponding virtual cpu @@ -2332,7 +2321,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) * registers and the FPC value and store them in the * guest_fpregs structure. 
*/ - WARN_ON(!is_vx_task(current)); /* XXX remove later */ vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc; convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs, current->thread.fpu.vxrs); diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index c446aabf6..1e70e00d3 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -175,6 +175,7 @@ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm) return kvm->arch.user_cpu_state_ctrl != 0; } +/* implemented in interrupt.c */ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu); void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu); enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer); @@ -185,7 +186,25 @@ int __must_check kvm_s390_inject_vm(struct kvm *kvm, struct kvm_s390_interrupt *s390int); int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq); -int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); +static inline int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, + struct kvm_s390_pgm_info *pgm_info) +{ + struct kvm_s390_irq irq = { + .type = KVM_S390_PROGRAM_INT, + .u.pgm = *pgm_info, + }; + + return kvm_s390_inject_vcpu(vcpu, &irq); +} +static inline int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) +{ + struct kvm_s390_irq irq = { + .type = KVM_S390_PROGRAM_INT, + .u.pgm.code = code, + }; + + return kvm_s390_inject_vcpu(vcpu, &irq); +} struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm, u64 isc_mask, u32 schid); int kvm_s390_reinject_io_int(struct kvm *kvm, @@ -212,6 +231,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu); /* implemented in kvm-s390.c */ +void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod); long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable); int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr); int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu, @@ -231,9 +251,6 @@ extern unsigned long kvm_s390_fac_list_mask[]; /* implemented in diag.c */ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu); -/* implemented in interrupt.c */ -int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, - struct kvm_s390_pgm_info *pgm_info); static inline void kvm_s390_vcpu_block_all(struct kvm *kvm) { @@ -254,6 +271,16 @@ static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm) kvm_s390_vcpu_unblock(vcpu); } +static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm) +{ + u64 rc; + + preempt_disable(); + rc = get_tod_clock_fast() + kvm->arch.epoch; + preempt_enable(); + return rc; +} + /** * kvm_s390_inject_prog_cond - conditionally inject a program check * @vcpu: virtual cpu diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 4d21dc4d1..d76b51cb4 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -33,11 +33,9 @@ /* Handle SCK (SET CLOCK) interception */ static int handle_set_clock(struct kvm_vcpu *vcpu) { - struct kvm_vcpu *cpup; - s64 hostclk, val; - int i, rc; + int rc; ar_t ar; - u64 op2; + u64 op2, val; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); @@ -49,19 +47,8 @@ static int handle_set_clock(struct kvm_vcpu *vcpu) if (rc) return kvm_s390_inject_prog_cond(vcpu, rc); - if (store_tod_clock(&hostclk)) { - kvm_s390_set_psw_cc(vcpu, 3); - return 0; - } VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val); - val = (val - hostclk) & ~0x3fUL; - - mutex_lock(&vcpu->kvm->lock); - preempt_disable(); - 
kvm_for_each_vcpu(i, cpup, vcpu->kvm) - cpup->arch.sie_block->epoch = val; - preempt_enable(); - mutex_unlock(&vcpu->kvm->lock); + kvm_s390_set_tod_clock(vcpu->kvm, val); kvm_s390_set_psw_cc(vcpu, 0); return 0; @@ -673,7 +660,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) kvm_s390_get_regs_rre(vcpu, ®1, ®2); - if (!MACHINE_HAS_PFMF) + if (!test_kvm_facility(vcpu->kvm, 8)) return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index 246a7eb4b..501dcd4ca 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c @@ -12,8 +12,10 @@ #include <linux/module.h> #include <linux/irqflags.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <asm/vtimer.h> #include <asm/div64.h> +#include <asm/idle.h> void __delay(unsigned long loops) { @@ -30,26 +32,22 @@ EXPORT_SYMBOL(__delay); static void __udelay_disabled(unsigned long long usecs) { - unsigned long cr0, cr6, new; - u64 clock_saved, end; + unsigned long cr0, cr0_new, psw_mask; + struct s390_idle_data idle; + u64 end; end = get_tod_clock() + (usecs << 12); - clock_saved = local_tick_disable(); __ctl_store(cr0, 0, 0); - __ctl_store(cr6, 6, 6); - new = (cr0 & 0xffff00e0) | 0x00000800; - __ctl_load(new , 0, 0); - new = 0; - __ctl_load(new, 6, 6); - lockdep_off(); - do { - set_clock_comparator(end); - enabled_wait(); - } while (get_tod_clock_fast() < end); - lockdep_on(); + cr0_new = cr0 & ~CR0_IRQ_SUBCLASS_MASK; + cr0_new |= (1UL << (63 - 52)); /* enable clock comparator irq */ + __ctl_load(cr0_new, 0, 0); + psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT; + set_clock_comparator(end); + set_cpu_flag(CIF_IGNORE_IRQ); + psw_idle(&idle, psw_mask); + clear_cpu_flag(CIF_IGNORE_IRQ); + set_clock_comparator(S390_lowcore.clock_comparator); __ctl_load(cr0, 0, 0); - __ctl_load(cr6, 6, 6); - local_tick_enable(clock_saved); } static void __udelay_enabled(unsigned long long usecs) diff --git a/arch/s390/lib/find.c b/arch/s390/lib/find.c index 922003c1b..d90b9245e 100644 --- a/arch/s390/lib/find.c +++ b/arch/s390/lib/find.c @@ -1,10 +1,8 @@ /* * MSB0 numbered special bitops handling. 
* - * On s390x the bits are numbered: + * The bits are numbered: * |0..............63|64............127|128...........191|192...........255| - * and on s390: - * |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255| * * The reason for this bit numbering is the fact that the hardware sets bits * in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index d6c9991f7..427aa44b3 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -197,7 +197,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev) } old = ACCESS_ONCE(rw->lock); owner = ACCESS_ONCE(rw->owner); - smp_rmb(); + smp_mb(); if ((int) old >= 0) { prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR); old = prev; @@ -231,7 +231,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw) _raw_compare_and_swap(&rw->lock, old, old | 0x80000000)) prev = old; else - smp_rmb(); + smp_mb(); if ((old & 0x7fffffff) == 0 && (int) prev >= 0) break; if (MACHINE_HAS_CAD) diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 23c496957..18fccc303 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c @@ -18,6 +18,7 @@ #include <linux/bootmem.h> #include <linux/ctype.h> #include <linux/ioport.h> +#include <asm/diag.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/ebcdic.h> @@ -112,6 +113,7 @@ dcss_set_subcodes(void) ry = DCSS_FINDSEGX; strcpy(name, "dummy"); + diag_stat_inc(DIAG_STAT_X064); asm volatile( " diag %0,%1,0x64\n" "0: ipm %2\n" @@ -205,6 +207,7 @@ dcss_diag(int *func, void *parameter, ry = (unsigned long) *func; /* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */ + diag_stat_inc(DIAG_STAT_X064); if (*func > DCSS_SEGEXT) asm volatile( " diag %0,%1,0x64\n" diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index f985856a5..ec1a30d0d 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -30,6 +30,7 @@ #include <linux/uaccess.h> #include <linux/hugetlb.h> #include <asm/asm-offsets.h> +#include <asm/diag.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/mmu_context.h> @@ -589,7 +590,7 @@ int pfault_init(void) .reffcode = 0, .refdwlen = 5, .refversn = 2, - .refgaddr = __LC_CURRENT_PID, + .refgaddr = __LC_LPP, .refselmk = 1ULL << 48, .refcmpmk = 1ULL << 48, .reserved = __PF_RES_FIELD }; @@ -597,6 +598,7 @@ int pfault_init(void) if (pfault_disable) return -1; + diag_stat_inc(DIAG_STAT_X258); asm volatile( " diag %1,%0,0x258\n" "0: j 2f\n" @@ -618,6 +620,7 @@ void pfault_fini(void) if (pfault_disable) return; + diag_stat_inc(DIAG_STAT_X258); asm volatile( " diag %0,0,0x258\n" "0:\n" @@ -646,7 +649,7 @@ static void pfault_interrupt(struct ext_code ext_code, return; inc_irq_stat(IRQEXT_PFL); /* Get the token (= pid of the affected task). 
*/ - pid = param64; + pid = param64 & LPP_PFAULT_PID_MASK; rcu_read_lock(); tsk = find_task_by_pid_ns(pid, &init_pid_ns); if (tsk) diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index fb4bf2c43..f81096b69 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -40,6 +40,7 @@ static inline pmd_t __pte_to_pmd(pte_t pte) pmd_val(pmd) |= (pte_val(pte) & _PAGE_PROTECT); pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10; pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10; + pmd_val(pmd) |= (pte_val(pte) & _PAGE_SOFT_DIRTY) << 13; } else pmd_val(pmd) = _SEGMENT_ENTRY_INVALID; return pmd; @@ -78,6 +79,7 @@ static inline pte_t __pmd_to_pte(pmd_t pmd) pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT); pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10; pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10; + pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY) >> 13; } else pte_val(pte) = _PAGE_INVALID; return pte; diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index c3c07d350..c722400c7 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -48,37 +48,13 @@ EXPORT_SYMBOL(zero_page_mask); static void __init setup_zero_pages(void) { - struct cpuid cpu_id; unsigned int order; struct page *page; int i; - get_cpu_id(&cpu_id); - switch (cpu_id.machine) { - case 0x9672: /* g5 */ - case 0x2064: /* z900 */ - case 0x2066: /* z900 */ - case 0x2084: /* z990 */ - case 0x2086: /* z990 */ - case 0x2094: /* z9-109 */ - case 0x2096: /* z9-109 */ - order = 0; - break; - case 0x2097: /* z10 */ - case 0x2098: /* z10 */ - case 0x2817: /* z196 */ - case 0x2818: /* z196 */ - order = 2; - break; - case 0x2827: /* zEC12 */ - case 0x2828: /* zEC12 */ - order = 5; - break; - case 0x2964: /* z13 */ - default: - order = 7; - break; - } + /* Latest machines require a mapping granularity of 512KB */ + order = 7; + /* Limit number of empty zero pages for small memory sizes */ while (order > 2 && (totalram_pages >> 10) < (1UL << order)) order--; diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 6e552af08..ea01477b4 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -31,9 +31,6 @@ #include <linux/security.h> #include <asm/pgalloc.h> -unsigned long mmap_rnd_mask; -static unsigned long mmap_align_mask; - static unsigned long stack_maxrandom_size(void) { if (!(current->flags & PF_RANDOMIZE)) @@ -62,10 +59,7 @@ static inline int mmap_is_legacy(void) unsigned long arch_mmap_rnd(void) { - if (is_32bit_task()) - return (get_random_int() & 0x7ff) << PAGE_SHIFT; - else - return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT; + return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT; } static unsigned long mmap_base_legacy(unsigned long rnd) @@ -92,7 +86,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, struct mm_struct *mm = current->mm; struct vm_area_struct *vma; struct vm_unmapped_area_info info; - int do_color_align; if (len > TASK_SIZE - mmap_min_addr) return -ENOMEM; @@ -108,15 +101,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, return addr; } - do_color_align = 0; - if (filp || (flags & MAP_SHARED)) - do_color_align = !is_32bit_task(); - info.flags = 0; info.length = len; info.low_limit = mm->mmap_base; info.high_limit = TASK_SIZE; - info.align_mask = do_color_align ? 
(mmap_align_mask << PAGE_SHIFT) : 0; + if (filp || (flags & MAP_SHARED)) + info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT; + else + info.align_mask = 0; info.align_offset = pgoff << PAGE_SHIFT; return vm_unmapped_area(&info); } @@ -130,7 +122,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, struct mm_struct *mm = current->mm; unsigned long addr = addr0; struct vm_unmapped_area_info info; - int do_color_align; /* requested length too big for entire address space */ if (len > TASK_SIZE - mmap_min_addr) @@ -148,15 +139,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, return addr; } - do_color_align = 0; - if (filp || (flags & MAP_SHARED)) - do_color_align = !is_32bit_task(); - info.flags = VM_UNMAPPED_AREA_TOPDOWN; info.length = len; info.low_limit = max(PAGE_SIZE, mmap_min_addr); info.high_limit = mm->mmap_base; - info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0; + if (filp || (flags & MAP_SHARED)) + info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT; + else + info.align_mask = 0; info.align_offset = pgoff << PAGE_SHIFT; addr = vm_unmapped_area(&info); @@ -254,35 +244,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm) mm->get_unmapped_area = s390_get_unmapped_area_topdown; } } - -static int __init setup_mmap_rnd(void) -{ - struct cpuid cpu_id; - - get_cpu_id(&cpu_id); - switch (cpu_id.machine) { - case 0x9672: - case 0x2064: - case 0x2066: - case 0x2084: - case 0x2086: - case 0x2094: - case 0x2096: - case 0x2097: - case 0x2098: - case 0x2817: - case 0x2818: - case 0x2827: - case 0x2828: - mmap_rnd_mask = 0x7ffUL; - mmap_align_mask = 0UL; - break; - case 0x2964: /* z13 */ - default: - mmap_rnd_mask = 0x3ff80UL; - mmap_align_mask = 0x7fUL; - break; - } - return 0; -} -early_initcall(setup_mmap_rnd); diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index eeda05144..9a0c4c22e 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1310,7 +1310,7 @@ void bpf_int_jit_compile(struct bpf_prog *fp) if (jit.prg_buf) { set_memory_ro((unsigned long)header, header->pages); fp->bpf_func = (void *) jit.prg_buf; - fp->jited = true; + fp->jited = 1; } free_addrs: kfree(jit.addrs); diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c index 30b2698a2..828d0695d 100644 --- a/arch/s390/numa/mode_emu.c +++ b/arch/s390/numa/mode_emu.c @@ -436,9 +436,15 @@ static void emu_update_cpu_topology(void) */ static unsigned long emu_setup_size_adjust(unsigned long size) { + unsigned long size_new; + size = size ? 
: CONFIG_EMU_SIZE; - size = roundup(size, memory_block_size_bytes()); - return size; + size_new = roundup(size, memory_block_size_bytes()); + if (size_new == size) + return size; + pr_warn("Increasing memory stripe size from %ld MB to %ld MB\n", + size >> 20, size_new >> 20); + return size_new; } /* diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 37505b8b4..d348f2c09 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -24,7 +24,7 @@ static int zpci_refresh_global(struct zpci_dev *zdev) zdev->iommu_pages * PAGE_SIZE); } -static unsigned long *dma_alloc_cpu_table(void) +unsigned long *dma_alloc_cpu_table(void) { unsigned long *table, *entry; @@ -33,7 +33,7 @@ static unsigned long *dma_alloc_cpu_table(void) return NULL; for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++) - *entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED; + *entry = ZPCI_TABLE_INVALID; return table; } @@ -51,7 +51,7 @@ static unsigned long *dma_alloc_page_table(void) return NULL; for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++) - *entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED; + *entry = ZPCI_PTE_INVALID; return table; } @@ -95,7 +95,7 @@ static unsigned long *dma_get_page_table_origin(unsigned long *entry) return pto; } -static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr) +unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr) { unsigned long *sto, *pto; unsigned int rtx, sx, px; @@ -114,20 +114,10 @@ static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr return &pto[px]; } -static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr, - dma_addr_t dma_addr, int flags) +void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags) { - unsigned long *entry; - - entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr); - if (!entry) { - WARN_ON_ONCE(1); - return; - } - if (flags & ZPCI_PTE_INVALID) { invalidate_pt_entry(entry); - return; } else { set_pt_pfaa(entry, page_addr); validate_pt_entry(entry); @@ -146,17 +136,25 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa, u8 *page_addr = (u8 *) (pa & PAGE_MASK); dma_addr_t start_dma_addr = dma_addr; unsigned long irq_flags; + unsigned long *entry; int i, rc = 0; if (!nr_pages) return -EINVAL; spin_lock_irqsave(&zdev->dma_table_lock, irq_flags); - if (!zdev->dma_table) + if (!zdev->dma_table) { + rc = -EINVAL; goto no_refresh; + } for (i = 0; i < nr_pages; i++) { - dma_update_cpu_trans(zdev, page_addr, dma_addr, flags); + entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr); + if (!entry) { + rc = -ENOMEM; + goto undo_cpu_trans; + } + dma_update_cpu_trans(entry, page_addr, flags); page_addr += PAGE_SIZE; dma_addr += PAGE_SIZE; } @@ -175,13 +173,25 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa, rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr, nr_pages * PAGE_SIZE); +undo_cpu_trans: + if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) { + flags = ZPCI_PTE_INVALID; + while (i-- > 0) { + page_addr -= PAGE_SIZE; + dma_addr -= PAGE_SIZE; + entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr); + if (!entry) + break; + dma_update_cpu_trans(entry, page_addr, flags); + } + } no_refresh: spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); return rc; } -static void dma_free_seg_table(unsigned long entry) +void dma_free_seg_table(unsigned long entry) { unsigned long *sto = get_rt_sto(entry); int sx; @@ -193,21 +203,18 @@ static void 
@@ -193,21 +203,18 @@ static void dma_free_seg_table(unsigned long entry)
 	dma_free_cpu_table(sto);
 }
 
-static void dma_cleanup_tables(struct zpci_dev *zdev)
+void dma_cleanup_tables(unsigned long *table)
 {
-	unsigned long *table;
 	int rtx;
 
-	if (!zdev || !zdev->dma_table)
+	if (!table)
 		return;
 
-	table = zdev->dma_table;
 	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
 		if (reg_entry_isvalid(table[rtx]))
 			dma_free_seg_table(table[rtx]);
 
 	dma_free_cpu_table(table);
-	zdev->dma_table = NULL;
 }
 
 static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
@@ -262,6 +269,16 @@ out:
 	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
 }
 
+static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
+{
+	struct {
+		unsigned long rc;
+		unsigned long addr;
+	} __packed data = {rc, addr};
+
+	zpci_err_hex(&data, sizeof(data));
+}
+
 static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 				     unsigned long offset, size_t size,
 				     enum dma_data_direction direction,
@@ -272,33 +289,40 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 	unsigned long pa = page_to_phys(page) + offset;
 	int flags = ZPCI_PTE_VALID;
 	dma_addr_t dma_addr;
+	int ret;
 
 	/* This rounds up number of pages based on size and offset */
 	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
 	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
-	if (iommu_page_index == -1)
+	if (iommu_page_index == -1) {
+		ret = -ENOSPC;
 		goto out_err;
+	}
 
 	/* Use rounded up size */
 	size = nr_pages * PAGE_SIZE;
 
 	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
-	if (dma_addr + size > zdev->end_dma)
+	if (dma_addr + size > zdev->end_dma) {
+		ret = -ERANGE;
 		goto out_free;
+	}
 
 	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
 		flags |= ZPCI_TABLE_PROTECTED;
 
-	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
-		atomic64_add(nr_pages, &zdev->mapped_pages);
-		return dma_addr + (offset & ~PAGE_MASK);
-	}
+	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
+	if (ret)
+		goto out_free;
+
+	atomic64_add(nr_pages, &zdev->mapped_pages);
+	return dma_addr + (offset & ~PAGE_MASK);
 
 out_free:
 	dma_free_iommu(zdev, iommu_page_index, nr_pages);
 out_err:
 	zpci_err("map error:\n");
-	zpci_err_hex(&pa, sizeof(pa));
+	zpci_err_dma(ret, pa);
 	return DMA_ERROR_CODE;
 }
 
@@ -308,14 +332,16 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	unsigned long iommu_page_index;
-	int npages;
+	int npages, ret;
 
 	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
 	dma_addr = dma_addr & PAGE_MASK;
-	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
-			     ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
+	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
+			       ZPCI_PTE_INVALID);
+	if (ret) {
 		zpci_err("unmap error:\n");
-		zpci_err_hex(&dma_addr, sizeof(dma_addr));
+		zpci_err_dma(ret, dma_addr);
+		return;
 	}
 
 	atomic64_add(npages, &zdev->unmapped_pages);
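Both s390_dma_map_pages() and s390_dma_unmap_pages() size their requests with iommu_num_pages(), so a buffer that straddles a page boundary occupies an extra IOMMU page even when it is only a few bytes long. The sketch below uses a local stand-in for that helper, assumed to match the rounding done by the generic iommu-helper code, purely to make the arithmetic concrete.

/* Editor's illustration: a 0x20-byte buffer starting 0x10 bytes below a
 * page boundary spans two IOMMU pages. num_pages() is a local stand-in
 * assumed to mirror iommu_num_pages().
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long num_pages(unsigned long addr, unsigned long len,
			       unsigned long io_page_size)
{
	unsigned long size = (addr & (io_page_size - 1)) + len;

	return (size + io_page_size - 1) / io_page_size;	/* DIV_ROUND_UP */
}

int main(void)
{
	unsigned long pa = 0x12345ff0UL;	/* 0x10 below a 4 KB boundary */
	unsigned long len = 0x20UL;

	printf("nr_pages = %lu\n", num_pages(pa, len, PAGE_SIZE));	/* 2 */
	return 0;
}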
@@ -416,6 +442,13 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
 {
 	int rc;
 
+	/*
+	 * At this point, if the device is part of an IOMMU domain, this would
+	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
+	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
+	 */
+	WARN_ON(zdev->s390_domain);
+
 	spin_lock_init(&zdev->iommu_bitmap_lock);
 	spin_lock_init(&zdev->dma_table_lock);
 
@@ -450,8 +483,16 @@ out_clean:
 
 void zpci_dma_exit_device(struct zpci_dev *zdev)
 {
+	/*
+	 * At this point, if the device is part of an IOMMU domain, this would
+	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
+	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
+	 */
+	WARN_ON(zdev->s390_domain);
+
 	zpci_unregister_ioat(zdev, 0);
-	dma_cleanup_tables(zdev);
+	dma_cleanup_tables(zdev->dma_table);
+	zdev->dma_table = NULL;
 	vfree(zdev->iommu_bitmap);
 	zdev->iommu_bitmap = NULL;
 	zdev->next_bit = 0;
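Several table helpers in this file (dma_alloc_cpu_table(), dma_walk_cpu_trans(), dma_update_cpu_trans(), dma_free_seg_table(), dma_cleanup_tables()) lose their static qualifier in the hunks above, and zpci_dma_init_device()/zpci_dma_exit_device() now warn if zdev->s390_domain is set, which lines up with the IOMMU_SUPPORT selection added in the Kconfig hunk. The fragment below only sketches the calling convention those exports imply for an external user such as an IOMMU driver; the header location is an assumption and the code is not taken from the in-tree driver.

/* Editor's sketch only: how the exported helpers compose for one page.
 * The <asm/pci_dma.h> include is an assumed home for the new declarations;
 * error codes and flags are the ones used in the hunks above.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/pci_dma.h>	/* assumed location of the exported declarations */

static int sketch_map_one_page(unsigned long *dma_table,
			       dma_addr_t dma_addr, void *page_addr)
{
	unsigned long *entry;

	/* walks (and, if needed, allocates) region/segment/page tables */
	entry = dma_walk_cpu_trans(dma_table, dma_addr);
	if (!entry)
		return -ENOMEM;
	dma_update_cpu_trans(entry, page_addr, ZPCI_PTE_VALID);
	return 0;
}

static void sketch_teardown(unsigned long *dma_table)
{
	/* frees every valid segment table plus the root table itself */
	dma_cleanup_tables(dma_table);
}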