Diffstat (limited to 'arch/s390/include/asm')
-rw-r--r--  arch/s390/include/asm/atomic.h | 40
-rw-r--r--  arch/s390/include/asm/cache.h | 5
-rw-r--r--  arch/s390/include/asm/cio.h | 2
-rw-r--r--  arch/s390/include/asm/cpacf.h | 10
-rw-r--r--  arch/s390/include/asm/cpu_mf.h | 17
-rw-r--r--  arch/s390/include/asm/diag.h | 151
-rw-r--r--  arch/s390/include/asm/dma-mapping.h | 1
-rw-r--r--  arch/s390/include/asm/elf.h | 1
-rw-r--r--  arch/s390/include/asm/fcx.h | 2
-rw-r--r--  arch/s390/include/asm/fpu/api.h | 75
-rw-r--r--  arch/s390/include/asm/fpu/types.h | 10
-rw-r--r--  arch/s390/include/asm/gmap.h | 82
-rw-r--r--  arch/s390/include/asm/hugetlb.h | 5
-rw-r--r--  arch/s390/include/asm/ipl.h | 10
-rw-r--r--  arch/s390/include/asm/irq.h | 7
-rw-r--r--  arch/s390/include/asm/jump_label.h | 1
-rw-r--r--  arch/s390/include/asm/kprobes.h | 4
-rw-r--r--  arch/s390/include/asm/kvm_host.h | 33
-rw-r--r--  arch/s390/include/asm/mmu.h | 13
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 18
-rw-r--r--  arch/s390/include/asm/page.h | 17
-rw-r--r--  arch/s390/include/asm/perf_event.h | 12
-rw-r--r--  arch/s390/include/asm/pgalloc.h | 2
-rw-r--r--  arch/s390/include/asm/pgtable.h | 309
-rw-r--r--  arch/s390/include/asm/processor.h | 19
-rw-r--r--  arch/s390/include/asm/rwsem.h | 37
-rw-r--r--  arch/s390/include/asm/sclp.h | 23
-rw-r--r--  arch/s390/include/asm/sections.h | 1
-rw-r--r--  arch/s390/include/asm/setup.h | 4
-rw-r--r--  arch/s390/include/asm/sigp.h | 17
-rw-r--r--  arch/s390/include/asm/spinlock.h | 3
-rw-r--r--  arch/s390/include/asm/stp.h | 51
-rw-r--r--  arch/s390/include/asm/timex.h | 66
-rw-r--r--  arch/s390/include/asm/tlb.h | 22
-rw-r--r--  arch/s390/include/asm/tlbflush.h | 28
-rw-r--r--  arch/s390/include/asm/topology.h | 4
-rw-r--r--  arch/s390/include/asm/uaccess.h | 84
37 files changed, 971 insertions(+), 215 deletions(-)
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 911064aa5..d28cc2f5b 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -93,6 +93,11 @@ static inline int atomic_add_return(int i, atomic_t *v)
return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
}
+static inline int atomic_fetch_add(int i, atomic_t *v)
+{
+ return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
+}
+
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -114,22 +119,27 @@ static inline void atomic_add(int i, atomic_t *v)
#define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v)
+#define atomic_fetch_sub(_i, _v) atomic_fetch_add(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v) (atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v) atomic_sub(1, _v)
#define atomic_dec_return(_v) atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0)
-#define ATOMIC_OP(op, OP) \
+#define ATOMIC_OPS(op, OP) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER); \
+} \
+static inline int atomic_fetch_##op(int i, atomic_t *v) \
+{ \
+ return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER); \
}
-ATOMIC_OP(and, AND)
-ATOMIC_OP(or, OR)
-ATOMIC_OP(xor, XOR)
+ATOMIC_OPS(and, AND)
+ATOMIC_OPS(or, OR)
+ATOMIC_OPS(xor, XOR)
-#undef ATOMIC_OP
+#undef ATOMIC_OPS
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -236,6 +246,11 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
}
+static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+{
+ return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
+}
+
static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -264,17 +279,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
return old;
}
-#define ATOMIC64_OP(op, OP) \
+#define ATOMIC64_OPS(op, OP) \
static inline void atomic64_##op(long i, atomic64_t *v) \
{ \
__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER); \
+} \
+static inline long atomic64_fetch_##op(long i, atomic64_t *v) \
+{ \
+ return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
}
-ATOMIC64_OP(and, AND)
-ATOMIC64_OP(or, OR)
-ATOMIC64_OP(xor, XOR)
+ATOMIC64_OPS(and, AND)
+ATOMIC64_OPS(or, OR)
+ATOMIC64_OPS(xor, XOR)
-#undef ATOMIC64_OP
+#undef ATOMIC64_OPS
#undef __ATOMIC64_LOOP
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
@@ -315,6 +334,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
#define atomic64_inc_return(_v) atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v) atomic64_add_return(-(long long)(_i), _v)
+#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(long long)(_i), _v)
#define atomic64_sub(_i, _v) atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v) atomic64_sub(1, _v)
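For context (not part of the patch): the new fetch variants follow the kernel-wide convention of returning the value the counter held before the operation, whereas the existing *_return variants return the value after it. A minimal sketch:

    atomic_t v = ATOMIC_INIT(3);
    int after, before;

    after  = atomic_add_return(2, &v);  /* after == 5, v is now 5 */
    before = atomic_fetch_add(2, &v);   /* before == 5, v is now 7 */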
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 22da3b34c..05219a5e0 100644
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -13,9 +13,6 @@
#define L1_CACHE_SHIFT 8
#define NET_SKB_PAD 32
-#define __read_mostly __attribute__((__section__(".data..read_mostly")))
-
-/* Read-only memory is marked before mark_rodata_ro() is called. */
-#define __ro_after_init __read_mostly
+#define __read_mostly __section(.data..read_mostly)
#endif
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index d1e7b0a0f..f7ed88cc0 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -320,7 +320,7 @@ struct cio_iplinfo {
extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo);
/* Function from drivers/s390/cio/chsc.c */
-int chsc_sstpc(void *page, unsigned int op, u16 ctrl);
+int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta);
int chsc_sstpi(void *page, void *result, size_t size);
#endif
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index 1a82cf26e..d28621de8 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -20,6 +20,9 @@
#define CPACF_KMC 0xb92f /* MSA */
#define CPACF_KIMD 0xb93e /* MSA */
#define CPACF_KLMD 0xb93f /* MSA */
+#define CPACF_PCKMO 0xb928 /* MSA3 */
+#define CPACF_KMF 0xb92a /* MSA4 */
+#define CPACF_KMO 0xb92b /* MSA4 */
#define CPACF_PCC 0xb92c /* MSA4 */
#define CPACF_KMCTR 0xb92d /* MSA4 */
#define CPACF_PPNO 0xb93c /* MSA5 */
@@ -136,6 +139,7 @@ static inline void __cpacf_query(unsigned int opcode, unsigned char *status)
register unsigned long r1 asm("1") = (unsigned long) status;
asm volatile(
+ " spm 0\n" /* pckmo doesn't change the cc */
/* Parameter registers are ignored, but may not be 0 */
"0: .insn rrf,%[opc] << 16,2,2,2,0\n"
" brc 1,0b\n" /* handle partial completion */
@@ -157,6 +161,12 @@ static inline int cpacf_query(unsigned int opcode, unsigned int func)
if (!test_facility(17)) /* check for MSA */
return 0;
break;
+ case CPACF_PCKMO:
+ if (!test_facility(76)) /* check for MSA3 */
+ return 0;
+ break;
+ case CPACF_KMF:
+ case CPACF_KMO:
case CPACF_PCC:
case CPACF_KMCTR:
if (!test_facility(77)) /* check for MSA4 */
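A hedged usage sketch for the extended query support; the function code below is illustrative, real callers pass the code of the specific crypto function they intend to use:

    /* bail out early if the PCKMO instruction (MSA3) is unavailable */
    if (!cpacf_query(CPACF_PCKMO, 0x01))    /* 0x01 is an assumed function code */
            return -EOPNOTSUPP;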
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 9dd04b9e9..035164761 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -169,16 +169,27 @@ static inline int lcctl(u64 ctl)
}
/* Extract CPU counter */
-static inline int ecctr(u64 ctr, u64 *val)
+static inline int __ecctr(u64 ctr, u64 *content)
{
- register u64 content asm("4") = 0;
+ register u64 _content asm("4") = 0;
int cc;
asm volatile (
" .insn rre,0xb2e40000,%0,%2\n"
" ipm %1\n"
" srl %1,28\n"
- : "=d" (content), "=d" (cc) : "d" (ctr) : "cc");
+ : "=d" (_content), "=d" (cc) : "d" (ctr) : "cc");
+ *content = _content;
+ return cc;
+}
+
+/* Extract CPU counter */
+static inline int ecctr(u64 ctr, u64 *val)
+{
+ u64 content;
+ int cc;
+
+ cc = __ecctr(ctr, &content);
if (!cc)
*val = content;
return cc;
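The split keeps the caller-facing contract intact: __ecctr() always stores the (possibly invalid) counter content, while ecctr() still updates *val only on condition code 0. A sketch under that unchanged contract (counter number 0 is illustrative):

    u64 val;

    if (!ecctr(0, &val))    /* counter number is illustrative */
            pr_debug("counter content: %llu\n", val);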
diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h
index 5fac921c1..8acf48216 100644
--- a/arch/s390/include/asm/diag.h
+++ b/arch/s390/include/asm/diag.h
@@ -49,7 +49,7 @@ static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)
diag_stat_inc(DIAG_STAT_X010);
asm volatile(
"0: diag %0,%1,0x10\n"
- "1:\n"
+ "1: nopr %%r7\n"
EX_TABLE(0b, 1b)
EX_TABLE(1b, 1b)
: : "a" (start_addr), "a" (end_addr));
@@ -78,4 +78,153 @@ struct diag210 {
extern int diag210(struct diag210 *addr);
+/* bit is set in flags when physical cpu info is included in diag 204 data */
+#define DIAG204_LPAR_PHYS_FLG 0x80
+#define DIAG204_LPAR_NAME_LEN 8 /* lpar name len in diag 204 data */
+#define DIAG204_CPU_NAME_LEN 16 /* type name len of cpus in diag224 name table */
+
+/* diag 204 subcodes */
+enum diag204_sc {
+ DIAG204_SUBC_STIB4 = 4,
+ DIAG204_SUBC_RSI = 5,
+ DIAG204_SUBC_STIB6 = 6,
+ DIAG204_SUBC_STIB7 = 7
+};
+
+/* The two available diag 204 data formats */
+enum diag204_format {
+ DIAG204_INFO_SIMPLE = 0,
+ DIAG204_INFO_EXT = 0x00010000
+};
+
+enum diag204_cpu_flags {
+ DIAG204_CPU_ONLINE = 0x20,
+ DIAG204_CPU_CAPPED = 0x40,
+};
+
+struct diag204_info_blk_hdr {
+ __u8 npar;
+ __u8 flags;
+ __u16 tslice;
+ __u16 phys_cpus;
+ __u16 this_part;
+ __u64 curtod;
+} __packed;
+
+struct diag204_x_info_blk_hdr {
+ __u8 npar;
+ __u8 flags;
+ __u16 tslice;
+ __u16 phys_cpus;
+ __u16 this_part;
+ __u64 curtod1;
+ __u64 curtod2;
+ char reserved[40];
+} __packed;
+
+struct diag204_part_hdr {
+ __u8 pn;
+ __u8 cpus;
+ char reserved[6];
+ char part_name[DIAG204_LPAR_NAME_LEN];
+} __packed;
+
+struct diag204_x_part_hdr {
+ __u8 pn;
+ __u8 cpus;
+ __u8 rcpus;
+ __u8 pflag;
+ __u32 mlu;
+ char part_name[DIAG204_LPAR_NAME_LEN];
+ char lpc_name[8];
+ char os_name[8];
+ __u64 online_cs;
+ __u64 online_es;
+ __u8 upid;
+ __u8 reserved:3;
+ __u8 mtid:5;
+ char reserved1[2];
+ __u32 group_mlu;
+ char group_name[8];
+ char hardware_group_name[8];
+ char reserved2[24];
+} __packed;
+
+struct diag204_cpu_info {
+ __u16 cpu_addr;
+ char reserved1[2];
+ __u8 ctidx;
+ __u8 cflag;
+ __u16 weight;
+ __u64 acc_time;
+ __u64 lp_time;
+} __packed;
+
+struct diag204_x_cpu_info {
+ __u16 cpu_addr;
+ char reserved1[2];
+ __u8 ctidx;
+ __u8 cflag;
+ __u16 weight;
+ __u64 acc_time;
+ __u64 lp_time;
+ __u16 min_weight;
+ __u16 cur_weight;
+ __u16 max_weight;
+ char reserved2[2];
+ __u64 online_time;
+ __u64 wait_time;
+ __u32 pma_weight;
+ __u32 polar_weight;
+ __u32 cpu_type_cap;
+ __u32 group_cpu_type_cap;
+ char reserved3[32];
+} __packed;
+
+struct diag204_phys_hdr {
+ char reserved1[1];
+ __u8 cpus;
+ char reserved2[6];
+ char mgm_name[8];
+} __packed;
+
+struct diag204_x_phys_hdr {
+ char reserved1[1];
+ __u8 cpus;
+ char reserved2[6];
+ char mgm_name[8];
+ char reserved3[80];
+} __packed;
+
+struct diag204_phys_cpu {
+ __u16 cpu_addr;
+ char reserved1[2];
+ __u8 ctidx;
+ char reserved2[3];
+ __u64 mgm_time;
+ char reserved3[8];
+} __packed;
+
+struct diag204_x_phys_cpu {
+ __u16 cpu_addr;
+ char reserved1[2];
+ __u8 ctidx;
+ char reserved2[1];
+ __u16 weight;
+ __u64 mgm_time;
+ char reserved3[80];
+} __packed;
+
+struct diag204_x_part_block {
+ struct diag204_x_part_hdr hdr;
+ struct diag204_x_cpu_info cpus[];
+} __packed;
+
+struct diag204_x_phys_block {
+ struct diag204_x_phys_hdr hdr;
+ struct diag204_x_phys_cpu cpus[];
+} __packed;
+
+int diag204(unsigned long subcode, unsigned long size, void *addr);
+int diag224(void *ptr);
#endif /* _ASM_S390_DIAG_H */
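A rough sketch of how the new wrappers might be combined; the buffer allocation, the order, and the meaning of the size argument (assumed here to be a count of 4K pages) are assumptions, not defined by this header:

    unsigned long subcode = DIAG204_SUBC_STIB7 | DIAG204_INFO_EXT;
    void *buf = (void *)__get_free_pages(GFP_KERNEL, 2);    /* order is illustrative */
    int rc = -ENOMEM;

    if (buf)
            rc = diag204(subcode, 4, buf);  /* 4: assumed size in pages */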
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 3249b7464..ffaba07f5 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -5,7 +5,6 @@
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 563ab9f44..1736c7d3c 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -225,6 +225,7 @@ do { \
#define MMAP_ALIGN_MASK (is_compat_task() ? 0 : 0x7fUL)
#define STACK_RND_MASK MMAP_RND_MASK
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO \
do { \
if (vdso_enabled) \
diff --git a/arch/s390/include/asm/fcx.h b/arch/s390/include/asm/fcx.h
index 7ecb92b46..04cb4b4bc 100644
--- a/arch/s390/include/asm/fcx.h
+++ b/arch/s390/include/asm/fcx.h
@@ -6,7 +6,7 @@
*/
#ifndef _ASM_S390_FCX_H
-#define _ASM_S390_FCX_H _ASM_S390_FCX_H
+#define _ASM_S390_FCX_H
#include <linux/types.h>
diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
index 8ae236b0f..6aba6fc40 100644
--- a/arch/s390/include/asm/fpu/api.h
+++ b/arch/s390/include/asm/fpu/api.h
@@ -1,6 +1,41 @@
/*
* In-kernel FPU support functions
*
+ *
+ * Consider these guidelines before using in-kernel FPU functions:
+ *
+ * 1. Use kernel_fpu_begin() and kernel_fpu_end() to enclose all in-kernel
+ * use of floating-point or vector registers and instructions.
+ *
+ * 2. For kernel_fpu_begin(), specify the vector register range you want to
+ * use with the KERNEL_VXR_* constants. Consider these usage guidelines:
+ *
+ * a) If your function typically runs in process-context, use the lower
+ * half of the vector registers, for example, specify KERNEL_VXR_LOW.
+ * b) If your function typically runs in soft-irq or hard-irq context,
+ * prefer using the upper half of the vector registers, for example,
+ * specify KERNEL_VXR_HIGH.
+ *
+ * If you adhere to these guidelines, an interrupted process context
+ * does not need to save and restore vector registers because of
+ * disjoint register ranges.
+ *
+ * Also note that the __kernel_fpu_begin()/__kernel_fpu_end() functions
+ * include logic to save and restore up to 16 vector registers at once.
+ *
+ * 3. You can nest kernel_fpu_begin()/kernel_fpu_end() by using different
+ * struct kernel_fpu states. Vector registers that are in use by outer
+ * levels are saved and restored. You can minimize the save and restore
+ * effort by choosing disjoint vector register ranges.
+ *
+ * 4. To use vector floating-point instructions, specify the KERNEL_FPC
+ * flag to save and restore floating-point controls in addition to any
+ * vector register range.
+ *
+ * 5. To use floating-point registers and instructions only, specify the
+ * KERNEL_FPR flag. This flag triggers a save and restore of vector
+ * registers V0 to V15 and floating-point controls.
+ *
* Copyright IBM Corp. 2015
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*/
@@ -8,6 +43,8 @@
#ifndef _ASM_S390_FPU_API_H
#define _ASM_S390_FPU_API_H
+#include <linux/preempt.h>
+
void save_fpu_regs(void);
static inline int test_fp_ctl(u32 fpc)
@@ -27,4 +64,42 @@ static inline int test_fp_ctl(u32 fpc)
return rc;
}
+#define KERNEL_VXR_V0V7 1
+#define KERNEL_VXR_V8V15 2
+#define KERNEL_VXR_V16V23 4
+#define KERNEL_VXR_V24V31 8
+#define KERNEL_FPR 16
+#define KERNEL_FPC 256
+
+#define KERNEL_VXR_LOW (KERNEL_VXR_V0V7|KERNEL_VXR_V8V15)
+#define KERNEL_VXR_MID (KERNEL_VXR_V8V15|KERNEL_VXR_V16V23)
+#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
+
+#define KERNEL_FPU_MASK (KERNEL_VXR_LOW|KERNEL_VXR_HIGH|KERNEL_FPR)
+
+struct kernel_fpu;
+
+/*
+ * Note that the functions below must be called with preemption disabled.
+ * Do not enable preemption before calling __kernel_fpu_end() to prevent
+ * corruption of an existing kernel FPU state.
+ *
+ * Prefer using the kernel_fpu_begin()/kernel_fpu_end() pair of functions.
+ */
+void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags);
+void __kernel_fpu_end(struct kernel_fpu *state);
+
+
+static inline void kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
+{
+ preempt_disable();
+ __kernel_fpu_begin(state, flags);
+}
+
+static inline void kernel_fpu_end(struct kernel_fpu *state)
+{
+ __kernel_fpu_end(state);
+ preempt_enable();
+}
+
#endif /* _ASM_S390_FPU_API_H */
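A minimal sketch of the pattern those guidelines describe; the vector computation itself is elided:

    struct kernel_fpu state;

    kernel_fpu_begin(&state, KERNEL_VXR_LOW | KERNEL_FPC);
    /* ... use vector registers V0-V15 and the floating-point controls ... */
    kernel_fpu_end(&state);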
diff --git a/arch/s390/include/asm/fpu/types.h b/arch/s390/include/asm/fpu/types.h
index fe937c9b6..bce255ead 100644
--- a/arch/s390/include/asm/fpu/types.h
+++ b/arch/s390/include/asm/fpu/types.h
@@ -24,4 +24,14 @@ struct fpu {
/* VX array structure for address operand constraints in inline assemblies */
struct vx_array { __vector128 _[__NUM_VXRS]; };
+/* In-kernel FPU state structure */
+struct kernel_fpu {
+ u32 mask;
+ u32 fpc;
+ union {
+ freg_t fprs[__NUM_FPRS];
+ __vector128 vxrs[__NUM_VXRS];
+ };
+};
+
#endif /* _ASM_S390_FPU_TYPES_H */
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index d054c1b07..741ddba0b 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -10,14 +10,25 @@
/**
* struct gmap_struct - guest address space
+ * @list: list head for the mm->context gmap list
* @crst_list: list of all crst tables used in the guest address space
* @mm: pointer to the parent mm_struct
* @guest_to_host: radix tree with guest to host address translation
* @host_to_guest: radix tree with pointer to segment table entries
* @guest_table_lock: spinlock to protect all entries in the guest page table
+ * @ref_count: reference counter for the gmap structure
* @table: pointer to the page directory
* @asce: address space control element for gmap page table
* @pfault_enabled: defines if pfaults are applicable for the guest
+ * @host_to_rmap: radix tree with gmap_rmap lists
+ * @children: list of shadow gmap structures
+ * @pt_list: list of all page tables used in the shadow guest address space
+ * @shadow_lock: spinlock to protect the shadow gmap list
+ * @parent: pointer to the parent gmap for shadow guest address spaces
+ * @orig_asce: ASCE for which the shadow page table has been created
+ * @edat_level: edat level to be used for the shadow translation
+ * @removed: flag to indicate if a shadow guest address space has been removed
+ * @initialized: flag to indicate if a shadow guest address space can be used
*/
struct gmap {
struct list_head list;
@@ -26,26 +37,64 @@ struct gmap {
struct radix_tree_root guest_to_host;
struct radix_tree_root host_to_guest;
spinlock_t guest_table_lock;
+ atomic_t ref_count;
unsigned long *table;
unsigned long asce;
unsigned long asce_end;
void *private;
bool pfault_enabled;
+ /* Additional data for shadow guest address spaces */
+ struct radix_tree_root host_to_rmap;
+ struct list_head children;
+ struct list_head pt_list;
+ spinlock_t shadow_lock;
+ struct gmap *parent;
+ unsigned long orig_asce;
+ int edat_level;
+ bool removed;
+ bool initialized;
};
/**
+ * struct gmap_rmap - reverse mapping for shadow page table entries
+ * @next: pointer to next rmap in the list
+ * @raddr: virtual rmap address in the shadow guest address space
+ */
+struct gmap_rmap {
+ struct gmap_rmap *next;
+ unsigned long raddr;
+};
+
+#define gmap_for_each_rmap(pos, head) \
+ for (pos = (head); pos; pos = pos->next)
+
+#define gmap_for_each_rmap_safe(pos, n, head) \
+ for (pos = (head); n = pos ? pos->next : NULL, pos; pos = n)
+
+/**
* struct gmap_notifier - notify function block for page invalidation
* @notifier_call: address of callback function
*/
struct gmap_notifier {
struct list_head list;
- void (*notifier_call)(struct gmap *gmap, unsigned long gaddr);
+ struct rcu_head rcu;
+ void (*notifier_call)(struct gmap *gmap, unsigned long start,
+ unsigned long end);
};
-struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit);
-void gmap_free(struct gmap *gmap);
+static inline int gmap_is_shadow(struct gmap *gmap)
+{
+ return !!gmap->parent;
+}
+
+struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit);
+void gmap_remove(struct gmap *gmap);
+struct gmap *gmap_get(struct gmap *gmap);
+void gmap_put(struct gmap *gmap);
+
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
+struct gmap *gmap_get_enabled(void);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
@@ -57,8 +106,29 @@ void gmap_discard(struct gmap *, unsigned long from, unsigned long to);
void __gmap_zap(struct gmap *, unsigned long gaddr);
void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr);
-void gmap_register_ipte_notifier(struct gmap_notifier *);
-void gmap_unregister_ipte_notifier(struct gmap_notifier *);
-int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
+int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val);
+
+struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
+ int edat_level);
+int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level);
+int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
+ int fake);
+int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
+ int fake);
+int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
+ int fake);
+int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
+ int fake);
+int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
+ unsigned long *pgt, int *dat_protection, int *fake);
+int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
+
+void gmap_register_pte_notifier(struct gmap_notifier *);
+void gmap_unregister_pte_notifier(struct gmap_notifier *);
+void gmap_pte_notify(struct mm_struct *, unsigned long addr, pte_t *,
+ unsigned long bits);
+
+int gmap_mprotect_notify(struct gmap *, unsigned long start,
+ unsigned long len, int prot);
#endif /* _ASM_S390_GMAP_H */
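For illustration, the safe rmap iterator in its typical teardown role; taking 'head' from the sg->host_to_rmap radix tree is an assumption about the caller:

    struct gmap_rmap *rmap, *rnext;

    gmap_for_each_rmap_safe(rmap, rnext, head) {
            /* the _safe variant allows freeing 'rmap' while iterating */
            kfree(rmap);
    }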
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index d9be7c0c1..4c7fac750 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -41,7 +41,10 @@ static inline int prepare_hugepage_range(struct file *file,
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
+ if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
+ pte_val(*ptep) = _REGION3_ENTRY_EMPTY;
+ else
+ pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
}
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 6fc44dca1..4da22b2f0 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -141,11 +141,11 @@ extern void setup_ipl(void);
* DIAG 308 support
*/
enum diag308_subcode {
- DIAG308_REL_HSA = 2,
- DIAG308_IPL = 3,
- DIAG308_DUMP = 4,
- DIAG308_SET = 5,
- DIAG308_STORE = 6,
+ DIAG308_REL_HSA = 2,
+ DIAG308_LOAD_CLEAR = 3,
+ DIAG308_LOAD_NORMAL_DUMP = 4,
+ DIAG308_SET = 5,
+ DIAG308_STORE = 6,
};
enum diag308_ipl_type {
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index f97b055de..70c9bce76 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -7,11 +7,8 @@
#define NR_IRQS_BASE 3
-#ifdef CONFIG_PCI_NR_MSI
-# define NR_IRQS (NR_IRQS_BASE + CONFIG_PCI_NR_MSI)
-#else
-# define NR_IRQS NR_IRQS_BASE
-#endif
+#define NR_IRQS NR_IRQS_BASE
+#define NR_IRQS_LEGACY NR_IRQS_BASE
/* External interruption codes */
#define EXT_IRQ_INTERRUPT_KEY 0x0040
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 7f9fd5e3f..9be198f5e 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -4,6 +4,7 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
+#include <linux/stringify.h>
#define JUMP_LABEL_NOP_SIZE 6
#define JUMP_LABEL_NOP_OFFSET 2
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index b47ad3b64..591e5a527 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -43,9 +43,9 @@ typedef u16 kprobe_opcode_t;
#define MAX_INSN_SIZE 0x0003
#define MAX_STACK_SIZE 64
#define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \
- (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR))) \
+ (((unsigned long)task_stack_page(current)) + THREAD_SIZE - (ADDR))) \
? (MAX_STACK_SIZE) \
- : (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
+ : (((unsigned long)task_stack_page(current)) + THREAD_SIZE - (ADDR)))
#define kretprobe_blacklist_size 0
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index ac82e8eb9..8e5daf7a7 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -43,6 +43,7 @@
/* s390-specific vcpu->requests bit members */
#define KVM_REQ_ENABLE_IBS 8
#define KVM_REQ_DISABLE_IBS 9
+#define KVM_REQ_ICPT_OPEREXC 10
#define SIGP_CTRL_C 0x80
#define SIGP_CTRL_SCN_MASK 0x3f
@@ -145,7 +146,7 @@ struct kvm_s390_sie_block {
__u64 cputm; /* 0x0028 */
__u64 ckc; /* 0x0030 */
__u64 epoch; /* 0x0038 */
- __u8 reserved40[4]; /* 0x0040 */
+ __u32 svcc; /* 0x0040 */
#define LCTL_CR0 0x8000
#define LCTL_CR6 0x0200
#define LCTL_CR9 0x0040
@@ -154,6 +155,7 @@ struct kvm_s390_sie_block {
#define LCTL_CR14 0x0002
__u16 lctl; /* 0x0044 */
__s16 icpua; /* 0x0046 */
+#define ICTL_OPEREXC 0x80000000
#define ICTL_PINT 0x20000000
#define ICTL_LPSW 0x00400000
#define ICTL_STCTL 0x00040000
@@ -166,6 +168,9 @@ struct kvm_s390_sie_block {
#define ICPT_INST 0x04
#define ICPT_PROGI 0x08
#define ICPT_INSTPROGI 0x0C
+#define ICPT_EXTINT 0x14
+#define ICPT_VALIDITY 0x20
+#define ICPT_STOP 0x28
#define ICPT_OPEREXC 0x2C
#define ICPT_PARTEXEC 0x38
#define ICPT_IOINST 0x40
@@ -185,7 +190,9 @@ struct kvm_s390_sie_block {
__u32 scaol; /* 0x0064 */
__u8 reserved68[4]; /* 0x0068 */
__u32 todpr; /* 0x006c */
- __u8 reserved70[32]; /* 0x0070 */
+ __u8 reserved70[16]; /* 0x0070 */
+ __u64 mso; /* 0x0080 */
+ __u64 msl; /* 0x0088 */
psw_t gpsw; /* 0x0090 */
__u64 gg14; /* 0x00a0 */
__u64 gg15; /* 0x00a8 */
@@ -223,7 +230,7 @@ struct kvm_s390_sie_block {
__u8 reserved1e6[2]; /* 0x01e6 */
__u64 itdba; /* 0x01e8 */
__u64 riccbd; /* 0x01f0 */
- __u8 reserved1f8[8]; /* 0x01f8 */
+ __u64 gvrd; /* 0x01f8 */
} __attribute__((packed));
struct kvm_s390_itdb {
@@ -256,6 +263,7 @@ struct kvm_vcpu_stat {
u32 instruction_stctg;
u32 exit_program_interruption;
u32 exit_instr_and_program;
+ u32 exit_operation_exception;
u32 deliver_external_call;
u32 deliver_emergency_signal;
u32 deliver_service_signal;
@@ -278,7 +286,9 @@ struct kvm_vcpu_stat {
u32 instruction_stsi;
u32 instruction_stfl;
u32 instruction_tprot;
+ u32 instruction_sie;
u32 instruction_essa;
+ u32 instruction_sthyi;
u32 instruction_sigp_sense;
u32 instruction_sigp_sense_running;
u32 instruction_sigp_external_call;
@@ -541,12 +551,16 @@ struct kvm_guestdbg_info_arch {
struct kvm_vcpu_arch {
struct kvm_s390_sie_block *sie_block;
+ /* if vsie is active, currently executed shadow sie control block */
+ struct kvm_s390_sie_block *vsie_block;
unsigned int host_acrs[NUM_ACRS];
struct fpu host_fpregs;
struct kvm_s390_local_interrupt local_int;
struct hrtimer ckc_timer;
struct kvm_s390_pgm_info pgm;
struct gmap *gmap;
+ /* backup location for the currently enabled gmap when scheduled out */
+ struct gmap *enabled_gmap;
struct kvm_guestdbg_info_arch guestdbg;
unsigned long pfault_token;
unsigned long pfault_select;
@@ -631,6 +645,14 @@ struct sie_page2 {
u8 reserved900[0x1000 - 0x900]; /* 0x0900 */
} __packed;
+struct kvm_s390_vsie {
+ struct mutex mutex;
+ struct radix_tree_root addr_to_page;
+ int page_count;
+ int next;
+ struct page *pages[KVM_MAX_VCPUS];
+};
+
struct kvm_arch{
void *sca;
int use_esca;
@@ -646,15 +668,20 @@ struct kvm_arch{
int user_cpu_state_ctrl;
int user_sigp;
int user_stsi;
+ int user_instr0;
struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
wait_queue_head_t ipte_wq;
int ipte_lock_count;
struct mutex ipte_mutex;
+ struct ratelimit_state sthyi_limit;
spinlock_t start_stop_lock;
struct sie_page2 *sie_page2;
struct kvm_s390_cpu_model model;
struct kvm_s390_crypto crypto;
+ struct kvm_s390_vsie vsie;
u64 epoch;
+ /* subset of available cpu features enabled by user space */
+ DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
};
#define KVM_HVA_ERR_BAD (-1UL)
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 081b2ad99..6d39329c8 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -6,10 +6,11 @@
typedef struct {
cpumask_t cpu_attach_mask;
- atomic_t attach_count;
+ atomic_t flush_count;
unsigned int flush_mm;
- spinlock_t list_lock;
+ spinlock_t pgtable_lock;
struct list_head pgtable_list;
+ spinlock_t gmap_lock;
struct list_head gmap_list;
unsigned long asce;
unsigned long asce_limit;
@@ -22,9 +23,11 @@ typedef struct {
unsigned int use_skey:1;
} mm_context_t;
-#define INIT_MM_CONTEXT(name) \
- .context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
- .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
+#define INIT_MM_CONTEXT(name) \
+ .context.pgtable_lock = \
+ __SPIN_LOCK_UNLOCKED(name.context.pgtable_lock), \
+ .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
+ .context.gmap_lock = __SPIN_LOCK_UNLOCKED(name.context.gmap_lock), \
.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
static inline int tprot(unsigned long addr)
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index c837b79b4..c6a088c91 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -15,11 +15,12 @@
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
- spin_lock_init(&mm->context.list_lock);
+ spin_lock_init(&mm->context.pgtable_lock);
INIT_LIST_HEAD(&mm->context.pgtable_list);
+ spin_lock_init(&mm->context.gmap_lock);
INIT_LIST_HEAD(&mm->context.gmap_list);
cpumask_clear(&mm->context.cpu_attach_mask);
- atomic_set(&mm->context.attach_count, 0);
+ atomic_set(&mm->context.flush_count, 0);
mm->context.flush_mm = 0;
#ifdef CONFIG_PGSTE
mm->context.alloc_pgste = page_table_allocate_pgste;
@@ -90,15 +91,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
S390_lowcore.user_asce = next->context.asce;
if (prev == next)
return;
- if (MACHINE_HAS_TLB_LC)
- cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
+ cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
+ cpumask_set_cpu(cpu, mm_cpumask(next));
/* Clear old ASCE by loading the kernel ASCE. */
__ctl_load(S390_lowcore.kernel_asce, 1, 1);
__ctl_load(S390_lowcore.kernel_asce, 7, 7);
- atomic_inc(&next->context.attach_count);
- atomic_dec(&prev->context.attach_count);
- if (MACHINE_HAS_TLB_LC)
- cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+ cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
@@ -110,10 +108,9 @@ static inline void finish_arch_post_lock_switch(void)
load_kernel_asce();
if (mm) {
preempt_disable();
- while (atomic_read(&mm->context.attach_count) >> 16)
+ while (atomic_read(&mm->context.flush_count))
cpu_relax();
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
if (mm->context.flush_mm)
__tlb_flush_mm(mm);
preempt_enable();
@@ -128,7 +125,6 @@ static inline void activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
switch_mm(prev, next, current);
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
set_user_asce(next);
}
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 53eacbd4f..69b8a41fc 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -21,6 +21,7 @@
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+#define HUGE_MAX_HSTATE 2
#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
@@ -30,11 +31,12 @@
#include <asm/setup.h>
#ifndef __ASSEMBLY__
+void __storage_key_init_range(unsigned long start, unsigned long end);
+
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
-#if PAGE_DEFAULT_KEY
- __storage_key_init_range(start, end);
-#endif
+ if (PAGE_DEFAULT_KEY)
+ __storage_key_init_range(start, end);
}
#define clear_page(page) memset((page), 0, PAGE_SIZE)
@@ -109,13 +111,14 @@ static inline unsigned char page_get_storage_key(unsigned long addr)
static inline int page_reset_referenced(unsigned long addr)
{
- unsigned int ipm;
+ int cc;
asm volatile(
" rrbe 0,%1\n"
" ipm %0\n"
- : "=d" (ipm) : "a" (addr) : "cc");
- return !!(ipm & 0x20000000);
+ " srl %0,28\n"
+ : "=d" (cc) : "a" (addr) : "cc");
+ return cc;
}
/* Bits int the storage key */
@@ -146,6 +149,8 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
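Note the changed contract of page_reset_referenced(): it now returns the full RRBE condition code rather than a boolean. A hedged caller-side sketch, assuming the usual RRBE encoding in which cc 2 and 3 indicate the reference bit was set:

    int cc = page_reset_referenced(addr);
    bool referenced = (cc & 2) != 0;    /* assumed RRBE cc encoding */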
diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
index 1f7ff85c5..c64c0befd 100644
--- a/arch/s390/include/asm/perf_event.h
+++ b/arch/s390/include/asm/perf_event.h
@@ -86,16 +86,4 @@ struct sf_raw_sample {
u8 padding[]; /* Padding to next multiple of 8 */
} __packed;
-/* Perf hardware reserve and release functions */
-#ifdef CONFIG_PERF_EVENTS
-int perf_reserve_sampling(void);
-void perf_release_sampling(void);
-#else /* CONFIG_PERF_EVENTS */
-static inline int perf_reserve_sampling(void)
-{
- return 0;
-}
-static inline void perf_release_sampling(void) {}
-#endif /* CONFIG_PERF_EVENTS */
-
#endif /* _ASM_S390_PERF_EVENT_H */
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index da34cb6b1..f4eb9843e 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -19,8 +19,10 @@ unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);
unsigned long *page_table_alloc(struct mm_struct *);
+struct page *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
+void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 42b968a85..72c7f60bf 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -28,12 +28,33 @@
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
+#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>
-extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
+extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
extern void vmem_map_init(void);
+pmd_t *vmem_pmd_alloc(void);
+pte_t *vmem_pte_alloc(void);
+
+enum {
+ PG_DIRECT_MAP_4K = 0,
+ PG_DIRECT_MAP_1M,
+ PG_DIRECT_MAP_2G,
+ PG_DIRECT_MAP_MAX
+};
+
+extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
+
+static inline void update_page_count(int level, long count)
+{
+ if (IS_ENABLED(CONFIG_PROC_FS))
+ atomic_long_add(count, &direct_pages_count[level]);
+}
+
+struct seq_file;
+void arch_report_meminfo(struct seq_file *m);
/*
* The S390 doesn't have any external MMU info: the kernel page
@@ -221,8 +242,8 @@ static inline int is_module_addr(void *addr)
* swap .11..ttttt.0
* prot-none, clean, old .11.xx0000.1
* prot-none, clean, young .11.xx0001.1
- * prot-none, dirty, old .10.xx0010.1
- * prot-none, dirty, young .10.xx0011.1
+ * prot-none, dirty, old .11.xx0010.1
+ * prot-none, dirty, young .11.xx0011.1
* read-only, clean, old .11.xx0100.1
* read-only, clean, young .01.xx0101.1
* read-only, dirty, old .11.xx0110.1
@@ -256,6 +277,7 @@ static inline int is_module_addr(void *addr)
/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */
#define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */
+#define _REGION_ENTRY_OFFSET 0xc0 /* region table offset */
#define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
@@ -270,8 +292,23 @@ static inline int is_module_addr(void *addr)
#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
-#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */
-#define _REGION3_ENTRY_RO 0x200 /* page protection bit */
+#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
+#define _REGION3_ENTRY_ORIGIN ~0x7ffUL/* region third table origin */
+
+#define _REGION3_ENTRY_DIRTY 0x2000 /* SW region dirty bit */
+#define _REGION3_ENTRY_YOUNG 0x1000 /* SW region young bit */
+#define _REGION3_ENTRY_LARGE 0x0400 /* RTTE-format control, large page */
+#define _REGION3_ENTRY_READ 0x0002 /* SW region read bit */
+#define _REGION3_ENTRY_WRITE 0x0001 /* SW region write bit */
+
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
+#else
+#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
+#endif
+
+#define _REGION_ENTRY_BITS 0xfffffffffffff227UL
+#define _REGION_ENTRY_BITS_LARGE 0xffffffff8000fe27UL
/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
@@ -287,8 +324,8 @@ static inline int is_module_addr(void *addr)
#define _SEGMENT_ENTRY_DIRTY 0x2000 /* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG 0x1000 /* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE 0x0400 /* STE-format control, large page */
-#define _SEGMENT_ENTRY_READ 0x0002 /* SW segment read bit */
-#define _SEGMENT_ENTRY_WRITE 0x0001 /* SW segment write bit */
+#define _SEGMENT_ENTRY_WRITE 0x0002 /* SW segment write bit */
+#define _SEGMENT_ENTRY_READ 0x0001 /* SW segment read bit */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
@@ -297,16 +334,17 @@ static inline int is_module_addr(void *addr)
#endif
/*
- * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
- * dy..R...I...rw
+ * Segment table and region3 table entry encoding
+ * (R = read-only, I = invalid, y = young bit):
+ * dy..R...I...wr
* prot-none, clean, old 00..1...1...00
* prot-none, clean, young 01..1...1...00
* prot-none, dirty, old 10..1...1...00
* prot-none, dirty, young 11..1...1...00
- * read-only, clean, old 00..1...1...10
- * read-only, clean, young 01..1...0...10
- * read-only, dirty, old 10..1...1...10
- * read-only, dirty, young 11..1...0...10
+ * read-only, clean, old 00..1...1...01
+ * read-only, clean, young 01..1...0...01
+ * read-only, dirty, old 10..1...1...01
+ * read-only, dirty, young 11..1...0...01
* read-write, clean, old 00..1...1...11
* read-write, clean, young 01..1...0...11
* read-write, dirty, old 10..0...1...11
@@ -327,6 +365,7 @@ static inline int is_module_addr(void *addr)
#define PGSTE_GC_BIT 0x0002000000000000UL
#define PGSTE_UC_BIT 0x0000800000000000UL /* user dirty (migration) */
#define PGSTE_IN_BIT 0x0000400000000000UL /* IPTE notify bit */
+#define PGSTE_VSIE_BIT 0x0000200000000000UL /* ref'd in a shadow table */
/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO 0x0000000080000000UL
@@ -345,7 +384,7 @@ static inline int is_module_addr(void *addr)
/*
* Page protection definitions.
*/
-#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID)
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_READ __pgprot(_PAGE_PRESENT | _PAGE_READ | \
_PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
@@ -391,6 +430,33 @@ static inline int is_module_addr(void *addr)
_SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE __pgprot(_SEGMENT_ENTRY_READ | \
_SEGMENT_ENTRY_WRITE)
+#define SEGMENT_KERNEL __pgprot(_SEGMENT_ENTRY | \
+ _SEGMENT_ENTRY_LARGE | \
+ _SEGMENT_ENTRY_READ | \
+ _SEGMENT_ENTRY_WRITE | \
+ _SEGMENT_ENTRY_YOUNG | \
+ _SEGMENT_ENTRY_DIRTY)
+#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
+ _SEGMENT_ENTRY_LARGE | \
+ _SEGMENT_ENTRY_READ | \
+ _SEGMENT_ENTRY_YOUNG | \
+ _SEGMENT_ENTRY_PROTECT)
+
+/*
+ * Region3 entry (large page) protection definitions.
+ */
+
+#define REGION3_KERNEL __pgprot(_REGION_ENTRY_TYPE_R3 | \
+ _REGION3_ENTRY_LARGE | \
+ _REGION3_ENTRY_READ | \
+ _REGION3_ENTRY_WRITE | \
+ _REGION3_ENTRY_YOUNG | \
+ _REGION3_ENTRY_DIRTY)
+#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
+ _REGION3_ENTRY_LARGE | \
+ _REGION3_ENTRY_READ | \
+ _REGION3_ENTRY_YOUNG | \
+ _REGION_ENTRY_PROTECT)
static inline int mm_has_pgste(struct mm_struct *mm)
{
@@ -424,6 +490,53 @@ static inline int mm_use_skey(struct mm_struct *mm)
return 0;
}
+static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
+{
+ register unsigned long reg2 asm("2") = old;
+ register unsigned long reg3 asm("3") = new;
+ unsigned long address = (unsigned long)ptr | 1;
+
+ asm volatile(
+ " csp %0,%3"
+ : "+d" (reg2), "+m" (*ptr)
+ : "d" (reg3), "d" (address)
+ : "cc");
+}
+
+static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
+{
+ register unsigned long reg2 asm("2") = old;
+ register unsigned long reg3 asm("3") = new;
+ unsigned long address = (unsigned long)ptr | 1;
+
+ asm volatile(
+ " .insn rre,0xb98a0000,%0,%3"
+ : "+d" (reg2), "+m" (*ptr)
+ : "d" (reg3), "d" (address)
+ : "cc");
+}
+
+#define CRDTE_DTT_PAGE 0x00UL
+#define CRDTE_DTT_SEGMENT 0x10UL
+#define CRDTE_DTT_REGION3 0x14UL
+#define CRDTE_DTT_REGION2 0x18UL
+#define CRDTE_DTT_REGION1 0x1cUL
+
+static inline void crdte(unsigned long old, unsigned long new,
+ unsigned long table, unsigned long dtt,
+ unsigned long address, unsigned long asce)
+{
+ register unsigned long reg2 asm("2") = old;
+ register unsigned long reg3 asm("3") = new;
+ register unsigned long reg4 asm("4") = table | dtt;
+ register unsigned long reg5 asm("5") = address;
+
+ asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
+ : "+d" (reg2)
+ : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
+ : "memory", "cc");
+}
+
/*
* pgd/pmd/pte query functions
*/
@@ -465,7 +578,7 @@ static inline int pud_none(pud_t pud)
{
if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
return 0;
- return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
+ return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}
static inline int pud_large(pud_t pud)
@@ -475,17 +588,35 @@ static inline int pud_large(pud_t pud)
return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}
+static inline unsigned long pud_pfn(pud_t pud)
+{
+ unsigned long origin_mask;
+
+ origin_mask = _REGION3_ENTRY_ORIGIN;
+ if (pud_large(pud))
+ origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
+ return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
+}
+
+static inline int pmd_large(pmd_t pmd)
+{
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+ if (pmd_large(pmd))
+ return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
+ return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
+}
+
static inline int pud_bad(pud_t pud)
{
- /*
- * With dynamic page table levels the pud can be a region table
- * entry or a segment table entry. Check for the bit that are
- * invalid for either table entry.
- */
- unsigned long mask =
- ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
- ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
- return (pud_val(pud) & mask) != 0;
+ if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
+ return pmd_bad(__pmd(pud_val(pud)));
+ if (pud_large(pud))
+ return (pud_val(pud) & ~_REGION_ENTRY_BITS_LARGE) != 0;
+ return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}
static inline int pmd_present(pmd_t pmd)
@@ -498,11 +629,6 @@ static inline int pmd_none(pmd_t pmd)
return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}
-static inline int pmd_large(pmd_t pmd)
-{
- return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
-}
-
static inline unsigned long pmd_pfn(pmd_t pmd)
{
unsigned long origin_mask;
@@ -513,13 +639,6 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}
-static inline int pmd_bad(pmd_t pmd)
-{
- if (pmd_large(pmd))
- return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
- return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
-}
-
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
@@ -885,15 +1004,26 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-void ptep_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+void ptep_notify(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long bits);
+int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
+ pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
pte_t *ptep , int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
+ pte_t *sptep, pte_t *tptep, pte_t pte);
+void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned char key, bool nq);
-unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr);
+int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+ unsigned char key, unsigned char *oldkey,
+ bool nq, bool mr, bool mc);
+int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
+int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
+ unsigned char *key);
/*
* Certain architectures need to do special things when PTEs
@@ -963,6 +1093,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
+#define pud_page(pud) pfn_to_page(pud_pfn(pud))
/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
@@ -970,20 +1101,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
-static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
-{
- /*
- * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
- * Convert to segment table entry format.
- */
- if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
- return pgprot_val(SEGMENT_NONE);
- if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
- return pgprot_val(SEGMENT_READ);
- return pgprot_val(SEGMENT_WRITE);
-}
-
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
@@ -1020,6 +1137,56 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
return pmd;
}
+static inline pud_t pud_wrprotect(pud_t pud)
+{
+ pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
+ pud_val(pud) |= _REGION_ENTRY_PROTECT;
+ return pud;
+}
+
+static inline pud_t pud_mkwrite(pud_t pud)
+{
+ pud_val(pud) |= _REGION3_ENTRY_WRITE;
+ if (pud_large(pud) && !(pud_val(pud) & _REGION3_ENTRY_DIRTY))
+ return pud;
+ pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
+ return pud;
+}
+
+static inline pud_t pud_mkclean(pud_t pud)
+{
+ if (pud_large(pud)) {
+ pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
+ pud_val(pud) |= _REGION_ENTRY_PROTECT;
+ }
+ return pud;
+}
+
+static inline pud_t pud_mkdirty(pud_t pud)
+{
+ if (pud_large(pud)) {
+ pud_val(pud) |= _REGION3_ENTRY_DIRTY |
+ _REGION3_ENTRY_SOFT_DIRTY;
+ if (pud_val(pud) & _REGION3_ENTRY_WRITE)
+ pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
+ }
+ return pud;
+}
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
+static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
+{
+ /*
+ * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
+ * Convert to segment table entry format.
+ */
+ if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
+ return pgprot_val(SEGMENT_NONE);
+ if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
+ return pgprot_val(SEGMENT_READ);
+ return pgprot_val(SEGMENT_WRITE);
+}
+
static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
if (pmd_large(pmd)) {
@@ -1068,15 +1235,8 @@ static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
static inline void __pmdp_csp(pmd_t *pmdp)
{
- register unsigned long reg2 asm("2") = pmd_val(*pmdp);
- register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
- _SEGMENT_ENTRY_INVALID;
- register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
-
- asm volatile(
- " csp %1,%3"
- : "=m" (*pmdp)
- : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+ csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
+ pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}
static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
@@ -1091,6 +1251,19 @@ static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
: "cc" );
}
+static inline void __pudp_idte(unsigned long address, pud_t *pudp)
+{
+ unsigned long r3o;
+
+ r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
+ r3o |= _ASCE_TYPE_REGION3;
+ asm volatile(
+ " .insn rrf,0xb98e0000,%2,%3,0,0"
+ : "=m" (*pudp)
+ : "m" (*pudp), "a" (r3o), "a" ((address & PUD_MASK))
+ : "cc");
+}
+
static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
{
unsigned long sto;
@@ -1103,8 +1276,22 @@ static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
: "cc" );
}
+static inline void __pudp_idte_local(unsigned long address, pud_t *pudp)
+{
+ unsigned long r3o;
+
+ r3o = (unsigned long) pudp - pud_index(address) * sizeof(pud_t);
+ r3o |= _ASCE_TYPE_REGION3;
+ asm volatile(
+ " .insn rrf,0xb98e0000,%2,%3,0,1"
+ : "=m" (*pudp)
+ : "m" (*pudp), "a" (r3o), "a" ((address & PUD_MASK))
+ : "cc");
+}
+
pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
+pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
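A small usage sketch for the new direct-map accounting; the call site is hypothetical, and the counter update compiles away unless CONFIG_PROC_FS is enabled:

    /* hypothetical call site: one more 1M large page mapped directly */
    update_page_count(PG_DIRECT_MAP_1M, 1);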
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 9d4d311d7..03323175d 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -77,7 +77,10 @@ static inline void get_cpu_id(struct cpuid *ptr)
asm volatile("stidp %0" : "=Q" (*ptr));
}
-extern void s390_adjust_jiffies(void);
+void s390_adjust_jiffies(void);
+void s390_update_cpu_mhz(void);
+void cpu_detect_mhz_feature(void);
+
extern const struct seq_operations cpuinfo_op;
extern int sysctl_ieee_emulation_warnings;
extern void execve_tail(void);
@@ -109,6 +112,8 @@ struct thread_struct {
unsigned long ksp; /* kernel stack pointer */
mm_segment_t mm_segment;
unsigned long gmap_addr; /* address of last gmap fault. */
+ unsigned int gmap_write_flag; /* gmap fault write indication */
+ unsigned int gmap_int_code; /* int code of last gmap fault */
unsigned int gmap_pfault; /* signal of a pending guest pfault */
struct per_regs per_user; /* User specified PER registers */
struct per_event per_event; /* Cause of the last PER trap */
@@ -233,6 +238,18 @@ void cpu_relax(void);
#define cpu_relax_lowlatency() barrier()
+#define ECAG_CACHE_ATTRIBUTE 0
+#define ECAG_CPU_ATTRIBUTE 1
+
+static inline unsigned long __ecag(unsigned int asi, unsigned char parm)
+{
+ unsigned long val;
+
+ asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
+ : "=d" (val) : "a" (asi << 8 | parm));
+ return val;
+}
+
static inline void psw_set_key(unsigned int key)
{
asm volatile("spka 0(%0)" : : "d" (key));
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index c75e4471e..597e7e96b 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -207,41 +207,4 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
rwsem_downgrade_wake(sem);
}
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
- signed long old, new;
-
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " agr %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "d" (delta)
- : "cc", "memory");
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
- signed long old, new;
-
- asm volatile(
- " lg %0,%2\n"
- "0: lgr %1,%0\n"
- " agr %1,%4\n"
- " csg %0,%1,%2\n"
- " jl 0b"
- : "=&d" (old), "=&d" (new), "=Q" (sem->count)
- : "Q" (sem->count), "d" (delta)
- : "cc", "memory");
- return new;
-}
-
#endif /* _S390_RWSEM_H */
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index e4f6f73af..2ad9c204b 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -32,12 +32,19 @@ struct sclp_core_entry {
u8 reserved0;
u8 : 4;
u8 sief2 : 1;
- u8 : 3;
- u8 : 3;
+ u8 skey : 1;
+ u8 : 2;
+ u8 : 2;
+ u8 gpere : 1;
u8 siif : 1;
u8 sigpif : 1;
u8 : 3;
- u8 reserved2[10];
+ u8 reserved2[3];
+ u8 : 2;
+ u8 ib : 1;
+ u8 cei : 1;
+ u8 : 4;
+ u8 reserved3[6];
u8 type;
u8 reserved1;
} __attribute__((packed));
@@ -59,6 +66,15 @@ struct sclp_info {
unsigned char has_hvs : 1;
unsigned char has_esca : 1;
unsigned char has_sief2 : 1;
+ unsigned char has_64bscao : 1;
+ unsigned char has_gpere : 1;
+ unsigned char has_cmma : 1;
+ unsigned char has_gsls : 1;
+ unsigned char has_ib : 1;
+ unsigned char has_cei : 1;
+ unsigned char has_pfmfi : 1;
+ unsigned char has_ibs : 1;
+ unsigned char has_skey : 1;
unsigned int ibc;
unsigned int mtid;
unsigned int mtid_cp;
@@ -101,5 +117,6 @@ int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count);
int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count);
void sclp_early_detect(void);
void _sclp_print_early(const char *);
+void sclp_ocf_cpc_name_copy(char *dst);
#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index fbd9116eb..5ce29fe10 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -4,5 +4,6 @@
#include <asm-generic/sections.h>
extern char _eshared[], _ehead[];
+extern char __start_ro_after_init[], __end_ro_after_init[];
#endif
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index c0f0efbb6..5e8d57e1c 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -86,9 +86,13 @@ extern char vmpoff_cmd[];
#define CONSOLE_IS_SCLP (console_mode == 1)
#define CONSOLE_IS_3215 (console_mode == 2)
#define CONSOLE_IS_3270 (console_mode == 3)
+#define CONSOLE_IS_VT220 (console_mode == 4)
+#define CONSOLE_IS_HVC (console_mode == 5)
#define SET_CONSOLE_SCLP do { console_mode = 1; } while (0)
#define SET_CONSOLE_3215 do { console_mode = 2; } while (0)
#define SET_CONSOLE_3270 do { console_mode = 3; } while (0)
+#define SET_CONSOLE_VT220 do { console_mode = 4; } while (0)
+#define SET_CONSOLE_HVC do { console_mode = 5; } while (0)
#define NSS_NAME_SIZE 8
extern char kernel_nss_name[];
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index 1c8f33fca..72df5f2de 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -37,8 +37,8 @@
#ifndef __ASSEMBLY__
-static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
- u32 *status)
+static inline int ____pcpu_sigp(u16 addr, u8 order, unsigned long parm,
+ u32 *status)
{
register unsigned long reg1 asm ("1") = parm;
int cc;
@@ -48,8 +48,19 @@ static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
+ *status = reg1;
+ return cc;
+}
+
+static inline int __pcpu_sigp(u16 addr, u8 order, unsigned long parm,
+ u32 *status)
+{
+ u32 _status;
+ int cc;
+
+ cc = ____pcpu_sigp(addr, order, parm, &_status);
if (status && cc == 1)
- *status = reg1;
+ *status = _status;
return cc;
}
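The refactoring mirrors the ecctr() split above: ____pcpu_sigp() always stores the status word, while __pcpu_sigp() keeps the old semantics of writing *status only when the order was rejected with condition code 1. A caller-side sketch (SIGP_SENSE as the order and the handler are illustrative):

    u32 status;
    int cc;

    cc = __pcpu_sigp(addr, SIGP_SENSE, 0, &status);
    if (cc == 1)
            handle_sigp_status(status);    /* hypothetical handler */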
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 63ebf37d3..7e9e09f60 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -10,6 +10,8 @@
#define __ASM_SPINLOCK_H
#include <linux/smp.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
@@ -97,6 +99,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
while (arch_spin_is_locked(lock))
arch_spin_relax(lock);
+ smp_acquire__after_ctrl_dep();
}
/*
diff --git a/arch/s390/include/asm/stp.h b/arch/s390/include/asm/stp.h
new file mode 100644
index 000000000..768972758
--- /dev/null
+++ b/arch/s390/include/asm/stp.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright IBM Corp. 2006
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+#ifndef __S390_STP_H
+#define __S390_STP_H
+
+/* notifier chain, called with the TOD epoch delta after STP syncs */
+extern struct atomic_notifier_head s390_epoch_delta_notifier;
+
+/* STP interruption parameter */
+struct stp_irq_parm {
+ unsigned int _pad0 : 14;
+ unsigned int tsc : 1; /* Timing status change */
+ unsigned int lac : 1; /* Link availability change */
+ unsigned int tcpc : 1; /* Time control parameter change */
+ unsigned int _pad2 : 15;
+} __attribute__ ((packed));
+
+#define STP_OP_SYNC 1
+#define STP_OP_CTRL 3
+
+struct stp_sstpi {
+ unsigned int rsvd0;
+ unsigned int rsvd1 : 8;
+ unsigned int stratum : 8;
+ unsigned int vbits : 16;
+ unsigned int leaps : 16;
+ unsigned int tmd : 4;
+ unsigned int ctn : 4;
+ unsigned int rsvd2 : 3;
+ unsigned int c : 1;
+ unsigned int tst : 4;
+ unsigned int tzo : 16;
+ unsigned int dsto : 16;
+ unsigned int ctrl : 16;
+ unsigned int rsvd3 : 16;
+ unsigned int tto;
+ unsigned int rsvd4;
+ unsigned int ctnid[3];
+ unsigned int rsvd5;
+ unsigned int todoff[4];
+ unsigned int rsvd6[48];
+} __attribute__ ((packed));
+
+/* Functions needed by the machine check handler */
+int stp_sync_check(void);
+int stp_island_check(void);
+void stp_queue_work(void);
+
+#endif /* __S390_STP_H */
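
s390_epoch_delta_notifier is the hook interested code (KVM, for one) can use to learn about TOD epoch jumps after an STP sync. A hedged sketch of a listener; the callback and block names are illustrative, and the assumption that the chain passes a pointer to the applied clock delta as the data argument follows the current callers:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <asm/stp.h>

    static int epoch_delta_cb(struct notifier_block *nb,
                              unsigned long action, void *data)
    {
            u64 *delta = data;      /* TOD delta applied by the sync check */

            pr_info("STP sync: epoch shifted by %llu\n", *delta);
            return NOTIFY_OK;
    }

    static struct notifier_block epoch_delta_nb = {
            .notifier_call = epoch_delta_cb,
    };

    static int __init epoch_delta_init(void)
    {
            return atomic_notifier_chain_register(&s390_epoch_delta_notifier,
                                                  &epoch_delta_nb);
    }
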
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index dcb6312a0..0bb08f341 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -52,6 +52,70 @@ static inline void store_clock_comparator(__u64 *time)
void clock_comparator_work(void);
+void __init ptff_init(void);
+
+extern unsigned char ptff_function_mask[16];
+extern unsigned long lpar_offset;
+extern unsigned long initial_leap_seconds;
+
+/* Function codes for the ptff instruction. */
+#define PTFF_QAF 0x00 /* query available functions */
+#define PTFF_QTO 0x01 /* query tod offset */
+#define PTFF_QSI 0x02 /* query steering information */
+#define PTFF_QUI 0x04 /* query UTC information */
+#define PTFF_ATO 0x40 /* adjust tod offset */
+#define PTFF_STO 0x41 /* set tod offset */
+#define PTFF_SFS 0x42 /* set fine steering rate */
+#define PTFF_SGS 0x43 /* set gross steering rate */
+
+/* Query TOD offset result */
+struct ptff_qto {
+ unsigned long long physical_clock;
+ unsigned long long tod_offset;
+ unsigned long long logical_tod_offset;
+ unsigned long long tod_epoch_difference;
+} __packed;
+
+static inline int ptff_query(unsigned int nr)
+{
+ unsigned char *ptr;
+
+ ptr = ptff_function_mask + (nr >> 3);
+ return (*ptr & (0x80 >> (nr & 7))) != 0;
+}
+
+/* Query UTC information result */
+struct ptff_qui {
+ unsigned int tm : 2;
+ unsigned int ts : 2;
+ unsigned int : 28;
+ unsigned int pad_0x04;
+ unsigned long leap_event;
+ short old_leap;
+ short new_leap;
+ unsigned int pad_0x14;
+ unsigned long prt[5];
+ unsigned long cst[3];
+ unsigned int skew;
+ unsigned int pad_0x5c[41];
+} __packed;
+
+static inline int ptff(void *ptff_block, size_t len, unsigned int func)
+{
+ typedef struct { char _[len]; } addrtype;
+ register unsigned int reg0 asm("0") = func;
+ register unsigned long reg1 asm("1") = (unsigned long) ptff_block;
+ int rc;
+
+ asm volatile(
+ " .word 0x0104\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (rc), "+m" (*(addrtype *) ptff_block)
+ : "d" (reg0), "d" (reg1) : "cc");
+ return rc;
+}
+
static inline unsigned long long local_tick_disable(void)
{
unsigned long long old;
@@ -105,7 +169,7 @@ static inline cycles_t get_cycles(void)
return (cycles_t) get_tod_clock() >> 2;
}
-int get_sync_clock(unsigned long long *clock);
+int get_phys_clock(unsigned long long *clock);
void init_cpu_timer(void);
unsigned long long monotonic_clock(void);
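
The renamed get_phys_clock() is built on exactly this pair of helpers: probe the function bit with ptff_query(), then execute the function with ptff(). A hedged sketch of the pattern; the helper name and error values are illustrative:

    #include <linux/errno.h>
    #include <asm/timex.h>

    /* illustrative, not part of this patch */
    static int read_tod_offset(unsigned long long *offset)
    {
            struct ptff_qto qto;

            if (!ptff_query(PTFF_QTO))
                    return -EOPNOTSUPP;     /* function not installed */
            if (ptff(&qto, sizeof(qto), PTFF_QTO) != 0)
                    return -EIO;            /* condition code != 0 */
            *offset = qto.tod_offset;
            return 0;
    }
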
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 7a92e69c5..15711de10 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -87,10 +87,10 @@ static inline void tlb_finish_mmu(struct mmu_gather *tlb,
* tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
* has already been freed, so just do free_page_and_swap_cache.
*/
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
free_page_and_swap_cache(page);
- return 1; /* avoid calling tlb_flush_mmu */
+ return false; /* avoid calling tlb_flush_mmu */
}
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
@@ -98,6 +98,24 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
free_page_and_swap_cache(page);
}
+static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size)
+{
+ return __tlb_remove_page(tlb, page);
+}
+
+static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
+ struct page *page)
+{
+ return __tlb_remove_page(tlb, page);
+}
+
+static inline void tlb_remove_page_size(struct mmu_gather *tlb,
+ struct page *page, int page_size)
+{
+ return tlb_remove_page(tlb, page);
+}
+
/*
* pte_free_tlb frees a pte table and clears the CRSTE for the
* page table from the tlb.
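
The new size-aware wrappers exist so generic mm code can hand the page size down; on s390 they all deliberately collapse into the per-page path above. A one-line caller sketch:

    /* generic mm: same effect for any page_size on s390 */
    tlb_remove_page_size(tlb, page, PMD_SIZE);
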
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 0a2031618..1a691ef74 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
/*
* Flush all TLB entries on the local CPU.
@@ -44,17 +45,9 @@ void smp_ptlb_all(void);
*/
static inline void __tlb_flush_global(void)
{
- register unsigned long reg2 asm("2");
- register unsigned long reg3 asm("3");
- register unsigned long reg4 asm("4");
- long dummy;
-
- dummy = 0;
- reg2 = reg3 = 0;
- reg4 = ((unsigned long) &dummy) + 1;
- asm volatile(
- " csp %0,%2"
- : : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
+ unsigned int dummy = 0;
+
+ csp(&dummy, 0, 0);
}
/*
@@ -64,7 +57,7 @@ static inline void __tlb_flush_global(void)
static inline void __tlb_flush_full(struct mm_struct *mm)
{
preempt_disable();
- atomic_add(0x10000, &mm->context.attach_count);
+ atomic_inc(&mm->context.flush_count);
if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
/* Local TLB flush */
__tlb_flush_local();
@@ -76,7 +69,7 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
cpumask_copy(mm_cpumask(mm),
&mm->context.cpu_attach_mask);
}
- atomic_sub(0x10000, &mm->context.attach_count);
+ atomic_dec(&mm->context.flush_count);
preempt_enable();
}
@@ -86,12 +79,9 @@ static inline void __tlb_flush_full(struct mm_struct *mm)
*/
static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
- int active, count;
-
preempt_disable();
- active = (mm == current->active_mm) ? 1 : 0;
- count = atomic_add_return(0x10000, &mm->context.attach_count);
- if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+ atomic_inc(&mm->context.flush_count);
+ if (MACHINE_HAS_TLB_LC &&
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
__tlb_flush_idte_local(asce);
} else {
@@ -104,7 +94,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
cpumask_copy(mm_cpumask(mm),
&mm->context.cpu_attach_mask);
}
- atomic_sub(0x10000, &mm->context.attach_count);
+ atomic_dec(&mm->context.flush_count);
preempt_enable();
}
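
With the 16-bit attach/flush packing gone, in-flight flushers are tracked in the dedicated flush_count counter. Other code can now wait them out with a plain counter test; a hedged fragment showing how the counter is meant to be consumed:

    /* e.g. before attaching an mm, wait out concurrent TLB flushers: */
    while (atomic_read(&mm->context.flush_count))
            cpu_relax();
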
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 6b53962e8..f15f5571c 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -14,10 +14,12 @@ struct cpu_topology_s390 {
unsigned short core_id;
unsigned short socket_id;
unsigned short book_id;
+ unsigned short drawer_id;
unsigned short node_id;
cpumask_t thread_mask;
cpumask_t core_mask;
cpumask_t book_mask;
+ cpumask_t drawer_mask;
};
DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
@@ -30,6 +32,8 @@ DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
+#define topology_drawer_id(cpu) (per_cpu(cpu_topology, cpu).drawer_id)
+#define topology_drawer_cpumask(cpu) (&per_cpu(cpu_topology, cpu).drawer_mask)
#define mc_capable() 1
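
Drawers sit above books in the s390 topology, and the new accessors mirror the existing book ones. A hedged sketch that walks the CPUs sharing a drawer; the helper name is illustrative:

    #include <linux/cpumask.h>
    #include <linux/printk.h>
    #include <asm/topology.h>

    /* illustrative: log the drawer peers of a given cpu */
    static void dump_drawer_peers(int cpu)
    {
            int peer;

            for_each_cpu(peer, topology_drawer_cpumask(cpu))
                    pr_debug("cpu %d shares drawer %d\n",
                             peer, topology_drawer_id(cpu));
    }
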
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 666fd8ba1..52d7c8709 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -151,8 +151,65 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
__rc; \
})
-#define __put_user_fn(x, ptr, size) __put_get_user_asm(ptr, x, size, 0x810000UL)
-#define __get_user_fn(x, ptr, size) __put_get_user_asm(x, ptr, size, 0x81UL)
+static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
+{
+ unsigned long spec = 0x810000UL;
+ int rc;
+
+ switch (size) {
+ case 1:
+ rc = __put_get_user_asm((unsigned char __user *)ptr,
+ (unsigned char *)x,
+ size, spec);
+ break;
+ case 2:
+ rc = __put_get_user_asm((unsigned short __user *)ptr,
+ (unsigned short *)x,
+ size, spec);
+ break;
+ case 4:
+ rc = __put_get_user_asm((unsigned int __user *)ptr,
+ (unsigned int *)x,
+ size, spec);
+ break;
+ case 8:
+ rc = __put_get_user_asm((unsigned long __user *)ptr,
+ (unsigned long *)x,
+ size, spec);
+ break;
+ }
+ return rc;
+}
+
+static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
+{
+ unsigned long spec = 0x81UL;
+ int rc;
+
+ switch (size) {
+ case 1:
+ rc = __put_get_user_asm((unsigned char *)x,
+ (unsigned char __user *)ptr,
+ size, spec);
+ break;
+ case 2:
+ rc = __put_get_user_asm((unsigned short *)x,
+ (unsigned short __user *)ptr,
+ size, spec);
+ break;
+ case 4:
+ rc = __put_get_user_asm((unsigned int *)x,
+ (unsigned int __user *)ptr,
+ size, spec);
+ break;
+ case 8:
+ rc = __put_get_user_asm((unsigned long *)x,
+ (unsigned long __user *)ptr,
+ size, spec);
+ break;
+ }
+ return rc;
+}
#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
@@ -191,7 +248,7 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long s
__put_user_bad(); \
break; \
} \
- __pu_err; \
+ __builtin_expect(__pu_err, 0); \
})
#define put_user(x, ptr) \
@@ -240,7 +297,7 @@ int __put_user_bad(void) __attribute__((noreturn));
__get_user_bad(); \
break; \
} \
- __gu_err; \
+ __builtin_expect(__gu_err, 0); \
})
#define get_user(x, ptr) \
@@ -254,6 +311,14 @@ int __get_user_bad(void) __attribute__((noreturn));
#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+ WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
/**
* copy_to_user: - Copy a block of data into user space.
* @to: Destination address, in user space.
@@ -275,12 +340,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
return __copy_to_user(to, from, n);
}
-void copy_from_user_overflow(void)
-#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
-__compiletime_warning("copy_from_user() buffer size is not provably correct")
-#endif
-;
-
/**
* copy_from_user: - Copy a block of data from user space.
* @to: Destination address, in kernel space.
@@ -305,7 +364,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
might_fault();
if (unlikely(sz != -1 && sz < n)) {
- copy_from_user_overflow();
+ if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+ else
+ __bad_copy_user();
return n;
}
return __copy_from_user(to, from, n);
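
Net effect of the uaccess changes, as a hedged sketch: a provably too-small constant size now fails the build through __bad_copy_user(), a runtime-sized overflow warns once via copy_user_overflow() and leaves the buffer uncopied, and the __builtin_expect() wrappers tell the compiler the fault path is cold. The function below is illustrative only:

    #include <linux/errno.h>
    #include <linux/uaccess.h>

    static long read_small(const void __user *uptr, unsigned long n)
    {
            char buf[8];

            /* a constant size > sizeof(buf) would now break the build:
             *   copy_from_user(buf, uptr, 16);   -> __bad_copy_user()
             */
            if (copy_from_user(buf, uptr, n))   /* runtime n: WARNs on overflow */
                    return -EFAULT;
            return 0;
    }
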