From d0b2f91bede3bd5e3d24dd6803e56eee959c1797 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Thu, 20 Oct 2016 00:10:27 -0300
Subject: Linux-libre 4.8.2-gnu

---
 arch/mips/include/asm/addrspace.h                    |   2 +-
 arch/mips/include/asm/atomic.h                       | 154 ++++++++--
 arch/mips/include/asm/bootinfo.h                     |   4 +
 arch/mips/include/asm/dsemul.h                       |  92 ++++++
 arch/mips/include/asm/elf.h                          |   4 +
 arch/mips/include/asm/fpu_emulator.h                 |  17 +-
 arch/mips/include/asm/kvm_host.h                     | 315 ++++++++-------------
 .../asm/mach-cavium-octeon/cpu-feature-overrides.h   |   2 +-
 arch/mips/include/asm/mach-cavium-octeon/irq.h       |   2 -
 .../include/asm/mach-cavium-octeon/mangle-port.h     |  44 +--
 arch/mips/include/asm/mips-cm.h                      |  13 +-
 arch/mips/include/asm/mipsregs.h                     |  23 +-
 arch/mips/include/asm/mmu.h                          |   9 +
 arch/mips/include/asm/mmu_context.h                  |   6 +
 arch/mips/include/asm/msa.h                          |   2 +
 arch/mips/include/asm/page.h                         |  44 ++-
 arch/mips/include/asm/pci.h                          |  10 -
 arch/mips/include/asm/pgtable.h                      |  16 +-
 arch/mips/include/asm/processor.h                    |  18 +-
 arch/mips/include/asm/r4kcache.h                     |   4 +
 arch/mips/include/asm/seccomp.h                      |   4 +-
 arch/mips/include/asm/setup.h                        |   1 +
 arch/mips/include/asm/signal.h                       |   6 +-
 arch/mips/include/asm/smp.h                          |   4 +-
 arch/mips/include/asm/spinlock.h                     |  19 +-
 arch/mips/include/asm/syscall.h                      |   2 +-
 arch/mips/include/asm/uaccess.h                      |   2 +-
 arch/mips/include/asm/uasm.h                         |   7 +
 arch/mips/include/asm/uprobes.h                      |   1 -
 arch/mips/include/uapi/asm/auxvec.h                  |   2 +
 arch/mips/include/uapi/asm/inst.h                    | 114 +++++++-
 31 files changed, 636 insertions(+), 307 deletions(-)
 create mode 100644 arch/mips/include/asm/dsemul.h

(limited to 'arch/mips/include')

diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h
index 3b0e51d5a..c5b04e752 100644
--- a/arch/mips/include/asm/addrspace.h
+++ b/arch/mips/include/asm/addrspace.h
@@ -45,7 +45,7 @@
 /*
  * Returns the kernel segment base of a given address
  */
-#define KSEGX(a)        ((_ACAST32_ (a)) & 0xe0000000)
+#define KSEGX(a)        ((_ACAST32_(a)) & _ACAST32_(0xe0000000))
 
 /*
  * Returns the physical address of a CKSEGx / XKPHYS address
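Why the KSEGX() change matters: on 64-bit kernels _ACAST32_ sign-extends a 32-bit segment address, but the bare 0xe0000000 constant does not, so the old mask silently dropped the sign-extended upper bits. A minimal user-space sketch (our own ACAST32 stands in for the kernel's 64-bit cast-through-int):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the kernel's 64-bit _ACAST32_: cast through int32 so 32-bit
 * segment addresses sign-extend into the upper half of a 64-bit word. */
#define ACAST32(x) ((int64_t)(int32_t)(x))

int main(void)
{
        uint64_t ckseg0_addr = ACAST32(0x80001000); /* 0xffffffff80001000 */

        /* Old form: plain unsigned 32-bit mask discards the upper bits. */
        printf("old: %#llx\n",
               (unsigned long long)(ckseg0_addr & 0xe0000000));

        /* New form: sign-extend the mask too, preserving the upper bits. */
        printf("new: %#llx\n",
               (unsigned long long)(ckseg0_addr & ACAST32(0xe0000000)));
        return 0;
}

This prints 0x80000000 for the old expression but the correct 0xffffffff80000000 for the new one.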
\ + "+" GCC_OFF_SMALL_ASM() (v->counter) \ + : "Ir" (i)); \ + } else if (kernel_uses_llsc) { \ + int temp; \ + \ + do { \ + __asm__ __volatile__( \ + " .set "MIPS_ISA_LEVEL" \n" \ + " ll %1, %2 # atomic_fetch_" #op " \n" \ + " " #asm_op " %0, %1, %3 \n" \ + " sc %0, %2 \n" \ + " .set mips0 \n" \ + : "=&r" (result), "=&r" (temp), \ + "+" GCC_OFF_SMALL_ASM() (v->counter) \ + : "Ir" (i)); \ + } while (unlikely(!result)); \ + \ + result = temp; \ + } else { \ + unsigned long flags; \ + \ + raw_local_irq_save(flags); \ + result = v->counter; \ + v->counter c_op i; \ + raw_local_irq_restore(flags); \ + } \ \ return result; \ } #define ATOMIC_OPS(op, c_op, asm_op) \ ATOMIC_OP(op, c_op, asm_op) \ - ATOMIC_OP_RETURN(op, c_op, asm_op) + ATOMIC_OP_RETURN(op, c_op, asm_op) \ + ATOMIC_FETCH_OP(op, c_op, asm_op) ATOMIC_OPS(add, +=, addu) ATOMIC_OPS(sub, -=, subu) -ATOMIC_OP(and, &=, and) -ATOMIC_OP(or, |=, or) -ATOMIC_OP(xor, ^=, xor) +#define atomic_add_return_relaxed atomic_add_return_relaxed +#define atomic_sub_return_relaxed atomic_sub_return_relaxed +#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed +#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed + +#undef ATOMIC_OPS +#define ATOMIC_OPS(op, c_op, asm_op) \ + ATOMIC_OP(op, c_op, asm_op) \ + ATOMIC_FETCH_OP(op, c_op, asm_op) + +ATOMIC_OPS(and, &=, and) +ATOMIC_OPS(or, |=, or) +ATOMIC_OPS(xor, ^=, xor) + +#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed +#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed +#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed #undef ATOMIC_OPS +#undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP @@ -362,12 +421,10 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ } #define ATOMIC64_OP_RETURN(op, c_op, asm_op) \ -static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ +static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \ { \ long result; \ \ - smp_mb__before_llsc(); \ - \ if (kernel_uses_llsc && R10000_LLSC_WAR) { \ long temp; \ \ @@ -409,22 +466,85 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ raw_local_irq_restore(flags); \ } \ \ - smp_llsc_mb(); \ + return result; \ +} + +#define ATOMIC64_FETCH_OP(op, c_op, asm_op) \ +static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ +{ \ + long result; \ + \ + if (kernel_uses_llsc && R10000_LLSC_WAR) { \ + long temp; \ + \ + __asm__ __volatile__( \ + " .set arch=r4000 \n" \ + "1: lld %1, %2 # atomic64_fetch_" #op "\n" \ + " " #asm_op " %0, %1, %3 \n" \ + " scd %0, %2 \n" \ + " beqzl %0, 1b \n" \ + " move %0, %1 \n" \ + " .set mips0 \n" \ + : "=&r" (result), "=&r" (temp), \ + "+" GCC_OFF_SMALL_ASM() (v->counter) \ + : "Ir" (i)); \ + } else if (kernel_uses_llsc) { \ + long temp; \ + \ + do { \ + __asm__ __volatile__( \ + " .set "MIPS_ISA_LEVEL" \n" \ + " lld %1, %2 # atomic64_fetch_" #op "\n" \ + " " #asm_op " %0, %1, %3 \n" \ + " scd %0, %2 \n" \ + " .set mips0 \n" \ + : "=&r" (result), "=&r" (temp), \ + "=" GCC_OFF_SMALL_ASM() (v->counter) \ + : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \ + : "memory"); \ + } while (unlikely(!result)); \ + \ + result = temp; \ + } else { \ + unsigned long flags; \ + \ + raw_local_irq_save(flags); \ + result = v->counter; \ + v->counter c_op i; \ + raw_local_irq_restore(flags); \ + } \ \ return result; \ } #define ATOMIC64_OPS(op, c_op, asm_op) \ ATOMIC64_OP(op, c_op, asm_op) \ - ATOMIC64_OP_RETURN(op, c_op, asm_op) + ATOMIC64_OP_RETURN(op, c_op, asm_op) \ + ATOMIC64_FETCH_OP(op, c_op, 
@@ -362,12 +421,10 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v)      \
 }
 
 #define ATOMIC64_OP_RETURN(op, c_op, asm_op)  \
-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)  \
+static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)  \
 {  \
 	long result;  \
 	  \
-	smp_mb__before_llsc();  \
-	  \
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {  \
 		long temp;  \
 
@@ -409,22 +466,85 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)  \
 		raw_local_irq_restore(flags);  \
 	}  \
 	  \
-	smp_llsc_mb();  \
+	return result;  \
+}
+
+#define ATOMIC64_FETCH_OP(op, c_op, asm_op)  \
+static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)  \
+{  \
+	long result;  \
+	  \
+	if (kernel_uses_llsc && R10000_LLSC_WAR) {  \
+		long temp;  \
+		  \
+		__asm__ __volatile__(  \
+		"	.set	arch=r4000	\n"   \
+		"1:	lld	%1, %2	# atomic64_fetch_" #op "\n"   \
+		"	" #asm_op " %0, %1, %3	\n"   \
+		"	scd	%0, %2		\n"   \
+		"	beqzl	%0, 1b		\n"   \
+		"	move	%0, %1		\n"   \
+		"	.set	mips0		\n"   \
+		: "=&r" (result), "=&r" (temp),  \
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)  \
+		: "Ir" (i));  \
+	} else if (kernel_uses_llsc) {  \
+		long temp;  \
+		  \
+		do {  \
+			__asm__ __volatile__(  \
+			"	.set	"MIPS_ISA_LEVEL"	\n"   \
+			"	lld	%1, %2	# atomic64_fetch_" #op "\n"   \
+			"	" #asm_op " %0, %1, %3	\n"   \
+			"	scd	%0, %2		\n"   \
+			"	.set	mips0		\n"   \
+			: "=&r" (result), "=&r" (temp),  \
+			  "=" GCC_OFF_SMALL_ASM() (v->counter)  \
+			: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)  \
+			: "memory");  \
+		} while (unlikely(!result));  \
+		  \
+		result = temp;  \
+	} else {  \
+		unsigned long flags;  \
+		  \
+		raw_local_irq_save(flags);  \
+		result = v->counter;  \
+		v->counter c_op i;  \
+		raw_local_irq_restore(flags);  \
+	}  \
 	  \
 	return result;  \
 }
 
 #define ATOMIC64_OPS(op, c_op, asm_op)  \
 	ATOMIC64_OP(op, c_op, asm_op)  \
-	ATOMIC64_OP_RETURN(op, c_op, asm_op)
+	ATOMIC64_OP_RETURN(op, c_op, asm_op)  \
+	ATOMIC64_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC64_OPS(add, +=, daddu)
 ATOMIC64_OPS(sub, -=, dsubu)
 
-ATOMIC64_OP(and, &=, and)
-ATOMIC64_OP(or, |=, or)
-ATOMIC64_OP(xor, ^=, xor)
+
+#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op, asm_op)  \
+	ATOMIC64_OP(op, c_op, asm_op)  \
+	ATOMIC64_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC64_OPS(and, &=, and)
+ATOMIC64_OPS(or, |=, or)
+ATOMIC64_OPS(xor, ^=, xor)
+
+#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
+#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
 
 #undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
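The atomic.h changes follow the kernel-wide fetch-op rework: the *_return operations are renamed to *_return_relaxed() and yield the new value, the new atomic_fetch_*_relaxed() operations yield the old value, and the smp_mb__before_llsc()/smp_llsc_mb() barriers disappear from these macros because the generic atomic layer now builds the fully-ordered variants from the _relaxed ones plus explicit barriers. A user-space analogy of the two return conventions, using GCC's builtins rather than the kernel API:

#include <stdio.h>

int main(void)
{
        int v = 5;

        /* atomic_add_return() semantics: apply the op, yield the NEW value. */
        int new = __atomic_add_fetch(&v, 3, __ATOMIC_RELAXED);

        /* atomic_fetch_add() semantics: apply the op, yield the OLD value. */
        int old = __atomic_fetch_add(&v, 3, __ATOMIC_RELAXED);

        printf("new=%d old=%d final=%d\n", new, old, v); /* 8 5 11 */
        return 0;
}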
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 9f6703396..ee9f5f2d1 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -127,6 +127,10 @@ extern char arcs_cmdline[COMMAND_LINE_SIZE];
  */
 extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
 
+#ifdef CONFIG_USE_OF
+extern unsigned long fw_passed_dtb;
+#endif
+
 /*
  * Platform memory detection hook called by setup_arch
  */
diff --git a/arch/mips/include/asm/dsemul.h b/arch/mips/include/asm/dsemul.h
new file mode 100644
index 000000000..a6e067801
--- /dev/null
+++ b/arch/mips/include/asm/dsemul.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_DSEMUL_H__
+#define __MIPS_ASM_DSEMUL_H__
+
+#include <asm/break.h>
+#include <asm/inst.h>
+
+/* Break instruction with special math emu break code set */
+#define BREAK_MATH(micromips)	(((micromips) ? 0x7 : 0xd) | (BRK_MEMU << 16))
+
+/* When used as a frame index, indicates the lack of a frame */
+#define BD_EMUFRAME_NONE	((int)BIT(31))
+
+struct mm_struct;
+struct pt_regs;
+struct task_struct;
+
+/**
+ * mips_dsemul() - 'Emulate' an instruction from a branch delay slot
+ * @regs:	User thread register context.
+ * @ir:		The instruction to be 'emulated'.
+ * @branch_pc:	The PC of the branch instruction.
+ * @cont_pc:	The PC to continue at following 'emulation'.
+ *
+ * Emulate or execute an arbitrary MIPS instruction within the context of
+ * the current user thread. This is used primarily to handle instructions
+ * in the delay slots of emulated branch instructions, for example FP
+ * branch instructions on systems without an FPU.
+ *
+ * Return: Zero on success, negative if ir is a NOP, signal number on failure.
+ */
+extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
+		       unsigned long branch_pc, unsigned long cont_pc);
+
+/**
+ * do_dsemulret() - Return from a delay slot 'emulation' frame
+ * @xcp:	User thread register context.
+ *
+ * Call in response to the BRK_MEMU break instruction used to return to
+ * the kernel from branch delay slot 'emulation' frames following a call
+ * to mips_dsemul(). Restores the user thread PC to the value that was
+ * passed as the cont_pc parameter to mips_dsemul().
+ *
+ * Return: True if an emulation frame was returned from, else false.
+ */
+extern bool do_dsemulret(struct pt_regs *xcp);
+
+/**
+ * dsemul_thread_cleanup() - Cleanup thread 'emulation' frame
+ * @tsk:	The task structure associated with the thread
+ *
+ * If the thread @tsk has a branch delay slot 'emulation' frame
+ * allocated to it then free that frame.
+ *
+ * Return: True if a frame was freed, else false.
+ */
+extern bool dsemul_thread_cleanup(struct task_struct *tsk);
+
+/**
+ * dsemul_thread_rollback() - Rollback from an 'emulation' frame
+ * @regs:	User thread register context.
+ *
+ * If the current thread, whose register context is represented by @regs,
+ * is executing within a delay slot 'emulation' frame then exit that
+ * frame. The PC will be rolled back to the branch if the instruction
+ * that was being 'emulated' has not yet executed, or advanced to the
+ * continuation PC if it has.
+ *
+ * Return: True if a frame was exited, else false.
+ */
+extern bool dsemul_thread_rollback(struct pt_regs *regs);
+
+/**
+ * dsemul_mm_cleanup() - Cleanup per-mm delay slot 'emulation' state
+ * @mm:		The struct mm_struct to cleanup state for.
+ *
+ * Cleanup state for the given @mm, ensuring that any memory allocated
+ * for delay slot 'emulation' book-keeping is freed. This is to be called
+ * before @mm is freed in order to avoid memory leaks.
+ */
+extern void dsemul_mm_cleanup(struct mm_struct *mm);
+
+#endif /* __MIPS_ASM_DSEMUL_H__ */
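BREAK_MATH() encodes the break instruction that dsemul plants after the relocated delay-slot instruction so the kernel regains control. Assuming BRK_MEMU is 514 (0x202), its value in asm/break.h, the encodings work out as follows:

#include <stdio.h>
#include <stdint.h>

#define BRK_MEMU 514 /* "used by FPU emulator" break code (asm/break.h) */

/* Same expression as BREAK_MATH() above: the BREAK opcode bits
 * (0xd classic, 0x7 microMIPS) with the code field set to BRK_MEMU. */
static uint32_t break_math(int micromips)
{
        return (micromips ? 0x7u : 0xdu) | ((uint32_t)BRK_MEMU << 16);
}

int main(void)
{
        printf("classic:   %#010x\n", (unsigned)break_math(0)); /* 0x0202000d */
        printf("micromips: %#010x\n", (unsigned)break_math(1)); /* 0x02020007 */
        return 0;
}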
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index f5f457179..2b3dc2973 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -458,6 +458,7 @@ extern const char *__elf_platform;
 #define ELF_ET_DYN_BASE		(TASK_SIZE / 3 * 2)
 #endif
 
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
 #define ARCH_DLINFO  \
 do {  \
 	NEW_AUX_ENT(AT_SYSINFO_EHDR,  \
@@ -498,4 +499,7 @@ extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
 extern void mips_set_personality_nan(struct arch_elf_state *state);
 extern void mips_set_personality_fp(struct arch_elf_state *state);
 
+#define elf_read_implies_exec(ex, stk)	mips_elf_read_implies_exec(&(ex), stk)
+extern int mips_elf_read_implies_exec(void *elf_ex, int exstack);
+
 #endif /* _ASM_ELF_H */
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 3225c3c07..355dc2517 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -24,7 +24,7 @@
 #define _ASM_FPU_EMULATOR_H
 
 #include <linux/sched.h>
-#include <asm/break.h>
+#include <asm/dsemul.h>
 #include <asm/thread_info.h>
 #include <asm/inst.h>
 #include <asm/local.h>
@@ -60,27 +60,16 @@ do {  \
 #define MIPS_FPU_EMU_INC_STATS(M) do { } while (0)
 #endif /* CONFIG_DEBUG_FS */
 
-extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
-		       unsigned long cpc);
-extern int do_dsemulret(struct pt_regs *xcp);
 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
 				    struct mips_fpu_struct *ctx, int has_fpu,
 				    void *__user *fault_addr);
 int process_fpemu_return(int sig, void __user *fault_addr,
 			 unsigned long fcr31);
+int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+		  unsigned long *contpc);
 int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
 		     unsigned long *contpc);
 
-/*
- * Instruction inserted following the badinst to further tag the sequence
- */
-#define BD_COOKIE 0x0000bd36 /* tne $0, $0 with baggage */
-
-/*
- * Break instruction with special math emu break code set
- */
-#define BREAK_MATH(micromips) (((micromips) ? 0x7 : 0xd) | (BRK_MEMU << 16))
-
 #define SIGNALLING_NAN 0x7ff800007ff80000LL
 
 static inline void fpu_emulator_init_fpu(void)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 36a391d28..b54bcadd8 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -19,6 +19,9 @@
 #include <linux/threads.h>
 #include <linux/spinlock.h>
 
+#include <asm/inst.h>
+#include <asm/mipsregs.h>
+
 /* MIPS KVM register ids */
 #define MIPS_CP0_32(_R, _S)  \
 	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
@@ -53,6 +56,12 @@
 #define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
 #define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
 #define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
+#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
+#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
+#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
+#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
+#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
+#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)
 
 #define KVM_MAX_VCPUS		1
 
@@ -65,8 +74,14 @@
 
-/* Special address that contains the comm page, used for reducing # of traps */
-#define KVM_GUEST_COMMPAGE_ADDR		0x0
+/*
+ * Special address that contains the comm page, used for reducing # of traps
+ * This needs to be within 32Kb of 0x0 (so the zero register can be used), but
+ * preferably not at 0x0 so that most kernel NULL pointer dereferences can be
+ * caught.
+ */
+#define KVM_GUEST_COMMPAGE_ADDR		((PAGE_SIZE > 0x8000) ?	0 : \
+					 (0x8000 - PAGE_SIZE))
 
 #define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
 					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
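The comm page placement arithmetic: MIPS load/store offsets are signed 16-bit, so anything within 32KB of $zero is directly addressable; placing the page at the top of that window keeps address 0 unmapped so guest NULL dereferences still fault. A quick check of the macro for common page sizes:

#include <stdio.h>

/* Same arithmetic as KVM_GUEST_COMMPAGE_ADDR: the last whole page below
 * 32KB, or 0 when a single page already covers the whole window. */
static unsigned long commpage_addr(unsigned long page_size)
{
        return (page_size > 0x8000) ? 0 : (0x8000 - page_size);
}

int main(void)
{
        unsigned long sizes[] = { 0x1000, 0x4000, 0x10000 }; /* 4K 16K 64K */

        for (int i = 0; i < 3; i++)
                printf("%luK pages -> commpage @ %#lx\n",
                       sizes[i] >> 10, commpage_addr(sizes[i]));
        return 0; /* prints 0x7000, 0x4000, 0 */
}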
@@ -93,9 +108,6 @@
 #define KVM_INVALID_ADDR		0xdeadbeef
 
 extern atomic_t kvm_mips_instance;
-extern kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
-extern void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
-extern bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
 
 struct kvm_vm_stat {
 	u32 remote_tlb_flush;
@@ -126,28 +138,6 @@ struct kvm_vcpu_stat {
 	u32 halt_wakeup;
 };
 
-enum kvm_mips_exit_types {
-	WAIT_EXITS,
-	CACHE_EXITS,
-	SIGNAL_EXITS,
-	INT_EXITS,
-	COP_UNUSABLE_EXITS,
-	TLBMOD_EXITS,
-	TLBMISS_LD_EXITS,
-	TLBMISS_ST_EXITS,
-	ADDRERR_ST_EXITS,
-	ADDRERR_LD_EXITS,
-	SYSCALL_EXITS,
-	RESVD_INST_EXITS,
-	BREAK_INST_EXITS,
-	TRAP_INST_EXITS,
-	MSA_FPE_EXITS,
-	FPE_EXITS,
-	MSA_DISABLED_EXITS,
-	FLUSH_DCACHE_EXITS,
-	MAX_KVM_MIPS_EXIT_TYPES
-};
-
 struct kvm_arch_memory_slot {
 };
 
@@ -215,73 +205,6 @@ struct mips_coproc {
 #define MIPS_CP0_CONFIG4_SEL	4
 #define MIPS_CP0_CONFIG5_SEL	5
 
-/* Config0 register bits */
-#define CP0C0_M			31
-#define CP0C0_K23		28
-#define CP0C0_KU		25
-#define CP0C0_MDU		20
-#define CP0C0_MM		17
-#define CP0C0_BM		16
-#define CP0C0_BE		15
-#define CP0C0_AT		13
-#define CP0C0_AR		10
-#define CP0C0_MT		7
-#define CP0C0_VI		3
-#define CP0C0_K0		0
-
-/* Config1 register bits */
-#define CP0C1_M			31
-#define CP0C1_MMU		25
-#define CP0C1_IS		22
-#define CP0C1_IL		19
-#define CP0C1_IA		16
-#define CP0C1_DS		13
-#define CP0C1_DL		10
-#define CP0C1_DA		7
-#define CP0C1_C2		6
-#define CP0C1_MD		5
-#define CP0C1_PC		4
-#define CP0C1_WR		3
-#define CP0C1_CA		2
-#define CP0C1_EP		1
-#define CP0C1_FP		0
-
-/* Config2 Register bits */
-#define CP0C2_M			31
-#define CP0C2_TU		28
-#define CP0C2_TS		24
-#define CP0C2_TL		20
-#define CP0C2_TA		16
-#define CP0C2_SU		12
-#define CP0C2_SS		8
-#define CP0C2_SL		4
-#define CP0C2_SA		0
-
-/* Config3 Register bits */
-#define CP0C3_M			31
-#define CP0C3_ISA_ON_EXC	16
-#define CP0C3_ULRI		13
-#define CP0C3_DSPP		10
-#define CP0C3_LPA		7
-#define CP0C3_VEIC		6
-#define CP0C3_VInt		5
-#define CP0C3_SP		4
-#define CP0C3_MT		2
-#define CP0C3_SM		1
-#define CP0C3_TL		0
-
-/* MMU types, the first four entries have the same layout as the
-   CP0C0_MT field. */
-enum mips_mmu_types {
-	MMU_TYPE_NONE,
-	MMU_TYPE_R4000,
-	MMU_TYPE_RESERVED,
-	MMU_TYPE_FMT,
-	MMU_TYPE_R3000,
-	MMU_TYPE_R6000,
-	MMU_TYPE_R8000
-};
-
 /* Resume Flags */
 #define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
 #define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */
@@ -298,11 +221,6 @@ enum emulation_result {
 	EMULATE_PRIV_FAIL,
 };
 
-#define MIPS3_PG_G	0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
-#define MIPS3_PG_V	0x00000002 /* Valid */
-#define MIPS3_PG_NV	0x00000000
-#define MIPS3_PG_D	0x00000004 /* Dirty */
-
 #define mips3_paddr_to_tlbpfn(x) \
 	(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
 #define mips3_tlbpfn_to_paddr(x) \
@@ -313,13 +231,11 @@ enum emulation_result {
 #define VPN2_MASK		0xffffe000
 #define KVM_ENTRYHI_ASID	MIPS_ENTRYHI_ASID
-#define TLB_IS_GLOBAL(x)	(((x).tlb_lo0 & MIPS3_PG_G) &&	\
-				 ((x).tlb_lo1 & MIPS3_PG_G))
+#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
 #define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
 #define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
-#define TLB_IS_VALID(x, va)	(((va) & (1 << PAGE_SHIFT))	\
-				 ? ((x).tlb_lo1 & MIPS3_PG_V)	\
-				 : ((x).tlb_lo0 & MIPS3_PG_V))
+#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
+#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
 #define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
 				 ((y) & VPN2_MASK & ~(x).tlb_mask))
 #define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
@@ -328,26 +244,23 @@ enum emulation_result {
 struct kvm_mips_tlb {
 	long tlb_mask;
 	long tlb_hi;
-	long tlb_lo0;
-	long tlb_lo1;
+	long tlb_lo[2];
 };
 
-#define KVM_MIPS_FPU_FPU	0x1
-#define KVM_MIPS_FPU_MSA	0x2
+#define KVM_MIPS_AUX_FPU	0x1
+#define KVM_MIPS_AUX_MSA	0x2
 
 #define KVM_MIPS_GUEST_TLB_SIZE	64
 struct kvm_vcpu_arch {
-	void *host_ebase, *guest_ebase;
+	void *guest_ebase;
 	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
 	unsigned long host_stack;
 	unsigned long host_gp;
 
 	/* Host CP0 registers used when handling exits from guest */
 	unsigned long host_cp0_badvaddr;
-	unsigned long host_cp0_cause;
 	unsigned long host_cp0_epc;
-	unsigned long host_cp0_entryhi;
-	uint32_t guest_inst;
+	u32 host_cp0_cause;
 
 	/* GPRS */
 	unsigned long gprs[32];
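Background for the tlb_lo[2] conversion: one MIPS TLB entry maps an even/odd pair of virtual pages through EntryLo0/EntryLo1, so the parity of the virtual page number selects the half. TLB_LO_IDX() reduces to:

#include <stdio.h>

#define PAGE_SHIFT 12 /* assume 4K pages for this sketch */

/* Parity of the virtual page number picks EntryLo0 or EntryLo1,
 * mirroring TLB_LO_IDX() in the hunk above. */
static int tlb_lo_idx(unsigned long va)
{
        return (int)((va >> PAGE_SHIFT) & 1);
}

int main(void)
{
        printf("va 0x00400000 -> lo[%d]\n", tlb_lo_idx(0x00400000)); /* even: 0 */
        printf("va 0x00401000 -> lo[%d]\n", tlb_lo_idx(0x00401000)); /* odd:  1 */
        return 0;
}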
@@ -357,8 +270,8 @@ struct kvm_vcpu_arch {
 	/* FPU State */
 	struct mips_fpu_struct fpu;
-	/* Which FPU state is loaded (KVM_MIPS_FPU_*) */
-	unsigned int fpu_inuse;
+	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
+	unsigned int aux_inuse;
 
 	/* COP0 State */
 	struct mips_coproc *cop0;
@@ -370,11 +283,11 @@ struct kvm_vcpu_arch {
 	struct hrtimer comparecount_timer;
 	/* Count timer control KVM register */
-	uint32_t count_ctl;
+	u32 count_ctl;
 	/* Count bias from the raw time */
-	uint32_t count_bias;
+	u32 count_bias;
 	/* Frequency of timer in Hz */
-	uint32_t count_hz;
+	u32 count_hz;
 	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
 	s64 count_dyn_bias;
 	/* Resume time */
@@ -388,7 +301,7 @@ struct kvm_vcpu_arch {
 	/* Bitmask of pending exceptions to be cleared */
 	unsigned long pending_exceptions_clr;
 
-	unsigned long pending_load_cause;
+	u32 pending_load_cause;
 
 	/* Save/Restore the entryhi register when we are preempted/scheduled back in */
 	unsigned long preempt_entryhi;
@@ -397,8 +310,8 @@ struct kvm_vcpu_arch {
 	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
 
 	/* Cached guest kernel/user ASIDs */
-	uint32_t guest_user_asid[NR_CPUS];
-	uint32_t guest_kernel_asid[NR_CPUS];
+	u32 guest_user_asid[NR_CPUS];
+	u32 guest_kernel_asid[NR_CPUS];
 	struct mm_struct guest_kernel_mm, guest_user_mm;
 
 	int last_sched_cpu;
@@ -408,6 +321,7 @@ struct kvm_vcpu_arch {
 
 	u8 fpu_enabled;
 	u8 msa_enabled;
+	u8 kscratch_enabled;
 };
 
@@ -461,6 +375,18 @@ struct kvm_vcpu_arch {
 #define kvm_write_c0_guest_config7(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][7] = (val))
 #define kvm_read_c0_guest_errorepc(cop0)	(cop0->reg[MIPS_CP0_ERROR_PC][0])
 #define kvm_write_c0_guest_errorepc(cop0, val)	(cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
+#define kvm_read_c0_guest_kscratch1(cop0)	(cop0->reg[MIPS_CP0_DESAVE][2])
+#define kvm_read_c0_guest_kscratch2(cop0)	(cop0->reg[MIPS_CP0_DESAVE][3])
+#define kvm_read_c0_guest_kscratch3(cop0)	(cop0->reg[MIPS_CP0_DESAVE][4])
+#define kvm_read_c0_guest_kscratch4(cop0)	(cop0->reg[MIPS_CP0_DESAVE][5])
+#define kvm_read_c0_guest_kscratch5(cop0)	(cop0->reg[MIPS_CP0_DESAVE][6])
+#define kvm_read_c0_guest_kscratch6(cop0)	(cop0->reg[MIPS_CP0_DESAVE][7])
+#define kvm_write_c0_guest_kscratch1(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][2] = (val))
+#define kvm_write_c0_guest_kscratch2(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][3] = (val))
+#define kvm_write_c0_guest_kscratch3(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][4] = (val))
+#define kvm_write_c0_guest_kscratch4(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][5] = (val))
+#define kvm_write_c0_guest_kscratch5(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][6] = (val))
+#define kvm_write_c0_guest_kscratch6(cop0, val)	(cop0->reg[MIPS_CP0_DESAVE][7] = (val))
 
 /*
  * Some of the guest registers may be modified asynchronously (e.g. from a
@@ -474,7 +400,7 @@ static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
 	unsigned long temp;
 	do {
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 		"	" __LL "%0, %1				\n"
 		"	or	%0, %2				\n"
 		"	" __SC	"%0, %1				\n"
@@ -490,7 +416,7 @@ static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
 	unsigned long temp;
 	do {
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 		"	" __LL "%0, %1				\n"
 		"	and	%0, %2				\n"
 		"	" __SC	"%0, %1				\n"
@@ -507,7 +433,7 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
 	unsigned long temp;
 	do {
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 		"	" __LL "%0, %1				\n"
 		"	and	%0, %2				\n"
 		"	or	%0, %3				\n"
@@ -542,7 +468,7 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
 
 static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
 {
-	return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) &&
+	return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
 		vcpu->fpu_enabled;
 }
 
@@ -589,9 +515,11 @@ struct kvm_mips_callbacks {
 	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
 			       struct kvm_mips_interrupt *irq);
 	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
-			   uint32_t cause);
+			   u32 cause);
 	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
-			 uint32_t cause);
+			 u32 cause);
+	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
+	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
 	int (*get_one_reg)(struct kvm_vcpu *vcpu,
 			   const struct kvm_one_reg *reg, s64 *v);
 	int (*set_one_reg)(struct kvm_vcpu *vcpu,
@@ -605,8 +533,13 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
 /* Debug: dump vcpu state */
 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
 
-/* Trampoline ASM routine to start running in "Guest" context */
-extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+/* Building of entry/exception code */
+int kvm_mips_entry_setup(void);
+void *kvm_mips_build_vcpu_run(void *addr);
+void *kvm_mips_build_exception(void *addr, void *handler);
+void *kvm_mips_build_exit(void *addr);
 
 /* FPU/MSA context management */
 void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
@@ -622,11 +555,11 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu);
 void kvm_lose_fpu(struct kvm_vcpu *vcpu);
 
 /* TLB handling */
-uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
+u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
 
-uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
+u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);
 
-uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
+u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
 
 extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
 					   struct kvm_vcpu *vcpu);
@@ -635,22 +568,24 @@ extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 					      struct kvm_vcpu *vcpu);
 
 extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
-						struct kvm_mips_tlb *tlb,
-						unsigned long *hpa0,
-						unsigned long *hpa1);
+						struct kvm_mips_tlb *tlb);
 
-extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
-						     uint32_t *opc,
+extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
+						     u32 *opc,
 						     struct kvm_run *run,
 						     struct kvm_vcpu *vcpu);
 
-extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
-						    uint32_t *opc,
+extern enum emulation_result kvm_mips_handle_tlbmod(u32 cause,
+						    u32 *opc,
 						    struct kvm_run *run,
 						    struct kvm_vcpu *vcpu);
 
 extern void kvm_mips_dump_host_tlbs(void);
 extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
+extern int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
+				   unsigned long entrylo0,
+				   unsigned long entrylo1,
+				   int flush_dcache_mask);
 extern void kvm_mips_flush_host_tlb(int skip_kseg0);
 extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
 
@@ -667,90 +602,90 @@ extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
 
 /* Emulation */
-uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
-enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
+u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu);
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
 
-extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
-						   uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
+						   u32 *opc,
 						   struct kvm_run *run,
 						   struct kvm_vcpu *vcpu);
 
-extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
-						      uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
+						      u32 *opc,
 						      struct kvm_run *run,
 						      struct kvm_vcpu *vcpu);
 
-extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
-							  uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
							  u32 *opc,
 							  struct kvm_run *run,
 							  struct kvm_vcpu *vcpu);
 
-extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
-							 uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
							 u32 *opc,
 							 struct kvm_run *run,
 							 struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
-							  uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
							  u32 *opc,
 							  struct kvm_run *run,
 							  struct kvm_vcpu *vcpu);
 
-extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
-							 uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
							 u32 *opc,
 							 struct kvm_run *run,
 							 struct kvm_vcpu *vcpu);
 
-extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
-						     uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
						     u32 *opc,
 						     struct kvm_run *run,
 						     struct kvm_vcpu *vcpu);
 
-extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
-						       uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
						       u32 *opc,
 						       struct kvm_run *run,
 						       struct kvm_vcpu *vcpu);
 
-extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
-						uint32_t *opc,
+extern enum emulation_result kvm_mips_handle_ri(u32 cause,
						u32 *opc,
 						struct kvm_run *run,
 						struct kvm_vcpu *vcpu);
 
-extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
-						     uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
						     u32 *opc,
 						     struct kvm_run *run,
 						     struct kvm_vcpu *vcpu);
 
-extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
-						     uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
						     u32 *opc,
 						     struct kvm_run *run,
 						     struct kvm_vcpu *vcpu);
 
-extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
-							uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
							u32 *opc,
 							struct kvm_run *run,
 							struct kvm_vcpu *vcpu);
 
-extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
-							  uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
							  u32 *opc,
 							  struct kvm_run *run,
 							  struct kvm_vcpu *vcpu);
 
-extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
-						       uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
						       u32 *opc,
 						       struct kvm_run *run,
 						       struct kvm_vcpu *vcpu);
 
-extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
-							  uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
							  u32 *opc,
 							  struct kvm_run *run,
 							  struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 							  struct kvm_run *run);
 
-uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
-void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack);
+u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
 void kvm_mips_init_count(struct kvm_vcpu *vcpu);
 int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
 int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
@@ -759,27 +694,27 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
 void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
 enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
 
-enum emulation_result kvm_mips_check_privilege(unsigned long cause,
-					       uint32_t *opc,
+enum emulation_result kvm_mips_check_privilege(u32 cause,
					       u32 *opc,
 					       struct kvm_run *run,
 					       struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
-					     uint32_t *opc,
-					     uint32_t cause,
+enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
+					     u32 *opc,
+					     u32 cause,
 					     struct kvm_run *run,
 					     struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
-					   uint32_t *opc,
-					   uint32_t cause,
+enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
+					   u32 *opc,
+					   u32 cause,
 					   struct kvm_run *run,
 					   struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_store(uint32_t inst,
-					     uint32_t cause,
+enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
+					     u32 cause,
 					     struct kvm_run *run,
 					     struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_load(uint32_t inst,
-					    uint32_t cause,
+enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
+					    u32 cause,
 					    struct kvm_run *run,
 					    struct kvm_vcpu *vcpu);
 
@@ -789,13 +724,13 @@ unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
 unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
 
 /* Dynamic binary translation */
-extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
-				      struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+extern int kvm_mips_trans_cache_index(union mips_instruction inst,
+				      u32 *opc, struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
 				   struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
+extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
 			       struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
+extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
 			       struct kvm_vcpu *vcpu);
 
 /* Misc */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index d68e685cd..bd8b9bbe1 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -55,7 +55,7 @@
 #define cpu_has_mipsmt		0
 #define cpu_has_vint		0
 #define cpu_has_veic		0
-#define cpu_hwrena_impl_bits	0xc0000000
+#define cpu_hwrena_impl_bits	(MIPS_HWRENA_IMPL1 | MIPS_HWRENA_IMPL2)
 
 #define cpu_has_wsbh		1
 #define cpu_has_rixi		(cpu_data[0].cputype != CPU_CAVIUM_OCTEON)
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
index cceae32a0..64b86b9d3 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/irq.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -42,8 +42,6 @@ enum octeon_irq {
 	OCTEON_IRQ_TIMER1,
 	OCTEON_IRQ_TIMER2,
 	OCTEON_IRQ_TIMER3,
-	OCTEON_IRQ_USB0,
-	OCTEON_IRQ_USB1,
 #ifndef CONFIG_PCI_MSI
 	OCTEON_IRQ_LAST = 127
 #endif
diff --git a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
index 374eefafb..8ff2cbdf2 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
@@ -12,6 +12,14 @@
 
 #ifdef __BIG_ENDIAN
 
+static inline bool __should_swizzle_bits(volatile void *a)
+{
+	extern const bool octeon_should_swizzle_table[];
+	u64 did = ((u64)(uintptr_t)a >> 40) & 0xff;
+
+	return octeon_should_swizzle_table[did];
+}
+
 # define __swizzle_addr_b(port)	(port)
 # define __swizzle_addr_w(port)	(port)
 # define __swizzle_addr_l(port)	(port)
@@ -19,7 +27,9 @@
 
 #else /* __LITTLE_ENDIAN */
 
-static inline bool __should_swizzle_addr(unsigned long p)
+#define __should_swizzle_bits(a)	false
+
+static inline bool __should_swizzle_addr(u64 p)
 {
 	/* boot bus? */
 	return ((p >> 40) & 0xff) == 0;
@@ -35,40 +45,14 @@ static inline bool __should_swizzle_addr(unsigned long p)
 
 #endif /* __BIG_ENDIAN */
 
-/*
- * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
- * less sane hardware forces software to fiddle with this...
- *
- * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
- * you can't have the numerical value of data and byte addresses within
- * multibyte quantities both preserved at the same time. Hence two
- * variations of functions: non-prefixed ones that preserve the value
- * and prefixed ones that preserve byte addresses. The latters are
- * typically used for moving raw data between a peripheral and memory (cf.
- * string I/O functions), hence the "__mem_" prefix.
- */
-#if defined(CONFIG_SWAP_IO_SPACE)
-
 # define ioswabb(a, x)		(x)
 # define __mem_ioswabb(a, x)	(x)
-# define ioswabw(a, x)		le16_to_cpu(x)
+# define ioswabw(a, x)		(__should_swizzle_bits(a) ? le16_to_cpu(x) : x)
 # define __mem_ioswabw(a, x)	(x)
-# define ioswabl(a, x)		le32_to_cpu(x)
+# define ioswabl(a, x)		(__should_swizzle_bits(a) ? le32_to_cpu(x) : x)
 # define __mem_ioswabl(a, x)	(x)
-# define ioswabq(a, x)		le64_to_cpu(x)
+# define ioswabq(a, x)		(__should_swizzle_bits(a) ? le64_to_cpu(x) : x)
 # define __mem_ioswabq(a, x)	(x)
 
-#else
-
-# define ioswabb(a, x)		(x)
-# define __mem_ioswabb(a, x)	(x)
-# define ioswabw(a, x)		(x)
-# define __mem_ioswabw(a, x)	cpu_to_le16(x)
-# define ioswabl(a, x)		(x)
-# define __mem_ioswabl(a, x)	cpu_to_le32(x)
-# define ioswabq(a, x)		(x)
-# define __mem_ioswabq(a, x)	cpu_to_le32(x)
-
-#endif
-
 #endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */
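The mangle-port change makes 16/32/64-bit I/O byte-swapping a per-device decision on big-endian Octeon, keyed by the device ID in physical address bits 47:40, instead of a compile-time CONFIG_SWAP_IO_SPACE choice. A sketch of the dispatch (the table contents here are invented; the real octeon_should_swizzle_table lives in the platform code):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for octeon_should_swizzle_table[]: in this sketch only
 * "device ID" 0 wants its data byte-swapped. */
static const int should_swizzle_table[256] = { [0] = 1 };

static uint16_t bswap16(uint16_t x)
{
        return (uint16_t)((x << 8) | (x >> 8));
}

/* Mirrors ioswabw() after the patch: swap only when the table says the
 * addressed device needs it, instead of swapping unconditionally. */
static uint16_t ioswabw(uint64_t addr, uint16_t x)
{
        unsigned did = (unsigned)((addr >> 40) & 0xff);

        return should_swizzle_table[did] ? bswap16(x) : x;
}

int main(void)
{
        printf("%#06x\n", ioswabw(0x0000000000001000ull, 0x1234)); /* 0x3412 */
        printf("%#06x\n", ioswabw(0x0001000000001000ull, 0x1234)); /* 0x1234 */
        return 0;
}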
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index 9411a4c0b..4fafeefe6 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -458,11 +458,22 @@ static inline int mips_cm_revision(void)
 static inline unsigned int mips_cm_max_vp_width(void)
 {
 	extern int smp_num_siblings;
+	uint32_t cfg;
 
 	if (mips_cm_revision() >= CM_REV_CM3)
 		return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;
 
-	if (config_enabled(CONFIG_SMP))
+	if (mips_cm_present()) {
+		/*
+		 * We presume that all cores in the system will have the same
+		 * number of VP(E)s, and if that ever changes then this will
+		 * need revisiting.
+		 */
+		cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
+		return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
+	}
+
+	if (IS_ENABLED(CONFIG_SMP))
 		return smp_num_siblings;
 
 	return 1;
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index e1ca65c62..7dd2dd479 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -53,7 +53,7 @@
 #define CP0_SEGCTL2 $5, 4
 #define CP0_WIRED $6
 #define CP0_INFO $7
-#define CP0_HWRENA $7, 0
+#define CP0_HWRENA $7
 #define CP0_BADVADDR $8
 #define CP0_BADINSTR $8, 1
 #define CP0_COUNT $9
@@ -533,6 +533,7 @@
 #define TX49_CONF_CWFON		(_ULCAST_(1) << 27)
 
 /* Bits specific to the MIPS32/64 PRA.	*/
+#define MIPS_CONF_VI		(_ULCAST_(1) << 3)
 #define MIPS_CONF_MT		(_ULCAST_(7) << 7)
 #define MIPS_CONF_MT_TLB	(_ULCAST_(1) << 7)
 #define MIPS_CONF_MT_FTLB	(_ULCAST_(4) << 7)
@@ -659,8 +660,6 @@
 #define MIPS_CONF7_IAR		(_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR		(_ULCAST_(1) << 16)
 
-/* FTLB probability bits for R6 */
-#define MIPS_CONF7_FTLBP_SHIFT	(18)
 
 /* WatchLo* register definitions */
 #define MIPS_WATCHLO_IRW	(_ULCAST_(0x7) << 0)
@@ -853,6 +852,24 @@
 #define MIPS_CDMMBASE_ADDR_SHIFT 11
 #define MIPS_CDMMBASE_ADDR_START 15
 
+/* RDHWR register numbers */
+#define MIPS_HWR_CPUNUM		0	/* CPU number */
+#define MIPS_HWR_SYNCISTEP	1	/* SYNCI step size */
+#define MIPS_HWR_CC		2	/* Cycle counter */
+#define MIPS_HWR_CCRES		3	/* Cycle counter resolution */
+#define MIPS_HWR_ULR		29	/* UserLocal */
+#define MIPS_HWR_IMPL1		30	/* Implementation dependent */
+#define MIPS_HWR_IMPL2		31	/* Implementation dependent */
+
+/* Bits in HWREna register */
+#define MIPS_HWRENA_CPUNUM	(_ULCAST_(1) << MIPS_HWR_CPUNUM)
+#define MIPS_HWRENA_SYNCISTEP	(_ULCAST_(1) << MIPS_HWR_SYNCISTEP)
+#define MIPS_HWRENA_CC		(_ULCAST_(1) << MIPS_HWR_CC)
+#define MIPS_HWRENA_CCRES	(_ULCAST_(1) << MIPS_HWR_CCRES)
+#define MIPS_HWRENA_ULR		(_ULCAST_(1) << MIPS_HWR_ULR)
+#define MIPS_HWRENA_IMPL1	(_ULCAST_(1) << MIPS_HWR_IMPL1)
+#define MIPS_HWRENA_IMPL2	(_ULCAST_(1) << MIPS_HWR_IMPL2)
+
 /*
  * Bitfields in the TX39 family CP0 Configuration Register 3
  */
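The new MIPS_HWR_*/MIPS_HWRENA_* names let the Octeon cpu_hwrena_impl_bits override earlier in this patch be written symbolically; the value is unchanged, since bits 30 and 31 still come out to 0xc0000000:

#include <stdio.h>

/* RDHWR register numbers from the hunk above */
#define MIPS_HWR_CPUNUM	0
#define MIPS_HWR_ULR	29
#define MIPS_HWR_IMPL1	30
#define MIPS_HWR_IMPL2	31

#define HWRENA_BIT(r)	(1ul << (r))

int main(void)
{
        /* MIPS_HWRENA_IMPL1 | MIPS_HWRENA_IMPL2 == the old 0xc0000000 */
        unsigned long impl = HWRENA_BIT(MIPS_HWR_IMPL1) |
                             HWRENA_BIT(MIPS_HWR_IMPL2);

        printf("impl bits:  %#lx\n", impl);
        printf("cpunum|ulr: %#lx\n",
               HWRENA_BIT(MIPS_HWR_CPUNUM) | HWRENA_BIT(MIPS_HWR_ULR));
        return 0;
}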
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index 1afa1f986..f6ba08d77 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -2,11 +2,20 @@
 #define __ASM_MMU_H
 
 #include <linux/atomic.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
 
 typedef struct {
 	unsigned long asid[NR_CPUS];
 	void *vdso;
 	atomic_t fp_mode_switching;
+
+	/* lock to be held whilst modifying bd_emupage_allocmap */
+	spinlock_t bd_emupage_lock;
+	/* bitmap tracking allocation of bd_emupage frames */
+	unsigned long *bd_emupage_allocmap;
+	/* wait queue for threads requiring an emuframe */
+	wait_queue_head_t bd_emupage_queue;
 } mm_context_t;
 
 #endif /* __ASM_MMU_H */
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index fc57e135c..ddd57ade1 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -16,6 +16,7 @@
 #include <linux/smp.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
+#include <asm/dsemul.h>
 #include <asm/hazards.h>
 #include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
@@ -128,6 +129,10 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
 	atomic_set(&mm->context.fp_mode_switching, 0);
 
+	mm->context.bd_emupage_allocmap = NULL;
+	spin_lock_init(&mm->context.bd_emupage_lock);
+	init_waitqueue_head(&mm->context.bd_emupage_queue);
+
 	return 0;
 }
 
@@ -162,6 +167,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  */
 static inline void destroy_context(struct mm_struct *mm)
 {
+	dsemul_mm_cleanup(mm);
 }
 
 #define deactivate_mm(tsk, mm)	do { } while (0)
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index ddf496cb2..8967b475a 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -168,6 +168,7 @@ static inline unsigned int read_msa_##name(void)	\
 	unsigned int reg;	\
 	__asm__ __volatile__(	\
 	"	.set	push\n"	\
+	"	.set	fp=64\n"	\
 	"	.set	msa\n"	\
 	"	cfcmsa	%0, $" #cs "\n"	\
 	"	.set	pop\n"	\
@@ -179,6 +180,7 @@ static inline void write_msa_##name(unsigned int val)	\
 {	\
 	__asm__ __volatile__(	\
 	"	.set	push\n"	\
+	"	.set	fp=64\n"	\
 	"	.set	msa\n"	\
 	"	ctcmsa	$" #cs ", %0\n"	\
 	"	.set	pop\n"	\
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 21ed7150f..5f9875980 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -162,16 +162,34 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 /*
  * __pa()/__va() should be used only during mem init.
  */
-#ifdef CONFIG_64BIT
-#define __pa(x)		\
-({	\
-    unsigned long __x = (unsigned long)(x);	\
-    __x < CKSEG0 ? XPHYSADDR(__x) : CPHYSADDR(__x);	\
-})
-#else
-#define __pa(x)		\
-    ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
-#endif
+static inline unsigned long ___pa(unsigned long x)
+{
+	if (IS_ENABLED(CONFIG_64BIT)) {
+		/*
+		 * For MIPS64 the virtual address may either be in one of
+		 * the compatibility segments ckseg0 or ckseg1, or it may
+		 * be in xkphys.
+		 */
+		return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
+	}
+
+	if (!IS_ENABLED(CONFIG_EVA)) {
+		/*
+		 * We're using the standard MIPS32 legacy memory map, ie.
+		 * the address x is going to be in kseg0 or kseg1. We can
+		 * handle either case by masking out the desired bits using
+		 * CPHYSADDR.
+		 */
+		return CPHYSADDR(x);
+	}
+
+	/*
+	 * EVA is in use so the memory map could be anything, making it not
+	 * safe to just mask out bits.
+	 */
+	return x - PAGE_OFFSET + PHYS_OFFSET;
+}
+#define __pa(x)		___pa((unsigned long)(x))
 #define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
 #include <asm/io.h>
@@ -229,8 +247,10 @@ extern int __virt_addr_valid(const volatile void *kaddr);
 #define virt_addr_valid(kaddr)	\
 	__virt_addr_valid((const volatile void *) (kaddr))
 
-#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
-				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS \
+	(VM_READ | VM_WRITE | \
+	 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
 #define UNCAC_ADDR(addr)	((addr) - PAGE_OFFSET + UNCAC_BASE)
 #define CAC_ADDR(addr)		((addr) - UNCAC_BASE + PAGE_OFFSET)
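The legacy-map branch of ___pa() works because kseg0 and kseg1 are fixed 512MB windows onto low physical memory, so clearing the top three address bits (CPHYSADDR) recovers the physical address; under EVA the segments are programmable, which is why that configuration falls back to offset arithmetic. For instance:

#include <stdio.h>
#include <stdint.h>

/* KSEG virtual->physical for the legacy MIPS32 map: kseg0/kseg1 are
 * fixed windows onto the low 512MB, so masking to 29 bits suffices. */
#define CPHYSADDR(a)	((a) & 0x1fffffffu)

int main(void)
{
        uint32_t kseg0 = 0x80400000; /* cached window   */
        uint32_t kseg1 = 0xa0400000; /* uncached window */

        /* Both alias the same physical address 0x00400000. */
        printf("kseg0 -> %#010x\n", (unsigned)CPHYSADDR(kseg0));
        printf("kseg1 -> %#010x\n", (unsigned)CPHYSADDR(kseg1));
        return 0;
}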
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 86b239d9d..9b63cd412 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -80,16 +80,6 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 
 #define HAVE_ARCH_PCI_RESOURCE_TO_USER
 
-static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
-		const struct resource *rsrc, resource_size_t *start,
-		resource_size_t *end)
-{
-	phys_addr_t size = resource_size(rsrc);
-
-	*start = fixup_bigphys_addr(rsrc->start, size);
-	*end = rsrc->start + size;
-}
-
 /*
  * Dynamic DMA mapping stuff.
  * MIPS has everything mapped statically.
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 7d44e8881..70128d3f7 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -159,7 +159,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 		 * it better already be global)
 		 */
 		if (pte_none(*buddy)) {
-			if (!config_enabled(CONFIG_XPA))
+			if (!IS_ENABLED(CONFIG_XPA))
 				buddy->pte_low |= _PAGE_GLOBAL;
 			buddy->pte_high |= _PAGE_GLOBAL;
 		}
@@ -172,7 +172,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
 	htw_stop();
 	/* Preserve global status for the pair */
-	if (config_enabled(CONFIG_XPA)) {
+	if (IS_ENABLED(CONFIG_XPA)) {
 		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
 			null.pte_high = _PAGE_GLOBAL;
 	} else {
@@ -319,7 +319,7 @@ static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
 static inline pte_t pte_wrprotect(pte_t pte)
 {
 	pte.pte_low  &= ~_PAGE_WRITE;
-	if (!config_enabled(CONFIG_XPA))
+	if (!IS_ENABLED(CONFIG_XPA))
 		pte.pte_low &= ~_PAGE_SILENT_WRITE;
 	pte.pte_high &= ~_PAGE_SILENT_WRITE;
 	return pte;
@@ -328,7 +328,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
 static inline pte_t pte_mkclean(pte_t pte)
 {
 	pte.pte_low  &= ~_PAGE_MODIFIED;
-	if (!config_enabled(CONFIG_XPA))
+	if (!IS_ENABLED(CONFIG_XPA))
 		pte.pte_low &= ~_PAGE_SILENT_WRITE;
 	pte.pte_high &= ~_PAGE_SILENT_WRITE;
 	return pte;
@@ -337,7 +337,7 @@ static inline pte_t pte_mkclean(pte_t pte)
 static inline pte_t pte_mkold(pte_t pte)
 {
 	pte.pte_low  &= ~_PAGE_ACCESSED;
-	if (!config_enabled(CONFIG_XPA))
+	if (!IS_ENABLED(CONFIG_XPA))
 		pte.pte_low &= ~_PAGE_SILENT_READ;
 	pte.pte_high &= ~_PAGE_SILENT_READ;
 	return pte;
@@ -347,7 +347,7 @@ static inline pte_t pte_mkwrite(pte_t pte)
 {
 	pte.pte_low |= _PAGE_WRITE;
 	if (pte.pte_low & _PAGE_MODIFIED) {
-		if (!config_enabled(CONFIG_XPA))
+		if (!IS_ENABLED(CONFIG_XPA))
 			pte.pte_low |= _PAGE_SILENT_WRITE;
 		pte.pte_high |= _PAGE_SILENT_WRITE;
 	}
@@ -358,7 +358,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
 {
 	pte.pte_low |= _PAGE_MODIFIED;
 	if (pte.pte_low & _PAGE_WRITE) {
-		if (!config_enabled(CONFIG_XPA))
+		if (!IS_ENABLED(CONFIG_XPA))
 			pte.pte_low |= _PAGE_SILENT_WRITE;
 		pte.pte_high |= _PAGE_SILENT_WRITE;
 	}
@@ -369,7 +369,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
 {
 	pte.pte_low |= _PAGE_ACCESSED;
 	if (!(pte.pte_low & _PAGE_NO_READ)) {
-		if (!config_enabled(CONFIG_XPA))
+		if (!IS_ENABLED(CONFIG_XPA))
 			pte.pte_low |= _PAGE_SILENT_READ;
 		pte.pte_high |= _PAGE_SILENT_READ;
 	}
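All of the config_enabled() -> IS_ENABLED() conversions in this patch are mechanical: both evaluate to a compile-time 0/1 usable in a plain C `if`, so dead branches still get parsed and type-checked before being discarded, and IS_ENABLED() is the documented form (true for =y and =m). The pattern, outside the kernel:

#include <stdio.h>

/* Build-time switch in the spirit of CONFIG_XPA; define to 1 to flip. */
#ifndef ENABLE_XPA
#define ENABLE_XPA 0
#endif

int main(void)
{
        /* Like IS_ENABLED(): an ordinary `if` on a constant, so the
         * compiler checks both branches but emits only the live one. */
        if (!ENABLE_XPA)
                printf("legacy PTE layout path\n");
        else
                printf("XPA PTE layout path\n");
        return 0;
}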
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 7e78b6208..0d36c87ac 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -11,12 +11,14 @@
 #ifndef _ASM_PROCESSOR_H
 #define _ASM_PROCESSOR_H
 
+#include <linux/atomic.h>
 #include <linux/cpumask.h>
 #include <linux/threads.h>
 
 #include <asm/cachectl.h>
 #include <asm/cpu.h>
 #include <asm/cpu-info.h>
+#include <asm/dsemul.h>
 #include <asm/mipsregs.h>
 #include <asm/prefetch.h>
 
@@ -78,7 +80,11 @@ extern unsigned int vced_count, vcei_count;
 
 #endif
 
-#define STACK_TOP	(TASK_SIZE & PAGE_MASK)
+/*
+ * One page above the stack is used for branch delay slot "emulation".
+ * See dsemul.c for details.
+ */
+#define STACK_TOP	((TASK_SIZE & PAGE_MASK) - PAGE_SIZE)
 
 /*
  * This decides where the kernel will search for a free chunk of vm
@@ -256,6 +262,12 @@ struct thread_struct {
 
 	/* Saved fpu/fpu emulator stuff. */
 	struct mips_fpu_struct fpu FPU_ALIGN;
+	/* Assigned branch delay slot 'emulation' frame */
+	atomic_t bd_emu_frame;
+	/* PC of the branch from a branch delay slot 'emulation' */
+	unsigned long bd_emu_branch_pc;
+	/* PC to continue from following a branch delay slot 'emulation' */
+	unsigned long bd_emu_cont_pc;
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* Emulated instruction count */
 	unsigned long emulated_fp;
@@ -323,6 +335,10 @@ struct thread_struct {
 	 * FPU affinity state (null if not FPAFF)	\
 	 */	\
 	FPAFF_INIT	\
+	/* Delay slot emulation */	\
+	.bd_emu_frame = ATOMIC_INIT(BD_EMUFRAME_NONE),	\
+	.bd_emu_branch_pc = 0,	\
+	.bd_emu_cont_pc = 0,	\
 	/*	\
 	 * Saved DSP stuff	\
 	 */	\
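Taken together, the STACK_TOP and thread_struct changes implement per-mm "emuframes": the page reserved above the stack holds small pairs of (delay-slot instruction, BREAK_MATH trap) that a thread executes in user context. A rough sketch of the idea (layout and steps paraphrased, not the kernel's exact code; see arch/mips/kernel/dsemul.c for the real implementation):

/* Hypothetical view of one emuframe in the page above STACK_TOP. */
struct emuframe {
        unsigned int emul;    /* instruction copied from the delay slot */
        unsigned int badinst; /* BREAK_MATH(): traps back into the kernel */
};

/* dsemul flow, in outline:
 *   1. allocate a frame index in mm->context.bd_emupage_allocmap
 *      (waiters sleep on bd_emupage_queue if the page is full);
 *   2. write the delay-slot instruction and BREAK_MATH() into the frame;
 *   3. record branch_pc/cont_pc in thread.bd_emu_* and point the user
 *      PC at the frame;
 *   4. the BRK_MEMU break returns control via do_dsemulret(), which
 *      restores the PC to cont_pc and frees the frame.
 */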
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 38902bf97..667ca3c46 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -210,7 +210,11 @@ static inline void protected_writeback_dcache_line(unsigned long addr)
 
 static inline void protected_writeback_scache_line(unsigned long addr)
 {
+#ifdef CONFIG_EVA
+	protected_cachee_op(Hit_Writeback_Inv_SD, addr);
+#else
 	protected_cache_op(Hit_Writeback_Inv_SD, addr);
+#endif
 }
 
 /*
diff --git a/arch/mips/include/asm/seccomp.h b/arch/mips/include/asm/seccomp.h
index 684fb3a12..d886d6f76 100644
--- a/arch/mips/include/asm/seccomp.h
+++ b/arch/mips/include/asm/seccomp.h
@@ -16,10 +16,10 @@ static inline const int *get_compat_mode1_syscalls(void)
 		0, /* null terminated */
 	};
 
-	if (config_enabled(CONFIG_MIPS32_O32) && test_thread_flag(TIF_32BIT_REGS))
+	if (IS_ENABLED(CONFIG_MIPS32_O32) && test_thread_flag(TIF_32BIT_REGS))
 		return syscalls_O32;
 
-	if (config_enabled(CONFIG_MIPS32_N32))
+	if (IS_ENABLED(CONFIG_MIPS32_N32))
 		return syscalls_N32;
 
 	BUG();
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
index d7bfdeba9..4f5279a83 100644
--- a/arch/mips/include/asm/setup.h
+++ b/arch/mips/include/asm/setup.h
@@ -21,6 +21,7 @@ extern void *set_vi_handler(int n, vi_handler_t addr);
 
 extern void *set_except_vector(int n, void *addr);
 extern unsigned long ebase;
+extern unsigned int hwrena;
 extern void per_cpu_trap_init(bool);
 extern void cpu_cache_init(void);
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h
index 2292373ff..23d6b8015 100644
--- a/arch/mips/include/asm/signal.h
+++ b/arch/mips/include/asm/signal.h
@@ -11,7 +11,7 @@
 
 #include <uapi/asm/signal.h>
 
-#ifdef CONFIG_MIPS32_COMPAT
+#ifdef CONFIG_MIPS32_O32
 extern struct mips_abi mips_abi_32;
 
 #define sig_uses_siginfo(ka, abi)	\
@@ -19,8 +19,8 @@ extern struct mips_abi mips_abi_32;
 	((ka)->sa.sa_flags & SA_SIGINFO))
 #else
 #define sig_uses_siginfo(ka, abi)	\
-	(config_enabled(CONFIG_64BIT) ? 1 :	\
-	 (config_enabled(CONFIG_TRAD_SIGNALS) ?	\
+	(IS_ENABLED(CONFIG_64BIT) ? 1 :	\
+	 (IS_ENABLED(CONFIG_TRAD_SIGNALS) ?	\
 	  ((ka)->sa.sa_flags & SA_SIGINFO) : 1) )
 #endif
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 03722d432..8bc6c70a4 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -23,7 +23,7 @@ extern int smp_num_siblings;
 extern cpumask_t cpu_sibling_map[];
 extern cpumask_t cpu_core_map[];
-extern cpumask_t cpu_foreign_map;
+extern cpumask_t cpu_foreign_map[];
 
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
@@ -53,6 +53,8 @@ extern cpumask_t cpu_coherent_mask;
 
 extern void asmlinkage smp_bootstrap(void);
 
+extern void calculate_cpu_foreign_map(void);
+
 /*
  * this function sends a 'reschedule' IPI to another CPU.
  * it goes straight through and wastes no time serializing
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 40196bebe..f485afe51 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -12,6 +12,7 @@
 #include <linux/compiler.h>
 
 #include <asm/barrier.h>
+#include <asm/processor.h>
 #include <asm/compiler.h>
 #include <asm/war.h>
 
@@ -48,8 +49,22 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 }
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
-	while (arch_spin_is_locked(x)) { cpu_relax(); }
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+	u16 owner = READ_ONCE(lock->h.serving_now);
+	smp_rmb();
+	for (;;) {
+		arch_spinlock_t tmp = READ_ONCE(*lock);
+
+		if (tmp.h.serving_now == tmp.h.ticket ||
+		    tmp.h.serving_now != owner)
+			break;
+
+		cpu_relax();
+	}
+	smp_acquire__after_ctrl_dep();
+}
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
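MIPS spinlocks are ticket locks, and the rewritten arch_spin_unlock_wait() only needs to wait until the critical section that was active when it was called has ended: either the lock goes free, or serving_now moves past the owner sampled at entry. The exit predicate in isolation:

#include <stdio.h>
#include <stdint.h>

struct ticket_lock {
        uint16_t ticket;      /* next ticket to hand out */
        uint16_t serving_now; /* ticket currently holding the lock */
};

/* Exit condition from the patched arch_spin_unlock_wait(): stop once
 * the lock is free (serving_now == ticket) or ownership has moved past
 * the holder we sampled on entry (serving_now != owner). */
static int done_waiting(struct ticket_lock s, uint16_t owner)
{
        return s.serving_now == s.ticket || s.serving_now != owner;
}

int main(void)
{
        uint16_t owner = 3; /* holder sampled when the wait began */

        printf("%d\n", done_waiting((struct ticket_lock){5, 3}, owner)); /* 0: still held by 3 */
        printf("%d\n", done_waiting((struct ticket_lock){5, 4}, owner)); /* 1: moved on */
        printf("%d\n", done_waiting((struct ticket_lock){5, 5}, owner)); /* 1: free */
        return 0;
}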
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 47bc45a67..d87882513 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -99,7 +99,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
 {
 	int ret;
 	/* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
-	if ((config_enabled(CONFIG_32BIT) ||
+	if ((IS_ENABLED(CONFIG_32BIT) ||
 	    test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
 	    (regs->regs[2] == __NR_syscall))
 		i++;
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index b6e20f305..21a2aaba2 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -89,7 +89,7 @@ extern u64 __ua_limit;
  */
 static inline bool eva_kernel_access(void)
 {
-	if (!config_enabled(CONFIG_EVA))
+	if (!IS_ENABLED(CONFIG_EVA))
 		return false;
 
 	return segment_eq(get_fs(), get_ds());
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index b6ecfeee4..f7929f65f 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -104,8 +104,13 @@ Ip_u1s2(_bltz);
 Ip_u1s2(_bltzl);
 Ip_u1u2s3(_bne);
 Ip_u2s3u1(_cache);
+Ip_u1u2(_cfc1);
+Ip_u2u1(_cfcmsa);
+Ip_u1u2(_ctc1);
+Ip_u2u1(_ctcmsa);
 Ip_u2u1s3(_daddiu);
 Ip_u3u1u2(_daddu);
+Ip_u1(_di);
 Ip_u2u1msbu3(_dins);
 Ip_u2u1msbu3(_dinsm);
 Ip_u1u2(_divu);
@@ -141,6 +146,8 @@ Ip_u1(_mfhi);
 Ip_u1(_mflo);
 Ip_u1u2u3(_mtc0);
 Ip_u1u2u3(_mthc0);
+Ip_u1(_mthi);
+Ip_u1(_mtlo);
 Ip_u3u1u2(_mul);
 Ip_u3u1u2(_or);
 Ip_u2u1u3(_ori);
diff --git a/arch/mips/include/asm/uprobes.h b/arch/mips/include/asm/uprobes.h
index 34c325c67..70a4a2f17 100644
--- a/arch/mips/include/asm/uprobes.h
+++ b/arch/mips/include/asm/uprobes.h
@@ -36,7 +36,6 @@ struct arch_uprobe {
 	unsigned long	resume_epc;
 	u32	insn[2];
 	u32	ixol[2];
-	union	mips_instruction orig_inst[MAX_UINSN_BYTES / 4];
 };
 
 struct arch_uprobe_task {
diff --git a/arch/mips/include/uapi/asm/auxvec.h b/arch/mips/include/uapi/asm/auxvec.h
index c9c719527..45ba259a3 100644
--- a/arch/mips/include/uapi/asm/auxvec.h
+++ b/arch/mips/include/uapi/asm/auxvec.h
@@ -14,4 +14,6 @@
 /* Location of VDSO image. */
 #define AT_SYSINFO_EHDR		33
 
+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
 #endif /* __ASM_AUXVEC_H */
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 8051f9aa1..77429d162 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -21,20 +21,20 @@
 enum major_op {
 	spec_op, bcond_op, j_op, jal_op,
 	beq_op, bne_op, blez_op, bgtz_op,
-	addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op,
+	addi_op, pop10_op = addi_op, addiu_op, slti_op, sltiu_op,
 	andi_op, ori_op, xori_op, lui_op,
 	cop0_op, cop1_op, cop2_op, cop1x_op,
 	beql_op, bnel_op, blezl_op, bgtzl_op,
-	daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op,
+	daddi_op, pop30_op = daddi_op, daddiu_op, ldl_op, ldr_op,
 	spec2_op, jalx_op, mdmx_op, msa_op = mdmx_op, spec3_op,
 	lb_op, lh_op, lwl_op, lw_op,
 	lbu_op, lhu_op, lwr_op, lwu_op,
 	sb_op, sh_op, swl_op, sw_op,
 	sdl_op, sdr_op, swr_op, cache_op,
 	ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op,
-	lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op,
+	lld_op, ldc1_op, ldc2_op, pop66_op = ldc2_op, ld_op,
 	sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op,
-	scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op
+	scd_op, sdc1_op, sdc2_op, pop76_op = sdc2_op, sd_op
 };
 
 /*
@@ -92,6 +92,50 @@ enum spec3_op {
 	rdhwr_op  = 0x3b
 };
 
+/*
+ * Bits 10-6 minor opcode for r6 spec mult/div encodings
+ */
+enum mult_op {
+	mult_mult_op = 0x0,
+	mult_mul_op = 0x2,
+	mult_muh_op = 0x3,
+};
+enum multu_op {
+	multu_multu_op = 0x0,
+	multu_mulu_op = 0x2,
+	multu_muhu_op = 0x3,
+};
+enum div_op {
+	div_div_op = 0x0,
+	div_div6_op = 0x2,
+	div_mod_op = 0x3,
+};
+enum divu_op {
+	divu_divu_op = 0x0,
+	divu_divu6_op = 0x2,
+	divu_modu_op = 0x3,
+};
+enum dmult_op {
+	dmult_dmult_op = 0x0,
+	dmult_dmul_op = 0x2,
+	dmult_dmuh_op = 0x3,
+};
+enum dmultu_op {
+	dmultu_dmultu_op = 0x0,
+	dmultu_dmulu_op = 0x2,
+	dmultu_dmuhu_op = 0x3,
+};
+enum ddiv_op {
+	ddiv_ddiv_op = 0x0,
+	ddiv_ddiv6_op = 0x2,
+	ddiv_dmod_op = 0x3,
+};
+enum ddivu_op {
+	ddivu_ddivu_op = 0x0,
+	ddivu_ddivu6_op = 0x2,
+	ddivu_dmodu_op = 0x3,
+};
+
 /*
  * rt field of bcond opcodes.
 */
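The new mult/div enums cover MIPS R6's reuse of the legacy SPECIAL encodings, where bits 10:6 (the old shift-amount field) select between e.g. MUL (low half) and MUH (high half). A decoding sketch, with the instruction word assembled by hand (rs=$1, rt=$2, rd=$3):

#include <stdio.h>
#include <stdint.h>

/* From the enums above: bits 10:6 select the R6 variant. */
enum { MULT_MULT = 0x0, MULT_MUL = 0x2, MULT_MUH = 0x3 };

int main(void)
{
        /* mul $3, $1, $2 on R6: SPECIAL opcode, rs=1, rt=2, rd=3,
         * minor=MULT_MUL in bits 10:6, func=0x18 (mult). */
        uint32_t insn = (0u << 26) | (1u << 21) | (2u << 16) | (3u << 11) |
                        ((uint32_t)MULT_MUL << 6) | 0x18;
        unsigned minor = (insn >> 6) & 0x1f;

        printf("minor=%#x -> %s\n", minor,
               minor == MULT_MUL ? "mul (low half)" :
               minor == MULT_MUH ? "muh (high half)" : "legacy mult");
        return 0;
}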
@@ -103,7 +147,7 @@ enum rt_op {
 	bltzal_op, bgezal_op, bltzall_op, bgezall_op,
 	rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17,
 	rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b,
-	bposge32_op, rt_op_0x1d, rt_op_0x1e, rt_op_0x1f
+	bposge32_op, rt_op_0x1d, rt_op_0x1e, synci_op
 };
 
 /*
@@ -237,6 +281,21 @@ enum bshfl_func {
 	seh_op  = 0x18,
 };
 
+/*
+ * MSA minor opcodes.
+ */
+enum msa_func {
+	msa_elm_op = 0x19,
+};
+
+/*
+ * MSA ELM opcodes.
+ */
+enum msa_elm {
+	msa_ctc_op = 0x3e,
+	msa_cfc_op = 0x7e,
+};
+
 /*
  * func field for MSA MI10 format.
 */
@@ -264,7 +323,7 @@ enum mm_major_op {
 	mm_pool32b_op, mm_pool16b_op, mm_lhu16_op, mm_andi16_op,
 	mm_addiu32_op, mm_lhu32_op, mm_sh32_op, mm_lh32_op,
 	mm_pool32i_op, mm_pool16c_op, mm_lwsp16_op, mm_pool16d_op,
-	mm_ori32_op, mm_pool32f_op, mm_reserved1_op, mm_reserved2_op,
+	mm_ori32_op, mm_pool32f_op, mm_pool32s_op, mm_reserved2_op,
 	mm_pool32c_op, mm_lwgp16_op, mm_lw16_op, mm_pool16e_op,
 	mm_xori32_op, mm_jals32_op, mm_addiupc_op, mm_reserved3_op,
 	mm_reserved4_op, mm_pool16f_op, mm_sb16_op, mm_beqz16_op,
@@ -360,7 +419,10 @@ enum mm_32axf_minor_op {
 	mm_mflo32_op = 0x075,
 	mm_jalrhb_op = 0x07c,
 	mm_tlbwi_op = 0x08d,
+	mm_mthi32_op = 0x0b5,
 	mm_tlbwr_op = 0x0cd,
+	mm_mtlo32_op = 0x0f5,
+	mm_di_op = 0x11d,
 	mm_jalrs_op = 0x13c,
 	mm_jalrshb_op = 0x17c,
 	mm_sync_op = 0x1ad,
@@ -478,6 +540,13 @@ enum mm_32f_73_minor_op {
 	mm_fcvts1_op = 0xed,
 };
 
+/*
+ * (microMIPS) POOL32S minor opcodes.
+ */
+enum mm_32s_minor_op {
+	mm_32s_elm_op = 0x16,
+};
+
 /*
  * (microMIPS) POOL16C minor opcodes.
 */
@@ -586,6 +655,36 @@ struct r_format {	/* Register format */
 	;))))))
 };
 
+struct c0r_format {	/* C0 register format */
+	__BITFIELD_FIELD(unsigned int opcode : 6,
+	__BITFIELD_FIELD(unsigned int rs : 5,
+	__BITFIELD_FIELD(unsigned int rt : 5,
+	__BITFIELD_FIELD(unsigned int rd : 5,
+	__BITFIELD_FIELD(unsigned int z: 8,
+	__BITFIELD_FIELD(unsigned int sel : 3,
+	;))))))
+};
+
+struct mfmc0_format {	/* MFMC0 register format */
+	__BITFIELD_FIELD(unsigned int opcode : 6,
+	__BITFIELD_FIELD(unsigned int rs : 5,
+	__BITFIELD_FIELD(unsigned int rt : 5,
+	__BITFIELD_FIELD(unsigned int rd : 5,
+	__BITFIELD_FIELD(unsigned int re : 5,
+	__BITFIELD_FIELD(unsigned int sc : 1,
+	__BITFIELD_FIELD(unsigned int : 2,
+	__BITFIELD_FIELD(unsigned int sel : 3,
+	;))))))))
+};
+
+struct co_format {	/* C0 CO format */
+	__BITFIELD_FIELD(unsigned int opcode : 6,
+	__BITFIELD_FIELD(unsigned int co : 1,
+	__BITFIELD_FIELD(unsigned int code : 19,
+	__BITFIELD_FIELD(unsigned int func : 6,
+	;))))
+};
+
 struct p_format {	/* Performance counter format (R10000) */
 	__BITFIELD_FIELD(unsigned int opcode : 6,
 	__BITFIELD_FIELD(unsigned int rs : 5,
@@ -937,6 +1036,9 @@ union mips_instruction {
 	struct u_format u_format;
 	struct c_format c_format;
 	struct r_format r_format;
+	struct c0r_format c0r_format;
+	struct mfmc0_format mfmc0_format;
+	struct co_format co_format;
 	struct p_format p_format;
 	struct f_format f_format;
 	struct ma_format ma_format;
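The new mfmc0_format describes DI/EI, which toggle Status.IE via the sc bit. Decoding with plain shifts (equivalent to the big-endian bitfield layout above; the two example words are the standard di/ei encodings):

#include <stdio.h>
#include <stdint.h>

/* Field extraction mirroring mfmc0_format (opcode:6 rs:5 rt:5 rd:5
 * re:5 sc:1 pad:2 sel:3, MSB first). di/ei are COP0 MFMC0 encodings
 * that clear/set Status.IE via the sc bit. */
static void decode_mfmc0(uint32_t insn)
{
        unsigned opcode = insn >> 26;
        unsigned rs     = (insn >> 21) & 0x1f;
        unsigned rt     = (insn >> 16) & 0x1f;
        unsigned rd     = (insn >> 11) & 0x1f;
        unsigned sc     = (insn >> 5) & 1;

        printf("opcode=%#x rs=%#x rt=%u rd=%u -> %s\n",
               opcode, rs, rt, rd, sc ? "ei (set IE)" : "di (clear IE)");
}

int main(void)
{
        decode_mfmc0(0x41606000); /* di: COP0, rs=0xb (MFMC0), rd=12 (Status) */
        decode_mfmc0(0x41606020); /* ei: same encoding with sc=1 */
        return 0;
}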